From b2bcd452ea74a30a736f5fe38cddd5db7f94ddfe Mon Sep 17 00:00:00 2001 From: danhalligan Date: Thu, 22 Aug 2024 01:58:52 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20danhalli?= =?UTF-8?q?gan/ISLRv2-solutions@ca445e2a66e0816e671f23ddcbe820f1a21667f2?= =?UTF-8?q?=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 03-linear-regression.md | 4 +- 04-classification.md | 6 +- 05-resampling-methods.md | 2 +- 08-tree-based-methods.md | 2 +- 10-deep-learning.md | 66 +++++++------- .../figure-html/unnamed-chunk-12-1.png | Bin 165925 -> 172911 bytes .../figure-html/unnamed-chunk-21-1.png | Bin 86636 -> 83368 bytes .../figure-html/unnamed-chunk-7-1.png | Bin 51315 -> 51339 bytes classification.html | 6 +- deep-learning.html | 86 ++++++++++-------- linear-regression.html | 4 +- resampling-methods.html | 2 +- search_index.json | 2 +- tree-based-methods.html | 2 +- 14 files changed, 94 insertions(+), 88 deletions(-) diff --git a/03-linear-regression.md b/03-linear-regression.md index 58d150a..13314b1 100644 --- a/03-linear-regression.md +++ b/03-linear-regression.md @@ -96,8 +96,8 @@ plot_ly(x = x, y = y) |> ``` ```{=html} -
-<!-- htmlwidget <div>/<script> payload elided in this extract -->
+<!-- regenerated htmlwidget <div>/<script> payload elided in this extract -->
+ ``` Option iii correct. diff --git a/04-classification.md b/04-classification.md index ddd11c8..dfc3986 100644 --- a/04-classification.md +++ b/04-classification.md @@ -679,8 +679,8 @@ fit <- knn( ``` ## ## fit Down Up -## Down 21 29 -## Up 22 32 +## Down 21 30 +## Up 22 31 ``` ``` r @@ -688,7 +688,7 @@ sum(diag(t)) / sum(t) ``` ``` -## [1] 0.5096154 +## [1] 0.5 ``` > h. Repeat (d) using naive Bayes. diff --git a/05-resampling-methods.md b/05-resampling-methods.md index f7e04f7..646a231 100644 --- a/05-resampling-methods.md +++ b/05-resampling-methods.md @@ -170,7 +170,7 @@ mean(store) ``` ``` -## [1] 0.6424 +## [1] 0.6308 ``` The probability of including $4$ when resampling numbers $1...100$ is close to diff --git a/08-tree-based-methods.md b/08-tree-based-methods.md index 46c494f..7408dec 100644 --- a/08-tree-based-methods.md +++ b/08-tree-based-methods.md @@ -1150,7 +1150,7 @@ bart <- gbart(College[train, pred], College[train, "Outstate"], ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) -## time: 3s +## time: 4s ## trcnt,tecnt: 1000,1000 ``` diff --git a/10-deep-learning.md b/10-deep-learning.md index 733bdbe..d681516 100644 --- a/10-deep-learning.md +++ b/10-deep-learning.md @@ -393,7 +393,7 @@ npred <- predict(nn, x[testid, ]) ``` ``` -## 6/6 - 0s - 54ms/epoch - 9ms/step +## 6/6 - 0s - 61ms/epoch - 10ms/step ``` ``` r @@ -401,7 +401,7 @@ mean(abs(y[testid] - npred)) ``` ``` -## [1] 2.334041 +## [1] 2.219039 ``` In this case, the neural network outperforms logistic regression having a lower @@ -433,7 +433,7 @@ model <- application_resnet50(weights = "imagenet") ``` ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 -## 8192/102967424 [..............................] - ETA: 0s 8085504/102967424 [=>............................] - ETA: 0s 21987328/102967424 [=====>........................] - ETA: 0s 36618240/102967424 [=========>....................] - ETA: 0s 51453952/102967424 [=============>................] - ETA: 0s 66551808/102967424 [==================>...........] - ETA: 0s 80912384/102967424 [======================>.......] - ETA: 0s 95641600/102967424 [==========================>...] - ETA: 0s 102967424/102967424 [==============================] - 0s 0us/step +## 8192/102967424 [..............................] - ETA: 0s 3956736/102967424 [>.............................] - ETA: 1s 4202496/102967424 [>.............................] - ETA: 2s 8396800/102967424 [=>............................] - ETA: 1s 16785408/102967424 [===>..........................] - ETA: 1s 25174016/102967424 [======>.......................] - ETA: 1s 33562624/102967424 [========>.....................] - ETA: 0s 41951232/102967424 [===========>..................] - ETA: 0s 50905088/102967424 [=============>................] - ETA: 0s 58728448/102967424 [================>.............] - ETA: 0s 67117056/102967424 [==================>...........] - ETA: 0s 83894272/102967424 [=======================>......] - ETA: 0s 101908480/102967424 [============================>.] 
- ETA: 0s 102967424/102967424 [==============================] - 1s 0us/step ``` ``` r @@ -729,7 +729,7 @@ kpred <- predict(model, xrnn[!istrain,, ]) ``` ``` -## [1] 0.4133125 +## [1] 0.412886 ``` Both models estimate the same number of coefficients/weights (16): @@ -762,25 +762,25 @@ model$get_weights() ``` ## [[1]] -## [,1] -## [1,] -0.03262059 -## [2,] 0.09806149 -## [3,] 0.19123746 -## [4,] -0.00672294 -## [5,] 0.11956818 -## [6,] -0.08616812 -## [7,] 0.03884261 -## [8,] 0.07576967 -## [9,] 0.16982540 -## [10,] -0.02789208 -## [11,] 0.02615459 -## [12,] -0.76362336 -## [13,] 0.09488130 -## [14,] 0.51370680 -## [15,] 0.48065400 +## [,1] +## [1,] -0.031145222 +## [2,] 0.101065643 +## [3,] 0.141815767 +## [4,] -0.004181504 +## [5,] 0.116010934 +## [6,] -0.003764492 +## [7,] 0.038601257 +## [8,] 0.078083567 +## [9,] 0.137415737 +## [10,] -0.029184511 +## [11,] 0.036070298 +## [12,] -0.821708620 +## [13,] 0.095548652 +## [14,] 0.511229098 +## [15,] 0.521453559 ## ## [[2]] -## [1] -0.005785846 +## [1] -0.006889343 ``` The flattened RNN has a lower $R^2$ on the test data than our `lm` model @@ -833,11 +833,11 @@ xfun::cache_rds({ ``` ``` -## 56/56 - 0s - 64ms/epoch - 1ms/step +## 56/56 - 0s - 66ms/epoch - 1ms/step ``` ``` -## [1] 0.4267343 +## [1] 0.4271516 ``` This approach improves our $R^2$ over the linear model above. @@ -906,11 +906,11 @@ xfun::cache_rds({ ``` ``` -## 56/56 - 0s - 136ms/epoch - 2ms/step +## 56/56 - 0s - 133ms/epoch - 2ms/step ``` ``` -## [1] 0.4447892 +## [1] 0.4405331 ``` ### Question 13 @@ -966,21 +966,21 @@ xfun::cache_rds({ ``` ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz -## 8192/17464789 [..............................] - ETA: 0s 7127040/17464789 [===========>..................] - ETA: 0s 8396800/17464789 [=============>................] - ETA: 0s 17464789/17464789 [==============================] - 0s 0us/step +## 8192/17464789 [..............................] - ETA: 0s 3784704/17464789 [=====>........................] - ETA: 0s 4202496/17464789 [======>.......................] - ETA: 0s 8396800/17464789 [=============>................] - ETA: 0s 17464789/17464789 [==============================] - 0s 0us/step +## 782/782 - 16s - 16s/epoch - 20ms/step +## 782/782 - 16s - 16s/epoch - 20ms/step +## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step -## 782/782 - 15s - 15s/epoch - 20ms/step -## 782/782 - 15s - 15s/epoch - 20ms/step -## 782/782 - 15s - 15s/epoch - 20ms/step ``` | Max Features| Accuracy| |------------:|--------:| -| 1000| 0.84516| -| 3000| 0.87840| -| 5000| 0.86400| -| 10000| 0.87200| +| 1000| 0.86084| +| 3000| 0.87224| +| 5000| 0.87460| +| 10000| 0.86180| Varying the dictionary size does not make a substantial impact on our estimates of accuracy. 
However, the models do take a substantial amount of time to fit and
diff --git a/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png b/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png
index 9802397af06aedff453ff6b2c9ed62c39ed6ce0f..bc6d686eb7dfab274aee9174eddacf0522359e51 100644
Binary files a/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png and b/10-deep-learning_files/figure-html/unnamed-chunk-12-1.png differ
z$cV_KWyp}Pw+cMR;q~_OXCl5oHa7c(6KoprTD@JXinOd}pC4}~K?LXFIElgbb{?Jr zFyTi>;YfG#mU<_qVK^)Bs}lOz;%NEO(sv$$S~+p?R%~+pB%Gx|Cwrdu7Wwk8Zv$MT zV=pT!kHPnWS1~+hk6sa9z7a6DmYE5t2R+a{p6mbXvLukCGSLD>8=llfNXTM?U&h)0dNr=w5Typ%xzQFPRj=0zdl<8gXSM5VX zJvXRLK0BNIB-7HlQCpjIKZFn)6L_(^JDTn#3E^S)C9Z_o?SamYD!a!DNp!krggNi<m-dGHaGIN;5Pl>mkPU6E$iI8v));q^fVB4dj-eG@8CquP!&W6d4Y0kTLSD(}km|GxKAt$)H)16(I59F8@PDmjqcuexKHz}ytlcrb= zk3Q*Jn&z6V`(4O=lb}MduD)z4MI|71bazmAi!G@x)?2A33Ek5I%@#Dp({X=5DZ-3% z0YMGIK~c0`_)PeUjwh50^E^H>FFgFi#;4RcQD4Z0)R#g4Fnw_59f?Q)~T|om7#E-?|qq}u7gy}H7TDlaFN}i_DCS^L~HK%a?sBNh)Yz#!Zq~` z7MS??6ILj1V|Up+!m_?Z0yAYMzMHM_F4H0(Q&dNH_*o62_e7(1yy)+Zl|#L~^-bGXS1UZ&ls;$nr{p~b^TVi{cAD$W2Yno35V@PdREqO@`1>v zBQ_~V1lezoB_>sGQ{oJUcOWd1DAkKcs#;o+jNhWMTja%YwO{_i9 zO_oU;)&Dk*_42M<(Sc*9D9Z1N-^RZ;T^eoqF6SzS%Q>u^_5B8~({|csq9!&?iJzap z2;dcxSexQHfYxbXfi?ku-62$KnZrS~!tQIh9TWSd%7MW_rL~DH$TLX;Pb$b>B$t#_ z|N1J@R%E-{O?r@hMXEBAaD(7^h!Pw8G|Mib@Ywa1!xY8;;{wdWcnvCtnWDPQ;Je$_ zwkcOd**S%b3h{37Xhu2R3IVB^EffI|NTsftedfNmz{_T5T_z?o-A|jD3+|yZEfSDX zfzC-Rj=g0J8GOx`B-uIo#8m|*o*wao!u9E|I1juq3J(pPO`>%?PI~Fm^9xblOaBy1 zAJxQWP(uND!qn~gbF>6#OkWEeG(CNKJ>JHDZgzGhekv6M3x!8o~drWo8M3EOIg?n};C4x_I#k3_a0OaM<{Q^L+iD zR<@qVDS)uBOGYwMVFG6dhP@eR1<*-KdeuvsZ)urXNb7XMsfnN_7!KHc9+S9QeKQS1IWU<3 z_NAqyfO_460UeCT2-4Eh6TQVFP-}qSy;Zn%utrNm%Ut~9iUIR4${Lpc(>Sg8#6`6H z@#&sI`-HzMK_i7T8cY^RyG_{QvmJuXj{4dQljlCfX`s#iDL)rVup8wY-W;n=BWO5Z0oId$3(!T7C*L5}*xL z^*T*m%`-Wh`dIHP!`GPmo(E5SS2ko1xD(q{rO*ER7H;>}Qy9=86F>-8Twxh=^ra<4 zO)lUcqWX#(cNae#wB?EqgqQ42=%i*86A}_CaNZy`LEA!#^M(ms+CIk#LZzim`~T`3^=z&y@%2!U5k*y?>q)R2 zPEK~dt$y9`lUugIE%g%6FHi>8H6I?*AWch6#lT~p{Z(unC{rNpQL7&I&EGk~?TIoU zz8uTP0CnS3!M9!^S=LkfvUQx3l0c>N%yguS%##w%- zg^7go!(dAwG5@TfA{0fB&i+wlT)HR?#&jg$p`0Fvu|h%pGH!sy8CKy z&<{8;0Yp4(Tn8I7q6)BF<@MjS*nR@WK4?mgBj7HgZuSPsj8G1sH9I$#|ER|kv_0TO z?XA;8%ZZj4pd!9z1dAtN(a;1CzrHVV+QuJbiy{aCH#fntSjDbXJmuh9GSJX?ttT5u zCDgP2_e>lFDPz*N-6**eNT@mR)gaJpVHC-dEW!yfHIdv!n%58jH9pO>KR;D@Tk@BQdI>g zQhDX{Kx7G|BWWX!@n}phVGP; z``nTq@iar_i#5s0+dhK~JtEpNpeD`&{2A~evL}0a$jzG|UeWU_RzuU!!d`49fAzaM zw899p=NuhtMlS{n5Eqzqbm*_XfmXW$lPUs^-sXo~aE*lGP7qfQV2$2AE%J0&Z@^VG zpsL6+_5Z$N{IOLmwb{bST1@F39~7Z5A(nFYCBeKzwTe1!HxCHXsMBwmh7t#VAmBW> zm}g!p9Ee*Lupl}%zm7%C59H6nO&xP?uXpFggIg*XHxT;qttJqu!w=&BV(d%6sa)H( zQBo=*DpEcF0u86p}fiOd<2|pO4l4 z-tYU5@A!{@9s79qev7r9^*r}|-Pd(q=XqX*pMK-80gzD$M{O9;fWr}qLI=uf5C!db z5CSx?`W3hWD?xI-WMOga)BZHloC)Jv1$i1wD^MMYV@=UYd;HbT;}{d#1o&YPLfF_) zN>!n3(2h<=0!R|7-uh$thXyKPNRQF6$5R!#<%?tZ?%mRw&SCQliAr67i*ZkJpc+if zKyiReL*tWb-+==Ru(85xu)^&qvLdJQ{}i8CcF7x3C~s^4U;f`yrxuO{ytH4LGB95JHE#cwAi3Ffb$9{{nqd; z4O|=IzYUS729%PgkDLx#C)pIaGcqtBxI)|ZTOekVza8yzbmh_b!RQ2a+47XL>0c2o zDty^)px#5+1HGcKH-Kobd3r{_%+4k%LL^PI6HUL&?^++o&3fZnRyaqn&vFpkWb%ilq_6Y|ZrZ5{Tc3ItWDN!|ExTcgvUcHFz9X#Xwwm zwTT2uD(K_Q3!XNS?!ccyqwZdU9b`{WyefZ&e$m`%8lIKg{@j8chlE zPJ}1;^RjQ>Uh?A<=4eylx$2mt=xJ~c{D>2bL}(2lmkk_n3e z)xiQ-allG&_3XFyOF`d;!wM^^4^bDm2)+ZBk@o3Jpox=~9rT08#+??a1FGOOrw@Pu z@JfpAYvJ37YoPg~dU>DujjkApomjX9f)&YWH_{dD7<%=z906VZ9P4gaa(}nUQ%q3i6^K!x1xac3(SVvh_LMbh z)LvvR3_>R)RtwH573#aUZQC|5FhGS)_vgkhJ6Qd|r@A04GA~kO6C}@FUMp;%6Py%; zu0T3S*X5ntUVXb3tA=J3Err0W8MlTV@YlyRM& z`=cN^43z~pciG2}>BdfG7&{BHS@GS!5o@3wRK%aa2uezpaq}Up03?Ux3fUf=O2r@g zT@EyzIR3Kd-|zKl!LBRJ_S6HV2?QcIZ#;^L$&A{thI9Y^34};yukXG7a;|(wDa*Z1 z`Yfp0aHv%|_1jc-fy*v)6c82?vwV0gT~|UM8+QVd9h`_l!Bte>`}?t6&yhl~(J~gM z&r`yDyyCA_ZmX70;!r~Z1!CJ-8wEn-UQ}2JGAI-qK-vD_d4s}=o$B(ZNt=B`2qhkn zwimm*(Qe-!i7fEuVEqZCFeoWe1|m$sP~*^az@C**89%Pc*7+7DDRY40OxhL~7n^ok zo0=ZD^zNQL2g}N_2Wr*W#uZ2l8^h@~1>GY4Ggj<$TxJxcfB1ZyD!6;Nx27;=8l+r< z=fK8&?#p9X%*z<>hWZs=gQ~AQ$?;g~LYPwb>C@?#S${vm+>mD39mUuX4o79>4XN8= 
zUFgDK0hF}xvv1F-hk(%gKo11TDB@ut<=yrD)-BLk)jw4Li^;5g&CB9btaszjrm)DS zX)q2=V4x)|uFicZS+i0Aky&^6w766f>WQC?do^E{7kbpbB#2pI;;br>Kwo!C+bQ3m zW?s3Px?i}M-AGS3sBC{RSXws89z9aqK?QGZgN{pouQ$>NKjK3WGo+pVwA)Qlfvi?7 zx^hdACMzj-$r&T(phCO6SXyLsR+MrTnpvXm%K@xfVI@q@i=}6zs32Uj@%?+ZI#A^@ z>LBUGB(3Xn-0!SXrpgbyFYf{xh#W3rtsmv*IyT_P!sFt)ffO)?Z)kYdHQgByK~JTl zbI@^HZw04-ITAczs9$WuC?7g`^&8c~DoKFVXeU}*kG}g;d~S5TkWi&@bp~=ZR}Aop z*y#WFrf$ZYcfx3)z;>{wzuz8?d?2YJ#Nf0g$lOSuA{hQ&i2l~iGm8IUa1PF^h=+In zUfs^-zqvP45#7H@<;s6*Fy&py7~Dkpu>p^;x7v1K-~kY|v9p6(Dmyn9T;el;kD&uf z9(=R%a|5LqEaiA~6I=x27kZ&}Uwzb}&q~K_;r9d1wLnAuY}ZqYQzZ&<&O1 z-(BaNNSJ(LjRCn3;n}nKzyVsRe1Z{=0ucTYu)!4Hv18>i7O@+Y6!ET9lLQatid4Ys z3esdqGm`x!XfBQ#?1Gy@aK`64k7Mqh`=0B7T1||vBTnf}t5qH-tlZyJ6zh`c6}T-g zVGwiP1*^-Kn}LKOi9ztuYlvGE04jj202lO!?pB_g|2k(Lo)E>Yg6F7?t{zP^`}QGjwxz%amz4EuLqgoc*QO^oR~@Y z;|PYaVWbc+SKcz(+y}EG_%+w`xAQ&wtgXI`VN8aGT<2doMi*^uXe$5QO~n8^j3sx< zLRSLvzH!?=!oY0i%__SEhUHZ=?Yrg~E>*sW`t}Wf`rzKZ!e86a7Uo%dtTk@{wHC6@ zJDjU&Y4_^F3#S7)H4Z9BV&M6TF9=540MBK8lu@lG1_@#=e0>Ckgnl)qOn?W0DmHa- zs3sFa7`d!-8#oUi*4wli;;yZFBUoNwC0Krrb%*Q%4jd?EmVqt?(V>)4+C)_BdJs?H z8G&^K8A?^vV4HP`%9%56v+47|-n7+`($d|Y`_p7EX|a}*L`voO8{V{04#N+GSIY&2SO;3jqAX4r3QV z(FYr-zM%oLGBBm<4BX+G3j>bD!H9VhffGV3XzBx_ql-~e0Z4dq*y(IvrM7;}|6K_; z>R$)T6@@I4=zg4=5CnoY2b>OF5a*?ZTOoT&OH1Je1yS!8Fi>!9ooE)Jdx-P*QI&D# z>~IGSc3o=nXO9_LbN-w%DjW>|#vRAp_}A45?_-CehC2-2))Op zGLPKlpwhp!PWZfya!fp0aG|66GI zU*?pmE-x?7c=M)RIw9%=!7Kwd=nO^+`af`_`CL~w2%hP0Q5eErD0H}f?AV=gf8bmI zCTfs#R8~4XQQ|i*(N0S;%{tP`$Fe+rz8dsY8z5O|L4^pzgM#YB{M4d<_CuFLy)1Lp zN-!RW4ivd!_RHK85{&F@k@q&QTXhgd_!yRiFbc1TkRl*nPjHG^S&abmM^WLPZa$ zq^PLcAvaghL(7YBG737>I2O+Z!us-7A(~v;-+Y<*}m>G=_ zdfp(~0<3?myRBMh(k2fK_C?%u?2KQKid?sbp*B_D{jqXX?ZETLBl=(G=EAvD;mjHB zR&m1M2bj8W>-cD9Vk1(1Dm;75EAWhBy$vLwq^^}%bdy)nCg@YG2wF);^fj<@vG)nA z33_~-SYRSY10izQoNI%1MZ`RmhKP2Lp2q{0pt%TSab+L>M16fMjw;~s_Bc znT!erDeF5|Vnid2+=2F5di;RW}_R`?(;Ay0!Po8B!w@w9% zsislsQw&|~hav!i-MDJN0vH5}ScoJZ(}AKZjfVEr+WdLpaueV4D3L)$iugu1??ErXPGc6fXf1^f}dZ~VfFz~pac-2WoNM- zCWZ@HH@H1mCzC=Lu_IeT=uVPzTJwPYMGla%$*HLkG>8zw|IO5{L<5{gYl*rMUlFZz zo&peNZo-=%j|#Xd)-CF`i->E4(iIvKurvu7t<*#t?a_0nZUDl`E>3C~8TI09M?H;) zW(sh~+CCkbvedzYfc?lPPNWp|phyEoDTO2d)Ts){!vI=h9G0-Scn+4O^)3G;jXO5# z)J5i4763;2uI)y>e00-yn_Iy3bz{C`djOE6x3u(S_t_b9^wd_WopSHLO1yE-!rjMw zyX3XCG5QbWxsfx%l-)5wg;VY#(xU2Rdo&BJHJf@@!IoPvv<69t_zrmw&OmwG4l2To z0`7rQQS?Xg9N)g>L>mG%GRtz+k%`^iL{m&fhr>)URdtmLo{hwIs@vZCi#6{R9L+3~ z9DDj66lGR=T9AiD=2#&m8X9`>cDs-+=C(s6qmuBq=+Tl=?owz%5e$neKdGCb=QYU& zuH93-=GwsL-5bf6x{H^l7vu|CfLW-z)X%usKgr)@zIDg(mA@kQdEjvdRT)!(PE)MA zDVC=v=)lVQi?>1dNfR+;)sdXJR9|p=W@ia7wl-j8Q+ayg_aNRZPEQxI>LAYuNwk*? 
zul|fhG0p(yFkZR=r}NZ@j~|ICy&q}iDHq7* zC9#7_i;RZ=RXjq(?Pl>7?Nfdcm9@tm4%%d6HA03;m{kpKqkxvAtq3}QT+t>4+<_9i zr|$UbsES-STlz%wL{J1qp^cN2KuIT4ZC867iyPcV@<2&Go)_@&Ap@1iF*Q}yv6!fR zs6!7NIEy9>pm)sec%uhpec`SByu2r|K`?a!NvmAmH@DN9Rs*#OQS7tHOEOB?RA!j< z`05#G_~SdL&8a4ZdEojt6H<*0~E-a_x2 z8cS*2?W+Hj>_(@Xr5Bw#x`?2IpEoUTcV1<18X9>NAT`~@e9AkV$WM^`+1q+SdI0+y z+B>)J`>DMaHWvGq?=?S-2MV4PW~ZHMc5#;fruZ)xKpMFJhYx}R0zTHYk6`B)!Wh}m z&>*qYQg8>z{NtKFM3D%Fr^j&KqZpyW=K+2c-*MU_7DMMRUR1r_5ny)V&<63*tqkJ0 z#uZzIm7b~nC)Uf(DQoTT>U%)~`jHS2qtw_ZQTpygvi9<=irJUmC)NY~ZAU4ElIM!> z5z6{y+`QSraX)0oG`I8Q9#?lzJAb1d92^g+A=()bdis^E?2bv-ZYOKUJvdYAobRSQ#ORGjB?lD zMi$FGq02okcgM%cT!Cb+y|4dn7t%U}{WTb?PGptR99P#5zpvQ6Hw#rqb!XlxWo=#$|pFbEHKaD$N=e$dZ6{?Ge19-JiSmVqA^liI|_9I zxSq%!L9^HeEjN|i<^}8a%ye2+EzOn9CgF$lM%b)VtiG`LnvE@C6Q29D$15%v?P$&fyRuPCtZ5v}Zx%DDA$+BB6HV2-Wqx$-7RpE}edf z4Sa;q=iIr<{s~rzJygW&NITxTw)+~E28es`RTFs6>w%L5#8>soexZ?$s*nGCQPmSl zi|taj9SYL@2hLzy*RBRktX)ZNZEb~3rST<&i1VzrX~_Y(DoYJ~IO$)$c%fS9yYl{2 zH|Z06T41}t9QgXRJ=|Y{C%)+50LG5&$!hIC#Sq^5zD`xo&of6$^S#8)wd{v4XmgH1 z=hQg8cU)khtk=b6wf`wHQhoYx+zGs5GcM)aw#SHssVSn=LW>%hXw~<|n}nDh?XP|H z9Z7nNjwbnnxw%QXSJWS5u>H7h@}O{n&l&XLDv|z#!#H zf3ly{jw^AV1)B8-Q3L^^J?DxB+={{CMPBNDM74VcIE^S<=9HJOFL%7vMrDDaVNA_n+7}y`=%pIQ z%dF??5*!tp2@=$=U%Dr1C156qSLgyb{Kbn?Z7*Kz%YGI2d!+GQ;Ec1kPhxO|Mut9E z#lTa8($g)qxG5iyyyKC^TTFr@l2m*)C`ovs_66GV{(TbS3M!lOgmHS~ix;m!sEcON z4(QzmeE4teiTsG>CteFA9DG^Oh|q{USdnrB62dQ5jnvUmvRK68!D%{9{;X zp;R+{N4a;6WWjLd_hXMhnE)|zp>f0ngndLFn4#<|O-VhkuOCg*?38&gI(jD@A{7$> z`6RYM8U=^Ku*Z*+pgjTpS|3`$*45L)%)z1DX0O)EYckKx%q|fX90H&irnhMQ8=v+ruIca}BK&i~Qp`Iu>Rm9RM}5Xs-_)ds0ta}J$FWNaneyD@zklmw8J++m2Yx;% zQCZ)Vvhi!Xy2_@cGW%#(T2Dlc z-eW^yR3WT`;LsHQVT~409#2{0hG!!jO3JY%cIk90?{b@7^!!_VqbHsO?gUupjNc%- z=a_iI1TZut&sen_;6Dh6khMUM;(5=Hcj(!_5NZ^?;>DaP%^|(IFXQ02>xl@?g z3mq8v0&Uo^fhC%!Y*JFodfHw+h59<#}MR zXbWuGu|u!^UGHzCpt%;UVjy~XRIGe?s^shEHppEa86B0g9Z;Q(0i_nf4yNIr1NcpR zB_zhc`AHJQE9B<1#FEmtLiD@AP9~JLbtkhdD%`#^?esdGoF+4h=#^QnnBb@ zQ`nK=1i%Oa{(Uz-Cc-c(KK>XP*TC#WZoQsLi>f6>Gh!melHu1j*%*;!KcG&a{Zv2H z5L&yMCQg-zDnp!2@Fg?*xi-qI`mI-zOq7{o+F3q!0OEaXsAkloPz8z|iou3n@oqYY zt#?lFw6Zdwa)PCd{&KmYC=-lm5JRUSF~2Ovx(WuxVTT?dO$1>)T>0$mG%Y|b92T~p zn-9ypk@G1hwVMcPUd}q#tY4y~p+T5VG`;7+_-SH{`FbAa^Dn}ZK0e*@u<^~hn*;Kn zgjv|a=Z3Ce2qJoiC3Amt1RtlSUIpp`>vcj zCUhxVF!y-DpaVL9QkLdvJ^a+lJzgHZ%Ob4t%N^C*&^ zBjmZ%RMsi>nT zVR(v+CIcWM-;e8A5kGn2pMLX>k#-D?R8l_7@+c_i7}@wjNsm?U92vt_KOAAET*gn| zJlz36!eACiA@v8BLiE61XGQP>JuBs;UcHE?n>tlI89DyY zZGy|!?l?3_EpCq}{ z>K80f4iL_^hF2-yo&8?ji6H>tMMXuB`y4`-7UcRWk;=Q-uyY2<{t_llB08+M>ezdw zxw#okBR?FwWzH)b48Nl|l_p`B?6 zQKE6b2-g()sJ{s?5zqd=@>B*fNlC`FYl&f?2$XO=x_9gnz5=F@AoT^G)V{Bpb!X(^ z*f@{7#nJLydB1sg!%7;oPCPw!8HX1%Mh+NMllKiEW&LBn#I`?w{-DQ;j#dr8>>XHc z-K7tJq@k~e#z6#@Iw(slVSl4;gi)2T*bm+`vzp(E{QKIFx1oU>85TzT5&%GK{D;Op8hubwu z*Bq#?dn{S)IVI;JI$W(1?X3t8a>R~1Ty6k(6JWoI!DRhEenfejwjh~-{M;I2%49Drg zpHDo`k>$j#Ibxo?Ja7;*y)Pqg^M3Lg{Z%w|NDTws-PcFH9ZI!6rjexHHdWTErZ23Q zccHNPGCCpX6{06tWN5UqaH%xU_?~UVP!{FWd|P+y81C<{=)Q&x{1wXOo&KSrq0?~v z>d3os84wGaGjQe(;Sd(-LZ{pqu60RDOT#w96X@t5rWK#vsHi>4!<%E4ec&J@(E;w< zxW|WXjdQbYtDk!EG+{nVOHcl|Ue%X}NoREM?6AmXt;m!tNi|J0pT0=_LXo2uZ!iIY z1HeIo34P1pN<(=_k{9QN9TOjvKTx z4@%+9;()Xio&eh-A9+?=pBLWZ1F=Uaf(Que)zk5zopR7lErMu(xzF%dAqpN$AIr}_ zucM=N$t+P!Lc$WX3QTgDI(6pRYb&*UffD)beFMTwq7$sjI3K(`dwQ!1kJ7eVcWkQb zx28mCD%Awhe7|onxD=&u_(Meus_p$Rwjh^9mWj0M!i5V%0|N&E@l?NlNXTAL4u5z1 zVUrgT5iu@w;~>W;fD_2TNHRYQMvL({^upgm_uz|2z~tuTrZ#T8w~!BhF;+J7ViVM)WV&pZEjqN z+Kzf15&Y;~@9KH=bAl)3`kSJT$?neeT{!&Dc6FLvLE~d3f%RkS_Av$C8T|ZHx1mEFz)6CvOrV|X$wE?wJH%pK7JGu7Uqq|!k1fKc!^c%N~_P$ z0w?(*U@;ejB}GI=(TT=kK-lPDydo$v)w@Elqu`*Cl=b38K=<;|h0sCtac{tZaI|>- 
z6QAHWKEbFOQ>CZsx3sZ5;l!miTfgXy`Jq>>t#>g^c^*iA2G_v8%#gM9>Q`n(9tDb; z4xIzRv-Z~Bh0Z*gLOzqOje8kK6|Oi)R+l=dvv>H7k3~Onmwbx9qB8KV6Y<3?4`njm zTNgYGP0~#p8^CWtB4he1c!t)A4T18uK(f%XfyX}3eeLms(DVU5CUFi4>0%UO_Q##P6U#PjfUn#xp0?6-j7wYcaiJOKK1i|W%%-8$ebGf-Ex2<$5C!F}4bw zH#RdP*jKeO+?eXk&FnHrHh|2`zuZtEQGK?3DfcZBG<*R^z(XJazMFyM2Yi8E)Jg>5 z3Tc{u%x2jb`t@F(<%YMQX>fF`mb11vpsTGd$;a2<8{}{^`acD7NMYj5C=gRMNgu1K z)a(MTy4UvC?7EJ8`hUrrPYg@fEu2DSj2!Y7{&hC}w0KCT-+I-JqK@kiht>?(k{<;J zZ{E2xBQ+I%rRS0el@yeHB5o-QH@2~{F{wcxrOaD8ZKs8?X^uFqzoHg{JqdbK1eH(H z`cwY|tnbi%RAGXNzidSX!hhrWf|PGw-l5cK5NU-&1z70Q9Jx^H zpv(>01?NzFuri6}YyK0#bj3A+HJh$}iE@p2VN$aN5E8n0sSImYKK1Q}A3IIY(nE!a zmS)zu1so`1gpD!T_{x>p6e9#&41LYQA_4duI+%ZJf|sZkNK)2*Oq)0FN7;+cJ)Ee} zph2TgSicYgpYSXy;x6&?#~u}S!?7eNc;#~d2qjn*`yW%zoY+(?ADZHy(f)>6=kSPm z;!%EK6wt8_*aZ^fxvr$>yfXyX$$5|YsfHVX3FKCH!eFR#NmZu z+}`2)C^vt@#kn}!JZQ!+A)_DL1`@*B%E|!6M6qyXQV33NJc(j(D(ItN{h@}v?2OF{ zj{0*{9C2}&z(xvDge(4EiAGQ$R<($rAcv&Yg{mLdAbi{8MTU3|d0G3q%7gn3H?o zCb5nh97(-X@^8vbulR-_+p}X{333tIHEVzXDJ*}zc!4(ua3B0`lyE$z3HHW zLi_;i%HqI0{CiNC;SqpIa2ZIRfZXHKZeMWatAcpOCC0K}z64;4>Jd{B>r%A@XzxEi zienS2G$-y<|Lm}C4KNuoRgLy91#RStB!CpaBTYEpS3GFjyqPre5XU)@4pQ8zBiFtpc zTBs{DLL_^pskx<$TToEBTO*W-nOV&2GiUZydD(f-(%Dl6*F{zAo@t9B?hm$P?XjEQ zyQ*OR(Svpu6vPGdnwud$-C(gtd(CNalPH0@A4GB#L_+qIKDOvg3Z6{;gJF`uEFfgjVJ|Dm) zRF<>U$lyU(x{5>?e!EymP#=f;(k-XHc@yT@64N2$Uus$vxfq&Pi8Qj`TWeMqj1g5_40b+XE~A{uyRALfFcin>Qr0Y^+N%xSoo&}Hz= zx;PdNDprfd)I{SXeN4fOs_M+t@rT&_si~w@m%x|7OtB@{@Q#&n<=ccm+?ga0ddsj* zveOx2x0@)|&}srKWc#yG0i%Bb%0oe#4>!A2|4_D6DouiR7vMJ)@^bz{q@Ul$kfF_M zn!Moj4BY^xNV+YUO!Cu3iWTWI?%cX%Yt8hq(S6unqecaXXcj37*tUSM0FzhS)1e!o z15}2X=(2np!Km(xtm84|8eHN$a@RuU_%S6I>KoL2mAQo}|2w zJ0B$jjYn0$yj7`Pp2)i*6-z8uccknNO0ze@>-Ch{WYI?&C&>sDyN+id`47P+y+&Fx3f zKcQj!F1^E70o6Z>#BAW}a${J6 zd8J+Ea>U{(-^lqezR@(M)=W(SDMFK!kUoJf1uqIkM-wrfBS+rC21iv>)A;Juxsv5Y z|0;{I6VSW3;dy`@g7I7jB7r9F-n917j-LrI=i!Rq@{rYD`W1IP_`Pd3Q$B^C3abO| zN(ceAwzjCSPzoa|Dp|b-b0q=lses+b!HhpA_ywNR_=&I&YYdKp>xxqT)2EC%%~9fz z^z@WvRp6X)`U|6c{Kt&?3>h+F>}F6|dM9Sd7=g}dja~(^65e=h!wyWXMuD?|?(TWG zn1p%q?=J2cx#z$I{)FsZFUrDYxm~9_&<9WzqOt>M;(~kpzN`!g1?kYCR!9!Me0hZ! z3wYw3+s-8)umMmTw}#YAwjWg25NI17<=a*2OgUC=FCSY+TNnkq7+65b-WVhB7eSq% z@Nk{Pz$3ds>jJ{$0Cecz{z@x^Pe79Niw{e$0x<(>2gkX|E9V#X-x5v;y2W!M$BD0< z@l{R^hnTVarAys7Jn&4PrKS>;M8s9m%`0u5`?@^c|8fDSx_idB^Uvv3-=O(*XwKK*| zibG!rsuA*X3`amryui!t^Cfi#@_@X*rBroB=0cC`WqUMvkA{;?tDrwhYaZ32;h~ zX3P523mA+$Zjajx)|JA2971jmS?4zZKsInws-hWn{;s2!UO|Qd3Y+!zwvp{E&y3kt z(Yds*`s6aHNn0Z82pd!>Yin!McgbmKDE%Bk_y!7zq)KLICU7hqtpGVU(9?6J-On+U zXj(SkdOYRP!zJOV_TG^ZbU9syZd2Ar)24>B&&P!@-lq0a))%{9nyyUmOrA(5|9(T{ zJtUH$SP)7VB(><2R&e5s*osS*D5XT{j@&7 ze$*@3b*Q(r9%!nm?dBr!C%-9(z3qva#%WT2 zGf(ZeLX)xEFlicVNQaj8ls@_K=#v}6X8{a5ZSLJ}RXVSugL`VRB`q(`ee`XI&y%kJ zhLCP|~43>WkvrP5Y(|dXew7V{

TV6?u+`)XoN zkY$tg`^Nmb^|wtyA{A(0@X3OWY^P9#-IpxRu1Hw3=Mm%?eC{3i=hwCk)D`uLx8)0n9Is4jm3baNlO<1!sjcixa?V;Pjhtk zxS?3KXEHd8AGkS04atZLa7RP_4(M`qL4jU0FY@Ah1(w_rHvfEdaadc1R(PODef`~KWkmY9H}5$1 zpr$8$*oqfXhrk9EobH-;X=< z+{DC^Y@IY6e@MD+`m?ri^)=pFQ95gzb0kCzA=sPo>-lMaqvB zuo#Ndymc!V94lPP>TvNGKm+2=Bm9d}hrW_=_}27I)33%QBFOR3h-IMWPXL4?Bl=a{ zV&Ui?4IzHwsQ}smlRGCnTiu}&*&=KLA|~W)>}Cs^8hsz9bJcpRv$w(|ipw}n>0`F8 zJe95+vA5~&`|dpa6?B+}hKcLsvR;rbh~6E+G7lH?DBW97bioU03E={acjXZh+>(Zh z8uwGR%V+|uIQS~W&hd(0Q*D*q;`5EK?nxU?xD(WrmVdRn5WRyJ`CZ@H$7QhRLh<6; z=T52z6pc6vnIkw308jVte~C4amYl3RmM2ozpn(MNKnloGe~s8bhqy& z3vj^P#;~S3Bq&G>6ev(PK=C5XCeW?K;RPr6-SMOB+Sl@;D@@&bFCHfY*;kXNLN$&3 zqOQs9aX2-09xho`=oX|0{3VJM(n^(rb{L_b;6cX@Ht zjZ9WLJm^~4nI6P7x5)WK1}Ftag0M(X*6Z6)!%4Oo=x-1^kPF~T5L2giLBisOJ`1qi zQzuVaea)$AZVs(-h-ER%ngPU!AhW&m`R|C4(_h`vuh{c-*B<3tMP^nwd-SLWU3Hs; zI7xW441XgRkLpe5^n1l?M7)mVj=c9JV9I~#`va8=yn!zVt{t}L&y#Q$eq6H5j; z6K)gNvR-|y%Y0y$!a67&9`0-Y;|Mvlx8tlsSK=ETRF$9`pd9vB(Crh(E$#O51!#?iEL~in7q1>Hn>AP-mw)hoR14 z0yB7j;JQb{KqtuJ&-Bv9(@Is$o8IvWI-8rdT-=_Pv}P``;9-$(@tjl#B3@OMLe{M{ z3>?J7X>4pBZtiP&7j$)X<5cCz$YhbZW2?bZ86^)S?4VJDCSx7q%tyHwv75jc07(L< zC#v}nMrbPexCE#5PH}T{Z&EXIDH_+?cr!;TJp7yYnW5`5{;k93r6jY()isE;R=)@@8*^S+Cm8LnGkIus_xP-!tn$PI7KoZ)@5Nfe*eTo9;U;}iWx7akJdYJ zfp-EEWK$H)y5OTA;)4zZ;enL2^#0%%Z{D}JB%rHjk=D*O8 z>Se%zMGqc40Cf(uXYiI#3jt)fVrZydFA!V>E+jE8CqAAKlKVS39<`Lja|A=^`5UJa zpS*Sci~iP+?(}T7E>r;f7Sto8d@r(@i`|xG_w2dL!LH#U(6N16P~NScHc4O}Z+v#H zEUZ+9|Fu2>7~H2v$c3O*LJ@HCPI(#No` zTt)0^T!mL9X55O2p-UP<2tEV_ABkK%Nk$6Ek-Y zi)DL^UfQp+(ULW!d6(?>EnC3M!$1kboQu9Wdm7zqf1JBDmUv4^_*+@-`&RIK1dVo`Ttd#6=#4=UW}>;c^2FscCxXBA2|f6 z&??72(NZ_B0)vpfsR>i`moIO?k}W#*=i8H8?q9tM#BCxg6Vf2aM4VBY1JG;E5yc)=Z1D_o$7}qPDksOj0 z@8u&XG+bSaA;IV?H5O|{(-CGjrw+A@4745ho6d6hBogFsA@BO+2~FHgkqd87FD&B> z2B)3jLBmsp&8T}bmEd`ihdd+r6+^oIsVN3Gdzp0!JaQUVgb)V=yoH-0pY|c|Vr6CJ zs;;hvT>(K9!4Ol0#QLZx4uVZF<6P`4K^G|h2*cQi4mRC6!nujw1FXJk26a6&)coFM zwOYWp4@Epa+2nG{7U2Ahd=d4jOr+mT#WJJXA9?v-Nvcb-EWFO0;TK?Y%zDFt8*1F%F$ldf%rEevtDux7ZJWM(LeBhH+RZk~nqM{G;gj?XdOC&n39PqazeClXikwZ#(D{f|_@l@%Z4S!0_DgA{$=0(h8VV zu#7^bqSxFJ^VQGWn=2lyXA?R#ZsAZE!T0vRf=N(7;Y1t5TQ8Z%emG0X5^ zPjD5X-2s>B7U>YCeiE!MM@QFB8)=nEI`szpEIsSk51|JTTy=Fi^FzOYjt8_jv;xwV zc%EGE_@0t?9IA8Pi$1ygk=P-YCYsaYecfaY_W4Z9yVv8XXXv%&r6;i55cxx{Ja@h| zhAaY21ujgj%Xnv84LybVZ`%zTT3S5u3-R&ede*YWo!)1ji?Bp*lTxkh56PK06!qt@ zRE4oAxSg01ssOv)%&<_5Ed|IhDZEZS#TGu-P-~TStA<)NK0kE*Aj^&rw~09%Gyu2I zs@6$AeVXU8N{Xhpn|4)I=Yy|{0u!@}vb)8?H;j5k73!B4sO{|7oELOvZO5iV=;xPu zC2g2}EpxDr%Go0|U4WZeVOGmc_QsA_r~%I&+@{3Txfli$Czyt4hHZ)oU~wAHAS5G}&Ok6yv%*B8QdqP{WyPh{nnk!`i5b1`na9 zEq*2fI<}eVjDO#8tYojs^RIgfep%Yy^c(=l>#YcI^Eg@D{F7WNQ25kSC4$d(m2~Es zYv+=>D5cE()UPxhjg7V#B*Y_Ey?&-%h{~Yi^XF^8n1R|1xJ>$k!nW(m<%GsJEbktgK? 
z33(vwnA%7sw-duZA|v5u{H3+o>SM8;&qqTA6S28pI%T@Qz2tH&&Mqa#&XBpXU%%Ft zzwbXE8WBF!qse~;SSknC^+iwt7z)Bq~5%24_y2UWbbvGJ6L8`ZVfRlz)%6Qj zv|<7R=5UizvTE10`4vqH6__(;{bpS}3x^~L`lw%#*2U{EZdNGLtmXizXw?f%MO{e^ z_m)3xYE6~`4l9RovR7)|R3jC=IA(W)g$5@%9Bq<3Jc-dsNeKzP@MD9g<i!F&H*wZ;dMU0@)WgVxgQR3TV7eXsjr8*oNZ5ma2*4Ny;Oqr4j;)X4w@K+F=r=o2w9 zbyCRrbLS?1HCsc*&cRU8+1c69upb6Q#xkKbJAfO2+H>z%oZ*t}%GXz=dy(3932*DI>1^!;|IKK*nXTE<*~p6*^yTAvdnTFRA`7cb`j%>2OW5UzXx86f!X zOo6!>ulzDjKYr8od!8kbY-ncY0DqJd!~6J%na%h%n|)L)+tkv&aBun?*=##ZGI4{` zga@S66B330XJ?Xp)`&EkUV`A zt^TA&{^x8yX<%RgTkm4cT&FeB<%Yy858y{=j{?Cp`||uwdLeajguj0+iol=iJ&IR3 z-L$^QYYlK^>$k!QU57G+e#~g#<1gCM?zRc60NX)zb*9hNO^x}(ojZEpHw2v^LNt_6 zxOpsMlsd4{(ZYl-`8MRV3ye66s#R!mmDSYhPypsxOocPuRYXXtXI*PIJt|T8m9%>| zVYZ^(NF1f0Fi+l{CqfotY+{Xq)*sA|Gx||X!SIu5Q;n#XF(QOZ9UXosqwTgNYY`wR z$b+@1M>XzdzgEif{PD{C1Q~%-tUO2seLFjq&U#c1cSfg!hiZ36Gqcq>&75e@}IFbrxtKiC3eQ zuk5O2>?KVi(+Ba05#tJHej9Q9#5_@ryx0uP9w&1zmc8>US7&3oevspY$}@C>1H%<) zX`jZcAGJTtz*M#M`by-Xn$gByLy5NhArM$I0mFvecbBTfGxt0e93(L0681=$jBc!V zL_rmy7-^3s_%|n zuvqd?6b!P?{tdC$K+8iP7)@!mL3>xDT45wqw=51Yl^Yv>%ya!^)l+~??!4r}wVaB4 z8IDX+@VHW$LA4n7I9*=FucxPSl%<^!YF8boE#iNy6mPfB``6KQAlOt5QdMsUzKF3g zh&8+y-W-_!ntosEd@HTTb)8+Fpi=)pws;|f%WW^li>72`xDK_5etnf_Tj1eQj=;z2 zP(azljgGjlR89W?3MD*!hlkP59_Fh`Asbs-9){Ztu>BNtH0^|u8N*`i>;cVKKk?Seo5b>_zN67EelqWd-ZX~?H+Ax+$d6}x z&U-<&h2;R(2Tm?AQJ$NB-aUN^unI3Q0bpZzOy?iU?>pl6U6BAU|7KNvd)yM;NKgoyF z4M+kHW-n6S+ES}qXp)2B*-w3r*pp5ZKQ=N2rt6ir+u5<$PGFes242e3{>kwA^D2IK zO%e~6p-a!!fbylk%{nTkz2@8VkZIz|h+_;OKE%I*TY!6D=s^z5dtvQ-LHNMyyXQZB z`?i3pVUl!za*{OQ%6goT@z!?{N&Wb_CLg+eG7$=73R0W}!%B+}zym-=$svhDr?I($Uo=JdN;Q zXjK5nou3h(-%1!!7Zn{o>~gHeE$)MFiFCAV+^v@>$z~{KU{c|&NZ}UeQYA>H0Z023 zDe3Qt{wr8iIP%GHs^{tjn7yFD1Vj$29QX}%preTccRw3rEHgZmk-Pv!eNtg=WWgO$ zrJrQN0pci_RH&cb@x086dd#SQfHl3 zZtD@1ax?1D;bEabdF+fMM{WlPYlQ2gQ4VPYivCyNd4Lp*&Q1F(%k9Cxsbu?b)}eW! zab2x$TzE&)tE+MEdzsZvU+Vrb@87)U0lhUdiCBl9D+n)EKR+ z1gr}FQiP+*()k8q85w6l#|URahhhAifzhQ0Bqf*q-6-clve!1DE{G<)l5trfjK8qg zK^6FZmwFp-a1}5pcxXlh1b~waSqVI|ui--V+%GDo{phQxz1n6oakqky6jm7vZuuo( zYW%X*WJ?s|x^--#hWE}1@|th|*%)s7XJFBZzL1H9r4j5~PzaE-3HV1p77+Hh6@PjR(?Nc+V=LO|`yriNQVQQ&OHX?I6o zGmsbDawc5tgy5Fod2XBehl<54GfQ;iQ;LjZF-U)b%@mK+Q1G?nylr|O9c5+rJRt%W z*u}f+qN(Y7GVkpY@NtPYB+MK<0{bDI?-O3hL5?~d3CWpJFnWs# z2srXYs$oEMIV_|mk?m#G*B>C4BXiUI+aXZBOA#VPdcq9QKw%?}9v9>>T-dx@Z&Xov zf@bt2AYk9SPjjEb0(&E*at<8$dC_p;#)AXX?;jd79X_KH`l$2e6aEr;=8!-T&Qk(v zM#;9|vU0`CE(#17y-cx>`OA(vqK8FOa`7J;B#QtxU>OIz3dnAK5#VX~sO}IqQ$2c= z+P?yr5PNWI>_f2L35PV^n35uE#t}K|kk|EJu3B0=+RZ#lzh1+$D`*%Lt+*+Kdl zJ%!?*D<0f{k&MgAWQ273aEdPU>L##!B=_$J6yJ&9f%FZWo>#_I%+k`*vp;lkzQYEt z5KO^uL@vPg)o+(l08ITC_9#=c9DF`r!A+A2knha8&~*^0=R# zKM&;Q1180)DF80>^t8D_8u|2b@o!{p&y*zofWp3bJg=+xME9cTae=FpOKdhX0hjy^A*j!lUxRz7(~L;c2j z9|DXYYBe%3NlfO3chg~z%-b1vSDafIe zQJRk@$i!ekbfWO5a3d}4Bkg8wygts?5^OU;8e&!Qp77zOw9*Q3bx-culb>c}j7U|Q z;0*L`ZzKjO;*GY<$0TL6S9Nsna%4*~9-#^T)M=Alw6ybjy6y zN?i@v2ULuOtjqoL-wGX%{Shl#s>|{yF9$aS83X2BI!}-6i2R6f^W(Pz{TV|Gi$XMY zQ0zoV_?>(R6k0*sUp;8>r-@W=g=rQDnrTP|#%@v0&WO@nD|q$D#>k=jOw4)smM6gX zVB;g}9emSVLM|hIy5vdtTUMW{-fG2voD@RLu!GGS&}Y<9 zgI_&reY;i?%_Q{rV*6`%xG?%#|M>jvGiRo}jlk~Tl|J%WPaP0C@XceSrcjikkK^a( zCv-(rTs#WrB={gDBc(&AxO#eQ0M29MqT_`M7pzrR{C_+h5K0JxWiUp-f^~kO%8#CZ zq?}uEQqm9Y50uJIf<}oqSX8DD9Zw7+#?@^R(HFGNL*$1^4EDo!xv9a1e#k>%4!40W zP5u8axP($hejYp!nVyywiPHxjA(?m4cwC+?Std-SCr3T<^XHKC0X1tG@_wxcpaN5! 
z_?U7!~IO(EtoT;3Oo#Cb}Ta` z)UbNPVcqYxJP#138Q{(>nP|}0A)C8tYb$%R5-1BK$*J#qmqzN{jvV(1ntJiQvHMvq zrsqDzd4!I&y5>9FY&n;?M-c}S%1Z*`>MFiGSN{59>Q7Mt(gn1I%Qp(H?2lY#RNV-J zeNECawUQDNA(A^mrv<7+&O!Q&HZSIL6M840{2;PGp&t{UMDT?0_Bj9KoM%+gjzg*j zceARkJD0T)NeMg6b?d4=eX;^y9||Yb>7QVK5VA3qU5Yk$)Jd@ojbltq7Ye2$2p6`Mr&^uxpRT37s3$)zOKLUh@ zTOS(fNHaLt*k&^;K$ZO%BL|f+Cl3$wI7B9c1+SlNyw2(lOP>X%DnDq$s0| zQfX6?q`jquCM_gQB`s7MN;GLG+S>JguAASx{B=L~{iy3Y$M-mn&pP%4>VS!2d0}A% z`=ep42+yT7gpXsfA1|33+6a(<;6PolQUt>Ybha=re*OH}h-WHU728rVm(2MqkrfRb zlE`s>5vZoW!Nb~`mnF>Kw&q*3AxQ+6{QrD15=H2~T#;*lN4>$wg9OQ_r>*>6LM*nw z7AO+~030}=@fb7|eZe3Ol>T*myy@}dpK9-pu<`Qo;TmK5M9%aN%~NgILfqOKQQiL) zvPdZ@10LFxp@AvaZHz({BN4*0pnf3PY%If;hrjCRRpeOJd$W*)zX%R+z;S{!ZOpy6 z$4N4d7LcS0w6d^x!U!7ped4bQM{i7giJ=sC4kU{4UQv5m1_plA+@GW>@@Sucfd8F^ z$I=69V&7|yBe+UahK`aPWE|cnh$_H`d;BzBRJsA?qNBNam%XHf1OQ0L%H80+0S35< zmG%5C#sFk|k&=fK!|c<{uBVi&@Yb)osLOFXqE4aH75H^gy1TvI8S^mSfU=6zKDe5VZrXXnCA=2 z%WV3XwKbXOH*OS2)%f{S3|13apjy|i&3p=mreyc-z&mh32_G~pe@Y73X?)&jrs24> z(K&i0Fy!E8NA*XaB+MTc{l<3Ry@j{-ii(THq^t+{jyXQ+D|vL4&pOGv!I_L6#pr|O zrvHwL$a^j1`1A-okTCEC&x9*k$tx;|OMTlc#N| zme%>M(31npLbIRYv3W7A@yC2XWYG!|-s#k7q`u(TM{3Tq^7|9l-cJCrs2Z@&)cP~x z*)X!7qe$U~hO(0U#DL7vkd0f!i){;Aw8ssdo#|Y5v{u#agvBB3{+|ym)yp!^`>q{O zew6!Usp8Mm5T-3^5?$c}nhg_)h%pk!+91{-`=OI`rANYfQ`ZY!ddalSFHuw`F(~=u z$rRY$xx-7Nv#Ce|mJ9tNANpl%qIo@LH%Bjk6aCRN>itY>iWLdRZEg>We#+>dFiG?;yR zu<2+DS=Tdn`Y|if@c4blO^(6`rMGBCNYG`P<%2}W&K$T@GIQ#)k*i*=5AsgKs$4J_<442U1 z!=%)m7arK5V%Df+jXQ=(Vs)v=M>Ylc2Eg-$1=q0aRJTYmTj#zv|9I)C{CbGGw_=ia z=F!YZT=Zenhs{4e%~9u0gQr9kgv#p$k2PS5u$7Y$j~*XUVb6-wvt%Lw4aMPTK-HZR z7DUPt%F}DdUim!+shVg1`?kzVmo!s#`$))3PibaK3K^mNM^s42<$E0qct;}R>C=sj zjDm)Bxk$YGfIMkPGx3LSPm_^>f#)oXY(O;`0VQeJ^#M0|eJ>^+kc}u}iSnLbK$XKu zfiyStPSBtsB5y14e#E|GS|B&;0%3K>LMC6xsd<8G{r8uhH?;(ThLS?eu~s`wCm>ac zDozg39_S)rS#d=V`u--zlzL7IhovL(OU zn3apCrLq#3L=;wc>_b?FurPhjjrEu^dVBlo@#32l?r1lWKmoO=|22!rMwuh}`e9AO zpCNy9+L=fmtcqTe@OknCn}XY0`km;UWN{bzb{Oz%vr~5JQ*1rhuh`1UDEYxT%Xg@{ z(Vpd}AHS@>1!YVlOl8I1>s~fC{Q1tO60oNR?5_s6?yh!Z3@;REnP0fWHHS5qyua|J zh-B!p&-$i1h9PEx=+pU~9c|9XFiZ65ojTt5jMZ?p{ay_Ec`SHaoV|e1eKMkRj-&U| z=N$y#XoXcjoE3P5Qa(O8!@7WLiTJ<9x;j`c?m_*52?)p&dJ4RNZc_s)TP-pW`%EIn zTz+3Xcq{0Yw$9h}3mfwnPeHMw=1$7-gkkN)>5P%?{cyGr#vEMw3v2w0Zz`HPb8Ua* zxNoa>kWTq+;2hlDp3ts6j!Cgdy5?w(%Z&2d!%ftLRo$u!1L^c)R?4^#KD+6LU<4Ko zav{UMAp7M@X=rJ8?Xtsk2B7Kf_v~DZYk(E?kV_L8$$y+Zr1R2Rwfd{FGEeLjL&e0u z7k`M)9~rrcizKql%MD@ntX9PqHQRt4} zlk4djo?5ft!pAUoa%6UV&rq7thHdiRAa0?CT*$e=@`5KNL(RkGG!KEzF(vCm~4tcG-dhp;Y3=n8n-FZYJ?2L{d zpZ@d5BHwyLu)M$}Ph1x#7(hfc_R{{lCu!hrRtM0#C8! 
zfhOdEtn;3P4+vlQ4Gh~a7~L@L2kvHcR8;%t&rKgbfQOKkm4yt*;VE4iR1Vp;QptuyT^ZjdXYUjFVAlK1MM-^^`5@OgH3xc{ArBMG7g@;l!Cg+ zm@Cqz&a5(y?{G*q^dmVn7fa&l?MgZ(0z#1aI95>1O~;}-PQHZ%DcPTePqc61)q9* z6<}tDbsGQ*9yjDi3%$I#?i5?;5;-yKZU8t0IwB;i?b_`-NYgw@<@{n&lFs5RGQU^h z4ub)Z@FDqq!&y=GAb*uj>}k<=p<5`+D%P1R%ZjL^w4h{^5TJGr%9SgX!{tc@|sozoBIsk#p~0j)4-N~k)a2Y8312~jkdbjGI2ivX11uK6sf`3q zuAPQee17GhP~ypmd-vYDjQu1t_?bp;_OFH7{%rf_O8TPW&BBKcxkEzmbgl<<26{xm z)RzKYF)x*uE^B|k{MLn@vU0e^PE!5$-;2z=M9wN=JwFPPMOK#5ubc&?k|69e2TCj- zktJb))t>s$f4{;qc{|$zZtBb4dKRUmy8pHX@*Y0Com<`~EUjm3@M`~p{z6eD|G)EQ z|AksgFW~3DT(0^xr+2@0&JGBmUJv=DcbtJukn9#2F)2as!F1#*!p?s{D<&(e`u7tx zA9H%QdlvN&k1$xqPAo|}R(rF#sIsMn#MeNbZM{Q*TqxvLysV_OqO%khkax{s%0!*q zzMb#it>TkCLym~CEt@v+BjS`+PAN$kPH@Zw=3B$TQO4 z7BFE-ay^X+{s8_D&TBpqkq^T&SUQJ4NKVjB(Szp^4!=8xj{I%b+cMQ;eTS!95pa(9 zmMfqME>I(UgWLyzkj>Gf8I7>b&7%2iI9o`jsH2hPQxAy)4I?8&p7K|}-@#5MV8qzW z3t2hj(ul~pT1=3vK#1ttu|rI*iNyYGJBZ1TwA04TZIRJk;a}B1i+68k(F5%*j$)67 zH(+*tZD&&I4aez?)zI;(f{s@?#e1{THy?fG+gKK}d}DR64qXBkMX%sWybZIFFj>e# zk`o9hYzkInqL2Ma_T)+B;E}7WlnY4JErh2X^5ji)k8ky`{kyOVtlE9JSUNTnIAP(U z15ymqpWFu~&t^MG(HL~Nh>oeV5=Y9Alx@Q%^>?@RV&46@3Zp(j{zgS6{mQ%B%4rNy zvh_J7w;q1%D~!R0opPiTBD6iQbdt2uYu7HKmSwVAkXi7j>O1kPk}Kaab6)|Fu)}NN z$W6x$|2l2%#le8*+kuP_;`{e&YG{DhJqf!KcA=700($uTc^(Mm)YKFPV~h(y!NCGy zYT@@brihBb8kLuKIWnwV+@0fiRG5iE9XCM^UFmDsTyZ`s#xn|glZDleGf=HeoFgXZ z=N&8ZSpI(hB%M@R4;x31;!eB4%m4EWbW>?pz$g)w z;0!vgPoE~k&KZ-c&9aGW80}bJ{N~@XRmt4+%o#qS?q$K&2YaLFi2ud~DEEk+^6u0G zEA4mo&WqAluKs{=e#U49b+YJPomQZ6W8fRym%fw_sfyPD0 zv$pJ5#v{1oVWF+Pkx0%txuWhSmyu&7&I#|$C63+yo&kDwN#BWn#K%M#fZ}l zZ1v5bVm|^&A%Q3AzYqBZj0O(HAG0y+BHV=(m0=4u1SiNlwGyyV&>}yo@R8YIt;5Aa zH8+QG_+o(T|6GHo6s)99rM;u0yhJYUqJzW7vxRsC{Q^iWun!(Q{xe52u4QFr)I7yJ(`K zCgy(FRF_u}mAE69dj9oDBV%e#TMr^|4!;}^Hri>#up+>Rnz+rdB|I|nIoyFjJvMF( zhR+EtHsOO%CDLa((SP{>iIIf1L7xYcBxWllsp??rezB1Y>GBWa}_r2q~Kw&>yJ%$8-7L#z5BcaLLXnhc46vQ=l=w05C{~P)E-I#X;AQSP!NBQv1Tka zgNh!tQrfaUH90vCflxqkSp!d+m@M|ap|G9`f?@|aKX8ujsa4Gf(8nh=aeQQ0`iKf; zofdlgHr4+@uHnZ%2QPigF?S>F6N`8!=~t)P2+1Df5W| zT?SgH!l(q^eLql=5j2bk53c}xJG}MRFZ5=EX+gtcS$0F^@o}i_fdnHFu<6HDl9LJW z>yZD5y<|L`cfd^TB~8uBf8D>;n;k!PvNv0nR;y2?PmwQGm{IZ<9os@Qy3uDRHcG}; zrimW=L|cl1h;6DiaS|;7kO71)!-Ds=tIG{3{M%U#mfuj8r#Tf*7yt%GTl6i8J?TTD z=(^YKQ^rQ;28S(c2hp;Qj*NJ8Eh?fx zR=@%rf2G9&aar~ii}q9LOBU_wR^(!&Hrd=E+1@d>vItZCwZ!wuZz#E)53-t5^q!g- z-%gN308q?yFv_E;fdwfm>#bv$e{<1y^ut76s-@-P=B6TN&U=fVd*|gtNWeV4#g{|j z7!s&9Z18<4J>EqvyY6sA=DOVb6*8~IQ8X#AekJWcr{f;r`?uHNReMgi%5mZRd25Nd z?b|mfg##C%COoBT#3Y2i;IbPWKYkUX8lEitNR}V`Umid#M^wgJt~d73a>eI)Q4k(y z#!mhHVL!jij-vxm+iPv0DI7KAyXy#vOGyp&%e#UFx+D_U*Y}8-e@k22rX4$&esVR} znrgW)(Kol^#mmjgI&Ns_kR8PPuP05uAm@8*Wcjq69SspEFksYu`$q+t&-nO7sVz%Y z@?|d>x4(M(r0AL4Aed|nbV$@tG4t0qF{vM(>8EHPl9D6{qYZx>T>gErm3(c*NYdaY z{?zifrn1(^^W&MwTJ=zZJ2T)23Dl1#XVxxlW*k>`h=M2`2UhR95(K)rk&Y0ADLi>kb ztKF8L-eZ=J>b@W53y|d+!O9!3I=R|ap%D>4=lx|igs$TnsPD5mN7@&^wVRpyAJD+? 
z86SBRs?ygbB?{VFn`9IYKf{N#)D~f(#Ag`*fMlwh@ z|2-U6ND<#~ed6XfcVC_L+*==RE4bYBQ2^4w9m0XFtOGUl+#%`N*@y5{xrF@wR|+m| zWOMbC-46XK8kg;8i6P{-&$UdCG9r91qk(U5b$t~UzQE9>Y(h(_JYV)O;MFC~ul)Qz z!yc2dQw}>8IP5R)CM+rn;PHbjp|B8Aq~J*br07F$j(!q{`T1<|e^-!FtB>X9Jv&K= zA@N5Se514T3;f`Mf@#2~=&?SY&zF9vS9_Mi2Ps-x-!U^Y=XoqnhlVmQR4634z<2!z z%2M>bioCwTBF5bqCcfJgRwW?_Uj}PeK|sb<|GochFU5V2Wk7aJ`op)@T~$R+aF_3i zweL#%5yoW6PAH<0=$`xunKIP3_~#P0nX^drRowa4BLs8fHmUQQnn&|43C$KRPizpV z$-CbEuU{M-Y`q=!4^AfinnoiJqC+?hKGb>$-`8SHur;3tqzVhV~WBh5umHedY4ICE~t!K*g-f;Lw!%>BDf zmQ)oMd~LQ6%K^)bYaA}ID9`8HBN$`yfsKal{CTFgg7*)2KfeEgr18T-97l5DVX819b z*32}NDALIpG!ha6AsDCC12J~Jec=7C?@6en(gTG1K@cvtFYN+CVT+9hI2+d);OeNk8EkNehp&=91SJ)J{l4_C%e> zhD%}LWnat&B4EAf>@%AiHJ;~Y(uT9*xj}G4!m&I5Jx$H?r1xdkbGMino+a<8fei^C zHcQL9P2)r_hFcfm2iv4C#zJn5BNFw+^iUvXB%-o+IXPed`9vp6#xrgi2N-P@R)7BQ zD~%VToCBBy{xahCFehP$1n4UG*W3V5AP57iXOa}WpEZVFlak^-cd;%x6w6kqe9qZioAJrhFo3@=>#`-+d32PO1wvZwP z!PD0k_QyYWf|(T_uSPaxEkU4(ix>EOD<|hR$kf1*kv2h)5|1&2UA}(ZAw*_D3Q!>8 zpYcnNQGeQv(Zu?X^3kIdK{Y=|F3WC^P5fvj{(s>@2b%Ao!o);JQxO2Pe_TBz`#^{6 zZ)<_L*p7U%owLQVXE%Cax`R64*u-`KtkDZhfU7`o{RyE@acl)<4*l9Q9-*;e$s5mG zN-ma0f9Tn^Hm-Mym3VNOTdL)G$)u}2o$Z{D*T!wXKc?@)XYIt2yT)tSeY=5@5=wjv ztWG<8IPt?-d@)SKXG!66L+`N!iDhJKrIK#m7?XsAUMSO%&*z_+BgmDMoZMyb_hjao z-OayBi6yr`19o_!3tNu7Rq}S+N7qlc?+~3lXaCG1dKZD38eOm0^XX69OS2;la=q)C zludTGU%TGOVJIa%x)EE0BDEZ#U9pw?lYff1I2~W`{-Or zpv?pkYI%a5C~GsQDc6e=eVV<~61;Kg*Kfzni?41VtWsj^{xA}vY?W^%rKIL4eoXy4 zoS%k`Y+Ay3z$%lDc6)qu)ZWTUQSc1=VZSd%P-hXd$JrttpQy7T`d-Ml`_~Rfs6A?0 zI!s#TOXTIV^nUxixxfF>Ls1~&Opt~vIhnsQ>!?Uhw)+XQb6?94 zzO$5W`THBg@liwhUlI~;*RN+a30(lQxtG4dNI^rZ7tUA_dB}+SS?q(7*IEA-M z57Bp3b(iBI?)`MNF4q^W*Ma7pvr_yqm54cmIggl}osH~NIm849-QX1eWgfsA1kS)D zsb7m*Re8fe(Zn=Hy>F^ulaQ38^odO!h&FFkrH5w@rWUi7{)% ziuY3_35dp!viP7jMD{m)mDJSK7$Qnb6i1{!^s$i_B> z)DB)guzkBuy5i&GaMXJ#`CU$C@udi+;R;s8x^188{>jCa`;6%)D*y68bt}tuWGT;- zuF}c{)$p!e^h|oe>niuq=5A-ps1s`-KQoPN=`J;)BcKbMZh(<@F*8#G2le(Xzu=+e zp~h22CkBVitLkSiDDlN=Yd2nc%Kq4%n8Z?&YYTKX0OQ(6oSzx7>t z-#1pqCMIOW*HXqt?@gXZ+_osIG>^{WFmu9vz*3-@#l$_27?smn?o1eLW2E*Lq?AoW zPNKiUMiRY)XBi_L8?6VWXhUegLXND@RAM{4riX$RZ^n ztSo*rf1P2gTyBv-q6V4IgRGxYDg90KK{%b`w1!^Z88O%2hq+H%&Vswplt{*jKQWPu zap%ti%nk@MXQQobQFO_8_-s?$149e$$S9 zX{|lBlljzZCvd(nasX%mt$sjO)>?((UyG`h+Fd7N`n;~Gsi~@J05lDQY7kv}ujE(N zXK8)E<^3?+LUkP2_@hBXKR9aBSmy(yb}7Q@;zca77vK&lLdT8*!M4?Q3n!=6g9BGk z>VYcb?L|MEdZLa)=pemJG_?=@l7o@M6Qh$@M}-!F>bBJdY6hzB6*n`~tO}`tE&%s} z&BV~aKuX~>!~i5$@9)vFxBb86!J2*Zl%@TuszU;_Nsmu=-HU(2#-e;WRhp=Fs>(Q0 zXSU*a#nKpLaf7Nxh=AJkPa4lAoGK(i6x=%)nbV^U2p?Qez;$qc|M>o$utGr~F`m0N zH5{WCg$XLMX?SAW!RKx`Qf}X+Ba3)&QFl>z*KWK&n3mp*87XtzdNXAtgIpQ#*K4rMSXd4S3#uD{ z_hF`y6@tj#(GKtDnY}dDnjVsrlvG+udY}?@9KmiaTKTCBDeQp@<*wF4d;6Y(##07v ziZV~{+^m9>h$}c6omz7vVrK!A6jEBe{ zIQ8|6j~^cx8R;x@cR5{>lw|4Yc~~o9%>q}8pLMS>Ws5EwD=WIDoTpDK_KN@e`j_za zD~g;aXgtwFg~JG%ov%;q{A1#FiEZoK)$-c6ZwZ27tLUFc;!d2U@9zc%Vut*S+vVdk zo*8BIE8hqw91x3OFEh#HgqLbYd@&qrPeN7Ga0#Vg%Eo$^@^TB1F$kVE@0Ycm@}E*P z*hu6FZvKlxJq@!X4`E)C)=H561#`240z-+ynF5f3GtV}j*8efL5l^crh&4Qdeo}GB zCBrFBM#LHLmTsY@>6z$2dok2vho ze0+s?{o1dOp-_>F~!9bl|QM_7GpFEKU>rGO4 zs;WLYbLJ@SYPmq-K3{CpK;%?-R8$T6(T{;jDJ!6haOj~J7dn2u)blty+YGX36O-bv zFPoxMDEym-lgdQm-3A>_U)oM-;p=t1xoNH9GY#eCFH9C^;W_|Fn*@RcM+1--JQ}c) z#!A|M66$z&jK=v6@HI?p*tQj_&VNDMnPP)K?`GlhS5AORpcXAg;**_Q+Zb~eH>igAzhT@d#w1X4 zd3iIx5IQ&Ac***Xj_7(DI!ks;V6a)BYn45CP<=(jD+LF{e_2ClT5TO2LuwAS$e;yw zhH|ST{-;o4?d(bSBDx{%ONsM|)J*#&{%=CQ~UhPn$lPVnu5Xczu!Wf(x}6c?;il zd~B?{hew?us0LF*Xr(^{3E0q*)KqJ2MF~q`YU*(_GvQPBRudy)VuXI*2e%x(TB%#Dv?9_Ax!|t08p^p+=4i)FNI1POzG*Md-vXB21Q0}um4$=Ry#>(bO{*ot@4fk 
ztLf>jTNqR^^rCAvk=$W7Jwvi7oE-h6PVcQ3mLj8&6V|xZUEp!iJ2QgmB-=IP*eLk<)%KoBVvGpY&GsI5g6>;KZaG`Jgt#S9q#$cDL z)7E!2pp`)=|9(s;e_lDD)Ly>J{PQaR66G3fE6RTy?Cik48ygxTL_&lx&a)y5R)t3; zKR+M197u8;{G>=!n~O9l(o--663p`cY%QARA_opRVs1kgA4mW^=MWP>L-;p7$b;yd zK6F6R-Kp1mC&RY4-7gnoEWNfsW>6yGn)nIe*trk!HD?R84I)*ya_!P+y&Bh!_Z!n% zAaH92hPTJ2MiL+itQwx-n0>jazK6_5=kda9p@qkMKhi}GT_azIeh~^yjiWl(TSM1A zehY>qWhPc?{n3;4-kAl2)~OshLh@yh7^`2_NNmAvIPqRv=RinwO1UOo2UEeJ=+Nmi zb%RhB;0zSrz1y>^9(WiisEq`ify%E*9W@SNs91J~{+ffJOz8+JGJO|)m;mB*B^ z-45}~t9=}DbfUMS*S+zVt6e9J$= z+s1t-%Q^aImbLp+$u%J=}e^g>QB{pXRhRlwU+1(mDG*U*T~iSA-mA${ScU)T$8u0H%#V z8ZW3?rAw)pb%@E*JmZUwPN(xHIYnb|7HK~h;^#*NbNI34TWAWwykkf2P|HhpZ|`-S zU~Xk1Mqj!^6WG^UA|uLVyHw&tSHWBLWkpYDBM1tFPTrp7qPy5cOXNoVaP=VDLDbm< zdUY-A_4W1jkoSIE77XHv>x@mG_4!k#rr3wm!IPh&t8Z$$1SlQ-1j2p#oK~9Ts>IwBqU5~ij%uo;#}<`K#kB~u#3+nAz-`v)0nnK0IvXK@FX&_ z+*N8c2FsKo*tvl{F)@vh*z7?5@UhRS7z=^TKqd^8VS({puu>^U1CYAOCiB&d>+lW9 zVAVs3)7Ej9cJK+*44Fp-2QOQ*5|u!np7!V`Was8m6YnavTHFmyS0_^p;OOFg0HHNW zh8VQFp}rmwicZM#IeGrvSN2!tfLft3%!WJK=;1ysOCx<#+3bQnv(9hO|02ck-Tg$DdF+kB1*~rjXFFPK1>mO>aGM{5W{bU{yXYgk}>JnT-#eRpYbGcV+eO; zoxZis(j*zdz$va+HSwdBXp%%=RsGFxDV;x3q#l<09W($++2H03(NAJ%|S0fW&@rg zBN8>j9v`RgEjj2^A1spKLBYym`QW6tI^cc~b2biZKg!Wpyu;jP?$;IfYrZ*eT=#_=ad(?SO| zHF9mwqC-0*BJv9q_@*ni$G$Y-?EyY%3h}a)INEpo5h%Xo3WFxLCUR}vTEwt@p|EV@ zT(1;Y9J@j5Ee8c)?~p2(LgVswU^Mj(_1XDpi>F*W&)P4QeKo2&BhM00O}lZU!4u6U z4w)U7j_hdE?$<^C`gP@yg3<^*R7jo}^oTxOlPYsbz<$V{Px!z{OUuHI_m+x)Iq1J4 zWC(2pGD$1Y@nLNGv~)WrN^6UoKP=5ij2%PC8_H^MIsEkw*rp7d0(Yj?5^e{6;?7;4 z>=+eYj?3KEsmRX%YaM}Z1mBM316l4UIHcs(&4UrkLB2t8# zUj5jY?kxZ;NX6uv;e%xO6n_d?YHSM2!S348Zf7FhjT7vtA7M-EvP*lsuSyS^vOX{4VsZz{BgzL( zRZuC2-byQXk|h~;ATs>jl2(hW4iyiupz~+Xz62Z7Wd888&K>jpFID8lvK^U6>0HCu z=bq%D)Z_B}y3-u`{cDA{sxj|`f2pL(w@Uyjx67hqsRj~B8MIu*XY zLd6-w+mJIep$LsFk>@xo?!Nu$(64tIcZY@U36YU#+Y=v;=5Q=<0Reznh@OGrWJHQs z(XT?6w!B`d?)laZEnQt?zo**B_YjI&tR~j2CAKjssZ|TVZ|~hKEGug&@y5w~6P+!- zB;-~-F5G*Imv_C+dzG5Pla@N2J3PfWENUPvjoh~Hg${?=S``HO=R;^fXbaW2QMC#-7u|RJJPrwJABr>E+HGv(i&}R@@(rqfZSo{_eOR)$TcXQQ^XPrS zC=-2l1TI7-SSU1?|Ii5Sr@Ap%vbE=<8?XhaRCoPJMSp=m$Q*k4(hx+RvG2A`o1UYv z%la48T$#iJW-#I;ic~k}H(#3X08;{!$(XC5o!*MRLmpkax$YU!C+C7?q}y!jh71jQ zUE@x)ceY{h#W3%5(T`weVd2qDD~=1w+|X%u^n>Y%!~N~qpsWrb-uHL`D}BIGAc+tn zV>h{nH*Z!!p9A0C`E%#yH-zPwmq*>L~Q1+pkKdIvi8bRd3NF_U;u#<^2(lrzfN9_ClPcce;TqUlbod?N!qZ%|M@iC z<^aCWsZ?Y^)#}1xpjnKJj2v$@vn<_LU*hfr)BxG);Mf?J_62T6swrd_n8shGr1-ij zQWH>JKfvlsKcFEmgspGwpfmB{))a^!Dv_Pn(LsVRaCMQ3fU(Qv@@3RjL@x3g&)wdb zaAdW&Mrz~6%T|tE-@4IiFw)bHO&(eTGKob}n17QvR(kgDI(V=c*Y9QYOtNErfXqq0 zF9L-rrbvESNEc#ZcOMt zOLxwF^r?U?FcTI^95t&stzKlZiHt<>sb`U|9=B~g^$qqIQnFkte&ztxMzB>P_ z{_zjO3PvqpMnje(P&y$#fi5d6Z$>xt%*s6ZmMz9FHs1}h5EO)nxun{05#A^qV5G&& z_>NFk@7=6vX66n}4*ll6JF{$39J%vzHBRXV1_kOj_Qb*=5?XnYMLOXd(AKAqcKtEz z3Vhq9D(V<+GC!!3=0_c>{wVXXmUqa#j~N-MMbqEnJ9occ>|E7H(?D9?a_N!S`yT&I zkL|~(yPD|BT~PF;b^E|OqK8Dc9#GxV+B$|t=Q6k61=6kzfJYc`F_nM$VhYusySqEB zoch`(m`gMscRVlO$Wxe4`__OS7%ke|kf5Nk+_X7wz5v#!WAUz~J{)n`JN!}f%#Ix! 
z2Fs=i&DmQ2H8vhqXl@( zSVAo*vf9H?1|uUBKm}p~eD93F;>W7*^!2DJAGjQArT z;<9gFYI)K+M@PH4T4Q8*I2W=k>awBxWF*ATes%9Udotz2TUW6|S$36Z!E6=E5^dswx5`VdGGy*M&QWt*OWE=DJQKr&Eah30p;K z^0#h;E96@R>=YBLK!;9BC`)Tv1-*(tU!))A9uthf6cdLh|CFiD-nVwoUI%VBH{Odl z@UK`D3*|U=pO#6r{dRptd%^(}?=u74YwSN*){IEG0*Gnyym9z)>d+nE)SI0aM==71 zYI$PcV-d)6w9f%QUD)EXJo{D7UepgPE_Q>?13C!S6oe(?A%?7WWqX3_0nvJ66BgoX zc9t8tFSr48%5!rnj>9~D@-fOmoiA?38V51|Jo$5M3@9Ae_U)IRX0A2)T@mAGYBZ5p zui_Uvbm;@0y4@$9K(+1BK??+-7p>Ds)xZviua_*wMe-lLUmTd@lJ0J8MTVoncLR*_ z8whdnP2R^TjuMCfU&v8oKF z6TRA@uOHpo3xw0N;+1$!Uer;LS>4MgBevaRJ0VmNY2bZu73oB)Im;*B4&E3wd?~}n zJz8`r^Ro43Etmv$nkPC_{E|Mz5c{P%(%f==ILRCOu%a=qE_BC)4?9oD)*f$qa-S%s zhj|CK;VMSExnlP*k+apNciMN+$KlIIK~#{jceO7!awxDQAAa@vH7S#%__@cU z_4a^Vx>p|Z#3rm<_EVU7z3QWsauF-u$xnMAnIS1U8Xdq3I2q2JJ$v2hz%ST8uxs-@ zuFDCtWm|z6TLJt7F4KPeOC)q-QXMgC-lt1iV&e(#J1!M2F?-3tlK~t%=pK6e z=Q5a;uxMi}AujG% zR>9TZL*)gn9miv$>NB%+L`B#L2<lwH(r{wd)nabYoTTD`SDGsbDuiT*gELwd`veQ89~qs@wLgl%qujO+;x7& zqWYY}*0mZn{!f^3V4mEH&dOuATD(%qiQR7#Hz2+jrk4wd7C?Oj*oN8yU<Au4Vmz#9Qif0u)d=cZ#`LmvB3MIk$F}_|4!_5M53B(J zOM1a+p{pwULcB~etafM9(( zKX~K^v0Ex=)VTrWU{|!B9*hB&aQu5**&U_vqOcHI8f!qM1YWYef!yzG(?d^uRNr$H zXHEZbrgm$`YpCxPJsdTC^TQyS;&Ju3;ON_R*&`=4Tx@In1`3W|Q702CP~Gx#w_2-( z!Dl(I)!Ws>Ob7=<5>*oWJFM2RlQgwb{WHWv@!V9aQGqjoh7d9O;E|8>f6}$ooNt@* zd1g-G-4+kN+qgIMe*WwN+evsPU*}8wJ%mA_&oege{F*QkRsTFMk7PCPEa-WA?^cIK zueyHSWZa3J@OGmpCY5<0vG$UeW8;vy+xwr1x@YQarlCTTU-h#*s;<8KKjYvC@<%Wo zpk@x;R)y^V>=$Nc9Cm;NPt4@TW1K>%MeI7Vv&w9EDSbfuv6>N#<4?Znnnl!d$a$K@ zhQ=MlF%By6Uji+vx3#apqAxo$_b{TUF z5Uy+37(X-E%g#&ZNfVK-7&8A^($L`Mx*Iep`HMN6%;y!b+6XdYEWWMo$Beg+lOc0+ z_ipt{C~C2WnUv|kyKhd}dQS`D5OC3u`cZW3aD_yd%J=I^BV#w;4oHB10}q9Y5;SU} zTmrcwMt`I^4wth)I*B5J+0SMmDuF{VSjO-;|Ca>U(%{p&z{ap|_Y&G~%S9c3NHur! zf7&LzPdMp=K@|1q`G6l4h_ejs{4|2fhXgi^X4nUbuZPY8mS*g~jQZ3Nf0GFXqYNW6 z{vSxU!Zx_#Q_oUT`hfw#Ce!()A_x{Pv_tS{-hL3v<&|m6KIwR=MORrc0%1SD5X&|D zJ8jrsV8(r3Qr|Gk&aN~uV!pPpv*ETXB6jQLe1-IO!Dk2)xrEB2RSb#nLM7|(3c7To zB$hL?dDN|4YWVo4iJ|g%7(mcQW9_p5a}Q{FtVT1iwGk~oD3wde@*q)Wey9vLzO%dA zyX&l1I1Xtiobym$AZ`qxK9Dv@TCsZ7YbA;%<@!<9=LP-R?mMY;d=+s}{2m+A-1Jc3 zp&IjPh|tB1^ZJlWW+QP>K;Vbj9>pqX>vd%9{*Sia&`_iAYT^#8kHv_J?XEck^}XUC z)l&WVBkETw3u97KQ_(hb+h6c+y3XP5(^`{!ESDbt60buye;z&?GKxFe+G4YIX-kYf z6A&^oHgXZ!FBD^<%^OBZPFA%`lOp6(vPdoV>~zk2$-WcQB)ey-vlIcPAJu5Oz4 z>uPfXZjY*r#~)q2g0``M)^4%Ky&8F|aPnO?Cmxw>mR-b=8V}*QL)Tav*;^nFcuSEHb zZ(4^pCy=BSLjCagaKjHC`HalKOxl}z=LF$FMdloSAzocT7yT7qfS$(72qXB9iqKEx3P<_4hhYfVDyP0#5@UeVE!K%+yc- zO(adfXJ%V~O?Ey@M~R%)!`XSxv10Z5;f(ZF=uxN| zn7?UBQ&klMm7^4a%BFaTFpf^Q?DyzUtnR-|Iv{aM&@nyH+l3($+YL3lJJO{0??3ao zJ!<{A@pw{Bm&|XAkFHy`Z38~HE?v6oh)puw^lP}*kJTQ%xHVpyejxqVu(f+QH{2tpr>B{bC_LMlO9AVslsX^U*0 zZEkgO<35!d#I-q09P7nyMy%%0)93kiL8s9I;`(2eSES=sFyiTyJVEAAHOQ7HPu^vg z^jjzNJZaF_c&g`?(V~0mpoYmDqLy&@tPMn5JMds&z#4ZlEv>b`b$lO5XQrIq%vPg= zQs&>~t|`g&p|SA-$QT^G;Xw>Mv+`WQX93#aqJ)-yK66ib-r%msiP***d{mbNXe*hy zxDe>K?_@>yv;L#;MJMXNuWyx-eD;d}bzZC;&&(pZV1yPgi-`_K4a-v3md;KZzGJ>) z{LQTaU%2kLi5~F&4F&vSB4{eMpqT=pfyK#&4i2J$$`C4Fg$nl1pWpmu7T!r& z+1aob1C9YW_QWhUE<39&ziWPG`luzxd*JezSFvpVe03vqJ)|viaJC~vSx0m?j-X&^ zjP+n9ub?HpFH z9YpeT7jjE{8)h1H2@~!lO#uvAcfQh%WCmyka>$3!lIlU2I7uwKAr;n!O?gK&W~Zs;Kip{d}?;% z#u5!h8GXb9zYulrf}-LziF1E~-68+S{&c(UfOZsU;Ax~@4^kq~4%jN=ZZ!PlcqR$X z2g|i=UiIzU;XQlKx2H`mw+aPaeDG3=_qCq^Nm@nw-dtB*9q-PTW^J?BGWvQAeHq44 z1p)B|3<5iJ`k<;n*{ruI@b!K3dDAGrLkIix~P26ZG0ad*8O$*=dhgV zE0;c-YY{3PWe-Fx4q+Dy!s<%A2pUqhPn%7RLR))i1Q`*wxc2+rhrpI;QshDM|(0;K4j>U>;kyEe4<>a;zHW??WqD&WtKKzAtNj#5T*uos2WgjjJ zGAE#S+>s^PMe0Dt(Wk~8$ohZBIq_^quu07W`kg?~d-rlLI za!ZPC^SHp>CHtJtO1fP%yvfPDG-@29W%DncHec`hHc`uYvxGjs&s3r*_q$HX{(}AU 
zU)?`0FD)#1oF{g$u=Icbj>^FIruJM`j$+l|ey(@~mEe6RWv`~3i$pKK)PJ7PA0M*>SNunT^urBekaHZo+(&S{);Bh$AtK~+%732=4_8{3-=+@p zGO~N|T$ySe*~yFqG%#alt*j>9R&fw6^{%hE!2XBk8sa11)oXj}l9Q9sL?p9_H6TVA z=M?Vy4fH4by||N5wWsTkygIEadWGi z{K?V%kzukn4nwR{NJcU{MboE082r9+q0+acw2-2NtiM~J5L zv2#~66Od4$pg`^Urn0gU8~bMG=0-K{=o;vM=VLBNh_{R;MX^r~)CA$+qWTo>t&#I1~P5PkE{$+BQAvI)JRCTv_wuhIp6i-w zYJ?BZr*Q-aysAd79{gUAmJKEujU>%`z1@wo_*!E$W<$ooy$Mj-63L?FE?G;zJf&Y-LpFPsNowWObmotMeFOXF_(k(rl8?9me}i~30lZIl`B_l|%#aZl81 zW6NXrVgfwJ8p2-VQ8LciB7Wdz?I7yqG7kDO&_Zr{ot4^9%z>=fvG3+J)49o;(QjrL zc%_;&D=8Fpb(w0PVh)BU#Yxs&N6#uJNAgy^R_0SQ$Zk$fvuNXx-?7kDhV?WVLGB7j zZbehdM{bpyTd1gMZs=1{T2?J1r0_h$39yHj;I*Q-{N+bes}mj0x6__Iv32m`egW|E z?OWyOy8R*|!K!H=I=Fb=?vAR~IaVz&01M`)VP|RgT)%Y=tK(RXi*>O!~=9cYw!OHt}oKz9@ z&ytv6NQ#1hQcHHVpk~%`t<9c61jI@=F7pJj0zUA z*xbyIXKq>ct$#Hy>{p>ZSN*iDdgli+e>;v1=(cC$YiZWouhOjCs-`Is7y4Q2&-63|kYO+lKT?l4kTetE|HcXd zypybx%R<*Ui+?A`t=+g`h%`bx)vtPs)(_aJ+$s=1Cve&JG|%^mP3-r)RZgAyiU=A+ zor;Sy2hT@FMuLVR@$UkGBT0Vnbr9bp4hq>1OGS_+_a3^o6D1L;J^b6Q)?a;>754f7 zB|5TOSENK*=M2VSb$mujS!+!BIIZle0L^p7Oy&*v$=KE$i{bDmXgDfnsWGkA&lS#IouPwI-*}}*-(RWt2zS8&jPPW=U%#$ot|CGS1o#P%U*4KCWal{7PC1ItwbEWd+-@OX|cns2cg&Zv%M&bRJW3nygQxqO!8j7 zMDmo6k?Kc@!)_;hQ)~9+f6??E&{+5H|F^9OWh9Zk3mF;7C?h+g5JHHO zkUc^n(U6s7g^-;QAuCBLvrtq>Qua!=|LcCf|DW@m^L$Q?N@^-lhqY^Fh|`846iV+1)Az;*8bX{A;sR~YjWoYJen2s;$-ak`me7Sv zXpeF1ijm15*&citGnG71Q$w)%!PUEuG2tigdr>fxpUHd`QtQFn;$$N*2*mtUQ$HKv ze;yfPc6R78a5{bAmw*rwGSrVhj3l!)Q=+}9x2|{}AOyz#;zyEQQBlHfhWo5)9zpT( z_WF#~Wo3J4h7U_@F&F!rQd3itQ&HWHj0B)f6c!sAmIgm@0>>yQScTMb$)r16x2mdS zeJqVo`s%6Re#Ydgll}3~CMkW3fW53t)zZ?1BS)qKI9wO!+&M$te|K?vf0;l!#PGcD zt;m~V`UT{jX`}>_h{-=$d#frFDk_VASG5+QTCk zliNNc8f&DTFV`p!j~!gpQMYdWzU0h2$}Q8|Pt-@fdp7}&yV*A*W1T>F-DE7;V8k{+|!1j6oOf!Ksgnm9VvxE)b9G_MofFi z3Eny_*#E>x9d@hd>Zh5s@W(fJ2ojQ#=ZB=pQ48-xMjpQZK-bNz(!ZiU=f%^^`;QZD z4wO4o7HCe&N=pMU4I90VkRF^MJ(7}5;r!0&_0IyB7Fyr;&ra;eDHH(J_E6Az`_Paz zxRU<<)n?DHSFgFKDs})0M_eQ%=jorCgIl_~Bm@L5An6IcLuPF34w{1u>9Zfb;Og2LbPF#O+CAicjtmFMxlylB-SItF9}-a? 
za&HH1>#J8S66eXgoaT-&2hJSczyFr+*PHHHUpoING_4%J|DkoyH4~nqe?CgaC zB3G{tJdx)$Iq4g?_wKSpm79;rS45@rb8|QSc(Wvf^O6cRm9*M^ zi0m(g#EY9yk0DJ7N}sJe2H)*2NV*y8Ip(fU$`Xh>qg3y0X*~rKl$VF64O8^SubeJx zx=Z0$k#czE*EJ6Z3cbVKbRviHGxLkfbx@rK2a~V;d0t$6Pb2G3+xua;F{#BpbC1qv zUi^F4UeZsZ^O8sc);PctdLr8h9k>9}U&4-$Fc?i|Dk>7XbVn?+ht!||4|r9L^_6G} zPo3(@88#cZ$o0}RZsb|Z|60HY|yC!*~-P+H_jm+U;8+v;B zdo8DWDh+3;zfu=Yjq9Qf23R_4L0reIhEJZ$%6unyah0T z%6W0yQTTk=3X30z0z#X7tyg8_h}M{UbZE7<5+8H%J?!p^ANu<0N<4M2spB(oYiOmmR2jh7W6Dx?@YuD;v)~KvphO7@bd&HkkJDUwz^M;^0Bn$%MLc@R7!UDJg z;Bti4$*Udz2Dv1R9t&}dwk?tm4>QKcXB5!vVdNV9wtno_p^!s^+lJM~hT(stY(G+- zRbP#JwdbhIt_O@!bN8iN)XI||4Pq3{7Li27Xl>Ozb80xD&T2a|iV(9>a6K)fFn%>P z0(Rqys;X_a%FkrnF(=_>2*S``SNPIeqf~7ETW5!J&M7Rm+j?I2T0-tB%w z?b4g1)x>yIwY*#}RG^r2zo)C0m~ssQKX_6p=n&kAo}l@^ezjbJJs}pI4tRlJCvFR# z7~l0}7j!USY}FD>)e}rf2y_wUAkr{Hwq~1pj>L2f-w(fjL%@l)QVF$`xM|$jSn3%) zmiGPk|5;(tyIMFqL27fz?tq{m8Nnb*J4mL>PI*qtb_apzU%&PdR}IF#nG0hVWR4VZ zNNF1wy$sb3FhAoyZFOevT_TuZP{jJ~!v`k5-SQXi&@e>CB{1Ef(E(f{?@hV&Af~*s za>f<+UrSGqLsaxy^QFE_>40WI@i37jm?tLI+tJ1XqQUr-sfMQg{f`N zURqKKi6GK?RYEijc5lMf1!-yTaMFMlLrD6yjV-)NI7dMY?x3cAOr<#Gnivz42XXxF z-JgeF>X)fN)=C+g1pn} z>N^_s#NUa$l)4%gPBcPx&a)g)4nja`N^EQyw_;gCsv1vqO%3$1ceS&j7Eub`!ikfD zgfl0n&jSNU#JW3Lwkxg;|3&=B=;*^(LNLvf;8+HQv*%`{&!!)v!NgW{pr~QGWfExv z?ouJ0ZEb$o77E5HRiE?l`PTp)kxjwM!BHnFB6fcDxY8@CtR;Q6T?EpF+wtdG4oIhW zZTlFhTrrOoV%Scc1no*$wn z{S>+R#TAP^V!go#F|$hpY1yz&)aNJtzIBe#P@si zj0?(e%r7{WT3cFBSmqWNp@CNr6@A~{zKgb%lwNReio1piUwzT&MBP4n`37;mzI}NXeD@fGI8Zj@=g5yeGslUbQI8Cfph8< zH+Y4X?$NvVq$=n2tPFKTZ*T#gAA6K}fbu9T5OBMJ|3c0o_-b5xq74>i@!NmrGc9xDoK$R2Kfc&PTNo4OSxlQF%*>-oFjpYYo|D&B<-cfYz~_Mq^V3(P z`mBF+zj%F4;ac8vnSerzQ>UIGkx+@WsndJrvP{C(^0heKy~lRm4PIY{as@I=d--?& z+s~#XzbbV)A#pPJ@mP=;e^H{oCc|stB|o)`Emx1ZP;fWhoSA9d;3>Jot`LW?i|(c^ zE4U>%8ZSIjP1ofBEV_-PFr{`P!svMi-qeJk^;~+fuL! zsP&S!NkR16wvM`nYfgU|)zw=NlN;Fmjg$}q11HEikT}*?uH&DwXAGP%r@r>|wc54@z z^#{w|gtsmNi4_$i3~Cm~FNZySTm91~zFsL=-s5J$1F8fjo?WpkQI#I?9BY?B#$#&M z(a~AAfLjeSv!{lW8t4g$DO|7j}m*8V6ICE|C zo*Nlus5_Qbvfo%4JiD)LlQDFDH+94Z!TSOJ#SQOaDt&}q=QfUri6ACjK_492guM0i z!*(}Q*vBuHaS~rL921<8I#W5_l}Y!Q$Hyo_-duTCDyx+^bOBcghh^&KQ}~zwBXXoE zxZ5nz5%vTIDlH!RTk@?Mj2%LM%>o;6_9LaZ-2d;T)~;J28?PwNPF%fl!@6J7P|UG;;u4PgqwQM&(Z42dd(hld9^XiTI8Cw%kCNrZA+y>PQd@9W8& zeMsP@-IHu5T8+!6+tXcR;by{`h6C_+&P*y{(J3%as%naNP4p&6$$oD=OOI;lq+VZFcM41wICvc8!D~x-t{v-1){v_@I*tQUWuRu)`1>?V+uK-5GqJDm%>}~n zqt#n!X#gHSfgi((d#B?HZYvRPgO{%$sI_s$_XS5IW?*8#cWP?NU4HXMQeRZM&e3Zm zF3ntY9!xtcs^IM%qmgB+sa;PVBd5Rwi!NS{E?$|wB1E~Q+Mz<4>it)4-3(>=Pr2pQ z)nCqsO|aOVAiJo=7CK*3wziV|1P)@zOwbDfEkRT|wtJ&aff(jIHr2GU8*5#Pi;84S z&#y{Q0g66cpx$5<#{CCkbx3L`P^W^Sd z9jWL>zrhg|&XjPIX;W$l3lh9=x+9+tXBuuFjy2>6V%Z<&t)oY4J3XiszzX5!mr(jA z9=fp9c}J|bx2&7h_RrpjE6dYBno?6z%f$6MAz?uO+g33FblR?E@pJ*;;? z4hO|L$xrOxuE~5<=SCo8isrBVTOB~fRnydrj*X?YzA)(8bK#)i;(MWQEz!rVK_m`T z{J{!SASG`*I>1{&#SM9^S)IR(i0y9-B)FF7ZM(aJO0E6#Sf!s^_3 zo@?l%X2a4EvTx(g-)E@6gu*0-Pa2(=SXEL&uX?|@SghEJgKTu1V0(3;XjL0uIVkow zh3xwBbGTwMPsy!k(TU@Pq`o1+N-LSP0XC7Ual{SoDI5@Wp`Q6F3-o$sW)IQtEt-XHx2L>vrz(|7Tn!6H&tyK3q$|MI zza#e6@|pr8(@zE!qsfU``(KsI%lhd`n`off`?(wLBQ(Lp(im^dr%&uv4xPF}-MVa$T$CGhhM~Z9bbfg=aHG=T&=xF4)Ck-q0oG zmXYzI{<|e^`*>#^6bk6cCKGOd(fR0nw6>zy_x7#Zjt&mH5_2DcNkr@|IfcCIMJr(I3DLOn6?g8{`6O?IXMnC>cP$uipjQLKFpdcki2e+Nl9Z z7@5G$%~=HnG^+Q5+kdk#FgS6?$!iahsOT8b6Q0%f{cRTORho*2{@2H+^Ng`=^mh_a zDiwUn*2w$CmJ4yq0W1&8%=|IZ91%*RgE?qz{bkVR8tfxM`SFk7r@^dxOi}T|S9-b2 zO12r5IjrLJ)w$!gS4OTs^wqXbUoW<@7WU_)PSp?oH{G! z7S68>ZEwNWw0nCdb?f=jQD*xwa&opB;T^rt>pZcPif9PFIVQr&%4!)}KoW>tb^@N? 
zlw$!8lKWf0phnIwEDY58h$m>b@ZMtOboKEu#ik@(U0_|2*!=*n`NnyADaX*8luJu0 z0~WV#lSp578(ddK%{9(Zbrc-^6bZlqSq{!-H4)7tNL0EAL4UF_!x@t!dg@xS=_O@# zyfvBmiTbh<5-N^IBEoTiAkXj5L_WJr`kkNlul@YW%gSy}US<6np`E>lIxO90W`Amh zcK(Rv&xOk3>u+qFoZKWL;c`?USUykr!daK~cE?WN!4F5~7pft>l%Sl07y?#}-&0fU z!u+MDEQ~cmN-~o7+&6ugl5!akHJn@M)kbPqXxOKRJ_GA~T3C2TNVuxoK9%Z8&z~5F z1UYtnYwK-M&pA8Q0)cQp%3YP}e!h=tEMT@U-}%>YYI=HlY6`6-GQgjj01=vix=?BG z@@2FOfKGrXd|}A2dwDcoU-cEF=U@0;xs@TWvGuEE^yxYQuyt%uWat<0zLL)^LpEvF`$XrP& zDGLh=c>UuO67Y#Uz2raLDIFdkhbr>ey<{=#dd09~GI;LyQt#RY73f&g=so?U-*RUS zQW6t8?BAU3d!gPUz|T)K%``QQY;CSrXm0CGr60E#rs^HKSUJlt%d2W4RrD0ekTu2b4at@wG!Vs8ju5V zD_&Uyr{EJ%-vH$Rq@)<{+O34%S$6yxmIy7Sq@Y96@l@{O&XP)6xBEfT2-ajw2q{L$ zSP&JGFi&c3^@)#vlJ-Q++tAR?E-Rjisx8tWyQIVwCPZBGqpkZn9xB!kb$;-A+1>Oo zjB$rBM4l*eYn2T-2oMgxd9y~nMgP?OxS!ix0*b83r+T~PAs-Pd6rm^a|GRLcBgnPR z(;sLbmSr-y9zSp(4croV-^KSw0kWZtvK%xU9ve%*0*$X<3%P~i5&HoS77Y8?_%-7Fw^35gQ= zT_5cxsy+Mn2LQYxI;qg>*-KWV8bbwuZU_2;6P!r+Z%t4-Z(|cENlR&mwntZB@w=i< zLBn6KdFu1Ug>!+cukw60k+MWQkfCS+Nf;Hib5G`W3DM-YSOsWT&iMx)X{O5WSeJ|RD z(I3c>p8%JHVY2*uOBB%$?EV%~JS$5}#K#>_HBk?{NUH%cD1I-Xy~)whW{&ovk|sEg zi3Ha2pW74nI)>id*~7P+gsUWJYFT8)!8{uqJRtLXbro114f=luCe{x;$5l^C)aF&^ z*U+7y2}H|@e@79=(2UPd3_e~;5oK#6dg>t_bodx8>UI12yazC8 z2z?KmYc)l+b7+Q~{3_e#>2Bi$xjuS2B3vVzkeEnDI7QX}XkwZo1m6amUmgpb#`_3c zJgi#QpKSq_0df~tY-7NRo8zjg+|@Y(wRz^&v0jg-lH<_*RjuqK8$(wD-;Oxmy1F{6 zvu6jsehox*Za!Tn&x$|6nA)l>5DYOLp4=KLR?e4bjiKzo0ifi+B-Gmb47rBjDFssN zeKOnLxZAz<^&>ofn%#SBP0vPkzRcl0jzw|`ifSMF=2j^1F&B^h`c)|^awS;zM%1lS zf(d5^zfcgB!}zpQ%pGWqfOP-aHCj_s02x7aNq%ol340!+l_B%qz5iUu zdWd_e_ z@KXPn`d}V5>*#>5i57l;n)*%gat<@v9^DK%#T||kNoXRQ9UP*rU!PxW>LmnW6u`<3 z%s`;q;l=0m`K||^2pkpkMgS2{L}&utz&0vqw;@~LWk;eR@Vddla*yB4GoD+ zCJ&W!_LjG2`t`SMZ98`E^k(^a!hrtSc5!ktTxCddQ}6wQmKT!rY9P5)dKw&Tp6oAi zY>%lLaEh1hlM}lmQZle_-|gm+n?8?qvyEVZnVOy5t|ub^#XvF*k|5-~r_kIsOPr(O zS61G}5euI(hUnuAm>^og7lW4NG?&=ewS$Jn!`;1!1o6`A?}e-!Y%?CDeecTbX?Bmq zZ&6|Ri5&-1npo}`rjfNN<2-S;IoQ5a=C#_a_`}*C^B=8sbCJaS@}@(4aZ;utNyN({mlsJf z-#+UdMva8KzG{XSP}@HmQ!-x&bz!l>D_is81$Ie#z`TIh=tN=qWh@eVFOIAWasoPZ z2lKBIQc@sgk5|!ACTR1=wc)KZTe^3Bp5u775lM*jNoG!Z5_-Z^u|B*PD9D|i^jFS> z?h@m?Y$rL=<7XoQH-IX#c@ABVp$y?VWkS70kNF;T8YdMBM*X`@S}I%{%Ko}_4NwrA@+5KFPnQKYNEgIxHR% zu3M3j=owsD#m51-wsHoa$}{1i%M}pm`PiSBoE(4^c$}P^(5{u#{o3Y-eibmI6e}&~ zVD+NRP|2;G43U_u+V^Eh{^VM9o-00WBfymEeX$6lVB)a`BHt1Kd| zG|VX*LPfyxBDj-C3E{`VWo(~~uf@v=1V~Q#HZ6qYzAB#De3Z&MAFaRr)gfjRembQ8taX$#t;K z^%Wqtg!p*4Lp=}6Y@WZQFmS7N6m6Ct9RHZwD8MhShZO)@hb&;1)kl5c8ag`r{l{&0 z5Mcu%;Gz@RC331lB1i)MMx<%FwU6QWrWJo0WgjHvh@3eANrKx!UMxb`MNjW~?Kjo* z^t1EKLc(MOCgxW~$qf6+2#KWV&|J1Ese=en1<>w^ zgVuOB**K(9R7hwFqEfsn&AvrYBd)Ekw$bc-Q)qcmbR~$y((t6)tQsS){6&jxL6l$! 
zAy|uJe#0?Xb@|J2wfnrrANuy)JANo{A=$!Pz2zqJUM!K2R!~^0O}%_=@s}Rp5+Yps zyN>hp?h)u1EUjDC*4Jn87Y^t2^cYcw+VTj7jxxnux)rHCH;?nvV`*q@WO%=2d;k|t zJ46BKYimo+FMPRtS?-)FU4)9Z%ciyPEdDk?Xei`cn+G|m@+Gz_UVJ)p`vl9{#~+e_ zj~2c}(*eIm%jWHU@4qcWf(+C!y)X*su5@bTtMRzwIfmdC!QA3PLXSWGfg%Lq!-j_Y z)Y5T_$VOdXur&tw`MK-v-pEImd7@najP?XttNk}SWN`FTl9Q`pZp2nTZEcXOIOTSY z3|ivJ?eA~xImr_vcXlA?n1s6_lcy!l#b;DEdX&3bN9WNB4SN5gNdi%W=iJR!%g>@V zih5GvbNDA8f&^A&E(iOc#tuK_24tpXD{TmeDN_Um*+^bLPSiN7WgFS9>)V)L5R9=I z7c;B;4Ouak#NvH}9QSE%4Cpi*tqq)pw2B8BocoK1g175}9~b84I$plK5i{VmX>Le; zgLbn3t77dXlddq{Sa?I^jIVQR*^{OlW&Crj6c3uDQC{?F1?})o6 z+pdV_Yodn;GYzKmzAw|$hamI>C|34YeW_6oCXWULmq-Y_g$fq#s-eyf>->WD1=qdi z=lsNv8lVXs7+{NTwc})Cph)GL3z`#Ht09rpWb(3-q@?!juld77T zy}(d}u?Xi99=Fi!o5Te@SWA}O-O@7V93M(YNf&{vu+f3B`e1C|s^8vYF1C1V{CY4S zx(ECf%2*B&xGktebI^*Nc^n`ZYA?FD zFtf0vN!Y%s@tVAOMqt|%`t+?{2RkQUKfj1`T^c&~KgjR%CuwAIXh9!|pXLQI z_XHZrM(h6G>2ykcx`p{u1b*( z3ZNrE{y{V&1PVzVhp`Ts+gpPSk)uuULL!7^H+2{W7QCuNSNKamW5ivaI1y^W0ZsOB zBS-?wG{;YzX#Mt5+0gKuj%8u!rOijG|EC4ezI(9b@xIV)^%U3nQ9Wl}Lo}Ske_iF286ysL+A+ZoU5T(BFASS@n)~+{Jq{3hZx*HS~ z;O&h7{ks~+R4FLUKWh%w*4FN4V*_}JX&dA6n`q8$zit5$4JV$?>ca!}Bg4wNt+=e7g|kdV^X$I@=3 z3PCU;e-wRYENyJK6z+!4mtYMNB!g(*uovPn{!uY^4uD_6#3UYl9t!5O@GG=;(kW*s z<{((b&`YpEDC+auWem7r7=y8C6w4^EwfOpbC-x-NQ)nLm4Dj<06@1%CO)V!ckC=ZV z+ytQ-yJ-Lrwyx3v6?|1E>49foTe=>bmqAN??C!QQ(Zcod&+nbAln0_UrL19K2*NT> zrr_Xh$m7!GJp^*f6mrVgw!9z7x&H`z^k|@UV+JWUac%goX0yg1JeLSLru;OEqpX}j zU=a_uP9<2eotGTZJD9Rjo?GXF?bc2%9jzW9J;9&@g{{rewahHY<2z{5|K;z~$G=bM z{y=|*==6V+)4R!4%fNY;o^=JPr*M`(H%PB(R4F>5a~F;(B6=eY*z zESJ$3>HE++yi1LKud-##$woy*MMHBNij{VJjvAMLwh8wDo@Cd9I3M zBI$$tl&#Co@E)YuzD|R^{!67x za?yjDPSQS8e+zM}k#YGHZ*evxxVR>tkJI{^sgIMpa8gg!)2FO^_T0F{P)1A%h4G>M zg$FnFD|^XyrZL{>>Cg1SuFn0-FM%r|m-w&nKLsY_|3bkB*q@xeEj1CYDibeH zQY>%X@?ZGbW}tV^&C`>1$Bv|e;t3b2Tx+G&Ci^tqlkyC4SRf83SLb1pb)-k3SEL{u zaxXnBX%t^yA2u=Y=E_bGHSmpM9Zkc^Ipuvbflg?qAjDukaNyjTGdEjDah!~|Cp$uZ zz4=@3F)_^%1r@J8IxTSAFnA2Pj2!M?S)p$SxB_+h3pLZp2kq<+tUtNhiqF`OA zHEeIyW@YiV?p62o_0`mj$hxki17z7h_Q&L|UHbev*n9F*bVs@SldP;!+&$bJNCwf3 zKn`i&IXUBq-UMq}=ne{f|EB;^x$&zZd78W5;+dNax_^JOjsO0|+<0Z{o}`-nuZ*Pm**?^}b{0;{q^X#<^0#EB5_nP*l=wr(L zsO3sx(^qKTyud&R(at7{Cd(KeV=Y<*<8Z$SuA;R3^ilVJS4 zvg6Pp%*SY>pxPxghPFyQ{P1~g4Rrzq<~uA~M@wmuu(Z1QZzwJKP0JI|30&0(zo`whkX=x@xCENZowONA1aTEJqp>LwD zQbR8QXPPv`_~>l##T zC6l1>Ifh^hK=Z-8*Hz3gYyzy6)7HZ9`t60g9i9k5Mx(&13x_nG-kO^;sqlf$+`HR} zV8bIG@a6oP3XSyt)Mom|9+sAtcn%<8dM}T&2*2@Ij)C`K|B?|wacVq*l>=-7-<788 zKJlyCe(x*KEqEzTJ^Lps{gy%fqX!6YsDa<6qV)N5>`Z$s@O=~29Ho3OGc&RnkOPkV zfm^q4(=SV7X);oE9iNJPt4xx5w(X(ulwACgqxaw}VmNS{)1PSa<9;Gq5%2rz`g)f9 zq7b5Z!M6c_I>GLYv++OOriGy)j9wMbpPSFj2xdXpLfn%&JuSNA1&u^+V!Vw+l9r5D z#Cb;E#RZl_XR*uXZ+p6mt!>8h=Q2sU2f&P&mm^l2exVUV( z1gvbLZA7x{Q6ufl;Cq+3e!P4MA6DD*zi3RS5+Yg2gby8}2ywrf;=JpQgF`U^Et^M` zF<@g&UH z^`k%pt|x;`pg>A#;rX}teaZ?%S?AJkLstX(jH<+CIfesNnbUIQwgiI5eNs~q<~@uA z!sTDhYj?XiXINMoGd9uTLydFQ!%pud>h;Yg^B3%OC z_t3$E1mf0ZMDd7-l#XtdU`=~n6B8yK zLOR}4SgSQOCVY6$Py}Iu=lyjdIZac>)2s6@O-}*?r!ar_xBk7>RysLL(CqsL1XNX2 zu*QMJ$JiuBRZNGpbk}%nxhxM59n2ZeBh>IBLNCK z76!ELFa0;9a<)g?YTDh_9f1Nw&gu_0>;Oa^5x7^>dK@tjyiUm#f71>LE@hfb~+xKgwq4QaXTfBky)}v?R zxaTrbQv1)xQj=}5y!y#YTT{lP;$ITpR;RT2@MTpB`%m$G5hsRS#nHpwNhjcuBXi8c z9Jqb@urIg*NR^2p?@czY#X+LA2tlL_%_u0iJ#mtk`n-dK7rrZ=%PpuVF(pxk0No4; zfzpZ^wF>^rV84IRP1TD+KL)0Yn0b%sjfBTAHkw}baZ=Am)ofD2sXS~T=KW+&f7_Sx z+c%pdJ1VzG$RbWXI_tJTD_8sykS=8NkO9HCiZRZRp$S#mQfvSsh0(<~sOt7xV>6ZH$o-U@n z6d`DeA1p)IKzLY`^U9s!QbzI4PE{)_h%N!t|2h&!5^|ES1GNR*4MJLAT+Tfur0BbG zQh0|#sQD@DU?iS|gb?tH(b3Eg&%a-)5DuS zRO==`Om?wDJ0Q#9$$`BrNN)^YBHJQ>Pv`V>Sq2h_lY;Z9;2tI>Pc&3OUUyBmt!W{$ 
[GIT binary patch data (base85) for the updated PNG figures omitted]
zPWfGeSu4(%|4#nkLCH-KO1gZXDM+9Pm+(M-168Z6&|cMzU#v;fcp0ngd%xMGp}o`&CJrw zWxjsp8#!nR)Uy>-X0`8c#+a7$i;@Sf!l)2 z>(@3uR!Ac1@>@(eyu??w5#}Wu&hH-go&IvmTGwsN>8_DuWCG1gPyUJ+XB8{yQ(cDqZvLX;Mt zR!T|ehKR!tsrybv!Hhv)Nc0T$cI$i&V6 zcIT5W_SXp|jrKgejBEBx$o^M1~Z^pgtRadeLhE@PYl{IcjB~g}3>PiYzx`PAY}1kBe?r zT6lS3m6;TtfyY)6fTu_UgZf;+0 zy;W3Xz_x3%JcRtS+v9SxpGGHSW$`KQA{@M|GA`K9P%k4x7-_$3bfc%1W9;LPmJ<0Z z6_pFh7QJm-{jPjeRMnx<)zWY1zlT5yre}WezV{GYF73&rR z0#`xvUUO|!6$~`!#DItUUsmo&5iQJuX zB!1_P%{NHRJiBz+#x5d5UHtSN$t!3rnTRvB`aO z+=VpE4MoY(+$6+75?KUJaFZ%Rku3RDF zeZS!xK}?K67n3Eoeh#xCYvr{~2xX^+YFTCfBfq#9q6(Os zPCZeNNlxAZWWBNQRuWM{5QrnV5Bcp#amz?gKj-K;RciMZ*Xy1_ZbtG{0LoY?#H{zD zM;jnrKzql|#g%K4muX0}oory3xtA9~IJON%8<+crC2hsTzOaEsN6}LQiw~IqHahLBy8f$<+a9eL^ut@Q6;++FgyacEozUiY z*f|x*YJs<5KH!d#DBzlLXer=`fVZu7^Y2f5(>r8&(a{Dd!a(Yt0mO~H*8W)T^@@_^34E@HBp!OwG%C`9oqBIVfemi32;>>u<5Q8rS^v6q<|GNaxewv8D=9eIn3 zCAK?!wiEnb>q#x<3F$z;3Js`FKROA0Gc&5*smWht7dg(`jb3NG$E$JFzof(&x;Tq+ z%`N|IOYYUxN}uhgeZ@VJuAUD(DU^QhX<8ATk?1;?U+nPF^@$fBUvf&9+A|-u;98n) z`gK-6&-!w}w>ui8Y{ZvnfzoO1hzcd%qQp zVrc!FQT?3G%$-UbV>v9#(~U2Bz>9{RRML34q@kS9cg$>_Tfz!ilJ7b?LQd=b{uS<6 zs4wa~!!W(IW5*8|vcQ^A`1QN$za4*NC`}Zg7)0%%m-q4K6(=305lv4z{>}L`eXM&?Xmjz zFZ+?B<>*PD`~mRm@slTXp4-YCXcav73XH8E-YbY$E&!kytoo{tbDu@3 ztwzSv@#~EDnGv{6H0D4B3htpJUAl)96AhGFBv!)i}fw#qb2Rn@5M zy=@WHslQif2N##m(&vkSVnwu^FjKr5ecAgF|Tccb$) z<=u@GNA-2~#w)RK%PUnGTJk&;Z~75K80q0jKH;0EvUi+n8iJz?8F{gEYE@6)dr+;^mTQwun16NZe$ptn6)k5z&9vQMP)Z;S;;^ zOz$t!kpbR$=R>znv}BPDuZ;H+-aT3DKFz+d&SJ7JWY&7cJO2xt5AU9kSDB(deQ6sJ znny`qxZ{6J3IdMJJbgh!E1kk%$U65c)Ila$URsIXXHWD)8d@DM*{vLpkckePhBIHk zk^1qa9GB_NV<4mcHPoCLE-YMJTB^;KM}7CV+6HBm79(#+vTtK;GI=%*>FWM->ls2Z zur%KhppA|v5Pp@r@cG1XA-@Q#7-s#=*5){(}DRQam6C50K=A0YK|pUUeQp)kq!|55RNo#od?s1F)D|A z#G(Ku1RroB;Pi01VdZXjE&SfeAvpK@wGC;JknV^MEz8Y)_=hcNQ5dz?aV*ekZ!$d` z=tN-h>afqxPy1rEab8+HzjPSnHcDVH;r75&0M_o?tlmr2_;R-PCA5|3f`HNC5~``x ze9>2qykt<{1jx@0-}tVJ{e-g+hMN~|GZ<%04dQwk;ZO3g9-lv-fvtw@W5_uBVO`~e zXcHNaV0PFgF9M+f3<`8W4)!Ghfp2TGbqrKgjL)(#;SWJyuHcJeMU5>WeMgCWK<+Bp z$d4byEFnN~*+!xVE>m_qEG~vB{~ou7*OT=0J$|qhqPE|@^VMrgXJ@Lad+T0MJKViX zS7smdL&kb@If<&O%IpQXP~1)LMOG?1A{5M!Mjrj1Q3z8tsY zZJN`$aaN0bddv9O>PFI+6r-^@1Mcz-P5NVKn3OYWhIr~Pvf$vmG(hoGwEwK_mAVzD zo6oy{^;dA@nU1_NSh0?G>y7vse}HG-X`9qKH`5dJ9&SM;(r&XK4US!V!`GyKZrKTb#hy4{dNGqkwx zZ`-(=@X{|$T^IDc#6AP(H>;j5fdCW^!wwv}vke-RXBbx#r2U`D(f8J~And71Fjo86 zT_P_{m1+1WHr5Uy9dqs<%uw&3xoK`{A_pCrm1SXWj>h@jkJ*SpxG{*TF+gspa$7n( z;eCG5d~%J1SZo517myT+dmBQxWB>WkdqKm*qsL`3N;MTlISLejz7KtitTEvtNge^b zIj3l8Xqv9>xYL30gl!<7AQpn`JjX~h@yQeXJ-#akpof!(7AC|rz`5K$`Z*uftlN)L z$o$|G$9{t-KtXcub{~FfY1`>Oq>cQU9adHpW`x9MkcCZ*Uz()BG%CsD!DGFoK*@{B z=5)yqgSR`8j5v!wzuHjDbh1W(jEs{Di2)q#~r$>CL{MEed90E zde2(<-C<$=77^?PWzXCRmfXeNyj6$e|FHFYu*vp+e)Uu+!PB;H8`eekXfQ>1vbeE+ z-n^4aLun>Bler&ZC+k$4Kb`Gjy4-Iwxn_PmEPI%7Qzv42U$);-}A0d82m*-v*!R z1x)V7TvT*C%^b=Ae<4!_4VbEUihpQo~X9!v_N2bm2XrIXB-~?v%r+XZL=u zk)PNVHcX}Ea%30##ASyox6Q;4Q#<}SWlVkGY#lwzsj|fnU9k_Sa#+g|gMY`9j?m-E zR>Dzd*J*Ag^;?iocKK=g&kn|~-%}i(34QjI))8Z6uh+Uqp_4M7$VyIjg! 
z4i49=yg-QnCNwQH54wDM^cOutoSt&Fl)wh;A%MQPL$H*2&w$>a~?6hsjA@9h(I!9T;As5CkUS&_eg+HdUr2k+b zYIR2Znw4jXoL}0)raVV)XxyjOpc5v7;Q!0HT)deAsuel8rf$BbUr*fkGr=~+&}r9P z8LaxkB|WpiwT`t7&O)4MaL zg?(Hw7cR{(M_Vp_e`9?;X2#5&)HG5IfO0NsS$%JN*7q;Mul+UO4#;=LKYHXkV`1wq zZ;cBOrz1v;L|00tpjbUrdPqTG3vp$r9F=jBy$WH#$h)ae1(=~RfOelSTfNv0KuE%> zHs5ER(+?V)IG!Wl-#t17y$`l5`lDG8-?vNyNZy<;J@MsbN4)f_SFg??S^#~2jpHIi z0tw!>8^A0g4gd)Tt?ytUZ!&8MA{gWj9)t#oGRw!mqxR;4*JfniiVYcIkoHyMt(8j) zF&kT@`<=f^5y&aV-ih3Fy*c$IA>kP$r_S~Arx1R^XUQ%ae zOjeqlkRWhAo}AP^LF(5Q>+16DJei+ESk+%blZCrL#Xi;zCb}HhBzn7yKXU)#VVDhs z^9@D)*%lJ@41enCB&yri`lU2)e^x(f7jgUcP<^*Z!I-d}p0kJGg@VkTx;yx(NVm@4 zVcW{o+RHq|61@`F_%b!fSJt9!KxG`M(g+yuflCsR8GY0&f}s^@kX2T$SV29EA_dcv zcVVhrOe;+J2J001DDr$(zs14!3PCmjs?BBv`h2qz+r~g@)cdsCw`02$fQCSYj#G}- zy7j&Qrd9zNs>bwT3{V8cOteZZ|0>WI9`auP^0;@aKI-T-H~)z3Jk5OkBr2$>fKQ$~ zk*G3fci<|?wrx!m>ShN%E4G}+YR*3E5PSqc2yfr;UQqDy|5x>48%O@=B z{hc94M;cVkxr~Pon@Mrp;86UdZgGYyDkdOe_}pOA(_hj4B)Ym$jFBM*{VzY0*!NK} z?4En8XzuliWwnuo2pmG<%sJJuBEZxBT04 z-x&EyU7n0mb3Y|vY~01g-25oxovBeLgPasC|C%o`vwCGlIky#3aTTF?e8Iq%R#!q( zlT=LO?O?;^u`gfMiaD>-$(HT!eX}4-+HG9>cG;Ib_^T50_%RlAKjd)X1AmV1y$`_7 z2XPlf(c6g=#JSEL%3Dv8lO=h?V_K24Aq5vWnl6+r$DPEZv@?@^H*dk~h8mb8#nA51 zVSX`M_t`YQVTe2wujR&KYl*boB-V|4fB#tSzRc`2JJ4@4AThH#CoRo67M|Nv z?2xzg&a0fO#yjefzuxIlL*uni;R@>o79sMk&9Qhxg zKJ|KU5+3(-mw;Z8^Z>QSWw#A`a)4%PK+ z&GHaRe!xS@YtzqgE;;$6Ver@n#wDVMT&>&Mm<2sgu-+SDA@FJOhN~+ZIX@osuW~7u zLsOpujP58@lsE|$T9%ZSw!M3Y@mVUFZT8)q*AJt;9H;EgMH7oS6=Ku|Bl#g?{~QsX zmOHL%ns+&!Q5oPgRNH-2&)v*?55HwTRVT_2XHU`t%jzxd#U&b!qU}$bf);P{pFU1fpW2|d}!=2wrFdg$Sn<^t`__k z7HIU>Bre*k^D623!gze3HBpQnh)aciZ4DJG794It6tB0Izqn)84GCn{mxroXp&-DB zj(^yI8k}CdUUNs07=Wf7+r8X!`2G9KkYd4i0A#ur^{caI5DlBs>emu`Ow+tWBQEUx!}jZ);BbfC%$Bj{aEM9(h|wOZ|!O~=jO1ZBuQT}y+Xy> z$_mC`t=9cLUQ<~bRUel)pD8}L-9tif&ZCjMwruW3?8Azc8$CMb{-=S(y8Y*|CfwJi zKOh=A1$8wBQP9!Sg7rY3IfGpdnlW5%Kd9gu*Pjm!bMg%{wZWs5X^X(5f)8{sv4bm8 zP~^gvuNp){&n5eHad9z{TY;UGHA7z*4sz%ahsvEyAL%|!N>T!*iIauBNQ8j?^T+Hg zVwwP>#2;|JsI(pC;WKbe5}7~Prz$f>N>^?`Q{kF zp6s*RwswXEZ@1yoKri_@h@}N#4p5F5puvf&t}99r&&apC=p{ny9*`7{J4*@(>iU_? zpyWL5|Lx=d_c64s5LXwXdYPG-VRqOl>u8j&*(JUVOAzeOxFh5R1vLjvhzYNN$JoPJ zvobQyRNs^q71b7v1%UhvCZOhFLB!ue^1lG=nSlFibY|uJc zA4tQ`32VkSa&ly33xPI(X#me~7h;+Y{d#bxG^+IBf0w`&f*KkKb@ie|BT;Usr$!nA zR`sO2bjBq*|M_#!YlMGTHsXK%|NqzDv!HDNgHQLD|E%+9xLUia$ruQVWNI{DF0QPM zEM+U+qJT056`n%We)R&A@QR@Nm#MU~VeIo)=928kzH~O>32e%wg}JBi9b&-|ym^`? 
zC=F(q*RM52dz`L6HOvWS;JspJM~uJlwyb!Xl&vVYh*m=h1* zxPV0u+9cmN?^Cuxy6w4h5GDvxHA0SL_3FslRVgBsh7XT(#HglJedJCh8Yd16JmL(G zxSrFnlC|BmZlJcf_!0XPYoaM&bj+su(bi`#=a>^!Ph$Q`gC ziIW&Gm<9Sbv7tx35&v0cU5}3zn7|TfEG1KL>556IDIvkgML(|x_nQYpftHa`Hy$1; zj&QgiRk5V81%^PAmuQH_-s4q9YP1r5jXitDcNF2h0iVR##J_o!c)czE+RlfWUB>OO zgyCn|L=-LvQ2}OZKaf9OYcmy+%MbsA{qK)47(2H^_VeD`euiJBpW#M?-l*r&j>Ff_ zBh4NM+JT)Y@G%-~zFHqgC@T0p?;qm*kr0zbVxKjAL%{Az>UU|V*I^v(xcRvh>=v<~ zMn}D4FLPK-%*R(qU(BDT$r2P`sANi}s&8)Am`!EF% z`1b9{au4W$GK;2d-;1uBoH)^rKL)G|WUXOgm`;R-k9yR{Y*OtZz6|~(wzJc!)IpTBPaqOh+nYt`@HS+IFARiU%v=&0+Srf82dyVGEKaZ$u zXF4ugRDwHz#PuHa&97Oym`+2#pRCm?td-jQGu9CnJ?eSM8P4R=>)*5q=#YLwynSKq zS#L?55|9W`H(*vk_MpUXo&kB8X|_H~G5I864!sV0BE;2 zW_>SMTN9rusD0=UfQ%j)e}mtU5D)1Kgr^l172)AJIQzj{;To4A;C3IX^?vDND`A4x z6Vj(|6BDqP4G#^~q5=YrmA2vvW-8s)~+Jiv72Ms5!2yYa|*AetZ9;{7QpNL*s0Hei0EULtnwz%lgWbQt3aZ zf7Vhza5%OsKDh1QqdBxK8tHKR;AMblIN{qeVku0Z>KXE!jkUGzX#UTJ3vG*oMFy|O z7n~mcQBhn>%#y&e$6cqbP&o15m8kyhUS8f$WH9L{-0=-@8UeRR{1_tW{=fa}pk`q; zW)ovjUoN7Ai;L|CD@$r>yd94Hr>NQ@Wm&1Pf*8Qm%DWssjZ8z4=g>r<0Ru~jX)HM| z=Pn`D@`Tu6X?RM3mOHht7bK-&eyik^L$^CQIRi%?>tgN&Eq4Gumj zPq$rq{*>a~7oHpakY+=+pe?ES&#$oCga6xZ|1AV%67ypS0PrxG5`Dq`{R^laurAPPszBsp$I5&CNk&)#6w{gNBU>mBHNIkKoUNbqfHj1~ZFg z!(C6zPs0g=A{}*MeS7-}tJ!foFzK9xC%Kcq+;U0z4jwd#{O6QqV(QZISX=go)wa^2 zU3>%gSk=A*jZiBl5Y6D}qg zcc*^pRqw_0MZ%l+Jzo+ZuYKkG_lb!P3~iEcLeoRwI$)pjSvc-(Gk14VVtw-A8-g-2 z&mfgVz14k*7gs1!$Uzh>aE)z9vh8E6^wD!)E>|WX0(nyU->zv zoDSypI68Y&#m4r?wAXm8?)QTj#J+q0>~>$@dT;b5rc6x(etYD$fO%f3Up!9!X;x~i zJz|(shUb>a%Kh(C*r2`YOFCSA>qfytT(7w1(e}Z>*j;#bZO6o&jtRt?K(YkU38q-! zh#i@l>W9L#;Nn6866V0tmvi9;2K8XIq_<9cn=f<=Gw1Q~y9YaBp;c(bmN_^=WkRC3 zW=N&$))(Hw zHbsz{IBXabezaH(Cm`fhUsip%6yf5*|Lx>gfYFh*7ayrdLOIH03Thmdh_E-|2{xxy^^W0plB2 zmb(M47v5AohY>coL}sqFk>iDzTND_H>YzDm0Q@c(LoDnt)KrrA|IQ}?27BPfXGifb z#CR=5B#bN~3v!PKK^yU?zcynR{*YUqzjSF3tU01xuy@eHD(>-*drOR_!u|NsqM-*T z9uOJgl;#O@(E9s6$0JxIHule*=sK#ZGZIv{Eebw$d}~vJKsWB~;=;lKj-wAQGKZmpsFWc2yqzJrthPnjm zsMvR9%|)#+EhRa7fXg8uOvRxbT?A>iQI4UrfaSk`DXn*E-^med8+;-%OwkaOy#NwL zVg$Hl$5Sx%Z1w1<%1y2T;2`zGh#=ox68~&PgIfz5%fqy{r5rx&o%gL<{l4=ksZwGJ-b%zn|6(I}Fe1fi4U`DNz?jOkYNAi2eXfZ$@-<7s7~suU+}~`Y9_J zYN9pFF(Uq5nG3oO+ZGmvHg~(Bq=y_13bF)bhJ=NMRaM;rRim8=O9&UjQosLQ0*)dU z+lEs(OlCp@MS3zAW+?CwGBz_k%}N7DZ7RMMiZoH9Y<;e)|9-c&mvD*0)W~z_+W%qf z&Eu(j-|taLlcG#1W9^U-5i(P4GZ`X7hC*g36`~B4kdP!9BJ&hdNkSA7k~ygiNs%E_ z$?UftyU+J?zUOt$d7b^odnoqa&-2{(bzj$7>snVY`~+|bI6MJE!x-U$ySwmclZ84A zOQG%if~zc=bj$xWs?&=y5{{e8(3alECc zfsqe}_#eb?Tm!G*XG;A0`HIK{!Qa3WnCeksK-)e(Nq0#02rtw^$Efgd9fx%@lLW+t zeRN>h4QE>4*1gN>YK_x(b&^eQORq=nuUxx^GSWIE;t)Q;{qWf+`i3^kp6|S@ zX_&0s3OB=7uU-KN#V{1!jqzn~3xV_Rh|*Y<`VUxG(upP%RS*y87o9do7YT?^)aH)u z5}23~xMMw;TyY3aE2%u>e$+j=zPohsew? 
z3#50G!idg2J>|%qa5}nlEOx=ia4#T6Q#%&di&fn@4r{CR${5M&{jKz zCIQw&8HVPpzHW zPKvnYB6h&KiOVlIxE4eX9=qYB8{h#f6M)vJ=+t#|!h?cz`I)f=;7f=BC-;!4$$8?4 z1F43SpAJ)RQP3kj$Mzx4^=;c`u8jTS1#sGd6!c@VatN|083cTc2NM@Rj|>jS3Wj<- z{NTEK<%*+Z3Qak=xaghOrH-{U?~SNPIRfnpnOx;WcNjIquP*iqJg5-%D2-D}N`4TI zlrJmz-MX~}84pq+qR(aKA^45YpFbnLLare9S3n?&Q0G#?$H$5$l9%b?U^yE!cK?W~<7u1N!7Q(9vCf zRbNKKH~zf?mkI%{*pnxaw=X>BtZD3xrqW7S>m5h{)nip^D@XgOYdHLbgs%Ng42jT8 zqk<68(u1c@Xb!iLxd_uzU?e(vf77GF$KWemP{u4IH1hOf)ViyMy0+w z)4T*`8wJOVASoqgKeo>ZbWo$=lNFtxQ5sI|*=g;D=sBV?onou<$W?c;s2Hx;J^sB7 zYZZ*RUj9<#NZGyIz1NpE`!26xBo>KYXLEtIYNo-*A|qAH@l)h$-WcY=!dm3*^3=AO zm6;j#AWd)IW}@@Gy5TLntjYHO+fu2T31%@(t8$to(2H=$7^s zR@FnK1O^VrNv<%Qkg1gCrx&#!TIbto_10&B+*+;lE&WAFocNkE(Cf+LYKd3l0m$o$ z&B8`MgMAgZbF!qZD$U0tkTX$?VD3m>cLt4(u=x4*&DzCf@ATaKyiTn6p543c00RK2 zLs4qL&kWNueuduRDj~vaiMOPxc%s^3(UeNy*hBsZHDJyfcO!IQ1iIOC!pK`&D&S>2 z+)>v;``*=+o~{$NlgJK|#Zok)1OR1nBeuDX3XF6H%lz39`PE_@m&&Rc;3a5v`OOpz zumZ(C@Qg!;u1F!MY*yg^WnS#Y>xYm6{!4x8A*&_n*U%6xAU*g+7zyn$8o{1zS?Dn; z-%7P>R|?2R+#0lk0q~HCHOJvaqidF*$2_s&lC)vcA?W!pr87~h17G}^X4$5NB0rOOw|;sGN`s*jIO9zI)-_HhW^Qjg`V0<|Oew|dRNd;9I^{Qx7l zetdNB%itWiC7k>pvXA`D&x?wQnbkQLI`A%LS7fcv%-2AOs%}JU>gW)lKJJVeO!OBP z64jpV420bi764oLc_IJIx)BhDf>$q4=5V5!tps%QdJu%)7 zfE>^uCPu~J}3IRdMDXSBHCHa^VF1&2+jHZhd~Qgze|0D#585ehLI@vQ7S67`cd1b(|dfy*;*h-d zn5|xxd{S7rW?bs%-?ee!Vqz1ei(FT)UWHNb>|paQ;3*iGgh4;T_b9}Z01NSkFyW<| z?xtt|oa(Qe?9n*7j|x0v+r`t+sGz0*?3UjNjOksY!n`xEGsX=Ar?;$ppPwv%>S8i7 z>`;H^e<_t7_>v~=a@_I9`0*v$uFJ!iE_)gEyG8=Iv5!cqQT4n|6Y%`6w)hNEK&^(7*9FEfe^ArgY5YXZbrsr1A&Pn$M4-68a&;>WhwF!Oz1FO zETuL}K+rRTh8Q6L1fy5PRdn-HRlHI0C<@+iGZe|LFY5}Jq$mAnL_R6#>7%<+4o{vv zTkCfR?WX74Zbe#8>HMTn8w_P!TD|JWk#3(7+kWM0#Qofq`4NZAc31?odYi4pPV#p) zCeIezlP8-ogd{cNi^LTwVec2aWRI)ylriL^3*V(9I1w7XlM8ND|p2B>Wn(!j#zHMawkf zHj1+xCjqFO>b+Q#(|`XGybAF2a|gF*h)Pm^vL&^gMq%g9lhATdc}cR0p*)q|^^KF^OE@=^L z7YJBxar-9l#jqHh1_c0Y8yXq7Ckgqt%=iDhEa9S5-QC%@fee1KImOCY_81xOl-B)JnBs;O6$l~$ zU^b=iwQUIaJ=VjG_PMt0k$g03*ZzV86&X#w;Au)cpVR;!JXi9u{L5Xf#DcQZh%P|r zHnR%6)PPmletK^(-T3JH?;4iaq{+hV^v-iJAw3qwk{Do>d+0Z2BH`=k+t^w)iadJU z;?pdfG&7R|7d=9gY1s3|w+`=?reCIAekA`8DI&->xO8_l590=^C=J6;{&zM2;XMGo z29UNY@5vEggZd~|4*NP5dhuAPraM*sDo113?0-+q#t$l{cI+P4cbM&R2PyJ%;A&Vw zfhg4qs+f4z7*qHFl`IYCmxn31_08vThKesu$ZXmAa&m|cCd``^W|W4N0Rv^|@;5z5 z3LD{6%Kh}{SuuF})2H)h#@tK*!2A2d%x0CM{S6zj7^!1G|DxwIO;omR-!A`GCw&3c z(XCn=D%y=3Ay(3qjTbv85T(JxxOAP_`;~wCLa$Ovz-n!E4tw#aEPs`Sb)kcF$sIRe zv@Mf0^-#;MWEngd25=BJ8{9*q0`Wuv8TN3px${c;oar(>{o&Z2;)|*zQusC1Lt9=> z{VjU>w9P95a;VYggJgE%oFQ5w zH*O7r##|%K>sO+d^v=lIszyVc)1|vfm}g;c|F%SLq?dYLsNR}_4mGM#0zx5K8K$duTDWx(K^2>ny&mz{hiY4!qIUt zF=cmyD5O}OC~9NVG>s_2I1#86#0tQP*xA_$5fgxwzK&#Ggh0Hizm$ap>YZ_K{`K-a zLrl!x&qd2c)%29JVX#49ARnj(@j}hn_2mmH`c8leP*UF=NSjQ1z4BMz zl*5>5PD0|2Bl^{s&hh1~l9D=eB<>TP`cz2!ytL0!wrP}I+ARS%-JXDv8BpX8&d{|0 zh8O|n0g#|?Y^*28NMv!Hiij2JXrjvT_{DL3v&w*Qrpd;DH8aCH@pANO80`ENS~h{v zZInyyJ~Vfvw|xFr2?hWKFT2>})VO1#2CEv{+t zeFhdr5PSgW7ZQC1P9r~zLr#Zu<3ABNx#*q+(+nWMOB2 z5q#n5)mI_AfKlCX)1`A4y#5^!5{PAJD>Kp6-4R&}un{JNx$RCcXgS+gg^$5tC3G5N zHF#pf&20)7ClscMmnPM*i^y9>v~NC{nnJ#X7iKxa;ra-QMpe}sPz~izEB~FDB0O`Y zA5k@}LaS1;&9ucs+yn@EXwD!{UsP7n!718@5VbL?ZhLN=BFFs6wvU^c6eeU9`^zK^ z1wbsd!oC>L-G2|^-?0_IeW>g^AAf(c8Q6m1#47^<+r|WQW8>%Pnt|JM<%v{rz0&>~ z+H$&)u5^v>7}SeG=C1qAe+fc}1(Ro?>0)1CS???0HC^%76aJ$GK>yl?4O!N9vZ;B~ z-EP+zYfGQ)T?Me=k@z7>#wt*b&&NI6UIDl<;qpCCCB(zu3`|jHmX|J5?G=z68ga46 zN=_m(Zrz&R^MJC->txIL#{1|A3t%4=dS4)`(6_8oLZjgy*6j|M|F`;t&4~w3j7aOR zuVqZr&-!1#hhnwx*|YZ;WeZ&s4or{&65iM%(5iMJ(JU>MgH#BR!tj1c0SsWJ={@rJ zke#cIsh7KtzvsLCsTmo8*RG)~lZQ~Nd^3d{&Rl37mUJ`BHs}OSf};-b!y8Np#HKRE 
zdL|^t&@|IQ4;A#e5LU{LY~DZkXM>y{HV3w4MOPQ6lB1&|)D{%?UxJ4f^#U}O)ygY&hl*w5OK12Q7KkXpc( zlN6nECG)`x(9J5Us@yQ)nL1qV;ovZ2<7|9v&&$}@rS#|58V4}RrRd^R?5-&?Gc=*D z-EyJ~duOhH`J%qGwah?+fi*SG$K;uF3!^n(&vVu6)0pi7|6GH|=# zDY!KRV|Dxf!S_B}itEy{P2B;(}I^j4sxKxgaR7r3`22ANnFgH?&M8v81)sqkH93EP}U^ez58Z> zm}3E8c-T=SvKCQBO>>yL(>u!ls)I)bF{4gH1AatzwjEe`QzfQqGiq0O*`r1+FeWDA zeE(kk@xs^DNEY!bD>Omois5XXpBTb39>g~A4NLaT|ApJiqOjd+%0;u?J6;u5`4xtQ z3=s=y9`;sJlLxwaZ_6B4;~`&L^Y76tajF<4VPxbQ1{d_yK6-Skc9?qg?>~Pa|JoxF zb>jxJfzh#+sR0PLP@h=IiT}Hwe$6!9_4Q6T+d$sH3K~bqsmYcKKgbeqJ-+xg^~GmpmGHZX=bv>4ggkjVzRN(Tdf&gi+gj6(gBDWgL9E^lr$)A#`XZ!d?1 z4PFm3G?!=$T3BEXxj$IPZpmdRP!^U@RYsiEbQ%osGCK}1tC`^LHk|{3@pDidmDcvCAEoN@*PH9 zwI5uy@=M|-H(#zUN4G853?LqhNCZE$wpvfNH2DE|2UrhH$nml>I2$mDj6hCu#wZ_F zS7AG8g$xRLx$o4EP*n5I&O(vV*RSiFn#!Oj8{~lL(9fl|lA6wfU5z*0m-CL>$qRb` z2S=aFom!jhFou;kda%LjJPmZJQ>cw_|1jbzHzcIqF7fh*bZvGB`(C)S5&Ank>~I{ylwu`ReG^&&CZygJSyz+t5>h!xMtcVHq=lD9nc%$DfcQ-l8D{3kTX5 zt!l~>X$lgJt=PO>EOibZz{Km%Q&sBx)2aGDksdvFc7O1BN0;xBDAzq(MmXAB-xl;j z0Mijw0gr-w;{p;Ua%7WtY`FvzBx3RwrZJozq}-2Cj+6~@(IGAa>_s?E;M;Y<^^|R% zV5a%!&vjpPLy$~dj?sE!Rp8k63*QXPi&)G}vrnxVoWV7ibl*4JL4*mEm4v~zwA{*j zD#`!P;t_w2!-?dWl#=`CuCh&b3TIp-Mk*RuN4!Sm1b>NoadG?a;cY1cJuKVv9cn#* zi7e0eEJIitwOy9-X5ZcnlH8jJYkPQ2YkPaH9&#Vy0%NUKyTx7J zyGc*(KK0srRtm*ASbk|eN`Mew;}}s9Qf*}zNHtak7v`#}ehU>_$CG-apK|s%6rVbU z{4QJGmyl*x*|y#~NqfZcr243=jh!8mS;1YlE8l_$ts5?BXxu0VJ%hn)2VCEOJ<~6u zsF|kk>${wq$`f^sX6#CXtxmjDfxf^WX^#Rmo@|RhFP?k856~nN=-sDv5Ha3P;M%}k zDy>6qtBVf;9o*Wx>fe{#E}+)yyEJQu=d zP-s%kk&+lNBtgL60AWkA; zp%2>9Qc+H>6iTi__HFF!geNvIWK0EyB^`Kn5o}Fws7~?nsd)p$LKc}dlElH`pW>Z? z3ZrV5v^6drDd*0}i3V1W0GQThPq~L|vMNk7=rJV4N}o%AJz^eK^9?i4DjOTeCajfP zbCb^Yosq(hNI)|w9DnTFCx?(v6@GeRrvvuafQ6m1u2U-PsT1SlwaCQ!D<{ynVE&yu z{e$oM?OD5aEtF&B@QGS+Pr;}X$TPo#IKiKWe;=mY-EJHRFw`TA(wLdsdPb0%8H{$# zterq1f^-+w@(>|`VE6X+#>8h(Iv&!~*m0QB=QG(KtjiA#A83K!Fbq6@-nn92fl+9y z699C82*)n$pnOE=W<0>pot+F2LPrm|x`PfQW!uFr&VNFR2Fx7^?;<2$$PLrd({cR! 
zfPuyRb(NpSV?vxMMYEK<`!m$p78b-@isDo;TbCQqn>Nr=4mYHDic zF7SzDV`e6k@uXaCh4At4KskR-DjsGS=(Z#LopED{Km-FwlqbB5>hd+{BEqO~Wm3d% z;3fD*NXgJJdM`YDGXu`JO|C!xeVFw&?OyOR)|f9!#sPpCwHi7)`4)naweRDlG3pyt zy2I(yOI;p45^t4J^6*F2LU9NHDnBXdAQ_<;tE%aYzaBy&7z|P)Y zjm$w`gT`J|rD8E77;;9(xRY`VD~=;*w$o)e@Y%DD8cvRP4Bwu=3J`_nUjSj1bh=XK+r8xt} zD+!m7&Yzsf@cs_PMJ~$wK3FCCg0>9yWVJZ9whEhV+q!ic#~hAhG%h+hIeB}}qgX}) zcwl613Z6R>8Zh$xJNg-M9sCEH-V7QGU_?$^k7HSz4B-JC3lHU;l@^V$E89g&yRhjo zh%f>ESpEaEjlN5fklDaRHbPBd@%K^4O|7lJ{gqcgfBrYzVU0~piZSV$l{NYk9ab7I zcX$42up`L5m&Jbl)sy&a8NfsX?GV_wzayRdW3+6ot@q;`W!t}FNZ-UH=GHA;J-w3% zzP7d>cD#CIEF>8R6BhRBBAL}6ou4i27n6$=aF&cCAe8LYdK)LXx07zy+IaH0P1<3= ztzlbIik{%0y`z%qQFfWwp|x@%^UJwPatO-oqLp#HM- znN`LD%Uiu)OzhZ0V+Lks1ATpx2qBP%zdR} z?%jcn13HL9*#8>z&n6;jZ7cypfB^mNS?X|K=DdqTomBx>X|2q-lsozrfePcFks{o> zmB?$rTS0iId`aQL2w8^OD+&%{`vHZc?gbFIu!i#N$ItSPyCJ;>_kz40zrAt8hKo+r zmKewhyU1^E=tx7TZBey=DIap+=HM_Zd4yvXvcN zzo<-}I;3;|lCRU(F|v?)^Xl}$Xq~RGL`){^1H_ZAlW6fR6Ov8nMIiNvh?8xI)t0$Z zF2JSQ8p{nmw7rW9F;&@@lO?VJqFo_80EIoh*6BGpIayiiiiLR)iWI0_%+=Zi#X zgxt7cz!_xPJK+gST(SYGAcCqJu)okXw9E7(%8GGOTU*+~o1!9@9t2qq0qNp;g~eWhnD7?vZw< z-0BM*RZKIWXK?TlXiZ^m%6ltWw^uQFZVlnJbU6*61nB(ap$N6#5bHtjHaWWh zfuNy;0;(3YOjuc283tcE^Rcq>u<1J6F_qk(!^2E^M@%l7)4A2v1!aN|kf?(wlp*42 zSXmIy?efo*uYaj3*2kiMbd(b{2@}&3n_5PgHJY1uVL(`x0lLZXHcpO?rtP51b$J-I z6XZHSe7GCL!>Dd*%EmA|KQAcvBqpX4_4U2tjej!m^ZotsEoHwS)%7a%V(6zkS6&ak z8^H!Y=G*HACpIuO%&eOC%~|3P2;U8Ib{^W6xNhZ(%-D-hFnE$$nwn;QW+Yx^iExPl4Z-UDub1w;wSg1wiFU8% zE{sqJvR_tUH2NU-t#bbSasqs=a0jDUB`G&HrRldS6nB+hSKNbw82%iP#dz*NA|RWg zHoddm=;(gfT|g#*2kTD1;9@bL^P*7f)RT{Y#l(twyzkr-!NhdE=j`zr83VX>__#Vu z6!n+*nI30iFg(^WyN~vxx>zSNgo>|c?!DYjxz!R=dOYJ5o9O65AqbtHohAAOo73J7 z8A2chL3IiG4MtNiwFx^G->&3;z$lJFai;#VzQ`|~UOLrG3Vi5Q#Vnnpa>HMKHl=rxo zeuZXfAko+T{CP8ou8?UW8^{e7HF_3^lfc6hk8C;nZsk`T zVpgEj$WNing^wPe^=b*82sI2fzMYV8WN~fN#EH2^71L)|p8ZN)t@<`^6n96|gp$K4 zaN%vSChykT%+1X~G6dQox2pnjNF0k$VC!d{M=wz1F=G?*TS`70$bP9n#|1Gp5s$pd*RIjep=mGu+(GU|?1K~e_-2?sGumkGBPo`%J3ndtkM+@*CqufJ@N_*Fk@3|Q9t_hCWZG6Sc<~9{`4as6 zK|yc?tuwQEV1rs5>*A>F)A=+@5qT~hi?_A~Z)7{Uw~5{h`jBUSd_J?>efz@bJ{Fb_ z4>d0x@m0ZOpMad56o5%w9&e9~2u;enNg^Wo&_bo*n8}Z-wZTYC;Bip5K)ts7t7Pon zO}aZnNK?sApL-8{GCb^J>aov(i<^5H(*P(Ct-uvk^53ce&wz!wN{^+v_i7KbLUPVD2%i5-D^y-wdl@YKEf+8#njZ@UWm|Q=a3739R(GCmw+7fF#2x02`1W z*~;D8p@&gM1x{lus;RlTm(e8%%`^H&#;dLBJiD}xe{N{l%fn*>8J$K51sGNu-6umW zVrOOb7|RdBG`K_n@Q}lSx_^J`r%#_C+yTV^wW+c1%byNiFaDtYG;(sI^QPZU9o@c> z>f{d}ZWM)B7Z@Xk;)BEyBz#S&xfu zXg1SCqxHE-=-;xtGcwmb_MrThy{Jq8><}SGJAK*d3Ght(7v$R4{BQXIUw8)|p}P5_ z-Tum@cfxxkS#R*%?%JIAZhgp!0%P1vBbRHGE#er~%rFDB)0SpD`eE0*(a}Y)N`wg^ z81&l_&y{`x;zbiS%DByP!SakuV<&7LyT{Rg$Sr207Wx`FSjV{_GKqmUfDBz! 
zdevJc7W@i;IEWX9P!u6PL&P*IJBpNRa(Y^L&mK{h!QX{q9X}jPw`?`@j(UckX=L#j zfvoh>jAC85&*QqtlxcO=b4t zd~lGh$B=Fbvj2{TXAKk2uVJ%HLvEttqN{r=F_HVMg#E(Sy6hu*(7TTy=Xm15%u~vnh0>?YS0GKpYu`m>UEj$yLv_4X0fj79 z>V|PcAg?idl#XrEW7^_->k)BJdx#IvxLX-Wfyf__5CSq%Q%jA!fxGi6Qx8?5IZ{$` zT4iXM!QI8N!$QYfn~Wlp=qwrUJ&1||2*wL^jGCEgk=qK7j~)&RdT}8`@x20?6`x>W z30@Csff1mU?2mULG)i?FXxf6H0fmc(PpMCdrPD@Ey3irR45r)3EfyPTu0UmPVL|G3 zz;XwKVMi3jY7cT|Fph)_%r>>{EoFV(tFouwtos>^7=G>|gjlP}?=KYVG+Nxvl@dC3 z+NATBs!oS;*Qo+yeA}&FFPyyoZaeLio01e^Zt6bh@TJb_KpOc@RMt$Yt5{Gbx;nJF zDTlG2KClu{kuUPH(JzVoi&N1@PFgyq!FG8-ad~zqCm&JXAfVa`{gOvw#eZV=91dIK znL0^s z0}{vQO0)9Rl(K*d!7;+b_bS9`Sjy1$o`hp6!Ciu^+wbr12s~B@TOIyENK3szTAx)9 zPKz=G3|wtB0r36h*-_)DGNXynh1$jgV_Ow`$R*VFb6Qr*%WgOI_l$OFhjYnDNuhT( z@GCpz4(f9Ki9M8`l?6G<9K_-A;EFmrIzE3s{w+~N`S8)|uV+@~AHBOb6o8C=N3zA% zT*sn;$DNkGpOzGhel<7mIzP4c�=(n>%rbjf7k4ZBViJ`B4cQmAx#Ze1yWaPp71S z=s|ad1}705$sr+5puVzqQAh0&5!p@3m6<#vTWL!sY?y!FiGJ#9X;_TLXWUaDk@Z@) zC~NwiZV4J|5s3PiG2TVrh3A1Oa1`=yIZJz28{HDgojPh{0DU8H&_DpccIYHBQMmB@ z7#VqY{sHAJ>N3JMEFfqHtEsAH7z(aAm+|!*w04O&OMZFHcs|dS7)~@)CN(EzoJGYA z)fI|swNL++uXTCZh9^~46L94SUVrq#N#u<{lvpDzXO+!zZ>eS1J<-spp-HUg7v3)6 zcn8gYw7V$J^~9!>UE{d74>O>OK<-6k_Nb!BZRNhc>3!4^)!!yiXXC+dAMc&WvI}0F zn`=5~5BS_H?jqBs|7Zc!j_k>021E{O+oD~Rl9cSb27npq&e+GtSi72ch4dWvHStTy z$SlncB*Oay^D`9b6+VCAd*t2`}*VzknSwkgo!u}mti@?V9= z0MT3U&6_;*fuD+RnP}EEX4HfOn!?V5o}!?z(D1@d%H@u&+aPzCfFSDYw?Ds?l(aLX z`nEzL*oN}JGK7DGvVc1r;kl)g0_31@0Nf;C`p+QUGWWpfYNuv$4FsQXE z$gb4e2+#(u1euv??f#i-xWY4Xax}@Hy6}av=~efR&tBr>ypMqohU=1ro0{7&85O<} zn50T3MPys+ZTv5$gBzYrN7Z|Ik7!_o>%h`Z!bxUrFj;%wf=R9Fe6tqX@X*<#W|{cG zm2`QrT#$y^v`@#xq#&FdN&owlu1!bLT4dM?@;FlMDScuGOEN zu$oJaZl>K7XdVmjJMKeJUK8Rhe>;1t4RZUw;o&d_Ut3Ezt2!-QW7|4d@OwrNp0A9a zD>u#F>`}L@xCGJ>GWl38i_!7p%UzVx?!km^-+H-jFY2fnr-F(XFBb&_mrqkFswPi< zQUCl;WEwwp-`suNQ+HIt^mRF>?hxhQJv!QzH|{-rl2%?^;_}?@=GNAfdf%2OGqD$x z1J3tvVF=}r)?X7r2|1Uj+IhwqgZptXSi%q%ouFW!3ECK&o#2c<7j0%5`?`7q%@y$4 zw~Vzyw(5VB&d%IHx~=hZNq+LawyPMl6R<5}H4NOa^1L$kpU?DsR))`oI2ApB0y;HN){yV_IVa04b zFD~(@(-|5YH;w9rHC59@$D z6xe75KmxUMC#)a#?cDjQvT~P{6hNx0Uq3$B0|iGX5Lu$3mxU!NG9oD@rM18)AF!*J zrze1FLsd%3INpsj3In*%2S6r6YYMtKPSpCk(vHk*aCo1A4FVW~90&$9KhRwN_>?AJ z?^M}{e zH#C?Ro#u=zILLH;CnaIGr^a_c%cc%#a$-!((P0A-~N#@nR`)yBZ*{aMwvhC%IAtu_=>Fc z?>9nMfq>e~AvF&80TOM<9I~BAi%=!u%{9rew!+kynM}leV(vF8Jv3&cbOXJ~j%Gu2 z!)AY@#OzEaoCZB;fCLq#tQ=5MGLP_z*pkzMk^%j#=Wwpz832L7iU54T9gY(F{O|`C zB=xvO(D9W)?2F%l3sM>8WDu~)zlZZ+8{QgXBxpnkD>&#r;fjH;0&EppI&CtV`T_a@ zPVSy0`f5Hu+l$-!jdyDZ6e0=m>;`ZNruqAazvhI!24sSF@7&SN@9zI~F!xCIo= z5oz?$JK&VUAWwD}!OqJeCVi`Mr^~#E?0BFLWy^BreyYpBX18l$v~+f^%+G4XrUx zA7r&UEH)xfR->SFN_1I#hCj|o9R*l~{wIw*dhJS}O0bEYGcg(ARA|zAuuJ3}zi`ZS zO|{~;Zzxs?PhI>q*l;~PKoMkeo_TPt{hz>J@4xo?YN3B{QZ&f6Iu6}$M+A%+0IF)ok|K8@p<7>Vj#rluQZ{) z<^|cn*!ba!A=qr71gu&cc3q`rF?!{iyq9m znVAJ>b`5!9?T;B7Oq_LdV?$lp9t+tr&NhaK$O665Y^VH|fbT4joEV-OGkhcU zslY_&#)~7o)rFf=H-07m?zBskxb&RzwYhKpr0E9c(bXjn`SYGV>K6LO#!^ssAp^{o z1V6P+<~)C;|G{ss30z8FAlIDbev~#D$&;he_Q_eGE=Lcp%3bo;&|KE@kiYFC3mg?m znAyA%<^7|q#r?x?-uzN}aX-U^ryOq3#jjXs&~s^%(esY=avV;H3|;tpIl1`Sj}OkG zr*voX=bzgPb8aY;NG;zcn7Ia*V+0wRM~{m;$c@g4+^0+G32!DTnikyQWk8y3n)b=pz&RM2aYCUg|JK_}G@0!@e0@tZzzftCY|(&vfD4#! 
zPE2KPGdmYn^L2~fs9W*EBrQqozyZ5~f!5|8`)^)zW9~0JYF>X?09_6O%*ef;a(T=2 zDIp(G^e{9dqyXRh&WoS|sSa5jy}H@978VwOEGHMgu42ZxM8a#@7h;#C^7~)JAN<`H z_rXl^xKPw}-m-7*1tb9Z-eD9F4W1bWiK#zsrlVuwrxp+oMI0?WTh&zNu#EwSNPQEY=$8O1glT40$h^^9gIP-)Xoa@0 zcn|&+our78`?khChUf!_p6HpK@L2&r7Nvn7-S#|w&iY9w@&oXVlkHoF2(dVt!J!#3 z60s>T?(XhJ>NGGltFbXs8gd`dZe$+V6H+o{!0E8{`EQ1tk)cXt$jGw(WytkXU01whscbD~n8#n|}x+H;#aKi*O!hjZ11tpme|W(+jW z-m13BlA+`RGit9W5c%Z2how2{R|}pV5fMbMKCt2^k0QupC7>1 zp=I4`q2WAjndUO;&~#$*K3oU5*w{=LFZ^R`_(bpH97V|lHPWaXmJ6ne{rI+OM>6su z0tHo4np#|X-~Byy`>io*(}f0*QxO#_(Zvs`tA9QqdGxYDJPlDRQf8vhd)uZ<{Wx=w zUS!#cw0tNPWM*ee1U;V)Y_|+Q?LcW0y=<6MKN_m$kAfhB}_0{O$cK=N(2s3;_VJA zPa0y%EWr7l$$$=Ps;luVk=QlDIRx4QK>5<1<0pUxBcTuChpJOb>R#=~-d!0GiyYoZJcX2M_CO*}m(3uQ6Dpc~1QH$UM{L(I4*+c-LU!SWZ$(1+6}AV)!a2QIn{ z795B#V7IKqrhbAB`RtF8rs^9G|eli4+{X zlLU=a00?*F*5#PB0G|$o&G1)ERM7eiBdfum6VnR&2VD)nW3M2WTm&FwxQ)WDP=WSw zaM<&Y_CdJgBPHp+C2NhJ6$EWCHH-3MDJ?JW7on)xYkD5)e?Yuvi_M_+LwwmtL*4#{ zQt_Z(cRhVN0oyw^*3|b2eu{yB`L=DKRZu||+J(Mr1bB4Cn-I#lO0-QN>4)Sx`0eZ0 zR#3(KN1fWsO#qt&%f`&eNQzJ!azs3W?_uyDOj_)O#b)k7GF9EsFp7if@eNu#>y*3H z_aVu|HbNu(ICf5U7)5ugB&wp?iwu((Dgm$_57QL@5eI#S%IYkG7bjj_0s4#(j|BZd zZyKB%FXDON$X_3D2(by6I@DdbN)KsCdHF?rU9f1-SwVG-CQ3vkUl(@z&<0dIz+h8q zzU7W}IF&vr;|n}YK3jaTXX)Z-Kd`KM>hhU5|_cA5E3xh#7IiMZ*25M@&LAAPh>3??~tOTfIwYOk0MmHPoEk< zW(dRgrIj~X7PD%|Br(?FE@@{#)2W2SF)prKl}C_NAMF*Z?X#oDKJv z4I4L>0E-F?)YH{{bgF)9Fu4Vk2*Ly&g9guS+|!DR3b4Dt`11(E3C_60#B!IN9E**5&)_BWRF6VF;2K0^73W*QOewcQ*4H8&d-+Z@+!mbD*$Om zuasjO)KYHfH9(Ff?#8TrkNl<~I7m!v@Wct*8E#}^<(O`A%!QH-rCI&voPo{I(ju0v z#M>-PsN-i0=HTRnIK2PpeQAy`+)z-c5ZD6j9+sAb$tD``U?>hdH-d|=f(D==76lrH zJ4B+Tdj|uxH56C}$>#5v?LOt%rW?9A8e^%TJ>jx+#MVnwMv&y#+CKH%6Uq?j zE1UdK?tJ&PDSezWxv=N_mS8emiPkwdG*{Kq(#Of%e;;uD+O>zri^8h) zFWIhqoZse*DjFZC8tY_dOPE5c!QcG&QJAjfWWPmkF+ZnvxAJa6DDgP^ixv+fgvKie zpgw7BT7)mVT9<9Tt}z#!~3$2)emwkSw%X?nNx3rcAjg$w1nS@3KKID~GpwPD*< zKCO2dg26>u0RaKn58j5EAVRakg7g2<0RG|lj1_`~0ha`XkZ{;CVgW%KgrXES1Y-#7 z<|2)%%F68)!_dCpj=AtYTfx3!e%5Vr9mz5x34stt&qf`uS#jAS^K&Uf4L0QRFE4b+ zNQpZURS|w5buBi_d-CKQ$g`lJ=WG`&5bu1FR3_eaE|;I@4zI~6W2&lAU(#Z$URGFZ zu-8(*@a>kji5ph1HVK0XFmaM7@3)F8KE(O1SN0yzo}@^{ZD~+P;G#ski9T;=aOnc(dk_tNRrV` zN`iQ4pQI#I(b{@?%@FwFGBC+IieV>zJ2*HjMz(F+r1lNoxf+plHR9mGfWh~}2Y92R zkV?YD3Rz&JdHjzHvsIlR)p#PzG_@^RE)Jho;rsrhC6U#kw>Q>Kj$I>-_o`~>p=rvQ zp+O{q1_Bq(or@g%y>twm0PXtq_8+Oxab18@40Br)5)~nlcy-l@*LwLpVk=Z|(3uEB zse|^h$=9F@fXW;tMJyoGqn%pDcZSNhjsN_G3%PdXFVp-^mNT!XA!hmDs71X4^6}YI)#xF+E$Te-^jyBN_YYrEKw?-nfBVv0 zrh&#lrTx^}FkzlXIgWL|wt77jw=?ad3sJ4i(7f$KFz88o6k( z25LWA+1T*yGO?eUnhi3JafU9ivMKF)ZBGsP;>gdR>SRzNzp%k!U5s-(=xt3+V9mGS z>OL6JKuDmrV+)$Iy)Isq=#A~heMSrf{=Op_rRb3uF-&jk&s9Yl0)%fk&h_O_q$7VO zBX{e-JENku79{Kpd^E&%l^Fc!iRijJoAurvBfpt*c{t?QkN#dYvF+7wn&LLm2R=a=j8k#n84VBxm>E%pCd44Gc~_Z}N7&YCUjp+SOB@&(kghvhjNY9NVp8MMznniB@^ zbK}BQRHY&+8BOSJiw^qq%mp{Xe6}$cBg$}gfZ{>tKG5J=8-Q@Y-%7DmK!>moy+Fo* z;`Mz)1L)#-S&uwqJ|!H6;CUfCB?I!$()&I%W2hr}c0O8VL0NNFRZ%Aer>j zq5H8rBJ=%srh}87jKU~MCXjU9Nl+n!Z#w?>7gD=n3?WUSfCd$+!Ob)lFMrO#MXJA3 zmVu5-j-dYVXm0Hu2cZTn2m~>cI~(8%aCrtoT}@CIFo%)Fzy~x+n!fb@Qqt7YV)djG z_0;rDso9!Z&_8dx<2KE{S!K$0!1i0PVW@RUvOzq3x!mp0;tY-wz9t~wyn7u zS_ku86*4*#40%!MlkY&oYzQcYVDc=7^9~Bb);t0YURYI#qGjH=IytV!*RQVx^)rp- zpb8+7VA#7hcuSlArO5KCh1d|*x8I|U`tRID$@JJ*2o3T?&n3`8_i6a;&H%p9+wg@^K*Zo61NfIE$wlmkXV?Bn%s$e)$FXe3@)6`%zvTF6A~*Kh9K@?G&a>2k8e-taAWE=q>i#6juu z+8U<|(gR+`;I~b;;^TuW(*`yWx(+N981X_k|F%tg<$)?4eRH1L7nn5eIOc!BdOSJ^ z!gH9#K;MpH7+8a_D__r-Ve@0bzB#BNH-9O&LH{OxT=DYUd0a7CD2j`VVHpL;sQ<}u z(jExwijh|VkSL*9lC1djxGeZ=v9a5zq1X9il5q{)FV3dCj^;z!vhoE(9Nx=Kj`Ijd z2dVL;yv=#Myin>sY6=<4s?}@vUEaU)C7b&9#a6&y+!oav@;k`-ag@Q)n3o7gC|yNN 
z9;ZUZ$H&Kq&H`k0yd=o_HkT(Otf;BIm6cRD+js4?8~zLzuaTZKp{T)o1LtQ!n|!xS335DmBS(B?uh5GB4-GTo>Y!p-Tnwv{E!EkN`G? z1P4dhjR9kH(dd*F7nbA7$Gt)#{AYYTQ_&kO0a(H$CZ=k{l|l#jM)fBe{^QQpBP=pS zT)+MXl@6*cv&ExB_z%EkvtWoo>7joZKp9NmcmQrJAeRqpu+{w6F-;z3R#e|7 zQ4-vsbOb(7Kj}rgSblV+wZV{cjUP3HVLXv7`6bM4i#|6er@(>i^=+o8R(cPMhtY5; zxuCDJ1O1x9*IGZd|K|Ia-}0LJ(~}UN#l-aVAK1x;Y&%xIo;ZW21Z(eAbi#1p1Vrug z?zv}zaKi4$tySM!;x^sZSVMgftT^)RZ0i-e$}gR2*Ekh-9ZZVBaRCCweYg{)p5J;r zHWm=LS&6#~dSVS5pgce<#YNB_UIFy{-fYD0(V+|>#=L=~X9%g*&$?6d&CqZ|z-t02 zg1AFo{(cbJq!d7FA;!T~Qu)Hf3SXr3?J9$(-}S2R^qzY7-aDw&$p;PGa_q*iHL&k+ z_BzcCZ*p|eb{Vbz7>}@AQe2FY>4yW(Qhq%JzfN^(kD%bx^z;!;%@m*2gTvCkqx*Mn zZ4Ef{^SujPkQ`y`g1)xwO3G!vS%t4#=Z0m~)@CsGkaaeH!8_VhIQn{V^9}A`dt^x2 zHz|k}8w(HdlfS-5lagyK8A1%7f?lQnJ2=r($C9@{&3SedkAk zJohs^$vF8{tXICEpUOt`;XhVO)3+*M-c(*+@X;4XGx73Vhs}4Vx~}dr~f+LyMR@g7&v7fJP@8^kiX~**BIzM&KOXBM2+~7tuWm}LjHpJ zV4P1v}zAbvy^VM0o9 zSeP(j@+vXfl(ZUBZHKojU++OWn<_Q1p0Ux<+#&v6>)njOuvP{n2)c1j;elfdA`vEh!dhD+jmkVHUZegekq+@i)@p)#fkvOO&)z{1`T4OK zL{^l;97>#P*}Lr^wPSujJz2_mzQ2kYQ`m)ZCmk zJdVbYc({o0@uh?01`LD%@^^fE2BIr`gP--m;->@*L%-UcYSCCmZuStoKJLnN`+4dS7SB&zL-%nK{^1KH&()yc4-d zjm)#IG07lvmoCo&0~x*XmKurN*`cjQZ4G+qbn)CGx%i3f#}2&GHeumGw%_-$q7H#o0pS+MpVhDa2S zGw!1MCG>ymzer0seSFtdyL<~2em{qXj^)b$CqcP$|0bQtW$70&>FK3Sg=tw%nyoqc zE&Fe1zI}aYf_HCj5I-;$|FM6J z_KY=b$OiB!#gIEXWnwsOQT-x@i$Tvka<5=ra8TpG^55w3__0fx}n&ColdOM0}w|0yKQNxGRg*{_9@)zyRZ^Ieg( zc04pWhw3&(bveAOgg(e>n3~E*(uD2q)?2sMDl6|R*mBt_q^pbe!;^%dt{Ze#C#{n1 z?6}Q%#jj;g@s2J{q2oQ=DM9spb`@8=e0}BZZ0&wcp0I2Da<1{LJX$58aF;y&B}m~e z1I@uLGzVWO`fn|eMehquP2T~IW&gf^TUmufiHwR6N+EmCLMTFJ5z1C%j|M5LR91G9kyY8dfrLb6NJxG9M6Sx= zrROKztq3+anAQ+fMMP8I78UQNz@Wa;q1p|gKj>0<_ETIB4ZcBXcwaIsYy<}Feft_) zTeZQOGP!cBJQq!Pk6Rjf212*P3x4$u8XhMjGVYvsG|Ue&>!SmS=PJ|XCB1&qTVM=X z3W4LMps;WRg9W|3j)f?(k;a-wMdA4P1znB^B7y~qian2YQ`hEQG4w;y{q)bD{@C6< ziY7ijB>Dh;jo?{r&?_+HzPnKDa&nKXs$SKFHBFg-(4@arUq1rLLY1|N>kwx-B2RGi zp*=JMNck~KyX)ge>v_XIYYGuEBYDm!uFyGs%$9(dKQ(awQ+ZdjpyPg)_V$w~^Z%ZX zSxAw1aBw|K?feCst{a*iBXRdR75u%CUNMv_?m&pGt(Rer|Z=;&C$g-(3#N65vWzVFaNjQ zL_)UA(acY#cm06#A+6J4O#Bm4%BBhhqA8a3jRIt*hT3k7W9_f4t&x;oFma*CSxec0 z@LD4H$$@`WNr^-nrshHGi)X5HAXbS7j4Ua%3lOk(^_SHXJarfVevuGxc5s*tzPGxC zz}&=Z96Lx;Xn1JmuUC%sZeSBZ%+)_I;Ppp|X?(NFV}dSHBPnBOI^hrR4s{>lX$$SM zQ|}ZruWmb*)%62AVl{oo)Q=xPbfPEf)+~exZ$Cc>R5c>BL}Ri}-E0OK_q@0`0j;w2 zlrT@-e(Vt3I`7|2-8iK{U$LY0o!G%Vz--jUU*+eM7xP|u$_nL(~6XnV# zVg;foq>fz@sBOFVCs|b|hvjx^>IfsN0~A1XceQTtN2e79U$ydJrmQ9>az)iuPn1*^ z3uaZ9EBr2@> z`Y*B_V36Tb2hei3G4y4TheUid;qLx4pHk7vt%6K=Cs|mYlT)sf6FDEBzW$AQkF#Xk zh-S)068euwK!7h%{;|!e=TYw9W24!&Z6;{!Uv6Lx_9CAmgk~eQO6o+vpaoj<{u~R4q9R^Ghqf$oAaeh z80yIWNNx*%G+7AYbc}$VnSn@Zy_s^U$VQUQ|Bza0>^A-=iR>&Q7|HEepS4=NA8|zj zNBINI%=L2yB7?uZ*O5R1Imfl!z57VZ=h~IaU<#l_+1a`W-22n(au(rWCd|4`W%A9~=CS1_w~+IJf#C)qob+bNM) zwurR@8XMiv8+6NrlD0O+p`C!6J})fve-CXv2(K18AqD~n7X|{GaEKt(0HK>8fk-21 ziE{H_fFg0@VFs3%&D*WYQQMRQ+i~@#0z%ra;4>CnD_2Fn;E==m0685v@)h0faKHjn z>oG$cu!RtHXMQ2VdT1ZHnEk7j=RkH9-9^R=GqSQ?VuKZMQT^YpdqsKnHAB*zDQ0YG z@A!$cg-i(n_e*+v7_K5pURSpbr2%#at%noM{N-cL18Tg?e{ZPCX`OVfk-vYYym^6- za6%KAS@34UgN%6LxiZ^x(O&j{|M)eYAW9r6Am53!XU~R8)UB+z31lRkEyY*|*pu^n zl6jg!c@#CRtTu7I3m>|gQ)V?X{kNxP>x(~(N9q>(=0nVURL77!0|m-wgI>@XYjsWQ zpPdx{{|^{SRXY70hno+ku{k&pN@%~QBTZ-z0ek!Ji4EWog__{!%nWJHZ*KgiHf(~^AhzvuyTG^!twU}RN z)zzPyn)vznVE@Q@`V_qYaymW#r2X%@U#<^0@s1V4J2W6CSp{4xa{LYGPD8`{uSUB+ zeL~aYWP?f^)uSNGyXc-@zuFU%k|0DT4t0C5A`c?CbpxU#@~~Ii>kEA`neNi1gGp^Y z-QD7%qV+F-Y~8#AdlCd5efu)p78shF{a+TQlL?*I(@ymjD#I;x*PwuHUPdcbR|xFWE$aTAyblM#R`BBP`K&d(#Hjl6UMg)iWWS*&%;YN~(R z(+?F83e#G=;cd@ zP7W>_UQhN@+Pv(#^2kZbXX~lc1R=m;Qq2BuH)F>6`qBC-lX8DDj}la6n~OhA6G)fr 
ztGV4^ePzYsc@h&d^H-0GHmHo{V_ZU%c&wvOn+orYZTUGhdZhrQiDiX{`VBR6hsTN; z*|%xJQChgmD@bQoYUt@5=HXF-^`3xfE?jLjHbroAA8L%>W+M(Vs)HQ5zP_tCov>Pc zqJZom`)B7;*pj`fWMWU{r^qd9N~$p>SBeO=~2J9MQxw5*t zy6S3wRNf$DJv>Svq)bgsB|WRD>1nEk2|W!Be4y@9ByS#>dBGYC!#Kza3Zn4l{+QI> zd^P%_-J`IeK*4wMDWZE-RVhl<&z|*#s}7no6FfQ)p#*W?H~1d&{W#S?E&@>Ifjir> zIn%5*3r||;g-Hmd2QOh)Vl_h$Nl#0AE)KRw6z)TbNW^$Z-TjH;$4=PX&)B~xF1~Bu z*~N`KbP}N8v9Iy)q>g0F#>f8jke0;R(XrSERnU2W7^fg^s%+n}L+J3~*~$j5!{0St zja{wiq}kdL34GKR3kr<-E5Cz-iA%G2Jgv5u8|f9e{C30|m|nnS!qHJ>Ub-s=*Cs;O>HLR+m zqkHOnO8Ripo9DU@SRgt>?>Y3zMomWj@@26TGD&F)j(@IvAjfEkhFG;t0D3 zLL-Y4w)g`lfry*oke5~4A9Lj5DL{pY%sDcp#C?y2`AkhLHbB}UB@bhZQBmf|)EtWD z&}DsM-PCnydZ*ut`s*hAzzytW?AJnQNpo|V;ea@mgSiFeT?=yGGLoh!|(HV}?M`!1-mR3w(Q&C}KSiOFL^?J7CsVc*gor%O;Z< z->1A2x2ihgyZ^4%kfwIQxD5ylW7ti$uoM2ijk^1O*Re5B6F68Y_wFrXqmcg;(n)!w z6K@&O8`#~6M{G3g>#WG0=H?y*R>8^&AG3zh-@BW6B_*d$X=#-wzqa5zVnBd5W%`*$ z2|!;65})E?0|cbh2a~bTW#wruPHLSziAl9a(}yb1m*dUFM@F_`p>5_eJ_UQTy6USP znZlgv$Xa$x>A3P4HMUlo>A81>T5&Y=^dx_Xkr69bSXazF%HZlo6@LC~2pNa4=-YPl zG#5^nuxj>weTw6l^kpHzM!CHR%e*FVV?h7RTi0J#Iy(%?Xu``&3JvtR+C(;94g3qZ zS`|zP^|NKZZVO$Uq>fW@Td1hjcMrAAxQl5i!hMM)0*R zf)o&Nj$Airure~fqM5=y);Y?TkK991a5=fTxufzAjM+FY9@mx-O$d};F&^f7gm|>t z+JB%yIQ`!Iia41~5j@^IvU)a_nfajWQ#(<%@7q6e^C&6?!K-maCZHz#ca$>4xVk!# z_M&#w+xGoL-!jSae&fa>o%3ThqdedwXPLwezEH#RbQ z5P7lACNuQqdqKw2Mk)4B9oDFPl~>Bx?af39ydM_WBjVl+e|FULah)(XTKll zlnr}@nOK-_AaM9F>Hpw8L(q3~AZRrhHCDj-GSWfu3EpcN2c`S*$D%WZE<+^SM~{f= zr_C4djPB$8+S1&I*e3I8?_zrD2%eze0@@@9m(gk=*c+KA*l)$h(vhTw;8a|f-{~di zn^3-!2P?nx-+BEmtv6x)vg=D^gPm#;<<4dE%zro?i!O8YaZ^rovKFB< zab1i`5T$h)L5}mvkG~&ZB2#Ya>(>n415-{Cb`h)DVXGImMGlf%D(bYwOS)Qu!LHBP zKK{1^6QmI`5k&Dyhh?HzPESWHsis{rEwKTADdp)?hh0%cVmS1kS=FVq+F9+9z2sw+ z`Ec%NcBS1nv02hm8Tbu431VGYUJgL$}&qNHob0Hct%ro&*UL#Pmo{ z3$o!-5EgzvI4BFGqjiFWbl}`V6!}PV18HOu@b?SqH_8~ri@WuA?2lD2G_ndvU6$ML zh$atf1n}Fhn7`_oofzXT{+O3BnypccU4Hje?Kuj`8oJt`&3_|1>XBrHTKP3%sP7xk zvqVq^XJ6!=O3)F~ZauV{BN|hUz+wJgtURr+FNL@t%=3~RyN$#q90y=|osOBQs@mW_ zhFY<%mF^BM3AC%2OJU9Fj1+C$-k`|UwO%J*U%AiFFWt9z%BdQedw1Tb{JlWA!fL@( zk2Y=*(gJ7gHIItV|6?z0!7l?lU|z3hQnM~9$mR`{yvUg=PN=c${Jm{jLaOttx z8+!@N$jN&A7`n`cuPfG{clioXBs0vOPcH>@5q5Mly}6Yl!5r-#a?Ozp4nYxf_~Xn> zF=&X<@wwFQE3T+m!F|RnH*E85eAvTkpuRbrX5^7nX>RyvT=E8*S)2)8qda$A9P!8j z50FyRd3N=F+E|S+JiUD{^N!Yodq((p#d+}cV5yzf z8lfM+WQuNBg_;Qf||Jo28n- z=eR{d4_|3sYwVei=rwe$+Dbc*m`?whyh@U3WsHBP~2o|4!iAb9KU zO8)EZe0R5AxR810HnwngcQ>ViE#=B(8kt~4Kz*QdiZ*d41AA}s2tR_#%hS^n4SPXe z9$Lgl!RSrN!DgZ2i92PP&U3Kwxnp8M&*-hP$&8T>u@6QpYnPS|XI0OiJbw^G&$y)N zUS3`jIu40~w2aJ|>N_{D786Rm%x$n6-?|li=FMbXqb+%?sj)FsnNJ=%1B`-q29_R- z!g)A)ym|H{9y`vi1CEZRQNu^P_xu`BJ`gDF10j+?u;u_rpNkM#f!x}0_Xz(!o3o_| z;vEOZJG2tFnN3AaO+n}_Mj6E3FY4A`c>g{sGLod{&t}Mk=@TiNaWBT-k_Gs9h2PWC z)L`J;oLj*GIk0684G^Spx1*zbAKV>w8VHIh*~b-jN93S9$Ht+fM?Ve>tl}x-*~k_8 zqE03Hk{B;>%AYS~6!(dF#08)gQp>%x!N7V6p8buAd(BU(u@QI^fg37ycMuE6!5ys@?v1dlgrRhi^kzgs_s&s{&uWpP;aV^HG1 zbH$Ubpg*>8D?B+^NoiN~D>5bd*cB_S*AK4J-dN(^*zsG6sJHvMopR+OdE3Y1 z+S(}MHVGwTV@YTf|2NF@!r>4lgjJ=8Ut}h5R~+HvQ;)GxWm0b3=N9(;uk%`wHqG5| zZ~x-~ufDJ81(bLco39?&xu)!~DrH=zvC|Q%rkOXayv!{V!uBe_8+;8LZr^-#fYrapZOGAW#c%w{t8Ye{6HFsflJ-M{DCIm)gEgbbE-gQ{^2#sWbGGd*0Rh zsis}&i-`BY1a$ikG;Sw8J zbjQ>D^Yd%94PV_p3gtqD45Z$==?+g2dq{hIs=VLB)(L2yAf3(4%PW&tI&)?T*=K07 z0ARyNUK_Yk$rBdFLr#L`Ft$CeUwYb~LN9{rz&~j-iQ;_!^d{|{VVPN3f8f-)s3)(k zrRKB!?ml1n($xUaDF}}kQIHN%BE>JX`kn+x0wFVaYwtzk+xyL_523zNvjh;lqfY${ zA|sDAqe2^0&m}p71%sVk!SmxjCL&BoP!Jgg)d=VTJ4-CmEAJsnd)d}}ZH7%=vki9!-;+i6RysDva!~{c^6rRG~HGhi;L(oHds7mao6*=G>fdB(1~p|7M0{!w|> z-<3`5&x^u0d{}+YOo}s94~1>8TkVm&$N%ZkttJ#W@A!rOqU?lIW9WJPWt^Gj=8qVR zd~+FG-_j*0u|0b-e~^Du!+(A%uv(qZ4u#Q`0N^1p@|{!l 
zKrAUj9+N+!e0N6i3^66>K<}^wjit*0UPay3J-Gv_i7G4lMw}bP@${k~Ti|V2U0JCp zFMkd3dQUed&>p!?T?p9$^gf%LKb`$g7*|cvtgMK(v5SkR71N#{Zj5wP%D#S!CVz;K z3n_?U61JjEl0-*a|79TB5n&v0Y^KM;Q{vR>FOe?2InT~9K;8# zbaxe~VuHQX=kmMuqfb$#6V*b36KnVSzA_UZy1ReQHSjc-!GQM}20<(UfZYIAfqsIa zngKs+98FIs0jI*0cHX+QnJePEczElW2*?m*{n&sO6EQ|cuddE&%G9K^bh|!JtjWm{ z#6T+56Zc$G8TgKL*3x+#QRl-3l~Z&^^9jm&9U&sQhJ(Aled=zU?1;7E7ZQ?1xrPq7 z@HcJwv4#@Q*v6RkYc{SKEw=MkRRx;B$FyJkbSYBbCfC|?Rv3toZ5jjoLz)?3Ri_WI6?WFd$7=GiroYCvDGXO@vMp`dKAg2o z5U%?O;%&suA-}pCe`PA&zKA|jF!(hA^=-(rCCcsFnOIwqQ0j4oq!Y$Pxw$gQ{nL#h ztP3D!Ff~2ZkTFhwP=*U&bZ~MPFV0w~P6Rn)>neX`dpY*{@v( z4^nSsn~Mzl$@vEQW=s$ejeclk)TMp)o_*oM8ZW^H!Cr@#)4fLn?P@ZFz90*MdUBNZ z3u<6?PR@YC(rv?!u1-~jcK%FAP~~NY8)7-u^+_bySeP~xPPY16ohmIa3pr+Vd-(Iu zxY$i`k=V8gE(~IaD~q%-TdlW2#)n|;22jiDyWjq{OJ*p<&YW$7udRR9%=~mH|>MLy$6`!6RIn$O?(9b+4Q~f0q zp*s-MMq6%I4opPk5tgKu_qyHle`1R-{BDKs?3I+$ZLMb7Q3mrdbNCAc{uOYJBAFP@ zFCcXq!vW`A6&xZo^v>-&@+@QiUDb`e*&Wz#n(Q+IO9|IvaO9? z-d~8F12dm`4gRzcO3KO&n0Ed4?E#EAot?h*a0d2@&3_wMpDW>)AqhHUQUvD|C!ysn zM;3ht{7GbF4PFQo3JA56xilBV@8t5sN2BM-F(cOE8N&L~yKIR8Jw!GFi(l2seqLc- z`)7FGaXO7-*9m=N#9<^zRaHx@#_r)K^LNwrH}rp|Ja;1 zosNs{OjMfw7qcRy1|EE;{CtvGq7J;S`A-Fx zEoAs=a>D`N#yc;iRw@aGhcBIuS<^h-Hg&fO)!pOcv6V}`Tf<(FRG}D>1&mrO)rW!r zv3dfSMkNeu_?)Q3?m?_r9kAAe#MhBXzfFhS-S6IgNO+$nclMMiRftkK!u|(Oo<-RT z)(IC1k7Q7UuU<+GK84dl2hkl3K3oA@Cy#?4#pE}Ipd)8>Js&+uWJ|bjV7kG>SM#D) z8QKygIogaQ-@8YOvcNk*X@xX6h!F;=uDYT#PSw`T_)lekfm6@*jbCycYle&_`c>9_ z1!{nG0g%XjuS-z__NTrZ9``GpfD?dC~xYyqP0`>)s?%@t>iI@p#0$ zsnV)$K0d?OZ|LxxrYROzWwM?pkP%8Q{6-Q21Y!N>ti#}U?|0&EmF8Afj2pVv8wWo8 zq$29AgQFz}f0Teo1Da!E5|7z0+WU6{zeKYHn&hUd#QvY*maRW!qEXIbtsvg-#J;-- z?z6dlf=~jt4LJqCzKx};6JNf3K_`wg`uyhWvEN9w>aX#;s;T>Y{Tz12mv7%5#>Y2X zi(;J2kxShIaFx5t`K-{dtgfChG7=H}=o&t3w)M1;eS#QX$LHgw-%S>xtQ4a&Gi@v_hk2db!cIRst3bQWHm8^0 zM%BtAFd0||dd?sb@(?$GQ$RUpx6;wK3&q<%%@gD^H6QtskPsj#_@HefMb6`KW-c#L zkIXr!ZY7tgE@~ZTZwc@jHq_U&`aar9g>unT(;`o79icQxdFfv~Sf1EmGE`V5dY6XC z`m=56+}%Urf>>GT@+b&!J0bE+YB6 z1`LNaK*pklMe>asuhD8ah$m)djSy;S8t) zfjc?f4kq<1@SJO|8a&=Cbm}@fFx0INhRqCBkxWwU<3V;)7t4mG|LnC7P3pzZ!H9rQ z(yv$^aaVSJ#(GDs)W=^$6W&+9zBujz*DQ)T+>r1fwN~N~<&03}}wN!6kWq%(2rl;J$CWIbvO`VDJ zV+vrgHjK8YkAgCvj35LE`!}o8;TdTR8RBMbajVpMdte=5x`wfF*v0}WB+6&u2~&uu zg`VS655|35+-Bxq6Ym-Hes;Qe(FZrqFVkzaH&Heqv;zWu^q0UdW804BJ+-}DfrW|D z0?TziH=SL@EuO}5neWq@-2OfLIv6D|03ei&j6MO7M_|DxkFh<->cQm#6QBRdxwY7g ztCT-fK$(Gwre@@N^y}M-!vLjfzaHaSzxl75@#VcMRB&<|=@Z(!uC35~_(XMpp84yS z88aeyfSrbppl>Z~Z=O)OET%tjQJj^Tk|??&O`=6N)~?4eg*61XoIW?Sg=y8iG5ZB2 z!%vh55cas>BbxTB!0h2Ak)rD4Nvl3DVaaHn141ta&DxAHohO8XTJ5-6_HUcBt6SX5 zKbz4Cmj2s|)(p1B=nyhjJL_G!7=$Av1XGox&-eZe82h2EM!RWhx_e~FKwFbE`wCd~ zQzt>-QyG8!>Yj)H8xPG6_n`+C;%dk5CLQEwK-35@HBS9x|13@i=_G{>*PEB(B zZRf}O0;Q$XP!Ii!{LTrbK*oR4`<25rc^z_ca*+6&Z7QPau0iZ9?otdd9Wu!BHxb4< zd{|O$GKlXHP`j3MFT?62fc6w(fO@@u3&S4hO)w0A18FR0&V>FEB2DcnFZT|!ng(tT zX$wA6-gK%6d6$U0pUPj0Pf|Ii;IiT`;~8P96uFdV*~K|cUP&FdQAIss9ONLVt87@u)iq8 z5$VD&ODm*#>qHBmJk&XWS(#!A3je^SE(RMj&C&UVgJr`Z(2*=lZGST424#Hd3HwMt zM)O04#;Us8URZoOqmPhS*Jk@Y4a&G4`<04@R*lZd`A|vuGJi9HoNo&aWxNchMg%z( zmB69UG$OQ&pZKh$m6HF=M?^vJ{!;PdTICbBbXt~!<+eXqN#CTuc1MK2Kp_Sv=-CMwZuq0c` zUT?yrMo0JV{rho`3Iqaxk#&x%y_yR`7Es3R*3tZ5y?!Bg zYoFX#yQrFJY}ek>%!akNhoKDI3_}GI^sCSN8y|bs6@|y#B6#A3>lC>XbW)%q^J|rp zWnu^;X;0w4h8qzhV~|Gx3Feoe$e^1*CLSgj85k(@GIze_xJDBJrIlH0OABPEE$`p2 z&yA`=oC5JWybhSs1Et7j^Xh`Y&Dy*)7GD&i7!*SqiUD(4>)X*+VWe4`A3iVifRWhs1 z%HDsNqf&yEoh$o$KQ%-M*4w@6++igzOa$L@TXx;$Caory^2d`-l7WbBx_eUq-Dow& z!FEnw@>kpEj>ra_oQ^HOa|#L&K#jP6yqc|BR58ceKyCTeK$f$jW1@-E1bw^G0c2BM znX%uV)MkmI1e_YWK|@32<`&hCln<|iBgCzbBJl$3Swt4& z2ZtB1x`7P>0=RLr%ljxt@s)|`GY=Oc7rmowJ)}a7UAv~f<4KMrtj#5@wzI6Fl*3bC 
zICF;Xkz(ddVf%7e$Mty!6rdL`mMpKWwsYuW5Go{EnmIYE?^6c6W@{e5m^f?+arhT( zt>j}L+Z%0<;syqgGEfk&_3Y^$&%dU2>gI7DcR6x_ZjbtMn-2vVKS4l;!@lv?5w zK}h$fb=kG}UZe#$C801z@3*b3O*D|4bU~F%cSiLhFoMYYRUbZJzaeFpl7zN2Sx&x(mX2lED^XdEqE+G9%D&Fc!OgR>9f8v0L5 zZyyDk#3M*Mc(pO+T&y$q*fic;*q<*whk7_EQ$qV%^i%wt#1>=c&pEq!)1?Xc=giD`Ro_7!Y19O$K2 zF>i^Ln_Dll6W(K3u*-VWBlbwoh zEW-rY8JszzZiEiphu9K0&p=8*r-*F;fo60Qz!Ok+l`GdMJF@)=`NmVR6) z_}k#3I9t)@LU~QF)TWye{#<9B-fLZ)ThNvO3O1c$PN9j zZ8Yey&KAd+Uo8muc?mOZtgJdFJd~BUi;IUeIOynMrrphsJCk{dY#}}`7>z!8*Z*kI ziU#+|ADh(gM0|0Rg6S3kjra}L6{@U_e-~o#O{nhBrpQgR{kYcP`fqDKxD14TW$ z0DQ{5pQoCbIXPitp(J?JLQXSSvF^J6;QB^b{Uza|KDT8lw$g(veb9K}1&=D+zX7}=yD=cn9{{Y8i zaxjr3gb%&J(=3Sx8W0eWGOq)Wl;Gjz6}N4U0fw=aTcJ8X|NZdrKdkxp9dS6x zAOkCgfER#JuuiDDc6}rB;~af6iU#HULCN%r-NH=)tTi?Y{=|VDt4aR&t((+`-mu;V zp1ri2{Q<}At)Gupm@Q1u1AM1tWHdfK#_c)Bw|_svDh$=K|9&w(6MV*uI<+@&KY>|X z7>-c|76DDdo+$6ZlA-QylB}n)(qBgK3bRbI3krl^zlMD?k&X!11+Bi{(ztWR@NTr? zWJJQcuL-%5?*&%k%Aj5ENGP9*cIoLN{oIXfH}^2u!=jJ6%X^OtfS3K!ySF55$kdTC zd(lTWG=#X)Zv_zuSOppnn~<>C-;zgEjr?NfmixYbl%(RWAQ1f>FY};d-^~%4J8wL& z_U7js2SQ21qfAZV+1w_PtHwlmQTqegI?32QGXQT$-J~^3{r=`?I4m!Fc+k<)3mrPN z6RLAG^`ODgg$Mbz15+J=rwZSL3*Gz>aFYcDQJEt20{RUATp;D}2k5f^sGuf*`H^5_ zXK%-VzjrgV)r&WW1A_NainH4WEMIx+ch!1XE9Tzn)KTFBgp#RX>h^J4G9rF%x{ZsQ zQF!N8Zd(6^vf}<{0n`n)^>GMhE%N2;?!7EObolMt5GJa((Nq@lUp)36NniDB4k;2K zo<4gq2ws029%Bm&(kq`2skz`|%mbJbv~2O0$2wokIM z$OwVF;HLs(Hs_#0M1U#e;;8k|mMNL<2~E6ygYte{Z<7WB#S)Zwh!BFmJ^1(J6WOm{ zMT7Pl8aKph)?X!acHd3}pOFp7wOl19`ert#EOyXX%kTK^8DS=gD497`=l5ICNZr4` z$Rlkttu*Xa`)O;a-kC$;YV4xc#7OVIWN3uZ5#Tj~?#S=xjP5Id=y3G!ltbn-q zi|iZAJD>j}ebT>g{N=l_%VzBSa1AVQQ@Cj<^@SV}BFW4AF$kpc*0S zp=<-OmrN$45pTTk?AlEvkvm^Ss%ckSo38GuuPuwTH50Y@)irVVOW#Ubqm{&C@$dQp zm?sxrd{@`h_(zXSmVWd@lQ30>p_x@>HIF+Tpmsg3rRC6nFEMdyW(M=pvrs`^beJ=( zyABtREA3T+=)x-C>xoYEezhlY~3?+$SW)>>LzjBIsZ z02fZ?6poVD5_U5kSIT~+&F#H|jF_9^i`d;Nd+Y51V)89nnox1YS<%cgjZi}rXq{aIYZ2oW_b>(*~j z%E3|wQJmI^6R@;35T}d!$q(id#2yApYLAZOn#+?6M15aSz1MV{Ywud_O?LUa+6E>JVa08KGr&P?WngHB)~LA|IV@;k`mQ};@H4hZ zFDPgq7?9!Tw=XLB7}Y@W{yvMll(~*UOL%kqhx;rzL4g1p4)0NhI1&UOghG^r99JFG zhM1WKAY=WPBp2lm1#=4vfP}&)4{)g)wc0}S~90Z*6x5r%1b7k4Rhk{m0s=Vgo1-woyf0@hQbkIHq1k?aZ zLH-hfpyw#LsJc|qKEU65)5k>HAy1p4G?SZr_J>Mm4K?anB ziOQp7^v9UT$S!mf*48wddc^O`?fhJfnN2t=r7QvGSvR>jY=moOUCY@$K7n6Sx1Uc~ z*u6gGcTTX$UpwfWp7=n4e*LXG*CU*OHG!$r(xQ1dS%~SyNHxEj%LE~)jlF#t2+Q#3 z&-{*#QJvofH4bDyFdja6LL@&Eb7GN_Us%(-kMpKRrUiVY2>dfKBZKK3pb^M0NZ#(x z{svh=5q$aK{;aw~mP)V3I-)uM>=XJ6yd3C3W3H%wHuxY^V?aC!DBYhP5;QP8jre$c zJqOHkGp@Ro~k;%6-tRk5WO&xAd z#AvV{UQd2&_JSpBlKjz~z}dum>1k=Ako}|>gDYp*xfA_Uqryp_{O^u06vdBXa?oJh zuTOcsWkSOnmN<)%9V=#_JvRD+R>YHAM8fy%)4W6T{wus93%I?}JbfQfhMDY&5J-UH z@sCguU}6f*d?n3%lcS3ZqIea#!u;&y-TNjJJ8(gJ>s{FN>G^h*iwd)LsHisdOQ&$D zz~;4!{fDesB(${bpR{n@bi>8UCAzD?%fRFGvObAmy%&d_1JEwQa_EJ()thR^)ZEE9ZD`| zE53dE<^kS7;D+|SO-zhq5K|^$2xs7w={B(FseFy95o&O3{TsXdALV2}14D3XuLn^}MGg|_N9(EC=H|Kje~iz|emXw1DFi=Wo2?t&b{ zRj>Cne zo0FfvE^oSuDU~>{-o4A6IfQ&u%$tNUM@!qezpu>I3ej8;zDN}@dd*U8zpE!JwTFfg zDH59u`}TYLwc40&JZoQ2H)17-k%!vb{s475#rVZ6r+tJG>daNro#piJ*~|{IWxEBr zd|eBJy{Q(BjiR<}l&i0n+KMKu*yP4fpHT_ql6v%yC1Y@gEU8V@?q_y+%?p-aBV>e@ zE*V2u$HKKn@~8Q>6h$`$V2b|EK`>AYwzSnMK>zms-8)p1JGr6@F#|4 z@OohrL+O9*o2k(4fXYVz?X|743$J~)jthe%!K%cTc4TwR_NWhh8pszTt?~bvU2vgD z-ctzN1%0wNyydW{0B;^;+Nixyg+IbkTg0j%2aPk4cDLVUEn4Hi9hLNT`A$E)0%hfp z5Md+JM|zPufGZT{ON!rQp?+KV`?o3327nq&@pxL7?A{)}N~_E?ewgP2VQFL}AtttC zTIf2D7IovtbZVi$U#hM?J;6vc1I_!l13tt14>X@H8C7qbPINkMWY{7 zcOMk`qD$#73^;u2mg|mnyB-!&=7sYd+_Hy(m%m{G0ZtXt0EvZ#>ngV{(F9wCIHb`4 zPT2Nmy(uo4ijq9_&(xexE_cUab6$F~7hQ6NskEP~8EO?oKPBo_TNu$2`UYfvUcut5 
zEUf7T40H~JwS$8c3>_$%E=O}+yECbzj)@vPirnRU=x%;AwA`Y{)sSB}eb&9g&6=AC zzJ2hK0<}oU!<&5XQ>e|HVb4UA`vx%Gv^1V$J#&op&CL!tbZ`?<5cvM3mOJo$gM>oz zu_>c^p5;_YW25OOiRe>eh>zRB%#2@Us}=p#)NWkAt_mk@k2jopb}%)dEx#KZt9|mM z0{o=b);vq}n8_gx`4N!h(t>hJQ`43Sj~v6P!*8`97DCC4RPJYmn~Q^R1Jq&c2V~zt znJ!BJ5s+2$u`h~PcyAD07cU5?K z-$8CQy&S!r^2o6N7wDu zA-@p&L_w=Sw{JBdt1-yb00szl^lsaE z3wTP@M-DskA^pMpPvfTh*>6D9a66kB8p5@WJ|B@{$eAQkjt}5-o}c%CIjiRfhd6s% zL&IKC(V~))dn)M&SA*i%4j>YCkF3WyOh6I9^`u5F^ zX*=1g;d;AIw)R7HK_gvVpgoen3gcTB78hTirNE#_RAcQufHqH+UlbpJ4hq~fO}Il& zXY7e|mhMgjzhI8b&)M0{@16VaK9swA25mehyVCDz_uVlDL*=D=%o1Be?d_I2Z#igc zYVKj^^-6bzi$q~TRTOVH~E99gP|*_Lx_s}{yfhPxA$St zKx8nhtH0qs_-kWj``)^0R#<{gV6zn4{1Zj85WpyeclV`+LJb27J^4R_2j1zx;NX~B zIm`x-5}(fLuB?>#8T(uwg%g6&anbQ-p1->7-_+@Xgtp=BEOdmkar6aPzW{1b;>6RM;ThVgZ9+l3^4V~37I3A#?a#5Oa;&3Rr zT$IC>KuPR5^w;3hH;uY9Ag)NCIM-FmdGkaTuBXo7VFk>nhsqcRX zm}Gm$P6hk^_BwS3ieX%wIGr}R{p@!$84ZMv`rS)EM_kk=Bcq~HE7oVJs%lXB8D-I9 zQmjx_#mZW&u100Z1kR+2_wjDF_}zB3+|=7cJ1vaWhmxncXZ^k=zj`k{nX`rgGhb7r zZZFuUXWd-&9iLR@iMe|hK~GZU+=E`F%?}F;ski$p_%p3Bz7(jKTu>&zbV)sagIHZu z4OVn9zn*Zj=;lZZ6X8;IZjMQyEW+o>leEf2wSBZHzWes=q1bY*MNOTGeEs(!D@)6N zAou~elSq2Nr69Z>Ju<&6R0i@5ARUw%FJ9bluokD4u&VZUhwd>yN%D)=j(6|Aco-NA zmq%BI7D;i3xZ5|sqs`4z+SgFRU+GDa9Mx^Oyp~@34^cC?0)~f&<%XNOM?mudr$qo! z+w_2X$lfEXrGB;s?nd&Ox@Rv1NA=9ZWE$PmyAb=jl<215W;hN*p0U=;bg= z*Z;~D@zf6L4>_m#5poG#I&=ou)Wt{WgHPmPNH57)*INklKCq^}e33?uN4&MhJPYz) zuZYdc%R@CoagbHp*n0i7hOw6Owt_0ds*3g$`J|KCutI zib^eTLV+r^BNYxd7nfgZsvA30sgLq;ObdQ|C-lwR8#97Pamz2;>7$zfE+QYTT*1pb z*5FxljLEr+$4(Vq|MO=M+nh8>Oy9s4ruNj1vXA=v@A#T~oj-W+_=(%%Nt~RVuoj+C z4EhwF)Aw>n^X013Rrqwej=iKnZ41k1$7I}ta6~3J;`6TC!M5@VUb^oev*;;qnTnd# zT!_je;@x`3Of`C-`o_cyoXtXe_bUJV_?gSGEn>GA9MpmC<1wZO8ncFCXB7hc6N=; zY+d+;#$RE$_U5#Ntvy0dOZuI@|F9{Q*Qp9o(ePk)8CKE}C*GS8YZZ;zE$wfs`CTQC_kUYW@*#MUho1wp&{l@}-oCX% zPq|r6=7A-Jhfmw-L3i^!#yi^BT>ko+_R36XCcpa6nE2o<`n`NNE&R^e{d2D_B2IF| zBvTO|kH$;UK9p~;=8>dkoZT@WGBl9$nx&4KprzG1kx|LMZ5s=7i_S|6wR78n{if+z z2?&k_93i*Ltkwa*fzQ0DiJCARNVkwRJi~LCQ3-u3Qkayp&c79o;JV|L`?OHiM4&zS zz_I!iilR#o!sRdSd3U1NPX2Awt`r%6rf4pr{{F?HBnne$!uq-Udl~h(1>?(-+ z^a+MXaYb?roe0L13I>E4UWk55<4W`-O#d9O3{* zA~+GG?2o~K^4Zprpbv;}>PVE(Ha#&v`uNDhDeec`&)**T>sc{WKbb9(z+a*=y#6)v z^~Vy>&WGo9HaQ~%7Ff76lJbO{b!G;+b{2X^h; zNq!5B=Iz_7>u)y?P*_I9&)$YO6{iSRS<`3tx+7jU(obcQwS09$e1T$X^VEm*S8-FC zu1tf$SKZ1UF{U~9#Zu?D-}lm{64s(3%A3~)*ya2N+PDAj!89q&Tu~{MnV1e!rMBk;5M@(U#@@2prq)bNXw7Nx?^oC z+~`~QO!)#;yiNp{(AP|NX+qRf=ArPWlz<@ZT1AYl+sR~;T7Mrl69k{d-M zfFiUyUneJf#W5YCx7bFLXsx=M_swngKvrmtQ2vBR!o97I&Ta0=i{Ikexwl4=cuWl9 zODx={pukoCg+YetO$>t!#|HV>*BfhsOq(xXGj|U6ojO)k{^ZPQGqcCs{)n-FZ30q)em6od-vwxUwBM-TK|FPCIr;?=61MCe%Kv=yA-nCyNBHmR!iG2>G zHsH0ZW+?^{@tX|TpeJ>778e%8>aJu)Kdh|HP!X&;#KT{gQH$h^ALuEqR^Pe6jWU@U z*D{(L&lB%X-D6L^RpjZMJ--xi5n**5*7cGNwx!-wy*)jF z$V5k)J_)e8e3|U~j~|hkkM;KLmU?iwH8nLbWMi(-y9p2Q_+i^Pc5dJ9KH5md9L>Kx z!tlA`Q{N8}dCmr9HNRspbqJn*g14J3SKG-fCLsYVTH2sn#(6*-mRvk^U=L59=KPr% zy!!uWdJlN4`}TeOLRKjmi3UP;vL!QFSs^QxWM)NK zkxj<$xW3Q--|Knq@v82w@&26e^E}RD)ZZZo+^jvlZWJ%T#bpZ$GJK~g6^}ph@k7mZ+ComC2&_X9bxzl)rqrB?9uJT3akUcA>5HEsux^qW5l_2 zDB{D#c&z&l*fzeCBxU)YmZB>I>BNXp40CHRj# za3$~xsh%ZU+N<2maCcX*M)06H-35m1;m&(&Yu>uN%)ueVco;%o z_B!X}rBxocwSAOy;Q%WnwR_p$7WgH^yIn7N{&%s%t!&)Z;fCa{M`~Pq#Dn)2J^A;+ zP4|*gx>M2wDj?=8i3t#Y4GApn~w^o2G4)NsvaxMQuMAolcwi8OG3}Uh6Z@1WGmeP z;>oqVD=9I2{+TRwkZQ&jVy$tYfKqLyACeWZ`QS%!`afiew1T;pjCyhXPv!TwbD{Ns z&0Kq5sc74NFRir!s)l7WG15_6i|YX%NoHnS2Zz9{wGeg6t@Xe`fO43@@JBiR*Q?GE z4Ph9!!6!F6yX&)k60G+#ggF0;PwebTWqp+Pm5b}RX73Cr`UTjW0iD5K65IrgKdsdi zs1x`x^zST$vaL@=5bV?{DROD>{Q&gI4ZkQSs{27-75J!48frx`f{!`G;>8!PU8j}rIO5*C@PUT>*=SF|SC9=O{TV{zL$ 
zRae+oaASq+0`=U&$XM)QMsHq0dO}(324{!FiTqa@KSjj^_(?)Hql>`)ga$xtFy-w- z7W<;S#}N}Xrj_+|0}03hJ;s!|W&`=Kaz;Y|O3Si(t9Nwl*Ylbe zE*Np=zv363JDQz##? zUxm9EB43#lDEO}STp4_NMd$bv%HNJY=f5*wYZ#Q0q4juaB7E>dmPFnt&j+cAwvwwV zY|$S{!v;)F3n{zqBRt)`=Q)`oSaXh{yDe&NQ4;<;9riRYpRE1e+xEK^6olttSMKHH z?cPm3=`_4}FL<6*okj2A!Rl`(Q@0GkRN7=kSxHv)A$_-u?uCZA!!NCb9K~mT zMEzk)+~*~Fx0QXM^1Yk~qasc=Y)LV|WB8z&^-nKPzo@jp=I>kT`o~>bk|e0COd_fQ ztnK5a%z&ZMQ%HG?en78-$ta>?>xI)TdUk@Ncpug){eo>O78YA*o8T*X14ZRa3pWMa zwI&ewx4vLUPE9>G$&lnAla$A+kRbVE>OnuCO;AJcnQ3m)@9*8smpLBnrDFc`=POUb zp+o6OHv{+p*WgFoRNhqKp=A)W?T@%Q zLzhojPQZgp64W3Dh9^ds&d|Vp5?mqhBp)tNNBoh@sHo^`Xi!6Y8zukl6Md|fyj6eg zzvEy!yv^H`JtE!X7gul9WoXPPa^|jTb@CC79~lKSh2*87pYC!9BvW%%=@_^_rg|DK zo<|)U@a`o0-AJLXu1A_>zIXnAE_JZb zH>W5IxPSDQ$hi=#WdN&}yY$DUq*$7o5{W|~08dRdjs2ZSOnO~p7?=6gcv;wTd~6jg zN6U^NP=<$zEzhS9GEWnz47V58HurS6g}Kx;z73^ezjr?VjTp;AF9Xm!{eJ}gLNe06 zUzVxtRK%zS^mUbo;@Qt+o?4w&@$wrM7dH>nUK?DMNYHX^cGUIuUW2E8sILG#_~!s| z;k$80G?v2V#7SH`JB%(4b&KAzqX(Wth*Dh6ggSD*1(?5uxw-2np0A&O9)+W~h9*hQ zm_%|^WdA6ea+)qW)rEMByYt_R4GaWb|uH%Nyd*C)~dFP3v!boJXF zv^#Fc*O*sv8r25dJ-99qFd|~%75Tqp;>e?)M#fH%4wKeXddRbBB?$Q`XVDXE9m3T^ z=5iLg z{_w|TwYm|jfi>0Esy?$fmnikJ0{(GNv>cSjg=1^W-g(4#hn1jAQ683fCm_Xf`rAiF zkq&4Vb-&jU4htvn3aHiPgO34@7clw-)?Y7z1F=-8Pqco{~p^;pcM!!xF%Pn6ZOL7 zv2f0)JMmswt$B{0sP-+>H{4X)f4b*w$|K%`1Zl%>kQAvmXHYYRUkAVix^8Ax7AeSH z%s5?uYqhnsY8n>;s35TV_U$^*=wIohLJc31a(K__=7t^5j9os03j;&BW3|m#VEa7mA0t9O();am{$)D3B{{s9POzsCQiq`5km(dVkINfyK_Y$ z=F{UrBbGjG^{}>Gv?76p@J6AoeI(+OAA@r32G6snqI>yI6w04I+9{e-^p36e z^1m-pC4Ta*=4ZXQ2=qxOdaB8?BhQqRomy7cB#+8YPZtf`-b53BP3OqR@$mMqXUW){ za&wA-V1TANkZ-J*Y275gU()0(nXDYh2<(KY``yM5sqS`vKbvPLFJ^7S#~b>l#!vo| zJY~>dhL?T^5(jwqFpbVkPRY0eN&`D5RzkvKPC@c8Qfbi7<7;kri8{Iyyh53|5km3i zt>?*P@}Lv_?Ij5UcXb+)*K-Q(25%gO%Z**h1St%XU|W0&W)tA6BssMDDTJ=vPvO77 zF3~>`AFRyrpY%1NY3b_Yua|7c_f6;IpWsQlXgC`iZJ}q0s_#G1!qS;7zkU!zdgzvM zEhTD#hL__q8a?W)Dt`cXEW@KjO4xN>%hcC%~||a?rE`g z8p0!@F9+m&TJ7h{2fkEj|d+Bh}aVgWBvZO1{!trJ6J9wB3J}fu7?*0XceO_JN1ny>o_e$I=NyyIX@~Uf|Q6Udgrl0qwqCGn&|L&mX zMbHgG!$MlwNBDev2)rk5yWSefeag$olzQ!8f%(^W>6y9^r=@dVqaHci{ES(iKJida zj-oF6PObw8Q(Rmx92B~`F3w1JQwb%js8us{-(uiY8+m;Zrs)N~)^TzVBS_fgS-%%? zsZ}M&`n*CiL2x}46_q*+0zBUmdq<#Z*-CGAm|;jwc=>TInHL5)KZlCYD38^7Bv5rEz}P%^78%YSuwRJ8t7d92xdP|lyk3JrSU zY95NGygxj5CYXaGZ#i#F@->lJEqWikJX&708`7=Ljt&;2m`a&D9ENt6SiZn}0Q&H( zc^pc)(!c(v;7fs^Sd*~Hu8VK3s9Ne-Yg*>V$_HSmX8b>>+DWXhD+qzf*F8OvH8G9C z4C4W)zRpuE2JBAfiNN^`&_O^j5gNq*K$6>cKLd=Hx%s98OiW)=6cl7-tDLy__#iAM zYSREfY`-2Q9by8=iZZHZZ+Gt?)VJtu& zWXQqz2ue&4pYf?0ta;Zr>+926-X9I9`TgUl=qq0D`@cb#UHUWFk4-VU zh|MPr?h7VgFB$MJmq#4N4l@aAbi44>OmagB0|P`okk;T21=a-rbC|Bv4G;Or;%e8W5n~#L`Dkk1hhV<_lMM-^N!4c^JHPF*m@?+0f8XY40Ut<5e`ds5+4IW zC}wm}(6A1`B?t)r*iRAW!{wiQ4pJT3qKX$vRXeXc@KE*;40$LxI$5P_A5eA@?rYqFIRq)$2t6M*)Fr-r8n-l!jU&Bdt;^tA4nJCNIb$g&q9giM$5 zgg3or&KYEe(Z0+l4ro~#PVopa1TT93JcoIio6SRtkn|Vcx^om ztpC=xfBt1E4Fev$)wQZ$e;W`>zKncT$Hu0C|4#L38D=_dy6WczXP+kvTUr>`yv$+n zkNUUQY3t+{y)!r%SsqN`yb09#0jD0@TO26n6ZxDczlsa}m zOXVnTrlfQSpBOF+JG;#EbQ?4;6JP%O2PhZ%26bJ&2lNl$(PryDwsk+W_U%X{Kx%wt zXh8uof{6x0Y_P4cf6CxKMJqz)tR594*Id0Xc#LP0* zC2hU<;~&e7xeqha?(U2~rd^I03D&39_lye4e*AU0)8xe!_qk&~f8KLwZW8u6vWrxg z_xD#?i^|)oXRqwWMV}-G`Und4S4VN7_@sn{#u0Z&%M$N2lHMN1UI_)~VPqs6_bwP2 z0Ytfh{Q9n3h9D5^z*vUjDyyQx3~{kdO;-T%0okr}AGs4vzqt9iUQYgbV0KLG$$%A6 z-nl`rUm(n0KfaUmI;7PeY+guo36GxZCp>amp zp|>pQYnylTZ8rx)wtOg{zq@vIuG(yUh9DW0?ftv-*|SSnF8+%8T46aEm~dXsF@WkW z={;tV+Wjt9>Y88wIk0KEtz9ntl4K`i|KWM1&7>dsH3wRU^1%Zmx)h(O=%eL{auOov zAu!4L|Dlg}ba1!{oub&N!wFrX(k@ZD+ z`OYTYcJD1iOz9}xFiy|Qy8>pj{F89xi$TJV@vY54YbG=I5?pKao8#gi>Bh!273Te! 
z^~^6Q+C~TE@?;Xu8|BvTDbQ)Ad0f6chNt{bZ^R51TpQ-eWTzh(nqK8OwH>cQTP~!0fA^Yz`?NenAIEA{v zKMe;3g%3+Lf?}GYKD{6lJaS*-=i1wKAuS+S38#KmcA`$Bb98gt!ORI39+|Kezhl$q zm{-%Ini}h*w?S3|BEzVxL9r=*>i+~r?Yigw)}?L-_g%f9{@hGx&d6qgt>*TDJZ zmtI)a|B9M-VG{?D728qBGaw!XR|%A&g@r;taW-KIi4%N6MP}s&wtjCGrQ;tyXo}pu zAVaYPnBF*1wK?D3;(^aCcJaqN`^|CAWR9zCCb5cx%8LHV($Uq|IaGVPH|Es>wtawR z78~de&$r_LfI9c$#O%c5pA~k$Bsuw^cCvcvV#j866ZrGMVsF&X=xA9VSZeas zxqsmxA59}B>yxdDh!Q}B@bvJ2_ay9|VC*mhxjg(FR(1aY+#=H4y?(=Y+v6*dRg1aY z{_^F;_{zY!)jY5+rB9t|`EAovL3u2QER8P7k{tWNFUTcLN-<;K9}^o(y^9o!zzLxc zSZQN_!o$r???R4?N7VDD`tExi`>9Z-+~B}`_;9m*N!Y+;aBk4jAdl}Ig=>Hxl=5I8 zJv3 zi~G}E*pKEV)=Y|flMdC$9$h@<-ecFY7$TYyLwN;XzYS0WW0;$JVlkpOU^p20!fz_7 zs%!>3yC*6Gb+&cO|ILn%lg?)ImpNyw@k(5tB0KNmG7BZioZiL~ibqx8&JVEYR36jK zISzFZUQz!FdL&mrLbQNu){0a@3tctdWsps2;fFuF+hb_4VCq4uS{US=%qgR22JY2eNaTocNPA_SJxg~4|=EA#l% zB9-YuYpk`#9g_GifD2HX0#R|Pn)$Xb$;{rDYeel)4Iud_b)lAJcv?)N*3 zBnr&53;1k^%U%2%P!x+2`@J0+br_Gkx=NQX!9(KzqQatJjt%_m_;I!i(f4Us7T_m< zoET8G(KDvoR$v>3HL~H4Heu&e6|ZAH7uf;n%nki`y(_qa;_;K=v3tkfn`wRfcAHo0 zl;{4~*r&p|FO&Q|=!BBVf;RoAc5!I@$>2Cv514{3`+^=xLd&ne!T}sd4*JLR((bJ< za>1DZh|wtaXiImjFg1v!gq|g5V4=*n`idN-{W3)NLbLtByYU&6{AgG@^bBRRI z$p88`fLmx5zT@13eThyt>p0q%Yv^{dJwvx-eePT%#ujj*qEk{%9y`{7f5Cl*M=lXM zRLCpwaHE$IKU4@{5R8<~iSDe*N|~U&)q?j5ZW0E+^=XTWx@)ShEG{yb25yUiZNXrw z|Jcmv$3~G#&L5Yl{tKUdDTTXJGa`!OEN{31vnrpuyGrD&*}>!lw5pr)OK)X;a~NmG zl1kLBd`f$vP7!PkIwP6!+D%ZvhcGs+`^gh_hZ&fep+T9Nn0O888=BI<2^Qr|imlB} z%-rZ;{_ffGVdcg7k@~-C6H51$Dyem1as5@HjiC1DbL}L4PH(Kh_{Z%lX2pNx)lo&R z-`!H;1Lt-6=S7FHo`cyssEXTNHJ}7wN(R3OoJ-UQ@Qsr&b3-Vyv+&%2{~PA}PJQK$ ziKu(Le`23MsQLV%*#i%Ql2SAAA5G6bk_*)1cJuzX@2?VuV^7-mOWk|`-k(2?`o4kA(X(W1dr*2ZL`=Dy$fmr_4Hr%A*c;)u{Gxak2d#}xf)6v5`vu*6o$2%1i z%9m_c?+^dAlG&Bt{hGOaid<~ZuBtm*15p?O?@n7U7?kcyy6Tj1wjtyL{@d2$hnC7H z?vcu~bwr=??ZE@v))v(~o}R>_8hJa8DKCHjp?3kBj(`nNZ1ExH=j0Fw^wgwhbacLe zMh#1XTa%r~G4`RYklG+8a6GMrXIk0S!^X(0NPhNgACZLz4Vt#9y0&%%;u36ZIO3bX zb#$Qf@96BL_uwNCfV>iXo$_A#R5dsyxb1#ma>P}J@7&d+t+y-s{L91#=pTnoOeyf- zKNJCYk&WIX;Q5O#zu%Rle(n#-KB=lTpOisv;yLHKo#S`!rro;YTOR)1;ikyc2Lr+U z{KdwWzfGS>!@RtP175HY`f`+k@!w|gn zw}Fl0=EH~x3#j`#(jK9b{Q|iNBv!mnx50L0U>q**13w0}H8uIdp%HD0ipo8(R3jt5 zKtv*6{BtRH3@$50z&&j4`HAevBNGW%O|C&6Vg=roMf_z`gw>C}zG(4-kK>C@=|zgF zKF0|X64tj@+x8zk2xFk5ds$e95}w3n9vZWvXKZ@-B?M%kB@Yi7uiuZbi2fX4fU^np zqQ$kqGtSNh;o;3Fe?SS(Zh6(NrX<7o#xqDNj-}wiael z7N$5*J1%^>_SU>5If6V~-hQ{q@~21ECY5L&_j1vH(ysf^*xo^YKlroM8+RdL;r5o6 zf|q}^h6oh@)&1fWdcAG2jRp!702u@$VEPts>q5Rdad-sF-yPadqD|##M1_LR3VAvn zR3a6#q?f2*xxxV!5Ydt%^d!a4pHWrI^Zv698s(rAW$`yL&@2-+TMn4uIG{bNU6Ir~ z_xR=8V#5oOtx?o;!$r<+Room`kN@yu^Ie|?@7Ihm3^nBT1j#9EW}sP_*ItCa!5#AhiV_Bn6syU z3yLGa)TZ10t$c#C+Pe&JE9z`sMzS3=tR-%r=h{xLZ?9$&IjtBj?jEcKX9w*cYTU@k z2p(pXwuvEyefQr>d(T@v>3qd3vU3P(CrFHl;~~dFqpfJeLzWT*(ZDT)?bS0#38$v6 zp!TS!(B;ky9YEd}whyZ(JO+F!Y@7zeJ%2fMPKCIUmqy?ufMrD_T}`mEC&}_8uXU@Vak1gdKA`z6pp0t z+qnYYsAo)_C&*5b4r|#82AyB7_(yp}+7rxx+?FBN4y@ftHalg z)P}!>I+S&AIDkeJFw`Cxy`4O1tg8!dH%vev=EW#b^kyvN4KvMQlD*mnF`T^nw*$DW z`U*_7&3vC9Vj?J|sIu>e#|!XGVjvG8{A0R)E(ASjDDWr5j*st+jWv16XwUR}fP6pu z_6eJGlGHr6Ab}U+`8&aNW7bPO#3FoPeAqLs zpLO4!dyAV9GH1__+`DH|LHb0&Z++5hX^QOHo>MK>zkZ!H+822G{VRvD0>vPQ^X|Wc z?%!_!J(--8gywWwIQ3^nuH&}HKb!=m25W45sj2!Rj>0W1$Bx}D6@Rw+6+ai zm!X>Kw~7x-O2kI=%vRr7g2`*o?z>9=2A&1J-j@3M$0hI`|8(!zt<{1svD5RTbVpwE2S4g1Q zyBD^iQs_P(x)jT{{CCw#zH>g z-51Pb+(6@!_r-3}ySAFfJNAhK#6sBox?Lx(;ZEOMxaCHdxl2>9;tzGe*sovEWomlG z{xfpCdNtgjbYXs;$k26m7JN*fL;*7aq{>Adkyc`&l3ba5+1_HHt(w??F?_;pVGlw- zz`cn?!xzo=gDX=tO&77^x5k?efUkzxa5INWS!MqPj1uCzrK_vx)%J~krWP*_oLH>4 zIYS8TDmK(RL;vUrS*Foromm4Cc9}gEX5=G_o+N;-=_t8u*Z-|B=t=0B-%aOmbg~|` 
[GIT binary patch data omitted]

diff --git a/10-deep-learning_files/figure-html/unnamed-chunk-21-1.png b/10-deep-learning_files/figure-html/unnamed-chunk-21-1.png
index 7edb4ca8938d2835cbaa7cc0a52380b76e84bd57..afc6057f810824c94ac336a2e1db0b24a6e94d11 100644
GIT binary patch
literal 83368
[binary image data omitted]
zMUU~dXBXlLIiMZ_GAW3^xEYNRx%^kl(zv)=IH5UxwzjGKDaxiLdWG67Ns47j*-Y`d zfZzgYIPMf>P!#d>+}+!=<|eH-&6KB$rW9LptNv=axwAL`;*{XT|04ks-8Kzz1P}(H z!vooUdc3P3{Sng!!l*1PEa;DeyRGrBTn7FjM{;p(tq8$2P;@EPcPP~dS+C?yo#H{D z!I^ViMn|hKWG9Nap9RVoV2Ley{BhvF9raC_p~bo!>=aN`;UYnCk$`kKytV4JMG6o| zmZYsie}p=@Q;580VZoLr&YU|%x^!^_e)M99x7(`!vHt6m4+I+_o6=sI zl6kM&$}c}D>|$i4j*OYo+o+~JGZ#+CLUy>r81dYr*-fe0;$Il9;AoDqOu1;LU{|{u zvJet0eGQdmi$lfd71$}t(50++`f?+p4CU%!Qe@j{%T&&pK9rzx{4vD4|&lfmX|N|l9Hi{jf|b2U&YKoSCb>*tfIP=Dq?>AO38;G zY3TR3h0d|9*4T8`z+?+z`#^L}S1&2kqXsM#s1T~qv2y?E8zk!1MS1O^`_qZSLa?Mb zt30g{BHKatpGA6qL{3Y6(iIO+QwDI-*r9xk*}OE6_tCd4nlUKp5x1_07+P5_`i{W{ zatkBvnzxddA|_UEFStKyCEttUOM=34|X{oj0>)n(3;nualnqx_(xK}y40r`2B6EEx_yquO-QutF%i|OpA zPuJ}zDEt%nrOj*TK!t<7Wzc1m|C_D-_sLOHoOqSqmnmZ{18bbiFubrUitr_E^A^eB zlDj_rGT=ri@rv*Lz@p!p&+W84=QNG-?1D+~{Qr=v3dK}e8|sKA`R)1K2Bo_1N}O!W z{wwXJcQP9lIgU498#pjbhf=LV(0zBQjTIZo1?SZRX^|qIvFs7+;kw#{+zjB{{X9q!fv9kS}X?cxCMtpkd`|S<)KgfGL z);-M`=7DI`1cmT-6jy_hQN?G;YexiaTG&U<&$rklsGe3a)dZ7+RbVm*bj zaL?tG^Q-s?st^-%wqnVi`uS!`N%W(H<(z3@83`|>0=;77rF9NXp5BOn~D^Q`3| zH!#sT>)f8J5S0C0w|^_4`2gmiz)x+_jmr)w%6MAjqrb4*+Yp&sIC950I8{v;oANKMgXJ z0A~YXBYAGPm)C->mbSJpS}lRB$!_=w6u&Xi(W;Fz0P!-@ehXyD%()<83QQ}|^?-Q0 zR8~Pz(bD1~DG3P@3}hELo`#3*szv~(366H}JP!%>qK9Ge;I+#@UbLzj0gYFXpQ6J9 zLBH_go}p9~!t5=!;in$6V< zwH4{&H&?=Ri*uR2+gHT5kl=)dK7US*gM||^ucBmRjS7w-X6H@r~RO${xLJBZ( zW7VAueSv6%T?zm6~RAk225RGVST%aYK6fYNMiP%{Xlb)e$Qc>$v2$%tiSZQ$)_fC=k6A7JbFZ2 z%K57b!t70ppU7$FIY@6C1FNN*d4n?%xlYSmy)|4al5S{5Dkz>QR1WW)ARB2*eb8_+SdW zte_n!?8#nIk|wnpcfsIHDSz?I&e@u=wL+)%-!g1O54cF)t@QhyV}aIvx+=g-@&|m6 zewlMI-MrFHKr-dxBdUn|ObRD7q-`2tDq5z0y=uFydD6N$n3+K=&Z}089A|!RE`GbR zbM~(bD3yXR=-$qbPyIL$-unAB@tOvBRnF`|O;n5$dX*v51A=enT)<#~H!BQPz8ph% zCN(doZviyQKoYdK*M^G(#OgSbm6!;6Qu&lPl0nZD#a*b*bP{+TM=J+W9d)MHfU`9% zp&};-sXY4%LzX163$4vBvu>a{cM=!`f--b?8pVU$DJuYbrlqB&!~X=}y{xP|@mKrp zD*(6(6Pz3;zpn0e%M>h~CqMhTF4Ef5aSE_pD<;;R9USOV-hk*SFoVv{&Y*M{RPlgz z+t)YcA_yXM0Rdvj&fea)>CO0fn4tYbL$*~TAR-2qx zutQ~-onU6~{H70l7X;~`_F{b<>V|-QyXfI(ZrqvdLL%x{};z}V5H!KMOQM!oY1in+WhMr^EXcJ0{s@885Yp$a(Kh1#$M$A|EcMS)JzV@b(eP1xl7 z>p{#ivv>c{AEX6gYh*~6otx7nm~|1fO@+A+g9gvepQzrnch;?gN(M;5ba=2yyHp#) zG-FBnHtm|wk*J!Vq)4C8;Zbd@n$H6u?Dx7*id;LE0pU=0_qK>Sc^r?rcvdJee=FBv zc2|J_u&9#M@XTTRm!XO4od-q9e^e_0mmD9$nb3h3_H@h%mYu)&#PTkzq|9E>+=kj0 zn9xs}sbw?_-@k0NtR7nf{k<^h_zu*HS8bo|Wh7lSU!4iqQ2+A)kQXT0UcgM|RN>H@?5`t@s`2{lp5AP5?RusndN zAETWJ5Wy*nbLoKd68t;*=`~~{1x@s*5vDZ^UTpdt#c496k2_}(*tgpUaZ+JgpmXLj z+^SAQ50QpoAk*V5Vs5nQOje|UjgSIu8uSkSL>8VmZA z2{;Q+TiXRCdpfBrcb)~nzz3)e19pi z=PU4r;q&3wwQ2tO%+s5xc=Lfs(7}cUQczNosn4F7oD4W&2*-m*+Jj;YY&TvlX>v^xq2>doT5Wou}lko?R^OPFNfUm0;pVATzu(s!}Z; zya4>!97Lz_I|>I#jgj(LCnhDOPf;>-9`p~KcYjw{Aebotki+J)#J(WAx*Rvb*8{(m zusL{Gh|Ta#?HasryaJL50wiS_QcQUay}W&yqFGFo`0b zrQEvszQC(u)%$@*{pmB~$HXO<&gasc_jY&Ro7TgIdD8jt&VL`UbNaE6bBglZu3t#= zBy&uH+*{4X`Fa8)Q^G0ZkzLbQ4AUTdglN=4OZTSYfh2%aCI}k<4e7l*Y&^*5l0??@QfHw-@vqMyZ zmn?nh`D+)w~?k* zugtB_ZPRE-weguR=O=wO*qo}W_KS)?2wQ0Z`kGMB7!6Sq-F#|VB4M@s40s(!;~*BE zc*07L=1&vX)S1)q#k#!O*olp(Mcus&&St!(wrSTo(Ns2UOfcpCQk~I}Q3-~~dE&v| z?jal%>QNSnwE<5wS)B=p96s`l*Dsb9{JL2=!VB9&X#dgy7>ocos`~Wlb+>#7uX_k644{~{v*R?(3)tQii}=#<^os^x^z<0 z(je9YnxUeU(18rJ($25>^(R}Mn?5Hnd&C3uMgB8scq@8NMK+K(y2Y7(ONG}t>tfrc zK@L=jE?%ctS8VeuzFFOK^8ftLJG+}vpMhx%w#QoS`q|%khtbcQ6Kgh*Xakt}I33@$ ze{D5SZwhNoyb#&=_vk$RF^qG`=f5kNEf`JoKUef1YC!vx$i1ygu%4gh4=Bi;tzY~#Ni`o0cX396 zA>nP#-*e`tXShsTjri#a&|_6Uakf{5xWM>*fD~p?TF4I@Bd}}%q<9^HLe4bc=V=RI zW%YWEdHx3Nbi~%){tK(>&hE_qaWbbF4gyY8&|+?KUX9R|fXdV3i@N$#Xi?}+g-d&1 zT+RSfY2klyxLio9-G>(zm<)F0^9LXUS?Gi#mDz31C77;?faCSOqa$T$j(TSp49x64 
zCZr2E@?E406MO;4yu)TEVNL=2`ng;3q&GNFbL$KadeaSL3ZXI!2AVE%U|FQ)hwuiO;?fK^qSh2uvB1vA9(t_P8y<_0xayBz? z5mn2iM;CiOs#*S)F?-CpP~a0SqqPhM{lz)Ll7xjHL&~JEND7PAX?%^ zZ^4z*eXd{$0Wn|;%I1{j<;{UF1mtL-2JAPU2gu2_L04bkRzjw-=kgDMTh*WaPd3{; zg~yoF;8Z&39rEncV zlVpOPfV~w`kUw6+%yy=6b1fWL{G6Y~6fTNY%W=d4INIfAEUG$S>Qp%lZ~68LwTOqA z5C{17yv;$+c+i*lr`p{{l(M9#s0N63R@|h>58itF!6GP+=1%AUWMY!&#bBsh1Mlx% zC@$3S9335n>B#!}`ohKv=z^>)i>1`(4Ow7nkF5UVe(zt;cn;r?go%a4PINuA4N>1Fl>1udz~LhbLCJ8)0_xmxl^`y34`fc5Jc2R7@oxY03-%ObzlnBcB+WRdhhlJ&#qC!+uFtCwWK?DZb^#X_JRlx6G6 zf3J-fb{V1&by}43+lR=_h}OJ+UmcPju=i1dukUfod{=&l|m4MDzDuNYmaml&=aY-ET)@La>B(GjPr z!x*^mZ3U+^4$o)-}I@w7~S=t^JDN2)P_IV^im=~U!-ol*u4b)^+1`Q&g=8e zdaeGk;!gwlaX8OR%l3|K{x#4LVpRX1mW@^+0RoiNXRS?)7)y|%JARn0OkGi1SMj88cxo2$mYRDR&L#0 z=0G;j*O40$)MQJ{GEw?U5~5avMLO1sF?_JWLMUZ7)meM;@2yEr0}x<2-d}eHuMfCQ z!s!>Q4@jTyD6_6Azo3Ge>M4Iz+Q z`xC>%i~GkwmB+)!ztJMEq_ll}+6WwK+qMBfPHJoO2A%nbwzv-J=pFhufZvLXjUDNF z4V?21*rz@;9NCAq#AReOIE~c+>F@eE9fCB`cS1s#;fMr5J~M%84wRjl0e&L)L&lFi z@Dx*$B&pRkHQi2kf0r0m*aKIqv=$Po0PO*E`wI>TErf!t1W)?qT%lIJuD13&v&L~v zPEO7s(AlxX+7qMf&Cd+8x4gIrl)Ipl&CnM!fH|7Aq$eM7H0;df;9GbXx z6@jr&ZnKKXor6DqY=8-yF6b&H4dL=9&2OsDq5KYZ*zopGcA)364YpFZHS`&c)OweY zY2adpK(yI92nZm7fekM%__-h)0E99qf?IDqKg+rwD*bhO6C#L^QW$)PegbTELnEVs zfdRm+!l`_8yxs^mMj`5>E)9&^Hj;Umf(c5O5hVTaH1HO%+OTjbBR^DCRXqY000is- z4pQ_v;Mi+h!ra1QYI5?;@WkL?Qe0dXKZUrzJ|P6^i8zdzX!ML-WlT&=cFr1)mRR!# zm}w%2*de~5*$P2L2tL1%H#GbMJ73OmcNhW{h1NjB&>*}hwFe55^uENwWQe_%jm>;( zpe%w+#$T+E;BTNw33x&o)alW#{p(TK)?stKLjw$kTJQDBy1VrBR`WgSbMl{G&@nPn z3A&hnPh_S6;(STLUy!wQ-&xSO{>412e763Rljs z22Jgz)zDhaP`a(9sm+Ykl5SHGQdg~pk(KpBTZWh`BhNE1!1Q&ymqU9Id! zIy;2+9JhY=uk(ws!GZI=m*;)n@BJPI6yI>zY&P~M#504hPt&Q-SO7a?*HbsJOc+0w z)E0J5R=DRBTSAlHW5$?LLF)A!EAP6b?t#a*$jC_4BtAtj>yu88H_JaT=Hx|UfHvf5 zcXx!^fFMv_UjCsa^^m=E^9aOZ)y6GR)La_+)CJAZO`&;^hk3nt?%6mQLK8pCEu2`f zy(=O6f)cpJ(Nh;n=Vfvqb#@ARCpv2$Odww!vrC@Z?wSh+P_FWWpPwMJ7+9eN5(GN?K_szCku8udx z&3U3)4TfDl)7G}oUiD>GrT_G7sI^5X2Jd|#(Mv-zx|+0UBh#S(86Mo$*w_dot^pso zESDZr7Kr<>{0U7Ryfdjgwn3wLi2!?|Cxp)zi^Y52wN62g5D3gN&1$u}^70q?h4Dbj zGn-}MkxGoWZ1J7}7BaM%gd%q6qCGS?3d8Q_T&~opK&5bXh}sZ~%_;?}0eT9&0xvo( zEiDv9qeOQ0#`+P&TA}SI?k_msHVDb#jB2+V^@@w1XjwQXX*F z+Un|oUk5AH;RI0=ZvAyrnCA2a`B2i~w#0yx;FtR1> z8qRUxgyW{A7h=lVje9Vn_NK&F?6+}cpL_mq6z1N;=&N0XM!kEiLUiRE3Dh4VNiL^=T z{AooJY28T@X-yI3YW&82?P+iP*kFEM!-7QG>rVV{l~32`H4(Kt$IbM?l14RY&qspt%FVB% zl3muwD_lJytxb34B%S9yT7KpyoeN8Xi;;BP`yN=>KJA}lYps~G+wC`AmZs-%dOt=% z%FTRiNkRN0L+&nqx=_luufjl9)7j#84@v77@niqV|CK*|k+yLO(rd}FDlRTQDt1Fw zR#rzxC&ybyySQU?VPRoAo9Z)v|0rP@KHtE=KtI2266)zjHPIJ?EBD3(#cJnT;{yr` z3OV{oUA+N#bDJF zYQt~V>QItt*Wo3g<2YI4M!n;@fx&S8bgMzAm1>FiUK{Q6Sdn@OFSe0wEV9jSu6*=P z(N$IDH?DuvTNUF@+gF=(NlQ!QW^JPLLI+df(qxL9$ml0+`fd2slFTO7k^Z_A=egb( zgNm@l`x$olSBzYE>pR1${**$eaL1_)%#U{qm`?op`TDGvP`k?-1_9H&n;#IjMb2?s z);U#1N^LuQVdl3dm$bCB(~k#-4jt+!@u~R!J+CX^5-Vr+a6?*;IC-`rPANtuGuD`X(??|N3y0}*CPDMdDLVOM zOLuBZs&yV;y|>psP1v89=g%zQy!I9{3XE>iyng-qnCHO2 zKq5OiDd8+$lo7I_Lc&g5>-`gEx)TwxckbM=`>4EILqj7~J<+r!rm!Wh*h(^+FF=>^EM@%uJwt2sAUZ$9pQhssO(0StLIboY2hoS7AAuRiuGiTh~ z+_>^5D!=$1Xv#EedwcC$Qf3gpN#m2O$5r2W5o|^fcY0F%| zCDR6*Hb)B!i%air-1>SbaF6{!YE)E|oY?HuoHYmld8HX8y=%8`->yy8wl6&zV?0{- z-cMGjw@jcdM8K?-Ka+vaNZHV^m;U~!twZsn?YXRfroWVzCu2e=BeJq=KR(!WRLssw z1tZJL%fpZC`rP>HWNqO)(Kiqbm&k=5%fI4PUapgFq(bcX+1c5zZR_I5S9H8;k}hTD ztz|OH91!OisWtaudW0H z20Aaz_V8T)uAX76;pDUw;m(-T<}@EAcp=?)VXZDw80LG3z2rB z`lkwZ&-Epwtf$!{c;$m@8NzR-y~#8;31QnSy$)M%^KZtP(h!HSugtD$x{g+!o>43p zgXkn~v=uno=`GTZcb5q-jC^Fd@SJw4{y>P8Y}Qzi`Os{{pcw8-XtvW2_t|1ev6e)- z?0#f~e9UQK=EH{%Wy8j82H}oER(#efW8O7^tXsdp7o4gtIK42^5aSswyGy{d`GrB)i-O14;sdk>?U}EyeG52n&RYaG 
zhjf)^R1;6np&4TSVFU5Yse1jYy(aU~7hY_Yploqo(jCnIAQj_E+v+qwSp5K@eA%En zi)X|~d}(HygF)DOfWHJuM9_AmT|3~$4o%!9B{OfEev{{P{Q=iEmc5m9hlvldoLHk9 zia-=_rL9eBnfjf6lbl9_0p$v;D-i0hjFO>b&f~$S+Fme9)H{r4i>)M`9+=bS7zY`8!yW6 z@(l@UZtKQB9@y-rtIHtGPe(`h&bZ-t%P~xJeF4KXZEjHacb*D|PTzCuH#6o94Legi zlP?w9s;a8KH*?~-`sE4l^)(oI$<;E4tEgN%GJQTAyU8W?adC( zsQ&X$|2Z4~m&JxgFOB?tR3TnQA+K{O0T&FnH#!_HGg6T%k6bPTHDev`s8qg3YP4*> zV5l-`&*b!29fii`0mBs(9^hMP8-g6XD%~|hca(#JiG#$I8RQBOqV{+|$+7)myH3@v4CV3pr0kOG}F=KY}%8lgJ8y^L2IM zt&X!yKJn7{UJQvmnjmj#YC1-prs;+PQZ+fBsu_P|$W5(Ns&)-ccU#>+nDm^qWQtNAF<_GJJx{jdS#R$>k9pWLYbwfEc-X3)Q z5wEkvAD9sr$RwejV`(VOgGB#EiEgHkfRW6uHHQv0Y30O6M(XXw$E+J{x0<1A^-c=0G`g>yovZ z97f(_dATlSBW!!k-}JalJHm2 z|1+x0)hJ<7Zz>k`j3>RnzyBbrDYizhH>R=3T5yeShr?*`4loDPwt_{>%AIP3bLnPn z1^90`Z(KIV0t+d>YhA(h7oa;ESVFk12kIw1P7AgdnSY6hvKnWR3u{hLOPL?al7nC%Xx=FGZ0m}B*;dHiEa`KA*mm|f*{7!T(uiv7+AHqD8Xbf+oVYoisU zM63%(c~RXyARw(=Ym-0o-vH2@GpA`K&Yl(QeI{=%x!-rEkmU_c0?d!()6Fn+qQ-C7 zwd>b_Q`1{Z>Odjks>uH{tW-6Q!ScT;YGcg>X$dBk}^AEJbzp&@AUaUPzky+j5Y zlyAS8{9x1WXkpASNx1aT1b+M{K>EJ}E&>ufF-j?1nu7>6ls8`9T^2mtei({}19&8u zMu$I}8oyC>EQqh;+(d76Toq^82E<2j!mV{qzdv~pGREj6DCXd=kM4o~{%*7gHce&a ziNVaa@{kkbh{UcGr^$r>u9TDC5uB)*-sdemgbgb^_vbZN&YxF^L~N1m^z`1lc)CHK z8c-^8EPLPfuO{HT8-x|GNM0~bu7a6Yk&&GgCW5zqe*$l_?yK$vA1x=ewyW2#PYt!? z#>W1c?FnZRFbyFnz)7R(hwk+p5LZTj7dJFCfT*Xc8FtaO9xuAY9o0k1xx=a9jgWemY z+nA8srJS3m+m0V!a`Ze6ECPAtk&lk{q2Ui-Dk_k^2pFa8@VdLJ>qSlcQ(xkxfI?(R z5huo4pil-1-x_gUI)MOK8cCYV63uQ+luf_D^wq+$cgd@ue1+Y?goZn2k z)qWTxT9OhtZ?LV<8OfAQgPdBz$i~IhruQZ>(Rg;OE0?<~12D0ttILfRl8COBR><$y zYHARffFan_k|*ds#y^GBn#XyoCSC;!Pp`MqMM8&xCvu zO{w6|_wL<;jFxFs(>L~wYq&nO!&*Sfx|<{MQYNRD`1Cdp*GrxW(zg#^{0BzI&738^ z?MtHS2_?<0g{Seiqu)`@=5H2%uv+)UWPcrb$focT2yCWl3y|5svE4+}c;atTeuo$P zb;&{#KO~*;j~HQU{Dm)QHJJ%2B^MVLF?*Q0rw1TnQZi2rwb=LDH~NX3fI8|@E#FSo z|Ni_K^aK&0$YqQ=09BNRx!JbEt4Yh*M|3J{xL{t9VG9w6r5iejCtIMHwRza!OU|b< z*sNa~Eny-ToY61wZdqUo7BE|aPC7J4V8%C&<^QLU<~mCz=Y`QH%#t)BBSlb6EC=dG z6Vu8e^lA|lg%hJ)l2OfB^F;Q**~eLvwU@q?mP%Z|9;Mm+khoJKNvBiG~2Tm^KWx$>SoxQfdfBLpNJk#3n?-#7=Fa1Gr)NqJ?c zIs51ZNM9$f0dN31A9STPs%f11Z7bpmSn*(4+Ie}L%3)JF^pOX&AJlYB>_o(c*lE?9$j5BvrA_!F$@&Cq7 zC2Og1WKe*tA^(=S`It!Fyey<=0(MWV!${f-2{37BFliNPco-NmRlj6e=xY}TT>1HXbjGSq+h(#x4J%%P6#2EHVi-adVZbZ{xI^p0o&ZJt zI7Ja948%q9zqLUWKmI=wdQdV?G9Uy5usN~gDiCO(12y^f(-737gz1O@DFCBYR#rwx zx+C}HV{azzB&-IzsexHk#HdsE*BgFckJUXyHxqR2%M)hoy)vH?3MOm$x&ZsnJUrs+ zb1iF_9LBH{oq*7xr6bQu*_osXz@yTmsc7;A#-Hpso=(T)j!10x0%Eu;)yF5PetEspHHp&W5_peK@o@69Y}mr(X=6q8+Q8K-e}lrt`pcxPsYoIxm-v3J^Xg0 zXy?w_-ZJcU2|R3ighP0KAdOHWqAl?`&e6#_-9o%|xRC6WbALqtj+fS^$~2fqqW1Lk z^o&B?aWXWGh5b4lZ+UnO9CIzJ#l?aO^;BYHLz9syEDaFu@%2!jZN712@|rf!Y*d@r@cfwml3C+-+Uu(9u&(q$O`_6WRP0j+g}e61Pbabz7)fopBiWwciW+J1mcLg!ASf2RP_NQ zwGGRzBfV#_Uh0+uXBU1fz6my84|8;1C`z}d}_|NV6yuYk!{gK=p;6+ zcY0LRw6{xS5ugD^63C)quv|pjC`P{QR90Tcoo*JLkf+4vqQNYo#%tfsd-<}xlr3e? 
ziQApE!Oi$2v#a-B;Q|oT;6f=QOb5jt^P&%K)F7;R2a0p!rzA_WD2RD5aU##Ru?+J& zLyTr^w5LQ~*Z=fz3qCkZ-yitw*|WgFOyh>JN*S(}-=aT&Du`L9yz>6ux+YOml|gsq z+so5wLWuOV5~%FnxOWbE%y&X^j!}Ej-Qdk|LY^-+Yq?K1Hkcy-J( z!=%a7!VSuWiG>5E1VI^e{8}ld)6>(_M(8BI2GD@`ngf7HvuD`po4Tn0(b8|$>0lRj z--of4`|H8(r*bhqB^?BWo1YpKw)|rb34kybzn=R&_%Ob4^U>D?0*E|^QlOs-HSoE( z^Af+>4CW`}#xNqt?$oJMlvfHDr_R$0?Z<>J_X6ai{^r^Y>x`ro*cjU zM4AxYPug{Mb|TUkahFbTjx(}9;M?C6fBCpse2>JJva(QKLq)>ZQb$(G`TIeg1Ax&_ z_)8e=jVZ5yb|Bz~YGvFv?anCvQm5;@SK>xf#$auduLj#TDV=&b^gU!9Hd%W08ly@=SegYRr67+%7xp&bzWq z(TKCf`RUEf-%<8PK5i}?D`g9Zt3_<6g&uLPBS+vD4uKhU_lMsuJ<99~<&yAppjl!t)sUl>x`WNEO~oU% z*?rGP3QKyu0sgHn^k-&aVF47>K9`dGd(;*l2;lmH=-c>Cz+C8`sPCHThE>tPwug=C zl0*Icl$4d94eeXD7@x+R11unXzjN@ROTT?{YkZ~uE&ob18+@13|%bda`N5N+mdTDreTRXh5^K@kAw1M{eHw|ttTEC%0ozH zz0WXWb@iFd!o|U7O0uv#4Mi9s-4);I2_;g&B)+?UlF7FHF7QtBrA$+hCpB;+;$$K^XxKeejXZmuX~0LiNC{fD6GQRu~QRWXfTxEQf@BYkfhZ`TBy= z6Ke|DpoMjX+-=(q{GnN1G`zpONR7jw{a49ywj}+pwUYcl?XYl3w4|8H1m!5i_5tNV zpTGgwd^rljYZX>0*v^uCx#*0!s;fc6oec+=~Py_EY@M z7bh&POG!yl{;JpaN!ocqN4pT-urgsHy0mCWQ#-(kkfelE5xWyY=55M|JLBEU4zCu} zYD#AG9vmmQ*g(0WX8SZ6D`-DV^j5K*C5&L&5A+ow;kZ0y1ZsC20oq8pY0v0quq)kY z&!RB{80GZ-Tja&*Lh{l)vnwa_g+eYYuSO1W}{@L_N#0;U_8!3hnjV-=ud zFuU^ItVMa|yyNZwzxpl(Ew&DDLVtD7Dq=QurMV&q=fhf-p}fef zYOL#9n_YhrvKQl>PG2nU<6^yDS->Y!t}k!MT`qR^ypa#oX4zcflJ%5n63Vi(dcxyq2nGtn%YeFf#obX55>TV&L4eTfT z_+Zk{j`y&fQ%%un0d0Y!u9CNDP)k_)1FZMY6_F$ps(%B=C)t{V;f~zDiPb={p(Z-gN_jGv{6$mL{ zw`o=c68K((E-EZ0kZA6)pBk|2X}p6RKk*|%(thHP@4ci~Y*EvHcK|UlKrq55X0X5y zXk|qwtG{vPvb%Cy{`s+4d|`yJs<8vmixR2)8IHa6U?WP|BD!}BpwN2KbI1z*a$UB# z0Srf&h^7cA#6%+Z_f6IHJDYy|csG5L1Ex|3n+BvX)S~=EFU^b@Qb(e0;Sx=ywsy_fF@Unn{va;n=g8>W@Ym(n7W?%_U84c9%me*U zJ2V1X*FOdCvtBA3zz4L9a9FU^fZHih1Dh=>&^0p@zBCc@=pxZfh7yyHxXI4W#_z?T zlT%Yu3$*n-+;V`wFnG~}CG26b9ymWz+UHdid*lHtZADrE4pD%tyGs4VZzn{4^<$YH zY|6y8C-RS=dHe5TPx7=gS%BU%A7Pbyxjg=?bVU&X)0kbs;#3}g4>^%Wux@i871fQ0 zQ<8PxTt3B`cB>zd-nz+znT;)^UUZM8`IoO7blHzM-&v?n)hANk4qb<1vxSEMR?ecF z%OM!oX$!U0bj&<@ACmZ;*`2X|AB%mFjGrmC#Arv!aOFm#$pYnEa3ihqnQPg*)w5U(Wf~B_5O@}K;_UV7-98OQP>Tqb_0CWUdC~LHBSs|RKpnd{nLthz zZsA--%mfQpH~^7k@t4l8ev*`BU?D)+#qHQwl9cdL{Tqz3h(^84HYfWp->7X`x0x;}eppn6W0~VfI5`%gP*Qy8;8*HI9Rs z;5}`?2+XcnaaE_6KVP3C1UsS!2vAKN*^Nj$07pvjGBIgB4Si_d*~5E7{_NS$N^+v< z4zpwMA`74o0rBLL@7MpU26U@g5{eqLD@WJ~aTF)odzVs_dcj%IEOq5aR76z)IsK)q zPFAlCGfNNftc(sDQe>pee%~4Bg~44lh6(~^{ZXm3G={CbzUM)*xVwHX4HCUWkTnR7 z^vD-^fE$d>nY^^ULJ&d%16q#!4qq%l!bMJ3ew(SzKJ{svoN~yJDB6QiGxkc%5S@zp z89BfR0w#^h^77`j8V1*{UUj7<@&y{OOnR35m5xTOD*#GjcEyCq1@RvBJx@$A!G|d$ zP)WA;1HiIC4x?cnKpt+(WQuP_R7tQO2>D^M`x^uAM)y4#C~-bwb4LhU&4mnF;&J-b zQ-inHKIt*LBC$R@)A#{k&*4vmyQ$c5G&*9y2e~3rMt~WTb@GLbx%y5~?6pMlFcEv| z5T@CcQr0x2`n7485c$jMPV^_H!Pr;&YHCAqG@oMW5nFQB;#8AbvQ`6>6kCx3n3l93 zfH3_;*61R`Kp(?>#9>d;dum&h@x^^t{m7wPAW|K-*Ql;_>NimrbLWKiKzF{qYu7FS zmIbVIVB)2=#BzSIeCIcvxld}py#Q{?EgL<({}_@tgz9i=dUhf`AC?WtI-oX_VbD#$soGDG^)~!*2HV41p;3ib4vO2W*8KRG`t7oc;6LX6)4~$DwVG{)##~hvR&hIk;9AFdlOrv3Z;ed!N8?qP zT?qqjN%xAtvyhs-v+v*2#M`EpSQjcZ-1=&!FrLkZ9!xsmz_Iz%ocNhrMo{D1=BDH}W>Xld71W?P6RQkTovBle^ssKJ&~mVG68p%ZH!2Vi`#umD z5~sva4W@Cja&H8~SoD3?IwU$Ye?ItbQX+!78EtB}s{52k8z$agkw|J^uP_3kyG{Hr)QDznw7X6Bl8WYt=mKAB*vg zZSx3sRf$CjLZka+AyYzvI(tBPI1|zSatZc$PKPoR9F?Y_b4RVUiVc&VK7Cr&ChJeX zMf6Y=A`cFJCrrHg@yFA{p_4yPl?6dd6k9T#t+eV#m@9+_KnzxsVK=Z@{+Rxs5 zo3Pu;O4tg09P8qA3klgNkmJ%6^fMOSD`7|p(Y#^{W)x8oMRgazG(oqdM^CXOOS7`E zB`o|Qr1RdG@Fj@Zu6K7l&|Nsb$Kl48avJJxcY~4#a$=^C$;_PRo2q--L=SPv!#Y#p zOOP)1l+2v>D)lA;VZSeTxXKYAze4T=;%8*nw7!+ftqu9;Gj@_xVQ!06c?E^Xk426V zvyK*{X!bcZN|fJz_;tC++5*lC^QQY}*^@ZJj0Mn%L+DP1(sTsDgg{EfY~g&%Fzb4- zqC(ZB^9wDf?5G?xG#Zfm3GWI}lF$Gkh5#Rszsfn?h7c468A?NADe=jAqK2bL0^$Aq 
z1cK2kdxO14gu6>E7ZtBQ#sK@ zg68y;+@YM#FOgDyoY*&=OeJcG>b`R_%%=I1wEzyc{``nQldUdDtOO-o&nfhj_`Z0d zfxgI5gXk7{2F6d5(3_FOmq3Dt;Mfvr3rwfjb4MY1E|AU({=D)OvKd;yf<%gKAGKZJ za-RJbY*uimMn!EAVg=#o5*UMkg;?XZxuZDLak3E`H^HPS1Mv2|qdnTGGkKm?0#c7V&0bk_<^z`647WN$p%VjNk)VXAX^Wz!bcF%zM% z8)4NpKmbA=`GXdQf#>?(8<-|bn7;LIewjIq2YYfA2x7Q;hA0q=(`{sT@r5h!FOh(OwS3~M zt~G%%5v^*Yrz~#Ye##RPuxw+Bh>JVJryRPQ!io-@ukJsRHPcJ;%~hfR`#}Uj{s;?` z_r{NXvzL8M#NGvjDiJ3N#>QI64?e~M;^GU?9*!BRqEC>=HvMmoh98)dyI>y|#y1i$k(X?!OL1js`MG^aApKGQ={fj@lqoOt{09WSF#A+k7Hv?uBP zo(2coqm7A?3JYuxY_Z7+J6Cd16d>z2~% zbf}rNkT=imwW^*@8m|gpvMh30#q8>_jU}3M7cRi}QNti~`$~2W)OET80uI2rINwHg zdcSUmD{XSYT%To3!UfRvF}`8(#a=l!-9ZEKd5z*yd|KWu$7Waz%n2TYlI)2nU=ygP z!VoHvOYK1&$iH_Kp0~*`QIY)qOFnNSAL75OfSH`2DL1Y1o1AJ&GDk|bk z0khgoN-k1l9R2IBL;s$0F8`){DDjb<6TSLR}JYlrurcEe!K}!Xxo*r%!=Qu|+H++%m zl^nnyT|$?o;*)C!KFIt5S6n(UZC=J#rz`~niu5o?TfLpIVVSZimQQCr)sk55%6>y# zQ4|;uFq9KYw0C&DrKWZOOG$1zfe%$5;UvU{{~f2hDm$PU!v;~)wo(~m!sr+gi8;N| zrI~i{IFazN8hsF@{ z^6Bq=H3?|Y%Fy>d5sfNOG%o`70+x@|sOd%t*ZEkXdmX#z-8mn|nlW>69eC8ZwUIRr zNLGb&H7W42=_Dw<7~B#yb-8pFu=<4jPSg~`n9zVB7vW)o$e?%RwN(~QgN1~IsI#jc z@@4JWNQmiIQT4ltCL2Jv52+>YC1EuBx|DK0n<}hDmpKH&FlFip`UFwF0!delB`~1u64gmF!JWMo1sT z4#9xTgPT;H5}_kqzjn=yRyA1*$O}H$yrcU(@?bfRhyc!~V4tFE_0zRL?J1N<(}EA% z5JF;n#|`Nt`9wyO7zmVL8UGF3cy66$iQ|r8Eq+u<)9|H>*aj(+>KD ztD7}vp=HU_pnQRhLzb7l>X_D#^msQn4;;0BxbH3PR-+7tdjb4syk6a?VHX-?NYYFS zPco{BWl*tKmXTk({lGm}c8#qK_RX{N9j(!ZkurzsE{0X;mYo=$DX8A`it&j{>wsv&e?nPW(}@w6v6!EeAdz5qbkt*bQVqWHD}TZgs$EAOfJ1)2Ch2 z(*Y-(wXM>eZzqGfA=|BTNlhJvd4Qu%<1@XCBDO^HD12mO7&LbsK6nsq-ZTunN6>Kz z!|nKs#JjFl-kCH_l`uKeQIVfqCC_K` zLNMp@``QXs53s~Y79w3XkSU}EoF)WviR=5k`tIeP@=ze3nuQrG^Gz{7+{@C(D$!Nz zZTBy~7GEmHJ(E84$+=)>w))1CoPl-Lhk`UFC^j=D8lSN0Q^tJa3Zfz+B*@1(v9UKm zIaQ`hzkE5u&b}6(U7Dghdi3b6Teo0i0|O*94s~_a;7j5;$ctJG(1%H^y3#4kw3O(o z*!Z$BZ_#D>70(ewol=uIcWb-d-*N2Q>VZt6SXn8rPO|W1$T)w2)ZW*p+2o%GD8SFp z4|M0{9$RU@OL$BrM5uiZ{Z zhpwDdBQJ877KxRWRZ&T)Snn2u%y_HW`S~@m{3)j_YE)E$cgXJ+TrGE+ctr}Jtkd{Y z%}>?-u$iRV`a3^AUz>GDW&&T$^`C5XkFbw-F)+~Vq5-Wb?)-#VKcGWbF5i)eL<7`T%Lre%|S|MoEG9 zv7&8Th*z%{wyRW>qC@T)dz;sN#)C4yW{kyLfG%q<9`e8Wa}6am^|!LJ!KxS^G)_m` zz$GNq6EFuf_sE8eh2posO0bqoH!TGmj*5wi5pRupv0JydFr3=u{{8#%N4}SrJ2~u{ z{r>$z*?RKAjdWL~KOW2BJMkbPp&x`F_5sJd(WWT}3SH;(2OsA9bRHxT8a)U=U-Z-V z_RBYK_9OC%3oOh+;8mG|u)5uQP)Z&y8o+9S$F4*HTT=~ik!T$d1PL4~=z+4!GQcWJ2DK9T?ZEb}+ zUZcYh=nn+`hT{AXh%m!8s1C@TjEX&XraI+ zMHKsUDJ1Y)cQ9_E~NpW0F!@yN+zkU`SR`CeA9jEwfB%# zRy{}3M8@Gux#HvDv0=l8xD26|LLoJ57Yx(O0DYcer}sAi;Q5#$o1tb6C@0%OWDahHGH{gc z5kQx6D7*aEk013hSq~mO08BqFfRm|t*_dxQ#s+3)g^u&ql9C@;7M_POuD87>5s`(W z2~C?zCr@2jIkNxu-=w&6=gu3ouguUigWE~4CZufw-VvQLrJCP39I?_1puBD|uO zs_UxW_WAQ?#W-F80kt}}Y9t`PH?kf9 z@6&1Ma0|QGylvaI&3go2exzF2rLnB*XV&)(G^9yK-G1m3nC|;dGm!NBNAz}=H*dJL zpPgM92H#~DvzLrw4wOaT8sk6fm|eO_!ZcfHt|YrHizjdq>2aEmkHj_3eOA^x}1W2 z>D$({nQvkg5d;SK^}&rPZ0VX&{T=GKD{)#n$?~iJ;8Z@)wUze1<+pEH^gb}MsS%EZ zT)3F%)i1Q?40`svRet*L7SI~nSxc%uh1#2y@z_P1uPulepPbajL7n7;eJg`3e81&y z9o{cm^v^$A-lV73SZ#=uLrrpK6%w+8>%3~!DjFJ^u&^*d2~<~AR{xx2&*kZgW!7_v z2|~F)Dk!+z7Uno3Z)m8Y&YWvI`WwQIgrwy7__&q!-sK@H-2ZrVLis*02fPa<4&TcQ zxwdheFIwhBZO&XP&|GmU)g8%LB|(ANV-N&AC-P*{1`Px{!H2?+(V^3=7J_}H{_ zjfl#Q9XlvUOG|t<^OGEZHi_FujPagKGxpHRZgP%xp zK1iUeTp}Zba+wY;6>M5rjGFE1ZR3-{sR0!Q$<&P3s(<90ak-$B)LCi#{m$L^P6K0O z5z}TtLjY$RE0SCv4Yu$ z!b8o?@{0HRw;VabEg;aAVG@iKlzj8x@@g8XW5M`DQNa&p`PBklkMkMMacJCx9+DK zW29YKeeKIGF@-UTaRE4sX;j0St)DsCGPXnMZBj^qnuQ7tMw8gju#(98vL9@1|Mu-0#L=d2fm}Q2>Fw<7 zK)3CnlkNBY6Fv|e5Rd`Bh^PbNqoAOG%J}|1MN?yAt#w(&L%<}Yoecp2>e=+BLyfiF zI)51N@v&J<4+n!vqwrd49$2}D#kzf~9jvXV(0CK@{5fUOhY!EY!90hWJsv*}Ii~;M 
z#ft{r;oXdkOT{~MkN8VN6EHM<5fl{k?3pDROp*O(=jLjqW`!~=dSufzy)sJ{!dm&( zu6`W;kh%w{H#974=+B?n*Q-}gonGf}n0;E1_HREwTboRc=mBYk{LZDuQ#q zzK%{SIOH?wE_Cq2q)tQI1_UNp840gm5xq;uK2}PFrf^yIl7wvHBIJx+dW2$Yj0X8v zHkIj`Q(_p0J3%-kOUAL}%*h{XenOL8h1V+5kaNydkM<+;#j@Q4z^S znRPEt|AdBWqgM>akN0Zdmjz8Zxz8p0Oj%2SYNN?Y28`veA(NOG8EN)$bHC{`zGH81 zZ)RqeuIUJ6x3FG-gJY9RLc-0Ovk~%te*a#*kB={1mDTC9`lU;fNoQrP%UT*VNB|ps z?+*V>7BNc{SVmhC!3%4X4S!B7?ZPEVP><9SYW%&9zql|in1BENN2QdhUDcQ*lHfK~ zNnMRyoNk_{U%h?z1SLG-!J#GhGu5b{Vv7?>YHD?;C`-jdaK(v}lj&y~(W}9K<-=M4Q3jh`{Kn6_ zlTWGOlxTVT`}^U4sFt?fJxOX8UpfjAFXs*|LEE#67rQN_^w7az zh%!yGjV;Skubs8&$2y*cS7O==uWHwO7l~UDKbM3Y`@%xGScmsLX+=o1HZ}?Y%x7g~ zY34>3mzMT%O%d?N1(`rO+J*!s0q)uY(Ha|n2UNuw9pZ>yhZm#@m<@9BWM^i7>5CUH zu6h;_Am&8fv0(yx61m4;ymoQBC^fwP>})(}qd+mP+KOY+6=_qMb)CGrdhCnilH}0T z)Kq0w)XW+y=1F((;PVG1Bo}McWZ2o+m04936nvAZ<5rSm-8e>Mn>n1*C3AaN7jzaoJ^32AYC># z{;nbLH**MJ_39o6oI8D5Svw^epqZ6bdcQBx+JTRubrTTLBcJ>d~z@|pE~S^1%lTT z-*>&uVchG&;oZmdPIvvVgDlkX`!}AoL(ploB4QG+85pqnOZ%SZHL5M~zZeImHq+t|!zD&%zAT9s;H44M~ z=JmQdaVodpNX;l23PY8Un#IH+c`DhJcmz0#;OaGUxY&uj_)z%0N7cJH6P72u6C7YH< zuV0^;cH1dy$2^4dGiT2NNe1rvEBCqYY~Mh_MnYcEKS)1JHQ16M)loWiM?0*)kbJ|| zb_V4K4+Klj{ii17%oS3JE^b5lRX##Do@*T&)VfMAC}@DjQHqrwNm+_Bus>F41-a?D zu#k%Ikgv5x+=cW$HrX$N^V_>M(N_Z>FNf>#N`c@dMt~#J92++!MO8A`i?WyTHC3DZ zosbFP^|q@?N=j8*v$Y}=YwCk_6(Sm;8hz$C(_K^T&YTY+SfQAo3GDG)1>0| z`+g}Yi5a3^VJ!PM;Sw%57_{m+gS8G%y-v*nbzqgJ4D>Nmef`&-l`G?*1mN`g@#6<{ z^_H^o!7P&s@1~z|XNpf;yVmnmNmtz5xWT_aARr*zX)f+xNcqPMBZhbP}D-;(J%1KQ8-qIo}C}@lG43**< zp)0HMuy5lUkwrqU1hz=*XZ>4Ag~ZnU&!F3cA647XwkB*pWo&1cbNBAu@bETRtv8H~ z6Z>DST)K~Az3p5bIw-{9w8HF;ujwm2i!db4j*NcZEyTXERe}n7Q z+~ZYYOKy0S#J8)*-#OD2X-a+N2|Jf^8*e03g=1OqKmw;jFj!#XA!TE$Ei~~^+hoJ<8hwFleii(O_X)~+{ugu8*fBZ_$ON$dN&DB=hS4J|q4I4lW5+B+} zP;xC04-*r{@M_R^1pW}&pYwNSx2Gmk?^_J;^W(v3FrsF?NxhqWM(XSwC<5_VL!1!( z9>gb8w{Arkso91l>#R1KM33wK0|$P!ZS`wAS9P8AQhwjSGrO`u+BG7g+1Np=i$=C^ z^Cq_T4Bqz+#l|88#c8*dxTz5*2C;S9wpB$z8zQWK^=dWR zF#k!nu}fE6Vpss^3W3n}|7N9k_I~xDO;~S8ug^U;))sa@u#1)2iY}zE_&~pq zaIe66Te~1A6od~42Q1zTt%^Hg=L4v1+PKkDW#{tyd<&55Vh#%kOrvUEyl}zQUf*@< zz5x#Zw0CbRj#xwuBhII0nuh*U4Jy^JuGQ?C?~WWhwnaPVx8H2+9^=g$CBh4yj}1+? 
zIWsXZFl^oW>Yr3Mv3*DC>%ylmUfe%E+qx89o40fE~&$Z!t>@_;NQ8}1( zxc#Bj@Tz*y8Iv6smhW0w_tx4lBUPO1!GVZ@mRz+6d1d~L8$b2FK0O3X;ova;=g*(Y zN=3_owLEDIG}BrX=MUnx($%<^Z%$$HGf9pQ&p&WRkMpb7uamqY*DU0Va&SDl`qpp5 zS2cY-$kv(YQA5AJNC7oTU9-mFbn4FQ-LtcIjM6pt(N^D{{TuPLYeZ@;gWdl8`I9Rr z*J(#%Q{@l&D#2W(c@qU2mW7@CIK~0wj?)w>Ex^-Mq}vuZ#a?N7e^`C<=FMoE6uySnC3{< z`h1s(JC;ezZgX)l36OQ5r$^w<*ww)`<9{#YGrG$*zP!4c?xgPpReiH7ZW~=RF3OYz z2L}^6H(Hy-Z~b1&pG?mG_;W*rl}kDwqu4WD!9wT|@N)Ku+P#B4pMHDQ-wEDwu00Nv zM4S|})$gW5&%B(6lz)>*`RC8q(J}yL5h~`G15<{yara#_8~w5Qo&2kn3@+E{SPndU z@?_-aIi{M%?SD6oB=pQsA0Hne0Izcz{V8qnwUckMW~!*wNE`PEj&OOm_xJY?3}`7T z-YeH0nG&W|oeg^NT(3Oljg6qwf~|vt1IM$KU8gL5tVn4Sv>aC7^J0Qlrr$l3)T(%J zeov9glfU_4-y0J-O$Fki0z{Kbq1ZiZ|6H+@>K`#r4jiE@f)yC%)cg1L=)&*DFsxm- z&IAvy{NTEw>GiA3b7#~B8PJr7T27v#1P3LxcP_ZsehIG6*_F9JLUGk|;36coXgsMF z-5D|JE25!7KB^`kH4V)Y9CIM-JBZ4O3CnHU{LowlJTACzUwim~sj=}!(zR=a0)tJS zRySX&zR%ba#CKmJ?y-j$Y;@E{v=06(bS5`u)2zg%-8}$}XKrrAt{d=_i030~=k4t6 z$><%(V!Yd}Loyd#-LERC_i(e=*=a^vstDDN7^YM!nLB6afWl!GFTm*Xg5?a~S8n$B z-?Ft1P%ixyxPmd`I0m1!ULMRyVx zgokcaA%E$pUin7p7OKEgmO1cCWUtH-Tn+sEMa?Y4DXi}Vv`!tnQKgrQK23`lv zl0@rq8r%%ca%ril8z?FNG-akhMjJ}@S;1ADSRL)>AZX}>y{4OV%5X!X327g*j&P;Z z1UKnI_&Oe*tC1Zer=GviFJ`E(UtLt`Hq(1BQZcWZVu{0diPqkl&C_$J6@6tx+GX#1 zK596@m8~|W|W7@9$$4CQh?mAo%uU3DRGPgLV zznMf?Nb;vjaW$%%=Q0?rlB#d$aAx~8aPN8UjNS&;(x3da%=tfKw=hQQP21dwi(3YW zjyb3I8=|Wp|9L%reBt6n!U;kNQs|NwU#XB{_-dg_kW?n7a2!+%6S)I_b%)euMDN*x zbtKior0aoGw4^-E8_ptGOeC=&DuW%d%8e6ZcI*#mC|raUX6!^CQ8D`#r5$x$-LNjxt^WS}!7GUg`@OGQ&z?Eax_5K- z9_y(;TUJw?yK2z!=hvrCpRm1&HHAfmVL*vF#?L=BJ#B_(ADkT5%x#0oDs^zHcm1-2 z@iWb1RhVVb$B!Lon}z0#K({wFk8}c&%2SeThDj&u zcD!@1&!s-&_Gg^x@WoXwQPjS^-aB+YO=xiQo*r;CUZS17^W^-G@6=n@T(h)X$&c4I z;rY^s4&LU$bvJUuaC;aC<&x3=llAdKYAr{&Uw!BzQ=W6*LBzR#s5fB)iH@B*GtZOE?s3gC>q`E7eerLz|uOF zw59VKjZB`RqZ;_0-M^2I8h(>G@hCffC_OYI0>qk-P_JPejx(s&2ETmy?Z*4=Kc!sX zedrn?A(u!ui%W%%q8f0mU zqo3)Vca29vQeB4<+W%Uo{2)-oKJAT+H$v=(UyX!xHWs2hZRJ|3-Z@g6w5tpGZV7`B z8qa&OyExYTwnL#|WNwL_ocT=wXotTLvReqKj0p9ZSeMR5pjJdfMTH@@xP%0N#88C* z;Q*ZDp1{ooQ9FzM6zqm?1bV1N@+ zwXm>3nSRU1j{v7l{4LS??~@F;xnTx7IxGPz0y5hTv&3gw)Cd>R@JzpuVNHp6-zS1nR4^8Z`_ z&PkWhO!9ehv<9rPb-E@AEUf(=Yea@Lw$xLUj>^~5^+-xeV4iW`);7(jf+iHSa;&T@ zEI`U``}?^^RJ^lSIp?L>F}E)Qz{T?qj!F#4x@8sHo-ph9^9RNPLc+qX07AiB`oS94 zvvw$gAKBaYi~Ra84hX@(rsyf$qfotMgPkGxDEmRTj{uJ%GDh&V4u^+?TA6-VCw^Nn z8+&g#hTzyB4wH(4g0AuL`qEN1 z;K!AgVuYrrPlY0(z-hB_b~aN9vHb6IFT#edpkn{%2qw$c6KSB|2bTtz0MN(;Q(07w zGzQm^PgQk(!MulC{RI2%H`%&%g!^9s#4|(rXk1PPxtYFx=Tk?145t4bU zM#@D7x%Ve1n7V;51_rGNV913W`jb<1ed7PBwg+rAs3c(Bg7RI5o8t9GFy7c9{og$? z0YlrPZBO@`2T-1Gn3@3PdIBv9@ZOA!jLZJlGu&EK&lJNyC>O@CLjUvKODLQTDX)=(dexG(_kMU)0Qm<7`yb`*}8dzu2t(+tR@krCe_Ga8q)A$vpFGfcpP=G}XvQb-LGxV(~DJeic`S9WN#t{p^>#vFRpsM5M)?^}R zZfWt{mAbh!$-!2ktWv{Z+YeQ#T;!i}n0@{U+p!q1+eAedO}zgt;jA(WKtA3d)ZWLw=o zem0FjynGiwn@+TH;xhfC(900vcZP=$+{@HsbKhd&svOQIM3^<-ZlK^j4y(aLTn3O^ zqeQgbLYx-hlg5$E``M5ovL*8$mb(3QB%`q2o-Hlyh`8q$gvSu9bfzRKZap+r-PpU| zsZ-AXOV4`)1F^8z+aiP)GrYwtIfWD%35V~m2?8+k+9z8E(B#?K*`c63gG&VvxqWVj zh=2h2Xgq}e84WoMNJ2(jZ6W`H0|Q%i*pBx1tLy4u5WvC3C3yEP2>q}yF_Q<@92;h! 
zn+gSti06hbBP=KgDWy5&AJnYwzP`RbJ_!j4-@bkofP{T~3;@O_uU`ooEM7i7I9ON$ zK%o5OhH-Q0?<)Wx7@!Og7!=Har6b^bX}lJ}KsB6OWwM`)mAI<&`M4iv#7Dcr+BJ(l z;#=c}Yfc;AK{wYFrGUP_r0*vfmLpZ*x}-%Ecs0^0_TId+*j?~I(O(RqdZ*#}Xu_;I z5}gF4Bk|na!E>=qWCUCP^Yy;(f=;($lU~_+@lN_yaLS^Y7JOxQKrMDp`ihN*BJ-RT2OwdB$`O=2T4af7Pr#gOAv_|M~K-m|rgV8Jx zQPJ)DpASV@83+`Vv%e^^W?YDRr>Y@PN93V|juRp5mnwg5o$<4+l3lZyG~5ckv!$aG z8jP~PON4*Bq^zQ921#w-|W&6t>bzzhdkC5*j|jBXZm^Ef)agfCQoSO@V%_e|>gU-V-7au0qY8o0`oSYwlUjWVe+7)t` z>@e`*p{tb;6@{U?%#!baY^#kQpW!5Evk{5guYX1Xha(WV5{1I{9cWj!XFq7y^?sD4 z)5((zOJS|Q2)rUrpppA{vhNu7l>h7urK$OI5Ai+R{aw-9RLBw%-Sd4vCH4=LEb$Vk zx|UGTjCT{Z`iHx5$f2RRv>3hmXlLdui?ns2`Nbl%%S+3# zt4u{DpGWZ5Nf1dXh1_pjZ|j!%%(WGvA~c5TA3iR|!^k%_H!nthCs`kIDvXd^xPyp| z^H@UW;^y~3Mnx`SviZ)Fk!3v;&?M6PDh8^;5kYTUIVCib;9HruZCEY$DWSFgVT^IR ztE(8nw|s|jkb##Bh*hy?C zPk#OE?GWGI-DpKJej<5#>D>1Zzstzb{<}a{V+P(<5wm)l{=Y*)jNw{1^7rqD4W!?U zg{i1+%W^=Y4(S0|Q=m$?$RcMw|CXsKlZ;NDloyyR!KNNWb$z|}lP5xgf`Ii+N#gK< zjV3N`VjnJD`aqRgGXXU-e&C|pND|>lg{6HH7?k_@hXJc z-q_01?17?aW3~)Mtc`9xv5$?a$*9HU89EPaFm}*IeBl$CZSS1g;ycybvAk%$OLoE% zAwMlFZ^D6vWod=P>4flmmCxPrl_E2FZk&o^VY{CpG&E|w$tx;qT!kJ9 zw2Ez>9!Wa@reJ0FoaU2`egq?WzQN~q=J@!7t;eFG%VXb1D#T*3_-#01u^J33_0vd6 zK(GWwmVrTZj6wN?{TOfq7x(3gio3qyRJBIgs3cwE!Hwl1&~0t@DctQtVJnq29Gm=7 zHW^O6I;zlK0IvY~aIvwm0h9gp^&9(4peqBh0vyDGAT#0$al-ZuSSV0lUJg1{cz~J$ z{+w`2&(F7l6+ke?D=vNvaQWjZ0P}7XiR~dF2#!<^28LxYdw`n(*c&iDgjzNDY(F1N zKzw|_tPbuI_~7C1sUPwqEtIPYBobKV$?6I9dT!o;u!;3BY2m%31r~y=F+_ z9v(GdB4uf52`UOTwOD-)xNRjt_Xb=fx1TMknVB&jdvBf~O#B$H=y9k!{(9}Zp`oB` z&%_ik+@?-Qd<$jf2Zn(qLG1v5Sn+nn3d<;50$ zbc`~#TgGb?S#z4f!TY*cmyI~Ip-rL)3`dy8(a2SX+v)9OU-omj4REr_t~2@xPs~$9 z$0CQ4wm9H=UQ=~j$5Wvp+RdXhq6{sb)y18t97q;7(LxuGC@Tg1t~pE zsxp9tOf9jJ;?Ks@wNHZ$7GcuM} z93b>m(}yBxHG2kvYHTP+b(el6GFvR4xD?4mgs5u-(;_3$ytVqqrLS+Zxj z9||#`>e$-8h5J!f7RX#%GJiRBK&1x?|Fcyi_=K_WUJfbg;1kll+gTmP`h+Gcz z9-w*wOFe38>Iui2p`@O!F5K}SKKQDtszUw)k_>28K7G295#zn7-fpm;aaky&4TfZZ z8k>re55Behhz65S08v3T{dlhFl=HPqe;{hvB{?$&>xOj2sm?upRva!$=#7B7^hAtpqKM-LS=7(PKiA!Oz?NerrMi zq~*+x6k)csh2pze{Y}r1qP{B~mdNWm-@$9g?~E#l-Tok9)+zV12tU%Bp`FbIj=_tS z**}P#7l_5v5jVuzTK|`@*B?)g{T#|V+!7bcC;GpJr>36Bg#ujS?p7hCswI5;PDA-E zBBI6d@749)6=F$hHCv{y(rx;6ym31}k*%&3xV#bmWYG+xw8atBf2L?(ll!IyH1D4j>bjm%B@FL^~FHw70nrM&^pDZTLS57L~c>DJ8{kEuIrCI;vz(6&s zCkm4TeA!iyfN;2r(k(&0ZaqVU($`giNq)>0O33(=iI;Z^aZKy!h1QX@Y&I`Tl zx_cP51;-szGs)-X@F~HPI)yXUe(4S;Cld<`Gc!mOE^!yMC{3rQOFg{+x)K8iDbW3a zYEYHwC5R0ktfk*^hxKDVK2Yz2dhN!9RFw&;SCB-FR$!9>zy!dPE8?c};Oy+UxVUf=VddoHKz9NE0CG9-Oo07qGVY5V6tv9I ziHR}+M>m`Z5CeF5ZGn&t!myL1A+-)iY8EptT8Pn$TJcEvgN+f!1``)Iw|{5naW~2K zNg`5isV#DtsWJgF3}g{NOkWPH31o6XSOy6+z{AL4Z!+^1K<51i ze3RKz)x5k8fd#k*4G9Eu=ySTd3MwkLft{*koS8L?3K5-=QGuQ)wr4r^DSs1bq10OV z-|0GR={7zQl{2N=wDF5H8zk{5tbD%0$TJWUOd7i?RRfQ5Plo&@e4*YnIqR517IAaT ztC@<}kH0PY(cZ8p@kOK>T0Y*}_D|fnp~cMmrpV})wzm*}{Sj|;k%y(zaAnKRuT;Vi z3?qBke`0zNbmn$Xu?5!GDi*tV8GpLYWH}+7(sT< zQl$IHmO&clh19;83L-idHFQ@BK_`KonYr(n|4>4wE;=;jE}~Sw$&V%~%Fjmhru#%g zgHzW*hY;OIAs~diOuao~8ZnTz{LRI$p(Fe6nUBP7kL94>P8w&L98VwQ3pa#8Rj6*v zhz9?oQN)Ssq zRkP%z%H-#Z1;Ej7+WgkrNG^O&Zc$M|Waf!P3ANMfUsKfzFQ=5wib|_Y$_OX5x}0PI98FKq8!WGBjOsTbqpfa9#LF6@$Dp)oSHlHCr|xp0mwWJI2Y*Vud);DGyC znWPjfZ=pIE@x5;pt?c91jJr);?ax?SH!e?~^|W)G$afEflkk}Idj}=Jw>+5zkTT89 zwSpE4Cg6ZFfyFd5wJ-#DV90;7c@EShklvxAo&tFTU`a3>0DcO{4gO?-aEy*;A{j>1!Pr2usH>K?(g3}-fB_~jot$N7ho3>!Iw?86p=MG9`5e@K!~0M z7IOPPpfQc=W@@}`zFVLG#Rfota0-B=5E4{=BhooSLy;V@fcX&MLGQ)&r^^j_;Q#<6 z1lk06_wBu(&QJHM{P9|D_~9;i+LZ2bQ^{aqV{<*c z<%K=hgcF8GFV*n0r!T*m^!yHjgH{9sDKx^bi0N}5Y0gIkRJ#Z=0sW_XHdiK)CdTbs z#R$+(3*SJ<@8!%#><_V&KA0g!byEU<#~h{22q~XncLRp=4-6Z^KmpSCu4LD@xVUSq 
z?^IMmVV$_3qJ`5UnV9Tm_1Un=ll)r=ixhj?8u!wAT4HfxDk=T>p05Izvb_wn4;;*G zM&aW`k@CrK^Q5JbP*Yz$Z%3r1L3!Vv*8in<{Z9X2Jm^baT**BBMNa!*;{DaX`d69v z_N0<--NV4li5NyLI%Q#F`=qM3gpWpiUk)ji1KFAs5A`>mn!L8W{JlMczu&5a@u%H~;JvcaJt7iZ^s6Bk$4_hU6IquxKBjvTOVPHT4yzOx~os^W6FO&=QIj$}*p^tw6dVt|!OW@c$IFJL{n4S*$ z*^;r}aC881=4zrp|N9r}$OroR{cD~tlv7^4VhVih>PiEf1l^b9A+W&VCD0cR2~#fo z0aI`wHL@D{A;XIWH0fcpXYt}Yz>V0(UG+IqAz=Z5k7%0`vMO|@ThPahj#}H>Z|vrColtm0C4q_duwwO7+U}djypEw<>l2`a$(5=N!`fE2re?TLy3vs zfiHi2eEiRq@gK6IlM~30z>pV~slkQ0o)tIHH!%3R_9w)<;qsw))n#3v~LRIk2K0ot!jKeF(Fiwzf8c z7$$)I^Yc49JFP%x0$;MS{sI6OR4}DdPfeWpg(Ai3$V4$xW1S2S_N~t~NVt{{7*Do~ z+7dby#G3Nz?8qchWkOiA*W$PX8#8w-U=!BlaG;D0G zjH+Ec5oaRkEHvk8Wo_kZtjFp>QwWxu=Y4ziVX}U2`xf4lyx=hVC*M!dHa8KkI1rU- zgRDkIvEu*wG3rf}BjA0tmKLjD~tWTi8^c;&5GVcIX}+=aFvyvjkNFaKYPBvduw-Y`Lxj6$L3VpTf8wi zR&9vYluu}FDzEgtZ5I>&w^}#c)Krb!-q)PX-eCpu=O@1^8|v%buWttgw8Baj;#0=x zxSpTSv_CKy?vI_qoNp?J+hTZbu!6jQEwC+z=2K~<)sp1MOUo0gETO>qN_tlI25zez_nNfx72ah@^%ZOr5jUDSY>jcmwCIm_Wbu4oGTD1 zXr2wu&j*8M8kPsYG~=0?ngXIiQ{dm)q;JD>bMcC?C8WZ_&<--Q-i-VKp$C}2q_VNP zx*F^znosWB;TP0^>RMgh>7U1iR#rwvss6k3^MEiDIO+94kDZ;jmzJ!otqZEE7J$(K zh(8Qc;rpF!9#9%XgIO{L1ENjP6GH9n=T1Ep*IdK5A zZYJ>c^&m2V{{z;I(V$WXdLjVz5MbkGycOQ)bKHa+V1zw-LcAO!7@`-5cBW@%Z#+5yKefAXCJI}^PIw6?7HFjxoK7%&kn&uGZZmo@VFX^Sv{=vZ z@Qu67jr~@{dHO%emk^jzQeJ0efx_wX>S{Ka1wLK)JAkWtdMSm4Hee`leSg5DQkb5e z9#qrX+TVeBK2*R!Prvj(47Xr5vXl!) z%s^xdg()N}_-)`eNC6bec6u#+__2%(w)kQgt3y(RwjZLXfy>fyNQ;a>=60lMVR7*q zj9K1Fm-AoR9v(Ct%ibcB#X3;3T;(oDk+A<%+OC?O9t{G0mv zm4lAX5+q9DJH~L1;4R6^hp&&KdBj~kZT8H3gVZXRT}~f(aYrUFF@|4Bsr6G7yB@h{ zM%z_1hTXi=ZOYEY{Oq%wojk(e^j##g_OG}L`6Yd?%Hs3-%VhkX67Pj{ymH8v8TcvT z?3a!fXIz`j{o4PZ8<$b{K;b^_;)p^TxAiDdXtaA;r7Q=8mvJHnMn8X|Mj!8E7XC0V zJ2DX@Q_5u<|Cb33LO3|8MNG`GbMxicQ&pZ9!ok-@CRQ#AYFPfw(-L@+(N`CuzESv$ z9;Y3{N!ulYLir`Eql=edfk4R3z3eHaa~U|=mj7~wg1`OEdlHjRD2RrvQ36M&`{fR< zVUvMKxxOU*Ulq%Hsy-SmRaLy`vQYhhE&!FR-*X-=*^-`56LQsbv>ij#Z}P)3`izVO z-1%h9c|r|IYp%AYlGKcAsbV6~M-aS;1dp*vzoS%`Jxj*_>p#~V`GR{(q@YBl|8Z#X z!2NekzUj7ic<&GrGViJ&7qGo42!HBZUH#~b022YjXC&aPG0=0SY&r%U_jVC1?ox+3 z+)d^j{w@0SR@tj#N}YG`E@vou6G~KgLmAs&=JkbI3J1kDLUMF)wLPBI$mo8zhH$I; zJV%w2;9^?#-gZVKjcaA{&PxIh??+S8gM-FbEDa4BdF(isgL#+dcL^}10(8wZPW0Yt z+5Ny`B9OjL+F)~b&is%h9VF%zON&!DX633CBz1gz+dpQ)ZDhK?;IE+2NC~eT%9$hj zr119Kvt)j&{OrGkUS5sPpU2|zLYSmU7;Q+n7}oix-tW(BwMyeIu6hKE#_RTkE8V7) zlat<>(lv=y41d8*y!PbD6V>7~A4aJc=K;5HAOH5;-17Ir6sJa#B9q8vxg*hDluyHG z6Bwwf^|n=`q~v?3Wo@16z0R(e{@DAcc84r43HXIJVwR}%P>3UvS~iqyRs~s zLkRHp77W1!za)|f6aW>MMj4Cw_awNDmt4JW2oB$Veq?>t(ne5NY!$v&_Oiq7&RxT2HwdC`*|$#1Y&fJzy;gV*_sQ&p=%)E4$}SkA#gK z2b4ezb|x4!!pBHnnQFGZvjc{M6Ch;(*OKw^@k{7p!1aWSg9EZ|;Q5im1v`lQVb#ez z0JHTbJ75WLd9l~t*WHaFp0&8R2uJ}Z5f%uJp-u-45ESVxE#e^9g{TWeH<_SweRz*! zV_sl}^8WpMc(u?B0VDve0KD=4Azy%9!0UmV4E{6NM+pf%5G|1>&@mMO^QcD-4&X6u z0qOuOdso-s%QFvS<4lvvUD)u`))oxjt;&UfDDVN{$le}u7Y{r84?rQHpP12l4mnG? 
z5P$tEB&PoU(t?75wl;G-ez0?xf`0+@y0bH1Q|}OfHt3}wJsGntuPiO`!*hU-CnRyW zQr^(K@tT!lMHZBmeF04~%)lEP4cPXUS64yEQwkDo%n8$Y(;?rPz7g@^Fr3T7|7E0q z&Ka4R@I)VW7C;6KSqricc-KWmL==^jG~8QYCWtYcEnw$BX5t=du~8BAQJkyHhlG$T z#38zaIo+PV5ZpO{t*0lDS8Y#|BW`1-;(&R&>$%6H+{^Z0e4_R#>%|GM>h#st3EO6H zl2V3n7x>-i8I?T`B%kBPUb_Flnygsdq9$5hnT6(C%;(j75Wi62Yg+z9(U7#76@Le^ z=eQo{OenV&e|>K{2=q<*E`0dchl|V4MjBd!bItWuty4tTp64IbyLR})ggBmm8T8)f zyM?N0-s7jZgN5daCObP&{1N?rvSfg4qB-y6{^1`6&XkWI$oX}dR$bbTa1x4`QeLUH z%SIGWH6p^)FzH&K#7MS3F<{oUFTN^fwr>y>i8%@Xbv+wgH&glc8hb_7fdlu&NTjTK=GuxwW+5s_l-3>hB z_jM&C9SF6Qo{muLGUY5Qvs!m1;m&VMFto_e&n2(g??n#&8?*n^&+pG~gV;FZHZpb2 zHnGrR;st6ZcKke7=fBzE$O+VtNu(}ns_BFxhh7gi?)L`1)hcS;T^={1-yzc`C-_>ZH0BiEbh+ zn)J+(HtqC=d_GiH$o#{AXw2SN8s`A??|be?bEv5m2G>gnkT zrErf0KV1fS-So$}BE_iIZiy|Ocl}=NPf{-Y0{&sjQ4^c7C;zS=Ha%UWl~q8o z$_J+e(0KWuJV<{J%~}|e1ABw-ZXggWprwbFWTycaLBJk--PH0jP0hONE_tL`=W>rm zcD(-AK3XgRhMZh!TifEEp8v@<=jxOLj*3*YgU^v(L>b!N-Fl$!h!e!Q@zPtBV)f5u zc=(AJ7bJ3Ug1T`X_R8TGmctA6KD8SZm9O&H<6&VL%uuSDf0cSL;e&+@8PEI`7_4k~ zdmq^TA)1>L^NkuS@iy7<(xN@VAtO)Dj?n+QQG<$GK$mxQQCj6RvRU1j-&)byN>%oX zXMWm+v)+QbAaqZ~;(7>w_{$=zqx121P9JQrVA2C}%7_zgE-oT0 zB|xv#)nWWN0FpvTgp<-=icz5TXpc`Ae~aV27(xu1`x_T%z|w)xDk>>?36iLrs{*nP zcv|2BLjeG$eb6Gp0%|&KsFBL_ zzkZjg4W>RmM#U`xy8s)k*!gu9H^{AQgITnAmzLRkn z*T%nPnK%EOo8$dE_O--;M=Sqxi;&)-9DGIx`^8x|jCO4I4bueLo-x8a*P@b|73?gb zqirPx6hj3>b~ci=P50jbf2NN4pW~CmQizyWlvV}EA>&4&C>QMgCFLS4EQ(4mlX0Rm z_htU6*4Fni81W~~7H#Awxo-U^kix|^yd5KX| z?i?dbxQ~y?$Ybg)k|IB(CRbG{SXdAt*Nb2v?$`6S?qE)!&x}SX$%Na=GJSAS*B5|C@XtgiZP{C@IX#pQg)v%^gJL-C?hL|n&fRzzYhNodNE8XsS#r=N>m&=X7Zc_=^!Pv2zQ!=jKnf>I)Ou6k%Lh_hAyPHL$VyjHOZ7# zY!A4C2xC^8)vy|bm{_gsSihpXE)icq2kA{GFnNg_o}3~=i=%(MY^Ch8~9~pgy%zTd7xXQ8ynm71clRS zem1%dspR@iOV$CQzl}5&+Ed^BiP~K$5-t`2Gpd2`o_^x(1&Ho8y)|CE^U~Rp32jIg zyN@PJ&n^ykL?nYFn#BtH!cjS$e0Gafct>@AoU@Bl56rI~?&uq?tPYl#)>JPJ=5_as z7IQ9K-89kX&$^HSmD|8PST?J{k_idjvCuK^oj#ztj``u$6#SxIe|y`{i`Y}lQ7A8% zC#s+^uxl;O$fPEa84Ha=h&C`nm${eZ&YiyjxaA9PwP}O@W`6Y5VIva`w_;3EO;X*U zjdl|O&~3UkGV0b#f`iSjDBSz+N-I`#gdWKh`&#(yKAuNDkPdiCZO6O3f^BL0jS}1F zs?ksH0-Y}eUzgnzjZ$WL@l+$LYsYk4f9%H#!(VjijOlet3TqDj(HVTSQ;I|x8QSpPnI6M)2 zuHp82n;T5h<_7Pr`m{Vf?H#R8s7QF^iq|*XpoMi5a;sWdbpRsn@BNddLu#|8u4#|1 z6xkACf;`(Rl)p=)u*$CF@w#){%!^c^(_QPwLt&p=1t=S`D)V%VLfu5>{~`V%YH(>o zdaLm_yyMBf%wOFO$=rXMeU1_to>^zydRw;4dW?1ZA}yP0)l>Q+{KM4JM*sCE!STEP z6s*_Xvy?Ax(OLvcceS}CtFp1nm053kpP!(+dp!O}AAA;xE7Loz=*I;JAw(S!%v6>6 ze|XJQk>QClHowf*zbWuA3S_5|tYU$-}ru zIXDZh&gT(9=t`aGis31BXOMH+y}#dnF#)|K2r#y5I_tZYQbu%{AVdSuHa#9a`PdTjg%k5Xg+u#`xk0Gvqo54D?G z3Uu6;s7CHDU!3k%)imZCx{W(_y}$c};$bVfczoP1f4re(v{vEtH%N1BU)B?+&+xC2 z;%NpVy|}Wz$7jOrbYN3qF@XM>O!C!2;!C{m=T86{$`s>I{PMNCCv}POY{xF)T6q{D z{&D1I-g8_f(=V8n3qWT zqvqa3Hof$z){iuln$J?-oVbk{nHarHy{9Dh*7o}e!Q!%-vUnm5fuzprCwY2A$Cu2E zzV1G2yXSG7QBmEJ1ua;NE0fU*47)TL8M02{i0Bs>W&Pos5B5bD{j1~@6o|0Ce(jEk zCN(&7a^w>rVZ?cm`u(Kff!Y3_Qacnuhaov)qH3uC}x|R>!xDXUM9Cfex?%!Ns zK#n(eagH9-8Qniw9nP2eqyZHzu9zy)Pv^V2wa7b;-xHL;c9i4hqH1Z&EmhSRQheH@ zf*a$nXeS~Z6t1r}{bUu3gg0_h@Wr>vGLi4wlxvDg1!D()R7WV69Y{DD{Bk6NqqX`L z>8;Ff5YF0N(iQBiOp3A5feM6bF*st{YH9WQ4FGnr>_z9=nQv^f-{jUutUlbL<+gX>@i=Q*+R?-^uGoBXMO~&p`K5V8g>#u@wPv zZl@!2gOrC#EAbJ2!WAloR^w!sEr&fM8bi~EcN%>(a-ubQOH6-nD+fzIZ&O+bb+K4p z?Ow&4{=x5fThmQr^4lIS^bsT`-jY2L=sQiW+KhL#j}@A zY){8TqH%LqSf~V2@w|s4OpB9}GW70424BoW3%EYvJ*c>-7$3j2Ns*CN2Qouyb?F^b zXDaVY^t|Hm@d>;ncipWpmwUkc^U~;;B!ueMvOi3?T8>qVTkEnaG%hcVFGP4OmeXl} zPrTL!kLUe!XF&=QUYm8U;ETxcz?O@{zc@591=UIBLAgKeofZm;GQyr^eh9Tu0X`3{ z1@yLUNL8QYz3$#MxB5nDCfYauFyr;%$%3jj2wjyEA9q^Uo3u$pD|WG(9&CO4IV%}Y z@_=U9eiI^`rf*W$)NpCbqKFj{ovvD&~2*0uATz6 
zirSYeeU6EjwcsTR?mL$$snUS%{9D;D3fq%K7vHY}myMhGNnf8zzbVLh@wsyTWp?QU zjC30pcLB^Sz|sO@KR8PZJ55%Ae*FewoSO0le|6Y6YaARL3_XCA)zanP!v~(^_mi1e z-i+P!J8d+X+NBllo}LsS{D*c19riebJO5370FyrGkZ$-qun+3dqepS}_rc`wo_Dkd)KZ{=9f#po4?ej%l_`+}oI;Jbb~>IjY9suRE_1OTPY3 zSf2VW?S{FDyek*=4qfR(#K!u0v8$6<+QjMJ8xL9^lrJwXj=Zz=B{;T_ueSwWJ zTtY(Eh}b84AAL(|?sK*N+C{3o#KYJ?sw66{qjcRZdP_$})h2j*@gvIk#Aq@0`K`G` zJi{4uT%@zVDw~cYV>aEYn_DnQwY`IK3z_QbMT-g0uxnYE?|GBpZ3>t*qgpyf>8r`* zHuW+viKL|>t1_LP={{t({eztr;S$r$@>bt4rUlWdS3TtTzV&vqnb|0*HF{5F+v9LD zxi@R_b`kyveiAzO<8vS7sCq{#)IYpm2~Fx;B5^h-5oF|6k2JIzOS|kHOQMDyzYO%u z$vsTQX4j7f4p4TIIw?|GRY$+-APK%Wu2~yu zwI#ppdga|YaSgenk;x^(cE1H0*I&tt5KiFoq9VJo6lJqv9^H;)<<3=>Qfi^?=7 z!{h;D;4?%>d~k4nbYxWX@7H(a)nk+VhM!-`l*j?G^|0>k~ z$a>f%zNu|!w+djHq0|afZFxy?>O&ICJy{cuwZ~`Z(S2CW(aN9W4L86YQo-YYK+=-O5@5LbGIuYF5b8Roe!bFS1Hykb%Ym&!UktVP2P?|Dy z!OwyQLEdS`Ysex+LmyVf&IN%owi1+h6-BVy#TG3XDK?T_D=y=5O?~dUk z0qF~5U)-)e{6h>8-^L&y08Ulq>(+U`VB?HS=;}%3^|9RR=<(`*RAHLfCcWhQ<$FoU zn~X}Y14#nRy~{g6L7f6Aa?g64KvAd`3otINi*niOs1jtg+B;5^a4uV74+tTJHfQIx zsqp-Y*(m{e@k%UL+Zm8--Fp{ zGdT8v{UWyT45;+NeeY{(g0a&-)=FRCafiTSAm9VgyNtqY2DIRklC7|R5_sR>3-paN z%_l62qA}##l!dhw{rlU`fGP{#qA+bUH_zSe-g6lOmJrCP0eu25Y*;o=)M#*#^5qeH zA=I@i_G9q1!oqb^^U8|4yyQX%>u%LVt((XKZPyR9;+OH>X}rC2$eHnaApp~sd-*$2 zM@Dw{{OP5lqDGu-D^}X}t8n`2N?CiqPYc{#(O?nJx?`hjyDh3?0Y)TK1)Y-EaDj)v32h+%*ywUPR`$tWin)7Bo zTZb_%-z#5pnRgVipZ&!r{A^n8_mtYE@9>IVdUkNqntS365iJ*9R~-3MWMU?BTPJr&xCp4G*f|Ny z$pS7I(qX9GV)BZEi~CEUyg3a`ZY2*&W%JtvEkzY@B}l0=l790>=<$aTI;aMgeWPA< zbb$wzu3qW8?rFse?~D}g*uUtgaAT^3Jf4?N*7{Yy&gayyOdsm4NawQV=QY)oZw+H!I1K4I@pv^t+?qEo_~JB zd_?tbPKu_ISn##=4^^wQ2#{Rl8!Om}BtONbZ4{K*k~Li{G~#o0zm)`XM#b0nt_ikP z)#Z3d2^qSblP3!hJL>AC4~9)f3o1X`+9EQw*?1Y>uqGr7XKm4QCAM37js#P1aHdIld!*dAu2 z%SGJ-T&mp@Euo|e<-E=%SAYM@^IO*2hc9)lYYkPKz!25GD+Z9NkjxeYIl5)1L zoFy)m{zTjVa{*qZ=DX23I=)O{ymS{IEBWj^hP9v|+dQcIEGJ3&aFgo3^8B=H zqU=F#ix;J3FXzUVo)I|H%}Xyoxj5tRP-jGJZbBsdV!T@4C79K?&Kdbog~4h>mx-WY zw{T40@-ctVy;5g}XciioqLwT<1;xDa0(lt2OtPSgOG)+I%CnWCRM)~W&h!h2d0<#& z_O>`Ov(9}-(GiQA+wTeSXMT~c($dAX`!H$O5*FX7+uKer~M!R}l z6eeNM*&93wqo#;<<$tVNOB>06#KL?v^Y!cYGZ%EI<$jISATI!sZ?PR8z+He{FOGgQ zR{P3U8vOf5@{J#mYM>zg`J>B`3NuAusuwW>M`CMYH9{v(pR;s4cfH)%^%sn@(rxTeDm(*^G)7L)tQ-J5B%t<)IMflh@l-)*kVcIP{1vZl!*7W<3XRhhPD71RV05{@?RX(eQM z?CfGzE+aECnp0>!X3h(ew|Uk-hWa?H`W1KU!B~s!@qqUDgtc8_4)vEW43XFyVIc@@ zf$8epTHPodg+i)T76DlBO;6Gd1u#2rMKGMbE9q*=~h&*v*S-(BiFf%1R0@ZpA4Aptn5Jwy(>h)1>8U{TM0iF{TZEnxn)M`GwHFM8zzQR_(lVlpGu< zN+w-_(Qchc%WgJ5=8R0n^pq4vVKPBPW_(=-%|iejb^ zqFY+WD{}DUA4(FZtGXY)snvaChD_{MDW>v}Ix5OqQC1ZT7#}|j31a%+&PgS!Hq+8; zsXiTfj^|QSM|kS^-8qs@{q=XB3kPTW2%j&E1@`i^ZyHG|Psq^;*)nBe-wBk3Ix%mJx2;U0B0GE(PqA$5Uoh5Z+Se!MV!S2mYNHcoHoM!?1EboHv3IbTste4r z?-ak>E?HKZHAjrjX?PvhSz$5XZK*d<2lTxjLbA+<_HKIJCpR}rP1DfLo?CRr zAn*Hd(pwh(`By)+gypyNv8^QN|CJ7TXkzSR66%iaQjL7gj=b24BroN9J}LT8^CSI- z9UdYL7YBL~QckC2L|iO!y4_Cs#PU_@)UHw2_;2s!h*q8VwO2-1xupEXjo5yeRt=5h zB)OL1)?1N$#uw1+Jgk3TX_))4+ZB{}w$~cr<21Su)PRKH@)-HQ(LVNnqkUQQ>nd2w za16d!u=5-I&c8tu2dr-@T3QP*_f1X)>Rt>eb{m<|zp2NLKAK!4kr|8xNkD`5O?D=*&#_2n3teJ7xNre$Yq zI|(F9fMF?BHr2tVq$C){ z57QA#eg{Y|H;3W5hSu^QrO;Tt~zU#mS3M=2Qhp!s-;p<0jBTqROx#-ZeDhx2NFIf z-Z%6rQxPQOfKlG0Y-fWiI_ZSpRd2>>9SS!rPdi!6%$jLCImcAh6fN>N(~VDshlYTW z#h9ZleBbbSj?`;uh+lcZmTR8z@$6idD*@1|(MB5#j4U7mj=K9ZUW+Jx+Im%%k>P$I zdEry24};=mh7pDPerr;JcrR`_IkjA*qz@!pASoyGj=xeTJ-cEHIKLefGeglf6J{_F z8U|n$Ul#9&{G_p=#Daof@nmSDoT7#gUJ(XRcg#e6(SzCkv1b9p?~#-i@%I)e8x!b^ zj9u^Js7VaaLat*{Slv(G)KT8U1hn3A7%x--y*ra%aCQFSh=pvDX-G))d4QxOKOddA znxsnjl=6m<{o^PX$ME1Qgw~~&*IfhnJc~-=Dx5p$9tmjW6;&ge!{0w-XD1>4BmWq6 z$qg@M-#19frqIa!KARUtMy8$~V=bLFZ+3P}BJnRZHgmQ|{6;XRMUxCtUPMi 
zhb(-Tjl{uWBMrvF@Ya}CesHwtf2HkX@9&$tUG{2@3Mj|LRN6@T9FGJ>uEKt?lMk2O zZ4e5N{nc!Plq2zf)V*a~ly}EzOXEgrI=5Fr*05N_R2`bY{DWU?ZLK%%d9d zwo32Yrs;6ibz|Zx7H~`Um53et5ZfwY0U!NZ`|S}y<=ZD$EzI#eqFiQ7{KM}u zHX@ct_zi|JzZ7BLzi*g_R#ux6H%aHiILJoVNv_ReQ8b2kRbuec>S|`MkUOoE*HD}j zv=^fy;u&<=#l;?%CbHjo$L12t$n8ic&orQz*(>w8<2S#ruOZ#b0dn_30ewE zA<22PXTjX!Rny8DW*KGY_mJH4uCu7$ zaw<6a=B?JB-7vow>y>-_uHDAcP^aP;+mnKiY5b-EUNW#vg)SSiK=B1W7UUT~aRb6ErwiIoV{Q<~ zi92fKn3m`sG(-wq%tH#Mb03;SG11k@)$&GB5%uw8J? z9h^4-M$@iv*??hSkiY~rtCzsTgPylSG#4E$ZOy>I(=_F>^71hF?p-O8KzrK+$z3q@ zI60RL(k!xse+2G#6U@~BfxgnX> z`S$4(9lP3g2>Rw`sYg&Y-4$P`QK|Xu&YcB;iA~<4EIDTxE3soKNFI&afFYC(cU4FW==Gl6&A5 z`&<53tG(Ul%y(Z{tR;<0;xIg z^_XBA&*)f(Jzl;#F5ae|h}VfDSW{gR3JEn&WF@a<-5$ZHAA9rm3U~LbV^q7%_KM<9 zlU3<=N&czym9CETA5eBSJ`?jY&b%!FBVl4$pS|$HCmI?ZcJF;uCz>X_6`Dp2zc#Db z-Q!}x&q}QRN4rW!mBGe_UjV-|VmSkTbB5c*#5~po4^n6T@=v^ztJZb7M&>daG$X>L zFTB^wwX1w8g%1vV#_2huUMGHL2#-f<-=U_?>=QyU2?QoqZfr1eeDrZa)Y;oZ^c^5m zR>q)46|$Kr64LmW3yzJ&AA7%bZ+hHNbIA=oB}GP#K{!Oj1L{;wR;ebJBYk7!V{BX+ zm?DPgzxsPcOURSKRV-0N(vrLKNZ-WQpLvg7Hh8YxdSs+UhFO@{$moY_M7V+_CJBkl z5pDS2>4qYNmR1S%tl}Td}8*-%n4~~XLu3lPcba<-L z7kYiK0yS12uYIK9)QG`|EK9WzChcbuV&j4|8ytgJS+Q%d1T z`&LX6F?{nTDu{Rqnc2s{_%`Yhm)Zd!Ho(7ThM;bp4?T%S|C4-Y*CsId7BBeUN z(z)O%-%^Vd@dXsfKoY8(=?n;xx>0n*l+)g)0F!o<8mFi`#$N-IJeR zTMxA9Qz2O?0nU(`pQ!b6XtwK#-pTp)eR}#vqA`~fSyNN|14$D`9#ceAD(__26cH(p zCxZJH_X9q0*~bP07~~WfWj+D7CH$_as^SsLN?g5)P8R`eFLDZK6apVzNvP$MnIdHZ z7w^RHq-kRwcQh#}2Bvq>Vj8wWLP}pS%6|4(`C&CSeAwh(9c!w2qS}V^i--*Hx>W2u zFhJlMInAKTdi=xjkcfCjKH-fx^f~~LQw>xqrbs3t;j-dyVp3JJzmH~Q1Y?|?=B18t z@m5k5!D;tN+rz^Z#1yTpva*;aK|~bmRv@yDrn$>fk`(y<1>Co|@U#S>RpI8(e9OfZ zx(+8Y-O4wpeZ1)jZunkfXi(8@f33g0U1}FvC*Bk)!2~JHch%sK^$kYRr1F>F3Nv2P zFc5GgW_*s8jc9D}CtaV~`w*1}W3V}L$6n+bw1Pq zLe5|)3K0zk>fXJ3cWwbR-&Nn$wV`J_>4h(Vaa=8U+K;z?h5-pB*bEKAFrxUo&0

MBRPuh;Zr79=Hfq9Y^KKujLqL>M@-Kz(+5^$PN3H}JBD5t65H3V`e#7=l521SF_a zKpX;OJAagwVUuy4&uZv-o$LT77gQe4)jeVO7_=d^w1&Yy75-5%p1-TVzXXmQ&=&*) zfk04AgpnJm)Z48sOvlvBigt$|#0r6_EgeGM2aT8?KYna7D>iJm&YG@{PE6lOL1bm8 z6%ZtZ`dyE`54KMP2?}wEQrjNTrAlwAt({1CIra()aA0`Ng3UmmB@lJNb44!g3ma#D zS_VGpqg@qWdC*?@4mztGPrr#F!Wo*Hc+E@#v#lW>9V#v^h0ET)Jv&AfN=>&YvV(o^ zxw%bxCh8+efPMuH6=QB8F^R(}7GHfQ->0g??R)JB6N|+8ZGo8Y2_%&yWbg9X+N}hZ z2KEiGSlVl8mO2J$@p17ce}|_2x1v-t&g~r(#@rC`>e#F-Ov$LKDXY*Ir+3-SaEH+t z82iXeIrHLAK#wF-=3d+U_ufCKow?V+PU0K^*-_UH;sqs4nFOfP7~@YOd|W6gZ*w5{ z{NFuFQ4r*eYK?++MOQ!rns{D#q;LL>LB*DKk{gOiRU>7O#f7{Qk-$LrnP=Gs!L<}c zJ(GssD+6b_crYL%l{7DFfwx8gTVWqpzNje_oNC}-}eY(2ZsU+tIscK ztgIhwF$dh(f6@HsW4jFzF*H!;I^L6og-{jUpPsej3GU{0(o9+HX~VUpnKaUxB071J zUG~a*fb1>$#3D4hfJ*#ss-Fs1adm|0b~t($Ox>_W^b#=;9ACAwXA}wbaj_X~sE_z* zx4B6sOacltD{vY|lZ}kpJ2dr0zH9XvPfdRF>9RI99S064CuM@2lek_I9G!mn6yB=h zy1DbAF>#ONxCQ3B2z{@hqt}U-;mg@@kbHzADb;@@jCNVT5A-c$~xa+0q>UAaqWMZXT)fO(u9wK`>e7>ibn*mDQo3`{b0} z9)rloqws-&&K$wMJ|Wki30kG4`eq%K8c^F=y6~K;>>0p|?ncfYZD7kKF_I zpH-C&308(Bm_t=0A6%2oD)fH)`9ruY9o_etn_qI0N3^>^J{y^4_R~0}jzT6ot%Fa+ z=xRuk80-DU#-0r$RUJu_x!WRd42>3k8VI=h)YZ-V$$mE@Ra1Z0CO36+VL-_;GozG0 zFqAGLATW%uUOWE&llo$`gik+N-nY!}-zC80#37YaNUki;S{CZl{q#G$w@gnb@cn7-rnBPb@bWVFdqJ2qgK4BMJ;^hAWRWl=2nAev^7zQG)U$Vb z6h(!>*LZk^3E^h|eejMsp13v`krotm1K&5$o7;4)1nM&!Rgm7RX=p?}VYwG04+A5> z!L}*U%IEmLGyc_ z8;zvk=deFSL-Xc!Z>{lsa^9JwoPvTUG@W2(9NP6qbE0Qp{QKgY3=5GQ13^a5?Wk5q z2M6#T&+A#x=7>_r0I9WJ7=D2Xby#G6%FtT<%Wxl8IV;T1Y5`m9_1~O!GTln=){;&jw zqzRY*v}BaUn+Zuj6tH~GnDt?{ojEdg%qwcdhCEV&$28)J%vz;~y&ZbJwh;3xBlC7` z<*fOH5{~wVHNSp-p3`MWdmmWb&BjTHAJU%Dm6}zVP;687lrbC?uVBc3&w_gfd` z=_A}lzI^?Jl5yR%kdsY)AFUUmzMcJW6YSS=3{DH-+7B| zG6IbG&RQ?|teRgfY4%fD8F9R}3}OEKl$hA5Z{7r@@^X--WPBj{^0lHAJtn?F*J0rH zQwctfv~B$D4QbM-OR6kCt(fwoPqzt7=APueS*`T{QO<%?RLO|Yd^}uLQ960pIONw& zlbH759(*%tPsQEK%B;6}L)$+R_)D~EbiS3`Zbed}?Y zY+6QE{u?Rd&iXJV7k=SGyvRg4-?ye6j2RIk~YaK7I@F^j3GxCH!#B$K>pN{ox(b4%Z~5RR&)X1kRW2b@>E1&E**L_MG5$ z*ENz>r}nL>(25xt^!7-2iV7cP1Z`9$CnoB?vRCk_ef|`Y953WBu;6s#+Lg~;@1Mx5 zSQFin3=@^LygzEsRwnG!VQaq8?S=JC)IM0({ms{pV1o!FPUk=;$V;B^6>}S z8a?`Yz8V@ZFaqllC5ORUttdY^Zw}7Gtqqk9Y7uyJtAXyq_GHxH%H`8QiPzQSxnY;i zDoaOUz(l)p)>0RUoZfMLb!JFA zUu~5C6hyYk{Ic@$z}O8&00LT!m9zUmngjA$V>Uuk(x`j<3-`i1<^(|_Lq{i}V=gEl zparbVNl&<|LWJ*yH9LYUL}(~ZxXg#}aC`y&844vMqbV3703g*@{Cs}BV=g;4cXe@* zm4l1y5S;nv|I>@A`TW7Ubr?#68yUat}qcke(lr-oaV#U8HaEN z6w{bkxRR>ln^4M>yq8%Ka%szwGY(}{8_XcRyOr!u4TM#Y7Cl=o?5H|;MEewWkRZPv zB0QMN6 zp5DpL$+=G+0+*ii_3N>jnFPGkEgmL1x?1yl)B;YKFYOxm@$qkF#1lBHznN7Tw>bIiohx+AGzzfn7Ad z#_Yio^0Rcn#Cv}7Efp^X{FNOGYoPoI>0DyxA`9aESCTJzsba5%7ytg%rw;2M9&SB# z#mDre4x4+Ia_R|o#bEqHK7urFW;cOzNtN`z? 
z!N9}U)lz=8ug_L&y^a<#l5!ITr_#TFwk0~a^5*MafA4M#WTn^Vu&P(-YV_0;vO_XfhlRvkHae>_LE38 zwaDR>P_VKE&-uSKgh?G5FntAigB^ceeSLEn4IGqJmFWKU`7q`JTOW)IH(e@xe9)ln zQKZRE5~QA;0DoEWYHQ$)#rWSHSD2pF($iB!!gMImIbdQ^Dpe^HJEXJBC9<9oS0kng=BXH!xRYVC&k z>n;`W&6kIYvVjwBaNdA{kDSRU6Q9QS`B@Jo>y&8A{bp@Ub5QsTf*Im|| z+Hr-ZlD4L%02kM%E^|<}2G2ts4pRU617}?F)UKcHy3{l@dFt6Gp4E+p!2gE9S{O6U z$;#R|aEAqimNiTfBVqIwTGHo_IF56u69PdH)CgnC@NqC^3&|Czdsv-nLo*wcG{$;+ z8*C`72o?Xw1z^I%0LCGY*QtAP&@ZUs!9KH@R|oz)MxN;?&21Luhar$cT1JVJ0jdtA zrO~4|Am@{ng+bXTEJTzzU^`Q1Sp*}>F>R*1SP-C#4PlB(C0*X2UzH7%MJIeo!KjIM zKUF9BiCvj?P$L46PlvKbLxZ!SAmFG!wc?H$VBc{)2_$A)tv>S%K-RT4EjkH+pD|>s zpmPEGlb~%nYQq;JAIE1a0-DiBTH(!8IwGs@4FEP_8nthx>g($}_u#d#dn<-OfJ2ze zR0toe<^bC|+_~%C53~CU#x-Eu(BiSMv%|i(CKhZ3`zHp6@=2lzQG&Cg?CVj4=Ui7u_ya$yZyO>DUQ_P%*p`~G$B<%p(9~Pv=S7e{mCHUK z$f~@~?Asiz&LwXnK^!_i;<@zirTq+JeckrkRICU`_Mx*5AM1uUI$Aj3jJ+*t6XK;B_j^4)npH@U%a121Fl}n^{S|~ zc)lG=iuhjGpr?No!-9g`cKS2`CmJKxvq~Z(zAMbD$@Eu);)qYRkBkaJV(=UoSq;$) z;Td+YhBX^8JNeSsuFIe9TVxvGMYWoc&6@4lZowO--Zb06G$b>XIrXa0#nw&Z&(;_T zJGgyqoU8N$6Nl<+noAJ_I`q_GW>3$2hk$=_6<4T+qo;0i!=(fydb0DXzC2;^`(}9Y zWkm@w*85STHr4L<{m&(6@xb;6;lNJ`YAw>MZA&{Oq@>XO7+Y}i^Y;hObI-gP*<1>D z5m6SFpN9EZ90r)8?~w=t(&YR>tEfETo1%X3_ZVm-K_;1O4kzzgjIEf%ZSQzviTZ)4 z7}^yH5$v58-UejFYNJa#LsrGfx$Qd=S*p6>Ny;^bUUK*#!%x0hcH-|&CHQ*nrp|m` zxEQV7N<@o3E@Sn$m!yaX#2%nF-*P|P`mJf@Wt-IDZ!(?xp*9^asbg=eA~W~;?$=3o zU2Is9C$xB_%4ZUnb^iEz%6Pue9=A)@edsMQXQzJI^AZ-Ao&0-|UB~--afdJYT}E@c z_{Y}ZTpoDvR zp6Z<)YQ(ki_U>HyccrB^bT##RyMMH7+n})pGEJAG*d($8R9F|6md-Y#s^w$v0lI0C z`TSyG>^vs1UtH6VzRk+&IvZogAT*|zPBiv0oORkEAhXukbGarwrwE&G)Tt*~!XI+b zCmMwRKIX5hx8VBviO}AN;~O#kw&R$y$op>}uOW#tRIQ%!ySZ99d3cATP=$GD{&z>G#U@*c<_KyX0?UFD_Jj4e#%So4L6;ZAwMwyR|!1 zX}A;ISmoFkxAEV@F1&x@iJ0X2{qiF>lF||N2`M_5-n5$M+L?mE8_!y_eap4;;O@!tyc^Psux zPXJL&GlzOn^I}zw?|ix`eDPxq7hM0vk6c)^HCHcw#JY;He(}?#`b%Ih{qObv{a5RW zjS2^Ul$Vd%>^;%Axch$vd@kXNe(YH|AC}+R+JadK>0mNY7j=WPd3C(nEAxL#uQPGJ zA6=GKS9O0lSpeLF&-f*h`^~38E8z>#8?Tckz3TP4n-V$?|9#KbOK4Ywj_o|F&vu7x z^S*qErKCDI{7ym8Cx_TXzoKPHu{!MW7cl;S(9o)-+;YDku@~3Zf7L13pC%Uw#_&+R zN~Y5r;SYli++YwbDZ4TJk>jsDKlG#vAf^q;{ddD?U~Bq=Ixr%oxoP%B>4+_niLdz_ za}x+6!tKM7?7Mj9Tt3)^^^l$6D~0OrC8Rw##6y6vO2tRD{~l3YcocBw_KpHj^<3l6 zei@wR|F^x5WzPQ+1hMeD%BbsKeoS0iU`&Aw__-$Yf7eQMeyxz{(Wab<*bk9nxTns) z%|F|^`2w0Z&}ktH{PN#pWPAQywZ5AxSz0bm{MpqiS)QlOiNp2e9d!D zFaXKz^S|fQ=KPeMa1j~d?Z1a_&bNSg^8OUf z+5T^yj_*Z9&vMi&bs?kuzuT7r&83u^%r77~nDT$ySG$BOfa+pp{~vo2HnlcDf#uoR zK^=4cK0bE*=FArhyr2K>>t9SkP`m?ga@cAB;z246;^EYREcSs3!=2l=Z!Hd8tSOe!yg07LZwRZ2uV!!E2-_>U z6=AEOIBU<3>|JvbcAIV(HftYuXk~{&s2S7o;wy_{t=7~NZ%qbqUcd4NcUGZXm_G8Q z#YnjvgS+)&gCKX$;o;$!=g~@X&VQj~-50(SSZhBAaNq}%%R|HCl1Cb$6pxWm+n3J% zFUl)GMiU8&bn-}giSOgj$QeP4tPyg={_hGGpWBZQD=Bq_h-{Jd;vcY#FxKr%z{C!( zYj8Dj?T$6C)*)p((9r(x=EArlqz#Io_)siFR(iy7_Ii|W`=&AnnE(0utiM9{zc;=C zZ)}+8x_H~q;Pm-XB5u2y%ZcZ1qlo`j)hN0MM;?Fz=SJt9fAIeOf&PA-z`9+de4@~S zFRAW+J5{N7mV-7}8+)TO1C%EAmPgJyC2&E3DY&_w7~|hxt<%&}xs4MFHl{ zH@9*zgBrz=tl{$%d2obyvs%EBL`lbIq@E$ofevy{7ci#S$ zH&LNUfo@ygjBzD7*`;F0I9_XTZs|S8UW|(_f-#v0EXx-%LFYU8IoV%LlMb)E8Lk>R zXs&05uu#zZnbelWrPyxycPW{KVh60*S%&C|g$&!xf8U%7>)*nhCw)K#pcNpYVREtVNgS|y z1ECrOO7=KuBylgHWy-M8?F(oB>-xJHxqP4t0YNW_xO@rMUSVyjt z0nE_1CLti;Mi%|I04?WtZ4uO|jJXH1LTw6LA0s7q9`BsY-m>pf)RSA|NLtIkD&0MP zw4eQv7qPgo@M^sl9opMFfVBPKZ1nu^S{}f)EJ9KS2n4!NqW>-xCLWcQ1$~N^^yh-K zDzU$_?1H+YYOJ&NYi+m6Tvvl}=h{jcg?-(t_{Lo5!5R}TOi|>(_{EEzchnbKqXtic z)U}j^8YXc71o?V9Z}HoZwg?TrWUGl)m;W4F&?0$+x7u(NJ64ozz!Ke?=&oB)Wpz$DW6tGNVV^Xp)@^%$kH+w2`Aj=4l4`0n|j4ij=S zaH*yo`Qb8tKI&QCo zsQta^F2c0ksQBME&!uVcMx1&c0}1z?48z&W;)>GL?EY2S6t1ifqxx<-se?0BCP>V< 
zvFs2J_ylIL?V;WP2#)#AwemRi+XE{|BE~3UDPQI+N-^9HSF@dU4a)ZoY(d>{)P96yE9m70G7yb^Lf~SsuYHKxw5ElN^O&l9nz| z)Pv+;YVXX#uy!<+J77M`yO~$LzloK2tv!~Tj=Lf~&ugM7=dyDWLH@f|>*uz5MWA7$ zmOW6?YFq#y2N?N@B+*b)i+~PZh?HYa7|FlS{)_qWwLhI*zs}27m1E#9G_j+ShE(kv z*66pDdvY6Hh#K)86K&a4kRlc-vDNnerf^7qKDv*rP&jV`Ai1Su)+)$B!OL zDjj&EcvN>E2o$*0R4uyuj!uDOx2fnkkr2QD>LYMCF*Pws%gER|aL<1l9~&!RJp_%) z*}bE?nHOcKrvQA*pKxb|4(C(B? zvHtE>(W)G=`iq=yW=q3ru*Hv~0<$WG1qG$r zxni{73HGa_V{LiaseBX>4uX222GP&myDx6J*-gN|qGcF{Km*BRZ?wd1&!$-V1Rf?m z>+z$#TM_g#1xyiXW;MAt-=kg8bJMU+l4U1g%u+)-eJbml*clT*+?S$bPAO@R-`hX& zA``Vy5WjBd^CP`5G*U{U&NdZ%iQ~yp&-Lb!8o&lC$;2`tVudcfG+@?>q zA%|qC$3L{1=eZmVOCin5v8#W6x)k2LhD)$MSW*%_n7mfI+yf^v(#cJV6~AvUF0ABb zI$AajIZq&pSM|?_TdvL0e%9JT{>>9YrF;64dG?xCz05aFA6+}XMp0d%zubY@RmGjg z#dpsb4#>w$h4QpdV`Iq}BTm*93Z?v28fHR+Fo+$>(6xrxNghc`UOP&L{tYx*XoR3w z4lVbd1qMWdLKNsLD`yRC>;ilo7#6m+xVY&e1PCq2OcoEX0M5x20bcidg^m~bcRdc2 z4ypi)!3$prT!9G{h4MX)b~%PG;d?w)U)VWXYilF0D|`Mcn}$-DRgNR*!y}5vB;KRR zk*G3CKUBqO5MNXIiZgx~1pxzHilgv#L@YUBivby8NLdRG>gEtggq_3o>I>vz#J;(% zI%U1Ct}c6_A>sI56PyYR%f!~@Hex&;=iJh4`)mH$37Z&#RFSF=L zZw-rWojG(D+09qZg2GoMsbg+SQcCI^XXfPvTvLwWJ;+W%;_K&kJ*t(Mni>df?mN{N z+3CIo#Iz*JO5JLRfKVEMLk8pxU@~K8Ex9ukXj%3|-@eQ&_w2ZGP{pjN@%UuBtY0y* zipyU6t(+{9F+pc}*yPHau2nl%4+M4pqBCK*IE!%*xx6WYJgpxeGHvSe=5nsvB2Vw6 zKk{Crp^0N7#khjBjTdP(iGFd@)K@mGoIFF9=dz7IhZ%z$1m|l0~e#ELA|J~>;PoiqLN5({HLn#(Tf3?WNG_+yI{sS``D25Ztbz4Ya+xyLoU7DxuNQWH3%!qzNrLFyuTOzld(y>`<~2 zru8q8-g){>zxM6;@hl&t%)9Vd0ej>eLrN^LQ7M)GWfH9ssF;mR(K9j2luY4#PN4u- z?Yk(wcrS!$Yqe?iAi!QAzy}q}uI`+4D)t&RP`+{9-y2o;S-Ci~xC4nasuK^$EY7R$ z;pO5$+)>J5NJ+qcjNBfM4|m6DCdK%gT}u1cK`+VqYLvn5{i2V~6jJ^R01;6*7#-()04dT4$h>bsZi3=)^OqF{RHEMCky) zx3%T0=K?<>=&e8r2Mtr`AOWC26x0Zt{EZlhU&9?>J2UwQ*VK3czUc|GfID#dgBsz& zj;n{rIItJTAmagzTW9VvW?X8I%Fp-YI0T2MwATm!XrvS6$|#$meLyuMAi}$j$m~OQxPK1 zxE|Ham>!QeEPSQ2{e4?QJL? zi2Ck3kj?sKqFf>;w{GqLRbY5VPC46!GJR-Zd~i?}X_;?cvjAp_)yE9y*2GB(UO4{s zQyU{TeE0#4lk4kt$xqA+2d5ogf&Ti_r_pVuMn}hU)6--jO^J!r0m%1Y_5A*Q_xSjl zLm31bG1}4;(tm|!O$YuJAuI|Z>agZ15R|tXEHEJnO3Ta3OHBoMYiyv}OO&GQD85}V z$hj5mO4*Yq{kFf(?CPhC97!3QZ8N`?y=o2JxoOrRiyGA(-`))9nL^ZYOWbqEG1GHg zdfiOeFelAs+&nDE<`$yl5F^h*izsaM*l>IR8N>bwayvxiAc0p9Kc8@UM=&0ug-n7v z2OeJ6Lc0yZkhFt)I5QlDJ63Q{ zH_c_~_d&{!CUal+smTWr*|&G%+^9ltgim-S-EE=4AadWffFAY>=OzgwhpNmhFQ+qu zyJ35O=a*jv*mqFf9#a}8YMA3xd?csLrgvM!=$qj5mD=6`mb~%m)T)-LJ;#&NkxsnS z`ayH<{=PnJPkLw(pG_W3ZXHmE`J;Tq4L||dzaF4+NL=8I6cUOzAS)lGCMSy_bNp@U2ucP0OZUmJAlv76n1-f{8CiOt;mJzw)#Y8i`z2}BM;jrx z12h9%UK(w22JIfd*z}QXxd8cbPVl=nfdQw9YL#3QT>##1DIfd~A9X zKcv@UfV~yFQk>(Zl?w|#mF3F%;YrYYf)C7kEm1rknm7fVejCv-;Z9C%wYH;;D$dr;(&UR=zJAOPx;*xyar)9R?!xog1DoS2w^Ab^MQ)3^XS-k`|tU;hvZ z=P)>6Ai{r6Z20jCb`YBW$R8&+~-q3=7J4X~W zT0QmkL02EzasyVy@NRG%RArZ!51SSOeXU6Iq<--qh$IT_0Xr&X79yv?A5LE(=Y*pk z>@)UHP9gII-b=#553*xOjuL*{3pg1HT!d7~K|-QIXsmfg?XhX=)mMWz-%pnG_A2U@ zEEHuC0?hcsZLVs#cy*}QskZL(K6918=4lylRX&^go0~OB+WvU5w;QvCJ8cpchC!c# z4SJEmRv@5PLHO|p%Zx0GnEqXbJ-zj z>25p_-W{NN^qw%i0ZD*AjI+7e8z5_` z=HFJFTp-1Xty!Y~QMJZvMb#pxQ9$&r%}&KFtgdBjJpee>9Mf}T9&zxI=?Qqs(Ge{_ ztup_hyVqN51RDcBE{>#Q#__?zn&&bu8q|~faZ2?48ht(W#>rjnYQ#|`^tI5c^@yNsD1VENF4 ze5=Ru#}}P5p1+MsXHq}!{p(`jL9yQKvrzKqezBG{Wy;&1>vl0tibg?Q6Z+*hu-@N9 zyCLwnutiz#{B@S3?E>?YRmZo_E7!JrLex;JfMO8IqD?PcLn4O2LLt)zx|&=eAvLm17{7lh;oH8ioj;sPp7OdwS&m>okk{z&cEh#NtFp6{SH(*@d-kGD!bmC&Et|O+`DZ6? zOjKjT+>^uYk(ZuID-H8Bp;CqNvvFIFg=G#D#!ZqSRHj=^ySfx91Jl(a&99c>GQXz? 
[... binary patch data omitted ...]

diff --git a/11-survival-analysis-and-censored-data_files/figure-html/unnamed-chunk-7-1.png b/11-survival-analysis-and-censored-data_files/figure-html/unnamed-chunk-7-1.png
index fbcdbd412a69f3889444ed73554d29c6fde5a764..d94c0c5623ad038956e39f9c5875d8fdfe0c00ec 100644
GIT binary patch
literal 51339
[... binary patch data omitted ...]

literal 51315
[... binary patch data omitted ...]
zM%~ejYJZw0lcf$ZSlivD@p@@rF_Z9cXPTky|J-jW@h&E>C?)}%D%on@b9*w zEj>GXd*N4a-~O>*>uW1QMcjV8x0Ve3;C1aHBNI}Bl`Hq~oOPz&mF@j|m+49?lfHx+ zyOjWyd`6b0+1Xh*od3m(&+7xJj+-Ma5@Deu>7t?PWyU0Ri0p(k02|JhrR4XTnNU`G z$5B!zYiD=Z6U(+WlFJ6J|MlzF@87?NHnfEnmzJt*;2%6M9VjW2iFG|#SXfFHCGR_< zM9=&6PSe4`p;)JZNw-mJByG@?myb_dTf4BJfPsO*&d!dNmDPHt8jp-DJ}HR_Qd3g{ z_c-e9OXR7tnjXuMr&=1tLG&eO@zMU-^71nF^Y?$=Hq_PD$|Ui+?$0;(#<8=pvorFk z<&TPph}>Ro6?exl!@ey1UFnYT#UXdwo?r zmeEnio6D2a)6-DbD2>9~#Xx3mr|mb8t+7J3&z}FeHW&#iYiO9^hfTqIE*{gIuaqaB zF5)kyq!js)?!GhLNV>16s04)^c;;uDn?DL+R#n{pzUc`Vsf(LiKt~vrs6%z-ex8K; zi~B5^g88BePVnT(3yAyKt~Si;hCtipczrO6UNu0hKZ)LNDt2;!&%bw9#=P$ zCdr9ls3%+Fl&<37<$XiNs&(E=3JcS-w*Fw$MffiU193x6)_e3@3h?e)1Bqe#f3aL$ zU9pInw8~B8>zsC`s%-X_mX=mmjg5?q^z_2waHgoms>!w3Ih>MyATps9#r>8H~yB@9f#uJl~ zxr3(5O!{9_Q)gyo8W|d@X=qGMOpN5n{~I44hr?mkv$btZPZ9UZX@iZ_F^u?>eQ|a} z{0b>3kox%k%l`p1pD{dL!EmUlsfoOtoSiwXSr8X^=LI0?>ZGQo3jV~Vy)GCvKY3rK zk}oYIGq&(IZFnOXviq0l>L2G|Q7ms8p1(6OO;yxwRhv!{ex_6i3y{&1c*^a`hv*` z2?^PmuHtkeiVB(P8EG539$dy`5?>8~sU7)USrv9Fe zkEgij2DL|8nxRMfM4c1SmXgXtmuJe~(Q*S}f8yU4b%&AL8Mb?B-`?UsdjpBb5rvj5 zS;nfy$Hp;`!b?))@y19IcL|_UHT~--NFpmc6ic|2DEIpvA9rlEu<*I?DH++ubk!H{ z1`(@ld1BLw?`Kx~{xFB?PQ``bY2#dWNSb`SdfXBGEojBt{Odl-_wy4WEfJqj zXIEdqxG9mVt!dmFu3ol$={Q-h{_VRF`~$}0r~x?)+?Zk*({m!H+qMcGDAajiDqEys z%BlAHHDb!-VxP>XnnbvBCe73xEQQKe$6vIZeQ}JURW$4(6qg8R)HLj;AmNY_0~9`Hu?Osc<@iS#GHg1QxH{+q&4xj@yxE0L+zOA7NE z&-Yqus|ePBAg*3VN7fO_NJ)9|@@4Av(MXLJuts zw>rYTuFh`$+ljg{{vU}Vo_oP(;t|WioQ7xClMR~OPLbofx_j&StSlFzqREHw`_x27 z0s16VsKWF9{VN(8vbtY%3-G&WS$?9h#!NTX(P+giriNWL9$s$SNq!z;jyxqgJn_yB zSH}~*hMBvv16lTiFOg=$hCR>JE2Tc%Y2i>renpucIlZ;nvvp<@c^Z7|=)At%{lFbD zm@WZtVmw<%#|Fo(F_6mF`Vz@VNYp3RcXnoT1yu3mjd4%AwNOF2oLD~W<`c_sqyLEe{bphs7FF!JA zmtJ49MXAKc*v#*sFff=iXueAJG(K9mqDso+ELHTxVpFt`wEs6ph-l*2mu8>(WPLCl z{gY1-y} zpe)6k?;Zf4283Bk%04o-`j>)+lcm`ZTx&9Jab3lqKXkdAxKewHE{jf4G+b|Q?42s> zuZmt#XJ)KrBr(^l>YD^6nedSXik-c-F6J9n&|N|BVQ=e4`$oS3P&(ci4%QYv87Pb& zYi`~ei<-hv?dFe-@~QFrq3bp8dT9JQ@9Kev&<~H}Fu(VH4-T&s`{-Q%|_flgntOB~=kYSnRA84kY|f3s!VATj#6$(zL5I@6uwhqg?2 z7x{QYsz`4#g4BBl2tN24Mubhuz+X(>u+%bFfXwpXcw$th!ZH?q8Z6&i(pgwI^YJrJFL^A!n~ zs*Lpc@12F*5E%C#vqF6BF|u*liIaanX8B+9j_wRq(^hFZznRUe zACf?K;qwcQYX06j;-KIDmA;lQ|Ly`Y&-1B|_TO??SOiYcLlo>z>iu}{>RVbn1Blpf zCp4Er8-`N9_axGPNAve*34tE%{~Fhc@WDdCwLFDA`7oq);{m-(-RV-h&n-wlo46)& zTDEtL1|cNGpt5vZY%RLS?ilbTJNd_$^L9bawMA4O(3E;kre!+*I%G z`aPZ*nWL2NeT)?_dFb-yM^se(ak!9hXP)vsy4P3|_i=a$TYIG1g2Gq~9I71_?4c&N1vA&pM^#&Dg4r?}=;-}Fs^yEg zWe~HE#66F}n->7RbDxjdyO{CgI1ldWQS!X>^Xo6+$M#}ITyl;jJ1?(Xpgp}fRl$5_ z9b6(4iMmI)r%vf)wvu$DQCL|MbM&o5w(_*m(xAPoEjUeOOOFn5qYE$ZS5al#^Zm@Q zCE*tLg*sOIEJ8xUljRi)#1cpd5FhGqc!099s~yX$&R>RjcTEQO{o!`pEWFYY!H#4x zFg(!r4~}BD5K6}j8`R!Jcg%(F-TcB=T_!fI@@e1vLN7}kpOqxzmU9n(Daep1l)Ksb zJ9cMTzoc9r1|F}!)T{~!l-!ViO&2BciWBzPR_q53CkvarRowL6Y9cl+u|-rs!#d*O zN)I3K<|TxX8|~>}Jo+W*o8j)v<)KzkD2Xi}A6x&f>2C3akUR@+duXoX)x7JMA9%;S z*!sasaN2z<-SmFIhz8QKq^TLC(6E0Ut*!JW4uLd-R(?2Qwd-k8w6a1xBh&jdO!qbj zWX#GaSrXj)6)OOnXu(ldO60a%_G@VsPvrjjdu~vK5yBb4=A013)^d$#T<;6KH_js(%kZ?IF_{&r5A|j z@?s-XjEZQ)onK1SIfci?MX3JqXrHukP8!uCrN^?zo3w@OYVEi|7TstS`W+F^7f^q+ zpTkj5m?=(BU>*dXHuWNWep6NI9dc;7<$3 zu6;T=VplvZlPoN3V0WCk%936;%59; zb;zVJJ~{|v)NJXJ7%Lbo(yp`Xzld_57)Fpz?~kBQy}y5VRKWv8hM7G?L-)0fm_1=G z@*(7PXUv>ucm$O;%+1Y*BZ^Z$A6IBteU;f8MeO<}Dq9@hJW5@7y)&FGp3S}Or>yMi zta;-``;{9mWpoI+usgG_=vAc_i?euRtm{H3Mc6({4h2kYw%hwUA(num&$kGE(v z3a@LWg?5csC(K5O&bAN($f0m~5V|SGZZTHmbi-WDIbpmO5u^J>S9i8{jkF@bE}wEm zg6ZV6rYx+~~#4c11${x0aCt4V*a4Yac|s@-du+MDxd?H2Y2I9_Oj=-}@!Q(?zqgVFyz= z>v4{Ws}D&V84#C@Vj)ww5GoEYhQh~hY+1PY+B@4m3E4?ndYf2VGfn-pUCVqSnX!Fx 
zfw(w@EP4P%)>v?2MHfeOjmT7ZIp*Ks)<343*=!Zbt+BLG3ANvYn~2vfCyY1$l_q`E zT=>3kVH&OGG+Sv z0{x^99i873Cu^0~L7}dDd>9xYPXx)6gPXkRwn`pc16uxA8$E?kQY!uM_G+@{rG3k6 zYS6CJPKL2`jmO2`#`MQ^6}kJHh{q_YhZ4=lb6AD->uF0;MyXIA|WxmX`GKt1>Kdn7oH{6^$x6*4qG)ry6?-oHMmS!U}A*HumAdOO$C zUKggRj=OoESut#bp6~ydD#r^$8)rgb;+n*0PhReloZ;C{?qXHAp#{3M%H`S?ZtGa} zX=il^z#1a?2t+2JthbMp92FH68EL4ex3IXVzwVAqlX z^oq!ldrGY!cJL^Fw9a}?1{R;dxWPrc!6m|Q-%u222nMJtG78!|%ZdH#i=&^7#Bz8H zjII5$j)#W)5jd0?Ut=h!F86MPg9U8|HcC zWNmF7s|~a@dX<7FVL;P3gg?q*m?{s8ps|ukPBNOmH5@EQ*Jd z(`h#|tRBdA=jZ35I7Ra5Kl?Zz&?Sx)uGHlB|rl_emQ3~*aj59nZQO%n?_nnuQ$PDlF1*2viq6Z z-LRasJq9LJ(b)Hf>tzn22MBDFt@uVq`%i-P3Xj$9br_!^mL|j)Ao|9}r&oKhtE;QP zj$bbYFVFUD%*<*`2h;wt`1|`exSepj9xm6|uYoYFkdWY146pcnR3oC`6&e*-oXixySsl-WQUk~eRcL!T9Iso7_2f(dP@5wr&#&sMr*cbq@~WdlV(3; z>?tJtSGd_Z9D#JaAFMx{Pftq|%8*U|-~)t6x6|!hmHfNwqyB`1=M@@^5GE!jAP4RZ zr2Yel8|A?RAflWozx(VUo~sVA<6*4J5vJBRHV=={-Jcv&{7|t?T0PZ0QG!qS$`_|I zguK+icXNFZZS5~v0v2WY2ASEULI`<;FK+{JNreL0^*HDPQp$7gMorCXTkVL~(g7Mh zy}e}bOn=41IByJPrnLdq^7z@?7hGJflch#041_t$N_3;~|1@eQGy*!{%pewvuk{_y zx!bM(nyIBG_wopF1Lj|J0 zk;3grd+Y0OAXV10baU$~E7Xu|nMArdwPGDUyc(B-ukOysT@@?+zpWNZE@SB#rTY@X z$axG;EzRb%W@cxNWj1Whi(TC`V#&rO57zEsNggseiU&{}g$>(7$fxxCF~AhOfIx3A zOb)_;@CQ+l6BrVRT2$=r;C=zCOB2y&w?%nM?7u-F=->%~Pwll`Xzh=CA% ze0;l;&C#?lpTIS;r%CaTm58kD@HxHSZC)hajjm-dF@bjqhcc@cO9%;#wPP{6UvXto z-(9VRTJYX;dY%`T@hT!AVRbNDN~&u@{5GE4#5u86qxJ^los~J)8vH|y zOm`@MS0ru4bj6!Nn`{>5@%x#J6lq7Yv9W;}_mW~^Vd05OOH0ej65~lqb^{SwSO@}v zxSj3J);eze?Og+U`JBtr63gmvjDBRJ6UqV!XDl zE+$_prPS7uNK>=BuCr0)%!>G~D`mA9+w!&ky!LWgY0uCww(Gq{9TlSP%2cEayFG(L zb?b!-Re(;*_3D)&&=Wx@Wz;Bt_RjRl2Dx%XRaF8vH8Hb938#=H`)cskd(!JMKKD~D zSGr1zUlD>fxnYjFF~!9*P(URAC$Iq-*#wAT>9k877EuyJ&k!HA#=qVY#a^CVjuK4T zLzPx!_Vnz`FaaU+3BL2^E_~*bLktYtt-7!%T^Qpe(G#FES>S3hOr^{TWxAz?>RzF- z)2AUsJ|whs18+(;v})#(Eymbf?@(1D_qcv-ho zr-A+L84u0inp~q!@(uX2Il7nBibAJJHK%ZRpQ@BNqW9PmebM|N2>+)Rpj~>qEr`Ux z!b0=>Vy4C(SU@NH`;V}(v2k#ixSvwwEMA@wySq=8{G+-Z`c^{T=+dZo(0873tcT?s zq-ySw<$8Olr~(?we2$*mZ+3r9fa)KJ+y z;UOWm$#|A8P~%c^mC`4OYt5p8024^ZqGtxn??`+9=>6lcocIFllhJ%tbTqWQQFAo-Lvdh?X;xa+7^PcDNMoAcfR`O|=f*q)a>JQ>_M#rwsItw-b zmk<>n z6CIU59Z46}HI+64&G`7S)GEF}Tk^gbgHW0u`NM|~w;+;Y;NZ-`piuB)EJ_t_C+y!l#D zF_KTS*4OESHB?{YSH5q@MG}#5#B_XtI@OapW_)(nQUw6r8}JJL~8 zw*prSz(I4zl`gZlS#80>EmSX={!REqa+rt(hD)z`MVI+fbBsM(K{Vu|4`2l%Zo?dv zC^|bkffvN>c5JGkpz!bKW6@9wZ3i?Ref`CeTt!aH3Au8EWDqKWarjy`$+F%}KKA_5 zT$%1H!{WOt#rkyZZcmf(N-`@AZ2mH_6!zq;e6Bf{Y1g(J0|f;HVryI5k9cCrgN=*l zE}2*u7_o2lK7o&XH?Z(3fxim?Di{O>pPP`75Fur9c=+L8Eao3Se-`O9Xr$AMMt&Oo zY!I=a(9OLdQn(zSy|mrMrM{qJA7ed34m++w4~_-y#UGpx+;jgaJbKdw=nQE0sh11D zrzj{WfmOVXz@+5epeEX`+iSd9;aHt^VQ2%J0obE z_vRAgh%`3)-%a3`!vD zjID(?4J!)0;?~%m!Q9HFcC} zE~_=Xkq=n%fBV7IV?BEGr?T?)<_r$ISdE{qvH_|8YJ0K_!fMbKgo^c)u;I*Wd!ht- zy+k=UI0y{d=lotC0AvT9Vx@$iUv8b>d4wO1HXg3222WYu{yxRsyn%=PHk$5_+(Pq_ zoj=|`&ut5Yky3xKdqdVb4KytiBK05lBy+1r%24FL159QE@v zdntoJ4!gU(+MTU4F)=}aC|!>O;a1gjzfGGCB)uzQN<>)LiNuHm~oC%`Q*WkUtcSXqL*$%cH$% z`;9qH_|9y|5MZ0uX#nsaprG9C-QCIx>A3Gy3=unlo(THlfF%xWY~9w#0EwckhE+gi zW)kSG7B)F5RkFVeLZzkXuS-Nrn4-ks(G;wPxeVGGBfpbX|J8QtT~4FfjOU z>TQlYtkG@}Nx<9uhxGQYe^3x10vF~f<>6~5f}M!He|vjtq_efR2L&}1L%Q}UQ=6M> z=G*ub-E2Dzl+Zs@J%;+Y@OILv^4#rd=hue1r+@mM8XGmxAtKs*C!mPq1?opVZ2d}^ zp5qY_5yCA+1&LJwAz}2Fa^KR^)BOyMqYZdi1ZXgv*+NBWK6nI4WSTE7m|AOmmAs7S zjPbnkuok*=oP!`ujQzY<18y&_03dH0W z5*Ds7AKmo*tko<8is%t~MWWQgXe|vA;Qx0+?&M2#nl@a{;|UwK1`s7BCB??Z0!$Lm54{BiBt=>^hp@YwVo)&xa2$RZ z*wn$KKvRIB&|!#**0?E0GZKVpbcX7{b*M9|$2vKx9`WH-G4h_aHxLU6aXUPblmr*u zUW(l3F@ix)Vz*!sHp>YB*?xke3oAfuK|jFz?EoqF9qIW{>7 z6e+Sk05W2el60yUJlx#eZQp$Rzw`G58rDKMgP|nAI*Qm7u+y-4q~Jpj%)Bl0cZTww?o| 
z4^$07PbWoRhAS47SH=DWf9C0HpEiDBF1*kCsn<)Vwe&O5f>O%NCu69Pc_E)^cw!$lF zDw<8VfWXAe^a>xmZZW4XwofKuM;%R2Y!-b)H@rV>ur(3qf?_T?xnC6(j>g8TAn`o5 z4aAgN4R?{1?f1bVR`FnwqhKpj%8on57$3j8*8JjfgDX-soU?ajujxLozwTdJWp7_v z@`g``)8`KF&UUwra?N@|Ev`6mRMJZ2EBaqJbAEKbpBlBwuri{ONPugDs|jA~XaP_= z==e;TiL8!}j(lB1LxY#iLnSZT;JAs`v-J^C&S%EvD%M{90$$WFPc9RKSed0{`f0&q z80)oxa%Qx)s@v$w$w<%1Mk8xwT#ib`RWU1+;WR^SKm;Cx^d`>h0Vs*NW`cIQa!hjQR23+mwZV7dN{btqb#H6 zpw`y0{L(>76p!R!qc7mY(|CW^k{BwM#rUKxDBYQsc}vI;Q8&_H#A%dOM~<~f=^a=r{|}^1s9vJqy4)GzSiKL5u~e0m z2)8cibG>ddbou=QBiJpL+bOA86`nGN3kSBqbyW&}Iqd|X8LtufiCOF1eqVC~WV0sg z^%;3_ihO*ODN9JBXQqvAXLp34@2hu8p9EJXSSD)}opvKp(3fL$gS06@KR$Z^kj)AA zum{CxpbT!%hFxL=h%5k$kmwup@YLxvxNUq%Ewmz1x7Chvb?L}2?gBr9>A?OfwN4Zg z>IflkMe#_ji>-krH|U(JXNIeMVDBn}&9)f1OO^uw8q1&6&_}QdV!gvILn2T2)t=<1 zZ`)JU?q^+3aDoxsUC04ve-C!o=(Q!s1SFo*9eU~8uZ!hoR8~_R*~wj<5kvK^2T6a? zmkz3i`5)mhn?D~JvoPn^ZhXHrG~o!Jv}koU1Ge40)vn-JFf(92@+;*91}5EL{b^I{ zaj$0b)^Z_*#2@f1fs-^zZ&@iRUf076zw?WWgCU+2^wrg&ENOp^WDTdle~p|lS1QA< zdZlbqNSZ>f8$heZEt1I6x|pUK^(#&qP6LoxPW89Z@P3`U{rI!dWNMqcv+JYTOJa^G z&D?eetIx*f`ZH5tR7l^4sGBU_#mzOiTU1QRez;hU0$SNIDpFEHtbf;88#R}${^c}* z;f>qIOpQyi%Cd)tou{gMS=T7W#pCx#;qn`}e=6V<+b4{HDH$ zZIvQ1u<|vkqN2jr*Vj)A`2Fw`C2pW-+1lCyK*0lO5+Ev4Qc~vTM%s2kc_!mb8?%~4 zx-r?w(V_fIb7gniCnWTD=NC=F22sWgW$0D=UV9Z23kHmK_8%~AO|8jdtfglP7<C@yHy6WhoJ zMh&-Hmn^(<$0d-@-L3wjm;xX#Wl4i^gwXsfA*48{5!B9tOkU@5u&8O%kO}`5L=SZ9 z)t*>T>Kn`LXf7ru1_r)y%mg_&H~{Pf#Jbwh(0)+I2}~2UgUqzrx9m|DM=LS9Eps|v zm*Poo5=^=rPSR1{H8qwKFHW{3U8;G{5+YBW5%!A`3jjS;rKOmF69SRouMZaP^Y?Fo ziujaKW28u1GgD!Is~XT8z;9}`7|6}uk;=--s;a8e)6-*N1%fmh6chw9OiNo^R(5uv zn6>pT*lcA$0kfK}1l7|WE*0r{WV~v(=Yl_Rq9SNAtu(ouq;v`3y&bWYi{6dK^hLJh zqO%-5P%RmmQlmiLlgNkY-OyXAqt&SAU)K6iVB24uW2P#)c@QUEa@4?PPJ#6~**QA$ z0qH_Oz}rtvO|81Rn)l7W_SV)7r{aLHhzPbX^G(2!+1y0f9?jp;IW5L|Mn=kyKNqTi zYOf0bML`AhH5h6Pvn~a~>XQAy?Moq4Lz2v_c(_q>&Rs!4Wb?zLjJ?PsX0^ zbF3jUetuA4h=vpCJQqf**wVVvI0u$!1WkA2K=ViIS$z8=yZabH7afA^1>Sll89#!c8zX-&=X7BMjSfE)<1+yC1; zggiGi3D~u>Jan|Xrw_i{=NXja#tu|f>^o#OQ z@7%+SW1n_CT}?c;oNeP#%XP2Xm)o~PsK;+2fIT3c%a$Ky4e&2yadWfQ>*lf@`m0DK zA9lVNxWB*u9UZqwvnm8BG$3WrPps~eeY|HoHbV6Uq>>W)x$II<-Sh4bm;K!3e)?l{ z=DV}1Biy*o*0G`1u@~`jD(l_v{`u-kOWZAYH&9PET=_fmVJni#ajd0n-^`TZ zQ_@dD`DFY0W2EDk^NV{{`zopw$nxt0mUk`r3ZcWAw6U3WN^~1T6Uw1W&Y~?enZ_%t zD_glv;8F9ofHvkQfl4*Jl8TDa1yJ6Djz>E)hV%9TSOpcEjnTI^MpsUtN)auHO+lJS zf|~V@^lZR0=tItIqyoeqPvv^U!MG0b4|B^0BNa$Hz z2%X8sP^v}D!{@Ws+uVrD$qydhCV6{%AFcK8&DJ>stV$&k-5b7Z`ru*3lR6)^aqdktsc7y)qK zvZZ1bSTY}kW(s5BvB=L7g`t!0n!ZP{SoJ>_LK{UzUPU(87Rt>h>(XbtkVWW<)NPWk zumRKU?c$cjb{t|{+5lN)OUIvP#3&sifztOf5D|%WTwXFW+bsO`34F?EK3=4Kb-Lr} z;Q=@#KzN8_4EFYH|2f7$KwuBZ_Mbn0z#sm0uk|OxfyBFXiz#4cwKcvz+0$SX7~lmn z_&Xd7(YJ{)t&fN%$8+alXRG=aI7s6RGHw$cokSZSJtR$~yGVN|Gq9u^$dLjSpZBSc zfr>jaay+<1_ffx{~1j1?4wFfi7B|SWy=Fgg(AU+u{w&G;_ zRg9>qNP&XD2MR@bbLjc``MJ3-&kvS>Pc9}aJ8t?eU$q!Sv790zl^nn<&i3F?;CTY_ z#p8au{SW=>^73|x@(#hf07q?&L~lC(i5>P*P)O+T$msk_gw=F$>umkTzB6>>Br@i~ zwX`(dWd$M`DY=4Y0+z~`Q3VRs!8wC?aDo9)UPOKX)EFq=z<^tKD#l!0Uw;o025=@? 
z+uD*qQ6^^`XFevpP!mfJIQNm!cmDU zE34Ejf`k30Fz7*{isl4|!3yOCD)a{Q0ckx7Ic5ML`Wqm00a^~AO~N}f1yGFa?;kEx zcyx^TD0Q9TG@(4EWa6~8ocwbZ5W6FZa_!1gAWIiD)tA+%U4phW3SONzUH>2dsoq`$ z1c%sxz$98EhW`1<&Xl;db=O(`r@sb!#>>y@;^_MNB&>jAAQ1upl{fM;4}vysUQw*G4!bTJ6EdhfV2yj3}B)FDjilX0^Z_%Df3+8lpeEOu z|8%{2J76x5A#n8P-|V9%svrtRdQ}`AimBXr zr%&lwcvMNmVN6~{M7n;?jCG>8Zf}C8=5={CcMGcwubjB-MbNOThjTWF3xEE&s2q>h zkg>^@XLfCj$fuIs0Mu>Y(BT^Px553)>)d;LdmH4imKJv`BVd8&O$Q^be>~e=+{pD0 zKAEeUy}I^yf-}5x`luWk82Fr@iC(4SuTRLDEX?uNL8`Zwj2m})xWN5m2{>70jDT{r zgDg#ZQx#hW=pX{d{hL5CU}cX2lweGGe%p`3;Y6z(slgcWy0gVUJb|IF&j?4nw~Hw3xL_ur z5TW`6w4_O=#C1(w)y-xe+0sK5^u3cyJ)j4apt>9uH+IOL%6eQW-T@R)u3E-z zq@zH=Sf5~Eof$oW&IGvst&L!!8W956=P9?&*=T zu-L-i0o5kSUY8pn3=!btZ;lu9>$ms;eJVCCZZlm5$<26CLsDp4!MYf?PN|;n+U}FT zKvG<6ox}77`cA@a%0Zffqs5}JwfUj8C<4K3-_>fR0;y6ok^7a0ALLLt$TK4Y68)rtRpV7aU!j{V1k0m^h64RXM zvDn<{E51TmK(C(-M4+An-M;>)k0+kf#>vSkBruSEt^!yN3;%p+z?q|4m&4^wz~})Y zF3_w3CWxC`KCyR_DesJM3p1D3flnbM+(ys#AeMzIq_5C3z)IeAErdbaJU*%MP^N7* zC}A;%-_k=wBtofbxWeNQt9;etyK0u($twmar#%_36KUUcUrQ#@+gZnr@a1JcAK?XA z&5#z4nwy>>b}}-pI<-?JTnZbn#q)kVjIBCs=52*srK9$8n+4_5>2olGoa>|)O~o1& z=0LXstSGY%oct3FdL|z^7xw8DUT;MB*LjJZ^}rhU0{%cvuO{o zlk4+?yZ$*(QDMhID4~a6wY>sYNem1@Bc#CcSrr)M!m-NiFp9`1M0)yEkxdAA& z$fl;I9lT1qwGO!s+HUmp`JgKEoyFL8R}?+m^K2G`nY8qD8rdX}xAdY!Y0~UU%3F$I zi-WL!M|O4=;~>l6VrDzLUx|!7w;`83Qa!N-cXSf3Xw8QBZrUrOo5O{v;PUeeD%1Z! zSSMysg5w7F{`uTo&PV_D0usl`e608a2+04b1<+Lh)glo5q)(4Qaav_@adEkOGFV1H zCJqh`#tJbuG~Ax8Blgb9THP4V28ugo3cwJ+`9csZP^JW+>h4?vJvmL9L`!>i7F$=`VfDa2PVbG2=fpel- zmu**oIRG2RFa#uIZZ0m8CVtYCUzPKxi0t~i+NU>&b7@n6cv_x4%)hVF>L<=GLM&jXx+DWXJT8%-;&}gYxyO#`=sSUZAwxg4Fp?=_Vn<<9^-J9b> zBIkAh)*O?U(o_>*a*p8H=Yv2z77-Ta^SmtAV)4(xw6#b1ocv-9k2}1t4>Bm|xygXJ`LZ`J2~{t)yvpXl13?cQzL3 zCKs`e3=$tAMi%x=irc;JV%AO+vpO-5f{x4GIjZjV_@kqr{=}Ijh;?05QEH}z_-B0a z3=MhAUgwr-yY%fFA2B6S5pz$Q)ZNxy#o!pj;dDaj7y+*GsO)Hq; zJ<=R+-`l_1Yidk$tg`VvOTv%;t@C!^-JuQF2@cNwsYtQTI-DPXoG+ew;%1ZTT&}E- zUDdbz!Gt`*uyw)d543I!6ghoa9ok-^K&BfGAqhF2)3wsp##xPBP zlN=|bGBcyFpTI*ToToqQOiso6yNHZj8%sK$WHutF6w*fTMEm0Ej$~})NA`R}p1eGd zdGCrZFF#RF63_V$4jnuu@!7&+8yaREiSleVAN-|alt+VD0&nJ;l7YGd3R?lfceJyk zdxi@>SXe0Y8ymD|sOKPgd~iK{C;)6tCBol+d9M|RpIeaGwg~=P9{(hoJ1BKrA$?qd z?CEhd%G31@nT+k`6H4nQHU3P^cLJI(TTk7N2M8?)bNl;6-M9^n^#~gqr93fY;QmE6 z7MrH>i;jrJVNU3?`bOf+x4XMLp3faoCEI+y*czBg>2|WId|d#T52z)RG#I_z%=`t@gF<&YF%%ov#1Dc*`M@ucGz`X{|qffD@;cFF;b=as9My z=}hTYG!7-% zw4{XgdawYq0bC zu0(&yX&W3B?oV2uB*<5#qT95hfnL7D$NwQCf{5tKv{OweAa=L&-(8o1ybhGL*7kND zE-vsiM?i2PVb|!=0RIC?2t*`Bc_m2RRKX?>ifJkh4^^$T)lyjECUuq(5x?*&eAlW! 
z%)F&*i|O%r#f>C`DyouyT>0FljLu^skeZs2DWO53{L#>NuhEv;Qf0_p+|%8ipOFxK z_#GB;_Lf6kv0%Z{Na;FWdbyjE%k`p_dBf5ugL+lO0nyFb$engMQ22vz+7jL$kh3cS zB+p@`0mKMUo7NQ*?3YIr{FL#<)A;K5ok11>>>rNvisyy8&2HERRt|f1{+_$QU*`8`S zJ^IK<&&vg(R+wAvEtdi)>{j-YhUDQ)m4DQF-qYI+rIt}BR^fJTE7W}NM%W&@bZC^o z<7>b67V7nVY7EX{US~3pfz5{^UGg(O*uTe>3HXHDy@p9mCTpwwo+qzq88w4-kr3_2 zeSy5e%MJ7K*%lLf)Obs}2gB9XU0;6`=1=!^wRfnVeP9qYGm8O%L_NqqKD+liZ_ zPM2bXu*H-Y%gZUM#kCc|<3T5pH42SFhtp9sy$R;I%)E$}YxjvhLhzNoyv3nxJ3q)# zM5kAI!KJ<~@ahc#fyX|w*CmE*GK2wJG&Idbc9N8m@_09x#o$-C!u11E$l&MD+7m13 z(J;g=7Hc?vnB@16-3*_B;g22lC&7b@HMPd)$sXTG*gl)kIIynVkUp0mVmfH`XBV1{ zn}{W`JN?M=m@G|CkZTF<>Dw6r@)OASr#tI2>N}l?2=t>F4YZh<`c?1WTgzuMiGTBy z-#cRFs~XI52R-}R8o02{aF!u)^|LHXjf#`sW;*uDh?}1KrIi~zi-<6nl#~rEk!R^2 zuI&bXef?WUI+dd3E8u7gDG~hnN|V<0vDU=J^o>eU%U6_%%EvZy^G(RHbQ_gXAckn~ zf9z%ownFW=kSRld2i>p7wn66gJXziR7o*Nqs<|akyE)c6S+;-H{?|vX3I}@QL(Vgh z<}*-&=r!F_G8agmltyJs5+9fV9_KFC3zGG@5hgYsSy4$#6YHE?hs@29fhN+xk}zd3PrboNhL;n08oP@vbyHa4jS;3eTHzu?#xQo0z){qBRc5+wQR z!_L-ad2#-!fRcKJh3|lz$0-Si)hELYhi!0Ft*CNh@bAEslJ3A4FjyhFHsy8yoK4eF zOb!mU2Ga05r*Cj)TK%3&wqnJyhdsidSl>2<@=g%c%#C?XS4O$zsLd-Zq(Wv zzrY-bnPIZ;j~DCmJMYc}5;J3p0P6&Nttl@r$HWp5QtC;D>bK+g3H|1$R4sax_>+!` zT>7BrEFL5A8@M~?)0>f|vz_Vj?r!jZ8c{^00$~Px0IAOV^BKrTL8LyhY+(`KQw76* zy>KASi#$9mx4$?~1h?kB^m$q$DJzTqgp`yU)Usgq2?{BZ^7Cuee4GCqL=j9S`a>rD z!P;N_(aNU{KgHgqm2IU|I*!=$eDwSIc-r&0MU;&LgZu|#2(18Xf(jN&Qqs$%Q2wmU z%+cH~@8T$a=K}$zq2bzt zUt+IZFYsfhMtTxPdY;$B$cuZrj#T|G*8V#j>pp%T$1kb0Bora3%c$&(LTDkSvMDk$ zQuZcWQbux9D3R%URZ`R#S4fuNTkaS>=^jyIojYsZY*OFMne5TwY$VgZ9Zw zUdu)qnXt2Kt5~}O--8A_47!&j7U;H;bojE9t~lNbw&`HmF) zZ;fmUf4>V;3j_1ZcBDmLs$kZ$_IVe3e%|>WUH8I!RJKY>F)SvX$E#CrEyM5-g{55a zg_Wk5S zxxf6`gQ(g=f2@T1vL5_E`LEMOWCHb0-xI2Cy6{OaXU&`o4zY) zD)@UP?EL+E@F(NDcO-qY?d4Y#U7MGxJNH*tf6y}N3wxEI@W-Wl^>0Eg-=96to)S-o z0>}Lzb$bf&6_>bpE5NU|wnBgmewXkcZ2|7Xx~Oy*kfa7_ZJ|Hz_quvD11e| zIY3U^J>2LW9ew)ZgN~7bq1J*;+SNlh{qxmIMBIG~7v{s}>f_HOD>W)9M%Z-tYG0WA zV<;xxlxiUvZ>6fqLjTTySfdoAs+qF`_o!Khn=?khCTl36L@{#9mMzu}4jp;7RBv0J zmQnN1GoyNOz47FBN1eY{9df#B9x)y{b4bytD3Y|!hjN-vf6xtv0LUhaKF&`Us+TZVPOWGcLd+Uk)m!Cg&b@Q~Y%-l}X zv@d|WHcmE0hh^Y<&s>-9!!zp#rJqMwMPY4c_elD_z!#(?Fy>7A_j>?j0bh#8Nq`(z zb>h9_JGhWgwLk_ur|C}>aa-RF8 zRQ)WVKcpC!ynOr%C!PPs{~G#~n`XWo`tge5`gNC;rSkV4+D=_YKIHUP@iMpJU5@QP zuDzM;HgkV86CT|gRNb$e^QP$6gVIYpg2H&W>Z&*uEG+3|MutL0UUoLcf8pUt?LqM4 zjOWg|Eoavrln)XSrQPMFTM{X|+QBBz+xg$+8b0nOC!aEQ zmU#YAm^Hm>r2GUaZldvTxAyqhs$68Y%A)Z`lvdZT|G~4v`&Qur35^7?B5o&k#QNPI zwqLp06D=H?vhDBe$EU*1GusLap%LMI zrEB&K`CqSiWtUv5oEH0|aytLqMEJ~01U=efUt>x$IRvukXqcGZeLCl8{dw2M&19bR z#&K3J88XFzp7iw~ikXvT{@~er8#ix8eZgb$9Tiy2ko`KM`zSONpdj>_SV+j><+8ViYceS(nx7j&hcgww@m;Xj+o-i*|X8n(Kxj@ax;Pt z!Nio9l!V;44_G+`1qE}Td^%pzhl(f@#J-{e#X7jr{Z+} z9LE)B{Y_moEKP4*{~@H(WiM)v)uLM^uE~RW_4D9Oa+o zLq1L6dCPpj^A7yb3s#QEx;qSo6@apk%dDe58 z@$tggr1+nBDvx>CX_fom=u>Q_k-HGEN=3ONn6-{K_E1Yhvq)nT_k2UFx5GsE&J`{3 zRB_&|6_p#QS$ccU+K&ZnrT;IA*PaMS(hMHEZ4fXvH!ntC6Ot`pg-ZZA@y1bsJ1QiE zH2GFn>53mMdsNiC>Ji$e9c0?qma;}1mhE1_G!pjZv1bJZ`bEvzr81aSB2y0?*jiJ& zt0nVY7h52sVFTAb=?60-{k&f1h+Dd2D>|vNbLUR1A)Ym05oyQHp4~`g_?5wJMU-FX zeMixRR$-JW*QHC}o>8%}JzxvYH96`mxHuzhCBZ9Kqc&(n9As0<3-3eZxHoxulS9dq zGqTQNT5lL6sMR$xGq-Gee~%h?gts?x=kXDnuXzFdk=8w8IU*|6E%G@29xrOQR+)3>x=?MIk< zR}b>@QEXhJpX)fs`gkplSm<}My(1ieiV%D{R0oiP^GU9v{EDb>8d0$?6@2ajB2#ob zXp}GkfNU#aiSc}R{*j;s^YjedL72}Ydccgz${%;i&eGby7TOjgz7p<$5w68XD5tUB&(EqoIZss$bHvo61zyG0G9e0Rn@iR(B) zq7F%!DBrm^Ww64!e}tW^l=a-qx_0gLvRkjlW5?_g8US`FD#SHnIa;>monO4F`nPP& z4jA`oIUP*qxgZFTV(?VqN%-fyyu^(_;gO>zOcRUYJ3 z_YU0N_U2RnS4T#l-#Z6Kjcy%Y-!CqV$0n_G&;$r6#?AUSZ-PiO9OB!#XCFEb0n8sY z{0O>|p}xM7^-bUg059BtSl~H=v0QEO*KbfqQp%=h!^3^=Ok#Rg-FL7CIF4~GEVzpe 
zNEP(AMH*OKYCRMOL}#@&z0`K*4Lg1KvP-U;-dSUl8!5SFa&x*0!P{69hWCooMF<$S zWR81R{Bz}r-9`m(&~(ESA}>R(!-M0x_(cEa{OplRh__|w6m5QC)m3WN{PIY93FTfe zln_8uBqSxF;GS~w7_GY0L|L%w+$$UQml-xQ+=ULpk}O6k>FRwo-^cE4E^N~>@RP~$ zoSxos*6Og@M89vN8flYpVVgm0nRA-kJ(~Nkk6n%`4`e)Z4k1vS$}4Zr5zrGicaM#Y z4PEVv{(@{v;Uvlj=f?egHdC86BvE@2L9n$PIgLLrPtbBV!w z`W8W|$**jUPpcbJ^UVLC`x8-|% zJ>97b7XrRHXK3Xoun7qZD@bUPL)5MblF7zz-vV;7AK6S3eCOz;?2nTVGc8{yZTKBm zm7>8{e8}_QX2R^MJ-$}?y{;}BOb9fSf+Oe+r*0~|H4dx?#H$1$5h|Y=u}@RLPyf&3 zt;0xoMn*=v9n?0rm3MaPQj7ch?QVL>bmz|9ou}15e)(BTx=3);91gdZqhSN>Zuo$=dJB*zPK+kQ-J1Oqe&|CGx9tND@Ef`%i0-gg zsjgsru!x%OW?+~_bK3N9lfv1vj^E#}t}GET&h%)jYDX-#s@g$`gVk%3V~ekox`Ic% zvdTUxo(#KUdY!^*g3PX7g0L@TSSiOFo%(03tzXHI&nqf+p?n5(8MiKlaxU8D3W|!- zkHxtVjU3q5pw_JQ+-F1YSl_^a=A`fPbb}(nUxb4Kf1ap8y$*RMczpwjk1qo>QJE)$ zGBU>Y!;ba_TH)P8_tf@KRljb_dw?gtj4#z}ZU2%g zITH9!>@{e8QRF(oVPmhc3E56`mJ~Tnl)BDY_WY3xVqREWe7H}%9vX{#wl*Dt0tjvlcDB9@#AxDvAST+} z+zjdiXpI|D1D@=3oe;fQdw`4U_f-Ga`Y(S;8ZWOLbsE3+E{?GYkv-&B(Rl^71lt*t zgKY)jFquon3SnVSp9h90xd19%uzo6Gr$OlNPWBv}K&4#+k zvXx&l7qs?_2ploTpF+cXN3)#bUi0&7~|VC?z$2sodOpFWw?#mPbx)AVc77G#+mT@RS96ltu+ z(wwZE9g_th<`bdm9|4R~`%Rjlma*g7?fN{vh%|RfJ<3EQBbux30o0PT%6rs>S)9Wh z8p$F5q)Y9fc6<8tX)G(EO%TDyqmYnnm~8i@S#-}3eXlJoZ-5h}72eu31fg1#Pe4mU zl)MHS-UNb8&I6ZVN{qKR59G)hi|0YjNA?*@C0?J&D{N?St)WuC^wBO;xo4 zjmRhtefwsPmPUyA*8c(96`V`7<~krX)kXap4GIBSgSx4MLqq31IS<!5jJ!D zr>cY0>to!nJ<$}8Veff226BSMiuN~$$Zi@Z+VJ_&93I!$Qj{OfZw6hJV zN*CnT)1Ls99Ph4tGtGA=z$mR-8Q36KTQ>7T|O_4|dQ>L*7l+S;`I>4{Z1KkuAQmy|$9tG+FWIaZmS_lj54zH4OTw@5)|y|G1pZ-cHD_QtPzOr#HICN+-h*nKrX66;19)xu5-o z+tYH5Ebcy5SCm$A!+Y@w|2*TIa% zv**|Th{rq2x#k~yI%&0*Xp#HdDlfaTY=WOcu(3%?sF*0zk$UMGg6~Owtyb+nA>*PI zNeZpc3HFub$-g4*xj`!Or*^=d z*YYo4tX3H_-fk}GA#h0Gq<~)BhuiJLVbX31zeBA;-?KG_i(Aj}Gw^W>*-ml1uZaCZ zJ%DYhf5C&t*3oe@AzPu+W!Qy|LF8vBS^S;)u;~ToCC)wWeugaOZjj-EWVh0!>FXb3a#}wPWx2a8{dX3)#9q5;`nMHcv?Cf5#U7sEvGLO+B%WiBP%8ct92(1&IBFN*Sa~W zIw2S1ay;#=InMC`cdUr6a{X9z?92>r(uYF@J7q#zSy3?yBmnsG3I6mQkioBqYXAGd zG34ap5)u$VA4L2$IA~;IlAN53_7pfjUOqnRQ zJVI1aQ;sFG-;jfNT)53$JtR7MWfg5@MNVj}MFeeB?nQz4gFdi-beKIv9*H&~v~}s( zF|@>VBdkqO(+1=O zaU*FOc{yMXJj;p04E{3pT{kb{p5a*0Ft~+=h6r3h#!t#D#v1rm3H~r^@@3Y&!isT; zqIU%<2yAsGu21d+l2$yDAS6Gg4O?y^H6odto6k;kt&NYD2wGT-5n*aY1!Xy6RUSb$ zTH3JTk*w?bUK!dmCN=GgEho<;6S%y}Q3qZFP0qXb?^Es-)IW7K`CXt)92^9Z$zwC@ zp$9O$yP@c~#}ajCXHZP%*P4iqQwgCzotK|qIjiQPBRgfEYxzsd{dcqa&K6x_8d5Fchm{uYnE}YuR zb?tWOE2iEqqj8Bf!K}1P+Pgls7yKxo2h5ab+4A;yo1$J~4xlu0zH*AUz|Bjb(olPedw&v9H~x8l z!+DBq+;Y2gOVX2%K}X%h9LI)TZfYyNQM^l(Fhm1EU{rfNT3k9jS2~AMiw80s^m>7x z*!kAR5bbEN`V-Qe09w&}s}RnMM6sp5K2ppk&LxhHwysD_9Klt3pXQl?7sDgzS+6`j z)Bk$URxgsq{vCe{W43ELf>FfA(WvL3aSXW>7Ec3W2k;=9GPGNon>Fx;(01076-=6R!XWcK%+byw0-E2GKZ_AF~4s()Pg}q+|vh8n$#MA6A?_lN3 zu+(040a%dwqnz<5Qe~`hNFI2=8iI{G!Eagn;vLlak`!ZbuFU7o*ht*U=^CrQ!u^zt z6pn4R7VcFC_Wj3&I^=Unrf=!P-$f(OB$|bU8&h9=Q=1VJ+vq-NbD#Z$y+fV|Um#yc zR`UI}{E#Py2V_FF^k)HNjl2-MxfEr@{xVhd+ZLS&{m~}_LZRF2Xe%-!RtAP}B&(5; zk#|%M0A!_E%)2@>{iSP0rDVwM9>hs;dAoM8i?i z#zW+mILFyHNPK=^ib<@`E?VW*AI(C}OGh|1>bZHhw}&r;#^@#Q>Ez*Voz~`lWot?koe;AWQHER7!>#>Iy||`=lgW z8hnzImlrE#b}-$cqHUsXNJpjT=-fg#isEq$)xLcq>?eSGZhsnDN8Sa zBUHrG@WH^5S$2%9D&EV^Yj=s`#FQ3$K0m;Yn8d9!u&g( z45jqAOt!GfWC*S2`(J7pCGPi5&s?|64Es+%^f|BnZdl)YSU*Tr6WE|CXoOejmzI3+ z?5^D#o#yI!(tSbhr5-hHPWyd?sa1?@@0Vk1ed^2|W1%;)sCLJKH3B>{k!^aWi$7#| zcvuS)FOUUN^6{CslI^#|#qDFY83|XP;vwi4A%gfi8Uc;u_Dj26p}0=?;uIr?kqv@h zzUBxvqsU83yVWc32VkNwgPITL4q1L#<=3uK({sh@T9kGF;N83*mG%Yq5q!gg7%@)?HBA)EALbJt__d!}eeaVrJ|3RBg!7{v z2#t96h)lv~08{hCOdX_5hK7cyd84=nWse__akzA8^^O;wL~*M{2A577&zJA~9`Y@p zPYD-MD&ujyssLJgWweIv)2CS9X;l#}}i 
zWCAuc^ijXLN$>C95Hb_O?3t-{kbmT@<1Bve&{zm_{_&k5#zQN?pRLj!yy#)hTiedhmcZwHtUNdOOF_egi*EO-&tEiiZ|b&UMl2;Z9rI zzJ2vgA5xTae5iJI_np615^!?uZeSqu20ASCOD~KlV;~IzlX+*n1du_zT#zj)|MvC| zf`Z)fG=Q$GA}=GM%*Z6|HXp*SZW*tRyYXyF^Szo|rBr6Y(&@=|+$v7z#PT013CEYO zjMUhpPu$~?YES>--`AE+w{%ztTo@hoS42Xpu@*+kgS0Xa#kZoh``hw z6=QQBziZ@+crN>qnxRC5w{Xr|!Q$7A@4p2_WR_+(`IPCoPw`5t^9WZguCglEu`OJw z!A0w;&p1TA)Os*V6S8F(sN(Gl_k>7lDLrxY=2}Z~`<3MEkd(k<9O zAfK|6T$lSI9n~)9c3OG-{gdANi+ZfuNG?xQRAc^iaDZZ%D=YCLUS_>@NfWzpITjdV zu)jYOwitv=rpBufM5COF9!`m4$23y!s@;e>0~0xf_3An|S$FP3@PpOWZ_gf#>Y0=s zYVQ8EJwGi@bCG+3!|OB zp<&P7oD@4#U{@VvUnH9U%TpZwf(6xlXrHl3I!|IFA)iHqh}L;yRAdO&Im8B(I4QAL z)Xf;=fPORYiHSthbn!(+?S{_w7xT`tEt-ar%*>=5+o{(ZSK_B-&A&~v*o+?ry(}=4 zx;P@D1^RK&aXf}79gUNKM)oo@Gw{nRDEtH#h=E}gFOWtca#THig)kHBeAHRG?^+8c zf5aTDDLvuxCP7%5tue=pUfZEr(+c(k&zB+kzohn|f%nhLOr|!Sy@32sB`Taa)RF8UIh9CS$MdKhxCt~rfzSa2eGXHf~uje z!{*_4_Wr9I)Jd25_S)qL+2)E?N4${?O1k$s_6Pk`KxWo#v9{P@_wo3*eT?UsxOX6G zRpUl*kD^|2(E({`4}dJTG?0w~hmq0CHoYG!Q5~fT5NeKEFq7G{P4|e1kVJBhYah?_ z>1|0FMC?XXdWe@TBhxQ*Tj=6NLCbv`Hdz1O@vXY+;j^bb12NYPR1)TE55CkBSd-vR zh0BafJB45$fWw}3^z|c?=(g9dC;yzjIq2y_rUfB5kU^H4hVA1}!<0Z===v$P!Q0?D z@JLV*X0I`WWxV6;l^>q$3=Fn~D8+sH9P+s)!fsO-0#$4sr5@EmKr3jd!}KszCp zI2h-n@Ov>H@9%8-{9XF?iN)2w9I-&n5Ur;R@Bh z)b65YJ|MZ=->ScsehXaVvH|89TtwKDDQY(j4I<@3vbvj|Uez=kctkxg5prBW?~wnb zbu~}KK~o4GOhn%%o~{UwCg!A*o!!rD!*kdwYEL7H%YENK z?=>D|cJqWT;fH0^wqaMrc&;)LO;_M%;EUVzQBOnG+uYQogKRYq4Ope2lyn`o9{@x@(G__BEws5H?} zvyBE}KNJHdVDC0jOOl+N=HPgsNC#pz-qnN6)bDeQFY_@^+c@o}=;UKHdRTOYg$AV4 zsOLJ%jiyA5uXYQ}n`cj-mY_Eu6f|EdL@E@Jz)c23w8;k<8woxw!A*{M_hMK>qWR*r z#dm*c83dtjySq$RJxkzGOX|~X|Diqhe?s>T-2x{mbs-4{WsMo^QU=!v|p)x^2t7=wUIUb|T`;ppmt(BTiippf88^8NF zi@&Ug$CS8Dv?FombaXZHG7BFR7@q=8iyG2({Pa zM~}8*MIkZE$;m-q{iE>k&8IXq=W=VLwexP);f0~G|EkiQZ zJ;v7I*82NfUjCFuc+^=k?Zr#!S63!9W1cV%+FKa#BonpaDdkdcs%{8+pt$v(9U!PD zU;#YdHOzYiAc^HTJ<%1@aRT=p7Mn%mz7nYv6CSqo>x-Z;k)EhIJWltHP~e~g3xr6G zxyyub@gZq_{R5Przea2!DBtqR%x9?9$=C5Oc{wUR_=wN-=Kp zg2#?EqrTVLS^x|Uz#8P)SLi*k%*g%J`yU9KOb*gsG~G%7J}Kr^P5LHAMlYekkBZ0D zP7glh-w?L=`{&P*8oqS{cb->Hiy5Sd+Maca0yj&4a_u=exnmgri8I<3Ig!xr2(N67 zLXV27YFO!Z; z{_J}A8dcUq1mab(W}2k=5rK897xpw%br2_TAvzraDtdc+!*w1OGcq*%H9jtl>NKj+ zhz2>ixHhkUY;_2CLZ_R1SZi33K2v=;C@aVh80u9)1tUm z8`0vvnkN5z|H$R+?^#0!f+@5EEF$VPzC_=Z7YUjQ! 
z(BRHj(P{u_gqKLmj)nVPtgLDq&H~EB}d4#0-kE{%( zs}U`^ZN_p%J30=dIr*& zR$aPowM|X=;4?<>>qJ5yw5+TQK`MA{K|zW}Ks}pIojv<2J*za};X_B; zUaNc}$;Ib!f)eFPD1ti?G-hZaD2&H~L52j1XiqTyMs$|R*k0_gJwLcbM#c(72LZO$ z*Uy9eYfqbQKRIoRgKp!dgGZIWAJoyA9^Kk;;ms)>$`cJ#HIMFIJ}omH^D?Tq z*iHJSIz2)zAZeQGunO5>X~k)N{;{)t&l%pUaUVstJ#A0_)2G|se%~+U9#De0+1bp0 zK=g2PsgzvM8N${uGY@v@jd|n3JAH{j8YShN{0|RVtD?2KK2CdF4AtSIY{GsayHJIR zPAWc^gbhA_`SLOF9bdoB)XaJ=&!N)vVLibQ%kF6Z=x9eTu?a;K-V5WDdc@QRY+?-g zh0PiRWe;1l(0&LB?~Plx7BHEWlhY<-4{^u6WcIpz9%x|@7pT?{mhRp4>iSvK%aJ4} zXqPeqJ=?Ww7h(N?d9J6)C_Y`kjuK|BiH~b93Ux0Y$DEm%m^kS=Yd&P+?(R-Q6Aa$x zkJi=?A3mTC1tBv|QBl!@2d|@_q_wqGHR+wvtuV9X%(A`%C7Pz*9oZ~ zKiPSAoIX8Rca|uzs@cH?cSFF*RVe?1xlHa1SS!C(nxez{%2Xz43nI;BfC}TFKjF0-i}LAcC~v#v>${z50Pk z&ZnR|qrs@6q5_^0K)r4Mc;bGrB#fmvg@jaozXXRUou9BD>dfKcdxbWhaO}R%h8{I9 z_XzSsU&-xzf1yo>$6gGFK7v{!bj`LA$yA5y2Y(qdp5)}D^(}Yf=J-+J-dP+A2*G=H zx=afq?ANJ&;>~&jtOy2yd*1IdKF_~Rm`=`8X+@gfPb%QTL1wjkK8E?+4gxe zdcmYgxJ}KA`LzuVNE*-LEx~{>($oJM8$GgS7`woH#)Opa7z{WoL0gc6K&+UpOP8 zMF#$BhsQcCEG%ePB{&!Y)t?sL0kwx!+|~&B@#5klYHq>z@1y+l6(g`}OxR&S@KK@U zWN{{e?GMBsZ?L6l#cJ;H`}As*0l!6`IL^!a5*;o5RfqqB`9>Vp(9t1v=+FR!+(u!T zP^hkr#o9@c`)T*5?tvbEqDlWYA|%@Q!i|!q{KpSxgrA6hCjb6r5wq5Z?_d_WSqtwHng z{!H4U)g|Dxs=rw=MQI*^BIV7-ln`hi`1n+yJgCO{GN-n_e(LYv5kxij(YOMICqnTN zD!ym#6SsXjF}b2?4>gNH{{=uz8rFwjYq(iL%S+p%LNCeX(4jcE-+x~|M30hgdNUEIV^~>!$ zW(&?GVxJ|5IwJ+&E?}TJhU#n@bZ+ab$ATWbW#z##za;9&`|+WA-j5c>(o)b#+ZTr# zY`RYc*zj=-MD(a&TLm31@aq8d0=F}TWCV?OsmGN1O@M+xmK`5+b6$G}JjS%6&6%6Q?dcGSLcfK2Q z*VDZ_aU!KN@p7S&+;6NDzJ@1RZX)S#2wU0V=IlHVF}DjzPQI6uT~Vft;M3OV2$JHr zwY5dU;fPQNdl*pw^p4w$oN5{`;RTmX#lRPkoCQK-YdZ?a{e%wam6&{PK0f@x0(0mk5ZsuWnzkkvxw^P4E?`db zCf~bvMJ(DHIW9qFE%$FFJvlVYZOlc^@h2C5()ebQ(ogO^-6t3N9>He^$YXhyuC2O4 zL)6#}^~r+=4+3-jir9K>_Dz{P7P#n-TN%*1g}@(DY4z*ZWj%kC0t-PU$iTn=i|+47 z7Av>ux^tjezRc&MAbCA?>D@|e^QCT8YVl=BwWmj*S>5MbRd%I`rtCC&(jcaee^pYH znrUpgDP?*&al zG$JA*AVdqe05&=c>X}9O$s}4QA{@*+R4X_#{)nr)^Hro3#O{S|KLXsVjszJ) zF-okbuP-4gQS(UjWnw}A&Jo}TuLFT}fDeFE`FMG&7)E9mF}wp?9;ktCv9q!aW-NH3 z4+`?eZR16fI)(P)9?QJEPDU0MC5XlV zF!{+F85vQ}V(Zps$0sBqKwICr%wg^iu0=;Yd2*NxI6l~gsaP(a2|a6=3TdAzCjoUn z$lZ~w{(JU}hkW_+Ww~z-Ha4`h{jUizPZFtdDPPueUcfZgl579EO1qhn@MT6HnMIsIHq6=I<1>^Mq8%XUerVuE%Ha{M;>C)+WymRYfLC zN?Bs>GRlms(R@&j*e$`r|~hn7pz%@ao68W^GK-L_qneWBUYXYn?M#z z%_9H=Ry4DLnyP1~BbA1*0FzZD#l+?@F-F)yfOz;yEC0`e-uUA2enwMv<{~^=D;7=GUh>s@p{8L$AT=7?h z^`XTiCCUwicr}T{eQJHM6Y12ZE$dU?NhzfDxnv~Lo&)RCQ!t8Wqx1S#)&5P)A$e`v z)xCZ&C5r!#2k|mC4nQ7{M+f_^wj#TJ071pDZA8sjv;*rOpOjOH8{D8sRK*Inv;=xO zwO(_&W=%tb@X3>Fz-8{eL%*on*R5{r-~M(L-!9NBV-vGJASQN222Tg;O9BZhZeIK9 zRewK?^q_|icW&QaguMm?egFoTn2Tc7wf_4jd)TcEBErLGdjkcaL3t>~Z3?fD&3#%K z?j#J>6&NrIxfd>89Bs+!Le{2tyFUzYn1&u9HVGrZ%1*hAaW>$jSXx=J?b*|hTD@gZ zSXdK0B9M;yh6c@-SCoBW=kXDAl0$LxgrlE4A?aY^<&n9u~HTg~cr8--G>lRES89WMr^n{xazX!;xsj0!+?c67(l-3RRf>3k?oiZ|&U%uSJ$U=4V-RnQGyK$>M55yDz zl#bt?90gQ>@owYj14~XeL5vURj!;)6G+kS=c*0N(&@OV!KqL0d3?T|h@@mC(1%AeO zQM@}$W6fw!8Yi5kW&2_E2K2mQYz8&P3to^~e>VEFY4(hZr+qd@i`x+x0#c^jyyah3 z6lhL|IoKy=-L0wT4yo}Y_3JZ6N$MucOwPW(zJzW+lB=kgm^F+VMQ;*RwIAI7_;1OU zq%n6>Bb-H#ua-_0F~yifx{&sq6!ZK!n$vCicetUM=-IPpx>?YxZJO#`|H6wj3TbX` zQvezWIRz!DOVQv6C}9G!jDUyn|9+Htu5lkH=2>9>39oWFx|DdJ8&Q7$ei-+2>GBm$viIF;pdYW7bXd4qjbYMYa#a*?m#%u7oa|d~!EQUPm3K#w}IEb{-iZg(3$@bx2 zkewhj^Svpl9_v8!7j6M;K&;lU__ZttsxL@cvN9k3d)jBi}LhzOh89rolaAKcOv;CJAbASw%zCKT*Rr2o>Q zyft^QNxRj)IE_wd6bOnP$F%TxP)o)hLK29(03w!_sIdP1Z&4u>(xRg3GU%%_Fg7-} zu#kcwOyT^_jYlufB$#&XN5tc^b#i<2&XHRXNzmWbq_YBNBqwl zPzkFk*9=7o&`)wu*>3%37=husbp^-W{V zoaj-J!Bkl`t*7v8AtBcd4I2RN{Cc>)+DQ*~Ok(m**vsXi zcLYi1EtwrKc!com@F?^8O$i$sdFf%9`9a%ZznFD-O^u?A941KtV1oR~$i+W}>6DX` z%g@_RoXqRCkWe|#@l01ttDA`F*@ 
z;8Bqb#UzJ-N5EN}oho_5paf1A}1rm?nGsh@ihrCoHH3IacnahV?M z0V1iSv}^b7JYcYZ{sFk?+`c9m9I_r!zu?Al0Ro1G`Fx~Hen>(E4jnotV{T!A*nyo) z)W3jUp|J7zSq+-hP{A)-bXoTq<)eFP^+G&D%t>oZeS|^fLc|QIoov#@Mo-hz zM-Z0)1xRoFpS_vG{RmIF1nlrob~J9HHmjqi)(C?A<42GFA{3@(lfLFKRPyT8BtmRr zxqxp`X4wPJGepDSP?ar)esb}*7l)D$q0<+?K?)Th@8Pk^lU1C9!RV@U z>#tc*RlV@8H1O4WeFoA}U~;M-mhTU9!C%(5Mh*R$ene*O$R zJ!Vt`1pbB)8L9E=h~Aoza`5_{{x$C7oW^@X0w1P05nP6i6eth1jW5%dg6L01szBRX z>Rx19Mhqz+kxU=Kp2OO!UAc0YtSI@X5}b03jzZj>mY?0`*z_1Q)gAK-)$ z=Zf3?M)?XeX0%2LU2l^9I93+CBV7ow-No&a@XnjsLT~Lxis%P{51(ztB_-HIOA%$u z{b1w}j@fY4pZF>16ssXF7Y>>}1aVAD5F%3~l4LqYM$OaJxXrBg#~!H2!GYyB5@Wka zBsnT)Ky$Qzrl(&v-?*3xdI%Xh{U>G5ojaNG@!dOOj1xIDBn0EtHuj(W=V%)@uv5Ar z9qj93tZ$)4TEakEqDyFChhYaD46$DBeA$$qc!V@OOml+-5B zLz~}rn7F9&<-NBVU1+bO-~mvJkbwi#PH^)FYu|Ntmfj)sjhNkEw)OP(BCpTItH72M zIe0Mk&IL`)7A$IrPocfO7s+dW^TG8anlrI=!yA{NG;O?u2plS(FW;{Ip_wEbpz;d} z7&EgAnuxHFOpJ}CQtH2dw?rHP9HU@iUAOR7mf86L? zkCOAT%^| z8<{!6z{@exC2g*E3=oUM)E|eGhl`8ov2|lA$j5#XtqV&Nu(2yKYs)4#Zw{iom-gn+ zVX&@*O&brha}jN@kpBX6FJm@*{R~OCCoHHom~B{kaAha)u+f+`>woBZO6T`RvY_B3 ziddq5s(@y@An*u!@)iZazCI&M#q|R?_+u3ja}rb#Yyq|Vj*JaC2#_+Q341OsAgc^I z(QNp=sY&#}f$~P3^&bt3!LQ+UD+Cb)psIg2>ooWLz@cex$GK1|(67HhZL4%F4<>DvTS7{9xnQvF!jK0?9z#wa0&i z_>ouSN2d8T`OBC0qs>S~CG>hFJQt*PQQuofJmiua`hWlKL@t4-3Be5+Sk5E@TA~*n z#sn87tTVBGAc;&BidDo%8plvM62n-iQ1&oJM#i}mRnte8lhH)=B(zgXQWAL{1~HFL zOz4g0{+V`hM?8+Xkw6Vm-Ft!}8y%hPKjco!Y9Dc2eFk8sV7-NbUj0CY-2tWhN}OBP z2*d*d zU2-rY3FJ;R+MwfXJv(RzFcOFo^TR zc{)-KqMtsKx)5^%2#!M37aImYZ-F2jyZONh zrHrlHw$X(EHzrymL%2RduT8H^S7vc;LXSOq{r5`SBWe-91%3vMX0~`rUBrFSzQ182 zt7NwA#6+>`Q-#fWT4T`XLc>DDvu9?;#xu>@R!wfti04HTR!@EQ!ymik|7-Ye8ub4+ z;0)&EdKmR2fw`lr=>j$Xl~*r98`Nnn&z?QYx^Lf&?@>=-=C54Y%R+pXYOeyaM7~H2 zALg_F7a?fp+c3I7gQ{$0%qrjdMH-8g%QV`+83c4kM@E)v4N$>_5*4=jzuf*slKSk9 zWz1biXncL%7G8s#gY-coLrYz<4*LM(fNs}Da=$Gsyaxcl*B4b4a2I;8yU`&9`U1F( zuV24LJ3NdT6zq`*;z1vG$X$PacFL`!iIEZQ*#vvMF9}Qr@8FZ-2cC-0!z=T z8&3T8f0B>%{C|#DNgAuf+1Yt!cKt1qxb=;^vQ7b;hj!&{q~mZeNL*={MLxfOAJ-QA zFYCXWs^W$6@c^W=Yw`Q zAz&^0_v{G^55KV&u<)%f2n^63pH;e;_J{hP8;GZL!x(NVHn$foBeIp%)t8uf`T19& zaw3}mWE{2)*`qaZ9*muqkN}Z1lKEdSyAPJ{BsvxlfYV-mb7`>fKP362^D?K^EOk(h z6vB&KLgW$nnOK70mmxz!Y4u%QowAHZmadM}%D)Q@CxRYy(}sY)1G;pb;9HZL{j`8y z(FE~UsCRT!5|KG=fpyOr^%MWVi3d0_s8})y*e{$^!TZ202ejm1Z7sOtwP{n8(!bQ} zfgut%Uf$i%S~yW5Z)3BrVeK!EY*|5rTeA%=n5m0>{Z-zmL6R1wdE9MLM+UU*$B*~# z-Fqcx3YSf;hZBm|{uliqTYwJ^_zc3Kl~{hM$BGLv2g@B(T&ay=ukk42{RmcBTJ*mg z`-mHZC8iC5`$VSYIn$&GrW7igyF!S0wJ=nvAo)BOybh@`Cs`wq=b9P?*DvVz=lZv| z(6fmucsKGGXp|pAfx}oSzK-|Gt5dBzX=q}Q-Qe~Kxv%Q>tB1( ztTw#E&BLRV7+-fqOL~fFiRe7QQYGgAc0U)ro>`K1he=RcfxcsA29Xf0@lJyB0h^7A zbpN;%ngKu zDwj&4k&-;;^?l5lz1KQxuf5M+`>b`=S*Jf*Ggdu4&-eHJem?Kd`~CiWzW)Sq?Pz~_ z9jl7uxuG~KyR9kuTYvcy27aaaO%D5ruRiDH8C_zl9C=HA^kWtIY}_+HJ9m}*{Rf8F z>f{KJ5#K8Re~Eg`vBz;gGW{YCJmr`EF0>z`BpU$YWF+b|E31(| z<8PxH-LQWBJ<55y-q&3K#u0l&z7Ul^L;S~Mmq7%qdyt+)D2lH|97D{>Q>&20-hG9S zJ^q+4IE2A=${ji|0(PFGW4L~4?R$KF1Y;bf_{$f~o;{nMA}S$jM)#~W;uQktRDAW( z0w==(E+Dbo-mU-ssHhBhIuwkE=<>n<{I+B*B781K zYVwub+{JKupIG(Ke|CsyX@BSDgvJ((+sD3V=(Y*_%&I4?R_5j+D6b zryl@$kgae5o-f~+^!%Yb5JwF^s3hy)>G?)o*M8DaNMsmTMb zST1JI!k$OxHTaLkCA2SEytwh*yYN>qxNkpvFnN7m{=pJ*J2uGFG&E`|Dn@*v0#MKH z$-j0Kg5l>SEKzKlW4_{tCd{hr z;;EQHGl#3;?saiP>CA%#?kAXFHn|HFz zp;8xbc-@oi?2L@&ABL7wxjgt%+(q?L`25y*`JH131`DSADSJcXl0S+v#ZQ$hsNgAv zrpz>_mjFNu$VtrlQgNQ>UXi5pM~Ow#hCu&nJvfm4H8l$t*NEU4Yw%5uL&V>>N`QZB z%%{kCj=vEJ=YJ>bPT7GGlkY$-?2EuYR1&_&*UPiSLg$VR)uM%qfxlLPY z7ytnz;|Y_zC=QLM?~$+7nO_J0CYzj8Qn=qZ3H-vbzyf2%1R)Mg6~qb*I_lzYO-%-?)zCJH!*+=Le*dp7FQo^t^?ZJclOyLv)<;=~^>t4U6cw0Bt>B~+Mvg)2agS7C4 z&~(#6eirTgsZM5h-^)K;LS@H>fLpKDu`V`uF8(7k1ANGEZEc}5ur1Qd$B2)Rqqn+K 
z+W`Dhd{$j8n7Ub1J7<^u05v3?1>JUgnV-KuN1na9k5Bm1G~gQ*oO}9W@!Nr>-{i<# z*iTw?Zk+b6wFfX;6=EcI&h&DFBeT>71+;IKXQ?O?jxO}ZWNlRF9}I&~uV|0y8OqKs znYL8CnxuP0S4NFYzLL@5(#4F7;9#$LUvx8fMTO5?Bj7A3rlnWryY*7tDn|ju{dh@2 zv85{$8wisA$HWLG=vO)2T=v&jWllh<-^VtDmrbx1==SG#d=HV#pQasZ0Vf2Flna9G z8ovPAn>~xR$b$Z@)kwQ$S;wwj!HrR02yYH58sEy5b(5B|8&hyIGgu}4);D4qa#pVC z)E3a^9~B&H+Fv#Tzf*gu>hG0wX_zk}p!dlPW7I*2nnHK}B-qvJA|!tqAdD?;$e@7y`(MZ!Z0 zY$yQCU^OL`i^dhtNnRVJsfHf!h)fL;6gTdQrl6b^u@@mv`8cMX&SqSpV*|}E`{M``(C5;TJy}(=7VA}Q%YJr zU=*>nGK74u{D&xV#KrRHQhY^Uh^&E*d_l8Oje)!R(u&$4HvZ#bXFh5PIfmL`|1pKw zUL@EVmaw3F_*@usJ#jT;^4ek-Vvl+#+{3BDA9g&>STo78U;qAOfLV`K5)}qC%hB4u z*{krcp{S|ei3_qX2aho`i@u;1+x~Q+OSf?K)MTI7)I~A5{Z19ue!a{2q=~m`f|+oD zQxc$L&vA5&y3ji|{t>J-BlwCeWS0;P#1e*lfBYf7JPsPByqS+xtZhOaj^4qxU_ttJHc#lzuk!GRiCB+8ZCBd7Q zUf|ik36eV2SVw{l-_s6R7gDm!h!bX@qod>7tG1s`7#Wj`N4x?&xeK8cbBd->sCTNn zMwYWIc7fxT9bym^=kI8;=(VTh60b-xp3UIaU@T7q%zSQQ#3){+s0!sWhUOnw@(>}@ zJm0tUAW9>t59N!NFzXW=@GQz@|5-PFx}`h=n1|SgaJ8l>g^S z0WB?59x*+T0+yy;hGnJbRpR?Z_EnBsa=P%a0cKt?QPuJap6kpP2kBG0A%(s#TTm2Z zHk9IW2^t4!UHq_J(bfR-zz1$imN324+LEQE`>bmv%nY*Q554X|t*!dC+ZL<22$yM# z1g1K2#H#)QCTXbWh%ERcz095GB{hLafMMC{;hWkYjO$kNZSqY-lG=4+bDN1i{rmTy ze2iWr(yNZ?HlFI7tc~eudh^oVxq#0!cRkDuwx1()UcSC%?&*V`wp zKc2R;*%!~wO$pn;>?^1`x3H!_Y3;4UL1|HxiVxE^(Lp%1`pPB*?9M7GT`Mms$NHc& zY{qL9{-jE$V*`tqkF;mK#=?+&C7)*C1g~hRxQxgCCN@CrufH zqIAlM+EH8b3XoeOb}|0JJmFe{Z$0(CPG7MoWJ`fgIejSetd^$jwb?r^5cDjD44EP< zpMw5MQ-+QcS8%O;KGW6pn9E;yT;wOVvKCGUyOYkQTMC(cE3OpUHc;emPR*kXb)UqFf=cjSwcf+3l9zd~PPcM;CBYbO>^E}+YrtVp@PiQQOHXZ_F?MxDVA^fe<=la_mplgM`b(f2HHlX|PbumR|MUrvset#NW!Ck#W2}{2S60k~N z@}3=Q#vTEQ@wcV^Jy%Z`M;$#Q?RLhRO3pD>-pu06Dv{@9y0*yw^(}&@f-$SnWjIF9>q) zamELwBrQyO#Uw z7bUpHO!5{}amZpcZqA$c&_dQygqThv5EbjLkdWD$dLM|(7Oiz>29GlTeP43|rBjs8 zXN;($Hcs4iFtFwnNsDC-z_gANMr$OBFyZKO!>=lF()B76yjj4rl$UCqR8@(P-h@!z z;&zq5XyX;VG&P-@I?WS?N+R}TSLj)3+pMkL-O@8DtJq{98Y;0rYmWQKn(!%Kx-h%jkouedvfTu&O*ebt8WC(E<={6qbm4`J(7a%S?! 
z3;-VvSSB*w#_q?Z=5(LCknF?~^iHu)5)t#|MC0vFY#3?#-?s;K1}$`^0iXR=u1vO1 z(r@|8bb5Y%PCNYQt@`={`i^vY{}QAn_!B5$5WrK5z8iP~(0)1@uRpP{>(D^dgpb|p z$hC&8rhCU0$5BUO_L`g0gkGhZ_|xv)4x_OM#L7Q5o55!9TTZ?3%X$>>AK_gM4GrCb zyq%}MR9K9zGdyV-I(wYYt92IyfgcmZ;#(Hv!^iMmW0{5Mo6g!eW>yvtAcv#2X zu`E|i7^Txq0D_(G7V4{U5Qj%RCGVv^+MfhIfqx#J3F=h8QB)Lw(LUVGl;=V{7q$s= zUGLn2fk@^yUcGBCb-RVBXR4`->ZeG!nM-f4_0?MB=_vyea)69=b#+IM94Y92(PjDD z%i9n*`!CFj)dFacx6^~0TUr2EcJrC;a68dXH#ZX0XF{vp8?FKA70TY+yJ?0tkHw9$ zbNlh7-mmTq9yoBq(c4$9UL8I_T`iP6ZGiK>Sg+L7wuYj2cH^%qh6oTT#g?U+HTymqcC^l7F(O*GxFF_TxngPK+rj)1nQTGJp?z#{J>wXG z=0b{cbOk!bm_IDCd*rj1FPrh5ssoC-D>-~eS&iv1?pXMd{CfB+e94U(x8eZ2Uwf~vrHxqG&1ihIc8Jgkf6 zNxX9)e8Sfn60B@A`_Jz+qkqjEpf#P-GiKx?^nCi{hvS+JC8nJ6L%;{WTe&^wys#ER zv`P8;qohM}6RIQ}|0HHIwMbn2>^(nSk;6RvMwI z^t(|JYe+$$AqE&d$5R(ykC5{I8`QE`E4W>)uln?vvxf2*gUy>R`1>bh33ebsm|`ql zNg&zuu_?Pjm`dH6tYD(8p`h^sl&uz(7tUU!&H>LF5Y0Cl7L*+U2;WD|{X5cNy` zETMf+M|8*VCVGUT>{&3(#n6)^s`c*948u8d=h`>8gH-T&bne#8qcnLRC@ea99_62y zKocGw?o}HReNDCR=$V$>^+mb_Rxx4z1(#M}eB8Eu``-P{$1U2XAHCg0!)RKI7nHSB z|E-OUP5$Ne7%*t~ASWZkITOE zW$Yxy!Sc7h+(@HZ`xEdn72}0R0dWB!F;ojE;jV0mj)XuQN%>xX?{p>Rv#~L>pf=*c z1Pp-O!8#`wdN9%o6NvBw2`4>Kl_E1FI9QA=aq~FkV9)p?s^+By`^8B`;h3M!meTx93mfa}yT^-2jh0uqO#oU6I<6jYg;z{h>v!`q_*ELSiK3nPV%&7n zDi@Z-qzsz6VVSj#+*5=WK|G2kHFifdlv+dgE=9y5bz#4|Sh?WqHXKV+Hxir*yg0(hX9o`g2}@L^wr?Iv4& z)PCBv%Wib}=GIkY!7#(Gkh9>DU%tFP6tj_cC()jzK5A=Yn!|0~vZ75JHh8}&Z<}~G zyXAS`>e9Ew9G!?Q>b%ADGiQ2)(%BH%+8JLG?<=l7ZI}_eu&YY_dJhJ^#n#21Jn5-y zD4O-tf~acQz`DN3-#U{;2)YWW=+nLXK)3qIhhk!~R+Jg2(Gle!ZND-I+H6=~#G2|} z*kgMT&w2{$Hv(^@{!9E<_a_c+?0fu?g~`1K5BB!2yM1(r*EZP#h;VkAs3S8n?!By3 zlr%IC5^L)rwfl`gVa5Ol7twn_No`W+{}=j#hV>n6c+l|bDdD7!^I=jsV~(yTq~^as z+^Mg3zOjfjk6#eqEs7wGQ`|bmn1O}ZgZRWDyLJt}sU)e<6v4NqiN42SLyth;cMX!h zZzNaOaOjoSM(nt0%7!yF9hY$J?p>SvaOQD75t|lW(xEqWz=_QS63|U8Ko{l6Qd?KI zR$P5c`!h^^HJ446Wq-Cie%H=(2MuD~#iQYhtHFicD~J_IkKSZG%gf#w9QN>wad3d` z+S*cmw_I*#nyZoO%x!}-DNCLgd zOvl^$Q#)=ew$9j^ubioUc80TY*|UMew*6p^iNAd8$A7ck<==?gVut39a`+%Zv zitgGxYcc;ATDIas<%f$Gi#}d%E&tH^H1~-|`F4U_$kk_DqbM#ENU=X2AAe_?5@ktD zRMfqiSCWNmJFb^>d_#WPH*-kK3f+tA+WY`x{wB=nGVw+U5?kJ<^&cTd$ zJiEDT6CV0|_Z&BNDW9vaua5~d)pyg!xb~7diK3?DS2MgT*opUO?_(0QeG;*0`sVZg zPU6|U`mvGPO5e~>*0|j(^FV59Hjl*6=ggC@;6ixL06z<-@wc#Pj=x7Hm6`RHd~#@K zE~#s`JeAunTM!T}Hh-E{dV2Ht>0y531BjQ})1lvRCsHBWW}m%qVIy7_vpZQmV!dqZKN zS@#CXsUBy~o$J+k8?o@WR~t8PKAGKsBU6m*MO1wE&MR*A30s}_NtiFHy;?Z8(IZ^} z0GN2d(JTiR<}hun?Kih8eTG^Wu6*oN^XDg*xWBTHmFe{C3>(1~c3ajxId;h^`6he{L11SF@XddPbV zb3B`sg+*PFPmR(UD4JRWB|L43q}@Pa4jm{_5N0w3HmF3>aRPBiGC}OH|NoWd9>-sJ W`S!nXDxbxWOrQL{W$JeefBX-Ed!9G| diff --git a/classification.html b/classification.html index 3a32d23..82e38c2 100644 --- a/classification.html +++ b/classification.html @@ -1016,10 +1016,10 @@

4.2.1 Question 13
(t <- table(fit, Weekly[!train, ]$Direction))
##       
 ## fit    Down Up
-##   Down   21 29
-##   Up     22 32
+##   Down   21 30
+##   Up     22 31
sum(diag(t)) / sum(t)
-## [1] 0.5096154
+## [1] 0.5
  1. Repeat (d) using naive Bayes.
  2.
diff --git a/deep-learning.html b/deep-learning.html
index f737ad8..513a328 100644
--- a/deep-learning.html
+++ b/deep-learning.html
@@ -750,9 +750,9 @@

10.2.2 Question 7
plot(history, smooth = FALSE)

    npred <- predict(nn, x[testid, ])
-## 6/6 - 0s - 54ms/epoch - 9ms/step
+## 6/6 - 0s - 61ms/epoch - 10ms/step
    mean(abs(y[testid] - npred))
-## [1] 2.334041
+## [1] 2.219039

In this case, the neural network outperforms logistic regression, achieving a lower mean absolute error on the test data.
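
As a quick side-by-side check, a minimal sketch (here `lpred` is a hypothetical name for the test-set predictions of the regression model fitted earlier in this solution; it is not shown in this hunk):

``` r
# Compare test-set mean absolute error of the two models.
# `lpred` is assumed to hold the earlier model's predictions for x[testid, ].
c(
  regression = mean(abs(y[testid] - lpred)),
  neural_net = mean(abs(y[testid] - npred))
)
```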

    @@ -779,14 +779,19 @@

    10.2.3 Question 8
    pred <- model |>
       predict(x) |>
       imagenet_decode_predictions(top = 5)
    @@ -1004,7 +1009,7 @@

    10.2.5 Question 10
    kpred <- predict(model, xrnn[!istrain,, ])
    ## 56/56 - 0s - 58ms/epoch - 1ms/step
    1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0
-## [1] 0.4133125
+## [1] 0.412886

    Both models estimate the same number of coefficients/weights (16):

    coef(arfit)
    ##       (Intercept)      L1.DJ_return     L1.log_volume L1.log_volatility 
    @@ -1023,25 +1028,25 @@ 

    10.2.5 Question 10
    model$get_weights()
    ## [[1]]
    -##              [,1]
    -##  [1,] -0.03262059
    -##  [2,]  0.09806149
    -##  [3,]  0.19123746
    -##  [4,] -0.00672294
    -##  [5,]  0.11956818
    -##  [6,] -0.08616812
    -##  [7,]  0.03884261
    -##  [8,]  0.07576967
    -##  [9,]  0.16982540
    -## [10,] -0.02789208
    -## [11,]  0.02615459
    -## [12,] -0.76362336
    -## [13,]  0.09488130
    -## [14,]  0.51370680
    -## [15,]  0.48065400
    +##               [,1]
    +##  [1,] -0.031145222
    +##  [2,]  0.101065643
    +##  [3,]  0.141815767
    +##  [4,] -0.004181504
    +##  [5,]  0.116010934
    +##  [6,] -0.003764492
    +##  [7,]  0.038601257
    +##  [8,]  0.078083567
    +##  [9,]  0.137415737
    +## [10,] -0.029184511
    +## [11,]  0.036070298
    +## [12,] -0.821708620
    +## [13,]  0.095548652
    +## [14,]  0.511229098
    +## [15,]  0.521453559
     ## 
     ## [[2]]
    -## [1] -0.005785846
    +## [1] -0.006889343
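
That count of 16 can be verified directly (a sketch, assuming `arfit` and `model` as defined in the lab code above):

``` r
length(coef(arfit))                       # 16: intercept + 15 lagged predictors
sum(sapply(model$get_weights(), length))  # 16: a 15 x 1 kernel plus 1 bias
```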

The flattened RNN has a lower \(R^2\) on the test data than our lm model above. The lm model is also quicker to fit and conceptually simpler, and it lets us inspect the coefficients for the individual variables.
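
For reference, the linear model's test \(R^2\) can be recomputed on the same scale (a sketch, assuming `arfit`, `arframe`, `istrain` and `V0` from the lab code):

``` r
# Test R^2 of the linear AR model, comparable to the RNN value above.
arpred <- predict(arfit, arframe[!istrain, ])
1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0
```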

    @@ -1087,8 +1092,8 @@

10.2.6 Question 11
1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 })
-## 56/56 - 0s - 64ms/epoch - 1ms/step
-## [1] 0.4267343
+## 56/56 - 0s - 66ms/epoch - 1ms/step
+## [1] 0.4271516

    This approach improves our \(R^2\) over the linear model above.

    @@ -1150,8 +1155,8 @@

10.2.7 Question 12
1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 })

-## 56/56 - 0s - 136ms/epoch - 2ms/step
-## [1] 0.4447892
+## 56/56 - 0s - 133ms/epoch - 2ms/step
+## [1] 0.4405331
diff --git a/linear-regression.html b/linear-regression.html
(regenerated plotly htmlwidget markup; raw <div>/<script> diff omitted)

    Option iii correct.
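
Briefly, the algebra behind this: fixing IQ and GPA, switching Level from high school (0) to college (1) changes the predicted salary by \(\Delta y = \hat\beta_3 + \hat\beta_5 \times \text{GPA} = 35 - 10 \times \text{GPA}\), which is negative whenever GPA \(> 3.5\). So for sufficiently high GPA, high school graduates are predicted to earn more, which is option iii.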

      diff --git a/resampling-methods.html b/resampling-methods.html index c5afb52..2aaeaea 100644 --- a/resampling-methods.html +++ b/resampling-methods.html @@ -554,7 +554,7 @@

      5.1.2 Question 2
      store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0)
       mean(store)
-## [1] 0.6424
+## [1] 0.6308

      The probability of including \(4\) when resampling numbers \(1...100\) is close to \(1 - (1 - 1/100)^{100}\).
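
Evaluating the closed-form value directly shows the simulation is close:

``` r
1 - (1 - 1 / 100)^100  # 0.6339677, in line with the simulated 0.6308
```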

      diff --git a/search_index.json b/search_index.json index 9511c04..406e819 100644 --- a/search_index.json +++ b/search_index.json @@ -1 +1 @@ -[["index.html", "An Introduction to Statistical Learning Exercise solutions in R 1 Introduction", " An Introduction to Statistical Learning Exercise solutions in R 1 Introduction This bookdown document provides solutions for exercises in the book “An Introduction to Statistical Learning with Applications in R”, second edition, by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. "],["statistical-learning.html", "2 Statistical Learning 2.1 Conceptual 2.2 Applied", " 2 Statistical Learning 2.1 Conceptual 2.1.1 Question 1 For each of parts (a) through (d), indicate whether we would generally expect the performance of a flexible statistical learning method to be better or worse than an inflexible method. Justify your answer. The sample size \\(n\\) is extremely large, and the number of predictors \\(p\\) is small. Flexible best - opposite of b. The number of predictors \\(p\\) is extremely large, and the number of observations \\(n\\) is small. Inflexible best - high chance of some predictors being randomly associated. The relationship between the predictors and response is highly non-linear. Flexible best - inflexible leads to high bias. The variance of the error terms, i.e. \\(\\sigma^2 = Var(\\epsilon)\\), is extremely high. Inflexible best - opposite of c. 2.1.2 Question 2 Explain whether each scenario is a classification or regression problem, and indicate whether we are most interested in inference or prediction. Finally, provide \\(n\\) and \\(p\\). We collect a set of data on the top 500 firms in the US. For each firm we record profit, number of employees, industry and the CEO salary. We are interested in understanding which factors affect CEO salary. \\(n=500\\), \\(p=3\\), regression, inference. We are considering launching a new product and wish to know whether it will be a success or a failure. We collect data on 20 similar products that were previously launched. For each product we have recorded whether it was a success or failure, price charged for the product, marketing budget, competition price, and ten other variables. \\(n=20\\), \\(p=13\\), classification, prediction. We are interested in predicting the % change in the USD/Euro exchange rate in relation to the weekly changes in the world stock markets. Hence we collect weekly data for all of 2012. For each week we record the % change in the USD/Euro, the % change in the US market, the % change in the British market, and the % change in the German market. \\(n=52\\), \\(p=3\\), regression, prediction. 2.1.3 Question 3 We now revisit the bias-variance decomposition. Provide a sketch of typical (squared) bias, variance, training error, test error, and Bayes (or irreducible) error curves, on a single plot, as we go from less flexible statistical learning methods towards more flexible approaches. The x-axis should represent the amount of flexibility in the method, and the y-axis should represent the values for each curve. There should be five curves. Make sure to label each one. Explain why each of the five curves has the shape displayed in part (a). (squared) bias: Decreases with increasing flexibility (Generally, more flexible methods result in less bias). variance: Increases with increasing flexibility (In general, more flexible statistical methods have higher variance). 
training error: Decreases with model flexibility (More complex models will better fit the training data). test error: Decreases initially, then increases due to overfitting (less bias but more training error). Bayes (irreducible) error: fixed (does not change with model). 2.1.4 Question 4 You will now think of some real-life applications for statistical learning. Describe three real-life applications in which classification might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Coffee machine cleaned? (day of week, person assigned), inference. Is a flight delayed? (airline, airport etc), inference. Beer type (IPA, pilsner etc.), prediction. Describe three real-life applications in which regression might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Amount of bonus paid (profitability, client feedback), prediction. Person’s height, prediction. House price, inference. Describe three real-life applications in which cluster analysis might be useful. RNAseq tumour gene expression data. SNPs in human populations. Frequencies of mutations (with base pair context) in somatic mutation data. 2.1.5 Question 5 What are the advantages and disadvantages of a very flexible (versus a less flexible) approach for regression or classification? Under what circumstances might a more flexible approach be preferred to a less flexible approach? When might a less flexible approach be preferred? Inflexible is more interpretable, fewer observations required, can be biased. Flexible can overfit (high error variance). In cases where we have high \\(n\\) or non-linear patterns flexible will be preferred. 2.1.6 Question 6 Describe the differences between a parametric and a non-parametric statistical learning approach. What are the advantages of a parametric approach to regression or classification (as opposed to a non-parametric approach)? What are its disadvantages? Parametric uses (model) parameters. Parametric models can be more interpretable as there is a model behind how data is generated. However, the disadvantage is that the model might not reflect reality. If the model is too far from the truth, estimates will be poor and more flexible models can fit many different forms and require more parameters (leading to overfitting). Non-parametric approaches do not estimate a small number of parameters, so a large number of observations may be needed to obtain accurate estimates. 2.1.7 Question 7 The table below provides a training data set containing six observations, three predictors, and one qualitative response variable. Obs. \\(X_1\\) \\(X_2\\) \\(X_3\\) \\(Y\\) 1 0 3 0 Red 2 2 0 0 Red 3 0 1 3 Red 4 0 1 2 Green 5 -1 0 1 Green 6 1 1 1 Red Suppose we wish to use this data set to make a prediction for \\(Y\\) when \\(X_1 = X_2 = X_3 = 0\\) using \\(K\\)-nearest neighbors. Compute the Euclidean distance between each observation and the test point, \\(X_1 = X_2 = X_3 = 0\\). dat <- data.frame( "x1" = c(0, 2, 0, 0, -1, 1), "x2" = c(3, 0, 1, 1, 0, 1), "x3" = c(0, 0, 3, 2, 1, 1), "y" = c("Red", "Red", "Red", "Green", "Green", "Red") ) # Euclidean distance between points and c(0, 0, 0) dist <- sqrt(dat[["x1"]]^2 + dat[["x2"]]^2 + dat[["x3"]]^2) signif(dist, 3) ## [1] 3.00 2.00 3.16 2.24 1.41 1.73 What is our prediction with \\(K = 1\\)? Why? 
knn <- function(k) { names(which.max(table(dat[["y"]][order(dist)[1:k]]))) } knn(1) ## [1] "Green" Green (based on data point 5 only) What is our prediction with \\(K = 3\\)? Why? knn(3) ## [1] "Red" Red (based on data points 2, 5, 6) If the Bayes decision boundary in this problem is highly non-linear, then would we expect the best value for \\(K\\) to be large or small? Why? Small (high \\(k\\) leads to linear boundaries due to averaging) 2.2 Applied 2.2.1 Question 8 This exercise relates to the College data set, which can be found in the file College.csv. It contains a number of variables for 777 different universities and colleges in the US. The variables are Private : Public/private indicator Apps : Number of applications received Accept : Number of applicants accepted Enroll : Number of new students enrolled Top10perc : New students from top 10% of high school class Top25perc : New students from top 25% of high school class F.Undergrad : Number of full-time undergraduates P.Undergrad : Number of part-time undergraduates Outstate : Out-of-state tuition Room.Board : Room and board costs Books : Estimated book costs Personal : Estimated personal spending PhD : Percent of faculty with Ph.D.’s Terminal : Percent of faculty with terminal degree S.F.Ratio : Student/faculty ratio perc.alumni : Percent of alumni who donate Expend : Instructional expenditure per student Grad.Rate : Graduation rate Before reading the data into R, it can be viewed in Excel or a text editor. Use the read.csv() function to read the data into R. Call the loaded data college. Make sure that you have the directory set to the correct location for the data. college <- read.csv("data/College.csv") Look at the data using the View() function. You should notice that the first column is just the name of each university. We don’t really want R to treat this as data. However, it may be handy to have these names for later. Try the following commands: rownames(college) <- college[, 1] View(college) You should see that there is now a row.names column with the name of each university recorded. This means that R has given each row a name corresponding to the appropriate university. R will not try to perform calculations on the row names. However, we still need to eliminate the first column in the data where the names are stored. Try college <- college [, -1] View(college) Now you should see that the first data column is Private. Note that another column labeled row.names now appears before the Private column. However, this is not a data column but rather the name that R is giving to each row. rownames(college) <- college[, 1] college <- college[, -1] Use the summary() function to produce a numerical summary of the variables in the data set. Use the pairs() function to produce a scatterplot matrix of the first ten columns or variables of the data. Recall that you can reference the first ten columns of a matrix A using A[,1:10]. Use the plot() function to produce side-by-side boxplots of Outstate versus Private. Create a new qualitative variable, called Elite, by binning the Top10perc variable. We are going to divide universities into two groups based on whether or not the proportion of students coming from the top 10% of their high school classes exceeds 50%. > Elite <- rep("No", nrow(college)) > Elite[college$Top10perc > 50] <- "Yes" > Elite <- as.factor(Elite) > college <- data.frame(college, Elite) Use the summary() function to see how many elite universities there are. 
Now use the plot() function to produce side-by-side boxplots of Outstate versus Elite. Use the hist() function to produce some histograms with differing numbers of bins for a few of the quantitative variables. You may find the command par(mfrow=c(2,2)) useful: it will divide the print window into four regions so that four plots can be made simultaneously. Modifying the arguments to this function will divide the screen in other ways. Continue exploring the data, and provide a brief summary of what you discover. summary(college) ## Private Apps Accept Enroll ## Length:777 Min. : 81 Min. : 72 Min. : 35 ## Class :character 1st Qu.: 776 1st Qu.: 604 1st Qu.: 242 ## Mode :character Median : 1558 Median : 1110 Median : 434 ## Mean : 3002 Mean : 2019 Mean : 780 ## 3rd Qu.: 3624 3rd Qu.: 2424 3rd Qu.: 902 ## Max. :48094 Max. :26330 Max. :6392 ## Top10perc Top25perc F.Undergrad P.Undergrad ## Min. : 1.00 Min. : 9.0 Min. : 139 Min. : 1.0 ## 1st Qu.:15.00 1st Qu.: 41.0 1st Qu.: 992 1st Qu.: 95.0 ## Median :23.00 Median : 54.0 Median : 1707 Median : 353.0 ## Mean :27.56 Mean : 55.8 Mean : 3700 Mean : 855.3 ## 3rd Qu.:35.00 3rd Qu.: 69.0 3rd Qu.: 4005 3rd Qu.: 967.0 ## Max. :96.00 Max. :100.0 Max. :31643 Max. :21836.0 ## Outstate Room.Board Books Personal ## Min. : 2340 Min. :1780 Min. : 96.0 Min. : 250 ## 1st Qu.: 7320 1st Qu.:3597 1st Qu.: 470.0 1st Qu.: 850 ## Median : 9990 Median :4200 Median : 500.0 Median :1200 ## Mean :10441 Mean :4358 Mean : 549.4 Mean :1341 ## 3rd Qu.:12925 3rd Qu.:5050 3rd Qu.: 600.0 3rd Qu.:1700 ## Max. :21700 Max. :8124 Max. :2340.0 Max. :6800 ## PhD Terminal S.F.Ratio perc.alumni ## Min. : 8.00 Min. : 24.0 Min. : 2.50 Min. : 0.00 ## 1st Qu.: 62.00 1st Qu.: 71.0 1st Qu.:11.50 1st Qu.:13.00 ## Median : 75.00 Median : 82.0 Median :13.60 Median :21.00 ## Mean : 72.66 Mean : 79.7 Mean :14.09 Mean :22.74 ## 3rd Qu.: 85.00 3rd Qu.: 92.0 3rd Qu.:16.50 3rd Qu.:31.00 ## Max. :103.00 Max. :100.0 Max. :39.80 Max. :64.00 ## Expend Grad.Rate ## Min. : 3186 Min. : 10.00 ## 1st Qu.: 6751 1st Qu.: 53.00 ## Median : 8377 Median : 65.00 ## Mean : 9660 Mean : 65.46 ## 3rd Qu.:10830 3rd Qu.: 78.00 ## Max. :56233 Max. :118.00 college$Private <- college$Private == "Yes" pairs(college[, 1:10], cex = 0.2) plot(college$Outstate ~ factor(college$Private), xlab = "Private", ylab = "Outstate") college$Elite <- factor(ifelse(college$Top10perc > 50, "Yes", "No")) summary(college$Elite) ## No Yes ## 699 78 plot(college$Outstate ~ college$Elite, xlab = "Elite", ylab = "Outstate") par(mfrow = c(2,2)) for (n in c(5, 10, 20, 50)) { hist(college$Enroll, breaks = n, main = paste("n =", n), xlab = "Enroll") } chisq.test(college$Private, college$Elite) ## ## Pearson's Chi-squared test with Yates' continuity correction ## ## data: college$Private and college$Elite ## X-squared = 4.3498, df = 1, p-value = 0.03701 Whether a college is Private and Elite is not random! 2.2.2 Question 9 This exercise involves the Auto data set studied in the lab. Make sure that the missing values have been removed from the data. x <- read.table("data/Auto.data", header = TRUE, na.strings = "?") x <- na.omit(x) Which of the predictors are quantitative, and which are qualitative? 
sapply(x, class) ## mpg cylinders displacement horsepower weight acceleration ## "numeric" "integer" "numeric" "numeric" "numeric" "numeric" ## year origin name ## "integer" "integer" "character" numeric <- which(sapply(x, class) == "numeric") names(numeric) ## [1] "mpg" "displacement" "horsepower" "weight" "acceleration" What is the range of each quantitative predictor? You can answer this using the range() function. sapply(x[, numeric], function(x) diff(range(x))) ## mpg displacement horsepower weight acceleration ## 37.6 387.0 184.0 3527.0 16.8 What is the mean and standard deviation of each quantitative predictor? library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(knitr) x[, numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Mean = mean(value), SD = sd(value) ) |> kable() name Mean SD acceleration 15.54133 2.758864 displacement 194.41199 104.644004 horsepower 104.46939 38.491160 mpg 23.44592 7.805008 weight 2977.58418 849.402560 Now remove the 10th through 85th observations. What is the range, mean, and standard deviation of each predictor in the subset of the data that remains? x[-(10:85), numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Range = diff(range(value)), Mean = mean(value), SD = sd(value) ) |> kable() name Range Mean SD acceleration 16.3 15.72690 2.693721 displacement 387.0 187.24051 99.678367 horsepower 184.0 100.72152 35.708853 mpg 35.6 24.40443 7.867283 weight 3348.0 2935.97152 811.300208 Using the full data set, investigate the predictors graphically, using scatterplots or other tools of your choice. Create some plots highlighting the relationships among the predictors. Comment on your findings. pairs(x[, numeric], cex = 0.2) cor(x[, numeric]) |> kable() mpg displacement horsepower weight acceleration mpg 1.0000000 -0.8051269 -0.7784268 -0.8322442 0.4233285 displacement -0.8051269 1.0000000 0.8972570 0.9329944 -0.5438005 horsepower -0.7784268 0.8972570 1.0000000 0.8645377 -0.6891955 weight -0.8322442 0.9329944 0.8645377 1.0000000 -0.4168392 acceleration 0.4233285 -0.5438005 -0.6891955 -0.4168392 1.0000000 heatmap(cor(x[, numeric]), cexRow = 1.1, cexCol = 1.1, margins = c(8, 8)) Many of the variables appear to be highly (positively or negatively) correlated with some relationships being non-linear. Suppose that we wish to predict gas mileage (mpg) on the basis of the other variables. Do your plots suggest that any of the other variables might be useful in predicting mpg? Justify your answer. Yes, since other variables are correlated. However, horsepower, weight and displacement are highly related. 2.2.3 Question 10 This exercise involves the Boston housing data set. To begin, load in the Boston data set. The Boston data set is part of the ISLR2 library in R. > library(ISLR2) Now the data set is contained in the object Boston. > Boston Read about the data set: > ?Boston How many rows are in this data set? How many columns? What do the rows and columns represent? 
library(ISLR2) dim(Boston) ## [1] 506 13 Make some pairwise scatterplots of the predictors (columns) in this data set. Describe your findings. library(ggplot2) library(tidyverse) ggplot(Boston, aes(nox, rm)) + geom_point() ggplot(Boston, aes(ptratio, rm)) + geom_point() heatmap(cor(Boston, method = "spearman"), cexRow = 1.1, cexCol = 1.1) Are any of the predictors associated with per capita crime rate? If so, explain the relationship. Yes Do any of the census tracts of Boston appear to have particularly high crime rates? Tax rates? Pupil-teacher ratios? Comment on the range of each predictor. Boston |> pivot_longer(cols = 1:13) |> filter(name %in% c("crim", "tax", "ptratio")) |> ggplot(aes(value)) + geom_histogram(bins = 20) + facet_wrap(~name, scales="free", ncol= 1) Yes, particularly crime and tax rates. How many of the census tracts in this data set bound the Charles river? sum(Boston$chas) ## [1] 35 What is the median pupil-teacher ratio among the towns in this data set? median(Boston$ptratio) ## [1] 19.05 Which census tract of Boston has lowest median value of owner-occupied homes? What are the values of the other predictors for that census tract, and how do those values compare to the overall ranges for those predictors? Comment on your findings. Boston[Boston$medv == min(Boston$medv), ] |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 399 38.3518 0 18.1 0 0.693 5.453 100 1.4896 24 666 20.2 30.59 5 406 67.9208 0 18.1 0 0.693 5.683 100 1.4254 24 666 20.2 22.98 5 sapply(Boston, quantile) |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 0% 0.006320 0.0 0.46 0 0.385 3.5610 2.900 1.129600 1 187 12.60 1.730 5.000 25% 0.082045 0.0 5.19 0 0.449 5.8855 45.025 2.100175 4 279 17.40 6.950 17.025 50% 0.256510 0.0 9.69 0 0.538 6.2085 77.500 3.207450 5 330 19.05 11.360 21.200 75% 3.677083 12.5 18.10 0 0.624 6.6235 94.075 5.188425 24 666 20.20 16.955 25.000 100% 88.976200 100.0 27.74 1 0.871 8.7800 100.000 12.126500 24 711 22.00 37.970 50.000 In this data set, how many of the census tract average more than seven rooms per dwelling? More than eight rooms per dwelling? Comment on the census tracts that average more than eight rooms per dwelling. sum(Boston$rm > 7) ## [1] 64 sum(Boston$rm > 8) ## [1] 13 Let’s compare median statistics for those census tracts with more than eight rooms per dwelling on average, with the statistics for those with fewer. Boston |> mutate( `log(crim)` = log(crim), `log(zn)` = log(zn) ) |> select(-c(crim, zn)) |> pivot_longer(!rm) |> mutate(">8 rooms" = rm > 8) |> ggplot(aes(`>8 rooms`, value)) + geom_boxplot() + facet_wrap(~name, scales = "free") ## Warning: Removed 372 rows containing non-finite outside the scale range ## (`stat_boxplot()`). Census tracts with big average properties (more than eight rooms per dwelling) have higher median value (medv), a lower proportion of non-retail business acres (indus), a lower pupil-teacher ratio (ptratio), a lower status of the population (lstat) among other differences. "],["linear-regression.html", "3 Linear Regression 3.1 Conceptual 3.2 Applied", " 3 Linear Regression 3.1 Conceptual 3.1.1 Question 1 Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales, TV, radio, and newspaper, rather than in terms of the coefficients of the linear model. 
For intercept, that \\(\\beta_0 = 0\\) For the others, that \\(\\beta_n = 0\\) (for \\(n = 1, 2, 3\\)) We can conclude that that without any spending, there are still some sales (the intercept is not 0). Furthermore, we can conclude that money spent on TV and radio are significantly associated with increased sales, but the same cannot be said of newspaper spending. 3.1.2 Question 2 Carefully explain the differences between the KNN classifier and KNN regression methods. The KNN classifier is categorical and assigns a value based on the most frequent observed category among \\(K\\) nearest neighbors, whereas KNN regression assigns a continuous variable, the average of the response variables for the \\(K\\) nearest neighbors. 3.1.3 Question 3 Suppose we have a data set with five predictors, \\(X_1\\) = GPA, \\(X_2\\) = IQ, \\(X_3\\) = Level (1 for College and 0 for High School), \\(X_4\\) = Interaction between GPA and IQ, and \\(X_5\\) = Interaction between GPA and Level. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get \\(\\hat\\beta_0 = 50\\), \\(\\hat\\beta_1 = 20\\), \\(\\hat\\beta_2 = 0.07\\), \\(\\hat\\beta_3 = 35\\), \\(\\hat\\beta_4 = 0.01\\), \\(\\hat\\beta_5 = -10\\). Which answer is correct, and why? For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates. For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates provided that the GPA is high enough. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates provided that the GPA is high enough. The model is: \\(y = \\beta_0 + \\beta_1 \\text{GPA} + \\beta_2 \\text{IQ} + \\beta_3 \\text{Level} + \\beta_4 \\text{GPA} \\text{IQ} + \\beta_5 \\text{GPA} \\text{Level}\\) Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by: \\(\\Delta y = \\beta_3 + \\beta_5 \\text{GPA}\\) \\(\\Delta y > 0 \\Rightarrow \\beta_3 + \\beta_5 \\text{GPA} > 0 \\Rightarrow \\text{GPA} > \\dfrac{-\\beta3}{\\beta_5} = - \\dfrac{35}{-10} = 3.5\\) From a graphical standpoint: library(plotly) model <- function(gpa, iq, level) { 50 + gpa * 20 + iq * 0.07 + level * 35 + gpa * iq * 0.01 + gpa * level * -10 } x <- seq(1, 5, length = 10) y <- seq(1, 200, length = 20) college <- t(outer(x, y, model, level = 1)) high_school <- t(outer(x, y, model, level = 0)) plot_ly(x = x, y = y) |> add_surface( z = ~college, colorscale = list(c(0, 1), c("rgb(107,184,214)", "rgb(0,90,124)")), colorbar = list(title = "College")) |> add_surface( z = ~high_school, colorscale = list(c(0, 1), c("rgb(255,112,184)", "rgb(128,0,64)")), colorbar = list(title = "High school")) |> layout(scene = list( xaxis = list(title = "GPA"), yaxis = list(title = "IQ"), zaxis = list(title = "Salary"))) Option iii correct. Predict the salary of a college graduate with IQ of 110 and a GPA of 4.0. model(gpa = 4, iq = 110, level = 1) ## [1] 137.1 True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer. This is false. It is important to remember that GPA and IQ vary over different scales. It is better to explicitly test the significance of the interaction effect, and/or visualize or quantify the effect on sales under realistic ranges of GPA/IQ values. 
3.1.4 Question 4 I collect a set of data (\\(n = 100\\) observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. \\(Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon\\). Suppose that the true relationship between \\(X\\) and \\(Y\\) is linear, i.e. \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\). Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (a) using test rather than training RSS. Though we could not be certain, the test RSS would likely be higher due to overfitting. Suppose that the true relationship between \\(X\\) and \\(Y\\) is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (c) using test rather than training RSS. There is not enough information to tell, it depends on how non-linear the true relationship is. 3.1.5 Question 5 Consider the fitted values that result from performing linear regression without an intercept. In this setting, the ith fitted value takes the form \\[\\hat{y}_i = x_i\\hat\\beta,\\] where \\[\\hat{\\beta} = \\left(\\sum_{i=1}^nx_iy_i\\right) / \\left(\\sum_{i' = 1}^n x^2_{i'}\\right).\\] show that we can write \\[\\hat{y}_i = \\sum_{i' = 1}^na_{i'}y_{i'}\\] What is \\(a_{i'}\\)? Note: We interpret this result by saying that the fitted values from linear regression are linear combinations of the response values. \\[\\begin{align} \\hat{y}_i & = x_i \\frac{\\sum_{i=1}^nx_iy_i}{\\sum_{i' = 1}^n x^2_{i'}} \\\\ & = x_i \\frac{\\sum_{i'=1}^nx_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\frac{\\sum_{i'=1}^n x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} y_{i'} \\end{align}\\] therefore, \\[a_{i'} = \\frac{ x_i x_{i'}}{\\sum x^2}\\] 3.1.6 Question 6 Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point \\((\\bar{x}, \\bar{y})\\). when \\(x = \\bar{x}\\) what is \\(y\\)? \\[\\begin{align} y &= \\hat\\beta_0 + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} - \\hat\\beta_1\\bar{x} + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} \\end{align}\\] 3.1.7 Question 7 It is claimed in the text that in the case of simple linear regression of \\(Y\\) onto \\(X\\), the \\(R^2\\) statistic (3.17) is equal to the square of the correlation between \\(X\\) and \\(Y\\) (3.18). Prove that this is the case. For simplicity, you may assume that \\(\\bar{x} = \\bar{y} = 0\\). 
We have the following equations: \\[ R^2 = \\frac{\\textit{TSS} - \\textit{RSS}}{\\textit{TSS}} \\] \\[ Cor(x,y) = \\frac{\\sum_i (x_i-\\bar{x})(y_i - \\bar{y})}{\\sqrt{\\sum_i(x_i - \\bar{x})^2}\\sqrt{\\sum_i(y_i - \\bar{y})^2}} \\] Note that the summation index is only a label, so \\(\\sum_i x_iy_i = \\sum_j x_jy_j\\). Since \\(\\bar{x} = \\bar{y} = 0\\), the squared correlation simplifies to \\[ Cor(x,y)^2 = \\frac{(\\sum_ix_iy_i)^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\] Also note that: \\[\\hat{y}_i = \\hat\\beta_0 + \\hat\\beta_1x_i = x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}\\] Therefore, given that \\(\\textit{RSS} = \\sum_i(y_i - \\hat{y}_i)^2\\) and \\(\\textit{TSS} = \\sum_i(y_i - \\bar{y})^2 = \\sum_iy_i^2\\) \\[\\begin{align} R^2 &= \\frac{\\sum_iy_i^2 - \\sum_i(y_i - x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2} {\\sum_iy_i^2} \\\\ &= \\frac{\\sum_iy_i^2 - \\sum_i( y_i^2 - 2y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} + x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2 )}{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}) - \\sum_i(x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2) }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i) \\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} - \\sum_i(x_i^2) \\frac{(\\sum_j{x_jy_j})^2}{(\\sum_jx_j^2)^2} }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} - \\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} }{\\sum_iy_i^2} \\\\ &= \\frac{(\\sum_i{x_iy_i})^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\end{align}\\] 3.2 Applied 3.2.1 Question 8 This question involves the use of simple linear regression on the Auto data set. Use the lm() function to perform a simple linear regression with mpg as the response and horsepower as the predictor. Use the summary() function to print the results. Comment on the output. For example: Is there a relationship between the predictor and the response? How strong is the relationship between the predictor and the response? Is the relationship between the predictor and the response positive or negative? What is the predicted mpg associated with a horsepower of 98? What are the associated 95% confidence and prediction intervals? library(ISLR2) fit <- lm(mpg ~ horsepower, data = Auto) summary(fit) ## ## Call: ## lm(formula = mpg ~ horsepower, data = Auto) ## ## Residuals: ## Min 1Q Median 3Q Max ## -13.5710 -3.2592 -0.3435 2.7630 16.9240 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 39.935861 0.717499 55.66 <2e-16 *** ## horsepower -0.157845 0.006446 -24.49 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 4.906 on 390 degrees of freedom ## Multiple R-squared: 0.6059, Adjusted R-squared: 0.6049 ## F-statistic: 599.7 on 1 and 390 DF, p-value: < 2.2e-16 Yes, there is a significant relationship between the predictor and the response: the p-value for horsepower is essentially zero. The relationship is strong (\\(R^2 = 0.61\\), so horsepower explains about 61% of the variance in mpg) and negative: for every unit increase in horsepower, mpg falls by about 0.16. predict(fit, data.frame(horsepower = 98), interval = "confidence") ## fit lwr upr ## 1 24.46708 23.97308 24.96108 predict(fit, data.frame(horsepower = 98), interval = "prediction") ## fit lwr upr ## 1 24.46708 14.8094 34.12476 Plot the response and the predictor. Use the abline() function to display the least squares regression line. plot(Auto$horsepower, Auto$mpg, xlab = "horsepower", ylab = "mpg") abline(fit) Use the plot() function to produce diagnostic plots of the least squares regression fit. Comment on any problems you see with the fit. par(mfrow = c(2, 2)) plot(fit, cex = 0.2) The residuals show a trend with respect to the fitted values, suggesting a non-linear relationship.
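As a quick numerical sanity check of the identity proved in Question 7 (which also holds for simple linear regression with an intercept), the \\(R^2\\) reported above equals the squared correlation between mpg and horsepower: all.equal(summary(fit)$r.squared, cor(Auto$mpg, Auto$horsepower)^2) ## [1] TRUE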
3.2.2 Question 9 This question involves the use of multiple linear regression on the Auto data set. Produce a scatterplot matrix which includes all of the variables in the data set. pairs(Auto, cex = 0.2) Compute the matrix of correlations between the variables using the function cor(). You will need to exclude the name variable, which is qualitative. x <- subset(Auto, select = -name) cor(x) ## mpg cylinders displacement horsepower weight ## mpg 1.0000000 -0.7776175 -0.8051269 -0.7784268 -0.8322442 ## cylinders -0.7776175 1.0000000 0.9508233 0.8429834 0.8975273 ## displacement -0.8051269 0.9508233 1.0000000 0.8972570 0.9329944 ## horsepower -0.7784268 0.8429834 0.8972570 1.0000000 0.8645377 ## weight -0.8322442 0.8975273 0.9329944 0.8645377 1.0000000 ## acceleration 0.4233285 -0.5046834 -0.5438005 -0.6891955 -0.4168392 ## year 0.5805410 -0.3456474 -0.3698552 -0.4163615 -0.3091199 ## origin 0.5652088 -0.5689316 -0.6145351 -0.4551715 -0.5850054 ## acceleration year origin ## mpg 0.4233285 0.5805410 0.5652088 ## cylinders -0.5046834 -0.3456474 -0.5689316 ## displacement -0.5438005 -0.3698552 -0.6145351 ## horsepower -0.6891955 -0.4163615 -0.4551715 ## weight -0.4168392 -0.3091199 -0.5850054 ## acceleration 1.0000000 0.2903161 0.2127458 ## year 0.2903161 1.0000000 0.1815277 ## origin 0.2127458 0.1815277 1.0000000 Use the lm() function to perform a multiple linear regression with mpg as the response and all other variables except name as the predictors. Use the summary() function to print the results. Comment on the output. For instance: Is there a relationship between the predictors and the response? Which predictors appear to have a statistically significant relationship to the response? What does the coefficient for the year variable suggest? fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.5903 -2.1565 -0.1169 1.8690 13.0604 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -17.218435 4.644294 -3.707 0.00024 *** ## cylinders -0.493376 0.323282 -1.526 0.12780 ## displacement 0.019896 0.007515 2.647 0.00844 ** ## horsepower -0.016951 0.013787 -1.230 0.21963 ## weight -0.006474 0.000652 -9.929 < 2e-16 *** ## acceleration 0.080576 0.098845 0.815 0.41548 ## year 0.750773 0.050973 14.729 < 2e-16 *** ## origin 1.426141 0.278136 5.127 4.67e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.328 on 384 degrees of freedom ## Multiple R-squared: 0.8215, Adjusted R-squared: 0.8182 ## F-statistic: 252.4 on 7 and 384 DF, p-value: < 2.2e-16 Yes, the F-statistic is highly significant, so there is a relationship between the predictors and the response. The predictors with a statistically significant relationship are “displacement” (positive), “weight” (negative), “year” (positive) and “origin” (positive). The coefficient for year (which is positive, \\(\\sim 0.75\\)) suggests that mpg increases by about this amount every year on average. Use the plot() function to produce diagnostic plots of the linear regression fit. Comment on any problems you see with the fit. Do the residual plots suggest any unusually large outliers? Does the leverage plot identify any observations with unusually high leverage? par(mfrow = c(2, 2)) plot(fit, cex = 0.2) One point has high leverage, and the residuals also show a trend with respect to the fitted values. Use the * and : symbols to fit linear regression models with interaction effects. Do any interactions appear to be statistically significant? summary(lm(mpg ~ .
+ weight:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + weight:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.589 -1.617 -0.184 1.541 12.001 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.876e+00 4.511e+00 0.638 0.524147 ## cylinders -2.955e-02 2.881e-01 -0.103 0.918363 ## displacement 5.950e-03 6.750e-03 0.881 0.378610 ## horsepower -2.313e-01 2.363e-02 -9.791 < 2e-16 *** ## weight -1.121e-02 7.285e-04 -15.393 < 2e-16 *** ## acceleration -9.019e-02 8.855e-02 -1.019 0.309081 ## year 7.695e-01 4.494e-02 17.124 < 2e-16 *** ## origin 8.344e-01 2.513e-01 3.320 0.000986 *** ## horsepower:weight 5.529e-05 5.227e-06 10.577 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.931 on 383 degrees of freedom ## Multiple R-squared: 0.8618, Adjusted R-squared: 0.859 ## F-statistic: 298.6 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + acceleration:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + acceleration:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.0329 -1.8177 -0.1183 1.7247 12.4870 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -32.499820 4.923380 -6.601 1.36e-10 *** ## cylinders 0.083489 0.316913 0.263 0.792350 ## displacement -0.007649 0.008161 -0.937 0.349244 ## horsepower 0.127188 0.024746 5.140 4.40e-07 *** ## weight -0.003976 0.000716 -5.552 5.27e-08 *** ## acceleration 0.983282 0.161513 6.088 2.78e-09 *** ## year 0.755919 0.048179 15.690 < 2e-16 *** ## origin 1.035733 0.268962 3.851 0.000138 *** ## horsepower:acceleration -0.012139 0.001772 -6.851 2.93e-11 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.145 on 383 degrees of freedom ## Multiple R-squared: 0.841, Adjusted R-squared: 0.8376 ## F-statistic: 253.2 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + cylinders:weight, data = x)) ## ## Call: ## lm(formula = mpg ~ . + cylinders:weight, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -10.9484 -1.7133 -0.1809 1.4530 12.4137 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 7.3143478 5.0076737 1.461 0.14494 ## cylinders -5.0347425 0.5795767 -8.687 < 2e-16 *** ## displacement 0.0156444 0.0068409 2.287 0.02275 * ## horsepower -0.0314213 0.0126216 -2.489 0.01322 * ## weight -0.0150329 0.0011125 -13.513 < 2e-16 *** ## acceleration 0.1006438 0.0897944 1.121 0.26306 ## year 0.7813453 0.0464139 16.834 < 2e-16 *** ## origin 0.8030154 0.2617333 3.068 0.00231 ** ## cylinders:weight 0.0015058 0.0001657 9.088 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.022 on 383 degrees of freedom ## Multiple R-squared: 0.8531, Adjusted R-squared: 0.8501 ## F-statistic: 278.1 on 8 and 383 DF, p-value: < 2.2e-16 There are at least three cases where the interactions appear to be highly significant. Try a few different transformations of the variables, such as \\(log(X)\\), \\(\\sqrt{X}\\), \\(X^2\\). Comment on your findings. Here I’ll just consider transformations for horsepower. 
par(mfrow = c(2, 2)) plot(Auto$horsepower, Auto$mpg, cex = 0.2) plot(log(Auto$horsepower), Auto$mpg, cex = 0.2) plot(sqrt(Auto$horsepower), Auto$mpg, cex = 0.2) plot(Auto$horsepower ^ 2, Auto$mpg, cex = 0.2) x <- subset(Auto, select = -name) x$horsepower <- log(x$horsepower) fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.3115 -2.0041 -0.1726 1.8393 12.6579 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 27.254005 8.589614 3.173 0.00163 ** ## cylinders -0.486206 0.306692 -1.585 0.11372 ## displacement 0.019456 0.006876 2.830 0.00491 ** ## horsepower -9.506436 1.539619 -6.175 1.69e-09 *** ## weight -0.004266 0.000694 -6.148 1.97e-09 *** ## acceleration -0.292088 0.103804 -2.814 0.00515 ** ## year 0.705329 0.048456 14.556 < 2e-16 *** ## origin 1.482435 0.259347 5.716 2.19e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.18 on 384 degrees of freedom ## Multiple R-squared: 0.837, Adjusted R-squared: 0.834 ## F-statistic: 281.6 on 7 and 384 DF, p-value: < 2.2e-16 par(mfrow = c(2, 2)) plot(fit, cex = 0.2) A log transformation of horsepower appears to give a more linear relationship with mpg. 3.2.3 Question 10 This question should be answered using the Carseats data set. Fit a multiple regression model to predict Sales using Price, Urban, and US. fit <- lm(Sales ~ Price + Urban + US, data = Carseats) Provide an interpretation of each coefficient in the model. Be careful—some of the variables in the model are qualitative! summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 Write out the model in equation form, being careful to handle the qualitative variables properly. \\[ \\textit{Sales} = 13 - 0.054 \\times \\textit{Price} + \\begin{cases} -0.022, & \\text{if $\\textit{Urban}$ is Yes, $\\textit{US}$ is No} \\\\ 1.20, & \\text{if $\\textit{Urban}$ is No, $\\textit{US}$ is Yes} \\\\ 1.18, & \\text{if $\\textit{Urban}$ and $\\textit{US}$ are both Yes} \\\\ 0, & \\text{Otherwise} \\end{cases} \\] For which of the predictors can you reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? Price and US (Urban shows no significant difference between “No” and “Yes”). On the basis of your response to the previous question, fit a smaller model that only uses the predictors for which there is evidence of association with the outcome. fit2 <- lm(Sales ~ Price + US, data = Carseats) How well do the models in (a) and (e) fit the data? summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 summary(fit2) ## ## Call: ## lm(formula = Sales ~ Price + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9269 -1.6286 -0.0574 1.5766 7.0515 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.03079 0.63098 20.652 < 2e-16 *** ## Price -0.05448 0.00523 -10.416 < 2e-16 *** ## USYes 1.19964 0.25846 4.641 4.71e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.469 on 397 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2354 ## F-statistic: 62.43 on 2 and 397 DF, p-value: < 2.2e-16 anova(fit, fit2) ## Analysis of Variance Table ## ## Model 1: Sales ~ Price + Urban + US ## Model 2: Sales ~ Price + US ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 396 2420.8 ## 2 397 2420.9 -1 -0.03979 0.0065 0.9357 The two models have nearly identical \\(R^2\\), and the ANOVA confirms that the model containing the extra variable “Urban” is not significantly better (p = 0.94). Using the model from (e), obtain 95% confidence intervals for the coefficient(s). confint(fit2) ## 2.5 % 97.5 % ## (Intercept) 11.79032020 14.27126531 ## Price -0.06475984 -0.04419543 ## USYes 0.69151957 1.70776632 Is there evidence of outliers or high leverage observations in the model from (e)? par(mfrow = c(2, 2)) plot(fit2, cex = 0.2) Somewhat: a few observations have standardized residuals approaching \\(\\pm 3\\) and a few have relatively high leverage, but nothing looks extreme. 3.2.4 Question 11 In this problem we will investigate the t-statistic for the null hypothesis \\(H_0 : \\beta = 0\\) in simple linear regression without an intercept. To begin, we generate a predictor x and a response y as follows. set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) Perform a simple linear regression of y onto x, without an intercept. Report the coefficient estimate \\(\\hat{\\beta}\\), the standard error of this coefficient estimate, and the t-statistic and p-value associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. (You can perform regression without an intercept using the command lm(y~x+0).) fit <- lm(y ~ x + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## x 1.993876 0.1064767 18.72593 2.642197e-34 There’s a significant positive relationship between \\(y\\) and \\(x\\). \\(y\\) values are predicted to be (a little below) twice the \\(x\\) values. Now perform a simple linear regression of x onto y without an intercept, and report the coefficient estimate, its standard error, and the corresponding t-statistic and p-values associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. fit <- lm(x ~ y + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## y 0.3911145 0.02088625 18.72593 2.642197e-34 There’s a significant positive relationship between \\(x\\) and \\(y\\). \\(x\\) values are predicted to be (a little below) half the \\(y\\) values. What is the relationship between the results obtained in (a) and (b)? Without noise, the coefficients would be reciprocals of each other (2 and 1/2). The t-statistics and p-values are the same.
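The relationship in (c) can also be seen numerically (a small sketch using the x and y simulated above; the names b_yx and b_xy are ours). The product of the two no-intercept slopes is \\((\\sum_i x_iy_i)^2 / (\\sum_i x_i^2 \\sum_i y_i^2)\\), which equals 1 only in the noise-free case, so the two estimates are close to, but not exactly, reciprocals: b_yx <- coef(lm(y ~ x + 0)) b_xy <- coef(lm(x ~ y + 0)) round(unname(b_yx * b_xy), 2) ## [1] 0.78 all.equal(unname(b_yx * b_xy), sum(x * y)^2 / (sum(x^2) * sum(y^2))) ## [1] TRUE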
For the regression of \\(Y\\) onto \\(X\\) without an intercept, the t-statistic for \\(H_0 : \\beta = 0\\) takes the form \\(\\hat{\\beta}/SE(\\hat{\\beta})\\), where \\(\\hat{\\beta}\\) is given by (3.38), and where \\[ SE(\\hat\\beta) = \\sqrt{\\frac{\\sum_{i=1}^n(y_i - x_i\\hat\\beta)^2}{(n-1)\\sum_{i'=1}^nx_{i'}^2}}. \\] (These formulas are slightly different from those given in Sections 3.1.1 and 3.1.2, since here we are performing regression without an intercept.) Show algebraically, and confirm numerically in R, that the t-statistic can be written as \\[ \\frac{\\sqrt{n-1} \\sum_{i=1}^nx_iy_i} {\\sqrt{(\\sum_{i=1}^nx_i^2)(\\sum_{i'=1}^ny_{i'}^2)-(\\sum_{i'=1}^nx_{i'}y_{i'})^2}} \\] \\[ \\beta = \\sum_i x_i y_i / \\sum_{i'} x_{i'}^2, \\] therefore \\[\\begin{align} t &= \\frac{\\sum_i x_i y_i \\sqrt{n-1} \\sqrt{\\sum_ix_i^2}} {\\sum_i x_i^2 \\sqrt{\\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_i(y_i^2 - 2 y_i x_i \\beta + x_i^2 \\beta^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\beta \\sum_ix_i^2 (2 \\sum_i y_i x_i -\\beta \\sum_i x_i^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\sum_i x_i y_i (2 \\sum_i y_i x_i - \\sum_i x_i y_i) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_iy_i^2 - (\\sum_i x_i y_i)^2}} \\\\ \\end{align}\\] We can show this numerically in R by computing \\(t\\) using the above equation. n <- length(x) sqrt(n - 1) * sum(x * y) / sqrt(sum(x ^ 2) * sum(y ^ 2) - sum(x * y) ^ 2) ## [1] 18.72593 Using the results from (d), argue that the t-statistic for the regression of y onto x is the same as the t-statistic for the regression of x onto y. Swapping \\(x_i\\) and \\(y_i\\) in the formula for \\(t\\) leaves it unchanged, so the two regressions must give the same t-statistic. In R, show that when regression is performed with an intercept, the t-statistic for \\(H_0 : \\beta_1 = 0\\) is the same for the regression of y onto x as it is for the regression of x onto y. coef(summary(lm(y ~ x))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.03769261 0.09698729 -0.3886346 6.983896e-01 ## x 1.99893961 0.10772703 18.5555993 7.723851e-34 coef(summary(lm(x ~ y))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.03880394 0.04266144 0.9095787 3.652764e-01 ## y 0.38942451 0.02098690 18.5555993 7.723851e-34 3.2.5 Question 12 This problem involves simple linear regression without an intercept. Recall that the coefficient estimate \\(\\hat{\\beta}\\) for the linear regression of \\(Y\\) onto \\(X\\) without an intercept is given by (3.38). Under what circumstance is the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\)? \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} x_{i'}^2 \\] The coefficient for the regression of X onto Y swaps the \\(x\\) and \\(y\\) variables: \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} y_{i'}^2 \\] So they are the same when \\(\\sum_{i} x_{i}^2 = \\sum_{i} y_{i}^2\\). Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is different from the coefficient estimate for the regression of \\(Y\\) onto \\(X\\).
x <- rnorm(100) y <- 2 * x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 105.9889 429.4924 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 2.0106218 0.4962439 Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). x <- rnorm(100) y <- x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 135.5844 134.5153 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 0.9925051 1.0006765 3.2.6 Question 13 In this exercise you will create some simulated data and will fit simple linear regression models to it. Make sure to use set.seed(1) prior to starting part (a) to ensure consistent results. set.seed(1) Using the rnorm() function, create a vector, x, containing 100 observations drawn from a \\(N(0, 1)\\) distribution. This represents a feature, \\(X\\). x <- rnorm(100, 0, 1) Using the rnorm() function, create a vector, eps, containing 100 observations drawn from a \\(N(0, 0.25)\\) distribution—a normal distribution with mean zero and variance 0.25. eps <- rnorm(100, 0, sqrt(0.25)) Using x and eps, generate a vector y according to the model \\[Y = -1 + 0.5X + \\epsilon\\] What is the length of the vector y? What are the values of \\(\\beta_0\\) and \\(\\beta_1\\) in this linear model? y <- -1 + 0.5 * x + eps length(y) ## [1] 100 \\(\\beta_0 = -1\\) and \\(\\beta_1 = 0.5\\) Create a scatterplot displaying the relationship between x and y. Comment on what you observe. plot(x, y) There is a linear relationship between \\(x\\) and \\(y\\) (with some error). Fit a least squares linear model to predict y using x. Comment on the model obtained. How do \\(\\hat\\beta_0\\) and \\(\\hat\\beta_1\\) compare to \\(\\beta_0\\) and \\(\\beta_1\\)? fit <- lm(y ~ x) summary(fit) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.93842 -0.30688 -0.06975 0.26970 1.17309 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.01885 0.04849 -21.010 < 2e-16 *** ## x 0.49947 0.05386 9.273 4.58e-15 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.4814 on 98 degrees of freedom ## Multiple R-squared: 0.4674, Adjusted R-squared: 0.4619 ## F-statistic: 85.99 on 1 and 98 DF, p-value: 4.583e-15 \\(\\beta_0\\) and \\(\\beta_1\\) are close to their population values. Display the least squares line on the scatterplot obtained in (d). Draw the population regression line on the plot, in a different color. Use the legend() command to create an appropriate legend. plot(x, y) abline(fit) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) Now fit a polynomial regression model that predicts y using x and x^2. Is there evidence that the quadratic term improves the model fit? Explain your answer. fit2 <- lm(y ~ poly(x, 2)) anova(fit2, fit) ## Analysis of Variance Table ## ## Model 1: y ~ poly(x, 2) ## Model 2: y ~ x ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 97 22.257 ## 2 98 22.709 -1 -0.45163 1.9682 0.1638 There is no evidence for an improved fit, since the F-test is non-significant. Repeat (a)–(f) after modifying the data generation process in such a way that there is less noise in the data. The model (3.39) should remain the same. You can do this by decreasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. 
x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, sqrt(0.05)) fit2 <- lm(y ~ x) summary(fit2) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.61308 -0.12553 -0.00391 0.15199 0.41332 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.98917 0.02216 -44.64 <2e-16 *** ## x 0.52375 0.02152 24.33 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.2215 on 98 degrees of freedom ## Multiple R-squared: 0.858, Adjusted R-squared: 0.8565 ## F-statistic: 592.1 on 1 and 98 DF, p-value: < 2.2e-16 plot(x, y) abline(fit2) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows less variability and the \\(R^2\\) is higher. Repeat (a)–(f) after modifying the data generation process in such a way that there is more noise in the data. The model (3.39) should remain the same. You can do this by increasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, 1) fit3 <- lm(y ~ x) summary(fit3) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.51014 -0.60549 0.02065 0.70483 2.08980 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.04745 0.09676 -10.825 < 2e-16 *** ## x 0.42505 0.08310 5.115 1.56e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.9671 on 98 degrees of freedom ## Multiple R-squared: 0.2107, Adjusted R-squared: 0.2027 ## F-statistic: 26.16 on 1 and 98 DF, p-value: 1.56e-06 plot(x, y) abline(fit3) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows more variability. The \\(R^2\\) is lower. What are the confidence intervals for \\(\\beta_0\\) and \\(\\beta_1\\) based on the original data set, the noisier data set, and the less noisy data set? Comment on your results. confint(fit) ## 2.5 % 97.5 % ## (Intercept) -1.1150804 -0.9226122 ## x 0.3925794 0.6063602 confint(fit2) ## 2.5 % 97.5 % ## (Intercept) -1.033141 -0.9451916 ## x 0.481037 0.5664653 confint(fit3) ## 2.5 % 97.5 % ## (Intercept) -1.2394772 -0.8554276 ## x 0.2601391 0.5899632 The confidence intervals for the coefficients are narrower when there is less noise and wider when there is more. 3.2.7 Question 14 This problem focuses on the collinearity problem. Perform the following commands in R : > set.seed(1) > x1 <- runif(100) > x2 <- 0.5 * x1 + rnorm(100) / 10 > y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The last line corresponds to creating a linear model in which y is a function of x1 and x2. Write out the form of the linear model. What are the regression coefficients? set.seed(1) x1 <- runif(100) x2 <- 0.5 * x1 + rnorm(100) / 10 y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The model is of the form: \\[Y = \\beta_0 + \\beta_1X_1 + \\beta_2X_2 + \\epsilon\\] The coefficients are \\(\\beta_0 = 2\\), \\(\\beta_1 = 2\\), \\(\\beta_2 = 0.3\\). What is the correlation between x1 and x2? Create a scatterplot displaying the relationship between the variables. cor(x1, x2) ## [1] 0.8351212 plot(x1, x2) Using this data, fit a least squares regression to predict y using x1 and x2. Describe the results obtained. What are \\(\\hat\\beta_0\\), \\(\\hat\\beta_1\\), and \\(\\hat\\beta_2\\)?
How do these relate to the true \\(\\beta_0\\), \\(\\beta_1\\), and \\(\\beta_2\\)? Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? How about the null hypothesis \\(H_0 : \\beta_2 = 0\\)? summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8311 -0.7273 -0.0537 0.6338 2.3359 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1305 0.2319 9.188 7.61e-15 *** ## x1 1.4396 0.7212 1.996 0.0487 * ## x2 1.0097 1.1337 0.891 0.3754 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.056 on 97 degrees of freedom ## Multiple R-squared: 0.2088, Adjusted R-squared: 0.1925 ## F-statistic: 12.8 on 2 and 97 DF, p-value: 1.164e-05 \\(\\hat\\beta_0 = 2.13\\), \\(\\hat\\beta_1 = 1.43\\), and \\(\\hat\\beta_2 = 1.01\\). These are relatively poor estimates of the true values. We can just reject the null hypothesis \\(H_0 : \\beta_1 = 0\\) at the 5% level (p = 0.049). We cannot reject the null hypothesis \\(H_0 : \\beta_2 = 0\\). Now fit a least squares regression to predict y using only x1. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.89495 -0.66874 -0.07785 0.59221 2.45560 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1124 0.2307 9.155 8.27e-15 *** ## x1 1.9759 0.3963 4.986 2.66e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.055 on 98 degrees of freedom ## Multiple R-squared: 0.2024, Adjusted R-squared: 0.1942 ## F-statistic: 24.86 on 1 and 98 DF, p-value: 2.661e-06 We can reject \\(H_0 : \\beta_1 = 0\\). The p-value for \\(\\beta_1\\) is much smaller than when x2 is included in the model. Now fit a least squares regression to predict y using only x2. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.62687 -0.75156 -0.03598 0.72383 2.44890 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3899 0.1949 12.26 < 2e-16 *** ## x2 2.8996 0.6330 4.58 1.37e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.072 on 98 degrees of freedom ## Multiple R-squared: 0.1763, Adjusted R-squared: 0.1679 ## F-statistic: 20.98 on 1 and 98 DF, p-value: 1.366e-05 Similarly, we can reject the null hypothesis for the coefficient of x2. Its p-value is much smaller than when x1 is included in the model. Do the results obtained in (c)–(e) contradict each other? Explain your answer. No, they do not contradict each other. Both x1 and x2 individually are capable of explaining much of the variation observed in y; however, since they are correlated, it is very difficult to tease apart their separate contributions. Now suppose we obtain one additional observation, which was unfortunately mismeasured. > x1 <- c(x1, 0.1) > x2 <- c(x2, 0.8) > y <- c(y, 6) Re-fit the linear models from (c) to (e) using this new data. What effect does this new observation have on the each of the models? In each model, is this observation an outlier? A high-leverage point? Both? Explain your answers.
x1 <- c(x1, 0.1) x2 <- c(x2, 0.8) y <- c(y, 6) summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.73348 -0.69318 -0.05263 0.66385 2.30619 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2267 0.2314 9.624 7.91e-16 *** ## x1 0.5394 0.5922 0.911 0.36458 ## x2 2.5146 0.8977 2.801 0.00614 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.075 on 98 degrees of freedom ## Multiple R-squared: 0.2188, Adjusted R-squared: 0.2029 ## F-statistic: 13.72 on 2 and 98 DF, p-value: 5.564e-06 summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8897 -0.6556 -0.0909 0.5682 3.5665 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2569 0.2390 9.445 1.78e-15 *** ## x1 1.7657 0.4124 4.282 4.29e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.111 on 99 degrees of freedom ## Multiple R-squared: 0.1562, Adjusted R-squared: 0.1477 ## F-statistic: 18.33 on 1 and 99 DF, p-value: 4.295e-05 summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.64729 -0.71021 -0.06899 0.72699 2.38074 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3451 0.1912 12.264 < 2e-16 *** ## x2 3.1190 0.6040 5.164 1.25e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.074 on 99 degrees of freedom ## Multiple R-squared: 0.2122, Adjusted R-squared: 0.2042 ## F-statistic: 26.66 on 1 and 99 DF, p-value: 1.253e-06 par(mfrow = c(2, 2)) plot(lm(y ~ x1 + x2), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x1), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x2), cex = 0.2) In the first model (with both predictors), the new point has very high leverage (it is an outlier in terms of the joint x1 and x2 distribution), but it is not an outlier of the fit, since its residual is not unusually large. In the model that includes only x1, the new point is an outlier but does not have high leverage. In the model that includes only x2, it has high leverage but is not an outlier. It is useful to consider the scatterplot of x1 and x2. plot(x1, x2) points(0.1, 0.8, col = "red", pch = 19) 3.2.8 Question 15 This problem involves the Boston data set, which we saw in the lab for this chapter. We will now try to predict per capita crime rate using the other variables in this data set. In other words, per capita crime rate is the response, and the other variables are the predictors. We are trying to predict crim. pred <- subset(Boston, select = -crim) For each predictor, fit a simple linear regression model to predict the response. Describe your results. In which of the models is there a statistically significant association between the predictor and the response? Create some plots to back up your assertions. fits <- lapply(pred, function(x) lm(Boston$crim ~ x)) printCoefmat(do.call(rbind, lapply(fits, function(x) coef(summary(x))[2, ]))) ## Estimate Std.
Error t value Pr(>|t|) ## zn -0.0739350 0.0160946 -4.5938 5.506e-06 *** ## indus 0.5097763 0.0510243 9.9908 < 2.2e-16 *** ## chas -1.8927766 1.5061155 -1.2567 0.2094 ## nox 31.2485312 2.9991904 10.4190 < 2.2e-16 *** ## rm -2.6840512 0.5320411 -5.0448 6.347e-07 *** ## age 0.1077862 0.0127364 8.4628 2.855e-16 *** ## dis -1.5509017 0.1683300 -9.2135 < 2.2e-16 *** ## rad 0.6179109 0.0343318 17.9982 < 2.2e-16 *** ## tax 0.0297423 0.0018474 16.0994 < 2.2e-16 *** ## ptratio 1.1519828 0.1693736 6.8014 2.943e-11 *** ## lstat 0.5488048 0.0477610 11.4907 < 2.2e-16 *** ## medv -0.3631599 0.0383902 -9.4597 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 There are significant associations for all predictors with the exception of chas when fitting separate linear models. For example, consider the following plot representing the fifth model (crim regressed on rm) plot(Boston$rm, Boston$crim) abline(fits[[5]]) Fit a multiple regression model to predict the response using all of the predictors. Describe your results. For which predictors can we reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? mfit <- lm(crim ~ ., data = Boston) summary(mfit) ## ## Call: ## lm(formula = crim ~ ., data = Boston) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.534 -2.248 -0.348 1.087 73.923 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.7783938 7.0818258 1.946 0.052271 . ## zn 0.0457100 0.0187903 2.433 0.015344 * ## indus -0.0583501 0.0836351 -0.698 0.485709 ## chas -0.8253776 1.1833963 -0.697 0.485841 ## nox -9.9575865 5.2898242 -1.882 0.060370 . ## rm 0.6289107 0.6070924 1.036 0.300738 ## age -0.0008483 0.0179482 -0.047 0.962323 ## dis -1.0122467 0.2824676 -3.584 0.000373 *** ## rad 0.6124653 0.0875358 6.997 8.59e-12 *** ## tax -0.0037756 0.0051723 -0.730 0.465757 ## ptratio -0.3040728 0.1863598 -1.632 0.103393 ## lstat 0.1388006 0.0757213 1.833 0.067398 . ## medv -0.2200564 0.0598240 -3.678 0.000261 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 6.46 on 493 degrees of freedom ## Multiple R-squared: 0.4493, Adjusted R-squared: 0.4359 ## F-statistic: 33.52 on 12 and 493 DF, p-value: < 2.2e-16 There are now only significant associations for zn, dis, rad and medv. How do your results from (a) compare to your results from (b)? Create a plot displaying the univariate regression coefficients from (a) on the \\(x\\)-axis, and the multiple regression coefficients from (b) on the \\(y\\)-axis. That is, each predictor is displayed as a single point in the plot. Its coefficient in a simple linear regression model is shown on the x-axis, and its coefficient estimate in the multiple linear regression model is shown on the y-axis. The results from (b) show reduced significance compared to the models fit in (a). plot(sapply(fits, function(x) coef(x)[2]), coef(mfit)[-1], xlab = "Univariate regression", ylab = "multiple regression") The estimated coefficients differ (in particular the estimated coefficient for nox is dramatically different) between the two modelling strategies. Is there evidence of non-linear association between any of the predictors and the response? To answer this question, for each predictor X, fit a model of the form \\[ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon \\] pred <- subset(pred, select = -chas) fits <- lapply(names(pred), function(p) { f <- paste0("crim ~ poly(", p, ", 3)") lm(as.formula(f), data = Boston) }) for (fit in fits) printCoefmat(coef(summary(fit))) ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) 3.61352 0.37219 9.7088 < 2.2e-16 *** ## poly(zn, 3)1 -38.74984 8.37221 -4.6284 4.698e-06 *** ## poly(zn, 3)2 23.93983 8.37221 2.8594 0.004421 ** ## poly(zn, 3)3 -10.07187 8.37221 -1.2030 0.229539 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3300 10.9501 < 2.2e-16 *** ## poly(indus, 3)1 78.5908 7.4231 10.5873 < 2.2e-16 *** ## poly(indus, 3)2 -24.3948 7.4231 -3.2863 0.001086 ** ## poly(indus, 3)3 -54.1298 7.4231 -7.2920 1.196e-12 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32157 11.2370 < 2.2e-16 *** ## poly(nox, 3)1 81.37202 7.23361 11.2492 < 2.2e-16 *** ## poly(nox, 3)2 -28.82859 7.23361 -3.9854 7.737e-05 *** ## poly(nox, 3)3 -60.36189 7.23361 -8.3446 6.961e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3703 9.7584 < 2.2e-16 *** ## poly(rm, 3)1 -42.3794 8.3297 -5.0878 5.128e-07 *** ## poly(rm, 3)2 26.5768 8.3297 3.1906 0.001509 ** ## poly(rm, 3)3 -5.5103 8.3297 -0.6615 0.508575 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.34852 10.3683 < 2.2e-16 *** ## poly(age, 3)1 68.18201 7.83970 8.6970 < 2.2e-16 *** ## poly(age, 3)2 37.48447 7.83970 4.7814 2.291e-06 *** ## poly(age, 3)3 21.35321 7.83970 2.7237 0.00668 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32592 11.0870 < 2.2e-16 *** ## poly(dis, 3)1 -73.38859 7.33148 -10.0101 < 2.2e-16 *** ## poly(dis, 3)2 56.37304 7.33148 7.6892 7.870e-14 *** ## poly(dis, 3)3 -42.62188 7.33148 -5.8135 1.089e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29707 12.1639 < 2.2e-16 *** ## poly(rad, 3)1 120.90745 6.68240 18.0934 < 2.2e-16 *** ## poly(rad, 3)2 17.49230 6.68240 2.6177 0.009121 ** ## poly(rad, 3)3 4.69846 6.68240 0.7031 0.482314 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.30468 11.8599 < 2.2e-16 *** ## poly(tax, 3)1 112.64583 6.85371 16.4358 < 2.2e-16 *** ## poly(tax, 3)2 32.08725 6.85371 4.6817 3.665e-06 *** ## poly(tax, 3)3 -7.99681 6.85371 -1.1668 0.2439 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.36105 10.0084 < 2.2e-16 *** ## poly(ptratio, 3)1 56.04523 8.12158 6.9008 1.565e-11 *** ## poly(ptratio, 3)2 24.77482 8.12158 3.0505 0.002405 ** ## poly(ptratio, 3)3 -22.27974 8.12158 -2.7433 0.006301 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.33917 10.6540 <2e-16 *** ## poly(lstat, 3)1 88.06967 7.62944 11.5434 <2e-16 *** ## poly(lstat, 3)2 15.88816 7.62944 2.0825 0.0378 * ## poly(lstat, 3)3 -11.57402 7.62944 -1.5170 0.1299 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29203 12.374 < 2.2e-16 *** ## poly(medv, 3)1 -75.05761 6.56915 -11.426 < 2.2e-16 *** ## poly(medv, 3)2 88.08621 6.56915 13.409 < 2.2e-16 *** ## poly(medv, 3)3 -48.03343 6.56915 -7.312 1.047e-12 *** ## --- ## Signif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Yes, there is strong evidence for many variables having non-linear associations. In many cases, the addition of a cubic term is significant (indus, nox, age, dis, ptratio and medv). In the other cases, although the cubic term is not significant, the squared term is (zn, rm, rad, tax, lstat). Thus every predictor fit here shows some evidence of a non-linear association (the binary variable chas was excluded above). 4 Classification 4.1 Conceptual 4.1.1 Question 1 Using a little bit of algebra, prove that (4.2) is equivalent to (4.3). In other words, the logistic function representation and logit representation for the logistic regression model are equivalent. We need to show that \\[ p(X) = \\frac{e^{\\beta_0 + \\beta_1X}}{1 + e^{\\beta_0 + \\beta_1X}} \\] is equivalent to \\[ \\frac{p(X)}{1-p(X)} = e^{\\beta_0 + \\beta_1X} \\] Letting \\(x = e^{\\beta_0 + \\beta_1X}\\) \\[\\begin{align} \\frac{p(X)}{1-p(X)} &= \\frac{\\frac{x}{1 + x}} {1 - \\frac{x}{1 + x}} \\\\ &= \\frac{\\frac{x}{1 + x}} {\\frac{1}{1 + x}} \\\\ &= x \\end{align}\\] 4.1.2 Question 2 It was stated in the text that classifying an observation to the class for which (4.12) is largest is equivalent to classifying an observation to the class for which (4.13) is largest. Prove that this is the case. In other words, under the assumption that the observations in the \\(k\\)th class are drawn from a \\(N(\\mu_k,\\sigma^2)\\) distribution, the Bayes’ classifier assigns an observation to the class for which the discriminant function is maximized. Equation 4.12 is \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi}\\sigma} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2)} {\\sum_{l=1}^K \\pi_l\\frac{1}{\\sqrt{2\\pi}\\sigma} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2)} \\] and the discriminant function is \\[ \\delta_k(x) = x \\cdot \\frac{\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} + \\log(\\pi_k) \\] Since \\(\\sigma^2\\) is constant across classes, the normalizing factor cancels and \\[ p_k(x) = \\frac{\\pi_k \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2\\right)} {\\sum_{l=1}^K \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)} \\] Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(x)\\), and therefore, we can consider maximizing \\(\\log(p_k(x))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) - \\frac{1}{2\\sigma^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^K \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) - \\frac{1}{2\\sigma^2} (x^2 - 2x\\mu_k + \\mu_k^2) \\\\ &= \\log(\\pi_k) - \\frac{x^2}{2\\sigma^2} + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\\\ \\end{align}\\] Since \\(\\frac{x^2}{2\\sigma^2}\\) is also independent of \\(k\\), we just need to maximize \\[ \\log(\\pi_k) + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\] which is the discriminant function \\(\\delta_k(x)\\). 4.1.3 Question 3 This problem relates to the QDA model, in which the observations within each class are drawn from a normal distribution with a class-specific mean vector and a class specific covariance matrix. We consider the simple case where \\(p = 1\\); i.e. there is only one feature. Suppose that we have \\(K\\) classes, and that if an observation belongs to the \\(k\\)th class then \\(X\\) comes from a one-dimensional normal distribution, \\(X \\sim N(\\mu_k,\\sigma_k^2)\\).
Recall that the density function for the one-dimensional normal distribution is given in (4.16). Prove that in this case, the Bayes classifier is not linear. Argue that it is in fact quadratic. Hint: For this problem, you should follow the arguments laid out in Section 4.4.1, but without making the assumption that \\(\\sigma_1^2 = ... = \\sigma_K^2\\). As above, \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi}\\sigma_k} \\exp(-\\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2)} {\\sum_{l=1}^K \\pi_l\\frac{1}{\\sqrt{2\\pi}\\sigma_l} \\exp(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2)} \\] Now let’s derive the Bayes classifier, without assuming \\(\\sigma_1^2 = ... = \\sigma_K^2\\). Maximizing \\(p_k(x)\\) also maximizes any monotonic function of \\(p_k(x)\\), and therefore, we can consider maximizing \\(\\log(p_k(x))\\) \\[ \\log(p_k(x)) = \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^K \\frac{1}{\\sqrt{2\\pi}\\sigma_l} \\pi_l \\exp\\left(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 \\\\ &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{x^2}{2\\sigma_k^2} + \\frac{x\\mu_k}{\\sigma_k^2} - \\frac{\\mu_k^2}{2\\sigma_k^2} \\\\ \\end{align}\\] However, unlike in Q2, \\(\\frac{x^2}{2\\sigma_k^2}\\) is not independent of \\(k\\), so we retain the term with \\(x^2\\), hence \\(f\\), the Bayes classifier, is a quadratic function of \\(x\\). 4.1.4 Question 4 When the number of features \\(p\\) is large, there tends to be a deterioration in the performance of KNN and other local approaches that perform prediction using only observations that are near the test observation for which a prediction must be made. This phenomenon is known as the curse of dimensionality, and it ties into the fact that non-parametric approaches often perform poorly when \\(p\\) is large. We will now investigate this curse. Suppose that we have a set of observations, each with measurements on \\(p = 1\\) feature, \\(X\\). We assume that \\(X\\) is uniformly (evenly) distributed on \\([0, 1]\\). Associated with each observation is a response value. Suppose that we wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X\\) closest to that test observation. For instance, in order to predict the response for a test observation with \\(X = 0.6\\), we will use observations in the range \\([0.55, 0.65]\\). On average, what fraction of the available observations will we use to make the prediction? For values in \\(0...0.05\\), we use less than 10% of observations (between 5% and 10%, 7.5% on average), similarly with values in \\(0.95...1\\). For values in \\(0.05...0.95\\) we use 10% of available observations. The (weighted) average is then \\(7.5 \\times 0.1 + 10 \\times 0.9 = 9.75\\%\\). Now suppose that we have a set of observations, each with measurements on \\(p = 2\\) features, \\(X_1\\) and \\(X_2\\). We assume that \\((X_1, X_2)\\) are uniformly distributed on \\([0, 1] \\times [0, 1]\\). We wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X_1\\) and within 10% of the range of \\(X_2\\) closest to that test observation.
For instance, in order to predict the response for a test observation with \\(X_1 = 0.6\\) and \\(X_2 = 0.35\\), we will use observations in the range \\([0.55, 0.65]\\) for \\(X_1\\) and in the range \\([0.3, 0.4]\\) for \\(X_2\\). On average, what fraction of the available observations will we use to make the prediction? Since we need the observation to be within range for \\(X_1\\) and \\(X_2\\) we square 9.75% = \\(0.0975^2 \\times 100 = 0.95\\%\\) Now suppose that we have a set of observations on \\(p = 100\\) features. Again the observations are uniformly distributed on each feature, and again each feature ranges in value from 0 to 1. We wish to predict a test observation’s response using observations within the 10% of each feature’s range that is closest to that test observation. What fraction of the available observations will we use to make the prediction? Similar to above, we use: \\(0.0975^{100} \\times 100 = 8 \\times 10^{-100}\\%\\), essentially zero. Using your answers to parts (a)–(c), argue that a drawback of KNN when \\(p\\) is large is that there are very few training observations “near” any given test observation. As \\(p\\) increases, the fraction of observations near any given point rapidly approaches zero. For instance, even if you use 50% of the nearest observations for each \\(p\\), with \\(p = 10\\), only \\(0.5^{10} \\times 100 \\approx 0.1\\%\\) points are “near”. Now suppose that we wish to make a prediction for a test observation by creating a \\(p\\)-dimensional hypercube centered around the test observation that contains, on average, 10% of the training observations. For \\(p = 1,2,\\) and \\(100\\), what is the length of each side of the hypercube? Comment on your answer. Note: A hypercube is a generalization of a cube to an arbitrary number of dimensions. When \\(p = 1\\), a hypercube is simply a line segment, when \\(p = 2\\) it is a square, and when \\(p = 100\\) it is a 100-dimensional cube. When \\(p = 1\\), clearly the length is 0.1. When \\(p = 2\\), we need the value \\(l\\) such that \\(l^2 = 0.1\\), so \\(l = \\sqrt{0.1} = 0.32\\). When \\(p = n\\), \\(l = 0.1^{1/n}\\), so in the case of \\(n = 100\\), \\(l = 0.98\\). Therefore, the length of each side of the hypercube rapidly approaches 1 (or 100%) of the range of each \\(p\\). 4.1.5 Question 5 We now examine the differences between LDA and QDA. If the Bayes decision boundary is linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will always perform better on the training set, but LDA would be expected to perform better on the test set. If the Bayes decision boundary is non-linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will perform better on the training set, and we would hope that extra flexibility translates to a better fit on the test set. In general, as the sample size \\(n\\) increases, do we expect the test prediction accuracy of QDA relative to LDA to improve, decline, or be unchanged? Why? As \\(n\\) increases, we would expect the prediction accuracy of QDA relative to LDA to improve as there is more data to fit to subtle effects in the data. True or False: Even if the Bayes decision boundary for a given problem is linear, we will probably achieve a superior test error rate using QDA rather than LDA because QDA is flexible enough to model a linear decision boundary. Justify your answer. False. QDA can overfit leading to poorer test performance. 
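Part (a) of Question 5 is easy to illustrate with a small simulation (a hedged sketch: the data-generating function, sample sizes and seed below are illustrative choices, not from the book). Two Gaussian classes sharing an identity covariance matrix have a linear Bayes boundary; fitting both classifiers to such data, QDA typically fits the training set at least as well as LDA but tends to have a slightly higher test error, with the gap shrinking as the training sample grows: library(MASS) set.seed(42) make_data <- function(n) { class <- rep(c(0, 1), each = n / 2) x <- matrix(rnorm(2 * n), ncol = 2) + 2 * class data.frame(x1 = x[, 1], x2 = x[, 2], class = factor(class)) } train <- make_data(200) test <- make_data(2000) mean(predict(lda(class ~ x1 + x2, data = train), test)$class != test$class) mean(predict(qda(class ~ x1 + x2, data = train), test)$class != test$class) Exact error rates vary with the seed, but on repeated runs QDA’s test error here is usually at or slightly above LDA’s.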
4.1.6 Question 6 Suppose we collect data for a group of students in a statistics class with variables \\(X_1 =\\) hours studied, \\(X_2 =\\) undergrad GPA, and \\(Y =\\) receive an A. We fit a logistic regression and produce estimated coefficients, \\(\\hat\\beta_0 = -6\\), \\(\\hat\\beta_1 = 0.05\\), \\(\\hat\\beta_2 = 1\\). Estimate the probability that a student who studies for 40h and has an undergrad GPA of 3.5 gets an A in the class. The logistic model is: \\[ \\log\\left(\\frac{p(X)}{1-p(x)}\\right) = -6 + 0.05X_1 + X_2 \\] or \\[ p(X) = \\frac{e^{-6 + 0.05X_1 + X_2}}{1 + e^{-6 + 0.05X_1 + X_2}} \\] When \\(X_1 = 40\\) and \\(X_2 = 3.5\\), the linear predictor is \\(-6 + 2 + 3.5 = -0.5\\), so \\(p(X) = e^{-0.5} / (1 + e^{-0.5}) \\approx 0.38\\). How many hours would the student in part (a) need to study to have a 50% chance of getting an A in the class? We would like to solve for \\(X_1\\) where \\(p(X) = 0.5\\). Taking the first equation above, we need to solve \\(0 = -6 + 0.05X_1 + 3.5\\), so \\(X_1 = 50\\) hours. 4.1.7 Question 7 Suppose that we wish to predict whether a given stock will issue a dividend this year (“Yes” or “No”) based on \\(X\\), last year’s percent profit. We examine a large number of companies and discover that the mean value of \\(X\\) for companies that issued a dividend was \\(\\bar{X} = 10\\), while the mean for those that didn’t was \\(\\bar{X} = 0\\). In addition, the variance of \\(X\\) for these two sets of companies was \\(\\hat{\\sigma}^2 = 36\\). Finally, 80% of companies issued dividends. Assuming that \\(X\\) follows a normal distribution, predict the probability that a company will issue a dividend this year given that its percentage profit was \\(X = 4\\) last year. Hint: Recall that the density function for a normal random variable is \\(f(x) =\\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-(x-\\mu)^2/2\\sigma^2}\\). You will need to use Bayes’ theorem. Let \\(v\\) be last year’s percent profit. For companies (D) issuing a dividend, \\(v_D \\sim \\mathcal{N}(10, 36)\\); for companies (N) not issuing a dividend, \\(v_N \\sim \\mathcal{N}(0, 36)\\); and \\(p(D) = 0.8\\). We want to find \\(p(D|v)\\) and we can calculate \\(p(v|D)\\) from the Gaussian density function. Note that since \\(\\sigma^2\\) is the same for both classes, the term \\(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\) cancels. \\[\\begin{align} p(D|v) &= \\frac{p(v|D) p(D)}{p(v|D)p(D) + p(v|N)p(N)} \\\\ &= \\frac{\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{0.8 \\times e^{-(4-10)^2/(2 \\times 36)}} {0.8 \\times e^{-(4-10)^2/(2 \\times 36)} + 0.2 \\times e^{-(4-0)^2/(2 \\times 36)}} \\\\ &= \\frac{0.8 e^{-1/2}}{0.8 e^{-1/2} + 0.2 e^{-2/9}} \\end{align}\\] exp(-0.5) * 0.8 / (exp(-0.5) * 0.8 + exp(-2/9) * 0.2) ## [1] 0.7518525 4.1.8 Question 8 Suppose that we take a data set, divide it into equally-sized training and test sets, and then try out two different classification procedures. First we use logistic regression and get an error rate of 20% on the training data and 30% on the test data. Next we use 1-nearest neighbors (i.e. \\(K = 1\\)) and get an average error rate (averaged over both test and training data sets) of 18%. Based on these results, which method should we prefer to use for classification of new observations? Why?
For \\(K = 1\\), performance on the training set is perfect and the error rate is zero, implying a test error rate of 36%. Logistic regression outperforms 1-nearest neighbor on the test set and therefore should be preferred. 4.1.9 Question 9 This problem has to do with odds. On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default? Odds is defined as \\(p/(1-p)\\). \\[0.37 = \\frac{p(x)}{1 - p(x)}\\] therefore, \\[p(x) = \\frac{0.37}{1 + 0.37} = 0.27\\] Suppose that an individual has a 16% chance of defaulting on her credit card payment. What are the odds that she will default? \\[0.16 / (1 - 0.16) = 0.19\\] 4.1.10 Question 10 Equation 4.32 derived an expression for \\(\\log(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)})\\) in the setting where \\(p > 1\\), so that the mean for the \\(k\\)th class, \\(\\mu_k\\), is a \\(p\\)-dimensional vector, and the shared covariance \\(\\Sigma\\) is a \\(p \\times p\\) matrix. However, in the setting with \\(p = 1\\), (4.32) takes a simpler form, since the means \\(\\mu_1, ..., \\mu_k\\) and the variance \\(\\sigma^2\\) are scalars. In this simpler setting, repeat the calculation in (4.32), and provide expressions for \\(a_k\\) and \\(b_{kj}\\) in terms of \\(\\pi_k, \\pi_K, \\mu_k, \\mu_K,\\) and \\(\\sigma^2\\). \\[\\begin{align*} \\log\\left(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)}\\right) & = \\log\\left(\\frac{\\pi_k f_k(x)}{\\pi_K f_K(x)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k \\exp(-1/2((x-\\mu_k)/\\sigma)^2)}{\\pi_K \\exp(-1/2((x-\\mu_K)/\\sigma)^2)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2} \\left(\\frac{x-\\mu_k}{\\sigma}\\right)^2 + \\frac{1}{2} \\left(\\frac{x-\\mu_K}{\\sigma}\\right)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} (x-\\mu_k)^2 + \\frac{1}{2\\sigma^2} (x-\\mu_K)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left((x-\\mu_k)^2 - (x-\\mu_K)^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(x^2-2x\\mu_k+\\mu_k^2 - x^2 + 2x\\mu_K - \\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(2x(\\mu_K - \\mu_k) + \\mu_k^2 -\\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2} + \\frac{x(\\mu_k - \\mu_K)}{\\sigma^2} \\end{align*}\\] Therefore, \\[a_k = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2}\\] and \\[b_k = (\\mu_k - \\mu_K) / \\sigma^2\\] 4.1.11 Question 11 ToDo Work out the detailed forms of \\(a_k\\), \\(b_{kj}\\), and \\(b_{kjl}\\) in (4.33). Your answer should involve \\(\\pi_k\\), \\(\\pi_K\\), \\(\\mu_k\\), \\(\\mu_K\\), \\(\\Sigma_k\\), and \\(\\Sigma_K\\). 4.1.12 Question 12 Suppose that you wish to classify an observation \\(X \\in \\mathbb{R}\\) into apples and oranges. You fit a logistic regression model and find that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\beta_0 + \\hat\\beta_1x)}{1 + \\exp(\\hat\\beta_0 + \\hat\\beta_1x)} \\] Your friend fits a logistic regression model to the same data using the softmax formulation in (4.13), and finds that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x)} {\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x) + \\exp(\\hat\\alpha_{apple0} + \\hat\\alpha_{apple1}x)} \\] What is the log odds of orange versus apple in your model? 
The log odds is just \\(\\hat\\beta_0 + \\hat\\beta_1x\\). What is the log odds of orange versus apple in your friend’s model? From 4.14, the log odds of our friend’s model is: \\[ (\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0}) + (\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1})x \\] Suppose that in your model, \\(\\hat\\beta_0 = 2\\) and \\(\\hat\\beta_1 = −1\\). What are the coefficient estimates in your friend’s model? Be as specific as possible. We can say that in our friend’s model \\(\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0} = 2\\) and \\(\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1} = -1\\). However, we cannot determine the specific value of each individual parameter. Now suppose that you and your friend fit the same two models on a different data set. This time, your friend gets the coefficient estimates \\(\\hat\\alpha_{orange0} = 1.2\\), \\(\\hat\\alpha_{orange1} = −2\\), \\(\\hat\\alpha_{apple0} = 3\\), \\(\\hat\\alpha_{apple1} = 0.6\\). What are the coefficient estimates in your model? The coefficients in our model would be \\(\\hat\\beta_0 = 1.2 - 3 = -1.8\\) and \\(\\hat\\beta_1 = -2 - 0.6 = -2.6\\). Finally, suppose you apply both models from (d) to a data set with 2,000 test observations. What fraction of the time do you expect the predicted class labels from your model to agree with those from your friend’s model? Explain your answer. The two models are identical up to parameterization, so their predicted class labels should agree 100% of the time. 4.2 Applied 4.2.1 Question 13 This question should be answered using the Weekly data set, which is part of the ISLR2 package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010. Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns? library(MASS) library(class) library(tidyverse) library(corrplot) library(ISLR2) library(e1071) summary(Weekly) ## Year Lag1 Lag2 Lag3 ## Min. :1990 Min. :-18.1950 Min. :-18.1950 Min. :-18.1950 ## 1st Qu.:1995 1st Qu.: -1.1540 1st Qu.: -1.1540 1st Qu.: -1.1580 ## Median :2000 Median : 0.2410 Median : 0.2410 Median : 0.2410 ## Mean :2000 Mean : 0.1506 Mean : 0.1511 Mean : 0.1472 ## 3rd Qu.:2005 3rd Qu.: 1.4050 3rd Qu.: 1.4090 3rd Qu.: 1.4090 ## Max. :2010 Max. : 12.0260 Max. : 12.0260 Max. : 12.0260 ## Lag4 Lag5 Volume Today ## Min. :-18.1950 Min. :-18.1950 Min. :0.08747 Min. :-18.1950 ## 1st Qu.: -1.1580 1st Qu.: -1.1660 1st Qu.:0.33202 1st Qu.: -1.1540 ## Median : 0.2380 Median : 0.2340 Median :1.00268 Median : 0.2410 ## Mean : 0.1458 Mean : 0.1399 Mean :1.57462 Mean : 0.1499 ## 3rd Qu.: 1.4090 3rd Qu.: 1.4050 3rd Qu.:2.05373 3rd Qu.: 1.4050 ## Max. : 12.0260 Max. : 12.0260 Max. :9.32821 Max. : 12.0260 ## Direction ## Down:484 ## Up :605 ## ## ## ## corrplot(cor(Weekly[, -9]), type = "lower", diag = FALSE, method = "ellipse") Volume is strongly positively correlated with Year. Other correlations are weak, although Lag1 is negatively correlated with Lag2 and positively correlated with Lag3. Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones?
fit <- glm( Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data = Weekly, family = binomial ) summary(fit) ## ## Call: ## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + ## Volume, family = binomial, data = Weekly) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) 0.26686 0.08593 3.106 0.0019 ** ## Lag1 -0.04127 0.02641 -1.563 0.1181 ## Lag2 0.05844 0.02686 2.175 0.0296 * ## Lag3 -0.01606 0.02666 -0.602 0.5469 ## Lag4 -0.02779 0.02646 -1.050 0.2937 ## Lag5 -0.01447 0.02638 -0.549 0.5833 ## Volume -0.02274 0.03690 -0.616 0.5377 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 1496.2 on 1088 degrees of freedom ## Residual deviance: 1486.4 on 1082 degrees of freedom ## AIC: 1500.4 ## ## Number of Fisher Scoring iterations: 4 Lag2 is the only significant predictor. Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression. contrasts(Weekly$Direction) ## Up ## Down 0 ## Up 1 pred <- predict(fit, type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly$Direction)) ## ## Down Up ## Down (pred) 54 48 ## Up (pred) 430 557 sum(diag(t)) / sum(t) ## [1] 0.5610652 The overall fraction of correct predictions is 0.56. Although logistic regression correctly predicts upwards movements well, it incorrectly predicts most downwards movements as up. Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). train <- Weekly$Year < 2009 fit <- glm(Direction ~ Lag2, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly[!train, ]$Direction)) ## ## Down Up ## Down (pred) 9 5 ## Up (pred) 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using LDA. fit <- lda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 9 5 ## Up 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using QDA. fit <- qda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 0 0 ## Up 43 61 sum(diag(t)) / sum(t) ## [1] 0.5865385 Repeat (d) using KNN with \\(K = 1\\). fit <- knn( Weekly[train, "Lag2", drop = FALSE], Weekly[!train, "Lag2", drop = FALSE], Weekly$Direction[train] ) (t <- table(fit, Weekly[!train, ]$Direction)) ## ## fit Down Up ## Down 21 29 ## Up 22 32 sum(diag(t)) / sum(t) ## [1] 0.5096154 Repeat (d) using naive Bayes. fit <- naiveBayes(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "class") (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 0 0 ## Up 43 61 sum(diag(t)) / sum(t) ## [1] 0.5865385 (With a single Gaussian predictor, naive Bayes is equivalent to QDA, so its predictions match those above.) Which of these methods appears to provide the best results on this data? Logistic regression and LDA are the best performing (both 62.5% accuracy on the held-out data). Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods.
Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for \\(K\\) in the KNN classifier. fit <- glm(Direction ~ Lag1, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5673077 fit <- glm(Direction ~ Lag3, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 * Lag2 * Lag3 * Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5961538 fit <- lda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5769231 fit <- qda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5192308 fit <- naiveBayes(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "class") mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5096154 set.seed(1) res <- sapply(1:30, function(k) { fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) mean(fit == Weekly[!train, ]$Direction) }) plot(1:30, res, type = "o", xlab = "k", ylab = "Fraction correct") (k <- which.max(res)) ## [1] 26 fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) table(fit, Weekly[!train, ]$Direction) ## ## fit Down Up ## Down 23 18 ## Up 20 43 mean(fit == Weekly[!train, ]$Direction) ## [1] 0.6346154 KNN using the first 3 Lag variables performs marginally better than logistic regression with Lag2 if we tune \\(k\\) to be \\(k = 26\\). 4.2.2 Question 14 In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables. x <- cbind(Auto[, -1], data.frame("mpg01" = Auto$mpg > median(Auto$mpg))) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings.
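Before plotting, a quick sanity check (a sketch; the exact counts depend on any ties with the median) that the median split gives balanced classes:

table(x$mpg01) # expect roughly equal numbers of TRUE and FALSE by construction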
par(mfrow = c(2, 4)) for (i in 1:7) hist(x[, i], breaks = 20, main = colnames(x)[i]) par(mfrow = c(2, 4)) for (i in 1:7) boxplot(x[, i] ~ x$mpg01, main = colnames(x)[i]) pairs(x[, 1:7]) Most variables show an association with the mpg01 category, and several variables are collinear. Split the data into a training set and a test set. set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? sort(sapply(1:7, function(i) { setNames(abs(t.test(x[, i] ~ x$mpg01)$statistic), colnames(x)[i]) })) ## acceleration year origin horsepower displacement weight ## 7.302430 9.403221 11.824099 17.681939 22.632004 22.932777 ## cylinders ## 23.035328 fit <- lda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.1068702 Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- qda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- glm(mpg01 ~ cylinders + weight + displacement, data = x[train, ], family = binomial) pred <- predict(fit, x[-train, ], type = "response") > 0.5 mean(pred != x[-train, ]$mpg01) ## [1] 0.1145038 Perform naive Bayes on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- naiveBayes(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "class") mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform KNN on the training data, with several values of \\(K\\), in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of \\(K\\) seems to perform the best on this data set? res <- sapply(1:50, function(k) { fit <- knn(x[train, c(1, 4, 2)], x[-train, c(1, 4, 2)], x$mpg01[train], k = k) mean(fit != x[-train, ]$mpg01) }) names(res) <- 1:50 plot(res, type = "o") res[which.min(res)] ## 3 ## 0.1068702 For the models tested here, \\(k = 3\\) appears to perform best. QDA and naive Bayes have the lowest test error rates overall (both 0.099), performing slightly better than LDA (0.107). 4.2.3 Question 15 This problem involves writing functions. Write a function, Power(), that prints out the result of raising 2 to the 3rd power. In other words, your function should compute \\(2^3\\) and print out the results. Hint: Recall that x^a raises x to the power a. Use the print() function to output the result. Power <- function() print(2^3) Create a new function, Power2(), that allows you to pass any two numbers, x and a, and prints out the value of x^a. You can do this by beginning your function with the line > Power2=function(x,a) { You should be able to call your function by entering, for instance, > Power2(3, 8) on the command line. This should output the value of \\(3^8\\), namely, 6,561.
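A one-line check of the value quoted in the question:

3^8 ## [1] 6561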
Power2 <- function(x, a) print(x^a) Using the Power2() function that you just wrote, compute \\(10^3\\), \\(8^{17}\\), and \\(131^3\\). c(Power2(10, 3), Power2(8, 17), Power2(131, 3)) ## [1] 1000 ## [1] 2.2518e+15 ## [1] 2248091 ## [1] 1.000000e+03 2.251800e+15 2.248091e+06 Now create a new function, Power3(), that actually returns the result x^a as an R object, rather than simply printing it to the screen. That is, if you store the value x^a in an object called result within your function, then you can simply return() this result, using the following line: > return(result) The line above should be the last line in your function, before the } symbol. Power3 <- function(x, a) { result <- x^a return(result) } Now using the Power3() function, create a plot of \\(f(x) = x^2\\). The \\(x\\)-axis should display a range of integers from 1 to 10, and the \\(y\\)-axis should display \\(x^2\\). Label the axes appropriately, and use an appropriate title for the figure. Consider displaying either the \\(x\\)-axis, the \\(y\\)-axis, or both on the log-scale. You can do this by using log = "x", log = "y", or log = "xy" as arguments to the plot() function. plot(1:10, Power3(1:10, 2), xlab = "x", ylab = expression(paste("x"^"2")), log = "y" ) Create a function, PlotPower(), that allows you to create a plot of x against x^a for a fixed a and for a range of values of x. For instance, if you call > PlotPower(1:10, 3) then a plot should be created with an \\(x\\)-axis taking on values \\(1,2,...,10\\), and a \\(y\\)-axis taking on values \\(1^3,2^3,...,10^3\\). PlotPower <- function(x, a, log = "y") { plot(x, Power3(x, a), xlab = "x", ylab = substitute("x"^a, list(a = a)), log = log ) } PlotPower(1:10, 3) 4.2.4 Question 16 Using the Boston data set, fit classification models in order to predict whether a given census tract has a crime rate above or below the median. Explore logistic regression, LDA, naive Bayes and KNN models using various sub-sets of the predictors. Describe your findings. Hint: You will have to create the response variable yourself, using the variables that are contained in the Boston data set. x <- cbind( ISLR2::Boston[, -1], data.frame("highcrim" = Boston$crim > median(Boston$crim)) ) set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) We can find the most associated variables by performing Wilcoxon rank-sum tests. ord <- order(sapply(1:12, function(i) { p <- wilcox.test(as.numeric(x[train, i]) ~ x[train, ]$highcrim)$p.value setNames(log10(p), colnames(x)[i]) })) ord <- names(x)[ord] ord ## [1] "nox" "dis" "indus" "tax" "age" "rad" "zn" ## [8] "lstat" "medv" "ptratio" "rm" "chas" Variables nox (nitrogen oxides concentration) followed by dis (distance to employment center) appear to be most associated with high crime. Let’s reorder columns by those most associated with highcrim (in the training data): x <- x[, c(ord, "highcrim")] Let’s look at univariate associations with highcrim (in the training data): x[train, ] |> pivot_longer(!highcrim) |> mutate(name = factor(name, levels = ord)) |> ggplot(aes(highcrim, value)) + geom_boxplot() + facet_wrap(~name, scale = "free") Below we define a helper that fits LDA, logistic regression, naive Bayes and KNN models (with \\(k = 1..50\\)) for a given set of predictors and returns the test error rates. We fit models using increasing numbers of predictors: column 1, then columns 1 and 2, etc.
fit_models <- function(cols, k_vals = 1:50) { dat_train <- x[train, cols, drop = FALSE] dat_test <- x[-train, cols, drop = FALSE] fit <- lda(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "response")$class lda_err <- mean(pred != x$highcrim[-train]) fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 logreg_err <- mean(pred != x$highcrim[-train]) fit <- naiveBayes(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "class") nb_err <- mean(pred != x$highcrim[-train]) res <- sapply(k_vals, function(k) { fit <- knn(dat_train, dat_test, x$highcrim[train], k = k) mean(fit != x$highcrim[-train]) }) knn_err <- min(res) c("LDA" = lda_err, "LR" = logreg_err, "NB" = nb_err, "KNN" = knn_err) } res <- sapply(1:12, function(max) fit_models(1:max)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN appears to perform better (if we tune \\(k\\)) for all numbers of predictors. fit <- knn( x[train, "nox", drop = FALSE], x[-train, "nox", drop = FALSE], x$highcrim[train], k = 1 ) table(fit, x[-train, ]$highcrim) ## ## fit FALSE TRUE ## FALSE 78 2 ## TRUE 3 86 mean(fit != x[-train, ]$highcrim) * 100 ## [1] 2.95858 Surprisingly, the best model (with an error rate of <5%) uses \\(k = 1\\) and assigns crime rate categories based on the town with the single most similar nitrogen oxide concentration (nox). This might be, for example, because nearby towns have similar crime rates, and we can obtain good predictions by predicting crime rate based on a nearby town. But what if we only consider \\(k = 20\\)? res <- sapply(1:12, function(max) fit_models(1:max, k_vals = 20)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN still performs best with a single predictor (nox), but logistic regression with 12 predictors also performs well and has an error rate of ~12%. vars <- names(x)[1:12] dat_train <- x[train, vars] dat_test <- x[-train, vars] fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 table(pred, x[-train, ]$highcrim) ## ## pred FALSE TRUE ## FALSE 70 9 ## TRUE 11 79 mean(pred != x$highcrim[-train]) * 100 ## [1] 11.83432 summary(fit) ## ## Call: ## glm(formula = x$highcrim[train] ~ ., family = binomial, data = dat_train) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) -44.525356 7.935621 -5.611 2.01e-08 *** ## nox 55.062428 10.281556 5.355 8.53e-08 *** ## dis 1.080847 0.304084 3.554 0.000379 *** ## indus -0.067493 0.058547 -1.153 0.248997 ## tax -0.005336 0.003138 -1.700 0.089060 . ## age 0.020965 0.014190 1.477 0.139556 ## rad 0.678196 0.192193 3.529 0.000418 *** ## zn -0.099558 0.045914 -2.168 0.030134 * ## lstat 0.134035 0.058623 2.286 0.022231 * ## medv 0.213114 0.088922 2.397 0.016547 * ## ptratio 0.294396 0.155285 1.896 0.057981 . ## rm -0.518115 0.896423 -0.578 0.563278 ## chas 0.139557 0.798632 0.175 0.861280 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.'
0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 467.04 on 336 degrees of freedom ## Residual deviance: 135.80 on 324 degrees of freedom ## AIC: 161.8 ## ## Number of Fisher Scoring iterations: 9 5 Resampling Methods 5.1 Conceptual 5.1.1 Question 1 Using basic statistical properties of the variance, as well as single-variable calculus, derive (5.6). In other words, prove that \\(\\alpha\\) given by (5.6) does indeed minimize \\(Var(\\alpha X + (1 - \\alpha)Y)\\). Equation 5.6 is: \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] Remember that: \\[ \\mathrm{Var}(aX) = a^2\\mathrm{Var}(X), \\\\ \\mathrm{Var}(X + Y) = \\mathrm{Var}(X) + \\mathrm{Var}(Y) + 2\\mathrm{Cov}(X,Y), \\\\ \\mathrm{Cov}(aX, bY) = ab\\mathrm{Cov}(X, Y) \\] If we define \\(\\sigma^2_X = \\mathrm{Var}(X)\\), \\(\\sigma^2_Y = \\mathrm{Var}(Y)\\) and \\(\\sigma_{XY} = \\mathrm{Cov}(X, Y)\\), then: \\[\\begin{align} \\mathrm{Var}(\\alpha X + (1 - \\alpha)Y) &= \\alpha^2\\sigma^2_X + (1-\\alpha)^2\\sigma^2_Y + 2\\alpha(1 - \\alpha)\\sigma_{XY} \\\\ &= \\alpha^2\\sigma^2_X + \\sigma^2_Y - 2\\alpha\\sigma^2_Y + \\alpha^2\\sigma^2_Y + 2\\alpha\\sigma_{XY} - 2\\alpha^2\\sigma_{XY} \\end{align}\\] Now we want to find when the rate of change of this function is 0 with respect to \\(\\alpha\\), so we compute the partial derivative, set to 0 and solve. \\[ \\frac{\\partial}{\\partial{\\alpha}} \\mathrm{Var}(\\alpha X + (1 - \\alpha)Y) = 2\\alpha\\sigma^2_X - 2\\sigma^2_Y + 2\\alpha\\sigma^2_Y + 2\\sigma_{XY} - 4\\alpha\\sigma_{XY} = 0 \\] Moving \\(\\alpha\\) terms to the same side: \\[ \\alpha\\sigma^2_X + \\alpha\\sigma^2_Y - 2\\alpha\\sigma_{XY} = \\sigma^2_Y - \\sigma_{XY} \\] \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] We should also show that this is a minimum, by checking that the second partial derivative with respect to \\(\\alpha\\) is \\(\\ge 0\\). \\[\\begin{align} \\frac{\\partial^2}{\\partial{\\alpha^2}} &= 2\\sigma^2_X + 2\\sigma^2_Y - 4\\sigma_{XY} \\\\ &= 2(\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}) \\\\ &= 2\\mathrm{Var}(X - Y) \\end{align}\\] Since variances are non-negative, this is non-negative, so we have found a minimum. 5.1.2 Question 2 We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations. What is the probability that the first bootstrap observation is not the \\(j\\)th observation from the original sample? Justify your answer. This is 1 - probability that it is the \\(j\\)th = \\(1 - 1/n\\). What is the probability that the second bootstrap observation is not the \\(j\\)th observation from the original sample? Since each bootstrap observation is drawn independently and with replacement, this probability is the same (\\(1 - 1/n\\)). Argue that the probability that the \\(j\\)th observation is not in the bootstrap sample is \\((1 - 1/n)^n\\). For the \\(j\\)th observation to not be in the sample, it would have to not be picked for each of the \\(n\\) positions, and these draws are independent, thus the probability is \\((1 - 1/n)^n\\). When \\(n = 5\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 5 1 - (1 - 1 / n)^n ## [1] 0.67232 \\(p = 0.67\\) When \\(n = 100\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample?
n <- 100 1 - (1 - 1 / n)^n ## [1] 0.6339677 \\(p = 0.64\\) When \\(n = 10,000\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 10000 1 - (1 - 1 / n)^n ## [1] 0.632139 \\(p = 0.63\\) Create a plot that displays, for each integer value of \\(n\\) from 1 to 100,000, the probability that the \\(j\\)th observation is in the bootstrap sample. Comment on what you observe. x <- sapply(1:100000, function(n) 1 - (1 - 1 / n)^n) plot(x, log = "x", type = "o") The probability rapidly approaches 0.63 with increasing \\(n\\). Note that \\[e^x = \\lim_{n \\to \\infty} \\left(1 + \\frac{x}{n}\\right)^n,\\] so with \\(x = -1\\), \\((1 - 1/n)^n \\to e^{-1}\\), and the probability converges to \\(1 - e^{-1} = 1 - 1/e \\approx 0.632\\). We will now investigate numerically the probability that a bootstrap sample of size \\(n = 100\\) contains the \\(j\\)th observation. Here \\(j = 4\\). We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample. > store <- rep (NA, 10000) > for (i in 1:10000) { store[i] <- sum(sample(1:100, rep = TRUE) == 4) > 0 } > mean(store) Comment on the results obtained. store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0) mean(store) ## [1] 0.6424 The probability of including \\(4\\) when resampling numbers \\(1...100\\) is close to \\(1 - (1 - 1/100)^{100}\\). 5.1.3 Question 3 We now review \\(k\\)-fold cross-validation. Explain how \\(k\\)-fold cross-validation is implemented. We divide our data into \\(k\\) (approximately equal) subsets, and then, for each subset in turn, generate predictions for that subset using a model trained on the remaining \\(k - 1\\) subsets combined. What are the advantages and disadvantages of \\(k\\)-fold cross-validation relative to: The validation set approach? LOOCV? When using a validation set, we can only train on a small portion of the data as we must reserve the rest for validation. As a result, it can overestimate the test error rate (assuming we then train using the complete data for future prediction). It is also sensitive to which observations are included in the training vs. validation sets. It is, however, low cost in terms of processing time (as we only have to fit one model). When using LOOCV, we can train on \\(n-1\\) observations; however, the fitted models each differ only by the exclusion of a single observation. As a result, LOOCV estimates can have high variance (the fitted models are very similar to one another, but might be quite different from what we would obtain with a different data set). LOOCV is also costly in terms of processing time. 5.1.4 Question 4 Suppose that we use some statistical learning method to make a prediction for the response \\(Y\\) for a particular value of the predictor \\(X\\). Carefully describe how we might estimate the standard deviation of our prediction. We could address this with bootstrapping. Our procedure would be to (jointly) resample the \\(Y\\) and \\(X\\) variables and fit our model many times. From each fitted model we obtain a prediction at the particular value of \\(X\\), and we can then calculate the standard deviation of these predictions over the bootstrap samples. 5.2 Applied 5.2.1 Question 5 In Chapter 4, we used logistic regression to predict the probability of default using income and balance on the Default data set. We will now estimate the test error of this logistic regression model using the validation set approach. Do not forget to set a random seed before beginning your analysis. Fit a logistic regression model that uses income and balance to predict default.
library(ISLR2) set.seed(42) fit <- glm(default ~ income + balance, data = Default, family = "binomial") Using the validation set approach, estimate the test error of this model. In order to do this, you must perform the following steps: Split the sample set into a training set and a validation set. Fit a multiple logistic regression model using only the training observations. Obtain a prediction of default status for each individual in the validation set by computing the posterior probability of default for that individual, and classifying the individual to the default category if the posterior probability is greater than 0.5. Compute the validation set error, which is the fraction of the observations in the validation set that are misclassified. train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") table(pred, Default$default[-train]) ## ## pred No Yes ## No 4817 110 ## Yes 20 53 mean(pred != Default$default[-train]) ## [1] 0.026 Repeat the process in (b) three times, using three different splits of the observations into a training set and a validation set. Comment on the results obtained. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0260 0.0294 0.0258 The results obtained are variable and depend on the samples allocated to training vs. test. Now consider a logistic regression model that predicts the probability of default using income, balance, and a dummy variable for student. Estimate the test error for this model using the validation set approach. Comment on whether or not including a dummy variable for student leads to a reduction in the test error rate. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance + student, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0278 0.0256 0.0250 Including student does not seem to make a substantial improvement to the test error. 5.2.2 Question 6 We continue to consider the use of a logistic regression model to predict the probability of default using income and balance on the Default data set. In particular, we will now compute estimates for the standard errors of the income and balance logistic regression coefficients in two different ways: (1) using the bootstrap, and (2) using the standard formula for computing the standard errors in the glm() function. Do not forget to set a random seed before beginning your analysis. Using the summary() and glm() functions, determine the estimated standard errors for the coefficients associated with income and balance in a multiple logistic regression model that uses both predictors. fit <- glm(default ~ income + balance, data = Default, family = "binomial") summary(fit) ## ## Call: ## glm(formula = default ~ income + balance, family = "binomial", ## data = Default) ## ## Coefficients: ## Estimate Std. 
Error z value Pr(>|z|) ## (Intercept) -1.154e+01 4.348e-01 -26.545 < 2e-16 *** ## income 2.081e-05 4.985e-06 4.174 2.99e-05 *** ## balance 5.647e-03 2.274e-04 24.836 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 2920.6 on 9999 degrees of freedom ## Residual deviance: 1579.0 on 9997 degrees of freedom ## AIC: 1585 ## ## Number of Fisher Scoring iterations: 8 The estimated standard errors from the glm() summary are 5.0e-6 for \\(\\hat\\beta_1\\) (income) and 2.3e-4 for \\(\\hat\\beta_2\\) (balance). Write a function, boot.fn(), that takes as input the Default data set as well as an index of the observations, and that outputs the coefficient estimates for income and balance in the multiple logistic regression model. boot.fn <- function(x, i) { fit <- glm(default ~ income + balance, data = x[i, ], family = "binomial") coef(fit)[-1] } Use the boot() function together with your boot.fn() function to estimate the standard errors of the logistic regression coefficients for income and balance. library(boot) set.seed(42) boot(Default, boot.fn, R = 1000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Default, statistic = boot.fn, R = 1000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 2.080898e-05 2.737444e-08 5.073444e-06 ## t2* 5.647103e-03 1.176249e-05 2.299133e-04 Comment on the estimated standard errors obtained using the glm() function and using your bootstrap function. The standard errors obtained by bootstrapping are similar to those estimated by glm(). 5.2.3 Question 7 In Sections 5.3.2 and 5.3.3, we saw that the cv.glm() function can be used in order to compute the LOOCV test error estimate. Alternatively, one could compute those quantities using just the glm() and predict.glm() functions, and a for loop. You will now take this approach in order to compute the LOOCV error for a simple logistic regression model on the Weekly data set. Recall that in the context of classification problems, the LOOCV error is given in (5.4). Fit a logistic regression model that predicts Direction using Lag1 and Lag2. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly, family = "binomial") Fit a logistic regression model that predicts Direction using Lag1 and Lag2 using all but the first observation. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-1, ], family = "binomial") Use the model from (b) to predict the direction of the first observation. You can do this by predicting that the first observation will go up if \\(P(\\text{Direction} = \\text{Up} | \\text{Lag1}, \\text{Lag2}) > 0.5\\). Was this observation correctly classified? predict(fit, newdata = Weekly[1, , drop = FALSE], type = "response") > 0.5 ## 1 ## TRUE The model predicts “Up”, but the actual direction of the first observation was “Down”, so the observation was incorrectly classified. Write a for loop from \\(i = 1\\) to \\(i = n\\), where \\(n\\) is the number of observations in the data set, that performs each of the following steps: Fit a logistic regression model using all but the \\(i\\)th observation to predict Direction using Lag1 and Lag2 . Compute the posterior probability of the market moving up for the \\(i\\)th observation. Use the posterior probability for the \\(i\\)th observation in order to predict whether or not the market moves up. Determine whether or not an error was made in predicting the direction for the \\(i\\)th observation. If an error was made, then indicate this as a 1, and otherwise indicate it as a 0.
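The for loop in the solution below implements these four steps. As a cross-check, an equivalent sapply sketch (same model and data; an alternative formulation, not part of the original solution) that returns the same vector of error indicators:

errs <- sapply(seq_len(nrow(Weekly)), function(i) {
  fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-i, ], family = "binomial")
  up <- predict(fit, Weekly[i, , drop = FALSE], type = "response") > 0.5
  ifelse(up, "Up", "Down") != Weekly$Direction[i] # TRUE when misclassified
})

mean(errs) then gives the LOOCV error estimate directly.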
error <- numeric(nrow(Weekly)) for (i in 1:nrow(Weekly)) { fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-i, ], family = "binomial") p <- predict(fit, newdata = Weekly[i, , drop = FALSE], type = "response") > 0.5 error[i] <- ifelse(p, "Up", "Down") != Weekly$Direction[i] } Take the average of the \\(n\\) numbers obtained in (d) in order to obtain the LOOCV estimate for the test error. Comment on the results. mean(error) ## [1] 0.4499541 The LOOCV test error rate is 45%, which implies that our predictions are correct marginally more often than not. 5.2.4 Question 8 We will now perform cross-validation on a simulated data set. Generate a simulated data set as follows: > set.seed(1) > x <- rnorm(100) > y <- x - 2 *x^2 + rnorm(100) In this data set, what is \\(n\\) and what is \\(p\\)? Write out the model used to generate the data in equation form. set.seed(1) x <- rnorm(100) y <- x - 2 * x^2 + rnorm(100) \\(n\\) is 100 and \\(p\\) is 1 (there are 100 observations and \\(y\\) is predicted with a single variable \\(x\\)). The model equation is: \\[y = -2x^2 + x + \\epsilon\\]. Create a scatterplot of \\(X\\) against \\(Y\\). Comment on what you find. plot(x, y) \\(y\\) has a (negative) quadratic relationship with \\(x\\). Set a random seed, and then compute the LOOCV errors that result from fitting the following four models using least squares: \\(Y = \\beta_0 + \\beta_1 X + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\beta_4 X^4 + \\epsilon\\). Note you may find it helpful to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). library(boot) set.seed(42) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why? set.seed(43) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 The results are the same because we are using LOOCV. When doing this, the model is fit leaving each one of the observations out in turn, and thus there is no stochasticity involved. Which of the models in (c) had the smallest LOOCV error? Is this what you expected? Explain your answer. The second model had the smallest LOOCV error. This is what would be expected since the model used to generate the data was quadratic and we are measuring the test (rather than training) error rate to evaluate performance. Comment on the statistical significance of the coefficient estimates that results from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results? for (i in 1:4) printCoefmat(coef(summary(glm(y ~ poly(x, i), data = dat)))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.55002 0.26001 -5.9613 3.954e-08 *** ## poly(x, i) 6.18883 2.60014 2.3802 0.01924 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095803 -16.1792 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.958032 6.4599 4.185e-09 *** ## poly(x, i)2 -23.948305 0.958032 -24.9974 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) -1.550023 0.096263 -16.1019 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.962632 6.4291 4.972e-09 *** ## poly(x, i)2 -23.948305 0.962632 -24.8779 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.962632 0.2744 0.7844 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095905 -16.1620 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.959051 6.4531 4.591e-09 *** ## poly(x, i)2 -23.948305 0.959051 -24.9708 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.959051 0.2754 0.7836 ## poly(x, i)4 1.257095 0.959051 1.3108 0.1931 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 We can see that the linear term in the first model is only marginally significant, but all terms (\\(\\beta_0, \\beta_1\\) and \\(\\beta_2\\)) are highly significant in the quadratic model. After this, subsequent \\(\\beta_n\\) terms are not significant. Therefore, these results agree with those from cross-validation. 5.2.5 Question 9 We will now consider the Boston housing data set, from the ISLR2 library. Based on this data set, provide an estimate for the population mean of medv. Call this estimate \\(\\hat\\mu\\). (mu <- mean(Boston$medv)) ## [1] 22.53281 Provide an estimate of the standard error of \\(\\hat\\mu\\). Interpret this result. Hint: We can compute the standard error of the sample mean by dividing the sample standard deviation by the square root of the number of observations. sd(Boston$medv) / sqrt(length(Boston$medv)) ## [1] 0.4088611 Now estimate the standard error of \\(\\hat\\mu\\) using the bootstrap. How does this compare to your answer from (b)? set.seed(42) (bs <- boot(Boston$medv, function(v, i) mean(v[i]), 10000)) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) mean(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 22.53281 0.002175751 0.4029139 The standard error using the bootstrap (0.403) is very close to that obtained from the formula above (0.409). Based on your bootstrap estimate from (c), provide a 95% confidence interval for the mean of medv. Compare it to the results obtained using t.test(Boston$medv). Hint: You can approximate a 95% confidence interval using the formula \\([\\hat\\mu - 2SE(\\hat\\mu), \\hat\\mu + 2SE(\\hat\\mu)].\\) se <- sd(bs$t) c(mu - 2 * se, mu + 2 * se) ## [1] 21.72698 23.33863 Based on this data set, provide an estimate, \\(\\hat\\mu_{med}\\), for the median value of medv in the population. median(Boston$medv) ## [1] 21.2 We now would like to estimate the standard error of \\(\\hat\\mu_{med}\\). Unfortunately, there is no simple formula for computing the standard error of the median. Instead, estimate the standard error of the median using the bootstrap. Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) median(v[i]), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) median(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 21.2 -0.01331 0.3744634 The estimated standard error of the median is 0.374. This is lower than the standard error of the mean. Based on this data set, provide an estimate for the tenth percentile of medv in Boston census tracts. Call this quantity \\(\\hat\\mu_{0.1}\\). (You can use the quantile() function.) quantile(Boston$medv, 0.1) ## 10% ## 12.75 Use the bootstrap to estimate the standard error of \\(\\hat\\mu_{0.1}\\).
Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) quantile(v[i], 0.1), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) quantile(v[i], ## 0.1), R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 12.75 0.013405 0.497298 We get a standard error of ~0.5. This is higher than the standard error of the median. Nevertheless, the standard error is quite small, thus we can be fairly confident about the value of the 10th percentile. 6 Linear Model Selection and Regularization 6.1 Conceptual 6.1.1 Question 1 We perform best subset, forward stepwise, and backward stepwise selection on a single data set. For each approach, we obtain \\(p + 1\\) models, containing \\(0, 1, 2, ..., p\\) predictors. Explain your answers: Which of the three models with \\(k\\) predictors has the smallest training RSS? Best subset considers the most models (all possible combinations of \\(p\\) predictors are considered), therefore this will give the smallest training RSS (it will at least consider all possibilities covered by forward and backward stepwise selection). However, all three approaches are expected to give similar if not identical results in practice. Which of the three models with \\(k\\) predictors has the smallest test RSS? We cannot tell which model will perform best on the test RSS. The answer will depend on the tradeoff between fitting to the data and overfitting. True or False: The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the (\\(k+1\\))-variable model identified by forward stepwise selection. True. Forward stepwise selection retains all features identified in previous models as \\(k\\) is increased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. True. Backward stepwise selection removes features one by one as \\(k\\) is decreased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by forward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by best subset are a subset of the predictors in the \\((k+1)\\)-variable model identified by best subset selection. False. Best subset selection can identify different combinations of variables for each \\(k\\) by considering all possible models. 6.1.2 Question 2 For parts (a) through (c), indicate which of i. through iv. is correct. Justify your answer. The lasso, relative to least squares, is: More flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. More flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias.
Less flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. Less flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. By using shrinkage, lasso can reduce the number of predictors so is less flexible. As a result, it will lead to an increase in bias by approximating the true relationship. We hope that this increase is small but that we dramatically reduce variance (i.e. the difference we would see in the model fit between different sets of training data). Repeat (a) for ridge regression relative to least squares. The same is true of ridge regression—shrinkage results in a less flexible model and can reduce variance. Repeat (a) for non-linear methods relative to least squares. Non-linear methods can be more flexible. They can perform better as long as they don’t substantially increase variance. 6.1.3 Question 3 Suppose we estimate the regression coefficients in a linear regression model by minimizing: \\[ \\sum_{i=1}^n\\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 \\textrm{subject to} \\sum_{j=1}^p|\\beta_j| \\le s \\] for a particular value of \\(s\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(s\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(s\\) increases, the model becomes more flexible (the sum of absolute coefficients can be higher). With more flexible models, training RSS will always decrease. Repeat (a) for test RSS. With more flexible models, test RSS will decrease (as the fit improves) and will then increase due to overfitting (high variance). Repeat (a) for variance. As \\(s\\) increases, the model becomes more flexible so variance will increase. Repeat (a) for (squared) bias. As \\(s\\) increases, the model becomes more flexible so bias will decrease. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.4 Question 4 Suppose we estimate the regression coefficients in a linear regression model by minimizing \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] for a particular value of \\(\\lambda\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(\\lambda\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(\\lambda\\) is increased, more weight is placed on the sum of squared coefficients and so the model becomes less flexible. As a result, training RSS must increase. Repeat (a) for test RSS. As \\(\\lambda\\) increases, flexibility decreases so test RSS will decrease (variance decreases) but will then increase (as bias increases). Repeat (a) for variance. Steadily decrease. Repeat (a) for (squared) bias. Steadily increase. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.5 Question 5 It is well-known that ridge regression tends to give similar coefficient values to correlated variables, whereas the lasso may give quite different coefficient values to correlated variables. 
We will now explore this property in a very simple setting. Suppose that \\(n = 2, p = 2, x_{11} = x_{12}, x_{21} = x_{22}\\). Furthermore, suppose that \\(y_1 + y_2 = 0\\) and \\(x_{11} + x_{21} = 0\\) and \\(x_{12} + x_{22} = 0\\), so that the estimate for the intercept in a least squares, ridge regression, or lasso model is zero: \\(\\hat{\\beta}_0 = 0\\). Write out the ridge regression optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] We can ignore \\(\\beta_0\\) and can expand the sums since there are only two terms. Additionally, we can define \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\). We then need to minimize \\[\\begin{align} f = & (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ f = & y_1^2 - 2y_1\\beta_1x_1 - 2y_1\\beta_2x_1 + \\beta_1^2x_1^2 + 2\\beta_1\\beta_2x_1^2 + \\beta_2^2x_1^2 + \\\\ & y_2^2 - 2y_2\\beta_1x_2 - 2y_2\\beta_2x_2 + \\beta_1^2x_2^2 + 2\\beta_1\\beta_2x_2^2 + \\beta_2^2x_2^2 + \\\\ & \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ \\end{align}\\] Argue that in this setting, the ridge coefficient estimates satisfy \\(\\hat{\\beta}_1 = \\hat{\\beta}_2\\). We can find when the above is minimized with respect to each of \\(\\beta_1\\) and \\(\\beta_2\\) by partial differentiation. \\[ \\frac{\\partial f}{\\partial{\\beta_1}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_1 \\] \\[ \\frac{\\partial f}{\\partial{\\beta_2}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_2 \\] A minimum can be found when these are set to 0. \\[ \\lambda\\beta_1 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\\\ \\lambda\\beta_2 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\] Therefore \\(\\lambda\\beta_1 = \\lambda\\beta_2\\) and hence \\(\\beta_1 = \\beta_2\\); thus there is only one solution, namely when the coefficients are equal. Write out the lasso optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p |\\beta_j| \\] As above (and defining \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\)) we simplify to \\[ (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda|\\beta_1| + \\lambda|\\beta_2| \\] Argue that in this setting, the lasso coefficients \\(\\hat{\\beta}_1\\) and \\(\\hat{\\beta}_2\\) are not unique—in other words, there are many possible solutions to the optimization problem in (c). Describe these solutions. We will consider the alternate form of the lasso optimization problem \\[ (y_1 - \\hat{\\beta_1}x_1 - \\hat{\\beta_2}x_1)^2 + (y_2 - \\hat{\\beta_1}x_2 - \\hat{\\beta_2}x_2)^2 \\quad \\text{subject to} \\quad |\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s \\] Since \\(x_1 + x_2 = 0\\) and \\(y_1 + y_2 = 0\\), this is equivalent to minimizing \\(2(y_1 - (\\hat{\\beta_1} + \\hat{\\beta_2})x_1)^2\\) which has a solution when \\(\\hat{\\beta_1} + \\hat{\\beta_2} = y_1/x_1\\). Geometrically, this is a \\(45^\\circ\\) backwards sloping line in the (\\(\\hat{\\beta_1}\\), \\(\\hat{\\beta_2}\\)) plane.
The constraints \\(|\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s\\) specify a diamond in the same plane, with edges at \\(45^\\circ\\), centered at the origin, which intersect the axes at a distance \\(s\\) from the origin. Thus, points along two edges of the diamond (\\(\\hat{\\beta_1} + \\hat{\\beta_2} = s\\) and \\(\\hat{\\beta_1} + \\hat{\\beta_2} = -s\\)) become solutions to the lasso optimization problem. 6.1.6 Question 6 We will now explore (6.12) and (6.13) further. Consider (6.12) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.12) as a function of \\(\\beta_1\\). Your plot should confirm that (6.12) is solved by (6.14). Equation 6.12 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] Equation 6.14 is: \\[ \\hat{\\beta}_j^R = y_j/(1 + \\lambda) \\] where \\(\\hat{\\beta}_j^R\\) is the ridge regression estimate. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * beta^2 } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y / (1 + lambda), lty = 2) Consider (6.13) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.13) as a function of \\(\\beta_1\\). Your plot should confirm that (6.13) is solved by (6.15). Equation 6.13 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p|\\beta_j| \\] Equation 6.15 is: \\[ \\hat{\\beta}_j^L = \\begin{cases} y_j - \\lambda/2 &\\mbox{if } y_j > \\lambda/2; \\\\ y_j + \\lambda/2 &\\mbox{if } y_j < -\\lambda/2; \\\\ 0 &\\mbox{if } |y_j| \\le \\lambda/2; \\end{cases} \\] For \\(\\lambda = 0.7\\) and \\(y = 1.4\\), the top case applies. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * abs(beta) } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.13") abline(v = y - lambda / 2, lty = 2) 6.1.7 Question 7 We will now derive the Bayesian connection to the lasso and ridge regression discussed in Section 6.2.2. Suppose that \\(y_i = \\beta_0 + \\sum_{j=1}^p x_{ij}\\beta_j + \\epsilon_i\\) where \\(\\epsilon_1, ..., \\epsilon_n\\) are independent and identically distributed from a \\(N(0, \\sigma^2)\\) distribution. Write out the likelihood for the data. Writing \\(f\\) for the \\(N(0, \\sigma^2)\\) density, \\[\\begin{align*} \\mathcal{L} &= \\prod_i^n f(\\epsilon_i) \\\\ &= \\prod_i^n \\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\exp\\left(-\\frac{\\epsilon_i^2}{2\\sigma^2}\\right) \\\\ &= \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\end{align*}\\] Assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a double-exponential distribution with mean 0 and common scale parameter b: i.e. \\(p(\\beta) = \\frac{1}{2b}\\exp(-|\\beta|/b)\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior can be calculated by multiplying the prior and likelihood (up to a proportionality constant). \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{2b}\\exp\\left(-\\frac{|\\beta_j|}{b}\\right) \\\\ &\\propto \\left(\\frac{1}{2b}\\right)^p \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 -\\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\end{align*}\\] Argue that the lasso estimate is the mode for \\(\\beta\\) under this posterior distribution.
Let us find the maximum of the posterior distribution (the mode). Maximizing the posterior probability is equivalent to maximizing its log, which is: \\[ \\log(p(\\beta|X,Y)) \\propto \\log\\left[ \\left(\\frac{1}{2b}\\right)^p \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\right] - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\] Since the first term is independent of \\(\\beta\\), our solution will be when we minimize the second term. \\[\\begin{align*} \\DeclareMathOperator*{\\argmin}{arg\\,min} \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) &= \\argmin_\\beta \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\\\ &= \\argmin_\\beta \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\end{align*}\\] Note that \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{2\\sigma^2}{b}\\), the mode corresponds to lasso optimization. \\[ \\argmin_\\beta RSS + \\lambda\\sum_j^p|\\beta_j| \\] Now assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a normal distribution with mean zero and variance \\(c\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior is now: \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{\\sqrt{2\\pi c}}\\exp\\left(-\\frac{\\beta_j^2}{2c}\\right) \\\\ &\\propto \\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 - \\frac{1}{2c}\\sum_j^p\\beta_j^2\\right) \\end{align*}\\] Argue that the ridge regression estimate is both the mode and the mean for \\(\\beta\\) under this posterior distribution. To show that the ridge estimate is the mode we can again find the maximum by maximizing the log of the posterior. The log is \\[ \\log{p(\\beta|X,Y)} \\propto \\log{\\left[\\left(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\right ]} - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right) \\] We can maximize (with respect to \\(\\beta\\)) by ignoring the first term and minimizing the second term, i.e. we minimize: \\[ \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right)\\\\ = \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 + \\frac{\\sigma^2}{c}\\sum_j^p\\beta_j^2 \\right) \\right) \\] As above, if \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{\\sigma^2}{c}\\), we can see that the mode corresponds to ridge optimization. The ridge estimate is also the posterior mean: the posterior is proportional to the exponential of a quadratic in \\(\\beta\\), i.e. a Gaussian density, and for a Gaussian distribution the mode and the mean coincide. 6.2 Applied 6.2.1 Question 8 In this exercise, we will generate simulated data, and will then use this data to perform best subset selection. Use the rnorm() function to generate a predictor \\(X\\) of length \\(n = 100\\), as well as a noise vector \\(\\epsilon\\) of length \\(n = 100\\). library(ISLR2) library(glmnet) library(leaps) library(pls) set.seed(42) x <- rnorm(100) ep <- rnorm(100) Generate a response vector \\(Y\\) of length \\(n = 100\\) according to the model \\[Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon,\\] where \\(\\beta_0, \\beta_1, \\beta_2,\\) and \\(\\beta_3\\) are constants of your choice.
y <- 2 + 3 * x - 2 * x^2 + 0.5 * x^3 + ep Use the regsubsets() function to perform best subset selection in order to choose the best model containing the predictors \\(X, X^2, ..., X^{10}\\). What is the best model obtained according to \\(C_p\\), BIC, and adjusted \\(R^2\\)? Show some plots to provide evidence for your answer, and report the coefficients of the best model obtained. Note you will need to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). dat <- data.frame(x, y) summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " Repeat (c), using forward stepwise selection and also using backwards stepwise selection. How does your answer compare to the results in (c)? 
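Before turning to stepwise selection, note that part (c) also asks which model size is best according to \\(C_p\\), BIC and adjusted \\(R^2\\), along with the coefficients of the chosen model. These statistics are all returned by the regsubsets summary; a short sketch of how they could be plotted and compared (the coefficients of the selected size can then be reported with coef()):

s <- summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat))
par(mfrow = c(1, 3))
plot(s$cp, xlab = "Size", ylab = "Cp", type = "b"); abline(v = which.min(s$cp), lty = 2)
plot(s$bic, xlab = "Size", ylab = "BIC", type = "b"); abline(v = which.min(s$bic), lty = 2)
plot(s$adjr2, xlab = "Size", ylab = "Adjusted R^2", type = "b"); abline(v = which.max(s$adjr2), lty = 2)

With that noted, forward and backward stepwise selection give: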
summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: forward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: backward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " " " ## 7 ( 1 ) " " " " ## 8 ( 1 ) " " " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" Now fit a lasso model to the simulated data, again using 
\\(X, X^2, ..., X^{10}\\) as predictors. Use cross-validation to select the optimal value of \\(\\lambda\\). Create plots of the cross-validation error as a function of \\(\\lambda\\). Report the resulting coefficient estimates, and discuss the results obtained. res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 0.09804219 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = res$lambda.min) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.8457308 ## 1 2.9092918 ## 2 -1.9287428 ## 3 0.5161012 ## 4 . ## 5 . ## 6 . ## 7 . ## 8 . ## 9 . ## 10 . When fitting the lasso, the model that minimizes MSE uses three predictors (as per the simulation). The coefficients estimated (2.9, -1.9 and 0.5) are similar to those used in the simulation. Now generate a response vector \\(Y\\) according to the model \\[Y = \\beta_0 + \\beta_7X^7 + \\epsilon,\\] and perform best subset selection and the lasso. Discuss the results obtained. (Note that the response simulated below also includes an \\(x^2\\) term alongside the \\(x^7\\) term.) dat$y <- 2 - 2 * x^2 + 0.2 * x^7 + ep summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) " " "*" ## 4 ( 1 ) " " "*" ## 5 ( 1 ) " " "*" ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 1.126906 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = best) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.061389580 ## 1 . ## 2 -0.883080980 ## 3 . ## 4 -0.121018425 ## 5 0.028984084 ## 6 -0.009540039 ## 7 0.188796928 ## 8 . ## 9 . ## 10 . When fitting the lasso, the model does not perfectly replicate the simulation: coefficients are retained for powers of \\(x\\) that were not simulated (\\(x^4\\), \\(x^5\\) and \\(x^6\\)), and the \\(x^2\\) coefficient is noticeably shrunk towards zero. 6.2.2 Question 9 In this exercise, we will predict the number of applications received using the other variables in the College data set.
Split the data set into a training set and a test set. set.seed(42) train <- sample(nrow(College), nrow(College) * 2 / 3) test <- setdiff(seq_len(nrow(College)), train) mse <- list() Fit a linear model using least squares on the training set, and report the test error obtained. fit <- lm(Apps ~ ., data = College[train, ]) (mse$lm <- mean((predict(fit, College[test, ]) - College$Apps[test])^2)) ## [1] 1695269 Fit a ridge regression model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit2 <- cv.glmnet(mm, College$Apps[train], alpha = 0) p <- predict(fit2, model.matrix(Apps ~ ., data = College[test, ]), s = fit2$lambda.min) (mse$ridge <- mean((p - College$Apps[test])^2)) ## [1] 2804369 Fit a lasso model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained, along with the number of non-zero coefficient estimates. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit3 <- cv.glmnet(mm, College$Apps[train], alpha = 1) p <- predict(fit3, model.matrix(Apps ~ ., data = College[test, ]), s = fit3$lambda.min) (mse$lasso <- mean((p - College$Apps[test])^2)) ## [1] 1822322 Fit a PCR model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit4 <- pcr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, College[test, ], ncomp = 17) (mse$pcr <- mean((p - College$Apps[test])^2)) ## [1] 1695269 (With \\(M = 17\\) all components are used, so this PCR fit is equivalent to the least squares fit above, hence the identical test MSE.) Fit a PLS model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit5 <- plsr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, College[test, ], ncomp = 12) (mse$pls <- mean((p - College$Apps[test])^2)) ## [1] 1696902 Comment on the results obtained. How accurately can we predict the number of college applications received? Is there much difference among the test errors resulting from these five approaches? barplot(unlist(mse), ylab = "Test MSE", horiz = TRUE) The test errors are broadly similar across methods. The lowest are obtained by least squares and by PCR with \\(M = 17\\) (which is equivalent to least squares), while ridge regression gives the highest test error (in this specific case with this specific seed). 6.2.3 Question 10 We have seen that as the number of features used in a model increases, the training error will necessarily decrease, but the test error may not. We will now explore this in a simulated data set. Generate a data set with \\(p = 20\\) features, \\(n = 1,000\\) observations, and an associated quantitative response vector generated according to the model \\(Y =X\\beta + \\epsilon\\), where \\(\\beta\\) has some elements that are exactly equal to zero. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:4] <- c(5, 4, 2, 7) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y Split your data set into a training set containing 100 observations and a test set containing 900 observations. train <- dat[1:100, ] test <- dat[101:1000, ] Perform best subset selection on the training set, and plot the training set MSE associated with the best model of each size.
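An aside on part (g) of the previous question (not required by the exercise): with a single train/test split the ranking of the five methods is noisy. A rough way to check its stability is to repeat the split a few times and average the test errors, e.g. for least squares versus the lasso (assignments inside replicate() are local, so the objects defined above are unaffected):

reps <- replicate(10, {
  tr <- sample(nrow(College), nrow(College) * 2 / 3)
  te <- setdiff(seq_len(nrow(College)), tr)
  f <- lm(Apps ~ ., data = College[tr, ])
  g <- cv.glmnet(model.matrix(Apps ~ ., College[tr, ]), College$Apps[tr], alpha = 1)
  p <- predict(g, model.matrix(Apps ~ ., College[te, ]), s = g$lambda.min)
  c(lm = mean((predict(f, College[te, ]) - College$Apps[te])^2),
    lasso = mean((p - College$Apps[te])^2))
})
rowMeans(reps)

Returning to the simulated data, we now perform best subset selection on the training set: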
fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 9 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " "*" ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) "*" " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" plot(summary(fit)$rss / 100, ylab = "MSE", type = "o") Plot the test set MSE associated with the best model of each size. predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } mse <- sapply(1:20, function(i) mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) For which model size does the test set MSE take on its minimum value? Comment on your results. 
If it takes on its minimum value for a model containing only an intercept or a model containing all of the features, then play around with the way that you are generating the data in (a) until you come up with a scenario in which the test set MSE is minimized for an intermediate model size. which.min(mse) ## [1] 4 The min test MSE is found when model size is 4. This corresponds to the simulated data which has four non-zero coefficients. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:9] <- c(5, 4, 2, 7, 0.01, 0.001, 0.05, 0.1, 0.5) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y train <- dat[1:100, ] test <- dat[101:1000, ] fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " " " " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " " " "*" ## 9 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " " " " " "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" " " "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " " " ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) " " " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" " " "*" ## 17 ( 1 ) "*" " " "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" mse <- sapply(1:20, function(i) 
mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) which.min(mse) ## [1] 5 How does the model at which the test set MSE is minimized compare to the true model used to generate the data? Comment on the coefficient values. The minimum test MSE is found when model size is 5, but there are 9 non-zero coefficients. coef(fit, id = 5) ## (Intercept) b1 b2 b3 b4 b9 ## 0.03507654 5.06180121 3.82785027 2.20434996 7.05312844 0.57032008 The large coefficients are well estimated, but the smaller coefficients are dropped. Create a plot displaying \\(\\sqrt{\\sum_{j=1}^p (\\beta_j - \\hat{\\beta}{}_j^r)^2}\\) for a range of values of \\(r\\), where \\(\\hat{\\beta}{}_j^r\\) is the \\(j\\)th coefficient estimate for the best model containing \\(r\\) coefficients. Comment on what you observe. How does this compare to the test MSE plot from (d)? names(beta) <- paste0("b", 1:20) b <- data.frame(id = names(beta), b = beta) out <- sapply(1:20, function(i) { c <- coef(fit, id = i)[-1] c <- data.frame(id = names(c), c = c) m <- merge(b, c) sqrt(sum((m$b - m$c)^2)) }) plot(out, ylab = "Root sum of squared coefficient errors", type = "o", pch = 19) (Note that merge() performs an inner join here, so coefficients absent from the size-\\(r\\) model are excluded from the sum; an alternative would be to count them as estimates of zero, e.g. via merge(b, c, all.x = TRUE).) The error of the coefficient estimates is minimized when model size is 5. This corresponds to the point when test MSE was minimized. 6.2.4 Question 11 We will now try to predict per capita crime rate in the Boston data set. Try out some of the regression methods explored in this chapter, such as best subset selection, the lasso, ridge regression, and PCR. Present and discuss results for the approaches that you consider. set.seed(1) train <- sample(nrow(Boston), nrow(Boston) * 2 / 3) test <- setdiff(seq_len(nrow(Boston)), train) hist(log(Boston$crim)) Propose a model (or set of models) that seem to perform well on this data set, and justify your answer. Make sure that you are evaluating model performance using validation set error, cross-validation, or some other reasonable alternative, as opposed to using training error. We will try to fit models to log(Boston$crim) which is closer to a normal distribution. fit <- lm(log(crim) ~ ., data = Boston[train, ]) mean((predict(fit, Boston[test, ]) - log(Boston$crim[test]))^2) ## [1] 0.66578 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit2 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 0) p <- predict(fit2, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit2$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6511807 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit3 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 1) p <- predict(fit3, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit3$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6494337 fit4 <- pcr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, Boston[test, ], ncomp = 8) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6561043 fit5 <- plsr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, Boston[test, ], ncomp = 6) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6773353 In this case the lasso (alpha = 1) seems to perform very slightly better than unpenalized regression. Some coefficients have been dropped: coef(fit3, s = fit3$lambda.min) ## 14 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) -4.713172675 ## (Intercept) . ## zn -0.011043739 ## indus 0.022515402 ## chas . ## nox 3.856157215 ## rm .
## age 0.004210529 ## dis . ## rad 0.145604750 ## tax . ## ptratio -0.031787696 ## lstat 0.036112321 ## medv 0.004304181 (The duplicated (Intercept) row appears because model.matrix() includes an intercept column that glmnet then ignores in favour of its own; it could be avoided by dropping the first column of the model matrix.) Does your chosen model involve all of the features in the data set? Why or why not? Not all features are included due to the lasso penalization. "],["moving-beyond-linearity.html", "7 Moving Beyond Linearity 7.1 Conceptual 7.2 Applied", " 7 Moving Beyond Linearity 7.1 Conceptual 7.1.1 Question 1 It was mentioned in the chapter that a cubic regression spline with one knot at \\(\\xi\\) can be obtained using a basis of the form \\(x, x^2, x^3, (x-\\xi)^3_+\\), where \\((x-\\xi)^3_+ = (x-\\xi)^3\\) if \\(x>\\xi\\) and equals 0 otherwise. We will now show that a function of the form \\[ f(x)=\\beta_0 +\\beta_1x+\\beta_2x^2 +\\beta_3x^3 +\\beta_4(x-\\xi)^3_+ \\] is indeed a cubic regression spline, regardless of the values of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3,\\beta_4\\). Find a cubic polynomial \\[ f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3 \\] such that \\(f(x) = f_1(x)\\) for all \\(x \\le \\xi\\). Express \\(a_1,b_1,c_1,d_1\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). In this case, for \\(x \\le \\xi\\), the cubic polynomial simply has terms \\(a_1 = \\beta_0\\), \\(b_1 = \\beta_1\\), \\(c_1 = \\beta_2\\), \\(d_1 = \\beta_3\\). Find a cubic polynomial \\[ f_2(x) = a_2 + b_2x + c_2x^2 + d_2x^3 \\] such that \\(f(x) = f_2(x)\\) for all \\(x > \\xi\\). Express \\(a_2, b_2, c_2, d_2\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). For \\(x \\gt \\xi\\), the cubic polynomial is (we now include the \\(\\beta_4\\) term): \\[\\begin{align} f(x) = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x-\\xi)^3 \\\\ = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x^3 - 3x^2\\xi + 3x\\xi^2 -\\xi^3) \\\\ = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)x + (\\beta_2 - 3\\beta_4\\xi)x^2 + (\\beta_3 + \\beta_4)x^3 \\end{align}\\] Therefore, \\(a_2 = \\beta_0 - \\beta_4\\xi^3\\), \\(b_2 = \\beta_1 + 3\\beta_4\\xi^2\\), \\(c_2 = \\beta_2 - 3\\beta_4\\xi\\), \\(d_2 = \\beta_3 + \\beta_4\\). We have now established that \\(f(x)\\) is a piecewise cubic polynomial. Show that \\(f_1(\\xi) = f_2(\\xi)\\). That is, \\(f(x)\\) is continuous at \\(\\xi\\). To do this, we replace \\(x\\) with \\(\\xi\\) in the above equations and simplify. \\[\\begin{align} f_1(\\xi) = \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] \\[\\begin{align} f_2(\\xi) = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)\\xi + (\\beta_2 - 3\\beta_4\\xi)\\xi^2 + (\\beta_3 + \\beta_4)\\xi^3 \\\\ = & \\beta_0 - \\beta_4\\xi^3 + \\beta_1\\xi + 3\\beta_4\\xi^3 + \\beta_2\\xi^2 - 3\\beta_4\\xi^3 + \\beta_3\\xi^3 + \\beta_4\\xi^3 \\\\ = & \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] Show that \\(f_1'(\\xi) = f_2'(\\xi)\\). That is, \\(f'(x)\\) is continuous at \\(\\xi\\). To do this we differentiate the above with respect to \\(x\\). \\[ f_1'(x) = \\beta_1 + 2\\beta_2x + 3\\beta_3x^2 \\\\ f_1'(\\xi) = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\] \\[\\begin{align} f_2'(x) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)x + 3(\\beta_3 + \\beta_4)x^2 \\\\ f_2'(\\xi) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)\\xi + 3(\\beta_3 + \\beta_4)\\xi^2 \\\\ & = \\beta_1 + 3\\beta_4\\xi^2 + 2\\beta_2\\xi - 6\\beta_4\\xi^2 + 3\\beta_3\\xi^2 + 3\\beta_4\\xi^2 \\\\ & = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\end{align}\\] Show that \\(f_1''(\\xi) = f_2''(\\xi)\\).
That is, \\(f''(x)\\) is continuous at \\(\\xi\\). Therefore, \\(f(x)\\) is indeed a cubic spline. \\[ f_1''(x) = 2\\beta_2 + 6\\beta_3x \\\\ f_1''(\\xi) = 2\\beta_2 + 6\\beta_3\\xi \\] \\[ f_2''(x) = 2\\beta_2 - 6\\beta_4\\xi + 6(\\beta_3 + \\beta_4)x \\] \\[\\begin{align} f_2''(\\xi) & = 2\\beta_2 - 6\\beta_4\\xi + 6\\beta_3\\xi + 6\\beta_4\\xi \\\\ & = 2\\beta_2 + 6\\beta_3\\xi \\end{align}\\] Hint: Parts (d) and (e) of this problem require knowledge of single-variable calculus. As a reminder, given a cubic polynomial \\[f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3,\\] the first derivative takes the form \\[f_1'(x) = b_1 + 2c_1x + 3d_1x^2\\] and the second derivative takes the form \\[f_1''(x) = 2c_1 + 6d_1x.\\] 7.1.2 Question 2 Suppose that a curve \\(\\hat{g}\\) is computed to smoothly fit a set of \\(n\\) points using the following formula: \\[ \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\hat{g} = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(m)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\) (and \\(g^{(0)} = g\\)). Provide example sketches of \\(\\hat{g}\\) in each of the following scenarios. \\(\\lambda=\\infty, m=0\\). Here we penalize \\(g\\) itself and an infinite \\(\\lambda\\) means that this penalty dominates. This means that \\(\\hat{g}\\) will be 0. \\(\\lambda=\\infty, m=1\\). Here we penalize the first derivative (the slope) of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the slope will be 0, and the best fit given zero slope is a horizontal line at the mean of \\(y\\). \\(\\lambda=\\infty, m=2\\). Here we penalize the second derivative (the change of slope) of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the fit will be a straight line (the least squares line). \\(\\lambda=\\infty, m=3\\). Here we penalize the third derivative of \\(g\\) and an infinite \\(\\lambda\\) means that this penalty dominates. Forcing the third derivative to zero means the fit will be the best-fitting quadratic function (which has a constant second derivative). \\(\\lambda=0, m=3\\). Here we penalize the third derivative, but a value of \\(\\lambda = 0\\) means that there is no penalty. As a result, the curve is able to interpolate all of the training points. (A rough R sketch of these five scenarios is given after Question 4 below.) 7.1.3 Question 3 Suppose we fit a curve with basis functions \\(b_1(X) = X\\), \\(b_2(X) = (X - 1)^2I(X \\geq 1)\\). (Note that \\(I(X \\geq 1)\\) equals 1 for \\(X \\geq 1\\) and 0 otherwise.) We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = -2\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 2\\). Note the intercepts, slopes, and other relevant information. x <- seq(-2, 2, length.out = 1000) f <- function(x) 1 + x + -2 * (x - 1)^2 * I(x >= 1) plot(x, f(x), type = "l") grid() 7.1.4 Question 4 Suppose we fit a curve with basis functions \\(b_1(X) = I(0 \\leq X \\leq 2) - (X -1)I(1 \\leq X \\leq 2),\\) \\(b_2(X) = (X -3)I(3 \\leq X \\leq 4) + I(4 \\lt X \\leq 5)\\). We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = 3\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 6\\). Note the intercepts, slopes, and other relevant information.
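As promised in Question 2 above, a rough illustration of the five scenarios on toy data. This is an illustrative sketch only: the \\(m = 3\\) penalty is approximated by its limiting quadratic fit, and smooth.spline (which penalizes the second derivative) stands in for the no-penalty case, since with \\(\\lambda \\approx 0\\) any of these penalties yields near-interpolation.

set.seed(42)
x0 <- runif(30)
y0 <- sin(2 * pi * x0) + rnorm(30, sd = 0.3)
plot(x0, y0, pch = 19)
abline(h = 0, col = "red") # (a) lambda = Inf, m = 0: g-hat is 0
abline(h = mean(y0), col = "orange") # (b) m = 1: horizontal line at mean of y
abline(lm(y0 ~ x0), col = "blue") # (c) m = 2: least squares line
qfit <- lm(y0 ~ poly(x0, 2)) # (d) m = 3: best-fitting quadratic
lines(sort(x0), fitted(qfit)[order(x0)], col = "purple")
sfit <- smooth.spline(x0, y0, all.knots = TRUE, lambda = 1e-10) # (e) lambda ~ 0: near interpolation
lines(predict(sfit, sort(x0)), col = "darkgreen")

Returning to the current question, we can sketch the curve directly from the basis functions: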
x <- seq(-2, 6, length.out = 1000) b1 <- function(x) I(0 <= x & x <= 2) - (x - 1) * I(1 <= x & x <= 2) b2 <- function(x) (x - 3) * I(3 <= x & x <= 4) + I(4 < x & x <= 5) f <- function(x) 1 + 1*b1(x) + 3*b2(x) plot(x, f(x), type = "l") grid() 7.1.5 Question 5 Consider two curves, \\(\\hat{g}_1\\) and \\(\\hat{g}_2\\), defined by \\[ \\hat{g}_1 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(3)}(x) \\right]^2 dx \\right), \\] \\[ \\hat{g}_2 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(4)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\). As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training RSS? \\(\\hat{g}_2\\) is more flexible (its penalty constrains a higher derivative, leaving cubic terms unpenalized) and so will have the smaller training RSS. As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller test RSS? We cannot tell which function will produce a smaller test RSS, but there is a chance that \\(\\hat{g}_1\\) will if \\(\\hat{g}_2\\) overfits the data. For \\(\\lambda = 0\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training and test RSS? When \\(\\lambda = 0\\) there is no penalty, so both functions will give the same result: perfect interpolation of the training data. Thus training RSS will be 0 but test RSS could be high. 7.2 Applied 7.2.1 Question 6 In this exercise, you will further analyze the Wage data set considered throughout this chapter. Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree \\(d\\) for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data. library(ISLR2) library(boot) library(ggplot2) set.seed(42) res <- sapply(1:6, function(i) { fit <- glm(wage ~ poly(age, i), data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) which.min(res) ## [1] 6 plot(1:6, res, xlab = "Degree", ylab = "Test MSE", type = "l") abline(v = which.min(res), col = "red", lty = 2) fit <- glm(wage ~ poly(age, which.min(res)), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(1:100, predict(fit, data.frame(age = 1:100)), type = "l", col = "red") summary(glm(wage ~ poly(age, 6), data = Wage)) ## ## Call: ## glm(formula = wage ~ poly(age, 6), data = Wage) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 111.7036 0.7286 153.316 < 2e-16 *** ## poly(age, 6)1 447.0679 39.9063 11.203 < 2e-16 *** ## poly(age, 6)2 -478.3158 39.9063 -11.986 < 2e-16 *** ## poly(age, 6)3 125.5217 39.9063 3.145 0.00167 ** ## poly(age, 6)4 -77.9112 39.9063 -1.952 0.05099 . ## poly(age, 6)5 -35.8129 39.9063 -0.897 0.36956 ## poly(age, 6)6 62.7077 39.9063 1.571 0.11620 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.'
0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 1592.512) ## ## Null deviance: 5222086 on 2999 degrees of freedom ## Residual deviance: 4766389 on 2993 degrees of freedom ## AIC: 30642 ## ## Number of Fisher Scoring iterations: 2 fit1 <- lm(wage ~ poly(age, 1), data = Wage) fit2 <- lm(wage ~ poly(age, 2), data = Wage) fit3 <- lm(wage ~ poly(age, 3), data = Wage) fit4 <- lm(wage ~ poly(age, 4), data = Wage) fit5 <- lm(wage ~ poly(age, 5), data = Wage) anova(fit1, fit2, fit3, fit4, fit5) ## Analysis of Variance Table ## ## Model 1: wage ~ poly(age, 1) ## Model 2: wage ~ poly(age, 2) ## Model 3: wage ~ poly(age, 3) ## Model 4: wage ~ poly(age, 4) ## Model 5: wage ~ poly(age, 5) ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 2998 5022216 ## 2 2997 4793430 1 228786 143.5931 < 2.2e-16 *** ## 3 2996 4777674 1 15756 9.8888 0.001679 ** ## 4 2995 4771604 1 6070 3.8098 0.051046 . ## 5 2994 4770322 1 1283 0.8050 0.369682 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Cross-validation (which.min(res) above) selects degree 6, but the degree 5 and 6 terms are not significant in the model summary. When testing with ANOVA, degrees 1, 2 and 3 are highly significant and 4 is marginal, so a polynomial of degree 3 or 4 would be a more parsimonious choice. Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained. set.seed(42) res <- sapply(2:10, function(i) { Wage$cats <- cut(Wage$age, i) fit <- glm(wage ~ cats, data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) names(res) <- 2:10 plot(2:10, res, xlab = "Cuts", ylab = "Test MSE", type = "l") which.min(res) ## 8 ## 7 abline(v = names(which.min(res)), col = "red", lty = 2) fit <- glm(wage ~ cut(age, 8), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(18:80, predict(fit, data.frame(age = 18:80)), type = "l", col = "red") 7.2.2 Question 7 The Wage data set contains a number of other features not explored in this chapter, such as marital status (maritl), job class (jobclass), and others. Explore the relationships between some of these other predictors and wage, and use non-linear fitting techniques in order to fit flexible models to the data. Create plots of the results obtained, and write a summary of your findings. plot(Wage$year, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$maritl, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$jobclass, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$education, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) We have a mix of categorical and continuous variables and also want to incorporate non-linear aspects of the continuous variables. A GAM is a good choice to model this situation. library(gam) ## Loading required package: splines ## Loading required package: foreach ## Loaded gam 1.22-4 fit0 <- gam(wage ~ s(year, 4) + s(age, 5) + education, data = Wage) fit1 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass, data = Wage) fit2 <- gam(wage ~ s(year, 4) + s(age, 5) + education + maritl, data = Wage) fit3 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl, data = Wage) anova(fit0, fit1, fit2, fit3) ## Analysis of Deviance Table ## ## Model 1: wage ~ s(year, 4) + s(age, 5) + education ## Model 2: wage ~ s(year, 4) + s(age, 5) + education + jobclass ## Model 3: wage ~ s(year, 4) + s(age, 5) + education + maritl ## Model 4: wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl ## Resid. Df Resid.
Dev Df Deviance Pr(>Chi) ## 1 2986 3689770 ## 2 2985 3677553 1 12218 0.0014286 ** ## 3 2982 3595688 3 81865 1.071e-14 *** ## 4 2981 3581781 1 13907 0.0006687 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 par(mfrow = c(2, 3)) plot(fit3, se = TRUE, col = "blue") 7.2.3 Question 8 Fit some of the non-linear models investigated in this chapter to the Auto data set. Is there evidence for non-linear relationships in this data set? Create some informative plots to justify your answer. Here we want to explore a range of non-linear models. First let’s look at the relationships between the variables in the data. pairs(Auto, cex = 0.4, pch = 19) It does appear that there are some non-linear relationships (e.g. horsepower / weight and mpg). We will pick one variable (horsepower) to predict mpg and try the range of models discussed in this chapter. We will measure test MSE through cross-validation to compare the models. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ lubridate 1.9.3 ✔ tibble 3.2.1 ## ✔ purrr 1.0.2 ✔ tidyr 1.3.1 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ purrr::accumulate() masks foreach::accumulate() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ✖ purrr::when() masks foreach::when() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors set.seed(42) fit <- glm(mpg ~ horsepower, data = Auto) err <- cv.glm(Auto, fit, K = 10)$delta[1] fit1 <- glm(mpg ~ poly(horsepower, 4), data = Auto) err1 <- cv.glm(Auto, fit1, K = 10)$delta[1] q <- quantile(Auto$horsepower) Auto$hp_cats <- cut(Auto$horsepower, breaks = q, include.lowest = TRUE) fit2 <- glm(mpg ~ hp_cats, data = Auto) err2 <- cv.glm(Auto, fit2, K = 10)$delta[1] fit3 <- glm(mpg ~ bs(horsepower, df = 4), data = Auto) err3 <- cv.glm(Auto, fit3, K = 10)$delta[1] ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases fit4 <- glm(mpg ~ ns(horsepower, 4), data = Auto) err4 <- cv.glm(Auto, fit4, K = 10)$delta[1] fit5 <- gam(mpg ~ s(horsepower, df = 4), data = Auto) # rough 10-fold cross-validation for gam. err5 <- mean(replicate(10, { b <- cut(sample(seq_along(Auto$horsepower)), 10) pred <- numeric() for (i in 1:10) { train <- b %in% levels(b)[-i] f <- gam(mpg ~ s(horsepower, df = 4), data = Auto[train, ]) pred[!train] <- predict(f, Auto[!train, ]) } mean((Auto$mpg - pred)^2) # MSE })) c(err, err1, err2, err3, err4, err5) ## [1] 24.38418 19.94222 20.37940 18.92802 19.33556 19.02999 anova(fit, fit1, fit2, fit3, fit4, fit5) ## Analysis of Deviance Table ## ## Model 1: mpg ~ horsepower ## Model 2: mpg ~ poly(horsepower, 4) ## Model 3: mpg ~ hp_cats ## Model 4: mpg ~ bs(horsepower, df = 4) ## Model 5: mpg ~ ns(horsepower, 4) ## Model 6: mpg ~ s(horsepower, df = 4) ## Resid. Df Resid. Dev Df Deviance F Pr(>F) ## 1 390 9385.9 ## 2 387 7399.5 3.00000000 1986.39 35.258 < 2.2e-16 *** ## 3 388 7805.4 -1.00000000 -405.92 21.615 4.578e-06 *** ## 4 387 7276.5 1.00000000 528.94 28.166 1.880e-07 *** ## 5 387 7248.6 0.00000000 27.91 ## 6 387 7267.7 0.00013612 -19.10 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 x <- seq(min(Auto$horsepower), max(Auto$horsepower), length.out=1000) pred <- data.frame( x = x, "Linear" = predict(fit, data.frame(horsepower = x)), "Polynomial" = predict(fit1, data.frame(horsepower = x)), "Step" = predict(fit2, data.frame(hp_cats = cut(x, breaks = q, include.lowest = TRUE))), "Regression spline" = predict(fit3, data.frame(horsepower = x)), "Natural spline" = predict(fit4, data.frame(horsepower = x)), "Smoothing spline" = predict(fit5, data.frame(horsepower = x)), check.names = FALSE ) pred <- pivot_longer(pred, -x) ggplot(Auto, aes(horsepower, mpg)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() 7.2.4 Question 9 This question uses the variables dis (the weighted mean of distances to five Boston employment centers) and nox (nitrogen oxides concentration in parts per 10 million) from the Boston data. We will treat dis as the predictor and nox as the response. Use the poly() function to fit a cubic polynomial regression to predict nox using dis. Report the regression output, and plot the resulting data and polynomial fits. fit <- glm(nox ~ poly(dis, 3), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ poly(dis, 3), data = Boston) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.554695 0.002759 201.021 < 2e-16 *** ## poly(dis, 3)1 -2.003096 0.062071 -32.271 < 2e-16 *** ## poly(dis, 3)2 0.856330 0.062071 13.796 < 2e-16 *** ## poly(dis, 3)3 -0.318049 0.062071 -5.124 4.27e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003852802) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9341 on 502 degrees of freedom ## AIC: -1370.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Plot the polynomial fits for a range of different polynomial degrees (say, from 1 to 10), and report the associated residual sum of squares. fits <- lapply(1:10, function(i) glm(nox ~ poly(dis, i), data = Boston)) x <- seq(min(Boston$dis), max(Boston$dis), length.out=1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 1:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() # residual sum of squares do.call(anova, fits)[, 2] ## [1] 2.768563 2.035262 1.934107 1.932981 1.915290 1.878257 1.849484 1.835630 ## [9] 1.833331 1.832171 Perform cross-validation or another approach to select the optimal degree for the polynomial, and explain your results. res <- sapply(1:10, function(i) { fit <- glm(nox ~ poly(dis, i), data = Boston) cv.glm(Boston, fit, K = 10)$delta[1] }) which.min(res) ## [1] 4 The optimal degree is 4 based on cross-validation. Higher values tend to lead to overfitting. Use the bs() function to fit a regression spline to predict nox using dis. Report the output for the fit using four degrees of freedom. How did you choose the knots? Plot the resulting fit. fit <- glm(nox ~ bs(dis, df = 4), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ bs(dis, df = 4), data = Boston) ## ## Coefficients: ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) 0.73447 0.01460 50.306 < 2e-16 *** ## bs(dis, df = 4)1 -0.05810 0.02186 -2.658 0.00812 ** ## bs(dis, df = 4)2 -0.46356 0.02366 -19.596 < 2e-16 *** ## bs(dis, df = 4)3 -0.19979 0.04311 -4.634 4.58e-06 *** ## bs(dis, df = 4)4 -0.38881 0.04551 -8.544 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003837874) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9228 on 501 degrees of freedom ## AIC: -1371.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Knots are chosen based on quantiles of the data. Now fit a regression spline for a range of degrees of freedom, and plot the resulting fits and report the resulting RSS. Describe the results obtained. fits <- lapply(3:10, function(i) { glm(nox ~ bs(dis, df = i), data = Boston) }) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 3:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() At high numbers of degrees of freedom the splines overfit the data (particularly at extreme ends of the distribution of the predictor variable). Perform cross-validation or another approach in order to select the best degrees of freedom for a regression spline on this data. Describe your results. set.seed(42) err <- sapply(3:10, function(i) { fit <- glm(nox ~ bs(dis, df = i), data = Boston) suppressWarnings(cv.glm(Boston, fit, K = 10)$delta[1]) }) which.min(err) ## [1] 8 The minimum cross-validation error occurs at the 8th value of the range df = 3:10, so this approach selects 10 degrees of freedom for the spline. 7.2.5 Question 10 This question relates to the College data set. Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors. library(leaps) # helper function to predict from a regsubsets model predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } set.seed(42) train <- rep(TRUE, nrow(College)) train[sample(1:nrow(College), nrow(College) * 1 / 3)] <- FALSE fit <- regsubsets(Outstate ~ ., data = College[train, ], nvmax = 17, method = "forward") plot(summary(fit)$bic, type = "b") which.min(summary(fit)$bic) ## [1] 11 # or via cross-validation err <- sapply(1:17, function(i) { mean((College$Outstate[!train] - predict(fit, College[!train, ], i))^2) }) which.min(err) ## [1] 16 min(summary(fit)$bic) ## [1] -690.9375 BIC selects 11 predictors and the validation error is minimized with 16; for the sake of simplicity we'll choose a model with 6 predictors. coef(fit, id = 6) ## (Intercept) PrivateYes Room.Board PhD perc.alumni ## -3540.0544008 2736.4231642 0.9061752 33.7848157 47.1998115 ## Expend Grad.Rate ## 0.2421685 33.3137332 Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings.
fit <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) Evaluate the model obtained on the test set, and explain the results obtained. pred <- predict(fit, College[!train, ]) err_gam <- mean((College$Outstate[!train] - pred)^2) plot(err, ylim = c(min(err_gam, err), max(err)), type = "b") abline(h = err_gam, col = "red", lty = 2) # r-squared 1 - err_gam / mean((College$Outstate[!train] - mean(College$Outstate[!train]))^2) ## [1] 0.7655905 The GAM achieves a test \\(R^2\\) of about 0.77; the dashed red line in the plot shows its test error relative to the stepwise-selected linear models of each size from the previous part. For which variables, if any, is there evidence of a non-linear relationship with the response? summary(fit) ## ## Call: gam(formula = Outstate ~ Private + s(Room.Board, 2) + s(PhD, ## 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), ## data = College[train, ]) ## Deviance Residuals: ## Min 1Q Median 3Q Max ## -7112.59 -1188.98 33.13 1238.54 8738.65 ## ## (Dispersion Parameter for gaussian family taken to be 3577008) ## ## Null Deviance: 8471793308 on 517 degrees of freedom ## Residual Deviance: 1809966249 on 506.0001 degrees of freedom ## AIC: 9300.518 ## ## Number of Local Scoring Iterations: NA ## ## Anova for Parametric Effects ## Df Sum Sq Mean Sq F value Pr(>F) ## Private 1 2327235738 2327235738 650.610 < 2.2e-16 *** ## s(Room.Board, 2) 1 1741918028 1741918028 486.976 < 2.2e-16 *** ## s(PhD, 2) 1 668408518 668408518 186.863 < 2.2e-16 *** ## s(perc.alumni, 2) 1 387819183 387819183 108.420 < 2.2e-16 *** ## s(Expend, 2) 1 625813340 625813340 174.954 < 2.2e-16 *** ## s(Grad.Rate, 2) 1 111881207 111881207 31.278 3.664e-08 *** ## Residuals 506 1809966249 3577008 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Anova for Nonparametric Effects ## Npar Df Npar F Pr(F) ## (Intercept) ## Private ## s(Room.Board, 2) 1 2.224 0.13648 ## s(PhD, 2) 1 5.773 0.01664 * ## s(perc.alumni, 2) 1 0.365 0.54581 ## s(Expend, 2) 1 61.182 3.042e-14 *** ## s(Grad.Rate, 2) 1 4.126 0.04274 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 There is strong evidence of a non-linear relationship for Expend, and weaker evidence (significant at the 5% level) for PhD and Grad.Rate. 7.2.6 Question 11 In Section 7.7, it was mentioned that GAMs are generally fit using a backfitting approach. The idea behind backfitting is actually quite simple. We will now explore backfitting in the context of multiple linear regression. Suppose that we would like to perform multiple linear regression, but we do not have software to do so. Instead, we only have software to perform simple linear regression. Therefore, we take the following iterative approach: we repeatedly hold all but one coefficient estimate fixed at its current value, and update only that coefficient estimate using a simple linear regression. The process is continued until convergence—that is, until the coefficient estimates stop changing. We now try this out on a toy example. Generate a response \\(Y\\) and two predictors \\(X_1\\) and \\(X_2\\), with \\(n = 100\\). set.seed(42) x1 <- rnorm(100) x2 <- rnorm(100) y <- 2 + 0.2 * x1 + 4 * x2 + rnorm(100) Initialize \\(\\hat{\\beta}_1\\) to take on a value of your choice. It does not matter what value you choose.
beta1 <- 20 Keeping \\(\\hat{\\beta}_1\\) fixed, fit the model \\[Y - \\hat{\\beta}_1X_1 = \\beta_0 + \\beta_2X_2 + \\epsilon.\\] You can do this as follows: > a <- y - beta1 * x1 > beta2 <- lm(a ~ x2)$coef[2] a <- y - beta1*x1 beta2 <- lm(a ~ x2)$coef[2] Keeping \\(\\hat{\\beta}_2\\) fixed, fit the model \\[Y - \\hat{\\beta}_2X_2 = \\beta_0 + \\beta_1 X_1 + \\epsilon.\\] You can do this as follows: > a <- y - beta2 * x2 > beta1 <- lm(a ~ x1)$coef[2] a <- y - beta2 * x2 beta1 <- lm(a ~ x1)$coef[2] Write a for loop to repeat (c) and (d) 1,000 times. Report the estimates of \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) at each iteration of the for loop. Create a plot in which each of these values is displayed, with \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) each shown in a different color. res <- matrix(NA, nrow = 1000, ncol = 3) colnames(res) <- c("beta0", "beta1", "beta2") beta1 <- 20 for (i in 1:1000) { beta2 <- lm(y - beta1*x1 ~ x2)$coef[2] beta1 <- lm(y - beta2*x2 ~ x1)$coef[2] beta0 <- lm(y - beta2*x2 ~ x1)$coef[1] res[i, ] <- c(beta0, beta1, beta2) } res <- as.data.frame(res) res$Iteration <- 1:1000 res <- pivot_longer(res, !Iteration) p <- ggplot(res, aes(x=Iteration, y=value, color=name)) + geom_line() + geom_point() + scale_x_continuous(trans = "log10") p Compare your answer in (e) to the results of simply performing multiple linear regression to predict \\(Y\\) using \\(X_1\\) and \\(X_2\\). Use the abline() function to overlay those multiple linear regression coefficient estimates on the plot obtained in (e). fit <- lm(y ~ x1 + x2) coef(fit) ## (Intercept) x1 x2 ## 2.00176627 0.05629075 4.08529318 p + geom_hline(yintercept = coef(fit), lty = 2) On this data set, how many backfitting iterations were required in order to obtain a “good” approximation to the multiple regression coefficient estimates? In this case, good estimates were obtained after 3 iterations. 7.2.7 Question 12 This problem is a continuation of the previous exercise. In a toy example with \\(p = 100\\), show that one can approximate the multiple linear regression coefficient estimates by repeatedly performing simple linear regression in a backfitting procedure. How many backfitting iterations are required in order to obtain a “good” approximation to the multiple regression coefficient estimates? Create a plot to justify your answer. 
set.seed(42) p <- 100 n <- 1000 betas <- rnorm(p) * 5 x <- matrix(rnorm(n * p), ncol = p, nrow = n) y <- (x %*% betas) + rnorm(n) # ignore beta0 for simplicity # multiple regression fit <- lm(y ~ x - 1) coef(fit) ## x1 x2 x3 x4 x5 x6 ## 6.9266184 -2.8428817 1.8686821 3.1466472 1.9601927 -0.5529214 ## x7 x8 x9 x10 x11 x12 ## 7.4786723 -0.4454637 10.0816005 -0.2391234 6.5832468 11.4451280 ## x13 x14 x15 x16 x17 x18 ## -6.9684368 -1.3604495 -0.6310041 3.1786639 -1.4470502 -13.2957027 ## x19 x20 x21 x22 x23 x24 ## -12.2061834 6.5765842 -1.5227262 -8.8855906 -0.8422954 6.1189230 ## x25 x26 x27 x28 x29 x30 ## 9.4395267 -2.1697854 -1.2738835 -8.8457987 2.2851699 -3.1922704 ## x31 x32 x33 x34 x35 x36 ## 2.2812995 3.4695892 5.1162617 -3.0423873 2.4985589 -8.5952764 ## x37 x38 x39 x40 x41 x42 ## -3.9539370 -4.2616463 -12.0038342 0.1981058 1.0559250 -1.8205017 ## x43 x44 x45 x46 x47 x48 ## 3.7739990 -3.6240020 -6.8575534 2.1042998 -4.0228773 7.1880298 ## x49 x50 x51 x52 x53 x54 ## -2.1967821 3.3137115 1.6406524 -3.9402065 7.9067705 3.1815846 ## x55 x56 x57 x58 x59 x60 ## 0.4504175 1.4003479 3.3999814 0.4317695 -14.9255798 1.3816878 ## x61 x62 x63 x64 x65 x66 ## -1.8071634 0.9907740 2.9771540 6.9528872 -3.5956916 6.5283946 ## x67 x68 x69 x70 x71 x72 ## 1.6798820 5.1911857 4.5573256 3.5961319 -5.1909352 -0.4869003 ## x73 x74 x75 x76 x77 x78 ## 3.1472166 -4.7898363 -2.7402076 2.9247173 3.8659938 2.3686379 ## x79 x80 x81 x82 x83 x84 ## -4.4261734 -5.5020688 7.5807239 1.3010702 0.4378713 -0.5856580 ## x85 x86 x87 x88 x89 x90 ## -5.9799328 3.0089329 -1.1230969 -0.8857679 4.7211363 4.1042952 ## x91 x92 x93 x94 x95 x96 ## 6.9492037 -2.3959211 3.2188522 6.9947040 -5.5392641 -4.3114784 ## x97 x98 x99 x100 ## -5.7287292 -7.3148812 0.3454408 3.2830658 # backfitting backfit <- function(x, y, iter = 20) { beta <- matrix(0, ncol = ncol(x), nrow = iter + 1) for (i in 1:iter) { for (k in 1:ncol(x)) { residual <- y - (x[, -k] %*% beta[i, -k]) beta[i + 1, k] <- lm(residual ~ x[, k])$coef[2] } } beta[-1, ] } res <- backfit(x, y) error <- rowMeans(sweep(res, 2, betas)^2) plot(error, log = "x", type = "b") # backfitting error error[length(error)] ## [1] 0.001142494 # lm error mean((coef(fit) - betas)^2) ## [1] 0.001138655 We need around 5 to 6 iterations to obtain a good estimate of the coefficients. "],["tree-based-methods.html", "8 Tree-Based Methods 8.1 Conceptual 8.2 Applied", " 8 Tree-Based Methods 8.1 Conceptual 8.1.1 Question 1 Draw an example (of your own invention) of a partition of two-dimensional feature space that could result from recursive binary splitting. Your example should contain at least six regions. Draw a decision tree corresponding to this partition. Be sure to label all aspects of your figures, including the regions \\(R_1, R_2, ...,\\) the cutpoints \\(t_1, t_2, ...,\\) and so forth. Hint: Your result should look something like Figures 8.1 and 8.2. 
library(showtext) showtext::showtext_auto() library(ggplot2) library(tidyverse) library(ggtree) tree <- ape::read.tree(text = "(((R1:1,R2:1)N1:2,R3:4)N2:2,(R4:2,(R5:1,R6:1)R3:2)N4:5)R;") tree$node.label <- c("Age < 40", "Weight < 100", "Weight < 70", "Age < 60", "Weight < 80") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) plot(NULL, xlab="Age (years)", ylab="Weight (kg)", xlim = c(0, 100), ylim = c(40, 160), xaxs = "i", yaxs = "i") abline(v = 40, col = "red", lty = 2) lines(c(0, 40), c(100, 100), col = "blue", lty = 2) lines(c(0, 40), c(70, 70), col = "blue", lty = 2) abline(v = 60, col = "red", lty = 2) lines(c(60, 100), c(80, 80), col = "blue", lty = 2) text( c(20, 20, 20, 50, 80, 80), c(55, 85, 130, 100, 60, 120), labels = c("R1", "R2", "R3", "R4", "R5", "R6") ) 8.1.2 Question 2 It is mentioned in Section 8.2.3 that boosting using depth-one trees (or stumps) leads to an additive model: that is, a model of the form \\[ f(X) = \\sum_{j=1}^p f_j(X_j). \\] Explain why this is the case. You can begin with (8.12) in Algorithm 8.2. The boosted model takes the form: \\[ f(x) = \\sum_{b=1}^B \\lambda \\hat{f}^b(x) \\] where \\(\\hat{f}^b(x)\\) represents the \\(b\\)th tree with (in this case) one split. Since each depth-one tree is a function of a single predictor, the trees that split on the same predictor \\(X_j\\) can be collected into a single function \\(f_j(X_j)\\), so the model is additive. Depth-two trees would allow for interactions between pairs of variables. (A small numerical illustration of this additivity is given after Question 4 below.) 8.1.3 Question 3 Consider the Gini index, classification error, and cross-entropy in a simple classification setting with two classes. Create a single plot that displays each of these quantities as a function of \\(\\hat{p}_{m1}\\). The \\(x\\)-axis should display \\(\\hat{p}_{m1}\\), ranging from 0 to 1, and the \\(y\\)-axis should display the value of the Gini index, classification error, and entropy. Hint: In a setting with two classes, \\(\\hat{p}_{m1} = 1 - \\hat{p}_{m2}\\). You could make this plot by hand, but it will be much easier to make in R. The Gini index is defined by \\[G = \\sum_{k=1}^{K} \\hat{p}_{mk}(1 - \\hat{p}_{mk})\\] Entropy is given by \\[D = -\\sum_{k=1}^{K} \\hat{p}_{mk}\\log(\\hat{p}_{mk})\\] The classification error is \\[E = 1 - \\max_k(\\hat{p}_{mk})\\] # Function definitions are for when there are two classes only p <- seq(0, 1, length.out = 100) data.frame( x = p, "Gini index" = p * (1 - p) * 2, "Entropy" = -(p * log(p) + (1 - p) * log(1 - p)), "Classification error" = 1 - pmax(p, 1 - p), check.names = FALSE ) |> pivot_longer(!x) |> ggplot(aes(x = x, y = value, color = name)) + geom_line(na.rm = TRUE) 8.1.4 Question 4 This question relates to the plots in Figure 8.12. Sketch the tree corresponding to the partition of the predictor space illustrated in the left-hand panel of Figure 8.12. The numbers inside the boxes indicate the mean of \\(Y\\) within each region. tree <- ape::read.tree(text = "(((3:1.5,(10:1,0:1)A:1)B:1,15:2)C:1,5:2)D;") tree$node.label <- c("X1 < 1", "X2 < 1", "X1 < 0", "X2 < 0") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) Create a diagram similar to the left-hand panel of Figure 8.12, using the tree illustrated in the right-hand panel of the same figure. You should divide up the predictor space into the correct regions, and indicate the mean for each region.
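As promised in Question 2 above, a small numerical illustration that boosting with stumps yields an additive fit. This is an illustrative sketch only, assuming the gbm package is available; the data and tuning values are invented:

library(gbm)
set.seed(42)
df <- data.frame(x1 = runif(500), x2 = runif(500))
df$y <- sin(2 * pi * df$x1) + (df$x2 - 0.5)^2 + rnorm(500, sd = 0.1)
# interaction.depth = 1 means every tree splits on a single variable,
# so the boosted model is a sum of one-variable functions
fit <- gbm(y ~ x1 + x2, data = df, distribution = "gaussian", n.trees = 1000, interaction.depth = 1, shrinkage = 0.1)
plot(fit, i.var = "x1", n.trees = 1000)
plot(fit, i.var = "x2", n.trees = 1000)

Because the fitted model is exactly additive, these partial dependence plots recover the two simulated component functions (up to vertical shifts). Returning to Question 4, the regions of the right-hand panel can be drawn as follows: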
plot(NULL, xlab="X1", ylab="X2", xlim = c(-1, 2), ylim = c(0, 3), xaxs = "i", yaxs = "i") abline(h = 1, col = "red", lty = 2) lines(c(1, 1), c(0, 1), col = "blue", lty = 2) lines(c(-1, 2), c(2, 2), col = "red", lty = 2) lines(c(0, 0), c(1, 2), col = "blue", lty = 2) text( c(0, 1.5, -0.5, 1, 0.5), c(0.5, 0.5, 1.5, 1.5, 2.5), labels = c("-1.80", "0.63", "-1.06", "0.21", "2.49") ) 8.1.5 Question 5 Suppose we produce ten bootstrapped samples from a data set containing red and green classes. We then apply a classification tree to each bootstrapped sample and, for a specific value of \\(X\\), produce 10 estimates of \\(P(\\textrm{Class is Red}|X)\\): \\[0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, \\textrm{and } 0.75.\\] There are two common ways to combine these results together into a single class prediction. One is the majority vote approach discussed in this chapter. The second approach is to classify based on the average probability. In this example, what is the final classification under each of these two approaches? x <- c(0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, 0.75) ifelse(mean(x > 0.5) > 0.5, "red", "green") # majority vote ## [1] "red" ifelse(mean(x) > 0.5, "red", "green") # average probability ## [1] "green" 8.1.6 Question 6 Provide a detailed explanation of the algorithm that is used to fit a regression tree. First we perform recursive binary splitting of the data, choosing the split that minimizes the RSS at each step. This is continued until each terminal node contains no more than some minimum number of observations. Then we apply cost complexity pruning, which generates a sequence of subtrees indexed by a parameter \\(\\alpha\\). Using K-fold CV, we select \\(\\alpha\\) to minimize the cross-validation error. The final tree is then obtained by pruning the tree grown on the complete dataset with the selected \\(\\alpha\\) value. 8.2 Applied 8.2.1 Question 7 In the lab, we applied random forests to the Boston data using mtry = 6 and using ntree = 25 and ntree = 500. Create a plot displaying the test error resulting from random forests on this data set for a more comprehensive range of values for mtry and ntree. You can model your plot after Figure 8.10. Describe the results obtained. library(ISLR2) library(randomForest) ## randomForest 4.7-1.1 ## Type rfNews() to see new features/changes/bug fixes. ## ## Attaching package: 'randomForest' ## The following object is masked from 'package:ggtree': ## ## margin ## The following object is masked from 'package:dplyr': ## ## combine ## The following object is masked from 'package:ggplot2': ## ## margin set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Boston), replace = TRUE) rf_err <- function(mtry) { randomForest( Boston[train, -13], y = Boston[train, 13], xtest = Boston[!train, -13], ytest = Boston[!train, 13], mtry = mtry, ntree = 500 )$test$mse } res <- lapply(c(1, 2, 3, 5, 7, 10, 12), rf_err) names(res) <- c(1, 2, 3, 5, 7, 10, 12) data.frame(res, check.names = FALSE) |> mutate(n = 1:500) |> pivot_longer(!n) |> ggplot(aes(x = n, y = value, color = name)) + geom_line(na.rm = TRUE) + xlab("Number of trees") + ylab("Error") + scale_y_log10() + scale_color_discrete(name = "No. variables at\\neach split") 8.2.2 Question 8 In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable. Split the data set into a training set and a test set.
set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Carseats), replace = TRUE) Fit a regression tree to the training set. Plot the tree, and interpret the results. What test error rate do you obtain? library(tree) tr <- tree(Sales ~ ., data = Carseats[train, ]) summary(tr) ## ## Regression tree: ## tree(formula = Sales ~ ., data = Carseats[train, ]) ## Variables actually used in tree construction: ## [1] "ShelveLoc" "Price" "Income" "Advertising" "CompPrice" ## [6] "Age" ## Number of terminal nodes: 16 ## Residual mean deviance: 2.356 = 424.1 / 180 ## Distribution of residuals: ## Min. 1st Qu. Median Mean 3rd Qu. Max. ## -4.54900 -0.82980 0.03075 0.00000 0.89250 4.83100 plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) carseats_mse <- function(model) { p <- predict(model, newdata = Carseats[!train, ]) mean((p - Carseats[!train, "Sales"])^2) } carseats_mse(tr) ## [1] 4.559764 Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test error rate? res <- cv.tree(tr) plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") In this instance, pruning does not improve the test error rate (it rises slightly, from 4.56 to 4.63), although this is not repeatable in different rounds of cross-validation. Arguably, a good balance between complexity and fit is achieved when the tree size is 11. ptr <- prune.tree(tr, best = 11) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) carseats_mse(ptr) ## [1] 4.625875 Use the bagging approach in order to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. # Here we can use randomForest with mtry = 10 = p (the number of predictor # variables) to perform bagging bagged <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 10, ntree = 200, importance = TRUE) carseats_mse(bagged) ## [1] 2.762861 importance(bagged) ## %IncMSE IncNodePurity ## CompPrice 11.2608998 104.474222 ## Income 5.0953983 73.275066 ## Advertising 12.9011190 125.886762 ## Population 3.4071044 60.095200 ## Price 34.6904380 450.952728 ## ShelveLoc 33.7059874 374.808575 ## Age 7.9101141 143.652934 ## Education -2.1154997 32.712444 ## Urban 0.9604097 7.029648 ## US 3.1336559 6.287048 The test error rate is ~2.8, which is a substantial improvement over the pruned regression tree above. Use random forests to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of \\(m\\), the number of variables considered at each split, on the error rate obtained. rf <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 3, ntree = 500, importance = TRUE) carseats_mse(rf) ## [1] 3.439357 importance(rf) ## %IncMSE IncNodePurity ## CompPrice 8.5717587 122.75189 ## Income 2.8955756 116.33951 ## Advertising 13.0681194 128.13563 ## Population 2.0475415 104.03803 ## Price 34.7934136 342.84663 ## ShelveLoc 39.0704834 292.56638 ## Age 7.7941744 135.69061 ## Education 0.8770806 64.67614 ## Urban -0.3301478 13.83594 ## US 6.2716539 22.07306 The test error rate is ~3.4, which is a substantial improvement over the pruned regression tree above, although not quite as good as the bagging approach. Here, increasing \\(m\\) from 3 (random forest) to 10 (bagging, above) decreases the test error rate. Now analyze the data using BART, and report your results.
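For orientation before the full fit below: gbart() from the BART package takes a matrix of training predictors, a training response, and optional test predictors, and stores the posterior-mean test predictions in yhat.test.mean. A minimal sketch on toy data (illustrative names only):
library(BART)
set.seed(1)
x <- matrix(rnorm(200), ncol = 2)
y <- x[, 1] + rnorm(100, sd = 0.1)
fit <- gbart(x[1:80, ], y[1:80], x.test = x[81:100, ]) # prints MCMC progress
mean((fit$yhat.test.mean - y[81:100])^2) # test MSE from the posterior mean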
library(BART) ## Loading required package: nlme ## ## Attaching package: 'nlme' ## The following object is masked from 'package:ggtree': ## ## collapse ## The following object is masked from 'package:dplyr': ## ## collapse ## Loading required package: survival # For ease, we'll create a fake "predict" method that just returns # yhat.test.mean regardless of provided "newdata" predict.wbart <- function(model, ...) model$yhat.test.mean bartfit <- gbart(Carseats[train, 2:11], Carseats[train, 1], x.test = Carseats[!train, 2:11]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 196, 14, 204 ## y1,yn: 2.070867, 2.280867 ## x1,x[n*p]: 138.000000, 1.000000 ## xp1,xp[np*p]: 141.000000, 1.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 58 ... 1 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,0.287616,3,0.21118,7.42913 ## *****sigma: 1.041218 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,14,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 2s ## trcnt,tecnt: 1000,1000 carseats_mse(bartfit) ## [1] 1.631285 The test error rate is ~1.6 which is an improvement over random forest and bagging. 8.2.3 Question 9 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(1:nrow(OJ), 800) test <- setdiff(1:nrow(OJ), train) Fit a tree to the training data, with Purchase as the response and the other variables except for Buy as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have? tr <- tree(Purchase ~ ., data = OJ[train, ]) summary(tr) ## ## Classification tree: ## tree(formula = Purchase ~ ., data = OJ[train, ]) ## Variables actually used in tree construction: ## [1] "LoyalCH" "SalePriceMM" "PriceDiff" ## Number of terminal nodes: 8 ## Residual mean deviance: 0.7392 = 585.5 / 792 ## Misclassification error rate: 0.1638 = 131 / 800 Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed. 
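For reference when reading the printout below, each row gives the node id, the split rule, the number of observations reaching the node, the deviance, the fitted class, and the class proportions; an annotated sketch using the terminal node on the last line:
# "7) LoyalCH > 0.753545 285 111.70 CH ( 0.95088 0.04912 ) *"
#  node 7: reached when LoyalCH > 0.753545; n = 285; deviance = 111.70;
#  predicted class CH; proportions (CH, MM) = (0.95, 0.05); "*" marks a terminal node
So for that node, about 95% of the 285 training observations reaching it purchase Citrus Hill.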
tr ## node), split, n, deviance, yval, (yprob) ## * denotes terminal node ## ## 1) root 800 1066.00 CH ( 0.61500 0.38500 ) ## 2) LoyalCH < 0.48285 285 296.00 MM ( 0.21404 0.78596 ) ## 4) LoyalCH < 0.064156 64 0.00 MM ( 0.00000 1.00000 ) * ## 5) LoyalCH > 0.064156 221 260.40 MM ( 0.27602 0.72398 ) ## 10) SalePriceMM < 2.04 128 123.50 MM ( 0.18750 0.81250 ) * ## 11) SalePriceMM > 2.04 93 125.00 MM ( 0.39785 0.60215 ) * ## 3) LoyalCH > 0.48285 515 458.10 CH ( 0.83689 0.16311 ) ## 6) LoyalCH < 0.753545 230 282.70 CH ( 0.69565 0.30435 ) ## 12) PriceDiff < 0.265 149 203.00 CH ( 0.57718 0.42282 ) ## 24) PriceDiff < -0.165 32 38.02 MM ( 0.28125 0.71875 ) * ## 25) PriceDiff > -0.165 117 150.30 CH ( 0.65812 0.34188 ) ## 50) LoyalCH < 0.703993 105 139.60 CH ( 0.61905 0.38095 ) * ## 51) LoyalCH > 0.703993 12 0.00 CH ( 1.00000 0.00000 ) * ## 13) PriceDiff > 0.265 81 47.66 CH ( 0.91358 0.08642 ) * ## 7) LoyalCH > 0.753545 285 111.70 CH ( 0.95088 0.04912 ) * Create a plot of the tree, and interpret the results. plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate? table(predict(tr, OJ[test, ], type = "class"), OJ[test, "Purchase"]) ## ## CH MM ## CH 125 15 ## MM 36 94 The test error rate is (15 + 36) / 270 ≈ 0.19. Apply the cv.tree() function to the training set in order to determine the optimal tree size. set.seed(42) res <- cv.tree(tr) Produce a plot with tree size on the \\(x\\)-axis and cross-validated classification error rate on the \\(y\\)-axis. plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Which tree size corresponds to the lowest cross-validated classification error rate? res$size[min] ## [1] 6 Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes. ptr <- prune.tree(tr, best = res$size[min]) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) Compare the training error rates between the pruned and unpruned trees. Which is higher? oj_misclass <- function(model) { summary(model)$misclass[1] / summary(model)$misclass[2] } oj_misclass(tr) ## [1] 0.16375 oj_misclass(ptr) ## [1] 0.16375 The training misclassification error rate is the same (0.16375) for the pruned and unpruned trees. Compare the test error rates between the pruned and unpruned trees. Which is higher? oj_err <- function(model) { p <- predict(model, newdata = OJ[test, ], type = "class") mean(p != OJ[test, "Purchase"]) } oj_err(tr) ## [1] 0.1888889 oj_err(ptr) ## [1] 0.1888889 The test misclassification error rate is also identical (0.189) for the pruned and unpruned trees. 8.2.4 Question 10 We now use boosting to predict Salary in the Hitters data set. Remove the observations for whom the salary information is unknown, and then log-transform the salaries. dat <- Hitters dat <- dat[!is.na(dat$Salary), ] dat$Salary <- log(dat$Salary) Create a training set consisting of the first 200 observations, and a test set consisting of the remaining observations. train <- 1:200 test <- setdiff(1:nrow(dat), train) Perform boosting on the training set with 1,000 trees for a range of values of the shrinkage parameter \\(\\lambda\\). Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding training set MSE on the \\(y\\)-axis.
library(gbm) ## Loaded gbm 2.2.2 ## This version of gbm is no longer under development. Consider transitioning to gbm3, https://github.com/gbm-developers/gbm3 set.seed(42) lambdas <- 10 ^ seq(-3, 0, by = 0.1) fits <- lapply(lambdas, function(lam) { gbm(Salary ~ ., data = dat[train, ], distribution = "gaussian", n.trees = 1000, shrinkage = lam) }) errs <- sapply(fits, function(fit) { p <- predict(fit, dat[train, ], n.trees = 1000) mean((p - dat[train, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding test set MSE on the \\(y\\)-axis. errs <- sapply(fits, function(fit) { p <- predict(fit, dat[test, ], n.trees = 1000) mean((p - dat[test, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Test MSE", log = "xy") min(errs) ## [1] 0.249881 abline(v = lambdas[which.min(errs)], lty = 2, col = "red") Compare the test MSE of boosting to the test MSE that results from applying two of the regression approaches seen in Chapters 3 and 6. Linear regression fit1 <- lm(Salary ~ ., data = dat[train, ]) mean((predict(fit1, dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.4917959 The lasso (note that alpha = 1 in glmnet fits the lasso; alpha = 0 would fit ridge regression) library(glmnet) ## Loading required package: Matrix ## ## Attaching package: 'Matrix' ## The following object is masked from 'package:ggtree': ## ## expand ## The following objects are masked from 'package:tidyr': ## ## expand, pack, unpack ## Loaded glmnet 4.1-8 x <- model.matrix(Salary ~ ., data = dat[train, ]) x.test <- model.matrix(Salary ~ ., data = dat[test, ]) y <- dat[train, "Salary"] fit2 <- glmnet(x, y, alpha = 1) mean((predict(fit2, s = 0.1, newx = x.test) - dat[test, "Salary"])^2) ## [1] 0.4389054 Which variables appear to be the most important predictors in the boosted model? summary(fits[[which.min(errs)]]) ## var rel.inf ## CAtBat CAtBat 16.4755242 ## CRBI CRBI 9.0670759 ## CHits CHits 8.9307168 ## CRuns CRuns 7.6839786 ## CWalks CWalks 7.1014886 ## PutOuts PutOuts 6.7869382 ## AtBat AtBat 5.8567916 ## Walks Walks 5.8479836 ## Years Years 5.3349489 ## Assists Assists 5.0076392 ## CHmRun CHmRun 4.6606919 ## RBI RBI 3.9255396 ## Hits Hits 3.8123124 ## HmRun HmRun 3.4462640 ## Runs Runs 2.4779866 ## Errors Errors 2.2341326 ## NewLeague NewLeague 0.5788283 ## Division Division 0.4880237 ## League League 0.2831352 Now apply bagging to the training set. What is the test set MSE for this approach? set.seed(42) bagged <- randomForest(Salary ~ ., data = dat[train, ], mtry = 19, ntree = 1000) mean((predict(bagged, newdata = dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.2278813 8.2.5 Question 11 This question uses the Caravan data set. Create a training set consisting of the first 1,000 observations, and a test set consisting of the remaining observations. train <- 1:1000 test <- setdiff(1:nrow(Caravan), train) Fit a boosting model to the training set with Purchase as the response and the other variables as predictors. Use 1,000 trees, and a shrinkage value of 0.01. Which predictors appear to be the most important? set.seed(42) fit <- gbm(as.numeric(Purchase == "Yes") ~ ., data = Caravan[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming bernoulli ... ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 50: PVRAAUT has no variation. ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 71: AVRAAUT has no variation.
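The two warnings above flag predictors with zero variance in the training rows; a sketch of how such columns could be dropped before fitting (column 86 of Caravan is Purchase):
# Keep only predictors that actually vary in the training data
keep <- vapply(Caravan[train, -86], function(v) length(unique(v)) > 1, logical(1))
caravan_train <- Caravan[train, -86][, keep]
This is optional here; gbm warns but still fits, and such variables simply receive no splits.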
head(summary(fit)) ## var rel.inf ## PPERSAUT PPERSAUT 15.243041 ## MKOOPKLA MKOOPKLA 10.220498 ## MOPLHOOG MOPLHOOG 7.584734 ## MBERMIDD MBERMIDD 5.983650 ## PBRAND PBRAND 4.557491 ## ABRAND ABRAND 4.076017 Use the boosting model to predict the response on the test data. Predict that a person will make a purchase if the estimated probability of purchase is greater than 20%. Form a confusion matrix. What fraction of the people predicted to make a purchase do in fact make one? How does this compare with the results obtained from applying KNN or logistic regression to this data set? p <- predict(fit, Caravan[test, ], n.trees = 1000, type = "response") table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4415 257 ## TRUE 118 32 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.2133333 150 (118 + 32) are predicted to purchase. Of these, 32 do, which is 21%. # Logistic regression fit <- glm(Purchase == "Yes" ~ ., data = Caravan[train, ], family = "binomial") ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred p <- predict(fit, Caravan[test, ], type = "response") ## Warning in predict.lm(object, newdata, se.fit, scale = 1, type = if (type == : ## prediction from rank-deficient fit; attr(*, "non-estim") has doubtful cases table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4183 231 ## TRUE 350 58 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.1421569 For logistic regression we correctly predict 14% of those predicted to purchase. library(class) # KNN fit <- knn(Caravan[train, -86], Caravan[test, -86], Caravan$Purchase[train]) table(fit, Caravan[test, "Purchase"] == "Yes") ## ## fit FALSE TRUE ## No 4260 263 ## Yes 273 26 sum(fit == "Yes" & Caravan[test, "Purchase"] == "Yes") / sum(fit == "Yes") ## [1] 0.08695652 For KNN we correctly predict 8.7% of those predicted to purchase. 8.2.6 Question 12 Apply boosting, bagging, random forests and BART to a data set of your choice. Be sure to fit the models on a training set and to evaluate their performance on a test set. How accurate are the results compared to simple methods like linear or logistic regression? Which of these approaches yields the best performance? Here I'm going to use the College dataset (used in Question 10 from Chapter 7), so we can compare performance with the GAM we previously built. In this model we were trying to predict Outstate using the other variables in College. library(gam) ## Loading required package: splines ## Loading required package: foreach ## ## Attaching package: 'foreach' ## The following objects are masked from 'package:purrr': ## ## accumulate, when ## Loaded gam 1.22-4 set.seed(42) train <- sample(1:nrow(College), 400) test <- setdiff(1:nrow(College), train) # Linear regression lr <- gam(Outstate ~ ., data = College[train, ]) # GAM from chapter 7 gam <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) # Boosting boosted <- gbm(Outstate ~ ., data = College[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming gaussian ... # Bagging (random forest with mtry = no. predictors) bagged <- randomForest(Outstate ~ ., data = College[train, ], mtry = 17, ntree = 1000) # Random forest with mtry = sqrt(no.
predictors) rf <- randomForest(Outstate ~ ., data = College[train, ], mtry = 4, ntree = 1000) # BART pred <- setdiff(colnames(College), "Outstate") bart <- gbart(College[train, pred], College[train, "Outstate"], x.test = College[test, pred]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 400, 18, 377 ## y1,yn: -4030.802500, 77.197500 ## x1,x[n*p]: 1.000000, 71.000000 ## xp1,xp[np*p]: 0.000000, 99.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 1 ... 75 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,301.581,3,715815,10580.8 ## *****sigma: 1916.969943 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,18,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 3s ## trcnt,tecnt: 1000,1000 mse <- function(model, ...) { pred <- predict(model, College[test, ], ...) mean((College$Outstate[test] - pred)^2) } res <- c( "Linear regression" = mse(lr), "GAM" = mse(gam), "Boosting" = mse(boosted, n.trees = 1000), "Bagging" = mse(bagged), "Random forest" = mse(rf), "BART" = mse(bart) ) res <- data.frame("MSE" = res) res$Model <- factor(row.names(res), levels = rev(row.names(res))) ggplot(res, aes(Model, MSE)) + coord_flip() + geom_bar(stat = "identity", fill = "steelblue") In this case, it looks like bagging produces the best performing model in terms of test mean square error. "],["support-vector-machines.html", "9 Support Vector Machines 9.1 Conceptual 9.2 Applied", " 9 Support Vector Machines 9.1 Conceptual 9.1.1 Question 1 This problem involves hyperplanes in two dimensions. Sketch the hyperplane \\(1 + 3X_1 − X_2 = 0\\). Indicate the set of points for which \\(1 + 3X_1 − X_2 > 0\\), as well as the set of points for which \\(1 + 3X_1 − X_2 < 0\\). library(ggplot2) xlim <- c(-10, 10) ylim <- c(-30, 30) points <- expand.grid( X1 = seq(xlim[1], xlim[2], length.out = 50), X2 = seq(ylim[1], ylim[2], length.out = 50) ) p <- ggplot(points, aes(x = X1, y = X2)) + geom_abline(intercept = 1, slope = 3) + # X2 = 1 + 3X1 theme_bw() p + geom_point(aes(color = 1 + 3*X1 - X2 > 0), size = 0.1) + scale_color_discrete(name = "1 + 3X1 − X2 > 0") On the same plot, sketch the hyperplane \\(−2 + X_1 + 2X_2 = 0\\). Indicate the set of points for which \\(−2 + X_1 + 2X_2 > 0\\), as well as the set of points for which \\(−2 + X_1 + 2X_2 < 0\\). p + geom_abline(intercept = 1, slope = -1/2) + # X2 = 1 - X1/2 geom_point( aes(color = interaction(1 + 3*X1 - X2 > 0, -2 + X1 + 2*X2 > 0)), size = 0.5 ) + scale_color_discrete(name = "(1 + 3X1 − X2 > 0).(−2 + X1 + 2X2 > 0)") 9.1.2 Question 2 We have seen that in \\(p = 2\\) dimensions, a linear decision boundary takes the form \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 = 0\\). We now investigate a non-linear decision boundary. Sketch the curve \\[(1+X_1)^2 +(2−X_2)^2 = 4\\]. 
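The boundary is a circle centred at \\((-1, 2)\\) with radius 2, so it can also be drawn parametrically; a quick base R sketch:
theta <- seq(0, 2 * pi, length.out = 200)
plot(-1 + 2 * cos(theta), 2 + 2 * sin(theta), type = "l", xlab = "X1", ylab = "X2", asp = 1)
The contour-based version below achieves the same thing directly from the implicit equation.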
points <- expand.grid( X1 = seq(-4, 2, length.out = 100), X2 = seq(-1, 5, length.out = 100) ) p <- ggplot(points, aes(x = X1, y = X2, z = (1 + X1)^2 + (2 - X2)^2 - 4)) + geom_contour(breaks = 0, colour = "black") + theme_bw() p On your sketch, indicate the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] as well as the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 \\leq 4.\\] p + geom_point(aes(color = (1 + X1)^2 + (2 - X2)^2 - 4 > 0), size = 0.1) Suppose that a classifier assigns an observation to the blue class if \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] and to the red class otherwise. To what class is the observation \\((0, 0)\\) classified? \\((−1, 1)\\)? \\((2, 2)\\)? \\((3, 8)\\)? points <- data.frame( X1 = c(0, -1, 2, 3), X2 = c(0, 1, 2, 8) ) ifelse((1 + points$X1)^2 + (2 - points$X2)^2 > 4, "blue", "red") ## [1] "blue" "red" "blue" "blue" Argue that while the decision boundary in (c) is not linear in terms of \\(X_1\\) and \\(X_2\\), it is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), and \\(X_2^2\\). The decision boundary is \\[(1 + X_1)^2 + (2 − X_2)^2 -4 = 0\\] which we can expand to: \\[1 + 2X_1 + X_1^2 + 4 − 4X_2 + X_2^2 - 4 = 0\\] which is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), \\(X_2^2\\). 9.1.3 Question 3 Here we explore the maximal margin classifier on a toy data set. We are given \\(n = 7\\) observations in \\(p = 2\\) dimensions. For each observation, there is an associated class label. Obs. \\(X_1\\) \\(X_2\\) \\(Y\\) 1 3 4 Red 2 2 2 Red 3 4 4 Red 4 1 4 Red 5 2 1 Blue 6 4 3 Blue 7 4 1 Blue Sketch the observations. data <- data.frame( X1 = c(3, 2, 4, 1, 2, 4, 4), X2 = c(4, 2, 4, 4, 1, 3, 1), Y = c(rep("Red", 4), rep("Blue", 3)) ) p <- ggplot(data, aes(x = X1, y = X2, color = Y)) + geom_point(size = 2) + scale_colour_identity() + coord_cartesian(xlim = c(0.5, 4.5), ylim = c(0.5, 4.5)) p Sketch the optimal separating hyperplane, and provide the equation for this hyperplane (of the form (9.1)). library(e1071) fit <- svm(as.factor(Y) ~ ., data = data, kernel = "linear", cost = 10, scale = FALSE) # Extract beta_0, beta_1, beta_2 beta <- c( -fit$rho, drop(t(fit$coefs) %*% as.matrix(data[fit$index, 1:2])) ) names(beta) <- c("B0", "B1", "B2") p <- p + geom_abline(intercept = -beta[1] / beta[3], slope = -beta[2] / beta[3], lty = 2) p Describe the classification rule for the maximal margin classifier. It should be something along the lines of “Classify to Red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\), and classify to Blue otherwise.” Provide the values for \\(\\beta_0, \\beta_1,\\) and \\(\\beta_2\\). Classify to red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\) and blue otherwise where \\(\\beta_0 = 1\\), \\(\\beta_1 = -2\\), \\(\\beta_2 = 2\\). On your sketch, indicate the margin for the maximal margin hyperplane. p <- p + geom_ribbon( aes(x = x, ymin = ymin, ymax = ymax), data = data.frame(x = c(0, 5), ymin = c(-1, 4), ymax = c(0, 5)), alpha = 0.1, fill = "blue", inherit.aes = FALSE ) p Indicate the support vectors for the maximal margin classifier. p <- p + geom_point(data = data[fit$index, ], size = 4) p The support vectors (from the svm fit object) are shown above. Arguably, there’s another support vector, since four points exactly touch the margin. Argue that a slight movement of the seventh observation would not affect the maximal margin hyperplane. p + geom_point(data = data[7, , drop = FALSE], size = 4, color = "purple") The 7th point is shown in purple above. 
It is not a support vector, and not close to the margin, so small changes in its X1, X2 values would not affect the current calculated margin. Sketch a hyperplane that is not the optimal separating hyperplane, and provide the equation for this hyperplane. A non-optimal hyperplane that still separates the blue and red points would be one that touches the (red) point at X1 = 2, X2 = 2 and the (blue) point at X1 = 4, X2 = 3. This gives the line \\(y = x/2 + 1\\), i.e. \\(\\beta_0 = -1\\), \\(\\beta_1 = -1/2\\), \\(\\beta_2 = 1\\). p + geom_abline(intercept = 1, slope = 0.5, lty = 2, col = "red") Draw an additional observation on the plot so that the two classes are no longer separable by a hyperplane. p + geom_point(data = data.frame(X1 = 1, X2 = 3, Y = "Blue"), shape = 15, size = 4) 9.2 Applied 9.2.1 Question 4 Generate a simulated two-class data set with 100 observations and two features in which there is a visible but non-linear separation between the two classes. Show that in this setting, a support vector machine with a polynomial kernel (with degree greater than 1) or a radial kernel will outperform a support vector classifier on the training data. Which technique performs best on the test data? Make plots and report training and test error rates in order to back up your assertions. set.seed(10) data <- data.frame( x = runif(100), y = runif(100) ) score <- (2*data$x-0.5)^2 + (data$y)^2 - 0.5 data$class <- factor(ifelse(score > 0, "red", "blue")) p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() p train <- 1:50 test <- 51:100 fits <- list( "Radial" = svm(class ~ ., data = data[train, ], kernel = "radial"), "Polynomial" = svm(class ~ ., data = data[train, ], kernel = "polynomial", degree = 2), "Linear" = svm(class ~ ., data = data[train, ], kernel = "linear") ) err <- function(model, data) { out <- table(predict(model, data), data$class) (out[1, 2] + out[2, 1]) / sum(out) } plot(fits[[1]], data) plot(fits[[2]], data) plot(fits[[3]], data) sapply(fits, err, data = data[train, ]) ## Radial Polynomial Linear ## 0.04 0.30 0.10 sapply(fits, err, data = data[test, ]) ## Radial Polynomial Linear ## 0.06 0.48 0.14 In this case, the radial kernel performs best, followed by the linear kernel, with the 2nd-degree polynomial performing worst. The ordering of these models is the same for the training and test data sets. 9.2.2 Question 5 We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features. Generate a data set with \\(n = 500\\) and \\(p = 2\\), such that the observations belong to two classes with a quadratic decision boundary between them. For instance, you can do this as follows: > x1 <- runif(500) - 0.5 > x2 <- runif(500) - 0.5 > y <- 1 * (x1^2 - x2^2 > 0) set.seed(42) train <- data.frame( x1 = runif(500) - 0.5, x2 = runif(500) - 0.5 ) train$y <- factor(as.numeric((train$x1^2 - train$x2^2 > 0))) Plot the observations, colored according to their class labels. Your plot should display \\(X_1\\) on the \\(x\\)-axis, and \\(X_2\\) on the \\(y\\)-axis. p <- ggplot(train, aes(x = x1, y = x2, color = y)) + geom_point(size = 2) p Fit a logistic regression model to the data, using \\(X_1\\) and \\(X_2\\) as predictors.
fit1 <- glm(y ~ ., data = train, family = "binomial") Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear. plot_model <- function(fit) { if (inherits(fit, "svm")) { train$p <- predict(fit) } else { train$p <- factor(as.numeric(predict(fit) > 0)) } ggplot(train, aes(x = x1, y = x2, color = p)) + geom_point(size = 2) } plot_model(fit1) Now fit a logistic regression model to the data using non-linear functions of \\(X_1\\) and \\(X_2\\) as predictors (e.g. \\(X_1^2, X_1 \\times X_2, \\log(X_2),\\) and so forth). fit2 <- glm(y ~ poly(x1, 2) + poly(x2, 2), data = train, family = "binomial") ## Warning: glm.fit: algorithm did not converge ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear. plot_model(fit2) Fit a support vector classifier to the data with \\(X_1\\) and \\(X_2\\) as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit3 <- svm(y ~ x1 + x2, data = train, kernel = "linear") plot_model(fit3) Fit a SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit4 <- svm(y ~ x1 + x2, data = train, kernel = "polynomial", degree = 2) plot_model(fit4) Comment on your results. When simulating data with a quadratic decision boundary, a logistic model with quadratic transformations of the variables and an svm model with a quadratic kernel both produce much better (and similar) fits than standard linear methods. 9.2.3 Question 6 At the end of Section 9.6.1, it is claimed that in the case of data that is just barely linearly separable, a support vector classifier with a small value of cost that misclassifies a couple of training observations may perform better on test data than one with a huge value of cost that does not misclassify any training observations. You will now investigate this claim. Generate two-class data with \\(p = 2\\) in such a way that the classes are just barely linearly separable. set.seed(2) # Simulate data that is separable by a line at y = 2.5 data <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) data$y <- (data$class == "red") * 5 + rnorm(200) # Add barely separable points (these are simulated "noise" values) newdata <- data.frame(x = rnorm(30)) newdata$y <- 1.5*newdata$x + 3 + rnorm(30, 0, 1) newdata$class = ifelse((1.5*newdata$x + 3) - newdata$y > 0, "blue", "red") data <- rbind(data, newdata) # remove any that cause misclassification leaving data that is barely linearly # separable, but along an axis that is not y = 2.5 (which would be correct # for the "true" data).
data <- data[!(data$class == "red") == ((1.5*data$x + 3 - data$y) > 0), ] data <- data[sample(seq_len(nrow(data)), 200), ] p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() + geom_abline(intercept = 3, slope = 1.5, lty = 2) p Compute the cross-validation error rates for support vector classifiers with a range of cost values. How many training errors are misclassified for each value of cost considered, and how does this relate to the cross-validation errors obtained? How many training errors are misclassified for each value of cost? costs <- 10^seq(-3, 5) sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, data) sum(pred != data$class) }) ## [1] 98 8 9 4 1 1 0 0 0 Cross-validation errors out <- tune(svm, as.factor(class) ~ ., data = data, kernel = "linear", ranges = list(cost = costs)) summary(out) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 10 ## ## - best performance: 0.005 ## ## - Detailed performance results: ## cost error dispersion ## 1 1e-03 0.540 0.09067647 ## 2 1e-02 0.045 0.02838231 ## 3 1e-01 0.045 0.03689324 ## 4 1e+00 0.020 0.02581989 ## 5 1e+01 0.005 0.01581139 ## 6 1e+02 0.005 0.01581139 ## 7 1e+03 0.005 0.01581139 ## 8 1e+04 0.010 0.02108185 ## 9 1e+05 0.010 0.02108185 data.frame( cost = out$performances$cost, misclass = out$performances$error * nrow(data) ) ## cost misclass ## 1 1e-03 108 ## 2 1e-02 9 ## 3 1e-01 9 ## 4 1e+00 4 ## 5 1e+01 1 ## 6 1e+02 1 ## 7 1e+03 1 ## 8 1e+04 2 ## 9 1e+05 2 Generate an appropriate test data set, and compute the test errors corresponding to each of the values of cost considered. Which value of cost leads to the fewest test errors, and how does this compare to the values of cost that yield the fewest training errors and the fewest cross-validation errors? set.seed(2) test <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) test$y <- (test$class == "red") * 5 + rnorm(200) p + geom_point(data = test, pch = 21) (errs <- sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, test) sum(pred != test$class) })) ## [1] 95 2 3 9 16 16 19 19 19 (cost <- costs[which.min(errs)]) ## [1] 0.01 (fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost)) ## ## Call: ## svm(formula = as.factor(class) ~ ., data = data, kernel = "linear", ## cost = cost) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 135 test$prediction <- predict(fit, test) p <- ggplot(test, aes(x = x, y = y, color = class, shape = prediction == class)) + geom_point(size = 2) + scale_colour_identity() p Discuss your results. A large cost leads to overfitting as the model finds the perfect linear separation between red and blue in the training data. A lower cost then leads to improved prediction in the test data. 9.2.4 Question 7 In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable that takes on a 1 for cars with gas mileage above the median, and a 0 for cars with gas mileage below the median. 
library(ISLR2) data <- Auto data$high_mpg <- as.factor(as.numeric(data$mpg > median(data$mpg))) Fit a support vector classifier to the data with various values of cost, in order to predict whether a car gets high or low gas mileage. Report the cross-validation errors associated with different values of this parameter. Comment on your results. Note you will need to fit the classifier without the gas mileage variable to produce sensible results. set.seed(42) costs <- 10^seq(-4, 3, by = 0.5) results <- list() f <- high_mpg ~ displacement + horsepower + weight results$linear <- tune(svm, f, data = data, kernel = "linear", ranges = list(cost = costs)) summary(results$linear) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 0.03162278 ## ## - best performance: 0.1019231 ## ## - Detailed performance results: ## cost error dispersion ## 1 1.000000e-04 0.5967949 0.05312225 ## 2 3.162278e-04 0.5967949 0.05312225 ## 3 1.000000e-03 0.2199359 0.08718077 ## 4 3.162278e-03 0.1353846 0.06058195 ## 5 1.000000e-02 0.1121795 0.04011293 ## 6 3.162278e-02 0.1019231 0.05087176 ## 7 1.000000e-01 0.1096154 0.05246238 ## 8 3.162278e-01 0.1044872 0.05154934 ## 9 1.000000e+00 0.1044872 0.05154934 ## 10 3.162278e+00 0.1044872 0.05154934 ## 11 1.000000e+01 0.1019231 0.05501131 ## 12 3.162278e+01 0.1019231 0.05501131 ## 13 1.000000e+02 0.1019231 0.05501131 ## 14 3.162278e+02 0.1019231 0.05501131 ## 15 1.000000e+03 0.1019231 0.05501131 Now repeat (b), this time using SVMs with radial and polynomial basis kernels, with different values of gamma and degree and cost. Comment on your results. results$polynomial <- tune(svm, f, data = data, kernel = "polynomial", ranges = list(cost = costs, degree = 1:3)) summary(results$polynomial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost degree ## 0.1 1 ## ## - best performance: 0.101859 ## ## - Detailed performance results: ## cost degree error dispersion ## 1 1.000000e-04 1 0.5842949 0.04703306 ## 2 3.162278e-04 1 0.5842949 0.04703306 ## 3 1.000000e-03 1 0.5842949 0.04703306 ## 4 3.162278e-03 1 0.2167949 0.07891173 ## 5 1.000000e-02 1 0.1275641 0.04806885 ## 6 3.162278e-02 1 0.1147436 0.05661708 ## 7 1.000000e-01 1 0.1018590 0.05732429 ## 8 3.162278e-01 1 0.1069231 0.05949679 ## 9 1.000000e+00 1 0.1069231 0.06307278 ## 10 3.162278e+00 1 0.1069231 0.06307278 ## 11 1.000000e+01 1 0.1043590 0.06603760 ## 12 3.162278e+01 1 0.1043590 0.06603760 ## 13 1.000000e+02 1 0.1043590 0.06603760 ## 14 3.162278e+02 1 0.1043590 0.06603760 ## 15 1.000000e+03 1 0.1043590 0.06603760 ## 16 1.000000e-04 2 0.5842949 0.04703306 ## 17 3.162278e-04 2 0.5842949 0.04703306 ## 18 1.000000e-03 2 0.5842949 0.04703306 ## 19 3.162278e-03 2 0.5255128 0.08090636 ## 20 1.000000e-02 2 0.3980769 0.08172400 ## 21 3.162278e-02 2 0.3674359 0.07974741 ## 22 1.000000e-01 2 0.3597436 0.08336609 ## 23 3.162278e-01 2 0.3597436 0.09010398 ## 24 1.000000e+00 2 0.3444872 0.08767258 ## 25 3.162278e+00 2 0.3545513 0.10865903 ## 26 1.000000e+01 2 0.3239103 0.09593710 ## 27 3.162278e+01 2 0.3035256 0.08184137 ## 28 1.000000e+02 2 0.3061538 0.08953945 ## 29 3.162278e+02 2 0.3060897 0.08919821 ## 30 1.000000e+03 2 0.3035897 0.09305216 ## 31 1.000000e-04 3 0.5842949 0.04703306 ## 32 3.162278e-04 3 0.4955128 0.10081350 ## 33 1.000000e-03 3 0.3750641 0.08043982 ## 34 3.162278e-03 3 0.3036538 0.09096445 ## 35 1.000000e-02 3 0.2601282 0.07774595 ## 36 3.162278e-02 3 0.2499359 0.08407106 ## 37 
1.000000e-01 3 0.2017949 0.07547413 ## 38 3.162278e-01 3 0.1937179 0.08427411 ## 39 1.000000e+00 3 0.1478205 0.04579654 ## 40 3.162278e+00 3 0.1451923 0.05169638 ## 41 1.000000e+01 3 0.1451282 0.04698931 ## 42 3.162278e+01 3 0.1500000 0.07549058 ## 43 1.000000e+02 3 0.1373718 0.05772558 ## 44 3.162278e+02 3 0.1271795 0.06484766 ## 45 1.000000e+03 3 0.1322436 0.06764841 results$radial <- tune(svm, f, data = data, kernel = "radial", ranges = list(cost = costs, gamma = 10^(-2:1))) summary(results$radial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost gamma ## 1000 0.1 ## ## - best performance: 0.08179487 ## ## - Detailed performance results: ## cost gamma error dispersion ## 1 1.000000e-04 0.01 0.58410256 0.05435320 ## 2 3.162278e-04 0.01 0.58410256 0.05435320 ## 3 1.000000e-03 0.01 0.58410256 0.05435320 ## 4 3.162278e-03 0.01 0.58410256 0.05435320 ## 5 1.000000e-02 0.01 0.58410256 0.05435320 ## 6 3.162278e-02 0.01 0.26557692 0.10963269 ## 7 1.000000e-01 0.01 0.15038462 0.05783237 ## 8 3.162278e-01 0.01 0.11224359 0.04337812 ## 9 1.000000e+00 0.01 0.10730769 0.04512161 ## 10 3.162278e+00 0.01 0.10730769 0.04512161 ## 11 1.000000e+01 0.01 0.10737179 0.05526490 ## 12 3.162278e+01 0.01 0.10480769 0.05610124 ## 13 1.000000e+02 0.01 0.10480769 0.05610124 ## 14 3.162278e+02 0.01 0.10737179 0.05526490 ## 15 1.000000e+03 0.01 0.10993590 0.05690926 ## 16 1.000000e-04 0.10 0.58410256 0.05435320 ## 17 3.162278e-04 0.10 0.58410256 0.05435320 ## 18 1.000000e-03 0.10 0.58410256 0.05435320 ## 19 3.162278e-03 0.10 0.58410256 0.05435320 ## 20 1.000000e-02 0.10 0.15301282 0.06026554 ## 21 3.162278e-02 0.10 0.11480769 0.04514816 ## 22 1.000000e-01 0.10 0.10730769 0.04512161 ## 23 3.162278e-01 0.10 0.10730769 0.04512161 ## 24 1.000000e+00 0.10 0.10737179 0.05526490 ## 25 3.162278e+00 0.10 0.10737179 0.05526490 ## 26 1.000000e+01 0.10 0.10737179 0.05526490 ## 27 3.162278e+01 0.10 0.10737179 0.05526490 ## 28 1.000000e+02 0.10 0.09967949 0.04761387 ## 29 3.162278e+02 0.10 0.08429487 0.03207585 ## 30 1.000000e+03 0.10 0.08179487 0.03600437 ## 31 1.000000e-04 1.00 0.58410256 0.05435320 ## 32 3.162278e-04 1.00 0.58410256 0.05435320 ## 33 1.000000e-03 1.00 0.58410256 0.05435320 ## 34 3.162278e-03 1.00 0.58410256 0.05435320 ## 35 1.000000e-02 1.00 0.12506410 0.05342773 ## 36 3.162278e-02 1.00 0.10730769 0.06255920 ## 37 1.000000e-01 1.00 0.10993590 0.05561080 ## 38 3.162278e-01 1.00 0.10737179 0.05526490 ## 39 1.000000e+00 1.00 0.09711538 0.05107441 ## 40 3.162278e+00 1.00 0.08429487 0.03634646 ## 41 1.000000e+01 1.00 0.08692308 0.03877861 ## 42 3.162278e+01 1.00 0.08948718 0.03503648 ## 43 1.000000e+02 1.00 0.09198718 0.03272127 ## 44 3.162278e+02 1.00 0.10217949 0.04214031 ## 45 1.000000e+03 1.00 0.09692308 0.04645046 ## 46 1.000000e-04 10.00 0.58410256 0.05435320 ## 47 3.162278e-04 10.00 0.58410256 0.05435320 ## 48 1.000000e-03 10.00 0.58410256 0.05435320 ## 49 3.162278e-03 10.00 0.58410256 0.05435320 ## 50 1.000000e-02 10.00 0.58410256 0.05435320 ## 51 3.162278e-02 10.00 0.22205128 0.12710181 ## 52 1.000000e-01 10.00 0.11237179 0.03888895 ## 53 3.162278e-01 10.00 0.10217949 0.04375722 ## 54 1.000000e+00 10.00 0.09717949 0.03809440 ## 55 3.162278e+00 10.00 0.09717949 0.03809440 ## 56 1.000000e+01 10.00 0.09711538 0.04161705 ## 57 3.162278e+01 10.00 0.11487179 0.04240664 ## 58 1.000000e+02 10.00 0.13019231 0.03541140 ## 59 3.162278e+02 10.00 0.13532051 0.03865626 ## 60 1.000000e+03 10.00 0.14044872 0.04251917 sapply(results, function(x) 
x$best.performance) ## linear polynomial radial ## 0.10192308 0.10185897 0.08179487 sapply(results, function(x) x$best.parameters) ## $linear ## cost ## 6 0.03162278 ## ## $polynomial ## cost degree ## 7 0.1 1 ## ## $radial ## cost gamma ## 30 1000 0.1 Make some plots to back up your assertions in (b) and (c). Hint: In the lab, we used the plot() function for svm objects only in cases with \\(p = 2\\). When \\(p > 2\\), you can use the plot() function to create plots displaying pairs of variables at a time. Essentially, instead of typing > plot(svmfit, dat) where svmfit contains your fitted model and dat is a data frame containing your data, you can type > plot(svmfit, dat, x1 ∼ x4) in order to plot just the first and fourth variables. However, you must replace x1 and x4 with the correct variable names. To find out more, type ?plot.svm. table(predict(results$radial$best.model, data), data$high_mpg) ## ## 0 1 ## 0 176 5 ## 1 20 191 plot(results$radial$best.model, data, horsepower~displacement) plot(results$radial$best.model, data, horsepower~weight) plot(results$radial$best.model, data, displacement~weight) 9.2.5 Question 8 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(seq_len(nrow(OJ)), 800) test <- setdiff(seq_len(nrow(OJ)), train) Fit a support vector classifier to the training data using cost = 0.01, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics, and describe the results obtained. fit <- svm(Purchase ~ ., data = OJ[train, ], kernel = "linear", cost = 0.01) summary(fit) ## ## Call: ## svm(formula = Purchase ~ ., data = OJ[train, ], kernel = "linear", ## cost = 0.01) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 432 ## ## ( 215 217 ) ## ## ## Number of Classes: 2 ## ## Levels: ## CH MM What are the training and test error rates? err <- function(model, data) { t <- table(predict(model, data), data[["Purchase"]]) 1 - sum(diag(t)) / sum(t) } errs <- function(model) { c(train = err(model, OJ[train, ]), test = err(model, OJ[test, ])) } errs(fit) ## train test ## 0.171250 0.162963 Use the tune() function to select an optimal cost. Consider values in the range 0.01 to 10. tuned <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "linear", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned$best.parameters ## cost ## 7 1 summary(tuned) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 1 ## ## - best performance: 0.1775 ## ## - Detailed performance results: ## cost error dispersion ## 1 0.01000000 0.18250 0.04133199 ## 2 0.02154435 0.18000 0.04005205 ## 3 0.04641589 0.18000 0.05041494 ## 4 0.10000000 0.18000 0.04901814 ## 5 0.21544347 0.18250 0.04377975 ## 6 0.46415888 0.18250 0.04090979 ## 7 1.00000000 0.17750 0.04031129 ## 8 2.15443469 0.18000 0.03961621 ## 9 4.64158883 0.17875 0.03821086 ## 10 10.00000000 0.18375 0.03438447 Compute the training and test error rates using this new value for cost. errs(tuned$best.model) ## train test ## 0.167500 0.162963 Repeat parts (b) through (e) using a support vector machine with a radial kernel. Use the default value for gamma. 
tuned2 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "radial", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned2$best.parameters ## cost ## 6 0.4641589 errs(tuned2$best.model) ## train test ## 0.1525000 0.1666667 Repeat parts (b) through (e) using a support vector machine with a polynomial kernel. Set degree = 2. tuned3 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "polynomial", ranges = list(cost = 10^seq(-2, 1, length.out = 10)), degree = 2) tuned3$best.parameters ## cost ## 9 4.641589 errs(tuned3$best.model) ## train test ## 0.1487500 0.1703704 Overall, which approach seems to give the best results on this data? Overall the “radial” kernel appears to perform best in this case. "],["deep-learning.html", "10 Deep Learning 10.1 Conceptual 10.2 Applied", " 10 Deep Learning 10.1 Conceptual 10.1.1 Question 1 Consider a neural network with two hidden layers: \\(p = 4\\) input units, 2 units in the first hidden layer, 3 units in the second hidden layer, and a single output. Draw a picture of the network, similar to Figures 10.1 or 10.4. Write out an expression for \\(f(X)\\), assuming ReLU activation functions. Be as explicit as you can! The three layers (from our final output layer back to the start of our network) can be described as: \\[\\begin{align*} f(X) &= g(w_{0}^{(3)} + \\sum^{K_2}_{l=1} w_{l}^{(3)} A_l^{(2)}) \\\\ A_l^{(2)} &= h_l^{(2)}(X) = g(w_{l0}^{(2)} + \\sum_{k=1}^{K_1} w_{lk}^{(2)} A_k^{(1)})\\\\ A_k^{(1)} &= h_k^{(1)}(X) = g(w_{k0}^{(1)} + \\sum_{j=1}^p w_{kj}^{(1)} X_j) \\\\ \\end{align*}\\] for \\(l = 1, ..., K_2 = 3\\) and \\(k = 1, ..., K_1 = 2\\) and \\(p = 4\\), where, \\[ g(z) = (z)_+ = \\begin{cases} 0, & \\text{if } z < 0 \\\\ z, & \\text{otherwise} \\end{cases} \\] Now plug in some values for the coefficients and write out the value of \\(f(X)\\). We can perhaps achieve this most easily by fitting a real model. Note, in the plot shown here, we also include the “bias” or intercept terms. library(ISLR2) library(neuralnet) library(sigmoid) set.seed(5) train <- sample(seq_len(nrow(ISLR2::Boston)), nrow(ISLR2::Boston) * 2/3) net <- neuralnet(crim ~ lstat + medv + ptratio + rm, data = ISLR2::Boston[train, ], act.fct = relu, hidden = c(2, 3) ) plot(net) We can make a prediction for a given observation using this object. Firstly, let’s find an “ambiguous” test sample p <- predict(net, ISLR2::Boston[-train, ]) x <- ISLR2::Boston[-train, ][which.min(abs(p - mean(c(max(p), min(p))))), ] x <- x[, c("lstat", "medv", "ptratio", "rm")] predict(net, x) ## [,1] ## 441 19.14392 Or, repeating by “hand”: g <- function(x) ifelse(x > 0, x, 0) # relu activation function w <- net$weights[[1]] # the estimated weights for each layer v <- as.numeric(x) # our input predictors # to calculate our prediction we can take the dot product of our predictors # (with 1 at the start for the bias term) and our layer weights, lw) for (lw in w) v <- g(c(1, v) %*% lw) v ## [,1] ## [1,] 19.14392 How many parameters are there? length(unlist(net$weights)) ## [1] 23 There are \\(4*2+2 + 2*3+3 + 3*1+1 = 23\\) parameters. 10.1.2 Question 2 Consider the softmax function in (10.13) (see also (4.13) on page 141) for modeling multinomial probabilities. In (10.13), show that if we add a constant \\(c\\) to each of the \\(z_l\\), then the probability is unchanged. 
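Before the algebra, a quick numerical check of this invariance (sketch):
softmax <- function(z) exp(z) / sum(exp(z))
z <- rnorm(10)
all.equal(softmax(z), softmax(z + 5)) # TRUE: adding a constant leaves the probabilities unchanged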
If we add a constant \\(c\\) to each \\(Z_l\\) in equation 10.13 we get: \\[\\begin{align*} Pr(Y=m|X) &= \\frac{e^{Z_m+c}}{\\sum_{l=0}^9e^{Z_l+c}} \\\\ &= \\frac{e^{Z_m}e^c}{\\sum_{l=0}^9e^{Z_l}e^c} \\\\ &= \\frac{e^{Z_m}e^c}{e^c\\sum_{l=0}^9e^{Z_l}} \\\\ &= \\frac{e^{Z_m}}{\\sum_{l=0}^9e^{Z_l}} \\\\ \\end{align*}\\] which is just equation 10.13. In (4.13), show that if we add constants \\(c_j\\), \\(j = 0,1,...,p\\), to each of the corresponding coefficients for each of the classes, then the predictions at any new point \\(x\\) are unchanged. 4.13 is \\[ Pr(Y=k|X=x) = \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\] adding the constants \\(c_j\\) to the corresponding coefficients in every class gives: \\[\\begin{align*} Pr(Y=k|X=x) &= \\frac {e^{\\beta_{K0} + c_0 + (\\beta_{K1} + c_1)x_1 + ... + (\\beta_{Kp} + c_p)x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + c_0 + (\\beta_{l1} + c_1)x_1 + ... + (\\beta_{lp} + c_p)x_p}} \\\\ &= \\frac {e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {e^{c_0 + c_1x_1 + ... + c_px_p}\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ \\end{align*}\\] which collapses to 4.13: the common factor \\(e^{c_0 + c_1x_1 + ... + c_px_p}\\) does not depend on the class \\(l\\), so it cancels. This shows that the softmax function is over-parametrized. However, regularization and SGD typically constrain the solutions so that this is not a problem. 10.1.3 Question 3 Show that the negative multinomial log-likelihood (10.14) is equivalent to the negative log of the likelihood expression (4.5) when there are \\(M = 2\\) classes. Equation 10.14 is \\[ -\\sum_{i=1}^n \\sum_{m=0}^9 y_{im}\\log(f_m(x_i)) \\] Equation 4.5 is: \\[ \\ell(\\beta_0, \\beta_1) = \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\] So, \\(\\log(\\ell)\\) is: \\[\\begin{align*} \\log(\\ell) &= \\log \\left( \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\right ) \\\\ &= \\sum_{i:y_i=1}\\log(p(x_i)) + \\sum_{i':y_i'=0}\\log(1-p(x_i')) \\\\ \\end{align*}\\] If we set \\(y_i\\) to be an indicator variable such that \\(y_{i1}\\) and \\(y_{i0}\\) are 1 and 0 (or 0 and 1) when our \\(i\\)th observation is 1 (or 0) respectively, then we can write: \\[ \\log(\\ell) = \\sum_{i}y_{i1}\\log(p(x_i)) + \\sum_{i}y_{i0}\\log(1-p(x_i)) \\] If we also let \\(f_1(x) = p(x)\\) and \\(f_0(x) = 1 - p(x)\\) then: \\[\\begin{align*} \\log(\\ell) &= \\sum_i y_{i1}\\log(f_1(x_i)) + \\sum_{i}y_{i0}\\log(f_0(x_i)) \\\\ &= \\sum_i \\sum_{m=0}^1 y_{im}\\log(f_m(x_i)) \\\\ \\end{align*}\\] When we take the negative of this, it is equivalent to 10.14 for two classes (\\(m = 0,1\\)). 10.1.4 Question 4 Consider a CNN that takes in \\(32 \\times 32\\) grayscale images and has a single convolution layer with three \\(5 \\times 5\\) convolution filters (without boundary padding). Draw a sketch of the input and first hidden layer similar to Figure 10.8. How many parameters are in this model? There are 3 convolution matrices each with 5x5 weights (plus 3 bias terms) to estimate, therefore 3 * (5 * 5 + 1) = 78 parameters. Explain how this model can be thought of as an ordinary feed-forward neural network with the individual pixels as inputs, and with constraints on the weights in the hidden units. What are the constraints?
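As a numeric check of the parameter count from (a), one can build the layer in keras and count its parameters; a sketch, assuming keras is installed and configured:
library(keras)
m <- keras_model_sequential() |>
  layer_conv_2d(filters = 3, kernel_size = c(5, 5), input_shape = c(32, 32, 1))
count_params(m) # 3 * (5 * 5 + 1) = 78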
We can think of a convolution layer as a regularized fully connected layer. The regularization in this case is due to not all inputs being connected to all outputs, and weights being shared between connections. Each output node in the convolved image can be thought of as taking inputs from a limited number of input pixels (the neighboring pixels), with a set of weights specified by the convolution filter, which are then shared by the connections to all other output nodes. If there were no constraints, then how many weights would there be in the ordinary feed-forward neural network in (c)? With no constraints, we would connect each output pixel in our 3x28x28 convolution layer (a 5x5 filter without padding maps a 32x32 image to 28x28) to every node in the 32x32 original image, plus one bias term per output node, giving a total of 3 * 28 * 28 * (32 * 32 + 1) = 2,410,800 weights to estimate. 10.1.5 Question 5 In Table 10.2 on page 433, we see that the ordering of the three methods with respect to mean absolute error is different from the ordering with respect to test set \\(R^2\\). How can this be? Mean absolute error considers absolute differences between predictions and observed values, whereas \\(R^2\\) considers the (normalized) sum of squared differences, thus larger errors contribute relatively more to \\(R^2\\) than to mean absolute error. 10.2 Applied 10.2.1 Question 6 Consider the simple function \\(R(\\beta) = sin(\\beta) + \\beta/10\\). Draw a graph of this function over the range \\(\\beta \\in [−6, 6]\\). r <- function(x) sin(x) + x/10 x <- seq(-6, 6, 0.1) plot(x, r(x), type = "l") What is the derivative of this function? \\[ cos(x) + 1/10 \\] Given \\(\\beta^0 = 2.3\\), run gradient descent to find a local minimum of \\(R(\\beta)\\) using a learning rate of \\(\\rho = 0.1\\). Show each of \\(\\beta^0, \\beta^1, ...\\) in your plot, as well as the final answer. The derivative of our function, i.e. \\(cos(x) + 1/10\\), gives us the gradient for a given \\(x\\). For gradient descent, we move \\(x\\) a little in the opposite direction, for some learning rate \\(\\rho = 0.1\\): \\[ x^{m+1} = x^m - \\rho (cos(x^m) + 1/10) \\] iter <- function(x, rho) x - rho*(cos(x) + 1/10) gd <- function(start, rho = 0.1) { b <- start v <- b while(abs(b - iter(b, rho)) > 1e-8) { b <- iter(b, rho) v <- c(v, b) } v } res <- gd(2.3) res[length(res)] ## [1] 4.612221 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) Repeat with \\(\\beta^0 = 1.4\\). res <- gd(1.4) res[length(res)] ## [1] -1.670964 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) 10.2.2 Question 7 Fit a neural network to the Default data. Use a single hidden layer with 10 units, and dropout regularization. Have a look at Labs 10.9.1–10.9.2 for guidance. Compare the classification performance of your model with that of linear logistic regression. (Note: here we fit the model to the Boston data, predicting crim, so the comparison is with linear rather than logistic regression.) library(keras) dat <- ISLR2::Boston x <- scale(model.matrix(crim ~ .
- 1, data = dat)) n <- nrow(dat) ntest <- trunc(n / 3) testid <- sample(1:n, ntest) y <- dat$crim # linear regression lfit <- lm(crim ~ ., data = dat[-testid, ]) lpred <- predict(lfit, dat[testid, ]) with(dat[testid, ], mean(abs(lpred - crim))) ## [1] 2.99129 # keras nn <- keras_model_sequential() |> layer_dense(units = 10, activation = "relu", input_shape = ncol(x)) |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) compile(nn, loss = "mse", optimizer = optimizer_rmsprop(), metrics = list("mean_absolute_error") ) history <- fit(nn, x[-testid, ], y[-testid], epochs = 100, batch_size = 26, validation_data = list(x[testid, ], y[testid]), verbose = 0 ) plot(history, smooth = FALSE) npred <- predict(nn, x[testid, ]) ## 6/6 - 0s - 54ms/epoch - 9ms/step mean(abs(y[testid] - npred)) ## [1] 2.334041 In this case, the neural network outperforms linear regression, having a lower mean absolute error on the test data. 10.2.3 Question 8 From your collection of personal photographs, pick 10 images of animals (such as dogs, cats, birds, farm animals, etc.). If the subject does not occupy a reasonable part of the image, then crop the image. Now use a pretrained image classification CNN as in Lab 10.9.4 to predict the class of each of your images, and report the probabilities for the top five predicted classes for each image. library(keras) images <- list.files("images/animals") x <- array(dim = c(length(images), 224, 224, 3)) for (i in seq_len(length(images))) { img <- image_load(paste0("images/animals/", images[i]), target_size = c(224, 224)) x[i,,,] <- image_to_array(img) } model <- application_resnet50(weights = "imagenet") ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5
pred <- model |> predict(x) |> imagenet_decode_predictions(top = 5) ## 1/1 - 1s - 1s/epoch - 1s/step ## Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json names(pred) <- images print(pred) ## $bird.jpg ## class_name class_description score ## 1 n01819313 sulphur-crested_cockatoo 0.33546305 ## 2 n01580077 jay 0.18020906 ## 3 n02441942 weasel 0.08320859 ## 4 n02058221 albatross 0.07002056 ## 5 n01855672 goose 0.05195721 ## ## $bird2.jpg ## class_name class_description score ## 1 n02006656 spoonbill 0.840428233 ## 2 n02012849 crane 0.016258685 ## 3 n01819313 sulphur-crested_cockatoo 0.009740722 ## 4 n02007558 flamingo 0.007816141 ## 5 n01667778 terrapin 0.007497459 ## ## $bird3.jpg ## class_name class_description score ## 1 n01833805 hummingbird 0.9767877460 ## 2 n02033041 dowitcher 0.0111253690 ## 3 n02028035 redshank 0.0042764111 ## 4 n02009229 little_blue_heron 0.0012727526 ## 5 n02002724 black_stork 0.0008971311 ## ## $bug.jpg ## class_name class_description score ## 1 n02190166 fly 0.67558461 ## 2 n02167151 ground_beetle 0.10097048 ## 3 n02172182 dung_beetle 0.05490885 ## 4 n02169497 leaf_beetle 0.03541914 ## 5 n02168699 long-horned_beetle 0.03515299 ## ## $butterfly.jpg ## class_name class_description score ## 1 n02951585 can_opener 0.20600465 ## 2 n03476684 hair_slide 0.09360629 ## 3 n04074963 remote_control 0.06316858 ## 4 n02110185 Siberian_husky 0.05178998 ## 5 n02123597 Siamese_cat 0.03785341 ## ## $butterfly2.jpg ## class_name class_description score ## 1 n02276258 admiral 9.999689e-01 ## 2 n01580077 jay 1.388074e-05 ## 3 n02277742 ringlet 1.235042e-05 ## 4 n02279972 monarch 3.037859e-06 ## 5 n02281787 lycaenid 1.261888e-06 ## ## $elba.jpg ## class_name class_description score ## 1 n02085620 Chihuahua 0.29892012 ## 2 n02091032 Italian_greyhound 0.20332782 ## 3 n02109961 Eskimo_dog 0.08477225 ## 4 n02086910 papillon 0.05140305 ## 5 n02110185 Siberian_husky 0.05064517 ## ## $hamish.jpg ## class_name class_description score ## 1 n02097209 standard_schnauzer 0.6361451149 ## 2 n02097047 miniature_schnauzer 0.3450845778 ## 3 n02097130 giant_schnauzer 0.0164217781 ## 4 n02097298 Scotch_terrier 0.0019116047 ## 5 n02096177 cairn 0.0002054328 ## ## $poodle.jpg ## class_name class_description score ## 1 n02113799 standard_poodle 0.829670966 ## 2 n02088094 Afghan_hound 0.074567914 ## 3 n02113712 miniature_poodle 0.032005571 ## 4 n02102973 Irish_water_spaniel 0.018583152 ## 5 n02102318 cocker_spaniel 0.008629788 ## ## $tortoise.jpg ## class_name class_description score ## 1 n04033995 quilt 0.28395897 ## 2 n02110958 pug 0.15959552 ## 3 n03188531 diaper 0.14018111 ## 4 n02108915 French_bulldog 0.09364161 ## 5 n04235860 sleeping_bag 0.02608401 The model produces plausible top predictions for most of the images, but is clearly wrong on butterfly.jpg and tortoise.jpg.
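Note that Lab 10.9.4 also preprocesses the pixel array before prediction; adding that step (a sketch below) may change, and possibly improve, some of the weaker predictions.
# preprocess the images as in the lab before predicting
x <- imagenet_preprocess_input(x)
pred <- model |> predict(x) |> imagenet_decode_predictions(top = 5)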
10.2.4 Question 9 Fit a lag-5 autoregressive model to the NYSE data, as described in the text and Lab 10.9.6. Refit the model with a 12-level factor representing the month. Does this factor improve the performance of the model? Fitting the model as described in the text. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::compute() masks neuralnet::compute() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(ISLR2) xdata <- data.matrix(NYSE[, c("DJ_return", "log_volume","log_volatility")]) istrain <- NYSE[, "train"] xdata <- scale(xdata) lagm <- function(x, k = 1) { n <- nrow(x) pad <- matrix(NA, k, ncol(x)) rbind(pad, x[1:(n - k), ]) } arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.413223 Now we add month (and work with tidyverse). arframe$month = as.factor(str_match(NYSE$date, "-(\\\\d+)-")[,2])[-(1:5)] arfit2 <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred2 <- predict(arfit2, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred2 - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Adding month as a factor slightly improves the \\(R^2\\) of our model (from 0.413223 to 0.4170418). Although the improvement is small, it is statistically significant, and model 2 has a lower AIC. anova(arfit, arfit2) ## Analysis of Variance Table ## ## Model 1: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility ## Model 2: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility + ## month ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 4260 1791.0 ## 2 4249 1775.8 11 15.278 3.3234 0.000143 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 AIC(arfit, arfit2) ## df AIC ## arfit 17 8447.663 ## arfit2 28 8433.031
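Equivalently (up to the factor labels), the month can be extracted with lubridate, which is attached with the tidyverse; this sketch assumes NYSE$date parses with as.Date():
arframe$month <- as.factor(lubridate::month(as.Date(NYSE$date)))[-(1:5)]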
10.2.5 Question 10 In Section 10.9.6, we showed how to fit a linear AR model to the NYSE data using the lm() function. However, we also mentioned that we can “flatten” the short sequences produced for the RNN model in order to fit a linear AR model. Use this latter approach to fit a linear AR model to the NYSE data. Compare the test \\(R^2\\) of this linear AR model to that of the linear AR model that we fit in the lab. What are the advantages/disadvantages of each approach? The lm model is the same as that fit above: arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Now we reshape the data for the RNN. n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 3, 5)) xrnn <- xrnn[, , 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) We can add a “flatten” layer to turn the reshaped data into a long vector of predictors resulting in a linear AR model. model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 1) Now let’s fit this model. model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE) kpred <- predict(model, xrnn[!istrain,, ]) ## 56/56 - 0s - 58ms/epoch - 1ms/step 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4133125 Setting aside the month dummy variables (which the flattened model does not use), both models estimate the same number of coefficients/weights, namely 16 (15 lag terms plus an intercept or bias): coef(arfit) ## (Intercept) L1.DJ_return L1.log_volume L1.log_volatility ## 0.067916689 0.094410214 0.498673056 0.586274266 ## L2.DJ_return L2.log_volume L2.log_volatility L3.DJ_return ## -0.027299158 0.036903027 -0.931509135 0.037995916 ## L3.log_volume L3.log_volatility L4.DJ_return L4.log_volume ## 0.070312741 0.216160520 -0.004954842 0.117079461 ## L4.log_volatility L5.DJ_return L5.log_volume L5.log_volatility ## -0.039752786 -0.029620296 0.096034795 0.144510264 ## month02 month03 month04 month05 ## -0.100003367 -0.143781381 -0.028242819 -0.131120579 ## month06 month07 month08 month09 ## -0.125993911 -0.141608808 -0.163030102 -0.018889698 ## month10 month11 month12 ## -0.017206826 -0.037298183 0.008361380 model$get_weights() ## [[1]] ## [,1] ## [1,] -0.03262059 ## [2,] 0.09806149 ## [3,] 0.19123746 ## [4,] -0.00672294 ## [5,] 0.11956818 ## [6,] -0.08616812 ## [7,] 0.03884261 ## [8,] 0.07576967 ## [9,] 0.16982540 ## [10,] -0.02789208 ## [11,] 0.02615459 ## [12,] -0.76362336 ## [13,] 0.09488130 ## [14,] 0.51370680 ## [15,] 0.48065400 ## ## [[2]] ## [1] -0.005785846 The flattened RNN has a lower \\(R^2\\) on the test data than our lm model above. The lm model is quicker to fit and conceptually simpler; it also gives us the ability to inspect the coefficients for the different variables. The flattened RNN is regularized to some extent, as the data are processed in batches. 10.2.6 Question 11 Repeat the previous exercise, but now fit a nonlinear AR model by “flattening” the short sequences produced for the RNN model. From the book: To fit a nonlinear AR model, we could add in a hidden layer. xfun::cache_rds({ model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 32, activation = "relu") |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) model |> compile( loss = "mse", optimizer = optimizer_rmsprop(), metrics = "mse" ) history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE, metrics = "mse") kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 64ms/epoch - 1ms/step ## [1] 0.4267343 This approach improves our \\(R^2\\) over the linear model above.
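Since the test \\(R^2\\) is computed the same way for each model in these questions, a small helper keeps the comparisons consistent (the function name r2_test is our own):
r2_test <- function(pred, actual, v0 = var(actual)) {
  1 - mean((pred - actual)^2) / v0
}
# e.g. r2_test(kpred, arframe[!istrain, "log_volume"], V0)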
10.2.7 Question 12 Consider the RNN fit to the NYSE data in Section 10.9.6. Modify the code to allow inclusion of the variable day_of_week, and fit the RNN. Compute the test \\(R^2\\). To accomplish this, I’ll include day of the week as one of the lagged variables in the RNN. Thus, our input for each observation will be 5 x 4 (rather than 5 x 3). xfun::cache_rds({ xdata <- data.matrix( NYSE[, c("day_of_week", "DJ_return", "log_volume","log_volatility")] ) istrain <- NYSE[, "train"] xdata <- scale(xdata) arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 4, 5)) xrnn <- xrnn[,, 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) dim(xrnn) model <- keras_model_sequential() |> layer_simple_rnn(units = 12, input_shape = list(5, 4), dropout = 0.1, recurrent_dropout = 0.1 ) |> layer_dense(units = 1) model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 136ms/epoch - 2ms/step ## [1] 0.4447892 Including day_of_week gives a test \\(R^2\\) of about 0.445, a small improvement over the autoregressive models fit above.
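Note that data.matrix() converts day_of_week to its underlying numeric codes, which imposes an arbitrary ordering. An alternative sketch (assuming day_of_week is stored as a factor in NYSE) one-hot encodes it instead:
# five indicator columns rather than one numeric code
dow <- model.matrix(~ day_of_week - 1, data = NYSE)
xdata <- cbind(
  scale(data.matrix(NYSE[, c("DJ_return", "log_volume", "log_volatility")])),
  dow
)
# lagm() and the array reshaping would then use ncol(xdata) features per lag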
10.2.8 Question 13 Repeat the analysis of Lab 10.9.5 on the IMDb data using a similarly structured neural network. There we used a dictionary of size 10,000. Consider the effects of varying the dictionary size. Try the values 1000, 3000, 5000, and 10,000, and compare the results. xfun::cache_rds({ library(knitr) accuracy <- c() for(max_features in c(1000, 3000, 5000, 10000)) { imdb <- dataset_imdb(num_words = max_features) c(c(x_train, y_train), c(x_test, y_test)) %<-% imdb maxlen <- 500 x_train <- pad_sequences(x_train, maxlen = maxlen) x_test <- pad_sequences(x_test, maxlen = maxlen) model <- keras_model_sequential() |> layer_embedding(input_dim = max_features, output_dim = 32) |> layer_lstm(units = 32) |> layer_dense(units = 1, activation = "sigmoid") model |> compile( optimizer = "rmsprop", loss = "binary_crossentropy", metrics = "acc" ) history <- fit(model, x_train, y_train, epochs = 10, batch_size = 128, validation_data = list(x_test, y_test), verbose = 0 ) predy <- predict(model, x_test) > 0.5 accuracy <- c(accuracy, mean(abs(y_test == as.numeric(predy)))) } tibble( "Max Features" = c(1000, 3000, 5000, 10000), "Accuracy" = accuracy ) |> kable() }) ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step ## 782/782 - 15s - 15s/epoch - 20ms/step Max Features Accuracy 1000 0.84516 3000 0.87840 5000 0.86400 10000 0.87200 Varying the dictionary size does not have a substantial impact on our estimates of accuracy. However, the models take a substantial amount of time to fit, and it is not clear that we are finding the best-fitting model in each case. For example, with a dictionary size of 10,000 the text obtained an accuracy of 0.8721, and the difference between that figure and the estimate obtained here is comparable to the differences between the models with different dictionary sizes.
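One way to address the concern about finding the best-fitting model would be to train for longer with validation-based early stopping rather than a fixed 10 epochs; a sketch using the keras callback:
history <- fit(model, x_train, y_train,
  epochs = 30, batch_size = 128,
  validation_data = list(x_test, y_test),
  callbacks = list(callback_early_stopping(
    monitor = "val_loss", patience = 3, restore_best_weights = TRUE
  )),
  verbose = 0
)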
"],["survival-analysis-and-censored-data.html", "11 Survival Analysis and Censored Data 11.1 Conceptual 11.2 Applied", " 11 Survival Analysis and Censored Data 11.1 Conceptual 11.1.1 Question 1 For each example, state whether or not the censoring mechanism is independent. Justify your answer. In a study of disease relapse, due to a careless research scientist, all patients whose phone numbers begin with the number “2” are lost to follow up. Independent. There’s no reason to think disease relapse should be related to the first digit of a phone number. In a study of longevity, a formatting error causes all patient ages that exceed 99 years to be lost (i.e. we know that those patients are more than 99 years old, but we do not know their exact ages). Not independent. Older patients are more likely to see an event than younger patients. Hospital A conducts a study of longevity. However, very sick patients tend to be transferred to Hospital B, and are lost to follow up. Not independent. Sick patients are more likely to see an event than healthy patients. In a study of unemployment duration, the people who find work earlier are less motivated to stay in touch with study investigators, and therefore are more likely to be lost to follow up. Not independent. More employable individuals are more likely to see an event. In a study of pregnancy duration, women who deliver their babies pre-term are more likely to do so away from their usual hospital, and thus are more likely to be censored, relative to women who deliver full-term babies. Not independent. Delivery away from hospital will be associated with pregnancy duration. A researcher wishes to model the number of years of education of the residents of a small town. Residents who enroll in college out of town are more likely to be lost to follow up, and are also more likely to attend graduate school, relative to those who attend college in town. Not independent. Years of education will be associated with enrolling in out of town colleges. Researchers conduct a study of disease-free survival (i.e. time until disease relapse following treatment). Patients who have not relapsed within five years are considered to be cured, and thus their survival time is censored at five years. In other words, we assume all events happen within five years, so censoring after this time is equivalent to not censoring at all; the censoring is therefore independent. We wish to model the failure time for some electrical component. This component can be manufactured in Iowa or in Pittsburgh, with no difference in quality. The Iowa factory opened five years ago, and so components manufactured in Iowa are censored at five years. The Pittsburgh factory opened two years ago, so those components are censored at two years. If there is no difference in quality, then location, and therefore censoring, is independent of failure time. We wish to model the failure time of an electrical component made in two different factories, one of which opened before the other. We have reason to believe that the components manufactured in the factory that opened earlier are of higher quality. In this case, the difference in opening times of the two locations will mean that any difference in quality between locations will be associated with censoring, so censoring is not independent. 11.1.2 Question 2 We conduct a study with \\(n = 4\\) participants who have just purchased cell phones, in order to model the time until phone replacement. The first participant replaces her phone after 1.2 years. The second participant still has not replaced her phone at the end of the two-year study period. The third participant changes her phone number and is lost to follow up (but has not yet replaced her phone) 1.5 years into the study. The fourth participant replaces her phone after 0.2 years. For each of the four participants (\\(i = 1,..., 4\\)), answer the following questions using the notation introduced in Section 11.1: Is the participant’s cell phone replacement time censored? No, Yes, Yes and No. Censoring occurs when we do not know if or when the phone was replaced. Is the value of \\(c_i\\) known, and if so, then what is it? \\(c_i\\) is the censoring time. For the four participants these are: NA, 2, 1.5 and NA. Is the value of \\(t_i\\) known, and if so, then what is it? \\(t_i\\) is the time to event. For the four participants these are: 1.2, NA, NA and 0.2. Is the value of \\(y_i\\) known, and if so, then what is it? \\(y_i\\) is the observed time. For the four participants these are: 1.2, 2, 1.5 and 0.2. Is the value of \\(\\delta_i\\) known, and if so, then what is it? \\(\\delta_i\\) is an indicator for censoring. The nomenclature introduced here defines this to be 1 if we observe the true “survival” time and 0 if we observe the censored time. Therefore, for these participants, the values are: 1, 0, 0 and 1. 11.1.3 Question 3 For the example in Exercise 2, report the values of \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\), where this notation was defined in Section 11.3. \\(K\\) is the number of unique deaths, which is 2. \\(d_k\\) represents the unique death times, which are: 0.2, 1.2. \\(r_k\\) denotes the number of patients alive and in the study just before \\(d_k\\). Note the first event is for patient 4, then patient 1, then patient 3 is censored and finally the study ends with patient 2 still involved. Therefore \\(r_k\\) takes the values 4 and 3. \\(q_k\\) denotes the number of patients who died at time \\(d_k\\), so this takes the values 1 and 1. We can check by using the survival package. library(survival) x <- Surv(c(1.2, 2, 1.5, 0.2), event = c(1, 0, 0, 1)) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 0.2 4 1 0.75 0.217 0.426 1 ## 1.2 3 1 0.50 0.250 0.188 1 11.1.4 Question 4 This problem makes use of the Kaplan-Meier survival curve displayed in Figure 11.9. The raw data that went into plotting this survival curve is given in Table 11.4. The covariate column of that table is not needed for this problem. What is the estimated probability of survival past 50 days? There are 2 events that happen before 50 days.
The numbers at risk \\(r_k\\) are 5 and 4 (one observation was censored early on), so the survival probability is \\(4/5 \times 3/4 = 0.6\\). Equivalently, we can use the survival package. library(tidyverse) table_data <- tribble( ~Y, ~D, ~X, 26.5, 1, 0.1, 37.2, 1, 11, 57.3, 1, -0.3, 90.8, 0, 2.8, 20.2, 0, 1.8, 89.8, 0, 0.4 ) x <- Surv(table_data$Y, table_data$D) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 26.5 5 1 0.8 0.179 0.516 1 ## 37.2 4 1 0.6 0.219 0.293 1 ## 57.3 3 1 0.4 0.219 0.137 1 Write out an analytical expression for the estimated survival function. For instance, your answer might be something along the lines of \\[ \\hat{S}(t) = \\begin{cases} 0.8 & \\text{if } t < 31\\\\ 0.5 & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] (The previous equation is for illustration only: it is not the correct answer!) \\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < 26.5 \\\\ 0.8 & \\text{if } 26.5 \\le t < 37.2 \\\\ 0.6 & \\text{if } 37.2 \\le t < 57.3 \\\\ 0.4 & \\text{if } 57.3 \\le t \\end{cases} \\] 11.1.5 Question 5 Sketch the survival function given by the equation \\[ \\hat{S}(t) = \\begin{cases} 0.8, & \\text{if } t < 31\\\\ 0.5, & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] Your answer should look something like Figure 11.9. We can draw this plot, or even engineer data that will generate the required plot… plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 31, 31, 77, 77, 100), c(0.8, 0.8, 0.5, 0.5, 0.22, 0.22) ) 11.1.6 Question 6 This problem makes use of the data displayed in Figure 11.1. In completing this problem, you can refer to the observation times as \\(y_1,...,y_4\\). The ordering of these observation times can be seen from Figure 11.1; their exact values are not required. Report the values of \\(\\delta_1,...,\\delta_4\\), \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\). The relevant notation is defined in Sections 11.1 and 11.3. \\(\\delta\\) values are: 1, 0, 1, 0. \\(K\\) is 2. \\(d\\) values are \\(y_3\\) and \\(y_1\\). \\(r\\) values are 4 and 2. \\(q\\) values are 1 and 1. Sketch the Kaplan-Meier survival curve corresponding to this data set. (You do not need to use any software to do this—you can sketch it by hand using the results obtained in (a).) plot(NULL, xlim = c(0, 350), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 150, 150, 300, 300, 350), c(1, 1, 0.75, 0.75, 0.375, 0.375) ) x <- Surv(c(300, 350, 150, 250), c(1, 0, 1, 0)) Based on the survival curve estimated in (b), what is the probability that the event occurs within 200 days? What is the probability that the event does not occur within 310 days? 0.25 and 0.375. Write out an expression for the estimated survival curve from (b). \\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < y_3 \\\\ 0.75 & \\text{if } y_3 \\le t < y_1 \\\\ 0.375 & \\text{if } y_1 \\le t \\end{cases} \\] 11.1.7 Question 7 In this problem, we will derive (11.5) and (11.6), which are needed for the construction of the log-rank test statistic (11.8). Recall the notation in Table 11.1. Assume that there is no difference between the survival functions of the two groups.
Then we can think of \\(q_{1k}\\) as the number of failures if we draw \\(r_{1k}\\) observations, without replacement, from a risk set of \\(r_k\\) observations that contains a total of \\(q_k\\) failures. Argue that \\(q_{1k}\\) follows a hypergeometric distribution. Write the parameters of this distribution in terms of \\(r_{1k}\\), \\(r_k\\), and \\(q_k\\). A hypergeometric distribution models sampling without replacement from a finite pool where each sample is a success or failure. This fits the situation here, where we have a finite number of samples in the risk set. The hypergeometric distribution is parameterized as \\(k\\) successes in \\(n\\) draws, without replacement, from a population of size \\(N\\) containing \\(K\\) objects with that feature. Mapping to our situation, \\(q_{1k}\\) is \\(k\\), \\(r_{1k}\\) is \\(n\\), \\(r_k\\) is \\(N\\) and \\(q_k\\) is \\(K\\). Given your previous answer, and the properties of the hypergeometric distribution, what are the mean and variance of \\(q_{1k}\\)? Compare your answer to (11.5) and (11.6). With the above parameterization, the mean (\\(n K/N\\)) is \\(r_{1k} q_k/r_k\\). The variance \\(n K/N (N-K)/N (N-n)/(N-1)\\) is \\[ r_{1k} \\frac{q_k}{r_k} \\frac{r_k-q_k}{r_k} \\frac{r_k - r_{1k}}{r_k - 1} \\] These are equivalent to (11.5) and (11.6). 11.1.8 Question 8 Recall that the survival function \\(S(t)\\), the hazard function \\(h(t)\\), and the density function \\(f(t)\\) are defined in (11.2), (11.9), and (11.11), respectively. Furthermore, define \\(F(t) = 1 − S(t)\\). Show that the following relationships hold: \\[ f(t) = dF(t)/dt \\\\ S(t) = \\exp\\left(-\\int_0^t h(u)du\\right) \\] If \\(F(t) = 1 - S(t)\\), then \\(F(t)\\) is the cumulative distribution function (cdf) for \\(t\\). For a continuous distribution, the cdf can be expressed as an integral (up to some value \\(x\\)) of the probability density function (pdf), i.e. \\(F(x) = \\int_{-\\infty}^x f(t) dt\\). Equivalently, the derivative of the cdf is the pdf: \\(f(t) = \\frac{d F(t)}{dt}\\). Then, \\(h(t) = \\frac{f(t)}{S(t)} = \\frac{dF(t)/dt}{S(t)} = \\frac{-dS(t)/dt}{S(t)}\\). From basic calculus, this can be rewritten as \\(h(t) = -\\frac{d}{dt}\\log{S(t)}\\). Integrating and then exponentiating we get the second identity. 11.1.9 Question 9 In this exercise, we will explore the consequences of assuming that the survival times follow an exponential distribution. Suppose that a survival time follows an \\(Exp(\\lambda)\\) distribution, so that its density function is \\(f(t) = \\lambda\\exp(−\\lambda t)\\). Using the relationships provided in Exercise 8, show that \\(S(t) = \\exp(-\\lambda t)\\). The cdf of an exponential distribution is \\(F(t) = 1 - \\exp(-\\lambda t)\\) and \\(S(t)\\) is \\(1 - F(t)\\). Hence, \\(S(t) = \\exp(-\\lambda t)\\). Now suppose that each of \\(n\\) independent survival times follows an \\(\\exp(\\lambda)\\) distribution. Write out an expression for the likelihood function (11.13).
The reference to (11.13) gives us the following formula: \\[ L = \\prod_{i=1}^{n} h(y_i)^{\\delta_i} S(y_i) \\] (11.10) also gives us \\[ h(t) = \\frac{f(t)}{S(t)} \\] Plugging in the expressions from part (a), we get \\[\\begin{align*} h(t) &= \\frac{\\lambda \\exp(- \\lambda t)}{\\exp(- \\lambda t)} \\\\ &= \\lambda \\end{align*}\\] Using (11.13), we get the following likelihood expression: \\[ \\ell = \\prod_i \\lambda^{\\delta_i} e^{- \\lambda y_i} \\] Show that the maximum likelihood estimator for \\(\\lambda\\) is \\[ \\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i. \\] Take the log likelihood. \\[\\begin{align*} \\log \\ell &= \\sum_i \\log \\left( \\lambda^{\\delta_i} e^{- \\lambda y_i} \\right) \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i \\log e} \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i} \\\\ &= \\log\\lambda\\sum_i{\\delta_i} - \\lambda\\sum_i{y_i} \\end{align*}\\] Differentiating this expression with respect to \\(\\lambda\\) we get: \\[ \\frac{d \\log \\ell}{d \\lambda} = \\frac{\\sum_i{\\delta_i}}{\\lambda} - \\sum_i{y_i} \\] The likelihood is maximized where this derivative is 0; solving gives the MLE \\(\\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i\\). Use your answer to (c) to derive an estimator of the mean survival time. Hint: For (d), recall that the mean of an \\(Exp(\\lambda)\\) random variable is \\(1/\\lambda\\). The estimated mean survival time would be \\(1/\\hat\\lambda\\), which given the above is \\(\\sum_{i=1}^n y_i / \\sum_{i=1}^n \\delta_i\\); this can be thought of as the total observation time over the total number of deaths. 11.2 Applied 11.2.1 Question 10 This exercise focuses on the brain tumor data, which is included in the ISLR2 R library. Plot the Kaplan-Meier survival curve with ±1 standard error bands, using the survfit() function in the survival package. library(ISLR2) x <- Surv(BrainCancer$time, BrainCancer$status) plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) Draw a bootstrap sample of size \\(n = 88\\) from the pairs (\\(y_i\\), \\(\\delta_i\\)), and compute the resulting Kaplan-Meier survival curve. Repeat this process \\(B = 200\\) times. Use the results to obtain an estimate of the standard error of the Kaplan-Meier survival curve at each timepoint. Compare this to the standard errors obtained in (a). plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) fit <- survfit(x ~ 1) dat <- tibble(time = c(0, fit$time)) for (i in 1:200) { y <- survfit(sample(x, 88, replace = TRUE) ~ 1) y <- tibble(time = c(0, y$time), "s{i}" := c(1, y$surv)) dat <- left_join(dat, y, by = "time") } res <- fill(dat, starts_with("s")) |> rowwise() |> transmute(sd = sd(c_across(starts_with("s")))) se <- res$sd[2:nrow(res)] lines(fit$time, fit$surv - se, lty = 2, col = "red") lines(fit$time, fit$surv + se, lty = 2, col = "red") Fit a Cox proportional hazards model that uses all of the predictors to predict survival. Summarize the main findings.
fit <- coxph(Surv(time, status) ~ sex + diagnosis + loc + ki + gtv + stereo, data = BrainCancer) fit ## Call: ## coxph(formula = Surv(time, status) ~ sex + diagnosis + loc + ## ki + gtv + stereo, data = BrainCancer) ## ## coef exp(coef) se(coef) z p ## sexMale 0.18375 1.20171 0.36036 0.510 0.61012 ## diagnosisLG glioma 0.91502 2.49683 0.63816 1.434 0.15161 ## diagnosisHG glioma 2.15457 8.62414 0.45052 4.782 1.73e-06 ## diagnosisOther 0.88570 2.42467 0.65787 1.346 0.17821 ## locSupratentorial 0.44119 1.55456 0.70367 0.627 0.53066 ## ki -0.05496 0.94653 0.01831 -3.001 0.00269 ## gtv 0.03429 1.03489 0.02233 1.536 0.12466 ## stereoSRT 0.17778 1.19456 0.60158 0.296 0.76760 ## ## Likelihood ratio test=41.37 on 8 df, p=1.776e-06 ## n= 87, number of events= 35 ## (1 observation deleted due to missingness) The diagnosisHG glioma and ki coefficients are highly significant. Stratify the data by the value of ki. (Since only one observation has ki=40, you can group that observation together with the observations that have ki=60.) Plot Kaplan-Meier survival curves for each of the five strata, adjusted for the other predictors. To adjust for the other predictors, we fit a model that includes them, and use this model to predict new, artificial data in which ki takes each possible value while the other predictors are set to their mode or mean. library(ggfortify) modaldata <- data.frame( sex = rep("Female", 5), diagnosis = rep("Meningioma", 5), loc = rep("Supratentorial", 5), ki = c(60, 70, 80, 90, 100), gtv = rep(mean(BrainCancer$gtv), 5), stereo = rep("SRT", 5) ) survplots <- survfit(fit, newdata = modaldata) plot(survplots, xlab = "Months", ylab = "Survival Probability", col = 2:6) legend("bottomleft", c("60", "70", "80", "90", "100"), col = 2:6, lty = 1) 11.2.2 Question 11 This example makes use of the data in Table 11.4. Create two groups of observations. In Group 1, \\(X < 2\\), whereas in Group 2, \\(X \\ge 2\\). Plot the Kaplan-Meier survival curves corresponding to the two groups. Be sure to label the curves so that it is clear which curve corresponds to which group. By eye, does there appear to be a difference between the two groups’ survival curves? x <- split(Surv(table_data$Y, table_data$D), table_data$X < 2) plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Survival Probability") lines(survfit(x[[1]] ~ 1), conf.int = FALSE, col = 2) lines(survfit(x[[2]] ~ 1), conf.int = FALSE, col = 3) legend("bottomleft", c(">= 2", "<2"), col = 2:3, lty = 1) There does not appear to be any difference between the curves. Fit Cox’s proportional hazards model, using the group indicator as a covariate. What is the estimated coefficient? Write a sentence providing the interpretation of this coefficient, in terms of the hazard or the instantaneous probability of the event. Is there evidence that the true coefficient value is non-zero? fit <- coxph(Surv(Y, D) ~ X < 2, data = table_data) fit ## Call: ## coxph(formula = Surv(Y, D) ~ X < 2, data = table_data) ## ## coef exp(coef) se(coef) z p ## X < 2TRUE 0.3401 1.4051 1.2359 0.275 0.783 ## ## Likelihood ratio test=0.08 on 1 df, p=0.7797 ## n= 6, number of events= 3 The coefficient is \\(0.3401\\). This implies a slightly increased hazard when \\(X < 2\\), but it is not significantly different from zero (P = 0.8). Recall from Section 11.5.2 that in the case of a single binary covariate, the log-rank test statistic should be identical to the score statistic for the Cox model.
Conduct a log-rank test to determine whether there is a difference between the survival curves for the two groups. How does the p-value for the log-rank test statistic compare to the \\(p\\)-value for the score statistic for the Cox model from (b)? summary(fit)$sctest ## test df pvalue ## 0.07644306 1.00000000 0.78217683 survdiff(Surv(Y, D) ~ X < 2, data = table_data)$chisq ## [1] 0.07644306 They are identical. "],["unsupervised-learning.html", "12 Unsupervised Learning 12.1 Conceptual 12.2 Applied", " 12 Unsupervised Learning 12.1 Conceptual 12.1.1 Question 1 This problem involves the \\(K\\)-means clustering algorithm. Prove (12.18). 12.18 is: \\[ \\frac{1}{|C_k|}\\sum_{i,i' \in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = 2 \\sum_{i \in C_k} \\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\] where \\[\\bar{x}_{kj} = \\frac{1}{|C_k|}\\sum_{i \in C_k} x_{ij}\\] On the left hand side we compute the difference between each pair of observations (indexed by \\(i\\) and \\(i'\\)). On the right hand side we compute the difference between each observation and the mean. Intuitively this identity is clear (the factor of 2 is present because we calculate the difference between each pair twice). To prove it formally, note first that, \\[\\begin{align} (x_{ij} - x_{i'j})^2 = & ((x_{ij} - \\bar{x}_{kj}) - (x_{i'j} - \\bar{x}_{kj}))^2 \\\\ = & (x_{ij} - \\bar{x}_{kj})^2 - 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + (x_{i'j} - \\bar{x}_{kj})^2 \\end{align}\\] Note that the first term is independent of \\(i'\\) and the last is independent of \\(i\\). Therefore, (12.18) can be written as: \\[\\begin{align} \\frac{1}{|C_k|}\\sum_{i,i' \in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = & \\frac{1}{|C_k|}\\sum_{i,i' \in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{1}{|C_k|}\\sum_{i,i' \in C_k}\\sum_{j=1}^p 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{1}{|C_k|}\\sum_{i,i' \in C_k}\\sum_{j=1}^p (x_{i'j} - \\bar{x}_{kj})^2 \\\\ = & \\frac{|C_k|}{|C_k|}\\sum_{i \in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{2}{|C_k|}\\sum_{i,i' \in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{|C_k|}{|C_k|}\\sum_{i \in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\\\ = & 2 \\sum_{i \in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\end{align}\\] Note that we can drop the term containing \\((x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj})\\) since this is 0 when summed over combinations of \\(i\\) and \\(i'\\) for a given \\(j\\). On the basis of this identity, argue that the \\(K\\)-means clustering algorithm (Algorithm 12.2) decreases the objective (12.17) at each iteration. Equation (12.18) demonstrates that the Euclidean distance between each possible pair of samples can be related to the difference from each sample to the mean of the cluster. The \\(K\\)-means algorithm works by minimizing the Euclidean distance to each centroid, and thus also minimizes the within-cluster variance. 12.1.2 Question 2 Suppose that we have four observations, for which we compute a dissimilarity matrix, given by \\[\\begin{bmatrix} & 0.3 & 0.4 & 0.7 \\\\ 0.3 & & 0.5 & 0.8 \\\\ 0.4 & 0.5 & & 0.45 \\\\ 0.7 & 0.8 & 0.45 & \\\\ \\end{bmatrix}\\] For instance, the dissimilarity between the first and second observations is 0.3, and the dissimilarity between the second and fourth observations is 0.8. On the basis of this dissimilarity matrix, sketch the dendrogram that results from hierarchically clustering these four observations using complete linkage.
Be sure to indicate on the plot the height at which each fusion occurs, as well as the observations corresponding to each leaf in the dendrogram. m <- matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0., 0.45, 0.7, 0.8, 0.45, 0), ncol = 4) c1 <- hclust(as.dist(m), method = "complete") plot(c1) Repeat (a), this time using single linkage clustering. c2 <- hclust(as.dist(m), method = "single") plot(c2) Suppose that we cut the dendrogram obtained in (a) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c1, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 0 1 ## 4 0 1 Suppose that we cut the dendrogram obtained in (b) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c2, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 1 0 ## 4 0 1 It is mentioned in the chapter that at each fusion in the dendrogram, the position of the two clusters being fused can be swapped without changing the meaning of the dendrogram. Draw a dendrogram that is equivalent to the dendrogram in (a), for which two or more of the leaves are repositioned, but for which the meaning of the dendrogram is the same. plot(c1, labels = c(2, 1, 3, 4)) 12.1.3 Question 3 In this problem, you will perform \\(K\\)-means clustering manually, with \\(K = 2\\), on a small example with \\(n = 6\\) observations and \\(p = 2\\) features. The observations are as follows. Obs. \\(X_1\\) \\(X_2\\) 1 1 4 2 1 3 3 0 4 4 5 1 5 6 2 6 4 0 Plot the observations. library(ggplot2) d <- data.frame( x1 = c(1, 1, 0, 5, 6, 4), x2 = c(4, 3, 4, 1, 2, 0) ) ggplot(d, aes(x = x1, y = x2)) + geom_point() Randomly assign a cluster label to each observation. You can use the sample() command in R to do this. Report the cluster labels for each observation. set.seed(42) d$cluster <- sample(c(1, 2), size = nrow(d), replace = TRUE) Compute the centroid for each cluster. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) Assign each observation to the centroid to which it is closest, in terms of Euclidean distance. Report the cluster labels for each observation. dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) Repeat (c) and (d) until the answers obtained stop changing. centroids <- sapply(c(1,2), function(i) colMeans(d[d$cluster == i, 1:2])) dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) In this case, we get stable labels after the first iteration. In your plot from (a), color the observations according to the cluster labels obtained. ggplot(d, aes(x = x1, y = x2, color = factor(cluster))) + geom_point() 12.1.4 Question 4 Suppose that for a particular data set, we perform hierarchical clustering using single linkage and using complete linkage. We obtain two dendrograms. At a certain point on the single linkage dendrogram, the clusters {1, 2, 3} and {4, 5} fuse. On the complete linkage dendrogram, the clusters {1, 2, 3} and {4, 5} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? The complete linkage fusion will likely be higher in the tree since single linkage is defined as being the minimum distance between two clusters. However, there is a chance that they could be at the same height (so technically there is not enough information to tell). 
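We can illustrate (though not prove) this with the dissimilarity matrix from Question 2 above: the complete-linkage tree's fusion heights (0.3, 0.45, 0.8) are all at least as great as the single-linkage tree's (0.3, 0.4, 0.45).
# fusion heights of the complete (c1) and single (c2) linkage trees above
cbind(complete = c1$height, single = c2$height)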
At a certain point on the single linkage dendrogram, the clusters {5} and {6} fuse. On the complete linkage dendrogram, the clusters {5} and {6} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? They will fuse at the same height (the algorithm for calculating distance is the same when the clusters are of size 1). 12.1.5 Question 5 In words, describe the results that you would expect if you performed \\(K\\)-means clustering of the eight shoppers in Figure 12.16, on the basis of their sock and computer purchases, with \\(K = 2\\). Give three answers, one for each of the variable scalings displayed. Explain. In the case where the variables are scaled, we would expect the clusters to correspond to whether or not the shopper purchased a computer. In the first case (raw numbers of items purchased), we would expect the clusters to represent low vs. high numbers of sock purchases. In the third case (computer purchases measured in dollars), the dollar amounts dominate, so we would again expect the clusters to correspond to computer purchases. To test, we can run the analysis in R: set.seed(42) dat <- data.frame( socks = c(8, 11, 7, 6, 5, 6, 7, 8), computers = c(0, 0, 0, 0, 1, 1, 1, 1) ) kmeans(dat, 2)$cluster ## [1] 1 1 2 2 2 2 2 1 kmeans(scale(dat), 2)$cluster ## [1] 1 1 1 1 2 2 2 2 dat$computers <- dat$computers * 2000 kmeans(dat, 2)$cluster ## [1] 1 1 1 1 2 2 2 2 12.1.6 Question 6 We saw in Section 12.2.2 that the principal component loading and score vectors provide an approximation to a matrix, in the sense of (12.5). Specifically, the principal component score and loading vectors solve the optimization problem given in (12.6). Now, suppose that the M principal component score vectors zim, \\(m = 1,...,M\\), are known. Using (12.6), explain that the first \\(M\\) principal component loading vectors \\(\\phi_{jm}\\), \\(m = 1,...,M\\), can be obtained by performing \\(M\\) separate least squares linear regressions. In each regression, the principal component score vectors are the predictors, and one of the features of the data matrix is the response. With the scores held fixed, the objective in (12.6) is a sum of squared residuals that decomposes feature by feature: regressing each feature of the data matrix on the \\(M\\) score vectors (without an intercept) minimizes the corresponding part of (12.6), and the fitted coefficients are the loadings \\(\\phi_{jm}\\). 12.2 Applied 12.2.1 Question 7 In the chapter, we mentioned the use of correlation-based distance and Euclidean distance as dissimilarity measures for hierarchical clustering. It turns out that these two measures are almost equivalent: if each observation has been centered to have mean zero and standard deviation one, and if we let \\(r_{ij}\\) denote the correlation between the \\(i\\)th and \\(j\\)th observations, then the quantity \\(1 − r_{ij}\\) is proportional to the squared Euclidean distance between the ith and jth observations. On the USArrests data, show that this proportionality holds. Hint: The Euclidean distance can be calculated using the dist() function, and correlations can be calculated using the cor() function. dat <- t(scale(t(USArrests))) d1 <- dist(dat)^2 d2 <- as.dist(1 - cor(t(dat))) plot(d1, d2) 12.2.2 Question 8 In Section 12.2.3, a formula for calculating PVE was given in Equation 12.10. We also saw that the PVE can be obtained using the sdev output of the prcomp() function. On the USArrests data, calculate PVE in two ways: Using the sdev output of the prcomp() function, as was done in Section 12.2.3. pr <- prcomp(USArrests, scale = TRUE) pr$sdev^2 / sum(pr$sdev^2) ## [1] 0.62006039 0.24744129 0.08914080 0.04335752 By applying Equation 12.10 directly. That is, use the prcomp() function to compute the principal component loadings. Then, use those loadings in Equation 12.10 to obtain the PVE. These two approaches should give the same results.
colSums(pr$x^2) / sum(colSums(scale(USArrests)^2)) ## PC1 PC2 PC3 PC4 ## 0.62006039 0.24744129 0.08914080 0.04335752 Hint: You will only obtain the same results in (a) and (b) if the same data is used in both cases. For instance, if in (a) you performed prcomp() using centered and scaled variables, then you must center and scale the variables before applying Equation 12.10 in (b). 12.2.3 Question 9 Consider the USArrests data. We will now perform hierarchical clustering on the states. Using hierarchical clustering with complete linkage and Euclidean distance, cluster the states. set.seed(42) hc <- hclust(dist(USArrests), method = "complete") Cut the dendrogram at a height that results in three distinct clusters. Which states belong to which clusters? ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Hierarchically cluster the states using complete linkage and Euclidean distance, after scaling the variables to have standard deviation one. hc2 <- hclust(dist(scale(USArrests)), method = "complete") What effect does scaling the variables have on the hierarchical clustering obtained? In your opinion, should the variables be scaled before the inter-observation dissimilarities are computed? Provide a justification for your answer. ct2 <- cutree(hc2, 3) sapply(1:3, function(i) names(ct2)[ct2 == i]) Scaling results in different clusters, and the choice of whether to scale or not depends on the data in question. In this case, the variables are: Murder numeric Murder arrests (per 100,000) Assault numeric Assault arrests (per 100,000) UrbanPop numeric Percent urban population Rape numeric Rape arrests (per 100,000) These variables are not naturally on the same units, and the units involved are somewhat arbitrary (for example, Murder could be measured per 1 million rather than per 100,000), so in this case I would argue the data should be scaled. 12.2.4 Question 10 In this problem, you will generate simulated data, and then perform PCA and \\(K\\)-means clustering on the data. Generate a simulated data set with 20 observations in each of three classes (i.e. 60 observations total), and 50 variables.
Hint: There are a number of functions in R that you can use to generate data. One example is the rnorm() function; runif() is another option. Be sure to add a mean shift to the observations in each class so that there are three distinct classes. set.seed(42) data <- matrix(rnorm(60 * 50), ncol = 50) classes <- rep(c("A", "B", "C"), each = 20) dimnames(data) <- list(classes, paste0("v", 1:50)) data[classes == "B", 1:10] <- data[classes == "B", 1:10] + 1.2 data[classes == "C", 5:30] <- data[classes == "C", 5:30] + 1 Perform PCA on the 60 observations and plot the first two principal component score vectors. Use a different color to indicate the observations in each of the three classes. If the three classes appear separated in this plot, then continue on to part (c). If not, then return to part (a) and modify the simulation so that there is greater separation between the three classes. Do not continue to part (c) until the three classes show at least some separation in the first two principal component score vectors. pca <- prcomp(data) ggplot(data.frame(Class = classes, PC1 = pca$x[, 1], PC2 = pca$x[, 2]), aes(x = PC1, y = PC2, col = Class)) + geom_point() Perform \\(K\\)-means clustering of the observations with \\(K = 3\\). How well do the clusters that you obtained in \\(K\\)-means clustering compare to the true class labels? Hint: You can use the table() function in R to compare the true class labels to the class labels obtained by clustering. Be careful how you interpret the results: \\(K\\)-means clustering will arbitrarily number the clusters, so you cannot simply check whether the true class labels and clustering labels are the same. km <- kmeans(data, 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 0 0 19 ## 3 19 0 0 \\(K\\)-means separates out the clusters nearly perfectly. Perform \\(K\\)-means clustering with \\(K = 2\\). Describe your results. km <- kmeans(data, 2)$cluster table(km, names(km)) ## ## km A B C ## 1 18 20 1 ## 2 2 0 19 \\(K\\)-means effectively assigns cluster 2 to class C, while cluster 1 contains a mix of classes A and B. Now perform \\(K\\)-means clustering with \\(K = 4\\), and describe your results. km <- kmeans(data, 4)$cluster table(km, names(km)) ## ## km A B C ## 1 0 7 2 ## 2 18 1 0 ## 3 0 0 18 ## 4 2 12 0 \\(K\\)-means effectively assigns cluster 2 to class A and cluster 3 to class C, while class B is split between clusters 1 and 4. Now perform \\(K\\)-means clustering with \\(K = 3\\) on the first two principal component score vectors, rather than on the raw data. That is, perform \\(K\\)-means clustering on the \\(60 \times 2\\) matrix of which the first column is the first principal component score vector, and the second column is the second principal component score vector. Comment on the results. km <- kmeans(pca$x[, 1:2], 3)$cluster table(km, names(km)) ## ## km A B C ## 1 0 20 2 ## 2 20 0 0 ## 3 0 0 18 \\(K\\)-means again separates out the clusters nearly perfectly. Using the scale() function, perform \\(K\\)-means clustering with \\(K = 3\\) on the data after scaling each variable to have standard deviation one. How do these results compare to those obtained in (b)? Explain. km <- kmeans(scale(data), 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 19 0 0 ## 3 0 0 19 Scaling makes little difference here: \\(K\\)-means recovers essentially the same clusters as with the unscaled data (up to the arbitrary cluster numbering), since the simulated variables are already on comparable scales. 12.2.5 Question 11 Write an R function to perform matrix completion as in Algorithm 12.1, and as outlined in Section 12.5.2.
In each iteration, the function should keep track of the relative error, as well as the iteration count. Iterations should continue until the relative error is small enough or until some maximum number of iterations is reached (set a default value for this maximum number). Furthermore, there should be an option to print out the progress in each iteration. Test your function on the Boston data. First, standardize the features to have mean zero and standard deviation one using the scale() function. Run an experiment where you randomly leave out an increasing (and nested) number of observations from 5% to 30%, in steps of 5%. Apply Algorithm 12.1 with \\(M = 1,2,...,8\\). Display the approximation error as a function of the fraction of observations that are missing, and the value of \\(M\\), averaged over 10 repetitions of the experiment. 12.2.6 Question 12 In Section 12.5.2, Algorithm 12.1 was implemented using the svd() function. However, given the connection between the svd() function and the prcomp() function highlighted in the lab, we could have instead implemented the algorithm using prcomp(). Write a function to implement Algorithm 12.1 that makes use of prcomp() rather than svd().
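A minimal sketch of Algorithm 12.1, following the structure of the lab's svd()-based implementation (the function name and arguments here are our own):
# Algorithm 12.1: iterative low-rank matrix completion
# X: numeric matrix with NAs; M: rank of the approximation
fit_matrix_completion <- function(X, M = 1, max_iter = 100, tol = 1e-7, trace = FALSE) {
  ismiss <- is.na(X)
  Xhat <- X
  xbar <- colMeans(X, na.rm = TRUE)
  Xhat[ismiss] <- xbar[col(X)[ismiss]]         # step 1: mean-impute missing values
  mss0 <- mean(X[!ismiss]^2)
  mssold <- mean((scale(X, xbar, FALSE)[!ismiss])^2)
  rel_err <- Inf
  iter <- 0
  while (rel_err > tol && iter < max_iter) {
    iter <- iter + 1
    SVD <- svd(Xhat)                           # step 2(a): rank-M approximation
    approx <- SVD$u[, 1:M, drop = FALSE] %*%
      (SVD$d[1:M] * t(SVD$v[, 1:M, drop = FALSE]))
    Xhat[ismiss] <- approx[ismiss]             # step 2(b): update missing entries
    mss <- mean(((X - approx)[!ismiss])^2)     # step 2(c): MSE on observed entries
    rel_err <- (mssold - mss) / mss0
    mssold <- mss
    if (trace) cat("Iter:", iter, "MSS:", mss, "Rel. err:", rel_err, "\n")
  }
  Xhat
}
For Question 12, the svd() call could be swapped for prcomp() with centering turned off: with pr <- prcomp(Xhat, center = FALSE), the scores pr$x and loadings pr$rotation give the same rank-M approximation via pr$x[, 1:M, drop = FALSE] %*% t(pr$rotation[, 1:M, drop = FALSE]).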
12.2.7 Question 13 On the book website, www.StatLearning.com, there is a gene expression data set (Ch12Ex13.csv) that consists of 40 tissue samples with measurements on 1,000 genes. The first 20 samples are from healthy patients, while the second 20 are from a diseased group. Load in the data using read.csv(). You will need to select header = F. data <- read.csv("data/Ch12Ex13.csv", header = FALSE) colnames(data) <- c(paste0("H", 1:20), paste0("D", 1:20)) Apply hierarchical clustering to the samples using correlation-based distance, and plot the dendrogram. Do the genes separate the samples into the two groups? Do your results depend on the type of linkage used? hc.complete <- hclust(as.dist(1 - cor(data)), method = "complete") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "average") plot(hc.complete) hc.complete <- hclust(as.dist(1 - cor(data)), method = "single") plot(hc.complete) Yes, the samples clearly separate into the two groups, although the results depend somewhat on the linkage method used. In the case of average linkage, the disease samples all fall within a subset of the healthy samples. Your collaborator wants to know which genes differ the most across the two groups. Suggest a way to answer this question, and apply it here. This is probably best achieved with a supervised approach. A simple method would be to determine which genes show the most significant differences between the groups by applying a t-test to each gene. We can then select those with an adjusted p-value less than some given threshold (e.g. 0.05). class <- factor(rep(c("Healthy", "Diseased"), each = 20)) pvals <- p.adjust(apply(data, 1, function(v) t.test(v ~ class)$p.value)) which(pvals < 0.05) ## [1] 11 12 13 14 15 16 17 18 19 20 501 502 503 504 505 506 507 508 ## [19] 509 511 512 513 514 515 516 517 519 520 521 522 523 524 525 526 527 528 ## [37] 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 ## [55] 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 ## [73] 565 566 567 568 569 570 571 572 574 575 576 577 578 579 580 581 582 583 ## [91] 584 586 587 588 589 590 591 592 593 595 596 597 598 599 600 "],["multiple-testing.html", "13 Multiple Testing 13.1 Conceptual 13.2 Applied", " 13 Multiple Testing 13.1 Conceptual 13.1.1 Question 1 Suppose we test \\(m\\) null hypotheses, all of which are true. We control the Type I error for each null hypothesis at level \\(\\alpha\\). For each sub-problem, justify your answer. In total, how many Type I errors do we expect to make? We expect \\(m\\alpha\\). Suppose that the m tests that we perform are independent. What is the family-wise error rate associated with these m tests? Hint: If two events A and B are independent, then Pr(A ∩ B) = Pr(A) Pr(B). The family-wise error rate (FWER) is defined as the probability of making at least one Type I error. We can think of this as 1 minus the probability of no Type I errors, which is: \\(1 - (1 - \\alpha)^m\\) Alternatively, for two tests this is: Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For independent tests this is \\(\\alpha + \\alpha - \\alpha^2\\) Suppose that \\(m = 2\\), and that the p-values for the two tests are positively correlated, so that if one is small then the other will tend to be small as well, and if one is large then the other will tend to be large. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that the two p-values are perfectly correlated. If they were perfectly correlated, we would effectively be performing a single test (thus the FWER would be \\(\\alpha\\)). In the case when they are positively correlated, therefore, we can expect the FWER to be less than in (b). Alternatively, as above, FWER = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For perfectly positively correlated tests Pr(A ∩ B) = \\(\\alpha\\), so the FWER is \\(\\alpha\\), which is smaller than in (b). Suppose again that \\(m = 2\\), but that now the p-values for the two tests are negatively correlated, so that if one is large then the other will tend to be small. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that whenever one p-value is less than \\(\\alpha\\), then the other will be greater than \\(\\alpha\\). In other words, we can never reject both null hypotheses. Taking the equation above, for two tests, FWER = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). In the case considered in the hint Pr(A ∩ B) = 0, so Pr(A ∪ B) = \\(2\\alpha\\), which is larger than in (b). 13.1.2 Question 2 Suppose that we test \\(m\\) hypotheses, and control the Type I error for each hypothesis at level \\(\\alpha\\). Assume that all \\(m\\) p-values are independent, and that all null hypotheses are true. Let the random variable \\(A_j\\) equal 1 if the \\(j\\)th null hypothesis is rejected, and 0 otherwise. What is the distribution of \\(A_j\\)?
\\(A_j\\) follows a Bernoulli distribution with success probability \\(\\alpha\\) (the probability of rejecting a true null hypothesis): \\(A_j \\sim \\text{Bernoulli}(\\alpha)\\) What is the distribution of \\(\\sum_{j=1}^m A_j\\)? It follows a binomial distribution: \\(\\sum_{j=1}^m A_j \\sim \\text{Bin}(m, \\alpha)\\). What is the standard deviation of the number of Type I errors that we will make? The variance of a binomial is \\(npq\\), so for this situation the standard deviation would be \\(\\sqrt{m \\alpha (1-\\alpha)}\\). 13.1.3 Question 3 Suppose we test \\(m\\) null hypotheses, and control the Type I error for the \\(j\\)th null hypothesis at level \\(\\alpha_j\\), for \\(j=1,...,m\\). Argue that the family-wise error rate is no greater than \\(\\sum_{j=1}^m \\alpha_j\\). In general, \\(p(A \\cup B) = p(A) + p(B) - p(A \\cap B)\\), which equals \\(p(A) + p(B)\\) only when the two events are mutually exclusive. Since \\(p(A \\cap B)\\) is non-negative, \\(p(A \\cup B) \\leq p(A) + p(B)\\) (whether the events are independent or not). Extending this to \\(m\\) events (Boole's inequality), the probability of a Type I error in any of the \\(m\\) hypotheses can be no larger than the sum of the probabilities for each individual hypothesis (which is \\(\\alpha_j\\) for the \\(j\\)th). 13.1.4 Question 4 Suppose we test \\(m = 10\\) hypotheses, and obtain the p-values shown in Table 13.4. pvals <- c(0.0011, 0.031, 0.017, 0.32, 0.11, 0.90, 0.07, 0.006, 0.004, 0.0009) names(pvals) <- paste0("H", sprintf("%02d", 1:10)) Suppose that we wish to control the Type I error for each null hypothesis at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? names(which(pvals < 0.05)) ## [1] "H01" "H02" "H03" "H08" "H09" "H10" We reject all null hypotheses where \\(p < 0.05\\). Now suppose that we wish to control the FWER at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(pvals < 0.05 / 10)) ## [1] "H01" "H09" "H10" We reject all null hypotheses where \\(p < 0.005\\) (the Bonferroni threshold \\(\\alpha/m\\)). Now suppose that we wish to control the FDR at level \\(q = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.05)) ## [1] "H01" "H03" "H08" "H09" "H10" We reject all null hypotheses where \\(q < 0.05\\). Now suppose that we wish to control the FDR at level \\(q = 0.2\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "H01" "H02" "H03" "H05" "H07" "H08" "H09" "H10" We reject all null hypotheses where \\(q < 0.2\\). Of the null hypotheses rejected at FDR level \\(q = 0.2\\), approximately how many are false positives? Justify your answer. We expect 20% of the rejections (in this case about 2 of the 8) to be false positives. 13.1.5 Question 5 For this problem, you will make up p-values that lead to a certain number of rejections using the Bonferroni and Holm procedures. Give an example of five p-values (i.e. five numbers between 0 and 1 which, for the purpose of this problem, we will interpret as p-values) for which both Bonferroni’s method and Holm’s method reject exactly one null hypothesis when controlling the FWER at level 0.1. In this case, for Bonferroni, we need one p-value to be less than \\(0.1 / 5 = 0.02\\) and the others to be above it. For Holm’s method, we need the most significant p-value to be below \\(0.1/(5 + 1 - 1) = 0.02\\) also. An example would be: 1, 1, 1, 1, 0.001.
pvals <- c(1, 1, 1, 1, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 1 Now give an example of five p-values for which Bonferroni rejects one null hypothesis and Holm rejects more than one null hypothesis at level 0.1. An example would be: 1, 1, 1, 0.02, 0.001. For Holm’s method we reject two because \\(0.02 < 0.1/(5 + 1 - 2)\\). pvals <- c(1, 1, 1, 0.02, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 2 13.1.6 Question 6 For each of the three panels in Figure 13.3, answer the following questions: There are always: 8 positives (red) and 2 negatives (black). False / true positives are black / red points below the line respectively. False / true negatives are red / black points above the line respectively. Type I / II errors are the same as false positives and false negatives respectively. How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 1 7 2 0 1 3 0 5 3 2 0 5 How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 0 8 2 0 0 3 0 0 8 2 0 0 What is the false discovery rate associated with using the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? False discovery rate is the expected ratio of false positives to total positives. There are never any false positives (black points below the line). There are always the same number of total positives (8). For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. What is the false discovery rate associated with using the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? For panels 1, 2, 3 this would be 0/8, 0/8 and 0/8 respectively. How would the answers to (a) and (c) change if we instead used the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.001\\)? This would equate to a more stringent threshold. We would not call any more false positives, so the results would not change. 13.2 Applied 13.2.1 Question 7 This problem makes use of the Carseats dataset in the ISLR2 package. For each quantitative variable in the dataset besides Sales, fit a linear model to predict Sales using that quantitative variable. Report the p-values associated with the coefficients for the variables. That is, for each model of the form \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\), report the p-value associated with the coefficient \\(\\beta_1\\). Here, \\(Y\\) represents Sales and \\(X\\) represents one of the other quantitative variables. library(ISLR2) nm <- c("CompPrice", "Income", "Advertising", "Population", "Price", "Age") pvals <- sapply(nm, function(n) { summary(lm(Carseats[["Sales"]] ~ Carseats[[n]]))$coef[2, 4] }) Suppose we control the Type I error at level \\(\\alpha = 0.05\\) for the p-values obtained in (a). Which null hypotheses do we reject? names(which(pvals < 0.05)) ## [1] "Income" "Advertising" "Price" "Age" Now suppose we control the FWER at level 0.05 for the p-values. Which null hypotheses do we reject? names(which(pvals < 0.05 / length(nm))) ## [1] "Income" "Advertising" "Price" "Age" Finally, suppose we control the FDR at level 0.2 for the p-values. 
Which null hypotheses do we reject? names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "Income" "Advertising" "Price" "Age" 13.2.2 Question 8 In this problem, we will simulate data from \\(m = 100\\) fund managers. set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) These data represent each fund manager’s percentage returns for each of \\(n = 20\\) months. We wish to test the null hypothesis that each fund manager’s percentage returns have population mean equal to zero. Notice that we simulated the data in such a way that each fund manager’s percentage returns do have population mean zero; in other words, all \\(m\\) null hypotheses are true. Conduct a one-sample \\(t\\)-test for each fund manager, and plot a histogram of the \\(p\\)-values obtained. pvals <- apply(X, 2, function(p) t.test(p)$p.value) hist(pvals, main = NULL) If we control Type I error for each null hypothesis at level \\(\\alpha = 0.05\\), then how many null hypotheses do we reject? sum(pvals < 0.05) ## [1] 4 If we control the FWER at level 0.05, then how many null hypotheses do we reject? sum(pvals < 0.05 / length(pvals)) ## [1] 0 If we control the FDR at level 0.05, then how many null hypotheses do we reject? sum(p.adjust(pvals, "fdr") < 0.05) ## [1] 0 Now suppose we “cherry-pick” the 10 fund managers who perform the best in our data. If we control the FWER for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? If we control the FDR for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? best <- order(apply(X, 2, sum), decreasing = TRUE)[1:10] sum(pvals[best] < 0.05 / 10) ## [1] 1 sum(p.adjust(pvals[best], "fdr") < 0.05) ## [1] 1 Explain why the analysis in (e) is misleading. Hint: The standard approaches for controlling the FWER and FDR assume that all tested null hypotheses are adjusted for multiplicity, and that no “cherry-picking” of the smallest p-values has occurred. What goes wrong if we cherry-pick? This is misleading because we are not correctly accounting for all of the tests performed. Cherry-picking is similar to repeating a test until, by chance, we find a significant result. "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]] +[["index.html", "An Introduction to Statistical Learning Exercise solutions in R 1 Introduction", " An Introduction to Statistical Learning Exercise solutions in R 1 Introduction This bookdown document provides solutions for exercises in the book “An Introduction to Statistical Learning with Applications in R”, second edition, by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. "],["statistical-learning.html", "2 Statistical Learning 2.1 Conceptual 2.1.1 Question 1 For each of parts (a) through (d), indicate whether we would generally expect the performance of a flexible statistical learning method to be better or worse than an inflexible method. Justify your answer. The sample size \\(n\\) is extremely large, and the number of predictors \\(p\\) is small. Flexible best - with a very large \\(n\\) and few predictors, a flexible method can capture the signal without overfitting (the opposite of b). The number of predictors \\(p\\) is extremely large, and the number of observations \\(n\\) is small. Inflexible best - with few observations a flexible method would overfit, and there is a high chance of some predictors being spuriously associated.
The relationship between the predictors and response is highly non-linear. Flexible best - an inflexible method leads to high bias. The variance of the error terms, i.e. \\(\\sigma^2 = Var(\\epsilon)\\), is extremely high. Inflexible best - a flexible method would fit the noise (the opposite of c). 2.1.2 Question 2 Explain whether each scenario is a classification or regression problem, and indicate whether we are most interested in inference or prediction. Finally, provide \\(n\\) and \\(p\\). We collect a set of data on the top 500 firms in the US. For each firm we record profit, number of employees, industry and the CEO salary. We are interested in understanding which factors affect CEO salary. \\(n=500\\), \\(p=3\\), regression, inference. We are considering launching a new product and wish to know whether it will be a success or a failure. We collect data on 20 similar products that were previously launched. For each product we have recorded whether it was a success or failure, price charged for the product, marketing budget, competition price, and ten other variables. \\(n=20\\), \\(p=13\\), classification, prediction. We are interested in predicting the % change in the USD/Euro exchange rate in relation to the weekly changes in the world stock markets. Hence we collect weekly data for all of 2012. For each week we record the % change in the USD/Euro, the % change in the US market, the % change in the British market, and the % change in the German market. \\(n=52\\), \\(p=3\\), regression, prediction. 2.1.3 Question 3 We now revisit the bias-variance decomposition. Provide a sketch of typical (squared) bias, variance, training error, test error, and Bayes (or irreducible) error curves, on a single plot, as we go from less flexible statistical learning methods towards more flexible approaches. The x-axis should represent the amount of flexibility in the method, and the y-axis should represent the values for each curve. There should be five curves. Make sure to label each one. Explain why each of the five curves has the shape displayed in part (a). (squared) bias: Decreases with increasing flexibility (Generally, more flexible methods result in less bias). variance: Increases with increasing flexibility (In general, more flexible statistical methods have higher variance). training error: Decreases with model flexibility (More complex models will better fit the training data). test error: Decreases initially, then increases due to overfitting (the drop in bias is eventually outweighed by the rise in variance). Bayes (irreducible) error: fixed (does not change with the model). A schematic version of these five curves is sketched in code after Question 4 below. 2.1.4 Question 4 You will now think of some real-life applications for statistical learning. Describe three real-life applications in which classification might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Coffee machine cleaned? (day of week, person assigned), inference. Is a flight delayed? (airline, airport etc.), inference. Beer type (IPA, pilsner etc.), prediction. Describe three real-life applications in which regression might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. Amount of bonus paid (profitability, client feedback), prediction. Person’s height (age, sex, parents’ heights), prediction. House price (location, size, condition), inference. Describe three real-life applications in which cluster analysis might be useful. RNAseq tumour gene expression data. SNPs in human populations. Frequencies of mutations (with base pair context) in somatic mutation data.
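To make the Question 3 description concrete, here is a small schematic sketch in base R. The curve shapes are illustrative assumptions chosen to show the qualitative behaviour described above; they are not computed from any data or model.

``` r
# Schematic bias-variance curves; all shapes are assumed, not estimated.
flexibility <- seq(0, 1, length.out = 100)
sq_bias  <- (1 - flexibility)^2          # falls as flexibility grows
variance <- flexibility^2                # rises as flexibility grows
bayes    <- rep(0.25, 100)               # irreducible error: constant
test     <- sq_bias + variance + bayes   # U-shaped sum of the three
train    <- 0.9 * (1 - flexibility)^2    # falls monotonically
matplot(flexibility, cbind(sq_bias, variance, train, test, bayes),
        type = "l", lty = 1, col = 1:5,
        xlab = "Flexibility", ylab = "Error")
legend("top", c("squared bias", "variance", "training error",
                "test error", "Bayes error"), col = 1:5, lty = 1, cex = 0.8)
```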
2.1.5 Question 5 What are the advantages and disadvantages of a very flexible (versus a less flexible) approach for regression or classification? Under what circumstances might a more flexible approach be preferred to a less flexible approach? When might a less flexible approach be preferred? Inflexible is more interpretable, fewer observations required, can be biased. Flexible can overfit (high error variance). In cases where we have high \\(n\\) or non-linear patterns flexible will be preferred. 2.1.6 Question 6 Describe the differences between a parametric and a non-parametric statistical learning approach. What are the advantages of a parametric approach to regression or classification (as opposed to a non-parametric approach)? What are its disadvantages? Parametric uses (model) parameters. Parametric models can be more interpretable as there is a model behind how data is generated. However, the disadvantage is that the model might not reflect reality. If the model is too far from the truth, estimates will be poor and more flexible models can fit many different forms and require more parameters (leading to overfitting). Non-parametric approaches do not estimate a small number of parameters, so a large number of observations may be needed to obtain accurate estimates. 2.1.7 Question 7 The table below provides a training data set containing six observations, three predictors, and one qualitative response variable. Obs. \\(X_1\\) \\(X_2\\) \\(X_3\\) \\(Y\\) 1 0 3 0 Red 2 2 0 0 Red 3 0 1 3 Red 4 0 1 2 Green 5 -1 0 1 Green 6 1 1 1 Red Suppose we wish to use this data set to make a prediction for \\(Y\\) when \\(X_1 = X_2 = X_3 = 0\\) using \\(K\\)-nearest neighbors. Compute the Euclidean distance between each observation and the test point, \\(X_1 = X_2 = X_3 = 0\\). dat <- data.frame( "x1" = c(0, 2, 0, 0, -1, 1), "x2" = c(3, 0, 1, 1, 0, 1), "x3" = c(0, 0, 3, 2, 1, 1), "y" = c("Red", "Red", "Red", "Green", "Green", "Red") ) # Euclidean distance between points and c(0, 0, 0) dist <- sqrt(dat[["x1"]]^2 + dat[["x2"]]^2 + dat[["x3"]]^2) signif(dist, 3) ## [1] 3.00 2.00 3.16 2.24 1.41 1.73 What is our prediction with \\(K = 1\\)? Why? knn <- function(k) { names(which.max(table(dat[["y"]][order(dist)[1:k]]))) } knn(1) ## [1] "Green" Green (based on data point 5 only) What is our prediction with \\(K = 3\\)? Why? knn(3) ## [1] "Red" Red (based on data points 2, 5, 6) If the Bayes decision boundary in this problem is highly non-linear, then would we expect the best value for \\(K\\) to be large or small? Why? Small (high \\(k\\) leads to linear boundaries due to averaging) 2.2 Applied 2.2.1 Question 8 This exercise relates to the College data set, which can be found in the file College.csv. It contains a number of variables for 777 different universities and colleges in the US. 
The variables are Private : Public/private indicator Apps : Number of applications received Accept : Number of applicants accepted Enroll : Number of new students enrolled Top10perc : New students from top 10% of high school class Top25perc : New students from top 25% of high school class F.Undergrad : Number of full-time undergraduates P.Undergrad : Number of part-time undergraduates Outstate : Out-of-state tuition Room.Board : Room and board costs Books : Estimated book costs Personal : Estimated personal spending PhD : Percent of faculty with Ph.D.’s Terminal : Percent of faculty with terminal degree S.F.Ratio : Student/faculty ratio perc.alumni : Percent of alumni who donate Expend : Instructional expenditure per student Grad.Rate : Graduation rate Before reading the data into R, it can be viewed in Excel or a text editor. Use the read.csv() function to read the data into R. Call the loaded data college. Make sure that you have the directory set to the correct location for the data. college <- read.csv("data/College.csv") Look at the data using the View() function. You should notice that the first column is just the name of each university. We don’t really want R to treat this as data. However, it may be handy to have these names for later. Try the following commands: rownames(college) <- college[, 1] View(college) You should see that there is now a row.names column with the name of each university recorded. This means that R has given each row a name corresponding to the appropriate university. R will not try to perform calculations on the row names. However, we still need to eliminate the first column in the data where the names are stored. Try college <- college [, -1] View(college) Now you should see that the first data column is Private. Note that another column labeled row.names now appears before the Private column. However, this is not a data column but rather the name that R is giving to each row. rownames(college) <- college[, 1] college <- college[, -1] Use the summary() function to produce a numerical summary of the variables in the data set. Use the pairs() function to produce a scatterplot matrix of the first ten columns or variables of the data. Recall that you can reference the first ten columns of a matrix A using A[,1:10]. Use the plot() function to produce side-by-side boxplots of Outstate versus Private. Create a new qualitative variable, called Elite, by binning the Top10perc variable. We are going to divide universities into two groups based on whether or not the proportion of students coming from the top 10% of their high school classes exceeds 50%. > Elite <- rep("No", nrow(college)) > Elite[college$Top10perc > 50] <- "Yes" > Elite <- as.factor(Elite) > college <- data.frame(college, Elite) Use the summary() function to see how many elite universities there are. Now use the plot() function to produce side-by-side boxplots of Outstate versus Elite. Use the hist() function to produce some histograms with differing numbers of bins for a few of the quantitative variables. You may find the command par(mfrow=c(2,2)) useful: it will divide the print window into four regions so that four plots can be made simultaneously. Modifying the arguments to this function will divide the screen in other ways. Continue exploring the data, and provide a brief summary of what you discover. summary(college) ## Private Apps Accept Enroll ## Length:777 Min. : 81 Min. : 72 Min. 
: 35 ## Class :character 1st Qu.: 776 1st Qu.: 604 1st Qu.: 242 ## Mode :character Median : 1558 Median : 1110 Median : 434 ## Mean : 3002 Mean : 2019 Mean : 780 ## 3rd Qu.: 3624 3rd Qu.: 2424 3rd Qu.: 902 ## Max. :48094 Max. :26330 Max. :6392 ## Top10perc Top25perc F.Undergrad P.Undergrad ## Min. : 1.00 Min. : 9.0 Min. : 139 Min. : 1.0 ## 1st Qu.:15.00 1st Qu.: 41.0 1st Qu.: 992 1st Qu.: 95.0 ## Median :23.00 Median : 54.0 Median : 1707 Median : 353.0 ## Mean :27.56 Mean : 55.8 Mean : 3700 Mean : 855.3 ## 3rd Qu.:35.00 3rd Qu.: 69.0 3rd Qu.: 4005 3rd Qu.: 967.0 ## Max. :96.00 Max. :100.0 Max. :31643 Max. :21836.0 ## Outstate Room.Board Books Personal ## Min. : 2340 Min. :1780 Min. : 96.0 Min. : 250 ## 1st Qu.: 7320 1st Qu.:3597 1st Qu.: 470.0 1st Qu.: 850 ## Median : 9990 Median :4200 Median : 500.0 Median :1200 ## Mean :10441 Mean :4358 Mean : 549.4 Mean :1341 ## 3rd Qu.:12925 3rd Qu.:5050 3rd Qu.: 600.0 3rd Qu.:1700 ## Max. :21700 Max. :8124 Max. :2340.0 Max. :6800 ## PhD Terminal S.F.Ratio perc.alumni ## Min. : 8.00 Min. : 24.0 Min. : 2.50 Min. : 0.00 ## 1st Qu.: 62.00 1st Qu.: 71.0 1st Qu.:11.50 1st Qu.:13.00 ## Median : 75.00 Median : 82.0 Median :13.60 Median :21.00 ## Mean : 72.66 Mean : 79.7 Mean :14.09 Mean :22.74 ## 3rd Qu.: 85.00 3rd Qu.: 92.0 3rd Qu.:16.50 3rd Qu.:31.00 ## Max. :103.00 Max. :100.0 Max. :39.80 Max. :64.00 ## Expend Grad.Rate ## Min. : 3186 Min. : 10.00 ## 1st Qu.: 6751 1st Qu.: 53.00 ## Median : 8377 Median : 65.00 ## Mean : 9660 Mean : 65.46 ## 3rd Qu.:10830 3rd Qu.: 78.00 ## Max. :56233 Max. :118.00 college$Private <- college$Private == "Yes" pairs(college[, 1:10], cex = 0.2) plot(college$Outstate ~ factor(college$Private), xlab = "Private", ylab = "Outstate") college$Elite <- factor(ifelse(college$Top10perc > 50, "Yes", "No")) summary(college$Elite) ## No Yes ## 699 78 plot(college$Outstate ~ college$Elite, xlab = "Elite", ylab = "Outstate") par(mfrow = c(2,2)) for (n in c(5, 10, 20, 50)) { hist(college$Enroll, breaks = n, main = paste("n =", n), xlab = "Enroll") } chisq.test(college$Private, college$Elite) ## ## Pearson's Chi-squared test with Yates' continuity correction ## ## data: college$Private and college$Elite ## X-squared = 4.3498, df = 1, p-value = 0.03701 Whether a college is Private and Elite is not random! 2.2.2 Question 9 This exercise involves the Auto data set studied in the lab. Make sure that the missing values have been removed from the data. x <- read.table("data/Auto.data", header = TRUE, na.strings = "?") x <- na.omit(x) Which of the predictors are quantitative, and which are qualitative? sapply(x, class) ## mpg cylinders displacement horsepower weight acceleration ## "numeric" "integer" "numeric" "numeric" "numeric" "numeric" ## year origin name ## "integer" "integer" "character" numeric <- which(sapply(x, class) == "numeric") names(numeric) ## [1] "mpg" "displacement" "horsepower" "weight" "acceleration" What is the range of each quantitative predictor? You can answer this using the range() function. sapply(x[, numeric], function(x) diff(range(x))) ## mpg displacement horsepower weight acceleration ## 37.6 387.0 184.0 3527.0 16.8 What is the mean and standard deviation of each quantitative predictor? 
library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(knitr) x[, numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Mean = mean(value), SD = sd(value) ) |> kable() name Mean SD acceleration 15.54133 2.758864 displacement 194.41199 104.644004 horsepower 104.46939 38.491160 mpg 23.44592 7.805008 weight 2977.58418 849.402560 Now remove the 10th through 85th observations. What is the range, mean, and standard deviation of each predictor in the subset of the data that remains? x[-(10:85), numeric] |> pivot_longer(everything()) |> group_by(name) |> summarise( Range = diff(range(value)), Mean = mean(value), SD = sd(value) ) |> kable() name Range Mean SD acceleration 16.3 15.72690 2.693721 displacement 387.0 187.24051 99.678367 horsepower 184.0 100.72152 35.708853 mpg 35.6 24.40443 7.867283 weight 3348.0 2935.97152 811.300208 Using the full data set, investigate the predictors graphically, using scatterplots or other tools of your choice. Create some plots highlighting the relationships among the predictors. Comment on your findings. pairs(x[, numeric], cex = 0.2) cor(x[, numeric]) |> kable() mpg displacement horsepower weight acceleration mpg 1.0000000 -0.8051269 -0.7784268 -0.8322442 0.4233285 displacement -0.8051269 1.0000000 0.8972570 0.9329944 -0.5438005 horsepower -0.7784268 0.8972570 1.0000000 0.8645377 -0.6891955 weight -0.8322442 0.9329944 0.8645377 1.0000000 -0.4168392 acceleration 0.4233285 -0.5438005 -0.6891955 -0.4168392 1.0000000 heatmap(cor(x[, numeric]), cexRow = 1.1, cexCol = 1.1, margins = c(8, 8)) Many of the variables appear to be highly (positively or negatively) correlated with some relationships being non-linear. Suppose that we wish to predict gas mileage (mpg) on the basis of the other variables. Do your plots suggest that any of the other variables might be useful in predicting mpg? Justify your answer. Yes, since other variables are correlated. However, horsepower, weight and displacement are highly related. 2.2.3 Question 10 This exercise involves the Boston housing data set. To begin, load in the Boston data set. The Boston data set is part of the ISLR2 library in R. > library(ISLR2) Now the data set is contained in the object Boston. > Boston Read about the data set: > ?Boston How many rows are in this data set? How many columns? What do the rows and columns represent? library(ISLR2) dim(Boston) ## [1] 506 13 Make some pairwise scatterplots of the predictors (columns) in this data set. Describe your findings. library(ggplot2) library(tidyverse) ggplot(Boston, aes(nox, rm)) + geom_point() ggplot(Boston, aes(ptratio, rm)) + geom_point() heatmap(cor(Boston, method = "spearman"), cexRow = 1.1, cexCol = 1.1) Are any of the predictors associated with per capita crime rate? If so, explain the relationship. Yes Do any of the census tracts of Boston appear to have particularly high crime rates? Tax rates? Pupil-teacher ratios? Comment on the range of each predictor. 
Boston |> pivot_longer(cols = 1:13) |> filter(name %in% c("crim", "tax", "ptratio")) |> ggplot(aes(value)) + geom_histogram(bins = 20) + facet_wrap(~name, scales="free", ncol= 1) Yes, particularly crime and tax rates. How many of the census tracts in this data set bound the Charles river? sum(Boston$chas) ## [1] 35 What is the median pupil-teacher ratio among the towns in this data set? median(Boston$ptratio) ## [1] 19.05 Which census tract of Boston has lowest median value of owner-occupied homes? What are the values of the other predictors for that census tract, and how do those values compare to the overall ranges for those predictors? Comment on your findings. Boston[Boston$medv == min(Boston$medv), ] |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 399 38.3518 0 18.1 0 0.693 5.453 100 1.4896 24 666 20.2 30.59 5 406 67.9208 0 18.1 0 0.693 5.683 100 1.4254 24 666 20.2 22.98 5 sapply(Boston, quantile) |> kable() crim zn indus chas nox rm age dis rad tax ptratio lstat medv 0% 0.006320 0.0 0.46 0 0.385 3.5610 2.900 1.129600 1 187 12.60 1.730 5.000 25% 0.082045 0.0 5.19 0 0.449 5.8855 45.025 2.100175 4 279 17.40 6.950 17.025 50% 0.256510 0.0 9.69 0 0.538 6.2085 77.500 3.207450 5 330 19.05 11.360 21.200 75% 3.677083 12.5 18.10 0 0.624 6.6235 94.075 5.188425 24 666 20.20 16.955 25.000 100% 88.976200 100.0 27.74 1 0.871 8.7800 100.000 12.126500 24 711 22.00 37.970 50.000 In this data set, how many of the census tract average more than seven rooms per dwelling? More than eight rooms per dwelling? Comment on the census tracts that average more than eight rooms per dwelling. sum(Boston$rm > 7) ## [1] 64 sum(Boston$rm > 8) ## [1] 13 Let’s compare median statistics for those census tracts with more than eight rooms per dwelling on average, with the statistics for those with fewer. Boston |> mutate( `log(crim)` = log(crim), `log(zn)` = log(zn) ) |> select(-c(crim, zn)) |> pivot_longer(!rm) |> mutate(">8 rooms" = rm > 8) |> ggplot(aes(`>8 rooms`, value)) + geom_boxplot() + facet_wrap(~name, scales = "free") ## Warning: Removed 372 rows containing non-finite outside the scale range ## (`stat_boxplot()`). Census tracts with big average properties (more than eight rooms per dwelling) have higher median value (medv), a lower proportion of non-retail business acres (indus), a lower pupil-teacher ratio (ptratio), a lower status of the population (lstat) among other differences. "],["linear-regression.html", "3 Linear Regression 3.1 Conceptual 3.2 Applied", " 3 Linear Regression 3.1 Conceptual 3.1.1 Question 1 Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales, TV, radio, and newspaper, rather than in terms of the coefficients of the linear model. For intercept, that \\(\\beta_0 = 0\\) For the others, that \\(\\beta_n = 0\\) (for \\(n = 1, 2, 3\\)) We can conclude that that without any spending, there are still some sales (the intercept is not 0). Furthermore, we can conclude that money spent on TV and radio are significantly associated with increased sales, but the same cannot be said of newspaper spending. 3.1.2 Question 2 Carefully explain the differences between the KNN classifier and KNN regression methods. 
The KNN classifier predicts a qualitative response by assigning the most frequently observed category among the \\(K\\) nearest neighbors, whereas KNN regression predicts a quantitative response: the average of the response values of the \\(K\\) nearest neighbors. 3.1.3 Question 3 Suppose we have a data set with five predictors, \\(X_1\\) = GPA, \\(X_2\\) = IQ, \\(X_3\\) = Level (1 for College and 0 for High School), \\(X_4\\) = Interaction between GPA and IQ, and \\(X_5\\) = Interaction between GPA and Level. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get \\(\\hat\\beta_0 = 50\\), \\(\\hat\\beta_1 = 20\\), \\(\\hat\\beta_2 = 0.07\\), \\(\\hat\\beta_3 = 35\\), \\(\\hat\\beta_4 = 0.01\\), \\(\\hat\\beta_5 = -10\\). Which answer is correct, and why? For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates. For a fixed value of IQ and GPA, high school graduates earn more on average than college graduates provided that the GPA is high enough. For a fixed value of IQ and GPA, college graduates earn more on average than high school graduates provided that the GPA is high enough. The model is: \\(y = \\beta_0 + \\beta_1 \\text{GPA} + \\beta_2 \\text{IQ} + \\beta_3 \\text{Level} + \\beta_4 (\\text{GPA} \\times \\text{IQ}) + \\beta_5 (\\text{GPA} \\times \\text{Level})\\) Fixing IQ and GPA, changing Level from 0 to 1 will change the outcome by: \\(\\Delta y = \\beta_3 + \\beta_5 \\text{GPA}\\) Since \\(\\beta_5 = -10\\) is negative, dividing by it flips the inequality: \\(\\Delta y > 0 \\Rightarrow \\beta_3 + \\beta_5 \\text{GPA} > 0 \\Rightarrow \\text{GPA} < \\dfrac{-\\beta_3}{\\beta_5} = - \\dfrac{35}{-10} = 3.5\\) So college graduates earn more on average only while GPA is below 3.5; once GPA is high enough (above 3.5), high school graduates earn more. From a graphical standpoint: library(plotly) model <- function(gpa, iq, level) { 50 + gpa * 20 + iq * 0.07 + level * 35 + gpa * iq * 0.01 + gpa * level * -10 } x <- seq(1, 5, length = 10) y <- seq(1, 200, length = 20) college <- t(outer(x, y, model, level = 1)) high_school <- t(outer(x, y, model, level = 0)) plot_ly(x = x, y = y) |> add_surface( z = ~college, colorscale = list(c(0, 1), c("rgb(107,184,214)", "rgb(0,90,124)")), colorbar = list(title = "College")) |> add_surface( z = ~high_school, colorscale = list(c(0, 1), c("rgb(255,112,184)", "rgb(128,0,64)")), colorbar = list(title = "High school")) |> layout(scene = list( xaxis = list(title = "GPA"), yaxis = list(title = "IQ"), zaxis = list(title = "Salary"))) Option iii is correct. Predict the salary of a college graduate with IQ of 110 and a GPA of 4.0. model(gpa = 4, iq = 110, level = 1) ## [1] 137.1 True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer. This is false. It is important to remember that GPA and IQ vary over different scales. It is better to explicitly test the significance of the interaction effect, and/or visualize or quantify the effect on salary under realistic ranges of GPA/IQ values. 3.1.4 Question 4 I collect a set of data (\\(n = 100\\) observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. \\(Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon\\). Suppose that the true relationship between \\(X\\) and \\(Y\\) is linear, i.e. \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\).
Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (a) using test rather than training RSS. Though we could not be certain, the test RSS would likely be higher due to overfitting. Suppose that the true relationship between \\(X\\) and \\(Y\\) is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. You would expect the cubic regression to have lower RSS since it is at least as flexible as the linear regression. Answer (c) using test rather than training RSS. There is not enough information to tell, it depends on how non-linear the true relationship is. 3.1.5 Question 5 Consider the fitted values that result from performing linear regression without an intercept. In this setting, the ith fitted value takes the form \\[\\hat{y}_i = x_i\\hat\\beta,\\] where \\[\\hat{\\beta} = \\left(\\sum_{i=1}^nx_iy_i\\right) / \\left(\\sum_{i' = 1}^n x^2_{i'}\\right).\\] show that we can write \\[\\hat{y}_i = \\sum_{i' = 1}^na_{i'}y_{i'}\\] What is \\(a_{i'}\\)? Note: We interpret this result by saying that the fitted values from linear regression are linear combinations of the response values. \\[\\begin{align} \\hat{y}_i & = x_i \\frac{\\sum_{i=1}^nx_iy_i}{\\sum_{i' = 1}^n x^2_{i'}} \\\\ & = x_i \\frac{\\sum_{i'=1}^nx_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\frac{\\sum_{i'=1}^n x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}y_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} \\\\ & = \\sum_{i'=1}^n \\frac{ x_i x_{i'}}{\\sum_{i'' = 1}^n x^2_{i''}} y_{i'} \\end{align}\\] therefore, \\[a_{i'} = \\frac{ x_i x_{i'}}{\\sum x^2}\\] 3.1.6 Question 6 Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point \\((\\bar{x}, \\bar{y})\\). when \\(x = \\bar{x}\\) what is \\(y\\)? \\[\\begin{align} y &= \\hat\\beta_0 + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} - \\hat\\beta_1\\bar{x} + \\hat\\beta_1\\bar{x} \\\\ &= \\bar{y} \\end{align}\\] 3.1.7 Question 7 It is claimed in the text that in the case of simple linear regression of \\(Y\\) onto \\(X\\), the \\(R^2\\) statistic (3.17) is equal to the square of the correlation between \\(X\\) and \\(Y\\) (3.18). Prove that this is the case. For simplicity, you may assume that \\(\\bar{x} = \\bar{y} = 0\\). 
We have the following equations: \\[ R^2 = \\frac{\\textit{TSS} - \\textit{RSS}}{\\textit{TSS}} \\] \\[ Cor(x,y) = \\frac{\\sum_i (x_i-\\bar{x})(y_i - \\bar{y})}{\\sqrt{\\sum_i(x_i - \\bar{x})^2}\\sqrt{\\sum_i(y_i - \\bar{y})^2}} \\] As above, it's important to remember that \\(\\sum_i x_i = \\sum_j x_j\\) (the summation index is a dummy). When \\(\\bar{x} = \\bar{y} = 0\\), the squared correlation simplifies to \\[ Cor(x,y)^2 = \\frac{(\\sum_ix_iy_i)^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\] Also note that, since \\(\\bar{x} = \\bar{y} = 0\\) implies \\(\\hat\\beta_0 = \\bar{y} - \\hat\\beta_1\\bar{x} = 0\\): \\[\\hat{y}_i = \\hat\\beta_0 + \\hat\\beta_1x_i = x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}\\] Therefore, given that \\(RSS = \\sum_i(y_i - \\hat{y}_i)^2\\) and \\(\\textit{TSS} = \\sum_i(y_i - \\bar{y})^2 = \\sum_iy_i^2\\) \\[\\begin{align} R^2 &= \\frac{\\sum_iy_i^2 - \\sum_i(y_i - x_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2} {\\sum_iy_i^2} \\\\ &= \\frac{\\sum_iy_i^2 - \\sum_i( y_i^2 - 2y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} + x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2 )}{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2}) - \\sum_i(x_i^2 (\\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2})^2) }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\sum_i(y_ix_i) \\frac{\\sum_j{x_jy_j}}{\\sum_jx_j^2} - \\sum_i(x_i^2) \\frac{(\\sum_j{x_jy_j})^2}{(\\sum_jx_j^2)^2} }{\\sum_iy_i^2} \\\\ &= \\frac{ 2\\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} - \\frac{(\\sum_i{x_iy_i})^2}{\\sum_jx_j^2} }{\\sum_iy_i^2} \\\\ &= \\frac{(\\sum_i{x_iy_i})^2}{\\sum_ix_i^2 \\sum_iy_i^2} \\end{align}\\] 3.2 Applied 3.2.1 Question 8 This question involves the use of simple linear regression on the Auto data set. Use the lm() function to perform a simple linear regression with mpg as the response and horsepower as the predictor. Use the summary() function to print the results. Comment on the output. For example: Is there a relationship between the predictor and the response? How strong is the relationship between the predictor and the response? Is the relationship between the predictor and the response positive or negative? What is the predicted mpg associated with a horsepower of 98? What are the associated 95% confidence and prediction intervals? library(ISLR2) fit <- lm(mpg ~ horsepower, data = Auto) summary(fit) ## ## Call: ## lm(formula = mpg ~ horsepower, data = Auto) ## ## Residuals: ## Min 1Q Median 3Q Max ## -13.5710 -3.2592 -0.3435 2.7630 16.9240 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 39.935861 0.717499 55.66 <2e-16 *** ## horsepower -0.157845 0.006446 -24.49 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 4.906 on 390 degrees of freedom ## Multiple R-squared: 0.6059, Adjusted R-squared: 0.6049 ## F-statistic: 599.7 on 1 and 390 DF, p-value: < 2.2e-16 Yes, there is a significant relationship between the predictor and the response, and it is fairly strong (\\(R^2 \\approx 0.61\\)). For every unit increase in horsepower, mpg decreases by about 0.16 (a negative relationship). predict(fit, data.frame(horsepower = 98), interval = "confidence") ## fit lwr upr ## 1 24.46708 23.97308 24.96108 predict(fit, data.frame(horsepower = 98), interval = "prediction") ## fit lwr upr ## 1 24.46708 14.8094 34.12476 Plot the response and the predictor. Use the abline() function to display the least squares regression line. plot(Auto$horsepower, Auto$mpg, xlab = "horsepower", ylab = "mpg") abline(fit) Use the plot() function to produce diagnostic plots of the least squares regression fit. Comment on any problems you see with the fit. par(mfrow = c(2, 2)) plot(fit, cex = 0.2) The residuals show a trend with respect to the fitted values, suggesting a non-linear relationship.
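As an illustrative aside (not part of the exercise), the Question 7 identity can be checked numerically on the fit above; the identity in fact holds for any simple linear regression with an intercept, not only when \\(\\bar{x} = \\bar{y} = 0\\).

``` r
# Both values agree (approx. 0.606, the Multiple R-squared shown above)
c(summary(fit)$r.squared, cor(Auto$mpg, Auto$horsepower)^2)
```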
3.2.2 Question 9 This question involves the use of multiple linear regression on the Auto data set. Produce a scatterplot matrix which includes all of the variables in the data set. pairs(Auto, cex = 0.2) Compute the matrix of correlations between the variables using the function cor(). You will need to exclude the name variable, name which is qualitative. x <- subset(Auto, select = -name) cor(x) ## mpg cylinders displacement horsepower weight ## mpg 1.0000000 -0.7776175 -0.8051269 -0.7784268 -0.8322442 ## cylinders -0.7776175 1.0000000 0.9508233 0.8429834 0.8975273 ## displacement -0.8051269 0.9508233 1.0000000 0.8972570 0.9329944 ## horsepower -0.7784268 0.8429834 0.8972570 1.0000000 0.8645377 ## weight -0.8322442 0.8975273 0.9329944 0.8645377 1.0000000 ## acceleration 0.4233285 -0.5046834 -0.5438005 -0.6891955 -0.4168392 ## year 0.5805410 -0.3456474 -0.3698552 -0.4163615 -0.3091199 ## origin 0.5652088 -0.5689316 -0.6145351 -0.4551715 -0.5850054 ## acceleration year origin ## mpg 0.4233285 0.5805410 0.5652088 ## cylinders -0.5046834 -0.3456474 -0.5689316 ## displacement -0.5438005 -0.3698552 -0.6145351 ## horsepower -0.6891955 -0.4163615 -0.4551715 ## weight -0.4168392 -0.3091199 -0.5850054 ## acceleration 1.0000000 0.2903161 0.2127458 ## year 0.2903161 1.0000000 0.1815277 ## origin 0.2127458 0.1815277 1.0000000 Use the lm() function to perform a multiple linear regression with mpg as the response and all other variables except name as the predictors. Use the summary() function to print the results. Comment on the output. For instance: Is there a relationship between the predictors and the response? Which predictors appear to have a statistically significant relationship to the response? What does the coefficient for the year variable suggest? fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.5903 -2.1565 -0.1169 1.8690 13.0604 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -17.218435 4.644294 -3.707 0.00024 *** ## cylinders -0.493376 0.323282 -1.526 0.12780 ## displacement 0.019896 0.007515 2.647 0.00844 ** ## horsepower -0.016951 0.013787 -1.230 0.21963 ## weight -0.006474 0.000652 -9.929 < 2e-16 *** ## acceleration 0.080576 0.098845 0.815 0.41548 ## year 0.750773 0.050973 14.729 < 2e-16 *** ## origin 1.426141 0.278136 5.127 4.67e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.328 on 384 degrees of freedom ## Multiple R-squared: 0.8215, Adjusted R-squared: 0.8182 ## F-statistic: 252.4 on 7 and 384 DF, p-value: < 2.2e-16 Yes, there is a relationship between some predictors and response, notably “displacement” (positive), “weight” (negative), “year” (positive) and “origin” (positive). The coefficient for year (which is positive \\(~0.75\\)) suggests that mpg increases by about this amount every year on average. Use the plot() function to produce diagnostic plots of the linear regression fit. Comment on any problems you see with the fit. Do the residual plots suggest any unusually large outliers? Does the leverage plot identify any observations with unusually high leverage? par(mfrow = c(2, 2)) plot(fit, cex = 0.2) One point has high leverage, the residuals also show a trend with fitted values. Use the * and : symbols to fit linear regression models with interaction effects. Do any interactions appear to be statistically significant? summary(lm(mpg ~ . 
+ weight:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + weight:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.589 -1.617 -0.184 1.541 12.001 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.876e+00 4.511e+00 0.638 0.524147 ## cylinders -2.955e-02 2.881e-01 -0.103 0.918363 ## displacement 5.950e-03 6.750e-03 0.881 0.378610 ## horsepower -2.313e-01 2.363e-02 -9.791 < 2e-16 *** ## weight -1.121e-02 7.285e-04 -15.393 < 2e-16 *** ## acceleration -9.019e-02 8.855e-02 -1.019 0.309081 ## year 7.695e-01 4.494e-02 17.124 < 2e-16 *** ## origin 8.344e-01 2.513e-01 3.320 0.000986 *** ## horsepower:weight 5.529e-05 5.227e-06 10.577 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.931 on 383 degrees of freedom ## Multiple R-squared: 0.8618, Adjusted R-squared: 0.859 ## F-statistic: 298.6 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + acceleration:horsepower, data = x)) ## ## Call: ## lm(formula = mpg ~ . + acceleration:horsepower, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.0329 -1.8177 -0.1183 1.7247 12.4870 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -32.499820 4.923380 -6.601 1.36e-10 *** ## cylinders 0.083489 0.316913 0.263 0.792350 ## displacement -0.007649 0.008161 -0.937 0.349244 ## horsepower 0.127188 0.024746 5.140 4.40e-07 *** ## weight -0.003976 0.000716 -5.552 5.27e-08 *** ## acceleration 0.983282 0.161513 6.088 2.78e-09 *** ## year 0.755919 0.048179 15.690 < 2e-16 *** ## origin 1.035733 0.268962 3.851 0.000138 *** ## horsepower:acceleration -0.012139 0.001772 -6.851 2.93e-11 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.145 on 383 degrees of freedom ## Multiple R-squared: 0.841, Adjusted R-squared: 0.8376 ## F-statistic: 253.2 on 8 and 383 DF, p-value: < 2.2e-16 summary(lm(mpg ~ . + cylinders:weight, data = x)) ## ## Call: ## lm(formula = mpg ~ . + cylinders:weight, data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -10.9484 -1.7133 -0.1809 1.4530 12.4137 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 7.3143478 5.0076737 1.461 0.14494 ## cylinders -5.0347425 0.5795767 -8.687 < 2e-16 *** ## displacement 0.0156444 0.0068409 2.287 0.02275 * ## horsepower -0.0314213 0.0126216 -2.489 0.01322 * ## weight -0.0150329 0.0011125 -13.513 < 2e-16 *** ## acceleration 0.1006438 0.0897944 1.121 0.26306 ## year 0.7813453 0.0464139 16.834 < 2e-16 *** ## origin 0.8030154 0.2617333 3.068 0.00231 ** ## cylinders:weight 0.0015058 0.0001657 9.088 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.022 on 383 degrees of freedom ## Multiple R-squared: 0.8531, Adjusted R-squared: 0.8501 ## F-statistic: 278.1 on 8 and 383 DF, p-value: < 2.2e-16 There are at least three cases where the interactions appear to be highly significant. Try a few different transformations of the variables, such as \\(log(X)\\), \\(\\sqrt{X}\\), \\(X^2\\). Comment on your findings. Here I’ll just consider transformations for horsepower. 
par(mfrow = c(2, 2)) plot(Auto$horsepower, Auto$mpg, cex = 0.2) plot(log(Auto$horsepower), Auto$mpg, cex = 0.2) plot(sqrt(Auto$horsepower), Auto$mpg, cex = 0.2) plot(Auto$horsepower ^ 2, Auto$mpg, cex = 0.2) x <- subset(Auto, select = -name) x$horsepower <- log(x$horsepower) fit <- lm(mpg ~ ., data = x) summary(fit) ## ## Call: ## lm(formula = mpg ~ ., data = x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -9.3115 -2.0041 -0.1726 1.8393 12.6579 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 27.254005 8.589614 3.173 0.00163 ** ## cylinders -0.486206 0.306692 -1.585 0.11372 ## displacement 0.019456 0.006876 2.830 0.00491 ** ## horsepower -9.506436 1.539619 -6.175 1.69e-09 *** ## weight -0.004266 0.000694 -6.148 1.97e-09 *** ## acceleration -0.292088 0.103804 -2.814 0.00515 ** ## year 0.705329 0.048456 14.556 < 2e-16 *** ## origin 1.482435 0.259347 5.716 2.19e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 3.18 on 384 degrees of freedom ## Multiple R-squared: 0.837, Adjusted R-squared: 0.834 ## F-statistic: 281.6 on 7 and 384 DF, p-value: < 2.2e-16 par(mfrow = c(2, 2)) plot(fit, cex = 0.2) A log transformation of horsepower appears to give a more linear relationship with mpg. 3.2.3 Question 10 This question should be answered using the Carseats data set. Fit a multiple regression model to predict Sales using Price, Urban, and US. fit <- lm(Sales ~ Price + Urban + US, data = Carseats) Provide an interpretation of each coefficient in the model. Be careful—some of the variables in the model are qualitative! summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 Write out the model in equation form, being careful to handle the qualitative variables properly. \\[ \\textit{Sales} = 13 + -0.054 \\times \\textit{Price} + \\begin{cases} -0.022, & \\text{if $\\textit{Urban}$ is Yes, $\\textit{US}$ is No} \\\\ 1.20, & \\text{if $\\textit{Urban}$ is No, $\\textit{US}$ is Yes} \\\\ 1.18, & \\text{if $\\textit{Urban}$ and $\\textit{US}$ is Yes} \\\\ 0, & \\text{Otherwise} \\end{cases} \\] For which of the predictors can you reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? Price and US (Urban shows no significant difference between “No” and “Yes”) On the basis of your response to the previous question, fit a smaller model that only uses the predictors for which there is evidence of association with the outcome. fit2 <- lm(Sales ~ Price + US, data = Carseats) How well do the models in (a) and (e) fit the data? summary(fit) ## ## Call: ## lm(formula = Sales ~ Price + Urban + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9206 -1.6220 -0.0564 1.5786 7.0581 ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 13.043469 0.651012 20.036 < 2e-16 *** ## Price -0.054459 0.005242 -10.389 < 2e-16 *** ## UrbanYes -0.021916 0.271650 -0.081 0.936 ## USYes 1.200573 0.259042 4.635 4.86e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.472 on 396 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2335 ## F-statistic: 41.52 on 3 and 396 DF, p-value: < 2.2e-16 summary(fit2) ## ## Call: ## lm(formula = Sales ~ Price + US, data = Carseats) ## ## Residuals: ## Min 1Q Median 3Q Max ## -6.9269 -1.6286 -0.0574 1.5766 7.0515 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.03079 0.63098 20.652 < 2e-16 *** ## Price -0.05448 0.00523 -10.416 < 2e-16 *** ## USYes 1.19964 0.25846 4.641 4.71e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 2.469 on 397 degrees of freedom ## Multiple R-squared: 0.2393, Adjusted R-squared: 0.2354 ## F-statistic: 62.43 on 2 and 397 DF, p-value: < 2.2e-16 anova(fit, fit2) ## Analysis of Variance Table ## ## Model 1: Sales ~ Price + Urban + US ## Model 2: Sales ~ Price + US ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 396 2420.8 ## 2 397 2420.9 -1 -0.03979 0.0065 0.9357 They have similar \\(R^2\\) and the model containing the extra variable “Urban” is non-significantly better. Using the model from (e), obtain 95% confidence intervals for the coefficient(s). confint(fit2) ## 2.5 % 97.5 % ## (Intercept) 11.79032020 14.27126531 ## Price -0.06475984 -0.04419543 ## USYes 0.69151957 1.70776632 Is there evidence of outliers or high leverage observations in the model from (e)? par(mfrow = c(2, 2)) plot(fit2, cex = 0.2) Yes, somewhat. 3.2.4 Question 11 In this problem we will investigate the t-statistic for the null hypothesis \\(H_0 : \\beta = 0\\) in simple linear regression without an intercept. To begin, we generate a predictor x and a response y as follows. set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) set.seed(1) x <- rnorm(100) y <- 2 * x + rnorm(100) Perform a simple linear regression of y onto x, without an intercept. Report the coefficient estimate \\(\\hat{\\beta}\\), the standard error of this coefficient estimate, and the t-statistic and p-value associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. (You can perform regression without an intercept using the command lm(y~x+0).) fit <- lm(y ~ x + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## x 1.993876 0.1064767 18.72593 2.642197e-34 There’s a significant positive relationship between \\(y\\) and \\(x\\). \\(y\\) values are predicted to be (a little below) twice the \\(x\\) values. Now perform a simple linear regression of x onto y without an intercept, and report the coefficient estimate, its standard error, and the corresponding t-statistic and p-values associated with the null hypothesis \\(H_0 : \\beta = 0\\). Comment on these results. fit <- lm(x ~ y + 0) coef(summary(fit)) ## Estimate Std. Error t value Pr(>|t|) ## y 0.3911145 0.02088625 18.72593 2.642197e-34 There’s a significant positive relationship between \\(x\\) and \\(y\\). \\(x\\) values are predicted to be (a little below) half the \\(y\\) values. What is the relationship between the results obtained in (a) and (b)? Without error, the coefficients would be the inverse of each other (2 and 1/2). The t-statistic and p-values are the same. 
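As an illustrative aside (not part of the book's question), the product of the two no-intercept slope estimates equals \\((\\sum_i x_iy_i)^2 / (\\sum_i x_i^2 \\sum_i y_i^2)\\), which is at most 1 by the Cauchy-Schwarz inequality; this quantifies why each estimate is "a little below" the exact inverse of the other.

``` r
# Product of the two slope estimates; the two expressions are identical
b_yx <- coef(lm(y ~ x + 0))
b_xy <- coef(lm(x ~ y + 0))
c(b_yx * b_xy, sum(x * y)^2 / (sum(x^2) * sum(y^2)))
```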
For the regression of \\(Y\\) onto \\(X\\) without an intercept, the t-statistic for \\(H_0 : \\beta = 0\\) takes the form \\(\\hat{\\beta}/SE(\\hat{\\beta})\\), where \\(\\hat{\\beta}\\) is given by (3.38), and where \\[ SE(\\hat\\beta) = \\sqrt{\\frac{\\sum_{i=1}^n(y_i - x_i\\hat\\beta)^2}{(n-1)\\sum_{i'=1}^nx_{i'}^2}}. \\] (These formulas are slightly different from those given in Sections 3.1.1 and 3.1.2, since here we are performing regression without an intercept.) Show algebraically, and confirm numerically in R, that the t-statistic can be written as \\[ \\frac{\\sqrt{n-1} \\sum_{i=1}^nx_iy_i} {\\sqrt{(\\sum_{i=1}^nx_i^2)(\\sum_{i'=1}^ny_{i'}^2)-(\\sum_{i'=1}^nx_{i'}y_{i'})^2}} \\] \\[ \\beta = \\sum_i x_i y_i / \\sum_{i'} x_{i'}^2, \\] therefore \\[\\begin{align} t &= \\frac{\\sum_i x_i y_i \\sqrt{n-1} \\sqrt{\\sum_ix_i^2}} {\\sum_i x_i^2 \\sqrt{\\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_i(y_i - x_i \\beta)^2}} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_i(y_i^2 - 2 y_i x_i \\beta + x_i^2 \\beta^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\beta \\sum_ix_i^2 (2 \\sum_i y_i x_i -\\beta \\sum_i x_i^2) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{ \\sum_ix_i^2 \\sum_iy_i^2 - \\sum_i x_i y_i (2 \\sum_i y_i x_i - \\sum_i x_i y_i) }} \\\\ &= \\frac{\\sum_i x_i y_i \\sqrt{n-1}} {\\sqrt{\\sum_ix_i^2 \\sum_iy_i^2 - (\\sum_i x_i y_i)^2}} \\\\ \\end{align}\\] We can show this numerically in R by computing \\(t\\) using the above equation. n <- length(x) sqrt(n - 1) * sum(x * y) / sqrt(sum(x ^ 2) * sum(y ^ 2) - sum(x * y) ^ 2) ## [1] 18.72593 Using the results from (d), argue that the t-statistic for the regression of y onto x is the same as the t-statistic for the regression of x onto y. Swapping \\(x_i\\) for \\(y_i\\) in the formula for \\(t\\) will give the same result. In R, show that when regression is performed with an intercept, the t-statistic for \\(H_0 : \\beta_1 = 0\\) is the same for the regression of y onto x as it is for the regression of x onto y. coef(summary(lm(y ~ x))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.03769261 0.09698729 -0.3886346 6.983896e-01 ## x 1.99893961 0.10772703 18.5555993 7.723851e-34 coef(summary(lm(x ~ y))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.03880394 0.04266144 0.9095787 3.652764e-01 ## y 0.38942451 0.02098690 18.5555993 7.723851e-34 3.2.5 Question 12 This problem involves simple linear regression without an intercept. Recall that the coefficient estimate \\(\\hat{\\beta}\\) for the linear regression of \\(Y\\) onto \\(X\\) without an intercept is given by (3.38). Under what circumstance is the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\)? \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} x_{i'}^2 \\] The coefficient for the regression of X onto Y swaps the \\(x\\) and \\(y\\) variables: \\[ \\hat\\beta = \\sum_i x_iy_i / \\sum_{i'} y_{i'}^2 \\] So they are the same when \\(\\sum_{i} x_{i}^2 = \\sum_{i} y_{i}^2\\). Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is different from the coefficient estimate for the regression of \\(Y\\) onto \\(X\\).
x <- rnorm(100) y <- 2 * x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 105.9889 429.4924 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 2.0106218 0.4962439 Generate an example in R with \\(n = 100\\) observations in which the coefficient estimate for the regression of \\(X\\) onto \\(Y\\) is the same as the coefficient estimate for the regression of \\(Y\\) onto \\(X\\). x <- rnorm(100) y <- x + rnorm(100, 0, 0.1) c(sum(x^2), sum(y^2)) ## [1] 135.5844 134.5153 c(coef(lm(y ~ x))[2], coef(lm(x ~ y))[2]) ## x y ## 0.9925051 1.0006765 3.2.6 Question 13 In this exercise you will create some simulated data and will fit simple linear regression models to it. Make sure to use set.seed(1) prior to starting part (a) to ensure consistent results. set.seed(1) Using the rnorm() function, create a vector, x, containing 100 observations drawn from a \\(N(0, 1)\\) distribution. This represents a feature, \\(X\\). x <- rnorm(100, 0, 1) Using the rnorm() function, create a vector, eps, containing 100 observations drawn from a \\(N(0, 0.25)\\) distribution—a normal distribution with mean zero and variance 0.25. eps <- rnorm(100, 0, sqrt(0.25)) Using x and eps, generate a vector y according to the model \\[Y = -1 + 0.5X + \\epsilon\\] What is the length of the vector y? What are the values of \\(\\beta_0\\) and \\(\\beta_1\\) in this linear model? y <- -1 + 0.5 * x + eps length(y) ## [1] 100 \\(\\beta_0 = -1\\) and \\(\\beta_1 = 0.5\\) Create a scatterplot displaying the relationship between x and y. Comment on what you observe. plot(x, y) There is a linear relationship between \\(x\\) and \\(y\\) (with some error). Fit a least squares linear model to predict y using x. Comment on the model obtained. How do \\(\\hat\\beta_0\\) and \\(\\hat\\beta_1\\) compare to \\(\\beta_0\\) and \\(\\beta_1\\)? fit <- lm(y ~ x) summary(fit) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.93842 -0.30688 -0.06975 0.26970 1.17309 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.01885 0.04849 -21.010 < 2e-16 *** ## x 0.49947 0.05386 9.273 4.58e-15 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.4814 on 98 degrees of freedom ## Multiple R-squared: 0.4674, Adjusted R-squared: 0.4619 ## F-statistic: 85.99 on 1 and 98 DF, p-value: 4.583e-15 The estimates \\(\\hat\\beta_0\\) and \\(\\hat\\beta_1\\) are close to the population values \\(\\beta_0\\) and \\(\\beta_1\\). Display the least squares line on the scatterplot obtained in (d). Draw the population regression line on the plot, in a different color. Use the legend() command to create an appropriate legend. plot(x, y) abline(fit) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) Now fit a polynomial regression model that predicts y using x and x^2. Is there evidence that the quadratic term improves the model fit? Explain your answer. fit2 <- lm(y ~ poly(x, 2)) anova(fit2, fit) ## Analysis of Variance Table ## ## Model 1: y ~ poly(x, 2) ## Model 2: y ~ x ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 97 22.257 ## 2 98 22.709 -1 -0.45163 1.9682 0.1638 There is no evidence for an improved fit, since the F-test is non-significant. Repeat (a)–(f) after modifying the data generation process in such a way that there is less noise in the data. The model (3.39) should remain the same. You can do this by decreasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results.
x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, sqrt(0.05)) fit2 <- lm(y ~ x) summary(fit2) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -0.61308 -0.12553 -0.00391 0.15199 0.41332 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -0.98917 0.02216 -44.64 <2e-16 *** ## x 0.52375 0.02152 24.33 <2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.2215 on 98 degrees of freedom ## Multiple R-squared: 0.858, Adjusted R-squared: 0.8565 ## F-statistic: 592.1 on 1 and 98 DF, p-value: < 2.2e-16 plot(x, y) abline(fit2) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows less variability and the \\(R^2\\) is higher. Repeat (a)–(f) after modifying the data generation process in such a way that there is more noise in the data. The model (3.39) should remain the same. You can do this by increasing the variance of the normal distribution used to generate the error term \\(\\epsilon\\) in (b). Describe your results. x <- rnorm(100, 0, 1) y <- -1 + 0.5 * x + rnorm(100, 0, 1) fit3 <- lm(y ~ x) summary(fit3) ## ## Call: ## lm(formula = y ~ x) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.51014 -0.60549 0.02065 0.70483 2.08980 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.04745 0.09676 -10.825 < 2e-16 *** ## x 0.42505 0.08310 5.115 1.56e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.9671 on 98 degrees of freedom ## Multiple R-squared: 0.2107, Adjusted R-squared: 0.2027 ## F-statistic: 26.16 on 1 and 98 DF, p-value: 1.56e-06 plot(x, y) abline(fit3) abline(-1, 0.5, col = "red", lty = 2) legend("topleft", c("model fit", "population regression"), col = c("black", "red"), lty = c(1, 2) ) The data shows more variability. The \\(R^2\\) is lower. What are the confidence intervals for \\(\\beta_0\\) and \\(\\beta_1\\) based on the original data set, the noisier data set, and the less noisy data set? Comment on your results. confint(fit) ## 2.5 % 97.5 % ## (Intercept) -1.1150804 -0.9226122 ## x 0.3925794 0.6063602 confint(fit2) ## 2.5 % 97.5 % ## (Intercept) -1.033141 -0.9451916 ## x 0.481037 0.5664653 confint(fit3) ## 2.5 % 97.5 % ## (Intercept) -1.2394772 -0.8554276 ## x 0.2601391 0.5899632 The confidence intervals for the coefficients are narrower when there is less error. 3.2.7 Question 14 This problem focuses on the collinearity problem. Perform the following commands in R: > set.seed(1) > x1 <- runif(100) > x2 <- 0.5 * x1 + rnorm(100) / 10 > y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The last line corresponds to creating a linear model in which y is a function of x1 and x2. Write out the form of the linear model. What are the regression coefficients? set.seed(1) x1 <- runif(100) x2 <- 0.5 * x1 + rnorm(100) / 10 y <- 2 + 2 * x1 + 0.3 * x2 + rnorm(100) The model is of the form: \\[Y = \\beta_0 + \\beta_1X_1 + \\beta_2X_2 + \\epsilon\\] The coefficients are \\(\\beta_0 = 2\\), \\(\\beta_1 = 2\\), \\(\\beta_2 = 0.3\\). What is the correlation between x1 and x2? Create a scatterplot displaying the relationship between the variables. cor(x1, x2) ## [1] 0.8351212 plot(x1, x2) Using this data, fit a least squares regression to predict y using x1 and x2. Describe the results obtained. What are \\(\\hat\\beta_0\\), \\(\\hat\\beta_1\\), and \\(\\hat\\beta_2\\)?
How do these relate to the true \\(\\beta_0\\), \\(\\beta_1\\), and \\(\\beta_2\\)? Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? How about the null hypothesis \\(H_0 : \\beta_2 = 0\\)? summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8311 -0.7273 -0.0537 0.6338 2.3359 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1305 0.2319 9.188 7.61e-15 *** ## x1 1.4396 0.7212 1.996 0.0487 * ## x2 1.0097 1.1337 0.891 0.3754 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.056 on 97 degrees of freedom ## Multiple R-squared: 0.2088, Adjusted R-squared: 0.1925 ## F-statistic: 12.8 on 2 and 97 DF, p-value: 1.164e-05 \\(\\hat\\beta_0 = 2.13\\), \\(\\hat\\beta_1 = 1.43\\), and \\(\\hat\\beta_2 = 1.01\\). These are relatively poor estimates of the true values. We can (just) reject the null hypothesis \\(H_0 : \\beta_1 = 0\\) at the 0.05 level (\\(p = 0.049\\)). We cannot reject the null hypothesis \\(H_0 : \\beta_2 = 0\\). Now fit a least squares regression to predict y using only x1. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.89495 -0.66874 -0.07785 0.59221 2.45560 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.1124 0.2307 9.155 8.27e-15 *** ## x1 1.9759 0.3963 4.986 2.66e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.055 on 98 degrees of freedom ## Multiple R-squared: 0.2024, Adjusted R-squared: 0.1942 ## F-statistic: 24.86 on 1 and 98 DF, p-value: 2.661e-06 We can reject \\(H_0 : \\beta_1 = 0\\). The p-value for \\(\\beta_1\\) is much smaller than when x2 is included in the model. Now fit a least squares regression to predict y using only x2. Comment on your results. Can you reject the null hypothesis \\(H_0 : \\beta_1 = 0\\)? summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.62687 -0.75156 -0.03598 0.72383 2.44890 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3899 0.1949 12.26 < 2e-16 *** ## x2 2.8996 0.6330 4.58 1.37e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.072 on 98 degrees of freedom ## Multiple R-squared: 0.1763, Adjusted R-squared: 0.1679 ## F-statistic: 20.98 on 1 and 98 DF, p-value: 1.366e-05 Similarly, we can reject \\(H_0 : \\beta_2 = 0\\). The p-value for \\(\\beta_2\\) is much smaller than when x1 is included in the model. Do the results obtained in (c)–(e) contradict each other? Explain your answer. No, they do not contradict each other. Both x1 and x2 individually are capable of explaining much of the variation observed in y; however, since they are correlated, it is very difficult to tease apart their separate contributions. Now suppose we obtain one additional observation, which was unfortunately mismeasured. > x1 <- c(x1, 0.1) > x2 <- c(x2, 0.8) > y <- c(y, 6) Re-fit the linear models from (c) to (e) using this new data. What effect does this new observation have on each of the models? In each model, is this observation an outlier? A high-leverage point? Both? Explain your answers.
x1 <- c(x1 , 0.1) x2 <- c(x2 , 0.8) y <- c(y ,6) summary(lm(y ~ x1 + x2)) ## ## Call: ## lm(formula = y ~ x1 + x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.73348 -0.69318 -0.05263 0.66385 2.30619 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2267 0.2314 9.624 7.91e-16 *** ## x1 0.5394 0.5922 0.911 0.36458 ## x2 2.5146 0.8977 2.801 0.00614 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.075 on 98 degrees of freedom ## Multiple R-squared: 0.2188, Adjusted R-squared: 0.2029 ## F-statistic: 13.72 on 2 and 98 DF, p-value: 5.564e-06 summary(lm(y ~ x1)) ## ## Call: ## lm(formula = y ~ x1) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.8897 -0.6556 -0.0909 0.5682 3.5665 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.2569 0.2390 9.445 1.78e-15 *** ## x1 1.7657 0.4124 4.282 4.29e-05 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.111 on 99 degrees of freedom ## Multiple R-squared: 0.1562, Adjusted R-squared: 0.1477 ## F-statistic: 18.33 on 1 and 99 DF, p-value: 4.295e-05 summary(lm(y ~ x2)) ## ## Call: ## lm(formula = y ~ x2) ## ## Residuals: ## Min 1Q Median 3Q Max ## -2.64729 -0.71021 -0.06899 0.72699 2.38074 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 2.3451 0.1912 12.264 < 2e-16 *** ## x2 3.1190 0.6040 5.164 1.25e-06 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 1.074 on 99 degrees of freedom ## Multiple R-squared: 0.2122, Adjusted R-squared: 0.2042 ## F-statistic: 26.66 on 1 and 99 DF, p-value: 1.253e-06 par(mfrow = c(2, 2)) plot(lm(y ~ x1 + x2), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x1), cex = 0.2) par(mfrow = c(2, 2)) plot(lm(y ~ x2), cex = 0.2) In the first model (with both predictors), the new point has very high leverage (since it is an outlier in terms of the joint x1 and x2 distribution), however it is not an outlier. In the model that includes x1, it is an outlier but does not have high leverage. In the model that includes x2, it has high leverage but is not an outlier. It is useful to consider the scatterplot of x1 and x2. plot(x1, x2) points(0.1, 0.8, col = "red", pch = 19) 3.2.8 Question 15 This problem involves the Boston data set, which we saw in the lab for this chapter. We will now try to predict per capita crime rate using the other variables in this data set. In other words, per capita crime rate is the response, and the other variables are the predictors. We are trying to predict crim. pred <- subset(Boston, select = -crim) For each predictor, fit a simple linear regression model to predict the response. Describe your results. In which of the models is there a statistically significant association between the predictor and the response? Create some plots to back up your assertions. fits <- lapply(pred, function(x) lm(Boston$crim ~ x)) printCoefmat(do.call(rbind, lapply(fits, function(x) coef(summary(x))[2, ]))) ## Estimate Std. 
Error t value Pr(>|t|) ## zn -0.0739350 0.0160946 -4.5938 5.506e-06 *** ## indus 0.5097763 0.0510243 9.9908 < 2.2e-16 *** ## chas -1.8927766 1.5061155 -1.2567 0.2094 ## nox 31.2485312 2.9991904 10.4190 < 2.2e-16 *** ## rm -2.6840512 0.5320411 -5.0448 6.347e-07 *** ## age 0.1077862 0.0127364 8.4628 2.855e-16 *** ## dis -1.5509017 0.1683300 -9.2135 < 2.2e-16 *** ## rad 0.6179109 0.0343318 17.9982 < 2.2e-16 *** ## tax 0.0297423 0.0018474 16.0994 < 2.2e-16 *** ## ptratio 1.1519828 0.1693736 6.8014 2.943e-11 *** ## lstat 0.5488048 0.0477610 11.4907 < 2.2e-16 *** ## medv -0.3631599 0.0383902 -9.4597 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 There are significant associations for all predictors with the exception of chas when fitting separate linear models. For example, consider the following plot for the fifth model (the regression of crim onto rm). plot(Boston$rm, Boston$crim) abline(fits[[5]]) Fit a multiple regression model to predict the response using all of the predictors. Describe your results. For which predictors can we reject the null hypothesis \\(H_0 : \\beta_j = 0\\)? mfit <- lm(crim ~ ., data = Boston) summary(mfit) ## ## Call: ## lm(formula = crim ~ ., data = Boston) ## ## Residuals: ## Min 1Q Median 3Q Max ## -8.534 -2.248 -0.348 1.087 73.923 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 13.7783938 7.0818258 1.946 0.052271 . ## zn 0.0457100 0.0187903 2.433 0.015344 * ## indus -0.0583501 0.0836351 -0.698 0.485709 ## chas -0.8253776 1.1833963 -0.697 0.485841 ## nox -9.9575865 5.2898242 -1.882 0.060370 . ## rm 0.6289107 0.6070924 1.036 0.300738 ## age -0.0008483 0.0179482 -0.047 0.962323 ## dis -1.0122467 0.2824676 -3.584 0.000373 *** ## rad 0.6124653 0.0875358 6.997 8.59e-12 *** ## tax -0.0037756 0.0051723 -0.730 0.465757 ## ptratio -0.3040728 0.1863598 -1.632 0.103393 ## lstat 0.1388006 0.0757213 1.833 0.067398 . ## medv -0.2200564 0.0598240 -3.678 0.000261 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 6.46 on 493 degrees of freedom ## Multiple R-squared: 0.4493, Adjusted R-squared: 0.4359 ## F-statistic: 33.52 on 12 and 493 DF, p-value: < 2.2e-16 There are now only significant associations for zn, dis, rad and medv. How do your results from (a) compare to your results from (b)? Create a plot displaying the univariate regression coefficients from (a) on the \\(x\\)-axis, and the multiple regression coefficients from (b) on the \\(y\\)-axis. That is, each predictor is displayed as a single point in the plot. Its coefficient in a simple linear regression model is shown on the x-axis, and its coefficient estimate in the multiple linear regression model is shown on the y-axis. The results from (b) show reduced significance compared to the models fit in (a). plot(sapply(fits, function(x) coef(x)[2]), coef(mfit)[-1], xlab = "Univariate regression", ylab = "Multiple regression") The estimated coefficients differ (in particular the estimated coefficient for nox is dramatically different) between the two modelling strategies. Is there evidence of non-linear association between any of the predictors and the response? To answer this question, for each predictor X, fit a model of the form \\[ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon \\] pred <- subset(pred, select = -chas) fits <- lapply(names(pred), function(p) { f <- paste0("crim ~ poly(", p, ", 3)") lm(as.formula(f), data = Boston) }) for (fit in fits) printCoefmat(coef(summary(fit))) ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) 3.61352 0.37219 9.7088 < 2.2e-16 *** ## poly(zn, 3)1 -38.74984 8.37221 -4.6284 4.698e-06 *** ## poly(zn, 3)2 23.93983 8.37221 2.8594 0.004421 ** ## poly(zn, 3)3 -10.07187 8.37221 -1.2030 0.229539 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3300 10.9501 < 2.2e-16 *** ## poly(indus, 3)1 78.5908 7.4231 10.5873 < 2.2e-16 *** ## poly(indus, 3)2 -24.3948 7.4231 -3.2863 0.001086 ** ## poly(indus, 3)3 -54.1298 7.4231 -7.2920 1.196e-12 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32157 11.2370 < 2.2e-16 *** ## poly(nox, 3)1 81.37202 7.23361 11.2492 < 2.2e-16 *** ## poly(nox, 3)2 -28.82859 7.23361 -3.9854 7.737e-05 *** ## poly(nox, 3)3 -60.36189 7.23361 -8.3446 6.961e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.6135 0.3703 9.7584 < 2.2e-16 *** ## poly(rm, 3)1 -42.3794 8.3297 -5.0878 5.128e-07 *** ## poly(rm, 3)2 26.5768 8.3297 3.1906 0.001509 ** ## poly(rm, 3)3 -5.5103 8.3297 -0.6615 0.508575 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.34852 10.3683 < 2.2e-16 *** ## poly(age, 3)1 68.18201 7.83970 8.6970 < 2.2e-16 *** ## poly(age, 3)2 37.48447 7.83970 4.7814 2.291e-06 *** ## poly(age, 3)3 21.35321 7.83970 2.7237 0.00668 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.32592 11.0870 < 2.2e-16 *** ## poly(dis, 3)1 -73.38859 7.33148 -10.0101 < 2.2e-16 *** ## poly(dis, 3)2 56.37304 7.33148 7.6892 7.870e-14 *** ## poly(dis, 3)3 -42.62188 7.33148 -5.8135 1.089e-08 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29707 12.1639 < 2.2e-16 *** ## poly(rad, 3)1 120.90745 6.68240 18.0934 < 2.2e-16 *** ## poly(rad, 3)2 17.49230 6.68240 2.6177 0.009121 ** ## poly(rad, 3)3 4.69846 6.68240 0.7031 0.482314 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.30468 11.8599 < 2.2e-16 *** ## poly(tax, 3)1 112.64583 6.85371 16.4358 < 2.2e-16 *** ## poly(tax, 3)2 32.08725 6.85371 4.6817 3.665e-06 *** ## poly(tax, 3)3 -7.99681 6.85371 -1.1668 0.2439 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.36105 10.0084 < 2.2e-16 *** ## poly(ptratio, 3)1 56.04523 8.12158 6.9008 1.565e-11 *** ## poly(ptratio, 3)2 24.77482 8.12158 3.0505 0.002405 ** ## poly(ptratio, 3)3 -22.27974 8.12158 -2.7433 0.006301 ** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.33917 10.6540 <2e-16 *** ## poly(lstat, 3)1 88.06967 7.62944 11.5434 <2e-16 *** ## poly(lstat, 3)2 15.88816 7.62944 2.0825 0.0378 * ## poly(lstat, 3)3 -11.57402 7.62944 -1.5170 0.1299 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 3.61352 0.29203 12.374 < 2.2e-16 *** ## poly(medv, 3)1 -75.05761 6.56915 -11.426 < 2.2e-16 *** ## poly(medv, 3)2 88.08621 6.56915 13.409 < 2.2e-16 *** ## poly(medv, 3)3 -48.03343 6.56915 -7.312 1.047e-12 *** ## --- ## Signif. 
codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Yes, there is strong evidence that many variables have non-linear associations. In many cases, the addition of a cubic term is significant (indus, nox, age, dis, ptratio and medv). In other cases, although the cubic term is not significant, the squared term is (zn, rm, rad, tax, lstat). Thus every predictor tested here shows evidence of at least a quadratic relationship with crim (the binary variable chas was excluded from this analysis). "],["classification.html", "4 Classification 4.1 Conceptual 4.2 Applied", " 4 Classification 4.1 Conceptual 4.1.1 Question 1 Using a little bit of algebra, prove that (4.2) is equivalent to (4.3). In other words, the logistic function representation and logit representation for the logistic regression model are equivalent. We need to show that \\[ p(X) = \\frac{e^{\\beta_0 + \\beta_1X}}{1 + e^{\\beta_0 + \\beta_1X}} \\] is equivalent to \\[ \\frac{p(X)}{1-p(X)} = e^{\\beta_0 + \\beta_1X} \\] Letting \\(x = e^{\\beta_0 + \\beta_1X}\\): \\[\\begin{align} \\frac{p(X)}{1-p(X)} &= \\frac{\\frac{x}{1 + x}} {1 - \\frac{x}{1 + x}} \\\\ &= \\frac{\\frac{x}{1 + x}} {\\frac{1}{1 + x}} \\\\ &= x \\end{align}\\] 4.1.2 Question 2 It was stated in the text that classifying an observation to the class for which (4.12) is largest is equivalent to classifying an observation to the class for which (4.13) is largest. Prove that this is the case. In other words, under the assumption that the observations in the \\(k\\)th class are drawn from a \\(N(\\mu_k,\\sigma^2)\\) distribution, the Bayes’ classifier assigns an observation to the class for which the discriminant function is maximized. Equation 4.12 is \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi}\\sigma} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2)} {\\sum_{l=1}^K \\pi_l\\frac{1}{\\sqrt{2\\pi}\\sigma} \\exp(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2)} \\] and the discriminant function is \\[ \\delta_k(x) = x \\cdot \\frac{\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} + \\log(\\pi_k) \\] Since \\(\\sigma^2\\) is constant, \\[ p_k(x) = \\frac{\\pi_k \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_k)^2\\right)} {\\sum_{l=1}^K \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)} \\] Maximizing \\(p_k(x)\\) also maximizes any monotonically increasing function of \\(p_k(x)\\), and therefore, we can consider maximizing \\(\\log(p_k(x))\\): \\[ \\log(p_k(x)) = \\log(\\pi_k) - \\frac{1}{2\\sigma^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^K \\pi_l \\exp\\left(-\\frac{1}{2\\sigma^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) - \\frac{1}{2\\sigma^2} (x^2 - 2x\\mu_k + \\mu_k^2) \\\\ &= \\log(\\pi_k) - \\frac{x^2}{2\\sigma^2} + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\\\ \\end{align}\\] Since \\(\\frac{x^2}{2\\sigma^2}\\) is also independent of \\(k\\), we just need to maximize \\[ \\log(\\pi_k) + \\frac{x\\mu_k}{\\sigma^2} - \\frac{\\mu_k^2}{2\\sigma^2} \\] 4.1.3 Question 3 This problem relates to the QDA model, in which the observations within each class are drawn from a normal distribution with a class-specific mean vector and a class specific covariance matrix. We consider the simple case where \\(p = 1\\); i.e. there is only one feature. Suppose that we have \\(K\\) classes, and that if an observation belongs to the \\(k\\)th class then \\(X\\) comes from a one-dimensional normal distribution, \\(X \\sim N(\\mu_k,\\sigma_k^2)\\).
Recall that the density function for the one-dimensional normal distribution is given in (4.16). Prove that in this case, the Bayes classifier is not linear. Argue that it is in fact quadratic. Hint: For this problem, you should follow the arguments laid out in Section 4.4.1, but without making the assumption that \\(\\sigma_1^2 = ... = \\sigma_K^2\\). As above, \\[ p_k(x) = \\frac{\\pi_k\\frac{1}{\\sqrt{2\\pi}\\sigma_k} \\exp(-\\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2)} {\\sum_{l=1}^K \\pi_l\\frac{1}{\\sqrt{2\\pi}\\sigma_l} \\exp(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2)} \\] Now let’s derive the Bayes classifier, without assuming \\(\\sigma_1^2 = ... = \\sigma_K^2\\). Maximizing \\(p_k(x)\\) also maximizes any monotonically increasing function of \\(p_k(x)\\), and therefore, we can consider maximizing \\(\\log(p_k(x))\\): \\[ \\log(p_k(x)) = \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 - \\log\\left(\\sum_{l=1}^K \\frac{1}{\\sqrt{2\\pi}\\sigma_l} \\pi_l \\exp\\left(-\\frac{1}{2\\sigma_l^2}(x - \\mu_l)^2\\right)\\right) \\] Remember that we are maximizing over \\(k\\), and since the last term does not vary with \\(k\\) it can be ignored. So we just need to maximize \\[\\begin{align} f &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{1}{2\\sigma_k^2}(x - \\mu_k)^2 \\\\ &= \\log(\\pi_k) + \\log\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma_k}\\right) - \\frac{x^2}{2\\sigma_k^2} + \\frac{x\\mu_k}{\\sigma_k^2} - \\frac{\\mu_k^2}{2\\sigma_k^2} \\\\ \\end{align}\\] However, unlike in Q2, \\(\\frac{x^2}{2\\sigma_k^2}\\) is not independent of \\(k\\), so we retain the term with \\(x^2\\), hence \\(f\\), the Bayes’ classifier, is a quadratic function of \\(x\\). 4.1.4 Question 4 When the number of features \\(p\\) is large, there tends to be a deterioration in the performance of KNN and other local approaches that perform prediction using only observations that are near the test observation for which a prediction must be made. This phenomenon is known as the curse of dimensionality, and it ties into the fact that non-parametric approaches often perform poorly when \\(p\\) is large. We will now investigate this curse. Suppose that we have a set of observations, each with measurements on \\(p = 1\\) feature, \\(X\\). We assume that \\(X\\) is uniformly (evenly) distributed on \\([0, 1]\\). Associated with each observation is a response value. Suppose that we wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X\\) closest to that test observation. For instance, in order to predict the response for a test observation with \\(X = 0.6\\), we will use observations in the range \\([0.55, 0.65]\\). On average, what fraction of the available observations will we use to make the prediction? For values in \\(0...0.05\\), we use less than 10% of observations (between 5% and 10%, 7.5% on average), similarly with values in \\(0.95...1\\). For values in \\(0.05...0.95\\), we use 10% of available observations. The (weighted) average is then \\(7.5 \\times 0.1 + 10 \\times 0.9 = 9.75\\%\\). Now suppose that we have a set of observations, each with measurements on \\(p = 2\\) features, \\(X_1\\) and \\(X_2\\). We assume that \\((X_1, X_2)\\) are uniformly distributed on \\([0, 1] \\times [0, 1]\\). We wish to predict a test observation’s response using only observations that are within 10% of the range of \\(X_1\\) and within 10% of the range of \\(X_2\\) closest to that test observation.
For instance, in order to predict the response for a test observation with \\(X_1 = 0.6\\) and \\(X_2 = 0.35\\), we will use observations in the range \\([0.55, 0.65]\\) for \\(X_1\\) and in the range \\([0.3, 0.4]\\) for \\(X_2\\). On average, what fraction of the available observations will we use to make the prediction? Since we need the observation to be within range for \\(X_1\\) and \\(X_2\\) we square 9.75% = \\(0.0975^2 \\times 100 = 0.95\\%\\) Now suppose that we have a set of observations on \\(p = 100\\) features. Again the observations are uniformly distributed on each feature, and again each feature ranges in value from 0 to 1. We wish to predict a test observation’s response using observations within the 10% of each feature’s range that is closest to that test observation. What fraction of the available observations will we use to make the prediction? Similar to above, we use: \\(0.0975^{100} \\times 100 = 8 \\times 10^{-100}\\%\\), essentially zero. Using your answers to parts (a)–(c), argue that a drawback of KNN when \\(p\\) is large is that there are very few training observations “near” any given test observation. As \\(p\\) increases, the fraction of observations near any given point rapidly approaches zero. For instance, even if you use 50% of the nearest observations for each \\(p\\), with \\(p = 10\\), only \\(0.5^{10} \\times 100 \\approx 0.1\\%\\) points are “near”. Now suppose that we wish to make a prediction for a test observation by creating a \\(p\\)-dimensional hypercube centered around the test observation that contains, on average, 10% of the training observations. For \\(p = 1,2,\\) and \\(100\\), what is the length of each side of the hypercube? Comment on your answer. Note: A hypercube is a generalization of a cube to an arbitrary number of dimensions. When \\(p = 1\\), a hypercube is simply a line segment, when \\(p = 2\\) it is a square, and when \\(p = 100\\) it is a 100-dimensional cube. When \\(p = 1\\), clearly the length is 0.1. When \\(p = 2\\), we need the value \\(l\\) such that \\(l^2 = 0.1\\), so \\(l = \\sqrt{0.1} = 0.32\\). When \\(p = n\\), \\(l = 0.1^{1/n}\\), so in the case of \\(n = 100\\), \\(l = 0.98\\). Therefore, the length of each side of the hypercube rapidly approaches 1 (or 100%) of the range of each \\(p\\). 4.1.5 Question 5 We now examine the differences between LDA and QDA. If the Bayes decision boundary is linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will always perform better on the training set, but LDA would be expected to perform better on the test set. If the Bayes decision boundary is non-linear, do we expect LDA or QDA to perform better on the training set? On the test set? QDA, being a more flexible model, will perform better on the training set, and we would hope that extra flexibility translates to a better fit on the test set. In general, as the sample size \\(n\\) increases, do we expect the test prediction accuracy of QDA relative to LDA to improve, decline, or be unchanged? Why? As \\(n\\) increases, we would expect the prediction accuracy of QDA relative to LDA to improve as there is more data to fit to subtle effects in the data. True or False: Even if the Bayes decision boundary for a given problem is linear, we will probably achieve a superior test error rate using QDA rather than LDA because QDA is flexible enough to model a linear decision boundary. Justify your answer. False. QDA can overfit leading to poorer test performance. 
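To illustrate part (a) empirically, here is a minimal simulation sketch (the sim_err() helper, the sample sizes, and the data-generating process are illustrative assumptions, not from the text) comparing LDA and QDA test error when the true decision boundary is linear:
library(MASS)
set.seed(42)
sim_err <- function(n_train = 100, n_test = 100) {
  n <- n_train + n_test
  x1 <- rnorm(n)
  x2 <- rnorm(n)
  y <- factor(x1 + x2 + rnorm(n) > 0)  # linear Bayes decision boundary
  dat <- data.frame(x1, x2, y)
  train <- seq_len(n_train)
  c(
    lda = mean(predict(lda(y ~ x1 + x2, data = dat[train, ]), dat[-train, ])$class != y[-train]),
    qda = mean(predict(qda(y ~ x1 + x2, data = dat[train, ]), dat[-train, ])$class != y[-train])
  )
}
rowMeans(replicate(200, sim_err()))  # average test error over 200 simulations
Since QDA’s extra flexibility buys nothing when the boundary is truly linear, its average test error is typically slightly higher than LDA’s, consistent with the answers above.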
4.1.6 Question 6 Suppose we collect data for a group of students in a statistics class with variables \\(X_1 =\\) hours studied, \\(X_2 =\\) undergrad GPA, and \\(Y =\\) receive an A. We fit a logistic regression and produce estimated coefficient, \\(\\hat\\beta_0 = -6\\), \\(\\hat\\beta_1 = 0.05\\), \\(\\hat\\beta_2 = 1\\). Estimate the probability that a student who studies for 40h and has an undergrad GPA of 3.5 gets an A in the class. The logistic model is: \\[ \\log\\left(\\frac{p(X)}{1-p(X)}\\right) = -6 + 0.05X_1 + X_2 \\] or \\[ p(X) = \\frac{e^{-6 + 0.05X_1 + X_2}}{1 + e^{-6 + 0.05X_1 + X_2}} \\] When \\(X_1 = 40\\) and \\(X_2 = 3.5\\), the log odds are \\(-6 + 0.05 \\times 40 + 3.5 = -0.5\\), so \\(p(X) = e^{-0.5}/(1 + e^{-0.5}) = 0.38\\). How many hours would the student in part (a) need to study to have a 50% chance of getting an A in the class? We would like to solve for \\(X_1\\) where \\(p(X) = 0.5\\), i.e. where the log odds equal zero. Taking the first equation above, we need to solve \\(0 = −6 + 0.05X_1 + 3.5\\), so \\(X_1 = 50\\) hours. 4.1.7 Question 7 Suppose that we wish to predict whether a given stock will issue a dividend this year (“Yes” or “No”) based on \\(X\\), last year’s percent profit. We examine a large number of companies and discover that the mean value of \\(X\\) for companies that issued a dividend was \\(\\bar{X} = 10\\), while the mean for those that didn’t was \\(\\bar{X} = 0\\). In addition, the variance of \\(X\\) for these two sets of companies was \\(\\hat{\\sigma}^2 = 36\\). Finally, 80% of companies issued dividends. Assuming that \\(X\\) follows a normal distribution, predict the probability that a company will issue a dividend this year given that its percentage profit was \\(X = 4\\) last year. Hint: Recall that the density function for a normal random variable is \\(f(x) =\\frac{1}{\\sqrt{2\\pi\\sigma^2}}e^{-(x-\\mu)^2/2\\sigma^2}\\). You will need to use Bayes’ theorem. Value \\(v\\) for companies (D) issuing a dividend = \\(v_D \\sim \\mathcal{N}(10, 36)\\). Value \\(v\\) for companies (N) not issuing a dividend = \\(v_N \\sim \\mathcal{N}(0, 36)\\) and \\(p(D) = 0.8\\). We want to find \\(p(D|v)\\) and we can calculate \\(p(v|D)\\) from the Gaussian density function. Note that since \\(\\sigma^2\\) is equal between both classes, the term \\(\\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\) cancels. \\[\\begin{align} p(D|v) &= \\frac{p(v|D) p(D)}{p(v|D)p(D) + p(v|N)p(N)} \\\\ &= \\frac{\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2}} {\\pi_D e^{-(x-\\mu_D)^2/2\\sigma^2} + \\pi_N e^{-(x-\\mu_N)^2/2\\sigma^2}} \\\\ &= \\frac{0.8 \\times e^{-(4-10)^2/(2 \\times 36)}} {0.8 \\times e^{-(4-10)^2/(2 \\times 36)} + 0.2 \\times e^{-(4-0)^2/(2 \\times 36)}} \\\\ &= \\frac{0.8 e^{-1/2}}{0.8 e^{-1/2} + 0.2 e^{-2/9}} \\end{align}\\] exp(-0.5) * 0.8 / (exp(-0.5) * 0.8 + exp(-2/9) * 0.2) ## [1] 0.7518525 4.1.8 Question 8 Suppose that we take a data set, divide it into equally-sized training and test sets, and then try out two different classification procedures. First we use logistic regression and get an error rate of 20% on the training data and 30% on the test data. Next we use 1-nearest neighbors (i.e. \\(K = 1\\)) and get an average error rate (averaged over both test and training data sets) of 18%. Based on these results, which method should we prefer to use for classification of new observations? Why?
For \\(K = 1\\), performance on the training set is perfect and the error rate is zero, implying a test error rate of 36%. Logistic regression outperforms 1-nearest neighbor on the test set and therefore should be preferred. 4.1.9 Question 9 This problem has to do with odds. On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default? Odds is defined as \\(p/(1-p)\\). \\[0.37 = \\frac{p(x)}{1 - p(x)}\\] therefore, \\[p(x) = \\frac{0.37}{1 + 0.37} = 0.27\\] Suppose that an individual has a 16% chance of defaulting on her credit card payment. What are the odds that she will default? \\[0.16 / (1 - 0.16) = 0.19\\] 4.1.10 Question 10 Equation 4.32 derived an expression for \\(\\log(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)})\\) in the setting where \\(p > 1\\), so that the mean for the \\(k\\)th class, \\(\\mu_k\\), is a \\(p\\)-dimensional vector, and the shared covariance \\(\\Sigma\\) is a \\(p \\times p\\) matrix. However, in the setting with \\(p = 1\\), (4.32) takes a simpler form, since the means \\(\\mu_1, ..., \\mu_k\\) and the variance \\(\\sigma^2\\) are scalars. In this simpler setting, repeat the calculation in (4.32), and provide expressions for \\(a_k\\) and \\(b_{kj}\\) in terms of \\(\\pi_k, \\pi_K, \\mu_k, \\mu_K,\\) and \\(\\sigma^2\\). \\[\\begin{align*} \\log\\left(\\frac{Pr(Y=k|X=x)}{Pr(Y=K|X=x)}\\right) & = \\log\\left(\\frac{\\pi_k f_k(x)}{\\pi_K f_K(x)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k \\exp(-1/2((x-\\mu_k)/\\sigma)^2)}{\\pi_K \\exp(-1/2((x-\\mu_K)/\\sigma)^2)}\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2} \\left(\\frac{x-\\mu_k}{\\sigma}\\right)^2 + \\frac{1}{2} \\left(\\frac{x-\\mu_K}{\\sigma}\\right)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} (x-\\mu_k)^2 + \\frac{1}{2\\sigma^2} (x-\\mu_K)^2 \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left((x-\\mu_k)^2 - (x-\\mu_K)^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(x^2-2x\\mu_k+\\mu_k^2 - x^2 + 2x\\mu_K - \\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{1}{2\\sigma^2} \\left(2x(\\mu_K - \\mu_k) + \\mu_k^2 -\\mu_K^2\\right) \\\\ & = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2} + \\frac{x(\\mu_k - \\mu_K)}{\\sigma^2} \\end{align*}\\] Therefore, \\[a_k = \\log\\left(\\frac{\\pi_k}{\\pi_K}\\right) - \\frac{\\mu_k^2 -\\mu_K^2}{2\\sigma^2}\\] and \\[b_k = (\\mu_k - \\mu_K) / \\sigma^2\\] 4.1.11 Question 11 ToDo Work out the detailed forms of \\(a_k\\), \\(b_{kj}\\), and \\(b_{kjl}\\) in (4.33). Your answer should involve \\(\\pi_k\\), \\(\\pi_K\\), \\(\\mu_k\\), \\(\\mu_K\\), \\(\\Sigma_k\\), and \\(\\Sigma_K\\). 4.1.12 Question 12 Suppose that you wish to classify an observation \\(X \\in \\mathbb{R}\\) into apples and oranges. You fit a logistic regression model and find that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\beta_0 + \\hat\\beta_1x)}{1 + \\exp(\\hat\\beta_0 + \\hat\\beta_1x)} \\] Your friend fits a logistic regression model to the same data using the softmax formulation in (4.13), and finds that \\[ \\hat{Pr}(Y=orange|X=x) = \\frac{\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x)} {\\exp(\\hat\\alpha_{orange0} + \\hat\\alpha_{orange1}x) + \\exp(\\hat\\alpha_{apple0} + \\hat\\alpha_{apple1}x)} \\] What is the log odds of orange versus apple in your model? 
The log odds are just \\(\\hat\\beta_0 + \\hat\\beta_1x\\). What is the log odds of orange versus apple in your friend’s model? From 4.14, the log odds of our friend’s model are: \\[ (\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0}) + (\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1})x \\] Suppose that in your model, \\(\\hat\\beta_0 = 2\\) and \\(\\hat\\beta_1 = −1\\). What are the coefficient estimates in your friend’s model? Be as specific as possible. We can say that in our friend’s model \\(\\hat\\alpha_{orange0} - \\hat\\alpha_{apple0} = 2\\) and \\(\\hat\\alpha_{orange1} - \\hat\\alpha_{apple1} = -1\\). However, we are unable to determine the specific value of each parameter. Now suppose that you and your friend fit the same two models on a different data set. This time, your friend gets the coefficient estimates \\(\\hat\\alpha_{orange0} = 1.2\\), \\(\\hat\\alpha_{orange1} = −2\\), \\(\\hat\\alpha_{apple0} = 3\\), \\(\\hat\\alpha_{apple1} = 0.6\\). What are the coefficient estimates in your model? The coefficients in our model would be \\(\\hat\\beta_0 = 1.2 - 3 = -1.8\\) and \\(\\hat\\beta_1 = -2 - 0.6 = -2.6\\). Finally, suppose you apply both models from (d) to a data set with 2,000 test observations. What fraction of the time do you expect the predicted class labels from your model to agree with those from your friend’s model? Explain your answer. The models are identical up to parameterization, so their predicted class labels should agree 100% of the time. 4.2 Applied 4.2.1 Question 13 This question should be answered using the Weekly data set, which is part of the ISLR2 package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010. Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns? library(MASS) library(class) library(tidyverse) library(corrplot) library(ISLR2) library(e1071) summary(Weekly) ## Year Lag1 Lag2 Lag3 ## Min. :1990 Min. :-18.1950 Min. :-18.1950 Min. :-18.1950 ## 1st Qu.:1995 1st Qu.: -1.1540 1st Qu.: -1.1540 1st Qu.: -1.1580 ## Median :2000 Median : 0.2410 Median : 0.2410 Median : 0.2410 ## Mean :2000 Mean : 0.1506 Mean : 0.1511 Mean : 0.1472 ## 3rd Qu.:2005 3rd Qu.: 1.4050 3rd Qu.: 1.4090 3rd Qu.: 1.4090 ## Max. :2010 Max. : 12.0260 Max. : 12.0260 Max. : 12.0260 ## Lag4 Lag5 Volume Today ## Min. :-18.1950 Min. :-18.1950 Min. :0.08747 Min. :-18.1950 ## 1st Qu.: -1.1580 1st Qu.: -1.1660 1st Qu.:0.33202 1st Qu.: -1.1540 ## Median : 0.2380 Median : 0.2340 Median :1.00268 Median : 0.2410 ## Mean : 0.1458 Mean : 0.1399 Mean :1.57462 Mean : 0.1499 ## 3rd Qu.: 1.4090 3rd Qu.: 1.4050 3rd Qu.:2.05373 3rd Qu.: 1.4050 ## Max. : 12.0260 Max. : 12.0260 Max. :9.32821 Max. : 12.0260 ## Direction ## Down:484 ## Up :605 ## ## ## ## corrplot(cor(Weekly[, -9]), type = "lower", diag = FALSE, method = "ellipse") Volume is strongly positively correlated with Year. Other correlations are weak: Lag1 is negatively correlated with Lag2 but positively correlated with Lag3. Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones?
fit <- glm( Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data = Weekly, family = binomial ) summary(fit) ## ## Call: ## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + ## Volume, family = binomial, data = Weekly) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) 0.26686 0.08593 3.106 0.0019 ** ## Lag1 -0.04127 0.02641 -1.563 0.1181 ## Lag2 0.05844 0.02686 2.175 0.0296 * ## Lag3 -0.01606 0.02666 -0.602 0.5469 ## Lag4 -0.02779 0.02646 -1.050 0.2937 ## Lag5 -0.01447 0.02638 -0.549 0.5833 ## Volume -0.02274 0.03690 -0.616 0.5377 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 1496.2 on 1088 degrees of freedom ## Residual deviance: 1486.4 on 1082 degrees of freedom ## AIC: 1500.4 ## ## Number of Fisher Scoring iterations: 4 Only Lag2 is statistically significant (\\(p = 0.03\\)). Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression. contrasts(Weekly$Direction) ## Up ## Down 0 ## Up 1 pred <- predict(fit, type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly$Direction)) ## ## Down Up ## Down (pred) 54 48 ## Up (pred) 430 557 sum(diag(t)) / sum(t) ## [1] 0.5610652 The overall fraction of correct predictions is 0.56. Although logistic regression correctly predicts upwards movements well, it incorrectly predicts most downwards movements as up. Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). train <- Weekly$Year < 2009 fit <- glm(Direction ~ Lag2, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 (t <- table(ifelse(pred, "Up (pred)", "Down (pred)"), Weekly[!train, ]$Direction)) ## ## Down Up ## Down (pred) 9 5 ## Up (pred) 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using LDA. fit <- lda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 9 5 ## Up 34 56 sum(diag(t)) / sum(t) ## [1] 0.625 Repeat (d) using QDA. fit <- qda(Direction ~ Lag2, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 0 0 ## Up 43 61 sum(diag(t)) / sum(t) ## [1] 0.5865385 Repeat (d) using KNN with \\(K = 1\\). fit <- knn( Weekly[train, "Lag2", drop = FALSE], Weekly[!train, "Lag2", drop = FALSE], Weekly$Direction[train] ) (t <- table(fit, Weekly[!train, ]$Direction)) ## ## fit Down Up ## Down 21 30 ## Up 22 31 sum(diag(t)) / sum(t) ## [1] 0.5 Repeat (d) using naive Bayes. fit <- naiveBayes(Direction ~ Lag2, data = Weekly, subset = train) pred <- predict(fit, Weekly[!train, ], type = "class") (t <- table(pred, Weekly[!train, ]$Direction)) ## ## pred Down Up ## Down 27 29 ## Up 16 32 sum(diag(t)) / sum(t) ## [1] 0.5673077 Which of these methods appears to provide the best results on this data? Logistic regression and LDA are the best performing. Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods.
Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for \\(K\\) in the KNN classifier. fit <- glm(Direction ~ Lag1, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5673077 fit <- glm(Direction ~ Lag3, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5865385 fit <- glm(Direction ~ Lag1 * Lag2 * Lag3 * Lag4, data = Weekly[train, ], family = binomial) pred <- predict(fit, Weekly[!train, ], type = "response") > 0.5 mean(ifelse(pred, "Up", "Down") == Weekly[!train, ]$Direction) ## [1] 0.5961538 fit <- lda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4,data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5769231 fit <- qda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "response")$class mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5192308 fit <- naiveBayes(Direction ~ Lag1 + Lag2 + Lag3 + Lag4, data = Weekly[train, ]) pred <- predict(fit, Weekly[!train, ], type = "class") mean(pred == Weekly[!train, ]$Direction) ## [1] 0.5096154 set.seed(1) res <- sapply(1:30, function(k) { fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) mean(fit == Weekly[!train, ]$Direction) }) plot(1:30, res, type = "o", xlab = "k", ylab = "Fraction correct") (k <- which.max(res)) ## [1] 26 fit <- knn( Weekly[train, 2:4, drop = FALSE], Weekly[!train, 2:4, drop = FALSE], Weekly$Direction[train], k = k ) table(fit, Weekly[!train, ]$Direction) ## ## fit Down Up ## Down 23 18 ## Up 20 43 mean(fit == Weekly[!train, ]$Direction) ## [1] 0.6346154 KNN using the first 3 Lag variables performs marginally better than logistic regression with Lag2 if we tune \\(k\\) to be \\(k = 26\\). 4.2.2 Question 14 In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables. x <- cbind(Auto[, -1], data.frame("mpg01" = Auto$mpg > median(Auto$mpg))) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings. 
par(mfrow = c(2, 4)) for (i in 1:7) hist(x[, i], breaks = 20, main = colnames(x)[i]) par(mfrow = c(2, 4)) for (i in 1:7) boxplot(x[, i] ~ x$mpg01, main = colnames(x)[i]) pairs(x[, 1:7]) Most variables show an association with the mpg01 category, and several variables are collinear. Split the data into a training set and a test set. set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? sort(sapply(1:7, function(i) { setNames(abs(t.test(x[, i] ~ x$mpg01)$statistic), colnames(x)[i]) })) ## acceleration year origin horsepower displacement weight ## 7.302430 9.403221 11.824099 17.681939 22.632004 22.932777 ## cylinders ## 23.035328 fit <- lda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.1068702 Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- qda(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "response")$class mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- glm(mpg01 ~ cylinders + weight + displacement, data = x[train, ], family = binomial) pred <- predict(fit, x[-train, ], type = "response") > 0.5 mean(pred != x[-train, ]$mpg01) ## [1] 0.1145038 Perform naive Bayes on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained? fit <- naiveBayes(mpg01 ~ cylinders + weight + displacement, data = x[train, ]) pred <- predict(fit, x[-train, ], type = "class") mean(pred != x[-train, ]$mpg01) ## [1] 0.09923664 Perform KNN on the training data, with several values of \\(K\\), in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of \\(K\\) seems to perform the best on this data set? res <- sapply(1:50, function(k) { fit <- knn(x[train, c(1, 4, 2)], x[-train, c(1, 4, 2)], x$mpg01[train], k = k) mean(fit != x[-train, ]$mpg01) }) names(res) <- 1:50 plot(res, type = "o") res[which.min(res)] ## 3 ## 0.1068702 For the models tested here, \\(k = 3\\) appears to perform best. QDA and naive Bayes have the lowest error rates overall, performing slightly better than LDA. 4.2.3 Question 15 This problem involves writing functions. Write a function, Power(), that prints out the result of raising 2 to the 3rd power. In other words, your function should compute \\(2^3\\) and print out the results. Hint: Recall that x^a raises x to the power a. Use the print() function to output the result. Power <- function() print(2^3) Create a new function, Power2(), that allows you to pass any two numbers, x and a, and prints out the value of x^a. You can do this by beginning your function with the line > Power2=function(x,a) { You should be able to call your function by entering, for instance, > Power2(3, 8) on the command line. This should output the value of \\(3^8\\), namely, 6,561.
Power2 <- function(x, a) print(x^a) Using the Power2() function that you just wrote, compute \\(10^3\\), \\(8^{17}\\), and \\(131^3\\). c(Power2(10, 3), Power2(8, 17), Power2(131, 3)) ## [1] 1000 ## [1] 2.2518e+15 ## [1] 2248091 ## [1] 1.000000e+03 2.251800e+15 2.248091e+06 Now create a new function, Power3(), that actually returns the result x^a as an R object, rather than simply printing it to the screen. That is, if you store the value x^a in an object called result within your function, then you can simply return() this result, using the following line: > return(result) The line above should be the last line in your function, before the } symbol. Power3 <- function(x, a) { result <- x^a return(result) } Now using the Power3() function, create a plot of \\(f(x) = x^2\\). The \\(x\\)-axis should display a range of integers from 1 to 10, and the \\(y\\)-axis should display \\(x^2\\). Label the axes appropriately, and use an appropriate title for the figure. Consider displaying either the \\(x\\)-axis, the \\(y\\)-axis, or both on the log-scale. You can do this by using log = \"x\", log = \"y\", or log = \"xy\" as arguments to the plot() function. plot(1:10, Power3(1:10, 2), xlab = "x", ylab = expression(paste("x"^"2")), log = "y" ) Create a function, PlotPower(), that allows you to create a plot of x against x^a for a fixed a and for a range of values of x. For instance, if you call > PlotPower(1:10, 3) then a plot should be created with an \\(x\\)-axis taking on values \\(1,2,...,10\\), and a \\(y\\)-axis taking on values \\(1^3,2^3,...,10^3\\). PlotPower <- function(x, a, log = "y") { plot(x, Power3(x, a), xlab = "x", ylab = substitute("x"^a, list(a = a)), log = log ) } PlotPower(1:10, 3) 4.2.4 Question 16 Using the Boston data set, fit classification models in order to predict whether a given census tract has a crime rate above or below the median. Explore logistic regression, LDA, naive Bayes and KNN models using various subsets of the predictors. Describe your findings. Hint: You will have to create the response variable yourself, using the variables that are contained in the Boston data set. x <- cbind( ISLR2::Boston[, -1], data.frame("highcrim" = Boston$crim > median(Boston$crim)) ) set.seed(1) train <- sample(seq_len(nrow(x)), nrow(x) * 2/3) We can find the most associated variables by performing Wilcoxon rank-sum tests (via wilcox.test()). ord <- order(sapply(1:12, function(i) { p <- wilcox.test(as.numeric(x[train, i]) ~ x[train, ]$highcrim)$p.value setNames(log10(p), colnames(x)[i]) })) ord <- names(x)[ord] ord ## [1] "nox" "dis" "indus" "tax" "age" "rad" "zn" ## [8] "lstat" "medv" "ptratio" "rm" "chas" Variables nox (nitrogen oxides concentration) followed by dis (distance to employment center) appear to be most associated with high crime. Let’s reorder columns by those most associated with highcrim (in the training data). x <- x[, c(ord, "highcrim")] Let’s look at univariate associations with highcrim (in the training data). x[train, ] |> pivot_longer(!highcrim) |> mutate(name = factor(name, levels = ord)) |> ggplot(aes(highcrim, value)) + geom_boxplot() + facet_wrap(~name, scale = "free") Fit lda, logistic regression, naive Bayes and KNN models (with k = 1..50) for a set of specific predictors and return the error rate. We fit models using increasing numbers of predictors: column 1, then columns 1 and 2, etc.
fit_models <- function(cols, k_vals = 1:50) { dat_train <- x[train, cols, drop = FALSE] dat_test <- x[-train, cols, drop = FALSE] fit <- lda(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "response")$class lda_err <- mean(pred != x$highcrim[-train]) fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 logreg_err <- mean(pred != x$highcrim[-train]) fit <- naiveBayes(x$highcrim[train] ~ ., data = dat_train) pred <- predict(fit, dat_test, type = "class") nb_err <- mean(pred != x$highcrim[-train]) res <- sapply(k_vals, function(k) { fit <- knn(dat_train, dat_test, x$highcrim[train], k = k) mean(fit != x$highcrim[-train]) }) knn_err <- min(res) c("LDA" = lda_err, "LR" = logreg_err, "NB" = nb_err, "KNN" = knn_err) } res <- sapply(1:12, function(max) fit_models(1:max)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN appears to perform better (if we tune \\(k\\)) for all numbers of predictors. fit <- knn( x[train, "nox", drop = FALSE], x[-train, "nox", drop = FALSE], x$highcrim[train], k = 1 ) table(fit, x[-train, ]$highcrim) ## ## fit FALSE TRUE ## FALSE 78 2 ## TRUE 3 86 mean(fit != x[-train, ]$highcrim) * 100 ## [1] 2.95858 Surprisingly, the best model (with an error rate of <5%) uses \\(k = 1\\) and assigns crime rate categories based on the town with the single most similar nitrogen oxide concentration (nox). This might be, for example, because nearby towns have similar crime rates, and we can obtain good predictions by predicting crime rate based on a nearby town. But what if we only consider \\(k = 20\\). res <- sapply(1:12, function(max) fit_models(1:max, k_vals = 20)) res <- as_tibble(t(res)) res$n_var <- 1:12 pivot_longer(res, cols = !n_var) |> ggplot(aes(n_var, value, col = name)) + geom_line() + xlab("Number of predictors") + ylab("Error rate") KNN still performs best with a single predictor (nox), but logistic regression with 12 predictors also performs well and has an error rate of ~12%. vars <- names(x)[1:12] dat_train <- x[train, vars] dat_test <- x[-train, vars] fit <- glm(x$highcrim[train] ~ ., data = dat_train, family = binomial) pred <- predict(fit, dat_test, type = "response") > 0.5 table(pred, x[-train, ]$highcrim) ## ## pred FALSE TRUE ## FALSE 70 9 ## TRUE 11 79 mean(pred != x$highcrim[-train]) * 100 ## [1] 11.83432 summary(fit) ## ## Call: ## glm(formula = x$highcrim[train] ~ ., family = binomial, data = dat_train) ## ## Coefficients: ## Estimate Std. Error z value Pr(>|z|) ## (Intercept) -44.525356 7.935621 -5.611 2.01e-08 *** ## nox 55.062428 10.281556 5.355 8.53e-08 *** ## dis 1.080847 0.304084 3.554 0.000379 *** ## indus -0.067493 0.058547 -1.153 0.248997 ## tax -0.005336 0.003138 -1.700 0.089060 . ## age 0.020965 0.014190 1.477 0.139556 ## rad 0.678196 0.192193 3.529 0.000418 *** ## zn -0.099558 0.045914 -2.168 0.030134 * ## lstat 0.134035 0.058623 2.286 0.022231 * ## medv 0.213114 0.088922 2.397 0.016547 * ## ptratio 0.294396 0.155285 1.896 0.057981 . ## rm -0.518115 0.896423 -0.578 0.563278 ## chas 0.139557 0.798632 0.175 0.861280 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 467.04 on 336 degrees of freedom ## Residual deviance: 135.80 on 324 degrees of freedom ## AIC: 161.8 ## ## Number of Fisher Scoring iterations: 9 "],["resampling-methods.html", "5 Resampling Methods 5.1 Conceptual 5.2 Applied", " 5 Resampling Methods 5.1 Conceptual 5.1.1 Question 1 Using basic statistical properties of the variance, as well as single-variable calculus, derive (5.6). In other words, prove that \\(\\alpha\\) given by (5.6) does indeed minimize \\(Var(\\alpha X + (1 - \\alpha)Y)\\). Equation 5.6 is: \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] Remember that: \\[ \\mathrm{Var}(aX) = a^2\\mathrm{Var}(X), \\\\ \\mathrm{Var}(X + Y) = \\mathrm{Var}(X) + \\mathrm{Var}(Y) + 2\\mathrm{Cov}(X,Y), \\\\ \\mathrm{Cov}(aX, bY) = ab\\mathrm{Cov}(X, Y) \\] If we define \\(\\sigma^2_X = \\mathrm{Var}(X)\\), \\(\\sigma^2_Y = \\mathrm{Var}(Y)\\) and \\(\\sigma_{XY} = \\mathrm{Cov}(X, Y)\\), then \\[\\begin{align} \\mathrm{Var}(\\alpha X + (1 - \\alpha)Y) &= \\alpha^2\\sigma^2_X + (1-\\alpha)^2\\sigma^2_Y + 2\\alpha(1 - \\alpha)\\sigma_{XY} \\\\ &= \\alpha^2\\sigma^2_X + \\sigma^2_Y - 2\\alpha\\sigma^2_Y + \\alpha^2\\sigma^2_Y + 2\\alpha\\sigma_{XY} - 2\\alpha^2\\sigma_{XY} \\end{align}\\] Now we want to find where the rate of change of this function with respect to \\(\\alpha\\) is 0, so we compute the partial derivative, set it to 0, and solve. \\[ \\frac{\\partial}{\\partial{\\alpha}}\\mathrm{Var}(\\alpha X + (1 - \\alpha)Y) = 2\\alpha\\sigma^2_X - 2\\sigma^2_Y + 2\\alpha\\sigma^2_Y + 2\\sigma_{XY} - 4\\alpha\\sigma_{XY} = 0 \\] Moving \\(\\alpha\\) terms to the same side: \\[ \\alpha\\sigma^2_X + \\alpha\\sigma^2_Y - 2\\alpha\\sigma_{XY} = \\sigma^2_Y - \\sigma_{XY} \\] \\[ \\alpha = \\frac{\\sigma^2_Y - \\sigma_{XY}}{\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}} \\] We should also show that this is a minimum, by checking that the second partial derivative with respect to \\(\\alpha\\) is \\(\\ge 0\\). \\[\\begin{align} \\frac{\\partial^2}{\\partial{\\alpha^2}} &= 2\\sigma^2_X + 2\\sigma^2_Y - 4\\sigma_{XY} \\\\ &= 2(\\sigma^2_X + \\sigma^2_Y - 2\\sigma_{XY}) \\\\ &= 2\\mathrm{Var}(X - Y) \\end{align}\\] Since a variance is always non-negative, this is \\(\\ge 0\\), so the stationary point is indeed a minimum. 5.1.2 Question 2 We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations. What is the probability that the first bootstrap observation is not the \\(j\\)th observation from the original sample? Justify your answer. This is 1 minus the probability that it is the \\(j\\)th observation, i.e. \\(1 - 1/n\\). What is the probability that the second bootstrap observation is not the \\(j\\)th observation from the original sample? Since bootstrap observations are drawn independently and with replacement, this probability is the same (\\(1 - 1/n\\)). Argue that the probability that the \\(j\\)th observation is not in the bootstrap sample is \\((1 - 1/n)^n\\). For the \\(j\\)th observation to not be in the sample, it would have to not be picked for each of the \\(n\\) positions. Since the draws are independent, the probability is \\((1 - 1/n)^n\\). When \\(n = 5\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 5 1 - (1 - 1 / n)^n ## [1] 0.67232 \\(p = 0.67\\) When \\(n = 100\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample?
n <- 100 1 - (1 - 1 / n)^n ## [1] 0.6339677 \\(p = 0.64\\) When \\(n = 10,000\\), what is the probability that the \\(j\\)th observation is in the bootstrap sample? n <- 10000 1 - (1 - 1 / n)^n ## [1] 0.632139 \\(p = 0.63\\) Create a plot that displays, for each integer value of \\(n\\) from 1 to 100,000, the probability that the \\(j\\)th observation is in the bootstrap sample. Comment on what you observe. x <- sapply(1:100000, function(n) 1 - (1 - 1 / n)^n) plot(x, log = "x", type = "o") The probability rapidly approaches 0.63 with increasing \\(n\\). Note that \\[e^x = \\lim_{n \\to \\infty} \\left(1 + \\frac{x}{n}\\right)^n,\\] so with \\(x = -1\\), we can see that our limit is \\(1 - e^{-1} = 1 - 1/e \\approx 0.632\\). We will now investigate numerically the probability that a bootstrap sample of size \\(n = 100\\) contains the \\(j\\)th observation. Here \\(j = 4\\). We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample. > store <- rep (NA, 10000) > for (i in 1:10000) { store[i] <- sum(sample(1:100, rep = TRUE) == 4) > 0 } > mean(store) Comment on the results obtained. store <- replicate(10000, sum(sample(1:100, replace = TRUE) == 4) > 0) mean(store) ## [1] 0.6308 The probability of including \\(4\\) when resampling numbers \\(1...100\\) is close to \\(1 - (1 - 1/100)^{100}\\). 5.1.3 Question 3 We now review \\(k\\)-fold cross-validation. Explain how \\(k\\)-fold cross-validation is implemented. We divide our data into \\(k\\) (approximately equal) subsets, and then generate predictions for each subset in turn, training on the other \\(k - 1\\) subsets combined. What are the advantages and disadvantages of \\(k\\)-fold cross-validation relative to: The validation set approach? LOOCV? When using a validation set, we can only train on a small portion of the data as we must reserve the rest for validation. As a result, it can overestimate the test error rate (assuming we then train using the complete data for future prediction). It is also sensitive to which observations are included in train vs. test. It is, however, low cost in terms of processing time (as we only have to fit one model). When using LOOCV, we can train on \\(n-1\\) observations; however, the trained models we generate each differ only by the inclusion (and exclusion) of a single observation. As a result, LOOCV can have high variance (the fitted models are highly correlated with one another, so the averaged error estimate might be quite different from what we would obtain with a different data set). LOOCV is also costly in terms of processing time. 5.1.4 Question 4 Suppose that we use some statistical learning method to make a prediction for the response \\(Y\\) for a particular value of the predictor \\(X\\). Carefully describe how we might estimate the standard deviation of our prediction. We could address this with bootstrapping. Our procedure would be to (jointly) resample the \\(Y\\) and \\(X\\) values and refit our model many times. For each bootstrap model we record the prediction at the value of \\(X\\) of interest, then calculate the standard deviation of these predictions across the bootstrap samples. 5.2 Applied 5.2.1 Question 5 In Chapter 4, we used logistic regression to predict the probability of default using income and balance on the Default data set. We will now estimate the test error of this logistic regression model using the validation set approach. Do not forget to set a random seed before beginning your analysis. Fit a logistic regression model that uses income and balance to predict default.
library(ISLR2) set.seed(42) fit <- glm(default ~ income + balance, data = Default, family = "binomial") Using the validation set approach, estimate the test error of this model. In order to do this, you must perform the following steps: Split the sample set into a training set and a validation set. Fit a multiple logistic regression model using only the training observations. Obtain a prediction of default status for each individual in the validation set by computing the posterior probability of default for that individual, and classifying the individual to the default category if the posterior probability is greater than 0.5. Compute the validation set error, which is the fraction of the observations in the validation set that are misclassified. train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") table(pred, Default$default[-train]) ## ## pred No Yes ## No 4817 110 ## Yes 20 53 mean(pred != Default$default[-train]) ## [1] 0.026 Repeat the process in (b) three times, using three different splits of the observations into a training set and a validation set. Comment on the results obtained. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0260 0.0294 0.0258 The results obtained are variable and depend on the samples allocated to training vs. test. Now consider a logistic regression model that predicts the probability of default using income, balance, and a dummy variable for student. Estimate the test error for this model using the validation set approach. Comment on whether or not including a dummy variable for student leads to a reduction in the test error rate. replicate(3, { train <- sample(nrow(Default), nrow(Default) / 2) fit <- glm(default ~ income + balance + student, data = Default, family = "binomial", subset = train) pred <- ifelse(predict(fit, newdata = Default[-train, ], type = "response") > 0.5, "Yes", "No") mean(pred != Default$default[-train]) }) ## [1] 0.0278 0.0256 0.0250 Including student does not seem to make a substantial improvement to the test error. 5.2.2 Question 6 We continue to consider the use of a logistic regression model to predict the probability of default using income and balance on the Default data set. In particular, we will now compute estimates for the standard errors of the income and balance logistic regression coefficients in two different ways: (1) using the bootstrap, and (2) using the standard formula for computing the standard errors in the glm() function. Do not forget to set a random seed before beginning your analysis. Using the summary() and glm() functions, determine the estimated standard errors for the coefficients associated with income and balance in a multiple logistic regression model that uses both predictors. fit <- glm(default ~ income + balance, data = Default, family = "binomial") summary(fit) ## ## Call: ## glm(formula = default ~ income + balance, family = "binomial", ## data = Default) ## ## Coefficients: ## Estimate Std. 
Error z value Pr(>|z|) ## (Intercept) -1.154e+01 4.348e-01 -26.545 < 2e-16 *** ## income 2.081e-05 4.985e-06 4.174 2.99e-05 *** ## balance 5.647e-03 2.274e-04 24.836 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for binomial family taken to be 1) ## ## Null deviance: 2920.6 on 9999 degrees of freedom ## Residual deviance: 1579.0 on 9997 degrees of freedom ## AIC: 1585 ## ## Number of Fisher Scoring iterations: 8 The estimated standard errors reported by summary() are 5.0e-6 for the income coefficient and 2.3e-4 for the balance coefficient. Write a function, boot.fn(), that takes as input the Default data set as well as an index of the observations, and that outputs the coefficient estimates for income and balance in the multiple logistic regression model. boot.fn <- function(x, i) { fit <- glm(default ~ income + balance, data = x[i, ], family = "binomial") coef(fit)[-1] } Use the boot() function together with your boot.fn() function to estimate the standard errors of the logistic regression coefficients for income and balance. library(boot) set.seed(42) boot(Default, boot.fn, R = 1000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Default, statistic = boot.fn, R = 1000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 2.080898e-05 2.737444e-08 5.073444e-06 ## t2* 5.647103e-03 1.176249e-05 2.299133e-04 Comment on the estimated standard errors obtained using the glm() function and using your bootstrap function. The standard errors obtained by bootstrapping are similar to those estimated by glm(). 5.2.3 Question 7 In Sections 5.3.2 and 5.3.3, we saw that the cv.glm() function can be used in order to compute the LOOCV test error estimate. Alternatively, one could compute those quantities using just the glm() and predict.glm() functions, and a for loop. You will now take this approach in order to compute the LOOCV error for a simple logistic regression model on the Weekly data set. Recall that in the context of classification problems, the LOOCV error is given in (5.4). Fit a logistic regression model that predicts Direction using Lag1 and Lag2. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly, family = "binomial") Fit a logistic regression model that predicts Direction using Lag1 and Lag2 using all but the first observation. fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-1, ], family = "binomial") Use the model from (b) to predict the direction of the first observation. You can do this by predicting that the first observation will go up if \\(P(\\)Direction=\"Up\" | Lag1 , Lag2\\() > 0.5\\). Was this observation correctly classified? predict(fit, newdata = Weekly[1, , drop = FALSE], type = "response") > 0.5 ## 1 ## TRUE No: since the predicted probability is greater than 0.5, the model predicts Up, but the true direction of the first observation was Down, so the observation was incorrectly classified. Write a for loop from \\(i = 1\\) to \\(i = n\\), where \\(n\\) is the number of observations in the data set, that performs each of the following steps: Fit a logistic regression model using all but the \\(i\\)th observation to predict Direction using Lag1 and Lag2. Compute the posterior probability of the market moving up for the \\(i\\)th observation. Use the posterior probability for the \\(i\\)th observation in order to predict whether or not the market moves up. Determine whether or not an error was made in predicting the direction for the \\(i\\)th observation. If an error was made, then indicate this as a 1, and otherwise indicate it as a 0.
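For reference, (5.4) defines the LOOCV error for classification as the average of \\(n\\) error indicators, \\[ CV_{(n)} = \\frac{1}{n}\\sum_{i=1}^n \\mathrm{Err}_i, \\quad \\textrm{where} \\quad \\mathrm{Err}_i = I(y_i \\ne \\hat{y}_i), \\] so the loop below just needs to record \\(\\mathrm{Err}_i\\) for each observation and average the results.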
error <- numeric(nrow(Weekly)) for (i in 1:nrow(Weekly)) { fit <- glm(Direction ~ Lag1 + Lag2, data = Weekly[-i, ], family = "binomial") p <- predict(fit, newdata = Weekly[i, , drop = FALSE], type = "response") > 0.5 pred <- ifelse(p, "Up", "Down") # predicted direction error[i] <- pred != Weekly$Direction[i] # 1 when an error was made } Take the average of the \\(n\\) numbers obtained in (d) in order to obtain the LOOCV estimate for the test error. Comment on the results. mean(error) ## [1] 0.4499541 The LOOCV test error rate is 45%, which implies that our predictions are marginally more often correct than not. 5.2.4 Question 8 We will now perform cross-validation on a simulated data set. Generate a simulated data set as follows: > set.seed(1) > x <- rnorm(100) > y <- x - 2 *x^2 + rnorm(100) In this data set, what is \\(n\\) and what is \\(p\\)? Write out the model used to generate the data in equation form. set.seed(1) x <- rnorm(100) y <- x - 2 * x^2 + rnorm(100) \\(n\\) is 100 and \\(p\\) is 1 (there are 100 observations and \\(y\\) is predicted with a single variable \\(x\\)). The model equation is: \\[y = -2x^2 + x + \\epsilon\\]. Create a scatterplot of \\(X\\) against \\(Y\\). Comment on what you find. plot(x, y) \\(y\\) has a (negative) quadratic relationship with \\(x\\). Set a random seed, and then compute the LOOCV errors that result from fitting the following four models using least squares: \\(Y = \\beta_0 + \\beta_1 X + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\epsilon\\) \\(Y = \\beta_0 + \\beta_1 X + \\beta_2 X^2 + \\beta_3 X^3 + \\beta_4 X^4 + \\epsilon\\). Note you may find it helpful to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). library(boot) set.seed(42) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 Repeat (c) using another random seed, and report your results. Are your results the same as what you got in (c)? Why? set.seed(43) dat <- data.frame(x, y) sapply(1:4, function(i) cv.glm(dat, glm(y ~ poly(x, i)))$delta[1]) ## [1] 7.2881616 0.9374236 0.9566218 0.9539049 The results are the same because we are using LOOCV. When doing this, the model is fit leaving each one of the observations out in turn, and thus there is no stochasticity involved. Which of the models in (c) had the smallest LOOCV error? Is this what you expected? Explain your answer. The second model had the smallest LOOCV error. This is what we would expect, since the model used to generate the data was quadratic and we are measuring the test (rather than training) error rate to evaluate performance. Comment on the statistical significance of the coefficient estimates that results from fitting each of the models in (c) using least squares. Do these results agree with the conclusions drawn based on the cross-validation results? for (i in 1:4) printCoefmat(coef(summary(glm(y ~ poly(x, i), data = dat)))) ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.55002 0.26001 -5.9613 3.954e-08 *** ## poly(x, i) 6.18883 2.60014 2.3802 0.01924 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095803 -16.1792 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.958032 6.4599 4.185e-09 *** ## poly(x, i)2 -23.948305 0.958032 -24.9974 < 2.2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std.
Error t value Pr(>|t|) ## (Intercept) -1.550023 0.096263 -16.1019 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.962632 6.4291 4.972e-09 *** ## poly(x, i)2 -23.948305 0.962632 -24.8779 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.962632 0.2744 0.7844 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) -1.550023 0.095905 -16.1620 < 2.2e-16 *** ## poly(x, i)1 6.188826 0.959051 6.4531 4.591e-09 *** ## poly(x, i)2 -23.948305 0.959051 -24.9708 < 2.2e-16 *** ## poly(x, i)3 0.264106 0.959051 0.2754 0.7836 ## poly(x, i)4 1.257095 0.959051 1.3108 0.1931 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 We can see that the coefficients in the first model are not highly significant, but all terms (\\(\\beta_0, \\beta_1\\) and \\(\\beta_2\\)) are in the quadratic model. After this, subsequent \\(\\beta_n\\) terms are not significant. Therefore, these results agree with those from cross-validation. 5.2.5 Question 9 We will now consider the Boston housing data set, from the ISLR2 library. Based on this data set, provide an estimate for the population mean of medv. Call this estimate \\(\\hat\\mu\\). (mu <- mean(Boston$medv)) ## [1] 22.53281 Provide an estimate of the standard error of \\(\\hat\\mu\\). Interpret this result. Hint: We can compute the standard error of the sample mean by dividing the sample standard deviation by the square root of the number of observations. sd(Boston$medv) / sqrt(length(Boston$medv)) ## [1] 0.4088611 Now estimate the standard error of \\(\\hat\\mu\\) using the bootstrap. How does this compare to your answer from (b)? set.seed(42) (bs <- boot(Boston$medv, function(v, i) mean(v[i]), 10000)) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) mean(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 22.53281 0.002175751 0.4029139 The standard error using the bootstrap (0.403) is very close to that obtained from the formula above (0.409). Based on your bootstrap estimate from (c), provide a 95% confidence interval for the mean of medv. Compare it to the results obtained using t.test(Boston$medv). Hint: You can approximate a 95% confidence interval using the formula \\([\\hat\\mu - 2SE(\\hat\\mu), \\hat\\mu + 2SE(\\hat\\mu)].\\) se <- sd(bs$t) c(mu - 2 * se, mu + 2 * se) ## [1] 21.72698 23.33863 Based on this data set, provide an estimate, \\(\\hat\\mu_{med}\\), for the median value of medv in the population. median(Boston$medv) ## [1] 21.2 We now would like to estimate the standard error of \\(\\hat\\mu_{med}\\). Unfortunately, there is no simple formula for computing the standard error of the median. Instead, estimate the standard error of the median using the bootstrap. Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) median(v[i]), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) median(v[i]), ## R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 21.2 -0.01331 0.3744634 The estimated standard error of the median is 0.374. This is lower than the standard error of the mean. Based on this data set, provide an estimate for the tenth percentile of medv in Boston census tracts. Call this quantity \\(\\hat\\mu_{0.1}\\). (You can use the quantile() function.) quantile(Boston$medv, 0.1) ## 10% ## 12.75 Use the bootstrap to estimate the standard error of \\(\\hat\\mu_{0.1}\\). 
Comment on your findings. set.seed(42) boot(Boston$medv, function(v, i) quantile(v[i], 0.1), 10000) ## ## ORDINARY NONPARAMETRIC BOOTSTRAP ## ## ## Call: ## boot(data = Boston$medv, statistic = function(v, i) quantile(v[i], ## 0.1), R = 10000) ## ## ## Bootstrap Statistics : ## original bias std. error ## t1* 12.75 0.013405 0.497298 We get a standard error of ~0.5. This is higher than the standard error of the median. Nevertheless, the standard error is quite small, so we can be fairly confident about the value of the 10th percentile. "],["linear-model-selection-and-regularization.html", "6 Linear Model Selection and Regularization 6.1 Conceptual 6.2 Applied", " 6 Linear Model Selection and Regularization 6.1 Conceptual 6.1.1 Question 1 We perform best subset, forward stepwise, and backward stepwise selection on a single data set. For each approach, we obtain \\(p + 1\\) models, containing \\(0, 1, 2, ..., p\\) predictors. Explain your answers: Which of the three models with \\(k\\) predictors has the smallest training RSS? Best subset considers the most models (all possible combinations of \\(p\\) predictors are considered), so it will give the smallest training RSS (it will at least consider all possibilities covered by forward and backward stepwise selection). However, all three approaches are expected to give similar if not identical results in practice. Which of the three models with \\(k\\) predictors has the smallest test RSS? We cannot tell which model will perform best on the test RSS. The answer will depend on the tradeoff between fitting to the data and overfitting. True or False: The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the (\\(k+1\\))-variable model identified by forward stepwise selection. True. Forward stepwise selection retains all features identified in previous models as \\(k\\) is increased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. True. Backward stepwise selection removes features one by one as \\(k\\) is decreased. The predictors in the \\(k\\)-variable model identified by backward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by forward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by forward stepwise are a subset of the predictors in the \\((k+1)\\)-variable model identified by backward stepwise selection. False. Forward and backward stepwise selection can identify different combinations of variables due to differing algorithms. The predictors in the \\(k\\)-variable model identified by best subset are a subset of the predictors in the \\((k+1)\\)-variable model identified by best subset selection. False. Best subset selection can identify different combinations of variables for each \\(k\\) by considering all possible models. 6.1.2 Question 2 For parts (a) through (c), indicate which of i. through iv. is correct. Justify your answer. The lasso, relative to least squares, is: More flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. More flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias.
Less flexible and hence will give improved prediction accuracy when its increase in bias is less than its decrease in variance. Less flexible and hence will give improved prediction accuracy when its increase in variance is less than its decrease in bias. By using shrinkage, lasso can reduce the number of predictors so is less flexible. As a result, it will lead to an increase in bias by approximating the true relationship. We hope that this increase is small but that we dramatically reduce variance (i.e. the difference we would see in the model fit between different sets of training data). Repeat (a) for ridge regression relative to least squares. The same is true of ridge regression—shrinkage results in a less flexible model and can reduce variance. Repeat (a) for non-linear methods relative to least squares. Non-linear methods can be more flexible. They can perform better as long as they don’t substantially increase variance. 6.1.3 Question 3 Suppose we estimate the regression coefficients in a linear regression model by minimizing: \\[ \\sum_{i=1}^n\\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 \\textrm{subject to} \\sum_{j=1}^p|\\beta_j| \\le s \\] for a particular value of \\(s\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(s\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(s\\) increases, the model becomes more flexible (the sum of absolute coefficients can be higher). With more flexible models, training RSS will always decrease. Repeat (a) for test RSS. With more flexible models, test RSS will decrease (as the fit improves) and will then increase due to overfitting (high variance). Repeat (a) for variance. As \\(s\\) increases, the model becomes more flexible so variance will increase. Repeat (a) for (squared) bias. As \\(s\\) increases, the model becomes more flexible so bias will decrease. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.4 Question 4 Suppose we estimate the regression coefficients in a linear regression model by minimizing \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] for a particular value of \\(\\lambda\\). For parts (a) through (e), indicate which of i. through v. is correct. Justify your answer. As we increase \\(\\lambda\\) from 0, the training RSS will: Increase initially, and then eventually start decreasing in an inverted U shape. Decrease initially, and then eventually start increasing in a U shape. Steadily increase. Steadily decrease. Remain constant. As \\(\\lambda\\) is increased, more weight is placed on the sum of squared coefficients and so the model becomes less flexible. As a result, training RSS must increase. Repeat (a) for test RSS. As \\(\\lambda\\) increases, flexibility decreases so test RSS will decrease (variance decreases) but will then increase (as bias increases). Repeat (a) for variance. Steadily decrease. Repeat (a) for (squared) bias. Steadily increase. Repeat (a) for the irreducible error. The irreducible error is unchanged. 6.1.5 Question 5 It is well-known that ridge regression tends to give similar coefficient values to correlated variables, whereas the lasso may give quite different coefficient values to correlated variables. 
We will now explore this property in a very simple setting. Suppose that \\(n = 2, p = 2, x_{11} = x_{12}, x_{21} = x_{22}\\). Furthermore, suppose that \\(y_1 + y_2 =0\\) and \\(x_{11} + x_{21} = 0\\) and \\(x_{12} + x_{22} = 0\\), so that the estimate for the intercept in a least squares, ridge regression, or lasso model is zero: \\(\\hat{\\beta}_0 = 0\\). Write out the ridge regression optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] We can ignore \\(\\beta_0\\) and can expand the sums since there are only two terms. Additionally, we can define \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\). We then need to minimize \\[\\begin{align} f = & (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ f = & y_1^2 - 2y_1\\beta_1x_1 - 2y_1\\beta_2x_1 + \\beta_1^2x_1^2 + 2\\beta_1\\beta_2x_1^2 + \\beta_2^2x_1^2 + \\\\ & y_2^2 - 2y_2\\beta_1x_2 - 2y_2\\beta_2x_2 + \\beta_1^2x_2^2 + 2\\beta_1\\beta_2x_2^2 + \\beta_2^2x_2^2 + \\\\ & \\lambda\\beta_1^2 + \\lambda\\beta_2^2 \\\\ \\end{align}\\] Argue that in this setting, the ridge coefficient estimates satisfy \\(\\hat{\\beta}_1 = \\hat{\\beta}_2\\). We can find when the above is minimized with respect to each of \\(\\beta_1\\) and \\(\\beta_2\\) by partial differentiation. \\[ \\frac{\\partial}{\\partial{\\beta_1}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_1 \\] \\[ \\frac{\\partial}{\\partial{\\beta_2}} = - 2y_1x_1 + 2\\beta_1x_1^2 + 2\\beta_2x_1^2 - 2y_2x_2 + 2\\beta_1x_2^2 + 2\\beta_2x_2^2 + 2\\lambda\\beta_2 \\] A minimum can be found when these are set to 0. \\[ \\lambda\\beta_1 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\\\ \\lambda\\beta_2 = y_1x_1 + y_2x_2 - \\beta_1x_1^2 - \\beta_2x_1^2 - \\beta_1x_2^2 - \\beta_2x_2^2 \\] Subtracting these two equations gives \\(\\lambda\\beta_1 = \\lambda\\beta_2\\), so (for \\(\\lambda > 0\\)) \\(\\beta_1 = \\beta_2\\): there is a single solution, with equal coefficients. Write out the lasso optimization problem in this setting. We are trying to minimize: \\[ \\sum_{i=1}^n \\left(y_i - \\beta_0 - \\sum_{j=1}^p\\beta_jx_{ij}\\right)^2 + \\lambda\\sum_{j=1}^p |\\beta_j| \\] As above (and defining \\(x_1 = x_{11} = x_{12}\\) and \\(x_2 = x_{21} = x_{22}\\)) we simplify to \\[ (y_1 - \\beta_1x_1 - \\beta_2x_1)^2 + (y_2 - \\beta_1x_2 - \\beta_2x_2)^2 + \\lambda|\\beta_1| + \\lambda|\\beta_2| \\] Argue that in this setting, the lasso coefficients \\(\\hat{\\beta}_1\\) and \\(\\hat{\\beta}_2\\) are not unique—in other words, there are many possible solutions to the optimization problem in (c). Describe these solutions. We will consider the alternate form of the lasso optimization problem \\[ (y_1 - \\hat{\\beta_1}x_1 - \\hat{\\beta_2}x_1)^2 + (y_2 - \\hat{\\beta_1}x_2 - \\hat{\\beta_2}x_2)^2 \\quad \\text{subject to} \\quad |\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s \\] Since \\(x_1 + x_2 = 0\\) and \\(y_1 + y_2 = 0\\), this is equivalent to minimizing \\(2(y_1 - (\\hat{\\beta_1} + \\hat{\\beta_2})x_1)^2\\), which is minimized whenever \\(\\hat{\\beta_1} + \\hat{\\beta_2} = y_1/x_1\\). Geometrically, this is a \\(45^\\circ\\) backwards sloping line in the (\\(\\hat{\\beta_1}\\), \\(\\hat{\\beta_2}\\)) plane.
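To make the non-uniqueness concrete, here is a minimal numeric sketch (the values x1 <- 2 and y1 <- 3 are arbitrary illustrative choices, not taken from the text): every coefficient pair on the line \\(\\hat{\\beta_1} + \\hat{\\beta_2} = y_1/x_1\\) attains zero RSS and, for non-negative coefficients, the same \\(\\ell_1\\) penalty.
# Setup follows the question: x11 = x12 = x1, x21 = x22 = -x1, and y2 = -y1.
x1 <- 2
y1 <- 3
rss <- function(b1, b2) {
  (y1 - b1 * x1 - b2 * x1)^2 + (-y1 + b1 * x1 + b2 * x1)^2
}
s <- y1 / x1  # candidate solutions satisfy b1 + b2 = 1.5
cands <- rbind(c(0, s), c(s / 2, s / 2), c(s, 0), c(1, 0.5))
# Every candidate achieves RSS = 0 with the same L1 norm of 1.5:
apply(cands, 1, function(b) c(rss = rss(b[1], b[2]), l1 = sum(abs(b))))
All four pairs give identical loss and penalty; the geometric picture below shows which of these points the lasso constraint actually selects.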
The constraints \\(|\\hat{\\beta_1}| + |\\hat{\\beta_2}| \\le s\\) specify a diamond shape in the same plane, also with edges that are at \\(45^\\circ\\), centered at the origin and intersecting the axes at a distance \\(s\\) from the origin. Thus, when the constraint is active, the edge of the diamond lying along \\(\\hat{\\beta_1} + \\hat{\\beta_2} = s\\) (or \\(\\hat{\\beta_1} + \\hat{\\beta_2} = -s\\), depending on the sign of \\(y_1/x_1\\)) coincides with this line, and every point on that edge solves the lasso optimization problem. 6.1.6 Question 6 We will now explore (6.12) and (6.13) further. Consider (6.12) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.12) as a function of \\(\\beta_1\\). Your plot should confirm that (6.12) is solved by (6.14). Equation 6.12 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p\\beta_j^2 \\] Equation 6.14 is: \\[ \\hat{\\beta}_j^R = y_j/(1 + \\lambda) \\] where \\(\\hat{\\beta}_j^R\\) is the ridge regression estimate. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * beta^2 } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.12") abline(v = y / (1 + lambda), lty = 2) Consider (6.13) with \\(p = 1\\). For some choice of \\(y_1\\) and \\(\\lambda > 0\\), plot (6.13) as a function of \\(\\beta_1\\). Your plot should confirm that (6.13) is solved by (6.15). Equation 6.13 is: \\[ \\sum_{j=1}^p(y_j - \\beta_j)^2 + \\lambda\\sum_{j=1}^p|\\beta_j| \\] Equation 6.15 is: \\[ \\hat{\\beta}_j^L = \\begin{cases} y_j - \\lambda/2 &\\mbox{if } y_j > \\lambda/2; \\\\ y_j + \\lambda/2 &\\mbox{if } y_j < -\\lambda/2; \\\\ 0 &\\mbox{if } |y_j| \\le \\lambda/2; \\end{cases} \\] For \\(\\lambda = 0.7\\) and \\(y = 1.4\\), the top case applies. lambda <- 0.7 y <- 1.4 fn <- function(beta) { (y - beta)^2 + lambda * abs(beta) } plot(seq(0, 2, 0.01), fn(seq(0, 2, 0.01)), type = "l", xlab = "beta", ylab = "6.13") abline(v = y - lambda / 2, lty = 2) 6.1.7 Question 7 We will now derive the Bayesian connection to the lasso and ridge regression discussed in Section 6.2.2. Suppose that \\(y_i = \\beta_0 + \\sum_{j=1}^p x_{ij}\\beta_j + \\epsilon_i\\) where \\(\\epsilon_1, ..., \\epsilon_n\\) are independent and identically distributed from a \\(N(0, \\sigma^2)\\) distribution. Write out the likelihood for the data. \\[\\begin{align*} \\mathcal{L} &= \\prod_i^n \\mathcal{N}(0, \\sigma^2) \\\\ &= \\prod_i^n \\frac{1}{\\sqrt{2\\pi}\\sigma}\\exp\\left(-\\frac{\\epsilon_i^2}{2\\sigma^2}\\right) \\\\ &= \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\end{align*}\\] Assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a double-exponential distribution with mean 0 and common scale parameter b: i.e. \\(p(\\beta) = \\frac{1}{2b}\\exp(-|\\beta|/b)\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior can be calculated by multiplying the prior and likelihood (up to a proportionality constant). \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{2b}\\exp\\left(-\\frac{|\\beta_j|}{b}\\right) \\\\ &\\propto \\left(\\frac{1}{2b}\\right)^p \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 -\\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\end{align*}\\] Argue that the lasso estimate is the mode for \\(\\beta\\) under this posterior distribution.
Let us find the maximum of the posterior distribution (the mode). Maximizing the posterior probability is equivalent to maximizing its log, which is: \\[ \\log(p(\\beta|X,Y)) \\propto \\log\\left[ \\left(\\frac{1}{2b}\\right)^p \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\right ] - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) \\] Since the first term is independent of \\(\\beta\\), our solution will be when we minimize the second term. \\[\\begin{align*} \\DeclareMathOperator*{\\argmin}{arg\\,min} \\argmin_\\beta \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\sum_j^p\\frac{|\\beta_j|}{b}\\right) &= \\argmin_\\beta \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\\\ &= \\argmin_\\beta \\left( \\sum_i^n \\epsilon_i^2 +\\frac{2\\sigma^2}{b}\\sum_j^p|\\beta_j|\\right) \\end{align*}\\] Note that \\(RSS = \\sum_i^n \\epsilon_i^2\\), so if we set \\(\\lambda = \\frac{2\\sigma^2}{b}\\), the mode corresponds to the lasso optimization. \\[ \\argmin_\\beta RSS + \\lambda\\sum_j^p|\\beta_j| \\] Now assume the following prior for \\(\\beta\\): \\(\\beta_1, ..., \\beta_p\\) are independent and identically distributed according to a normal distribution with mean zero and variance \\(c\\). Write out the posterior for \\(\\beta\\) in this setting. The posterior is now: \\[\\begin{align*} p(\\beta|X,Y) &\\propto \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2\\right) \\prod_j^p\\frac{1}{\\sqrt{2\\pi c}}\\exp\\left(-\\frac{\\beta_j^2}{2c}\\right) \\\\ &\\propto \\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\exp\\left(-\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 - \\frac{1}{2c}\\sum_j^p\\beta_j^2\\right) \\end{align*}\\] Argue that the ridge regression estimate is both the mode and the mean for \\(\\beta\\) under this posterior distribution. To show that the ridge estimate is the mode we can again find the maximum by maximizing the log of the posterior. The log is \\[ \\log{p(\\beta|X,Y)} \\propto \\log{\\left[\\left(\\frac{1}{\\sqrt{2\\pi}\\sigma}\\right)^n \\left(\\frac{1}{\\sqrt{2\\pi c}}\\right)^p \\right ]} - \\left(\\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right) \\] We can maximize (with respect to \\(\\beta\\)) by ignoring the first term and minimizing the second term, i.e. we minimize: \\[ \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\sum_i^n \\epsilon_i^2 + \\frac{1}{2c}\\sum_j^p\\beta_j^2 \\right)\\\\ = \\argmin_\\beta \\left( \\frac{1}{2\\sigma^2} \\left( \\sum_i^n \\epsilon_i^2 + \\frac{\\sigma^2}{c}\\sum_j^p\\beta_j^2 \\right) \\right) \\] As above, if \\(RSS = \\sum_i^n \\epsilon_i^2\\) and if we set \\(\\lambda = \\frac{\\sigma^2}{c}\\), we can see that the mode corresponds to ridge optimization. Moreover, this posterior is Gaussian in \\(\\beta\\) (its log is a quadratic function of \\(\\beta\\)), and for a Gaussian distribution the mode and the mean coincide, so the ridge estimate is also the posterior mean. 6.2 Applied 6.2.1 Question 8 In this exercise, we will generate simulated data, and will then use this data to perform best subset selection. Use the rnorm() function to generate a predictor \\(X\\) of length \\(n = 100\\), as well as a noise vector \\(\\epsilon\\) of length \\(n = 100\\). library(ISLR2) library(glmnet) library(leaps) library(pls) set.seed(42) x <- rnorm(100) ep <- rnorm(100) Generate a response vector \\(Y\\) of length \\(n = 100\\) according to the model \\[Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon,\\] where \\(\\beta_0, \\beta_1, \\beta_2,\\) and \\(\\beta_3\\) are constants of your choice.
y <- 2 + 3 * x - 2 * x^2 + 0.5 * x^3 + ep Use the regsubsets() function to perform best subset selection in order to choose the best model containing the predictors \\(X, X^2, ..., X^{10}\\). What is the best model obtained according to \\(C_p\\), BIC, and adjusted \\(R^2\\)? Show some plots to provide evidence for your answer, and report the coefficients of the best model obtained. Note you will need to use the data.frame() function to create a single data set containing both \\(X\\) and \\(Y\\). dat <- data.frame(x, y) summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " Repeat (c), using forward stepwise selection and also using backwards stepwise selection. How does your answer compare to the results in (c)? 
summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "forward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: forward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" " " ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward")) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat, method = "backward") ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: backward ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" "*" ## 3 ( 1 ) "*" "*" ## 4 ( 1 ) "*" "*" ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " " " ## 7 ( 1 ) " " " " ## 8 ( 1 ) " " " " ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" Now fit a lasso model to the simulated data, again using 
\\(X, X^2, ..., X^{10}\\) as predictors. Use cross-validation to select the optimal value of \\(\\lambda\\). Create plots of the cross-validation error as a function of \\(\\lambda\\). Report the resulting coefficient estimates, and discuss the results obtained. res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 0.09804219 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = res$lambda.min) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.8457308 ## 1 2.9092918 ## 2 -1.9287428 ## 3 0.5161012 ## 4 . ## 5 . ## 6 . ## 7 . ## 8 . ## 9 . ## 10 . When fitting lasso, the model that minimizes MSE uses three predictors (as per the simulation). The coefficients estimated (2.9, -1.9 and 0.5) are similar to those used in the simulation. Now generate a response vector \\(Y\\) according to the model \\[Y = \\beta_0 + \\beta_7X^7 + \\epsilon,\\] and perform best subset selection and the lasso. Discuss the results obtained. dat$y <- 2 - 2 * x^2 + 0.2 * x^7 + ep summary(regsubsets(y ~ poly(x, 10, raw = TRUE), data = dat)) ## Subset selection object ## Call: regsubsets.formula(y ~ poly(x, 10, raw = TRUE), data = dat) ## 10 Variables (and intercept) ## Forced in Forced out ## poly(x, 10, raw = TRUE)1 FALSE FALSE ## poly(x, 10, raw = TRUE)2 FALSE FALSE ## poly(x, 10, raw = TRUE)3 FALSE FALSE ## poly(x, 10, raw = TRUE)4 FALSE FALSE ## poly(x, 10, raw = TRUE)5 FALSE FALSE ## poly(x, 10, raw = TRUE)6 FALSE FALSE ## poly(x, 10, raw = TRUE)7 FALSE FALSE ## poly(x, 10, raw = TRUE)8 FALSE FALSE ## poly(x, 10, raw = TRUE)9 FALSE FALSE ## poly(x, 10, raw = TRUE)10 FALSE FALSE ## 1 subsets of each size up to 8 ## Selection Algorithm: exhaustive ## poly(x, 10, raw = TRUE)1 poly(x, 10, raw = TRUE)2 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " "*" ## 3 ( 1 ) " " "*" ## 4 ( 1 ) " " "*" ## 5 ( 1 ) " " "*" ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)3 poly(x, 10, raw = TRUE)4 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" "*" ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" " " ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)5 poly(x, 10, raw = TRUE)6 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" "*" ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" ## poly(x, 10, raw = TRUE)7 poly(x, 10, raw = TRUE)8 ## 1 ( 1 ) "*" " " ## 2 ( 1 ) "*" " " ## 3 ( 1 ) "*" " " ## 4 ( 1 ) " " " " ## 5 ( 1 ) " " " " ## 6 ( 1 ) " " "*" ## 7 ( 1 ) " " "*" ## 8 ( 1 ) " " "*" ## poly(x, 10, raw = TRUE)9 poly(x, 10, raw = TRUE)10 ## 1 ( 1 ) " " " " ## 2 ( 1 ) " " " " ## 3 ( 1 ) " " " " ## 4 ( 1 ) "*" " " ## 5 ( 1 ) "*" " " ## 6 ( 1 ) "*" " " ## 7 ( 1 ) "*" "*" ## 8 ( 1 ) "*" "*" res <- cv.glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1) (best <- res$lambda.min) ## [1] 1.126906 plot(res) out <- glmnet(poly(dat$x, 10, raw = TRUE), dat$y, alpha = 1, lambda = best) predict(out, type = "coefficients", s = best) ## 11 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) 1.061389580 ## 1 . ## 2 -0.883080980 ## 3 . ## 4 -0.121018425 ## 5 0.028984084 ## 6 -0.009540039 ## 7 0.188796928 ## 8 . ## 9 . ## 10 . When fitting lasso, the model does not perfectly replicate the simulation (coefficients are retained for powers of \\(x\\) that were not simulated). 6.2.2 Question 9 In this exercise, we will predict the number of applications received using the other variables in the College data set. 
Split the data set into a training set and a test set. set.seed(42) train <- sample(nrow(College), nrow(College) * 2 / 3) test <- setdiff(seq_len(nrow(College)), train) mse <- list() Fit a linear model using least squares on the training set, and report the test error obtained. fit <- lm(Apps ~ ., data = College[train, ]) (mse$lm <- mean((predict(fit, College[test, ]) - College$Apps[test])^2)) ## [1] 1695269 Fit a ridge regression model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit2 <- cv.glmnet(mm, College$Apps[train], alpha = 0) p <- predict(fit2, model.matrix(Apps ~ ., data = College[test, ]), s = fit2$lambda.min) (mse$ridge <- mean((p - College$Apps[test])^2)) ## [1] 2804369 Fit a lasso model on the training set, with \\(\\lambda\\) chosen by cross-validation. Report the test error obtained, along with the number of non-zero coefficient estimates. mm <- model.matrix(Apps ~ ., data = College[train, ]) fit3 <- cv.glmnet(mm, College$Apps[train], alpha = 1) p <- predict(fit3, model.matrix(Apps ~ ., data = College[test, ]), s = fit3$lambda.min) (mse$lasso <- mean((p - College$Apps[test])^2)) ## [1] 1822322 Fit a PCR model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit4 <- pcr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, College[test, ], ncomp = 17) (mse$pcr <- mean((p - College$Apps[test])^2)) ## [1] 1695269 Fit a PLS model on the training set, with \\(M\\) chosen by cross-validation. Report the test error obtained, along with the value of \\(M\\) selected by cross-validation. fit5 <- plsr(Apps ~ ., data = College[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, College[test, ], ncomp = 12) (mse$pls <- mean((p - College$Apps[test])^2)) ## [1] 1696902 Comment on the results obtained. How accurately can we predict the number of college applications received? Is there much difference among the test errors resulting from these five approaches? barplot(unlist(mse), ylab = "Test MSE", horiz = TRUE) The test errors are broadly similar, but the lowest values come from least squares and PCR (the PCR fit uses all \\(M = 17\\) components, making it equivalent to least squares); PLS and the lasso are close behind, while ridge regression performs worst here (in this specific case with this specific seed). 6.2.3 Question 10 We have seen that as the number of features used in a model increases, the training error will necessarily decrease, but the test error may not. We will now explore this in a simulated data set. Generate a data set with \\(p = 20\\) features, \\(n = 1,000\\) observations, and an associated quantitative response vector generated according to the model \\(Y = X\\beta + \\epsilon\\), where \\(\\beta\\) has some elements that are exactly equal to zero. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:4] <- c(5, 4, 2, 7) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y Split your data set into a training set containing 100 observations and a test set containing 900 observations. train <- dat[1:100, ] test <- dat[101:1000, ] Perform best subset selection on the training set, and plot the training set MSE associated with the best model of each size.
fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 9 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" " " " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " "*" ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) "*" " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" plot(summary(fit)$rss / 100, ylab = "MSE", type = "o") Plot the test set MSE associated with the best model of each size. predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } mse <- sapply(1:20, function(i) mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) For which model size does the test set MSE take on its minimum value? Comment on your results. 
If it takes on its minimum value for a model containing only an intercept or a model containing all of the features, then play around with the way that you are generating the data in (a) until you come up with a scenario in which the test set MSE is minimized for an intermediate model size. which.min(mse) ## [1] 4 The min test MSE is found when model size is 4. This corresponds to the simulated data which has four non-zero coefficients. set.seed(42) dat <- matrix(rnorm(1000 * 20), nrow = 1000) colnames(dat) <- paste0("b", 1:20) beta <- rep(0, 20) beta[1:9] <- c(5, 4, 2, 7, 0.01, 0.001, 0.05, 0.1, 0.5) y <- colSums((t(dat) * beta)) + rnorm(1000) dat <- data.frame(dat) dat$y <- y train <- dat[1:100, ] test <- dat[101:1000, ] fit <- regsubsets(y ~ ., data = train, nvmax = 20) summary(fit) ## Subset selection object ## Call: regsubsets.formula(y ~ ., data = train, nvmax = 20) ## 20 Variables (and intercept) ## Forced in Forced out ## b1 FALSE FALSE ## b2 FALSE FALSE ## b3 FALSE FALSE ## b4 FALSE FALSE ## b5 FALSE FALSE ## b6 FALSE FALSE ## b7 FALSE FALSE ## b8 FALSE FALSE ## b9 FALSE FALSE ## b10 FALSE FALSE ## b11 FALSE FALSE ## b12 FALSE FALSE ## b13 FALSE FALSE ## b14 FALSE FALSE ## b15 FALSE FALSE ## b16 FALSE FALSE ## b17 FALSE FALSE ## b18 FALSE FALSE ## b19 FALSE FALSE ## b20 FALSE FALSE ## 1 subsets of each size up to 20 ## Selection Algorithm: exhaustive ## b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 ## 1 ( 1 ) " " " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 2 ( 1 ) "*" " " " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 3 ( 1 ) "*" "*" " " "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 4 ( 1 ) "*" "*" "*" "*" " " " " " " " " " " " " " " " " " " " " " " " " " " ## 5 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " " " ## 6 ( 1 ) "*" "*" "*" "*" " " " " " " " " "*" " " " " " " " " " " " " " " "*" ## 7 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " " " " " " " " " " " " " "*" ## 8 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " " " "*" ## 9 ( 1 ) "*" "*" "*" "*" " " " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 10 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 11 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" " " " " " " " " "*" "*" ## 12 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 13 ( 1 ) "*" "*" "*" "*" "*" " " " " "*" "*" " " "*" "*" " " " " " " "*" "*" ## 14 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " " " " " "*" "*" ## 15 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" " " "*" "*" ## 16 ( 1 ) "*" "*" "*" "*" "*" " " "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 17 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 18 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" " " "*" "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" " " "*" "*" "*" "*" "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" "*" ## b18 b19 b20 ## 1 ( 1 ) " " " " " " ## 2 ( 1 ) " " " " " " ## 3 ( 1 ) " " " " " " ## 4 ( 1 ) " " " " " " ## 5 ( 1 ) " " " " " " ## 6 ( 1 ) " " " " " " ## 7 ( 1 ) " " " " " " ## 8 ( 1 ) " " " " " " ## 9 ( 1 ) " " " " " " ## 10 ( 1 ) " " " " " " ## 11 ( 1 ) " " " " "*" ## 12 ( 1 ) " " " " "*" ## 13 ( 1 ) "*" " " "*" ## 14 ( 1 ) "*" " " "*" ## 15 ( 1 ) "*" " " "*" ## 16 ( 1 ) "*" " " "*" ## 17 ( 1 ) "*" " " "*" ## 18 ( 1 ) "*" "*" "*" ## 19 ( 1 ) "*" "*" "*" ## 20 ( 1 ) "*" "*" "*" mse <- sapply(1:20, function(i) 
mean((test$y - predict(fit, test, i))^2)) plot(mse, ylab = "MSE", type = "o", pch = 19) which.min(mse) ## [1] 5 How does the model at which the test set MSE is minimized compare to the true model used to generate the data? Comment on the coefficient values. The minimum test MSE is found at a model size of 5, even though 9 of the true coefficients are non-zero. coef(fit, id = 5) ## (Intercept) b1 b2 b3 b4 b9 ## 0.03507654 5.06180121 3.82785027 2.20434996 7.05312844 0.57032008 The large coefficient values are well estimated, but the smaller coefficients are dropped entirely. Create a plot displaying \\(\\sqrt{\\sum_{j=1}^p (\\beta_j - \\hat{\\beta}{}_j^r)^2}\\) for a range of values of \\(r\\), where \\(\\hat{\\beta}{}_j^r\\) is the \\(j\\)th coefficient estimate for the best model containing \\(r\\) coefficients. Comment on what you observe. How does this compare to the test MSE plot from (d)? names(beta) <- paste0("b", 1:20) b <- data.frame(id = names(beta), b = beta) out <- sapply(1:20, function(i) { c <- coef(fit, id = i)[-1] c <- data.frame(id = names(c), c = c) m <- merge(b, c) sqrt(sum((m$b - m$c)^2)) }) plot(out, ylab = "Root squared coefficient error", type = "o", pch = 19) The error of the coefficient estimates is minimized when the model size is 5. This corresponds to the model size at which the test MSE was also minimized. 6.2.4 Question 11 We will now try to predict per capita crime rate in the Boston data set. Try out some of the regression methods explored in this chapter, such as best subset selection, the lasso, ridge regression, and PCR. Present and discuss results for the approaches that you consider. set.seed(1) train <- sample(nrow(Boston), nrow(Boston) * 2 / 3) test <- setdiff(seq_len(nrow(Boston)), train) hist(log(Boston$crim)) Propose a model (or set of models) that seem to perform well on this data set, and justify your answer. Make sure that you are evaluating model performance using validation set error, cross-validation, or some other reasonable alternative, as opposed to using training error. We will fit models to log(Boston$crim), which is closer to a normal distribution. fit <- lm(log(crim) ~ ., data = Boston[train, ]) mean((predict(fit, Boston[test, ]) - log(Boston$crim[test]))^2) ## [1] 0.66578 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit2 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 0) p <- predict(fit2, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit2$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6511807 mm <- model.matrix(log(crim) ~ ., data = Boston[train, ]) fit3 <- cv.glmnet(mm, log(Boston$crim[train]), alpha = 1) p <- predict(fit3, model.matrix(log(crim) ~ ., data = Boston[test, ]), s = fit3$lambda.min) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6494337 fit4 <- pcr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit4, val.type = "MSEP") p <- predict(fit4, Boston[test, ], ncomp = 8) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6561043 fit5 <- plsr(log(crim) ~ ., data = Boston[train, ], scale = TRUE, validation = "CV") validationplot(fit5, val.type = "MSEP") p <- predict(fit5, Boston[test, ], ncomp = 6) mean((p - log(Boston$crim[test]))^2) ## [1] 0.6773353 In this case the lasso (alpha = 1) seems to perform very slightly better than un-penalized regression. Some coefficients have been dropped: coef(fit3, s = fit3$lambda.min) ## 14 x 1 sparse Matrix of class "dgCMatrix" ## s1 ## (Intercept) -4.713172675 ## (Intercept) . ## zn -0.011043739 ## indus 0.022515402 ## chas . ## nox 3.856157215 ## rm .
## age 0.004210529 ## dis . ## rad 0.145604750 ## tax . ## ptratio -0.031787696 ## lstat 0.036112321 ## medv 0.004304181 Does your chosen model involve all of the features in the data set? Why or why not? Not all features are included: the lasso penalty has shrunk the coefficients for chas, rm, dis and tax exactly to zero. "],["moving-beyond-linearity.html", "7 Moving Beyond Linearity 7.1 Conceptual 7.2 Applied", " 7 Moving Beyond Linearity 7.1 Conceptual 7.1.1 Question 1 It was mentioned in the chapter that a cubic regression spline with one knot at \\(\\xi\\) can be obtained using a basis of the form \\(x, x^2, x^3, (x-\\xi)^3_+\\), where \\((x-\\xi)^3_+ = (x-\\xi)^3\\) if \\(x>\\xi\\) and equals 0 otherwise. We will now show that a function of the form \\[ f(x)=\\beta_0 +\\beta_1x+\\beta_2x^2 +\\beta_3x^3 +\\beta_4(x-\\xi)^3_+ \\] is indeed a cubic regression spline, regardless of the values of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3,\\beta_4\\). Find a cubic polynomial \\[ f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3 \\] such that \\(f(x) = f_1(x)\\) for all \\(x \\le \\xi\\). Express \\(a_1,b_1,c_1,d_1\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). In this case, for \\(x \\le \\xi\\), the \\((x-\\xi)^3_+\\) term is zero, so the cubic polynomial simply has terms \\(a_1 = \\beta_0\\), \\(b_1 = \\beta_1\\), \\(c_1 = \\beta_2\\), \\(d_1 = \\beta_3\\). Find a cubic polynomial \\[ f_2(x) = a_2 + b_2x + c_2x^2 + d_2x^3 \\] such that \\(f(x) = f_2(x)\\) for all \\(x > \\xi\\). Express \\(a_2, b_2, c_2, d_2\\) in terms of \\(\\beta_0, \\beta_1, \\beta_2, \\beta_3, \\beta_4\\). We have now established that \\(f(x)\\) is a piecewise polynomial. For \\(x \\gt \\xi\\), the cubic polynomial is (we now include the \\(\\beta_4\\) term and expand \\((x-\\xi)^3\\)): \\[\\begin{align} f_2(x) = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x-\\xi)^3 \\\\ = & \\beta_0 + \\beta_1x + \\beta_2x^2 + \\beta_3x^3 + \\beta_4(x^3 - 3x^2\\xi + 3x\\xi^2 -\\xi^3) \\\\ = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)x + (\\beta_2 - 3\\beta_4\\xi)x^2 + (\\beta_3 + \\beta_4)x^3 \\end{align}\\] Therefore, \\(a_2 = \\beta_0 - \\beta_4\\xi^3\\), \\(b_2 = \\beta_1 + 3\\beta_4\\xi^2\\), \\(c_2 = \\beta_2 - 3\\beta_4\\xi\\), \\(d_2 = \\beta_3 + \\beta_4\\). Show that \\(f_1(\\xi) = f_2(\\xi)\\). That is, \\(f(x)\\) is continuous at \\(\\xi\\). To do this, we replace \\(x\\) with \\(\\xi\\) in the above equations and simplify. \\[\\begin{align} f_1(\\xi) = \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] \\[\\begin{align} f_2(\\xi) = & \\beta_0 - \\beta_4\\xi^3 + (\\beta_1 + 3\\beta_4\\xi^2)\\xi + (\\beta_2 - 3\\beta_4\\xi)\\xi^2 + (\\beta_3 + \\beta_4)\\xi^3 \\\\ = & \\beta_0 - \\beta_4\\xi^3 + \\beta_1\\xi + 3\\beta_4\\xi^3 + \\beta_2\\xi^2 - 3\\beta_4\\xi^3 + \\beta_3\\xi^3 + \\beta_4\\xi^3 \\\\ = & \\beta_0 + \\beta_1\\xi + \\beta_2\\xi^2 + \\beta_3\\xi^3 \\end{align}\\] Show that \\(f_1'(\\xi) = f_2'(\\xi)\\). That is, \\(f'(x)\\) is continuous at \\(\\xi\\). To do this we differentiate the above with respect to \\(x\\). \\[ f_1'(x) = \\beta_1 + 2\\beta_2x + 3\\beta_3x^2 \\\\ f_1'(\\xi) = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\] \\[\\begin{align} f_2'(x) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)x + 3(\\beta_3 + \\beta_4)x^2 \\\\ f_2'(\\xi) & = \\beta_1 + 3\\beta_4\\xi^2 + 2(\\beta_2 - 3\\beta_4\\xi)\\xi + 3(\\beta_3 + \\beta_4)\\xi^2 \\\\ & = \\beta_1 + 3\\beta_4\\xi^2 + 2\\beta_2\\xi - 6\\beta_4\\xi^2 + 3\\beta_3\\xi^2 + 3\\beta_4\\xi^2 \\\\ & = \\beta_1 + 2\\beta_2\\xi + 3\\beta_3\\xi^2 \\end{align}\\] Show that \\(f_1''(\\xi) = f_2''(\\xi)\\).
That is, \\(f''(x)\\) is continuous at \\(\\xi\\). Therefore, \\(f(x)\\) is indeed a cubic spline. \\[ f_1''(x) = 2\\beta_2 + 6\\beta_3x \\\\ f_1''(\\xi) = 2\\beta_2 + 6\\beta_3\\xi \\] \\[ f_2''(x) = 2\\beta_2 - 6\\beta_4\\xi + 6(\\beta_3 + \\beta_4)x \\] \\[\\begin{align} f_2''(\\xi) & = 2\\beta_2 - 6\\beta_4\\xi + 6\\beta_3\\xi + 6\\beta_4\\xi \\\\ & = 2\\beta_2 + 6\\beta_3\\xi \\end{align}\\] Hint: Parts (d) and (e) of this problem require knowledge of single-variable calculus. As a reminder, given a cubic polynomial \\[f_1(x) = a_1 + b_1x + c_1x^2 + d_1x^3,\\] the first derivative takes the form \\[f_1'(x) = b_1 + 2c_1x + 3d_1x^2\\] and the second derivative takes the form \\[f_1''(x) = 2c_1 + 6d_1x.\\] 7.1.2 Question 2 Suppose that a curve \\(\\hat{g}\\) is computed to smoothly fit a set of \\(n\\) points using the following formula: \\[ \\DeclareMathOperator*{\\argmin}{arg\\,min} % Jan Hlavacek \\hat{g} = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(m)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\) (and \\(g^{(0)} = g\\)). Provide example sketches of \\(\\hat{g}\\) in each of the following scenarios. \\(\\lambda=\\infty, m=0\\). Here we penalize \\(g\\) itself, and an infinite \\(\\lambda\\) means that this penalty dominates. This means that \\(\\hat{g}\\) will be 0. \\(\\lambda=\\infty, m=1\\). Here we penalize the first derivative (the slope) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the slope will be 0, and \\(\\hat{g}\\) will be the constant that otherwise best fits the data, i.e. the mean of \\(y\\). \\(\\lambda=\\infty, m=2\\). Here we penalize the second derivative (the change of slope) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. Thus the fit will be a straight line (the least squares line). \\(\\lambda=\\infty, m=3\\). Here we penalize the third derivative (the change of curvature) of \\(g\\), and an infinite \\(\\lambda\\) means that this penalty dominates. The fitted curve will therefore have zero third derivative, i.e. it will be a quadratic (or lower-order) function fit by least squares. \\(\\lambda=0, m=3\\). Here we penalize the third derivative, but a value of \\(\\lambda = 0\\) means that there is no penalty. As a result, the curve is able to interpolate all points. 7.1.3 Question 3 Suppose we fit a curve with basis functions \\(b_1(X) = X\\), \\(b_2(X) = (X - 1)^2I(X \\geq 1)\\). (Note that \\(I(X \\geq 1)\\) equals 1 for \\(X \\geq 1\\) and 0 otherwise.) We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = -2\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 2\\). Note the intercepts, slopes, and other relevant information. x <- seq(-2, 2, length.out = 1000) f <- function(x) 1 + x - 2 * (x - 1)^2 * I(x >= 1) plot(x, f(x), type = "l") grid() 7.1.4 Question 4 Suppose we fit a curve with basis functions \\(b_1(X) = I(0 \\leq X \\leq 2) - (X -1)I(1 \\leq X \\leq 2),\\) \\(b_2(X) = (X -3)I(3 \\leq X \\leq 4) + I(4 \\lt X \\leq 5)\\). We fit the linear regression model \\[Y = \\beta_0 +\\beta_1b_1(X) + \\beta_2b_2(X) + \\epsilon,\\] and obtain coefficient estimates \\(\\hat{\\beta}_0 = 1, \\hat{\\beta}_1 = 1, \\hat{\\beta}_2 = 3\\). Sketch the estimated curve between \\(X = -2\\) and \\(X = 6\\). Note the intercepts, slopes, and other relevant information.
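Before plotting, it helps to work out the curve region by region. The short sketch below (a preliminary check; it defines the same basis functions used in the answer that follows) evaluates \\(f(X)\\) at one representative point per region:
b1 <- function(x) I(0 <= x & x <= 2) - (x - 1) * I(1 <= x & x <= 2)
b2 <- function(x) (x - 3) * I(3 <= x & x <= 4) + I(4 < x & x <= 5)
f <- function(x) 1 + 1 * b1(x) + 3 * b2(x)
# One point per region: outside [0, 5], the flat and sloped pieces of b1,
# the gap (2, 3), and the sloped and flat pieces of b2.
xs <- c(-1, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5)
setNames(as.numeric(f(xs)), xs)
So the curve is constant at 1 outside \\([0, 5]\\), sits at 2 on \\([0, 1]\\), declines linearly from 2 to 1 on \\([1, 2]\\), returns to 1 on \\((2, 3)\\), rises linearly from 1 to 4 on \\([3, 4]\\) and stays at 4 on \\((4, 5]\\).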
x <- seq(-2, 6, length.out = 1000) b1 <- function(x) I(0 <= x & x <= 2) - (x - 1) * I(1 <= x & x <= 2) b2 <- function(x) (x - 3) * I(3 <= x & x <= 4) + I(4 < x & x <= 5) f <- function(x) 1 + 1*b1(x) + 3*b2(x) plot(x, f(x), type = "l") grid() 7.1.5 Question 5 Consider two curves, \\(\\hat{g}_1\\) and \\(\\hat{g}_2\\), defined by \\[ \\hat{g}_1 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(3)}(x) \\right]^2 dx \\right), \\] \\[ \\hat{g}_2 = \\argmin_g \\left(\\sum_{i=1}^n (y_i - g(x_i))^2 + \\lambda \\int \\left[ g^{(4)}(x) \\right]^2 dx \\right), \\] where \\(g^{(m)}\\) represents the \\(m\\)th derivative of \\(g\\). As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training RSS? \\(\\hat{g}_2\\) is more flexible: it penalizes a higher-order derivative, so as \\(\\lambda \\to \\infty\\) it is constrained to a cubic rather than a quadratic. Being more flexible, it will have the smaller training RSS. As \\(\\lambda \\to \\infty\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller test RSS? We cannot tell which function will produce a smaller test RSS, but there is a chance that \\(\\hat{g}_1\\) will if \\(\\hat{g}_2\\) overfits the data. For \\(\\lambda = 0\\), will \\(\\hat{g}_1\\) or \\(\\hat{g}_2\\) have the smaller training and test RSS? When \\(\\lambda = 0\\) there is no penalty, so both functions will give the same result: perfect interpolation of the training data. Thus the training RSS will be 0 but the test RSS could be high. 7.2 Applied 7.2.1 Question 6 In this exercise, you will further analyze the Wage data set considered throughout this chapter. Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree \\(d\\) for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data. library(ISLR2) library(boot) library(ggplot2) set.seed(42) res <- sapply(1:6, function(i) { fit <- glm(wage ~ poly(age, i), data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) which.min(res) ## [1] 6 plot(1:6, res, xlab = "Degree", ylab = "Test MSE", type = "l") abline(v = which.min(res), col = "red", lty = 2) fit <- glm(wage ~ poly(age, which.min(res)), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(1:100, predict(fit, data.frame(age = 1:100)), type = "l", col = "red") summary(glm(wage ~ poly(age, 6), data = Wage)) ## ## Call: ## glm(formula = wage ~ poly(age, 6), data = Wage) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 111.7036 0.7286 153.316 < 2e-16 *** ## poly(age, 6)1 447.0679 39.9063 11.203 < 2e-16 *** ## poly(age, 6)2 -478.3158 39.9063 -11.986 < 2e-16 *** ## poly(age, 6)3 125.5217 39.9063 3.145 0.00167 ** ## poly(age, 6)4 -77.9112 39.9063 -1.952 0.05099 . ## poly(age, 6)5 -35.8129 39.9063 -0.897 0.36956 ## poly(age, 6)6 62.7077 39.9063 1.571 0.11620 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 1592.512) ## ## Null deviance: 5222086 on 2999 degrees of freedom ## Residual deviance: 4766389 on 2993 degrees of freedom ## AIC: 30642 ## ## Number of Fisher Scoring iterations: 2 fit1 <- lm(wage ~ poly(age, 1), data = Wage) fit2 <- lm(wage ~ poly(age, 2), data = Wage) fit3 <- lm(wage ~ poly(age, 3), data = Wage) fit4 <- lm(wage ~ poly(age, 4), data = Wage) fit5 <- lm(wage ~ poly(age, 5), data = Wage) anova(fit1, fit2, fit3, fit4, fit5) ## Analysis of Variance Table ## ## Model 1: wage ~ poly(age, 1) ## Model 2: wage ~ poly(age, 2) ## Model 3: wage ~ poly(age, 3) ## Model 4: wage ~ poly(age, 4) ## Model 5: wage ~ poly(age, 5) ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 2998 5022216 ## 2 2997 4793430 1 228786 143.5931 < 2.2e-16 *** ## 3 2996 4777674 1 15756 9.8888 0.001679 ** ## 4 2995 4771604 1 6070 3.8098 0.051046 . ## 5 2994 4770322 1 1283 0.8050 0.369682 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Cross-validation selects a degree of 6 (which.min(res) above). Testing with ANOVA, however, shows that degrees 1, 2 and 3 are highly significant while degree 4 is only marginal, suggesting that a polynomial of degree 3 or 4 is sufficient. Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained. set.seed(42) res <- sapply(2:10, function(i) { Wage$cats <- cut(Wage$age, i) fit <- glm(wage ~ cats, data = Wage) cv.glm(Wage, fit, K = 5)$delta[1] }) names(res) <- 2:10 plot(2:10, res, xlab = "Cuts", ylab = "Test MSE", type = "l") which.min(res) ## 8 ## 7 abline(v = names(which.min(res)), col = "red", lty = 2) fit <- glm(wage ~ cut(age, 8), data = Wage) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) points(18:80, predict(fit, data.frame(age = 18:80)), type = "l", col = "red") 7.2.2 Question 7 The Wage data set contains a number of other features not explored in this chapter, such as marital status (maritl), job class (jobclass), and others. Explore the relationships between some of these other predictors and wage, and use non-linear fitting techniques in order to fit flexible models to the data. Create plots of the results obtained, and write a summary of your findings. plot(Wage$year, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$age, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$maritl, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$jobclass, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) plot(Wage$education, Wage$wage, pch = 19, cex = 0.4, col = alpha("steelblue", 0.4)) We have a mix of categorical and continuous variables and also want to incorporate non-linear aspects of the continuous variables. A GAM is a good choice to model this situation. library(gam) ## Loading required package: splines ## Loading required package: foreach ## Loaded gam 1.22-4 fit0 <- gam(wage ~ s(year, 4) + s(age, 5) + education, data = Wage) fit2 <- gam(wage ~ s(year, 4) + s(age, 5) + education + maritl, data = Wage) fit1 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass, data = Wage) fit3 <- gam(wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl, data = Wage) anova(fit0, fit1, fit2, fit3) ## Analysis of Deviance Table ## ## Model 1: wage ~ s(year, 4) + s(age, 5) + education ## Model 2: wage ~ s(year, 4) + s(age, 5) + education + jobclass ## Model 3: wage ~ s(year, 4) + s(age, 5) + education + maritl ## Model 4: wage ~ s(year, 4) + s(age, 5) + education + jobclass + maritl ## Resid. Df Resid. 
Dev Df Deviance Pr(>Chi) ## 1 2986 3689770 ## 2 2985 3677553 1 12218 0.0014286 ** ## 3 2982 3595688 3 81865 1.071e-14 *** ## 4 2981 3581781 1 13907 0.0006687 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 par(mfrow = c(2, 3)) plot(fit3, se = TRUE, col = "blue") 7.2.3 Question 8 Fit some of the non-linear models investigated in this chapter to the Auto data set. Is there evidence for non-linear relationships in this data set? Create some informative plots to justify your answer. Here we want to explore a range of non-linear models. First let’s look at the relationships between the variables in the data. pairs(Auto, cex = 0.4, pch = 19) It does appear that there are some non-linear relationships (e.g. horsepower / weight and mpg). We will pick one variable (horsepower) to predict mpg and try the range of models discussed in this chapter. We will measure test MSE through cross-validation to compare the models. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ lubridate 1.9.3 ✔ tibble 3.2.1 ## ✔ purrr 1.0.2 ✔ tidyr 1.3.1 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ purrr::accumulate() masks foreach::accumulate() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ✖ purrr::when() masks foreach::when() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors set.seed(42) fit <- glm(mpg ~ horsepower, data = Auto) err <- cv.glm(Auto, fit, K = 10)$delta[1] fit1 <- glm(mpg ~ poly(horsepower, 4), data = Auto) err1 <- cv.glm(Auto, fit1, K = 10)$delta[1] q <- quantile(Auto$horsepower) Auto$hp_cats <- cut(Auto$horsepower, breaks = q, include.lowest = TRUE) fit2 <- glm(mpg ~ hp_cats, data = Auto) err2 <- cv.glm(Auto, fit2, K = 10)$delta[1] fit3 <- glm(mpg ~ bs(horsepower, df = 4), data = Auto) err3 <- cv.glm(Auto, fit3, K = 10)$delta[1] ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases ## Warning in bs(horsepower, degree = 3L, knots = 92, Boundary.knots = c(46L, : ## some 'x' values beyond boundary knots may cause ill-conditioned bases fit4 <- glm(mpg ~ ns(horsepower, 4), data = Auto) err4 <- cv.glm(Auto, fit4, K = 10)$delta[1] fit5 <- gam(mpg ~ s(horsepower, df = 4), data = Auto) # rough 10-fold cross-validation for gam. err5 <- mean(replicate(10, { b <- cut(sample(seq_along(Auto$horsepower)), 10) pred <- numeric() for (i in 1:10) { train <- b %in% levels(b)[-i] f <- gam(mpg ~ s(horsepower, df = 4), data = Auto[train, ]) pred[!train] <- predict(f, Auto[!train, ]) } mean((Auto$mpg - pred)^2) # MSE })) c(err, err1, err2, err3, err4, err5) ## [1] 24.38418 19.94222 20.37940 18.92802 19.33556 19.02999 anova(fit, fit1, fit2, fit3, fit4, fit5) ## Analysis of Deviance Table ## ## Model 1: mpg ~ horsepower ## Model 2: mpg ~ poly(horsepower, 4) ## Model 3: mpg ~ hp_cats ## Model 4: mpg ~ bs(horsepower, df = 4) ## Model 5: mpg ~ ns(horsepower, 4) ## Model 6: mpg ~ s(horsepower, df = 4) ## Resid. Df Resid. Dev Df Deviance F Pr(>F) ## 1 390 9385.9 ## 2 387 7399.5 3.00000000 1986.39 35.258 < 2.2e-16 *** ## 3 388 7805.4 -1.00000000 -405.92 21.615 4.578e-06 *** ## 4 387 7276.5 1.00000000 528.94 28.166 1.880e-07 *** ## 5 387 7248.6 0.00000000 27.91 ## 6 387 7267.7 0.00013612 -19.10 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 x <- seq(min(Auto$horsepower), max(Auto$horsepower), length.out=1000) pred <- data.frame( x = x, "Linear" = predict(fit, data.frame(horsepower = x)), "Polynomial" = predict(fit1, data.frame(horsepower = x)), "Step" = predict(fit2, data.frame(hp_cats = cut(x, breaks = q, include.lowest = TRUE))), "Regression spline" = predict(fit3, data.frame(horsepower = x)), "Natural spline" = predict(fit4, data.frame(horsepower = x)), "Smoothing spline" = predict(fit5, data.frame(horsepower = x)), check.names = FALSE ) pred <- pivot_longer(pred, -x) ggplot(Auto, aes(horsepower, mpg)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() 7.2.4 Question 9 This question uses the variables dis (the weighted mean of distances to five Boston employment centers) and nox (nitrogen oxides concentration in parts per 10 million) from the Boston data. We will treat dis as the predictor and nox as the response. Use the poly() function to fit a cubic polynomial regression to predict nox using dis. Report the regression output, and plot the resulting data and polynomial fits. fit <- glm(nox ~ poly(dis, 3), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ poly(dis, 3), data = Boston) ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 0.554695 0.002759 201.021 < 2e-16 *** ## poly(dis, 3)1 -2.003096 0.062071 -32.271 < 2e-16 *** ## poly(dis, 3)2 0.856330 0.062071 13.796 < 2e-16 *** ## poly(dis, 3)3 -0.318049 0.062071 -5.124 4.27e-07 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003852802) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9341 on 502 degrees of freedom ## AIC: -1370.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) Plot the polynomial fits for a range of different polynomial degrees (say, from 1 to 10), and report the associated residual sum of squares. fits <- lapply(1:10, function(i) glm(nox ~ poly(dis, i), data = Boston)) x <- seq(min(Boston$dis), max(Boston$dis), length.out=1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 1:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() # residual sum of squares do.call(anova, fits)[, 2] ## [1] 2.768563 2.035262 1.934107 1.932981 1.915290 1.878257 1.849484 1.835630 ## [9] 1.833331 1.832171 Perform cross-validation or another approach to select the optimal degree for the polynomial, and explain your results. res <- sapply(1:10, function(i) { fit <- glm(nox ~ poly(dis, i), data = Boston) cv.glm(Boston, fit, K = 10)$delta[1] }) which.min(res) ## [1] 4 The optimal degree is 4 based on this run of cross-validation (no seed is set here, so the selection can vary slightly between runs). Higher degrees tend to lead to overfitting. Use the bs() function to fit a regression spline to predict nox using dis. Report the output for the fit using four degrees of freedom. How did you choose the knots? Plot the resulting fit. fit <- glm(nox ~ bs(dis, df = 4), data = Boston) summary(fit) ## ## Call: ## glm(formula = nox ~ bs(dis, df = 4), data = Boston) ## ## Coefficients: ## Estimate Std. 
Error t value Pr(>|t|) ## (Intercept) 0.73447 0.01460 50.306 < 2e-16 *** ## bs(dis, df = 4)1 -0.05810 0.02186 -2.658 0.00812 ** ## bs(dis, df = 4)2 -0.46356 0.02366 -19.596 < 2e-16 *** ## bs(dis, df = 4)3 -0.19979 0.04311 -4.634 4.58e-06 *** ## bs(dis, df = 4)4 -0.38881 0.04551 -8.544 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## (Dispersion parameter for gaussian family taken to be 0.003837874) ## ## Null deviance: 6.7810 on 505 degrees of freedom ## Residual deviance: 1.9228 on 501 degrees of freedom ## AIC: -1371.9 ## ## Number of Fisher Scoring iterations: 2 plot(nox ~ dis, data = Boston, col = alpha("steelblue", 0.4), pch = 19) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) lines(x, predict(fit, data.frame(dis = x)), col = "red", lty = 2) bs() chooses knots based on quantiles of the data; with df = 4 and a cubic (degree 3) spline, a single knot is placed at the median of dis. Now fit a regression spline for a range of degrees of freedom, and plot the resulting fits and report the resulting RSS. Describe the results obtained. fits <- lapply(3:10, function(i) { glm(nox ~ bs(dis, df = i), data = Boston) }) x <- seq(min(Boston$dis), max(Boston$dis), length.out = 1000) pred <- data.frame(lapply(fits, function(fit) predict(fit, data.frame(dis = x)))) colnames(pred) <- 3:10 pred$x <- x pred <- pivot_longer(pred, !x) ggplot(Boston, aes(dis, nox)) + geom_point(color = alpha("steelblue", 0.4)) + geom_line(data = pred, aes(x, value, color = name)) + theme_bw() At high numbers of degrees of freedom the splines overfit the data (particularly at extreme ends of the distribution of the predictor variable). Perform cross-validation or another approach in order to select the best degrees of freedom for a regression spline on this data. Describe your results. set.seed(42) err <- sapply(3:10, function(i) { fit <- glm(nox ~ bs(dis, df = i), data = Boston) suppressWarnings(cv.glm(Boston, fit, K = 10)$delta[1]) }) which.min(err) ## [1] 8 The minimum is at the 8th value tested, i.e. this approach selects 10 degrees of freedom for the spline (the df values tested were 3 to 10). 7.2.5 Question 10 This question relates to the College data set. Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors. library(leaps) # helper function to predict from a regsubsets model predict.regsubsets <- function(object, newdata, id, ...) { form <- as.formula(object$call[[2]]) mat <- model.matrix(form, newdata) coefi <- coef(object, id = id) xvars <- names(coefi) mat[, xvars] %*% coefi } set.seed(42) train <- rep(TRUE, nrow(College)) train[sample(1:nrow(College), nrow(College) * 1 / 3)] <- FALSE fit <- regsubsets(Outstate ~ ., data = College[train, ], nvmax = 17, method = "forward") plot(summary(fit)$bic, type = "b") which.min(summary(fit)$bic) ## [1] 11 # or via the validation set error err <- sapply(1:17, function(i) { mean((College$Outstate[!train] - predict(fit, College[!train, ], i))^2) }) which.min(err) ## [1] 16 min(summary(fit)$bic) ## [1] -690.9375 For the sake of simplicity we'll choose a model with 6 predictors. coef(fit, id = 6) ## (Intercept) PrivateYes Room.Board PhD perc.alumni ## -3540.0544008 2736.4231642 0.9061752 33.7848157 47.1998115 ## Expend Grad.Rate ## 0.2421685 33.3137332 Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings. 
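Part (b) also asks for plots of the fitted model. A self-contained sketch of that step (it reproduces the train split from part (a) and the same GAM fitted in the answer below, then draws one panel per term, as was done for the Wage GAM in Question 7):
library(gam)
library(ISLR2)
set.seed(42)
train <- rep(TRUE, nrow(College))
train[sample(1:nrow(College), nrow(College) * 1 / 3)] <- FALSE
fit <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) +
  s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ])
par(mfrow = c(2, 3))  # one panel per model term
plot(fit, se = TRUE, col = "blue")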
fit <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) Evaluate the model obtained on the test set, and explain the results obtained. pred <- predict(fit, College[!train, ]) err_gam <- mean((College$Outstate[!train] - pred)^2) plot(err, ylim = c(min(err_gam, err), max(err)), type = "b") abline(h = err_gam, col = "red", lty = 2) # r-squared 1 - err_gam / mean((College$Outstate[!train] - mean(College$Outstate[!train]))^2) ## [1] 0.7655905 For which variables, if any, is there evidence of a non-linear relationship with the response? summary(fit) ## ## Call: gam(formula = Outstate ~ Private + s(Room.Board, 2) + s(PhD, ## 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), ## data = College[train, ]) ## Deviance Residuals: ## Min 1Q Median 3Q Max ## -7112.59 -1188.98 33.13 1238.54 8738.65 ## ## (Dispersion Parameter for gaussian family taken to be 3577008) ## ## Null Deviance: 8471793308 on 517 degrees of freedom ## Residual Deviance: 1809966249 on 506.0001 degrees of freedom ## AIC: 9300.518 ## ## Number of Local Scoring Iterations: NA ## ## Anova for Parametric Effects ## Df Sum Sq Mean Sq F value Pr(>F) ## Private 1 2327235738 2327235738 650.610 < 2.2e-16 *** ## s(Room.Board, 2) 1 1741918028 1741918028 486.976 < 2.2e-16 *** ## s(PhD, 2) 1 668408518 668408518 186.863 < 2.2e-16 *** ## s(perc.alumni, 2) 1 387819183 387819183 108.420 < 2.2e-16 *** ## s(Expend, 2) 1 625813340 625813340 174.954 < 2.2e-16 *** ## s(Grad.Rate, 2) 1 111881207 111881207 31.278 3.664e-08 *** ## Residuals 506 1809966249 3577008 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Anova for Nonparametric Effects ## Npar Df Npar F Pr(F) ## (Intercept) ## Private ## s(Room.Board, 2) 1 2.224 0.13648 ## s(PhD, 2) 1 5.773 0.01664 * ## s(perc.alumni, 2) 1 0.365 0.54581 ## s(Expend, 2) 1 61.182 3.042e-14 *** ## s(Grad.Rate, 2) 1 4.126 0.04274 * ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 The ANOVA for non-parametric effects shows strong evidence of a non-linear relationship for Expend, with weaker evidence for PhD and Grad.Rate. 7.2.6 Question 11 In Section 7.7, it was mentioned that GAMs are generally fit using a backfitting approach. The idea behind backfitting is actually quite simple. We will now explore backfitting in the context of multiple linear regression. Suppose that we would like to perform multiple linear regression, but we do not have software to do so. Instead, we only have software to perform simple linear regression. Therefore, we take the following iterative approach: we repeatedly hold all but one coefficient estimate fixed at its current value, and update only that coefficient estimate using a simple linear regression. The process is continued until convergence—that is, until the coefficient estimates stop changing. We now try this out on a toy example. Generate a response \\(Y\\) and two predictors \\(X_1\\) and \\(X_2\\), with \\(n = 100\\). set.seed(42) x1 <- rnorm(100) x2 <- rnorm(100) y <- 2 + 0.2 * x1 + 4 * x2 + rnorm(100) Initialize \\(\\hat{\\beta}_1\\) to take on a value of your choice. It does not matter what value you choose. 
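To see concretely that the starting value does not matter, here is a small sketch (it assumes x1, x2 and y from part (a) above) that runs three backfitting sweeps from two very different initializations:
for (start in c(-100, 20)) {
  beta1 <- start
  for (i in 1:3) {
    beta2 <- lm(y - beta1 * x1 ~ x2)$coef[2]  # update beta2, holding beta1 fixed
    beta1 <- lm(y - beta2 * x2 ~ x1)$coef[2]  # update beta1, holding beta2 fixed
  }
  cat("start =", start, "-> beta1 after 3 sweeps =", round(beta1, 4), "\\n")
}
Because x1 and x2 are generated independently (and so are nearly uncorrelated), both runs settle on essentially the same estimate within a few sweeps.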
beta1 <- 20 Keeping \\(\\hat{\\beta}_1\\) fixed, fit the model \\[Y - \\hat{\\beta}_1X_1 = \\beta_0 + \\beta_2X_2 + \\epsilon.\\] You can do this as follows: > a <- y - beta1 * x1 > beta2 <- lm(a ~ x2)$coef[2] a <- y - beta1*x1 beta2 <- lm(a ~ x2)$coef[2] Keeping \\(\\hat{\\beta}_2\\) fixed, fit the model \\[Y - \\hat{\\beta}_2X_2 = \\beta_0 + \\beta_1 X_1 + \\epsilon.\\] You can do this as follows: > a <- y - beta2 * x2 > beta1 <- lm(a ~ x1)$coef[2] a <- y - beta2 * x2 beta1 <- lm(a ~ x1)$coef[2] Write a for loop to repeat (c) and (d) 1,000 times. Report the estimates of \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) at each iteration of the for loop. Create a plot in which each of these values is displayed, with \\(\\hat{\\beta}_0, \\hat{\\beta}_1,\\) and \\(\\hat{\\beta}_2\\) each shown in a different color. res <- matrix(NA, nrow = 1000, ncol = 3) colnames(res) <- c("beta0", "beta1", "beta2") beta1 <- 20 for (i in 1:1000) { beta2 <- lm(y - beta1*x1 ~ x2)$coef[2] beta1 <- lm(y - beta2*x2 ~ x1)$coef[2] beta0 <- lm(y - beta2*x2 ~ x1)$coef[1] res[i, ] <- c(beta0, beta1, beta2) } res <- as.data.frame(res) res$Iteration <- 1:1000 res <- pivot_longer(res, !Iteration) p <- ggplot(res, aes(x=Iteration, y=value, color=name)) + geom_line() + geom_point() + scale_x_continuous(trans = "log10") p Compare your answer in (e) to the results of simply performing multiple linear regression to predict \\(Y\\) using \\(X_1\\) and \\(X_2\\). Use the abline() function to overlay those multiple linear regression coefficient estimates on the plot obtained in (e). fit <- lm(y ~ x1 + x2) coef(fit) ## (Intercept) x1 x2 ## 2.00176627 0.05629075 4.08529318 p + geom_hline(yintercept = coef(fit), lty = 2) On this data set, how many backfitting iterations were required in order to obtain a “good” approximation to the multiple regression coefficient estimates? In this case, good estimates were obtained after 3 iterations. 7.2.7 Question 12 This problem is a continuation of the previous exercise. In a toy example with \\(p = 100\\), show that one can approximate the multiple linear regression coefficient estimates by repeatedly performing simple linear regression in a backfitting procedure. How many backfitting iterations are required in order to obtain a “good” approximation to the multiple regression coefficient estimates? Create a plot to justify your answer. 
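Rather than judging convergence purely by eye from a plot, one alternative (a sketch; max_delta is a hypothetical helper, not part of the answer below) is to track the largest coefficient change between successive backfitting sweeps and declare convergence once it falls below a tolerance:
# beta_path: an (iterations x p) matrix of coefficient estimates, one row per
# backfitting sweep, like the matrix returned by backfit() in the answer below.
max_delta <- function(beta_path) {
  apply(abs(diff(beta_path)), 1, max)  # max absolute change at each sweep
}
# e.g. which(max_delta(res) < 1e-6)[1] gives the first 'converged' sweep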
set.seed(42) p <- 100 n <- 1000 betas <- rnorm(p) * 5 x <- matrix(rnorm(n * p), ncol = p, nrow = n) y <- (x %*% betas) + rnorm(n) # ignore beta0 for simplicity # multiple regression fit <- lm(y ~ x - 1) coef(fit) ## x1 x2 x3 x4 x5 x6 ## 6.9266184 -2.8428817 1.8686821 3.1466472 1.9601927 -0.5529214 ## x7 x8 x9 x10 x11 x12 ## 7.4786723 -0.4454637 10.0816005 -0.2391234 6.5832468 11.4451280 ## x13 x14 x15 x16 x17 x18 ## -6.9684368 -1.3604495 -0.6310041 3.1786639 -1.4470502 -13.2957027 ## x19 x20 x21 x22 x23 x24 ## -12.2061834 6.5765842 -1.5227262 -8.8855906 -0.8422954 6.1189230 ## x25 x26 x27 x28 x29 x30 ## 9.4395267 -2.1697854 -1.2738835 -8.8457987 2.2851699 -3.1922704 ## x31 x32 x33 x34 x35 x36 ## 2.2812995 3.4695892 5.1162617 -3.0423873 2.4985589 -8.5952764 ## x37 x38 x39 x40 x41 x42 ## -3.9539370 -4.2616463 -12.0038342 0.1981058 1.0559250 -1.8205017 ## x43 x44 x45 x46 x47 x48 ## 3.7739990 -3.6240020 -6.8575534 2.1042998 -4.0228773 7.1880298 ## x49 x50 x51 x52 x53 x54 ## -2.1967821 3.3137115 1.6406524 -3.9402065 7.9067705 3.1815846 ## x55 x56 x57 x58 x59 x60 ## 0.4504175 1.4003479 3.3999814 0.4317695 -14.9255798 1.3816878 ## x61 x62 x63 x64 x65 x66 ## -1.8071634 0.9907740 2.9771540 6.9528872 -3.5956916 6.5283946 ## x67 x68 x69 x70 x71 x72 ## 1.6798820 5.1911857 4.5573256 3.5961319 -5.1909352 -0.4869003 ## x73 x74 x75 x76 x77 x78 ## 3.1472166 -4.7898363 -2.7402076 2.9247173 3.8659938 2.3686379 ## x79 x80 x81 x82 x83 x84 ## -4.4261734 -5.5020688 7.5807239 1.3010702 0.4378713 -0.5856580 ## x85 x86 x87 x88 x89 x90 ## -5.9799328 3.0089329 -1.1230969 -0.8857679 4.7211363 4.1042952 ## x91 x92 x93 x94 x95 x96 ## 6.9492037 -2.3959211 3.2188522 6.9947040 -5.5392641 -4.3114784 ## x97 x98 x99 x100 ## -5.7287292 -7.3148812 0.3454408 3.2830658 # backfitting backfit <- function(x, y, iter = 20) { beta <- matrix(0, ncol = ncol(x), nrow = iter + 1) for (i in 1:iter) { for (k in 1:ncol(x)) { residual <- y - (x[, -k] %*% beta[i, -k]) beta[i + 1, k] <- lm(residual ~ x[, k])$coef[2] } } beta[-1, ] } res <- backfit(x, y) error <- rowMeans(sweep(res, 2, betas)^2) plot(error, log = "x", type = "b") # backfitting error error[length(error)] ## [1] 0.001142494 # lm error mean((coef(fit) - betas)^2) ## [1] 0.001138655 We need around 5 to 6 iterations to obtain a good estimate of the coefficients. "],["tree-based-methods.html", "8 Tree-Based Methods 8.1 Conceptual 8.2 Applied", " 8 Tree-Based Methods 8.1 Conceptual 8.1.1 Question 1 Draw an example (of your own invention) of a partition of two-dimensional feature space that could result from recursive binary splitting. Your example should contain at least six regions. Draw a decision tree corresponding to this partition. Be sure to label all aspects of your figures, including the regions \\(R_1, R_2, ...,\\) the cutpoints \\(t_1, t_2, ...,\\) and so forth. Hint: Your result should look something like Figures 8.1 and 8.2. 
library(showtext) showtext::showtext_auto() library(ggplot2) library(tidyverse) library(ggtree) tree <- ape::read.tree(text = "(((R1:1,R2:1)N1:2,R3:4)N2:2,(R4:2,(R5:1,R6:1)N3:2)N4:5)R;") tree$node.label <- c("Age < 40", "Weight < 100", "Weight < 70", "Age < 60", "Weight < 80") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) plot(NULL, xlab="Age (years)", ylab="Weight (kg)", xlim = c(0, 100), ylim = c(40, 160), xaxs = "i", yaxs = "i") abline(v = 40, col = "red", lty = 2) lines(c(0, 40), c(100, 100), col = "blue", lty = 2) lines(c(0, 40), c(70, 70), col = "blue", lty = 2) abline(v = 60, col = "red", lty = 2) lines(c(60, 100), c(80, 80), col = "blue", lty = 2) text( c(20, 20, 20, 50, 80, 80), c(55, 85, 130, 100, 60, 120), labels = c("R1", "R2", "R3", "R4", "R5", "R6") ) 8.1.2 Question 2 It is mentioned in Section 8.2.3 that boosting using depth-one trees (or stumps) leads to an additive model: that is, a model of the form \\[ f(X) = \\sum_{j=1}^p f_j(X_j). \\] Explain why this is the case. You can begin with (8.12) in Algorithm 8.2. Equation 8.12 is: \\[ f(x) = \\sum_{b=1}^B \\lambda \\hat{f}^b(x) \\] where \\(\\hat{f}^b(x)\\) represents the \\(b\\)th tree with (in this case) 1 split. Since each depth-one tree is a function of a single predictor, and the total function is just the sum of the individual trees, grouping the stumps by the predictor they split on gives one function \\(f_j(X_j)\\) per predictor, i.e. the model is additive. Depth 2 trees would allow for interactions between two variables. 8.1.3 Question 3 Consider the Gini index, classification error, and cross-entropy in a simple classification setting with two classes. Create a single plot that displays each of these quantities as a function of \\(\\hat{p}_{m1}\\). The \\(x\\)-axis should display \\(\\hat{p}_{m1}\\), ranging from 0 to 1, and the \\(y\\)-axis should display the value of the Gini index, classification error, and entropy. Hint: In a setting with two classes, \\(\\hat{p}_{m1} = 1 - \\hat{p}_{m2}\\). You could make this plot by hand, but it will be much easier to make in R. The Gini index is defined by \\[G = \\sum_{k=1}^{K} \\hat{p}_{mk}(1 - \\hat{p}_{mk})\\] Entropy is given by \\[D = -\\sum_{k=1}^{K} \\hat{p}_{mk}\\log(\\hat{p}_{mk})\\] The classification error is \\[E = 1 - \\max_k(\\hat{p}_{mk})\\] # These definitions are for the two-class case only p <- seq(0, 1, length.out = 100) data.frame( x = p, "Gini index" = p * (1 - p) * 2, "Entropy" = -(p * log(p) + (1 - p) * log(1 - p)), "Classification error" = 1 - pmax(p, 1 - p), check.names = FALSE ) |> pivot_longer(!x) |> ggplot(aes(x = x, y = value, color = name)) + geom_line(na.rm = TRUE) 8.1.4 Question 4 This question relates to the plots in Figure 8.12. Sketch the tree corresponding to the partition of the predictor space illustrated in the left-hand panel of Figure 8.12. The numbers inside the boxes indicate the mean of \\(Y\\) within each region. tree <- ape::read.tree(text = "(((3:1.5,(10:1,0:1)A:1)B:1,15:2)C:1,5:2)D;") tree$node.label <- c("X1 < 1", "X2 < 1", "X1 < 0", "X2 < 0") ggtree(tree, ladderize = FALSE) + scale_x_reverse() + coord_flip() + geom_tiplab(vjust = 2, hjust = 0.5) + geom_text2(aes(label=label, subset=!isTip), hjust = -0.1, vjust = -1) Create a diagram similar to the left-hand panel of Figure 8.12, using the tree illustrated in the right-hand panel of the same figure. You should divide up the predictor space into the correct regions, and indicate the mean for each region. 
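As a cross-check on the diagram drawn below, the right-hand tree of Figure 8.12 can also be encoded directly as a prediction function; the split points and region means here are exactly those used in the answer that follows (a sketch):
predict_tree <- function(X1, X2) {
  ifelse(X2 < 1,
    ifelse(X1 < 1, -1.80, 0.63),
    ifelse(X2 < 2,
      ifelse(X1 < 0, -1.06, 0.21),
      2.49
    )
  )
}
# One query point per region, matching the labels placed in the diagram below:
predict_tree(c(0, 1.5, -0.5, 1, 0.5), c(0.5, 0.5, 1.5, 1.5, 2.5))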
plot(NULL, xlab="X1", ylab="X2", xlim = c(-1, 2), ylim = c(0, 3), xaxs = "i", yaxs = "i") abline(h = 1, col = "red", lty = 2) lines(c(1, 1), c(0, 1), col = "blue", lty = 2) lines(c(-1, 2), c(2, 2), col = "red", lty = 2) lines(c(0, 0), c(1, 2), col = "blue", lty = 2) text( c(0, 1.5, -0.5, 1, 0.5), c(0.5, 0.5, 1.5, 1.5, 2.5), labels = c("-1.80", "0.63", "-1.06", "0.21", "2.49") ) 8.1.5 Question 5 Suppose we produce ten bootstrapped samples from a data set containing red and green classes. We then apply a classification tree to each bootstrapped sample and, for a specific value of \\(X\\), produce 10 estimates of \\(P(\\textrm{Class is Red}|X)\\): \\[0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, \\textrm{and } 0.75.\\] There are two common ways to combine these results together into a single class prediction. One is the majority vote approach discussed in this chapter. The second approach is to classify based on the average probability. In this example, what is the final classification under each of these two approaches? x <- c(0.1, 0.15, 0.2, 0.2, 0.55, 0.6, 0.6, 0.65, 0.7, 0.75) ifelse(mean(x > 0.5) > 0.5, "red", "green") # majority vote ## [1] "red" ifelse(mean(x) > 0.5, "red", "green") # average probability ## [1] "green" 8.1.6 Question 6 Provide a detailed explanation of the algorithm that is used to fit a regression tree. First we perform recursive binary splitting of the data, choosing at each step the split that most reduces the RSS. This is continued until each leaf contains fewer than some minimum number of observations. We then apply cost-complexity pruning, which produces a nested sequence of subtrees indexed by a parameter \\(\\alpha\\). Using K-fold CV, we select the \\(\\alpha\\) that minimizes the cross-validation error. The final tree is then obtained by pruning the tree grown on the complete dataset with the selected \\(\\alpha\\) value. (A minimal code sketch of this pipeline is given at the start of Question 8 below.) 8.2 Applied 8.2.1 Question 7 In the lab, we applied random forests to the Boston data using mtry = 6 and using ntree = 25 and ntree = 500. Create a plot displaying the test error resulting from random forests on this data set for a more comprehensive range of values for mtry and ntree. You can model your plot after Figure 8.10. Describe the results obtained. library(ISLR2) library(randomForest) ## randomForest 4.7-1.1 ## Type rfNews() to see new features/changes/bug fixes. ## ## Attaching package: 'randomForest' ## The following object is masked from 'package:ggtree': ## ## margin ## The following object is masked from 'package:dplyr': ## ## combine ## The following object is masked from 'package:ggplot2': ## ## margin set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Boston), replace = TRUE) rf_err <- function(mtry) { randomForest( Boston[train, -13], y = Boston[train, 13], xtest = Boston[!train, -13], ytest = Boston[!train, 13], mtry = mtry, ntree = 500 )$test$mse } res <- lapply(c(1, 2, 3, 5, 7, 10, 12), rf_err) names(res) <- c(1, 2, 3, 5, 7, 10, 12) data.frame(res, check.names = FALSE) |> mutate(n = 1:500) |> pivot_longer(!n) |> ggplot(aes(x = n, y = value, color = name)) + geom_line(na.rm = TRUE) + xlab("Number of trees") + ylab("Error") + scale_y_log10() + scale_color_discrete(name = "No. variables at\\neach split") 8.2.2 Question 8 In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable. Split the data set into a training set and a test set. 
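Before working through this, here is a minimal code sketch of the grow, cross-validate and prune pipeline described in Question 6 above, using the tree package (the Boston data are used purely for illustration):
library(tree)
library(ISLR2)
set.seed(42)
fit <- tree(medv ~ ., data = Boston)    # grow by recursive binary splitting
cv <- cv.tree(fit)                      # K-fold CV over the pruning sequence
best <- cv$size[which.min(cv$dev)]      # subtree size minimizing CV deviance
pruned <- prune.tree(fit, best = best)  # final cost-complexity pruned tree
The same functions are applied to the Carseats data in the answers that follow.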
set.seed(42) train <- sample(c(TRUE, FALSE), nrow(Carseats), replace = TRUE) Fit a regression tree to the training set. Plot the tree, and interpret the results. What test error rate do you obtain? library(tree) tr <- tree(Sales ~ ., data = Carseats[train, ]) summary(tr) ## ## Regression tree: ## tree(formula = Sales ~ ., data = Carseats[train, ]) ## Variables actually used in tree construction: ## [1] "ShelveLoc" "Price" "Income" "Advertising" "CompPrice" ## [6] "Age" ## Number of terminal nodes: 16 ## Residual mean deviance: 2.356 = 424.1 / 180 ## Distribution of residuals: ## Min. 1st Qu. Median Mean 3rd Qu. Max. ## -4.54900 -0.82980 0.03075 0.00000 0.89250 4.83100 plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) carseats_mse <- function(model) { p <- predict(model, newdata = Carseats[!train, ]) mean((p - Carseats[!train, "Sales"])^2) } carseats_mse(tr) ## [1] 4.559764 Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test error rate? res <- cv.tree(tr) plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") In this case pruning does not improve the test error (it rises slightly, from ~4.56 to ~4.63), and the effect is not repeatable in different rounds of cross-validation. Arguably, a good balance between complexity and fit is achieved when the tree size is 11. ptr <- prune.tree(tr, best = 11) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) carseats_mse(ptr) ## [1] 4.625875 Use the bagging approach in order to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. # Here we can use randomForest() with mtry = 10 = p (the number of predictor # variables) to perform bagging bagged <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 10, ntree = 200, importance = TRUE) carseats_mse(bagged) ## [1] 2.762861 importance(bagged) ## %IncMSE IncNodePurity ## CompPrice 11.2608998 104.474222 ## Income 5.0953983 73.275066 ## Advertising 12.9011190 125.886762 ## Population 3.4071044 60.095200 ## Price 34.6904380 450.952728 ## ShelveLoc 33.7059874 374.808575 ## Age 7.9101141 143.652934 ## Education -2.1154997 32.712444 ## Urban 0.9604097 7.029648 ## US 3.1336559 6.287048 The test error rate is ~2.8, which is a substantial improvement over the pruned regression tree above. Use random forests to analyze this data. What test error rate do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of \\(m\\), the number of variables considered at each split, on the error rate obtained. rf <- randomForest(Sales ~ ., data = Carseats[train, ], mtry = 3, ntree = 500, importance = TRUE) carseats_mse(rf) ## [1] 3.439357 importance(rf) ## %IncMSE IncNodePurity ## CompPrice 8.5717587 122.75189 ## Income 2.8955756 116.33951 ## Advertising 13.0681194 128.13563 ## Population 2.0475415 104.03803 ## Price 34.7934136 342.84663 ## ShelveLoc 39.0704834 292.56638 ## Age 7.7941744 135.69061 ## Education 0.8770806 64.67614 ## Urban -0.3301478 13.83594 ## US 6.2716539 22.07306 The test error rate is ~3.4, which is a substantial improvement over the pruned regression tree above, although not quite as good as the bagging approach. (Comparing mtry = 3 here with mtry = 10 above, a larger \\(m\\) reduces the test error on this data set.) Now analyze the data using BART, and report your results. 
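Unlike randomForest() or gbm(), gbart() produces test-set predictions at fit time via its x.test argument, which is why the answer below defines a small predict() shim. The general usage pattern (a sketch with placeholder names xtrain, ytrain and xtest) is:
# fit <- gbart(xtrain, ytrain, x.test = xtest)  # MCMC runs at fit time
# fit$yhat.test        # posterior draws: one row per MCMC sample
# fit$yhat.test.mean   # posterior-mean point predictions for the test set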
library(BART) ## Loading required package: nlme ## ## Attaching package: 'nlme' ## The following object is masked from 'package:ggtree': ## ## collapse ## The following object is masked from 'package:dplyr': ## ## collapse ## Loading required package: survival # For ease, we'll create a fake "predict" method that just returns # yhat.test.mean regardless of provided "newdata" predict.wbart <- function(model, ...) model$yhat.test.mean bartfit <- gbart(Carseats[train, 2:11], Carseats[train, 1], x.test = Carseats[!train, 2:11]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 196, 14, 204 ## y1,yn: 2.070867, 2.280867 ## x1,x[n*p]: 138.000000, 1.000000 ## xp1,xp[np*p]: 141.000000, 1.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 58 ... 1 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,0.287616,3,0.21118,7.42913 ## *****sigma: 1.041218 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,14,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 2s ## trcnt,tecnt: 1000,1000 carseats_mse(bartfit) ## [1] 1.631285 The test error rate is ~1.6 which is an improvement over random forest and bagging. 8.2.3 Question 9 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(1:nrow(OJ), 800) test <- setdiff(1:nrow(OJ), train) Fit a tree to the training data, with Purchase as the response and the other variables except for Buy as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have? tr <- tree(Purchase ~ ., data = OJ[train, ]) summary(tr) ## ## Classification tree: ## tree(formula = Purchase ~ ., data = OJ[train, ]) ## Variables actually used in tree construction: ## [1] "LoyalCH" "SalePriceMM" "PriceDiff" ## Number of terminal nodes: 8 ## Residual mean deviance: 0.7392 = 585.5 / 792 ## Misclassification error rate: 0.1638 = 131 / 800 Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed. 
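The printed tree below can also be inspected programmatically; a sketch (assuming the fitted object tr from above):
# Each row of tr$frame describes one node: the split variable ("<leaf>" for
# terminal nodes), the node size n, its deviance and the fitted class.
head(tr$frame)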
tr ## node), split, n, deviance, yval, (yprob) ## * denotes terminal node ## ## 1) root 800 1066.00 CH ( 0.61500 0.38500 ) ## 2) LoyalCH < 0.48285 285 296.00 MM ( 0.21404 0.78596 ) ## 4) LoyalCH < 0.064156 64 0.00 MM ( 0.00000 1.00000 ) * ## 5) LoyalCH > 0.064156 221 260.40 MM ( 0.27602 0.72398 ) ## 10) SalePriceMM < 2.04 128 123.50 MM ( 0.18750 0.81250 ) * ## 11) SalePriceMM > 2.04 93 125.00 MM ( 0.39785 0.60215 ) * ## 3) LoyalCH > 0.48285 515 458.10 CH ( 0.83689 0.16311 ) ## 6) LoyalCH < 0.753545 230 282.70 CH ( 0.69565 0.30435 ) ## 12) PriceDiff < 0.265 149 203.00 CH ( 0.57718 0.42282 ) ## 24) PriceDiff < -0.165 32 38.02 MM ( 0.28125 0.71875 ) * ## 25) PriceDiff > -0.165 117 150.30 CH ( 0.65812 0.34188 ) ## 50) LoyalCH < 0.703993 105 139.60 CH ( 0.61905 0.38095 ) * ## 51) LoyalCH > 0.703993 12 0.00 CH ( 1.00000 0.00000 ) * ## 13) PriceDiff > 0.265 81 47.66 CH ( 0.91358 0.08642 ) * ## 7) LoyalCH > 0.753545 285 111.70 CH ( 0.95088 0.04912 ) * Create a plot of the tree, and interpret the results. plot(tr) text(tr, pretty = 0, digits = 2, cex = 0.8) Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate? table(predict(tr, OJ[test, ], type = "class"), OJ[test, "Purchase"]) ## ## CH MM ## CH 125 15 ## MM 36 94 The test error rate is (15 + 36) / 270 ≈ 0.19. Apply the cv.tree() function to the training set in order to determine the optimal tree size. set.seed(42) res <- cv.tree(tr) Produce a plot with tree size on the \\(x\\)-axis and cross-validated classification error rate on the \\(y\\)-axis. plot(res$size, res$dev, type = "b", xlab = "Tree size", ylab = "Deviance") min <- which.min(res$dev) abline(v = res$size[min], lty = 2, col = "red") Which tree size corresponds to the lowest cross-validated classification error rate? res$size[min] ## [1] 6 Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes. ptr <- prune.tree(tr, best = res$size[min]) plot(ptr) text(ptr, pretty = 0, digits = 2, cex = 0.8) Compare the training error rates between the pruned and unpruned trees. Which is higher? oj_misclass <- function(model) { summary(model)$misclass[1] / summary(model)$misclass[2] } oj_misclass(tr) ## [1] 0.16375 oj_misclass(ptr) ## [1] 0.16375 The training misclassification error rate is identical (0.16375) for the pruned and unpruned trees. Compare the test error rates between the pruned and unpruned trees. Which is higher? oj_err <- function(model) { p <- predict(model, newdata = OJ[test, ], type = "class") mean(p != OJ[test, "Purchase"]) } oj_err(tr) ## [1] 0.1888889 oj_err(ptr) ## [1] 0.1888889 The test misclassification error rate is also identical for the pruned and unpruned trees (≈ 0.189); pruning to 6 terminal nodes does not change performance in this case. 8.2.4 Question 10 We now use boosting to predict Salary in the Hitters data set. Remove the observations for whom the salary information is unknown, and then log-transform the salaries. dat <- Hitters dat <- dat[!is.na(dat$Salary), ] dat$Salary <- log(dat$Salary) Create a training set consisting of the first 200 observations, and a test set consisting of the remaining observations. train <- 1:200 test <- setdiff(1:nrow(dat), train) Perform boosting on the training set with 1,000 trees for a range of values of the shrinkage parameter \\(\\lambda\\). Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding training set MSE on the \\(y\\)-axis. 
library(gbm) ## Loaded gbm 2.2.2 ## This version of gbm is no longer under development. Consider transitioning to gbm3, https://github.com/gbm-developers/gbm3 set.seed(42) lambdas <- 10 ^ seq(-3, 0, by = 0.1) fits <- lapply(lambdas, function(lam) { gbm(Salary ~ ., data = dat[train, ], distribution = "gaussian", n.trees = 1000, shrinkage = lam) }) errs <- sapply(fits, function(fit) { p <- predict(fit, dat[train, ], n.trees = 1000) mean((p - dat[train, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Training MSE", log = "xy") Produce a plot with different shrinkage values on the \\(x\\)-axis and the corresponding test set MSE on the \\(y\\)-axis. errs <- sapply(fits, function(fit) { p <- predict(fit, dat[test, ], n.trees = 1000) mean((p - dat[test, ]$Salary)^2) }) plot(lambdas, errs, type = "b", xlab = "Shrinkage values", ylab = "Test MSE", log = "xy") min(errs) ## [1] 0.249881 abline(v = lambdas[which.min(errs)], lty = 2, col = "red") Compare the test MSE of boosting to the test MSE that results from applying two of the regression approaches seen in Chapters 3 and 6. Linear regression fit1 <- lm(Salary ~ ., data = dat[train, ]) mean((predict(fit1, dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.4917959 The lasso (note that alpha = 1 in glmnet fits the lasso; alpha = 0 would give ridge regression) library(glmnet) ## Loading required package: Matrix ## ## Attaching package: 'Matrix' ## The following object is masked from 'package:ggtree': ## ## expand ## The following objects are masked from 'package:tidyr': ## ## expand, pack, unpack ## Loaded glmnet 4.1-8 x <- model.matrix(Salary ~ ., data = dat[train, ]) x.test <- model.matrix(Salary ~ ., data = dat[test, ]) y <- dat[train, "Salary"] fit2 <- glmnet(x, y, alpha = 1) mean((predict(fit2, s = 0.1, newx = x.test) - dat[test, "Salary"])^2) ## [1] 0.4389054 Which variables appear to be the most important predictors in the boosted model? summary(fits[[which.min(errs)]]) ## var rel.inf ## CAtBat CAtBat 16.4755242 ## CRBI CRBI 9.0670759 ## CHits CHits 8.9307168 ## CRuns CRuns 7.6839786 ## CWalks CWalks 7.1014886 ## PutOuts PutOuts 6.7869382 ## AtBat AtBat 5.8567916 ## Walks Walks 5.8479836 ## Years Years 5.3349489 ## Assists Assists 5.0076392 ## CHmRun CHmRun 4.6606919 ## RBI RBI 3.9255396 ## Hits Hits 3.8123124 ## HmRun HmRun 3.4462640 ## Runs Runs 2.4779866 ## Errors Errors 2.2341326 ## NewLeague NewLeague 0.5788283 ## Division Division 0.4880237 ## League League 0.2831352 Now apply bagging to the training set. What is the test set MSE for this approach? set.seed(42) bagged <- randomForest(Salary ~ ., data = dat[train, ], mtry = 19, ntree = 1000) mean((predict(bagged, newdata = dat[test, ]) - dat[test, "Salary"])^2) ## [1] 0.2278813 8.2.5 Question 11 This question uses the Caravan data set. Create a training set consisting of the first 1,000 observations, and a test set consisting of the remaining observations. train <- 1:1000 test <- setdiff(1:nrow(Caravan), train) Fit a boosting model to the training set with Purchase as the response and the other variables as predictors. Use 1,000 trees, and a shrinkage value of 0.01. Which predictors appear to be the most important? set.seed(42) fit <- gbm(as.numeric(Purchase == "Yes") ~ ., data = Caravan[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming bernoulli ... ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 50: PVRAAUT has no variation. ## Warning in gbm.fit(x = x, y = y, offset = offset, distribution = distribution, ## : variable 71: AVRAAUT has no variation. 
head(summary(fit)) ## var rel.inf ## PPERSAUT PPERSAUT 15.243041 ## MKOOPKLA MKOOPKLA 10.220498 ## MOPLHOOG MOPLHOOG 7.584734 ## MBERMIDD MBERMIDD 5.983650 ## PBRAND PBRAND 4.557491 ## ABRAND ABRAND 4.076017 Use the boosting model to predict the response on the test data. Predict that a person will make a purchase if the estimated probability of purchase is greater than 20%. Form a confusion matrix. What fraction of the people predicted to make a purchase do in fact make one? How does this compare with the results obtained from applying KNN or logistic regression to this data set? p <- predict(fit, Caravan[test, ], n.trees = 1000, type = "response") table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4415 257 ## TRUE 118 32 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.2133333 150 (118 + 32) people are predicted to purchase. Of these, 32 actually do, which is 21%. # Logistic regression fit <- glm(Purchase == "Yes" ~ ., data = Caravan[train, ], family = "binomial") ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred p <- predict(fit, Caravan[test, ], type = "response") ## Warning in predict.lm(object, newdata, se.fit, scale = 1, type = if (type == : ## prediction from rank-deficient fit; attr(*, "non-estim") has doubtful cases table(p > 0.2, Caravan[test, "Purchase"] == "Yes") ## ## FALSE TRUE ## FALSE 4183 231 ## TRUE 350 58 sum(p > 0.2 & Caravan[test, "Purchase"] == "Yes") / sum(p > 0.2) ## [1] 0.1421569 For logistic regression we correctly predict 14% of those predicted to purchase. library(class) # KNN fit <- knn(Caravan[train, -86], Caravan[test, -86], Caravan$Purchase[train]) table(fit, Caravan[test, "Purchase"] == "Yes") ## ## fit FALSE TRUE ## No 4260 263 ## Yes 273 26 sum(fit == "Yes" & Caravan[test, "Purchase"] == "Yes") / sum(fit == "Yes") ## [1] 0.08695652 For KNN we correctly predict 8.7% of those predicted to purchase. 8.2.6 Question 12 Apply boosting, bagging, random forests and BART to a data set of your choice. Be sure to fit the models on a training set and to evaluate their performance on a test set. How accurate are the results compared to simple methods like linear or logistic regression? Which of these approaches yields the best performance? Here I'm going to use the College dataset (used in Question 10 from Chapter 7 to compare performance with the GAM we previously built). In this model we were trying to predict Outstate using the other variables in College. library(gam) ## Loading required package: splines ## Loading required package: foreach ## ## Attaching package: 'foreach' ## The following objects are masked from 'package:purrr': ## ## accumulate, when ## Loaded gam 1.22-4 set.seed(42) train <- sample(1:nrow(College), 400) test <- setdiff(1:nrow(College), train) # Linear regression lr <- gam(Outstate ~ ., data = College[train, ]) # GAM from chapter 7 gam <- gam(Outstate ~ Private + s(Room.Board, 2) + s(PhD, 2) + s(perc.alumni, 2) + s(Expend, 2) + s(Grad.Rate, 2), data = College[train, ]) # Boosting boosted <- gbm(Outstate ~ ., data = College[train, ], n.trees = 1000, shrinkage = 0.01) ## Distribution not specified, assuming gaussian ... # Bagging (random forest with mtry = no. predictors) bagged <- randomForest(Outstate ~ ., data = College[train, ], mtry = 17, ntree = 1000) # Random forest with mtry = sqrt(no. 
predictors) rf <- randomForest(Outstate ~ ., data = College[train, ], mtry = 4, ntree = 1000) # BART pred <- setdiff(colnames(College), "Outstate") bart <- gbart(College[train, pred], College[train, "Outstate"], x.test = College[test, pred]) ## *****Calling gbart: type=1 ## *****Data: ## data:n,p,np: 400, 18, 377 ## y1,yn: -4030.802500, 77.197500 ## x1,x[n*p]: 1.000000, 71.000000 ## xp1,xp[np*p]: 0.000000, 99.000000 ## *****Number of Trees: 200 ## *****Number of Cut Points: 1 ... 75 ## *****burn,nd,thin: 100,1000,1 ## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,301.581,3,715815,10580.8 ## *****sigma: 1916.969943 ## *****w (weights): 1.000000 ... 1.000000 ## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,18,0 ## *****printevery: 100 ## ## MCMC ## done 0 (out of 1100) ## done 100 (out of 1100) ## done 200 (out of 1100) ## done 300 (out of 1100) ## done 400 (out of 1100) ## done 500 (out of 1100) ## done 600 (out of 1100) ## done 700 (out of 1100) ## done 800 (out of 1100) ## done 900 (out of 1100) ## done 1000 (out of 1100) ## time: 4s ## trcnt,tecnt: 1000,1000 mse <- function(model, ...) { pred <- predict(model, College[test, ], ...) mean((College$Outstate[test] - pred)^2) } res <- c( "Linear regression" = mse(lr), "GAM" = mse(gam), "Boosting" = mse(boosted, n.trees = 1000), "Bagging" = mse(bagged), "Random forest" = mse(rf), "BART" = mse(bart) ) res <- data.frame("MSE" = res) res$Model <- factor(row.names(res), levels = rev(row.names(res))) ggplot(res, aes(Model, MSE)) + coord_flip() + geom_bar(stat = "identity", fill = "steelblue") In this case, it looks like bagging produces the best performing model in terms of test mean square error. "],["support-vector-machines.html", "9 Support Vector Machines 9.1 Conceptual 9.2 Applied", " 9 Support Vector Machines 9.1 Conceptual 9.1.1 Question 1 This problem involves hyperplanes in two dimensions. Sketch the hyperplane \\(1 + 3X_1 − X_2 = 0\\). Indicate the set of points for which \\(1 + 3X_1 − X_2 > 0\\), as well as the set of points for which \\(1 + 3X_1 − X_2 < 0\\). library(ggplot2) xlim <- c(-10, 10) ylim <- c(-30, 30) points <- expand.grid( X1 = seq(xlim[1], xlim[2], length.out = 50), X2 = seq(ylim[1], ylim[2], length.out = 50) ) p <- ggplot(points, aes(x = X1, y = X2)) + geom_abline(intercept = 1, slope = 3) + # X2 = 1 + 3X1 theme_bw() p + geom_point(aes(color = 1 + 3*X1 - X2 > 0), size = 0.1) + scale_color_discrete(name = "1 + 3X1 − X2 > 0") On the same plot, sketch the hyperplane \\(−2 + X_1 + 2X_2 = 0\\). Indicate the set of points for which \\(−2 + X_1 + 2X_2 > 0\\), as well as the set of points for which \\(−2 + X_1 + 2X_2 < 0\\). p + geom_abline(intercept = 1, slope = -1/2) + # X2 = 1 - X1/2 geom_point( aes(color = interaction(1 + 3*X1 - X2 > 0, -2 + X1 + 2*X2 > 0)), size = 0.5 ) + scale_color_discrete(name = "(1 + 3X1 − X2 > 0).(−2 + X1 + 2X2 > 0)") 9.1.2 Question 2 We have seen that in \\(p = 2\\) dimensions, a linear decision boundary takes the form \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 = 0\\). We now investigate a non-linear decision boundary. Sketch the curve \\[(1+X_1)^2 +(2−X_2)^2 = 4\\]. 
points <- expand.grid( X1 = seq(-4, 2, length.out = 100), X2 = seq(-1, 5, length.out = 100) ) p <- ggplot(points, aes(x = X1, y = X2, z = (1 + X1)^2 + (2 - X2)^2 - 4)) + geom_contour(breaks = 0, colour = "black") + theme_bw() p On your sketch, indicate the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] as well as the set of points for which \\[(1 + X_1)^2 + (2 − X_2)^2 \\leq 4.\\] p + geom_point(aes(color = (1 + X1)^2 + (2 - X2)^2 - 4 > 0), size = 0.1) Suppose that a classifier assigns an observation to the blue class if \\[(1 + X_1)^2 + (2 − X_2)^2 > 4,\\] and to the red class otherwise. To what class is the observation \\((0, 0)\\) classified? \\((−1, 1)\\)? \\((2, 2)\\)? \\((3, 8)\\)? points <- data.frame( X1 = c(0, -1, 2, 3), X2 = c(0, 1, 2, 8) ) ifelse((1 + points$X1)^2 + (2 - points$X2)^2 > 4, "blue", "red") ## [1] "blue" "red" "blue" "blue" Argue that while the decision boundary in (c) is not linear in terms of \\(X_1\\) and \\(X_2\\), it is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), and \\(X_2^2\\). The decision boundary is \\[(1 + X_1)^2 + (2 − X_2)^2 -4 = 0\\] which we can expand to: \\[1 + 2X_1 + X_1^2 + 4 − 4X_2 + X_2^2 - 4 = 0\\] which is linear in terms of \\(X_1\\), \\(X_1^2\\), \\(X_2\\), \\(X_2^2\\). 9.1.3 Question 3 Here we explore the maximal margin classifier on a toy data set. We are given \\(n = 7\\) observations in \\(p = 2\\) dimensions. For each observation, there is an associated class label. Obs. \\(X_1\\) \\(X_2\\) \\(Y\\) 1 3 4 Red 2 2 2 Red 3 4 4 Red 4 1 4 Red 5 2 1 Blue 6 4 3 Blue 7 4 1 Blue Sketch the observations. data <- data.frame( X1 = c(3, 2, 4, 1, 2, 4, 4), X2 = c(4, 2, 4, 4, 1, 3, 1), Y = c(rep("Red", 4), rep("Blue", 3)) ) p <- ggplot(data, aes(x = X1, y = X2, color = Y)) + geom_point(size = 2) + scale_colour_identity() + coord_cartesian(xlim = c(0.5, 4.5), ylim = c(0.5, 4.5)) p Sketch the optimal separating hyperplane, and provide the equation for this hyperplane (of the form (9.1)). library(e1071) fit <- svm(as.factor(Y) ~ ., data = data, kernel = "linear", cost = 10, scale = FALSE) # Extract beta_0, beta_1, beta_2 beta <- c( -fit$rho, drop(t(fit$coefs) %*% as.matrix(data[fit$index, 1:2])) ) names(beta) <- c("B0", "B1", "B2") p <- p + geom_abline(intercept = -beta[1] / beta[3], slope = -beta[2] / beta[3], lty = 2) p Describe the classification rule for the maximal margin classifier. It should be something along the lines of “Classify to Red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\), and classify to Blue otherwise.” Provide the values for \\(\\beta_0, \\beta_1,\\) and \\(\\beta_2\\). Classify to red if \\(\\beta_0 + \\beta_1X_1 + \\beta_2X_2 > 0\\) and blue otherwise where \\(\\beta_0 = 1\\), \\(\\beta_1 = -2\\), \\(\\beta_2 = 2\\). On your sketch, indicate the margin for the maximal margin hyperplane. p <- p + geom_ribbon( aes(x = x, ymin = ymin, ymax = ymax), data = data.frame(x = c(0, 5), ymin = c(-1, 4), ymax = c(0, 5)), alpha = 0.1, fill = "blue", inherit.aes = FALSE ) p Indicate the support vectors for the maximal margin classifier. p <- p + geom_point(data = data[fit$index, ], size = 4) p The support vectors (from the svm fit object) are shown above. Arguably, there’s another support vector, since four points exactly touch the margin. Argue that a slight movement of the seventh observation would not affect the maximal margin hyperplane. p + geom_point(data = data[7, , drop = FALSE], size = 4, color = "purple") The 7th point is shown in purple above. 
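As a quick numeric check (a sketch of my own, not part of the original solution; the size and direction of the perturbation are arbitrary), we can nudge observation 7 and refit; because it is not a support vector, the estimated coefficients should be essentially unchanged:
# perturb observation 7 slightly (it stays well away from the margin)
data2 <- data
data2[7, c("X1", "X2")] <- data2[7, c("X1", "X2")] + c(0.2, -0.2)
fit2 <- svm(as.factor(Y) ~ ., data = data2, kernel = "linear", cost = 10, scale = FALSE)
# recover the hyperplane coefficients exactly as above
beta2 <- c(-fit2$rho, drop(t(fit2$coefs) %*% as.matrix(data2[fit2$index, 1:2])))
names(beta2) <- c("B0", "B1", "B2")
beta2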
It is not a support vector, and not close to the margin, so small changes in its X1, X2 values would not affect the currently calculated margin. Sketch a hyperplane that is not the optimal separating hyperplane, and provide the equation for this hyperplane. A non-optimal hyperplane that still separates the blue and red points would be one that touches the (red) point at X1 = 2, X2 = 2 and the (blue) point at X1 = 4, X2 = 3. This gives the line \\(y = x/2 + 1\\) or, equivalently, \\(\\beta_0 = -1\\), \\(\\beta_1 = -1/2\\), \\(\\beta_2 = 1\\). p + geom_abline(intercept = 1, slope = 0.5, lty = 2, col = "red") Draw an additional observation on the plot so that the two classes are no longer separable by a hyperplane. p + geom_point(data = data.frame(X1 = 1, X2 = 3, Y = "Blue"), shape = 15, size = 4) 9.2 Applied 9.2.1 Question 4 Generate a simulated two-class data set with 100 observations and two features in which there is a visible but non-linear separation between the two classes. Show that in this setting, a support vector machine with a polynomial kernel (with degree greater than 1) or a radial kernel will outperform a support vector classifier on the training data. Which technique performs best on the test data? Make plots and report training and test error rates in order to back up your assertions. set.seed(10) data <- data.frame( x = runif(100), y = runif(100) ) score <- (2*data$x-0.5)^2 + (data$y)^2 - 0.5 data$class <- factor(ifelse(score > 0, "red", "blue")) p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() p train <- 1:50 test <- 51:100 fits <- list( "Radial" = svm(class ~ ., data = data[train, ], kernel = "radial"), "Polynomial" = svm(class ~ ., data = data[train, ], kernel = "polynomial", degree = 2), "Linear" = svm(class ~ ., data = data[train, ], kernel = "linear") ) err <- function(model, data) { out <- table(predict(model, data), data$class) (out[1, 2] + out[2, 1]) / sum(out) } plot(fits[[1]], data) plot(fits[[2]], data) plot(fits[[3]], data) sapply(fits, err, data = data[train, ]) ## Radial Polynomial Linear ## 0.04 0.30 0.10 sapply(fits, err, data = data[test, ]) ## Radial Polynomial Linear ## 0.06 0.48 0.14 In this case, the radial kernel performs best, followed by the linear kernel, with the 2nd-degree polynomial performing worst. The ordering of these models is the same for the training and test data sets. 9.2.2 Question 5 We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features. Generate a data set with \\(n = 500\\) and \\(p = 2\\), such that the observations belong to two classes with a quadratic decision boundary between them. For instance, you can do this as follows: > x1 <- runif(500) - 0.5 > x2 <- runif(500) - 0.5 > y <- 1 * (x1^2 - x2^2 > 0) set.seed(42) train <- data.frame( x1 = runif(500) - 0.5, x2 = runif(500) - 0.5 ) train$y <- factor(as.numeric((train$x1^2 - train$x2^2 > 0))) Plot the observations, colored according to their class labels. Your plot should display \\(X_1\\) on the \\(x\\)-axis, and \\(X_2\\) on the \\(y\\)-axis. p <- ggplot(train, aes(x = x1, y = x2, color = y)) + geom_point(size = 2) p Fit a logistic regression model to the data, using \\(X_1\\) and \\(X_2\\) as predictors.
fit1 <- glm(y ~ ., data = train, family = "binomial") Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear. plot_model <- function(fit) { if (inherits(fit, "svm")) { train$p <- predict(fit) } else { train$p <- factor(as.numeric(predict(fit) > 0)) } ggplot(train, aes(x = x1, y = x2, color = p)) + geom_point(size = 2) } plot_model(fit1) Now fit a logistic regression model to the data using non-linear functions of \\(X_1\\) and \\(X_2\\) as predictors (e.g. \\(X_1^2, X_1 \\times X_2, \\log(X_2),\\) and so forth). fit2 <- glm(y ~ poly(x1, 2) + poly(x2, 2), data = train, family = "binomial") ## Warning: glm.fit: algorithm did not converge ## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear. plot_model(fit2) Fit a support vector classifier to the data with \\(X_1\\) and \\(X_2\\) as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit3 <- svm(y ~ x1 + x2, data = train, kernel = "linear") plot_model(fit3) Fit a SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels. fit4 <- svm(y ~ x1 + x2, data = train, kernel = "polynomial", degree = 2) plot_model(fit4) Comment on your results. When simulating data with a quadratic decision boundary, a logistic model with quadratic transformations of the variables and an SVM with a quadratic kernel both produce much better (and similar) fits than the standard linear methods. 9.2.3 Question 6 At the end of Section 9.6.1, it is claimed that in the case of data that is just barely linearly separable, a support vector classifier with a small value of cost that misclassifies a couple of training observations may perform better on test data than one with a huge value of cost that does not misclassify any training observations. You will now investigate this claim. Generate two-class data with \\(p = 2\\) in such a way that the classes are just barely linearly separable. set.seed(2) # Simulate data that is separable by a line at y = 2.5 data <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) data$y <- (data$class == "red") * 5 + rnorm(200) # Add barely separable points (these are simulated "noise" values) newdata <- data.frame(x = rnorm(30)) newdata$y <- 1.5*newdata$x + 3 + rnorm(30, 0, 1) newdata$class = ifelse((1.5*newdata$x + 3) - newdata$y > 0, "blue", "red") data <- rbind(data, newdata) # remove any that cause misclassification leaving data that is barely linearly # separable, but along an axis that is not y = 2.5 (which would be correct # for the "true" data).
data <- data[!(data$class == "red") == ((1.5*data$x + 3 - data$y) > 0), ] data <- data[sample(seq_len(nrow(data)), 200), ] p <- ggplot(data, aes(x = x, y = y, color = class)) + geom_point(size = 2) + scale_colour_identity() + geom_abline(intercept = 3, slope = 1.5, lty = 2) p Compute the cross-validation error rates for support vector classifiers with a range of cost values. How many training errors are misclassified for each value of cost considered, and how does this relate to the cross-validation errors obtained? How many training errors are misclassified for each value of cost? costs <- 10^seq(-3, 5) sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, data) sum(pred != data$class) }) ## [1] 98 8 9 4 1 1 0 0 0 Cross-validation errors out <- tune(svm, as.factor(class) ~ ., data = data, kernel = "linear", ranges = list(cost = costs)) summary(out) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 10 ## ## - best performance: 0.005 ## ## - Detailed performance results: ## cost error dispersion ## 1 1e-03 0.540 0.09067647 ## 2 1e-02 0.045 0.02838231 ## 3 1e-01 0.045 0.03689324 ## 4 1e+00 0.020 0.02581989 ## 5 1e+01 0.005 0.01581139 ## 6 1e+02 0.005 0.01581139 ## 7 1e+03 0.005 0.01581139 ## 8 1e+04 0.010 0.02108185 ## 9 1e+05 0.010 0.02108185 data.frame( cost = out$performances$cost, misclass = out$performances$error * nrow(data) ) ## cost misclass ## 1 1e-03 108 ## 2 1e-02 9 ## 3 1e-01 9 ## 4 1e+00 4 ## 5 1e+01 1 ## 6 1e+02 1 ## 7 1e+03 1 ## 8 1e+04 2 ## 9 1e+05 2 Generate an appropriate test data set, and compute the test errors corresponding to each of the values of cost considered. Which value of cost leads to the fewest test errors, and how does this compare to the values of cost that yield the fewest training errors and the fewest cross-validation errors? set.seed(2) test <- data.frame( x = rnorm(200), class = sample(c("red", "blue"), 200, replace = TRUE) ) test$y <- (test$class == "red") * 5 + rnorm(200) p + geom_point(data = test, pch = 21) (errs <- sapply(costs, function(cost) { fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost) pred <- predict(fit, test) sum(pred != test$class) })) ## [1] 95 2 3 9 16 16 19 19 19 (cost <- costs[which.min(errs)]) ## [1] 0.01 (fit <- svm(as.factor(class) ~ ., data = data, kernel = "linear", cost = cost)) ## ## Call: ## svm(formula = as.factor(class) ~ ., data = data, kernel = "linear", ## cost = cost) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 135 test$prediction <- predict(fit, test) p <- ggplot(test, aes(x = x, y = y, color = class, shape = prediction == class)) + geom_point(size = 2) + scale_colour_identity() p Discuss your results. A large cost leads to overfitting as the model finds the perfect linear separation between red and blue in the training data. A lower cost then leads to improved prediction in the test data. 9.2.4 Question 7 In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set. Create a binary variable that takes on a 1 for cars with gas mileage above the median, and a 0 for cars with gas mileage below the median. 
library(ISLR2) data <- Auto data$high_mpg <- as.factor(as.numeric(data$mpg > median(data$mpg))) Fit a support vector classifier to the data with various values of cost, in order to predict whether a car gets high or low gas mileage. Report the cross-validation errors associated with different values of this parameter. Comment on your results. Note you will need to fit the classifier without the gas mileage variable to produce sensible results. set.seed(42) costs <- 10^seq(-4, 3, by = 0.5) results <- list() f <- high_mpg ~ displacement + horsepower + weight results$linear <- tune(svm, f, data = data, kernel = "linear", ranges = list(cost = costs)) summary(results$linear) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 0.03162278 ## ## - best performance: 0.1019231 ## ## - Detailed performance results: ## cost error dispersion ## 1 1.000000e-04 0.5967949 0.05312225 ## 2 3.162278e-04 0.5967949 0.05312225 ## 3 1.000000e-03 0.2199359 0.08718077 ## 4 3.162278e-03 0.1353846 0.06058195 ## 5 1.000000e-02 0.1121795 0.04011293 ## 6 3.162278e-02 0.1019231 0.05087176 ## 7 1.000000e-01 0.1096154 0.05246238 ## 8 3.162278e-01 0.1044872 0.05154934 ## 9 1.000000e+00 0.1044872 0.05154934 ## 10 3.162278e+00 0.1044872 0.05154934 ## 11 1.000000e+01 0.1019231 0.05501131 ## 12 3.162278e+01 0.1019231 0.05501131 ## 13 1.000000e+02 0.1019231 0.05501131 ## 14 3.162278e+02 0.1019231 0.05501131 ## 15 1.000000e+03 0.1019231 0.05501131 Now repeat (b), this time using SVMs with radial and polynomial basis kernels, with different values of gamma and degree and cost. Comment on your results. results$polynomial <- tune(svm, f, data = data, kernel = "polynomial", ranges = list(cost = costs, degree = 1:3)) summary(results$polynomial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost degree ## 0.1 1 ## ## - best performance: 0.101859 ## ## - Detailed performance results: ## cost degree error dispersion ## 1 1.000000e-04 1 0.5842949 0.04703306 ## 2 3.162278e-04 1 0.5842949 0.04703306 ## 3 1.000000e-03 1 0.5842949 0.04703306 ## 4 3.162278e-03 1 0.2167949 0.07891173 ## 5 1.000000e-02 1 0.1275641 0.04806885 ## 6 3.162278e-02 1 0.1147436 0.05661708 ## 7 1.000000e-01 1 0.1018590 0.05732429 ## 8 3.162278e-01 1 0.1069231 0.05949679 ## 9 1.000000e+00 1 0.1069231 0.06307278 ## 10 3.162278e+00 1 0.1069231 0.06307278 ## 11 1.000000e+01 1 0.1043590 0.06603760 ## 12 3.162278e+01 1 0.1043590 0.06603760 ## 13 1.000000e+02 1 0.1043590 0.06603760 ## 14 3.162278e+02 1 0.1043590 0.06603760 ## 15 1.000000e+03 1 0.1043590 0.06603760 ## 16 1.000000e-04 2 0.5842949 0.04703306 ## 17 3.162278e-04 2 0.5842949 0.04703306 ## 18 1.000000e-03 2 0.5842949 0.04703306 ## 19 3.162278e-03 2 0.5255128 0.08090636 ## 20 1.000000e-02 2 0.3980769 0.08172400 ## 21 3.162278e-02 2 0.3674359 0.07974741 ## 22 1.000000e-01 2 0.3597436 0.08336609 ## 23 3.162278e-01 2 0.3597436 0.09010398 ## 24 1.000000e+00 2 0.3444872 0.08767258 ## 25 3.162278e+00 2 0.3545513 0.10865903 ## 26 1.000000e+01 2 0.3239103 0.09593710 ## 27 3.162278e+01 2 0.3035256 0.08184137 ## 28 1.000000e+02 2 0.3061538 0.08953945 ## 29 3.162278e+02 2 0.3060897 0.08919821 ## 30 1.000000e+03 2 0.3035897 0.09305216 ## 31 1.000000e-04 3 0.5842949 0.04703306 ## 32 3.162278e-04 3 0.4955128 0.10081350 ## 33 1.000000e-03 3 0.3750641 0.08043982 ## 34 3.162278e-03 3 0.3036538 0.09096445 ## 35 1.000000e-02 3 0.2601282 0.07774595 ## 36 3.162278e-02 3 0.2499359 0.08407106 ## 37 
1.000000e-01 3 0.2017949 0.07547413 ## 38 3.162278e-01 3 0.1937179 0.08427411 ## 39 1.000000e+00 3 0.1478205 0.04579654 ## 40 3.162278e+00 3 0.1451923 0.05169638 ## 41 1.000000e+01 3 0.1451282 0.04698931 ## 42 3.162278e+01 3 0.1500000 0.07549058 ## 43 1.000000e+02 3 0.1373718 0.05772558 ## 44 3.162278e+02 3 0.1271795 0.06484766 ## 45 1.000000e+03 3 0.1322436 0.06764841 results$radial <- tune(svm, f, data = data, kernel = "radial", ranges = list(cost = costs, gamma = 10^(-2:1))) summary(results$radial) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost gamma ## 1000 0.1 ## ## - best performance: 0.08179487 ## ## - Detailed performance results: ## cost gamma error dispersion ## 1 1.000000e-04 0.01 0.58410256 0.05435320 ## 2 3.162278e-04 0.01 0.58410256 0.05435320 ## 3 1.000000e-03 0.01 0.58410256 0.05435320 ## 4 3.162278e-03 0.01 0.58410256 0.05435320 ## 5 1.000000e-02 0.01 0.58410256 0.05435320 ## 6 3.162278e-02 0.01 0.26557692 0.10963269 ## 7 1.000000e-01 0.01 0.15038462 0.05783237 ## 8 3.162278e-01 0.01 0.11224359 0.04337812 ## 9 1.000000e+00 0.01 0.10730769 0.04512161 ## 10 3.162278e+00 0.01 0.10730769 0.04512161 ## 11 1.000000e+01 0.01 0.10737179 0.05526490 ## 12 3.162278e+01 0.01 0.10480769 0.05610124 ## 13 1.000000e+02 0.01 0.10480769 0.05610124 ## 14 3.162278e+02 0.01 0.10737179 0.05526490 ## 15 1.000000e+03 0.01 0.10993590 0.05690926 ## 16 1.000000e-04 0.10 0.58410256 0.05435320 ## 17 3.162278e-04 0.10 0.58410256 0.05435320 ## 18 1.000000e-03 0.10 0.58410256 0.05435320 ## 19 3.162278e-03 0.10 0.58410256 0.05435320 ## 20 1.000000e-02 0.10 0.15301282 0.06026554 ## 21 3.162278e-02 0.10 0.11480769 0.04514816 ## 22 1.000000e-01 0.10 0.10730769 0.04512161 ## 23 3.162278e-01 0.10 0.10730769 0.04512161 ## 24 1.000000e+00 0.10 0.10737179 0.05526490 ## 25 3.162278e+00 0.10 0.10737179 0.05526490 ## 26 1.000000e+01 0.10 0.10737179 0.05526490 ## 27 3.162278e+01 0.10 0.10737179 0.05526490 ## 28 1.000000e+02 0.10 0.09967949 0.04761387 ## 29 3.162278e+02 0.10 0.08429487 0.03207585 ## 30 1.000000e+03 0.10 0.08179487 0.03600437 ## 31 1.000000e-04 1.00 0.58410256 0.05435320 ## 32 3.162278e-04 1.00 0.58410256 0.05435320 ## 33 1.000000e-03 1.00 0.58410256 0.05435320 ## 34 3.162278e-03 1.00 0.58410256 0.05435320 ## 35 1.000000e-02 1.00 0.12506410 0.05342773 ## 36 3.162278e-02 1.00 0.10730769 0.06255920 ## 37 1.000000e-01 1.00 0.10993590 0.05561080 ## 38 3.162278e-01 1.00 0.10737179 0.05526490 ## 39 1.000000e+00 1.00 0.09711538 0.05107441 ## 40 3.162278e+00 1.00 0.08429487 0.03634646 ## 41 1.000000e+01 1.00 0.08692308 0.03877861 ## 42 3.162278e+01 1.00 0.08948718 0.03503648 ## 43 1.000000e+02 1.00 0.09198718 0.03272127 ## 44 3.162278e+02 1.00 0.10217949 0.04214031 ## 45 1.000000e+03 1.00 0.09692308 0.04645046 ## 46 1.000000e-04 10.00 0.58410256 0.05435320 ## 47 3.162278e-04 10.00 0.58410256 0.05435320 ## 48 1.000000e-03 10.00 0.58410256 0.05435320 ## 49 3.162278e-03 10.00 0.58410256 0.05435320 ## 50 1.000000e-02 10.00 0.58410256 0.05435320 ## 51 3.162278e-02 10.00 0.22205128 0.12710181 ## 52 1.000000e-01 10.00 0.11237179 0.03888895 ## 53 3.162278e-01 10.00 0.10217949 0.04375722 ## 54 1.000000e+00 10.00 0.09717949 0.03809440 ## 55 3.162278e+00 10.00 0.09717949 0.03809440 ## 56 1.000000e+01 10.00 0.09711538 0.04161705 ## 57 3.162278e+01 10.00 0.11487179 0.04240664 ## 58 1.000000e+02 10.00 0.13019231 0.03541140 ## 59 3.162278e+02 10.00 0.13532051 0.03865626 ## 60 1.000000e+03 10.00 0.14044872 0.04251917 sapply(results, function(x) 
x$best.performance) ## linear polynomial radial ## 0.10192308 0.10185897 0.08179487 sapply(results, function(x) x$best.parameters) ## $linear ## cost ## 6 0.03162278 ## ## $polynomial ## cost degree ## 7 0.1 1 ## ## $radial ## cost gamma ## 30 1000 0.1 Make some plots to back up your assertions in (b) and (c). Hint: In the lab, we used the plot() function for svm objects only in cases with \\(p = 2\\). When \\(p > 2\\), you can use the plot() function to create plots displaying pairs of variables at a time. Essentially, instead of typing > plot(svmfit, dat) where svmfit contains your fitted model and dat is a data frame containing your data, you can type > plot(svmfit, dat, x1 ∼ x4) in order to plot just the first and fourth variables. However, you must replace x1 and x4 with the correct variable names. To find out more, type ?plot.svm. table(predict(results$radial$best.model, data), data$high_mpg) ## ## 0 1 ## 0 176 5 ## 1 20 191 plot(results$radial$best.model, data, horsepower~displacement) plot(results$radial$best.model, data, horsepower~weight) plot(results$radial$best.model, data, displacement~weight) 9.2.5 Question 8 This problem involves the OJ data set which is part of the ISLR2 package. Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations. set.seed(42) train <- sample(seq_len(nrow(OJ)), 800) test <- setdiff(seq_len(nrow(OJ)), train) Fit a support vector classifier to the training data using cost = 0.01, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics, and describe the results obtained. fit <- svm(Purchase ~ ., data = OJ[train, ], kernel = "linear", cost = 0.01) summary(fit) ## ## Call: ## svm(formula = Purchase ~ ., data = OJ[train, ], kernel = "linear", ## cost = 0.01) ## ## ## Parameters: ## SVM-Type: C-classification ## SVM-Kernel: linear ## cost: 0.01 ## ## Number of Support Vectors: 432 ## ## ( 215 217 ) ## ## ## Number of Classes: 2 ## ## Levels: ## CH MM What are the training and test error rates? err <- function(model, data) { t <- table(predict(model, data), data[["Purchase"]]) 1 - sum(diag(t)) / sum(t) } errs <- function(model) { c(train = err(model, OJ[train, ]), test = err(model, OJ[test, ])) } errs(fit) ## train test ## 0.171250 0.162963 Use the tune() function to select an optimal cost. Consider values in the range 0.01 to 10. tuned <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "linear", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned$best.parameters ## cost ## 7 1 summary(tuned) ## ## Parameter tuning of 'svm': ## ## - sampling method: 10-fold cross validation ## ## - best parameters: ## cost ## 1 ## ## - best performance: 0.1775 ## ## - Detailed performance results: ## cost error dispersion ## 1 0.01000000 0.18250 0.04133199 ## 2 0.02154435 0.18000 0.04005205 ## 3 0.04641589 0.18000 0.05041494 ## 4 0.10000000 0.18000 0.04901814 ## 5 0.21544347 0.18250 0.04377975 ## 6 0.46415888 0.18250 0.04090979 ## 7 1.00000000 0.17750 0.04031129 ## 8 2.15443469 0.18000 0.03961621 ## 9 4.64158883 0.17875 0.03821086 ## 10 10.00000000 0.18375 0.03438447 Compute the training and test error rates using this new value for cost. errs(tuned$best.model) ## train test ## 0.167500 0.162963 Repeat parts (b) through (e) using a support vector machine with a radial kernel. Use the default value for gamma. 
tuned2 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "radial", ranges = list(cost = 10^seq(-2, 1, length.out = 10))) tuned2$best.parameters ## cost ## 6 0.4641589 errs(tuned2$best.model) ## train test ## 0.1525000 0.1666667 Repeat parts (b) through (e) using a support vector machine with a polynomial kernel. Set degree = 2. tuned3 <- tune(svm, Purchase ~ ., data = OJ[train, ], kernel = "polynomial", ranges = list(cost = 10^seq(-2, 1, length.out = 10)), degree = 2) tuned3$best.parameters ## cost ## 9 4.641589 errs(tuned3$best.model) ## train test ## 0.1487500 0.1703704 Overall, which approach seems to give the best results on this data? Overall, the “radial” kernel appears to perform best in this case: it has the lowest training error, and its test error is close to that of the linear kernel. "],["deep-learning.html", "10 Deep Learning 10.1 Conceptual 10.2 Applied", " 10 Deep Learning 10.1 Conceptual 10.1.1 Question 1 Consider a neural network with two hidden layers: \\(p = 4\\) input units, 2 units in the first hidden layer, 3 units in the second hidden layer, and a single output. Draw a picture of the network, similar to Figures 10.1 or 10.4. Write out an expression for \\(f(X)\\), assuming ReLU activation functions. Be as explicit as you can! The three layers (from our final output layer back to the start of our network) can be described as: \\[\\begin{align*} f(X) &= g(w_{0}^{(3)} + \\sum^{K_2}_{l=1} w_{l}^{(3)} A_l^{(2)}) \\\\ A_l^{(2)} &= h_l^{(2)}(X) = g(w_{l0}^{(2)} + \\sum_{k=1}^{K_1} w_{lk}^{(2)} A_k^{(1)})\\\\ A_k^{(1)} &= h_k^{(1)}(X) = g(w_{k0}^{(1)} + \\sum_{j=1}^p w_{kj}^{(1)} X_j) \\\\ \\end{align*}\\] for \\(l = 1, ..., K_2 = 3\\) and \\(k = 1, ..., K_1 = 2\\) and \\(p = 4\\), where \\[ g(z) = (z)_+ = \\begin{cases} 0, & \\text{if } z < 0 \\\\ z, & \\text{otherwise} \\end{cases} \\] Now plug in some values for the coefficients and write out the value of \\(f(X)\\). We can perhaps achieve this most easily by fitting a real model. Note, in the plot shown here, we also include the “bias” or intercept terms. library(ISLR2) library(neuralnet) library(sigmoid) set.seed(5) train <- sample(seq_len(nrow(ISLR2::Boston)), nrow(ISLR2::Boston) * 2/3) net <- neuralnet(crim ~ lstat + medv + ptratio + rm, data = ISLR2::Boston[train, ], act.fct = relu, hidden = c(2, 3) ) plot(net) We can make a prediction for a given observation using this object. Firstly, let’s find an “ambiguous” test sample: p <- predict(net, ISLR2::Boston[-train, ]) x <- ISLR2::Boston[-train, ][which.min(abs(p - mean(c(max(p), min(p))))), ] x <- x[, c("lstat", "medv", "ptratio", "rm")] predict(net, x) ## [,1] ## 441 19.14392 Or, repeating by “hand”: g <- function(x) ifelse(x > 0, x, 0) # relu activation function w <- net$weights[[1]] # the estimated weights for each layer v <- as.numeric(x) # our input predictors # to calculate our prediction we can take the dot product of our predictors # (with 1 at the start for the bias term) and each layer's weights, lw # (strictly, the fitted network's output layer is linear, so applying g() to # the final value is harmless here only because the prediction is positive) for (lw in w) v <- g(c(1, v) %*% lw) v ## [,1] ## [1,] 19.14392 How many parameters are there? length(unlist(net$weights)) ## [1] 23 There are \\(4*2+2 + 2*3+3 + 3*1+1 = 23\\) parameters. 10.1.2 Question 2 Consider the softmax function in (10.13) (see also (4.13) on page 141) for modeling multinomial probabilities. In (10.13), show that if we add a constant \\(c\\) to each of the \\(z_l\\), then the probability is unchanged.
If we add a constant \\(c\\) to each \\(Z_l\\) in equation 10.13 we get: \\[\\begin{align*} Pr(Y=m|X) &= \\frac{e^{Z_m+c}}{\\sum_{l=0}^9e^{Z_l+c}} \\\\ &= \\frac{e^{Z_m}e^c}{\\sum_{l=0}^9e^{Z_l}e^c} \\\\ &= \\frac{e^{Z_m}e^c}{e^c\\sum_{l=0}^9e^{Z_l}} \\\\ &= \\frac{e^{Z_m}}{\\sum_{l=0}^9e^{Z_l}} \\\\ \\end{align*}\\] which is just equation 10.13. In (4.13), show that if we add constants \\(c_j\\), \\(j = 0,1,...,p\\), to each of the corresponding coefficients for each of the classes, then the predictions at any new point \\(x\\) are unchanged. 4.13 is \\[ Pr(Y=k|X=x) = \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\] Adding the constant \\(c_j\\) to each corresponding coefficient (so that \\(\\beta_{lj}\\) becomes \\(\\beta_{lj} + c_j\\) for every class \\(l\\)) gives: \\[\\begin{align*} Pr(Y=k|X=x) &= \\frac {e^{\\beta_{K0} + c_0 + (\\beta_{K1} + c_1)x_1 + ... + (\\beta_{Kp} + c_p)x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + c_0 + (\\beta_{l1} + c_1)x_1 + ... + (\\beta_{lp} + c_p)x_p}} \\\\ &= \\frac {e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{c_0 + c_1x_1 + ... + c_px_p}e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {e^{c_0 + c_1x_1 + ... + c_px_p}\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ &= \\frac {e^{\\beta_{K0} + \\beta_{K1}x_1 + ... + \\beta_{Kp}x_p}} {\\sum_{l=1}^K e^{\\beta_{l0} + \\beta_{l1}x_1 + ... + \\beta_{lp}x_p}} \\\\ \\end{align*}\\] which collapses to 4.13 (with the same argument as above). This shows that the softmax function is over-parametrized. However, regularization and SGD typically constrain the solutions so that this is not a problem. 10.1.3 Question 3 Show that the negative multinomial log-likelihood (10.14) is equivalent to the negative log of the likelihood expression (4.5) when there are \\(M = 2\\) classes. Equation 10.14 is \\[ -\\sum_{i=1}^n \\sum_{m=0}^9 y_{im}\\log(f_m(x_i)) \\] Equation 4.5 is: \\[ \\ell(\\beta_0, \\beta_1) = \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\] So, \\(\\log(\\ell)\\) is: \\[\\begin{align*} \\log(\\ell) &= \\log \\left( \\prod_{i:y_i=1}p(x_i) \\prod_{i':y_i'=0}(1-p(x_i')) \\right ) \\\\ &= \\sum_{i:y_i=1}\\log(p(x_i)) + \\sum_{i':y_i'=0}\\log(1-p(x_i')) \\\\ \\end{align*}\\] If we set \\(y_i\\) to be an indicator variable such that \\(y_{i1}\\) and \\(y_{i0}\\) are 1 and 0 (or 0 and 1) when our \\(i\\)th observation is 1 (or 0) respectively, then we can write: \\[ \\log(\\ell) = \\sum_{i}y_{i1}\\log(p(x_i)) + \\sum_{i}y_{i0}\\log(1-p(x_i)) \\] If we also let \\(f_1(x) = p(x)\\) and \\(f_0(x) = 1 - p(x)\\) then: \\[\\begin{align*} \\log(\\ell) &= \\sum_i y_{i1}\\log(f_1(x_i)) + \\sum_{i}y_{i0}\\log(f_0(x_i)) \\\\ &= \\sum_i \\sum_{m=0}^1 y_{im}\\log(f_m(x_i)) \\\\ \\end{align*}\\] When we take the negative of this, it is equivalent to 10.14 for two classes (\\(m = 0,1\\)). 10.1.4 Question 4 Consider a CNN that takes in \\(32 \\times 32\\) grayscale images and has a single convolution layer with three \\(5 \\times 5\\) convolution filters (without boundary padding). Draw a sketch of the input and first hidden layer similar to Figure 10.8. How many parameters are in this model? There are 3 convolution matrices, each with 5x5 weights, plus one bias term per filter, to estimate: therefore \\(3 \\times (5 \\times 5 + 1) = 78\\) parameters. Explain how this model can be thought of as an ordinary feed-forward neural network with the individual pixels as inputs, and with constraints on the weights in the hidden units. What are the constraints?
We can think of a convolution layer as a regularized fully connected layer. The regularization in this case is due to not all inputs being connected to all outputs, and weights being shared between connections. Each output node in the convolved image can be thought of as taking inputs from a limited number of input pixels (the neighboring pixels), with a set of weights specified by the convolution filter which are then shared by the connections to all other output nodes. If there were no constraints, then how many weights would there be in the ordinary feed-forward neural network in (c)? With no constraints, we would connect each of the \\(3 \\times 28 \\times 28\\) output pixels in our convolution layer (without padding, a 5x5 filter reduces a 32x32 image to 28x28) to every one of the \\(32 \\times 32 = 1024\\) input pixels, with a bias term for each output, giving a total of \\(3 \\times 28 \\times 28 \\times (1024 + 1)\\) = 2,410,800 weights to estimate. 10.1.5 Question 5 In Table 10.2 on page 433, we see that the ordering of the three methods with respect to mean absolute error is different from the ordering with respect to test set \\(R^2\\). How can this be? Mean absolute error considers absolute differences between predictions and observed values, whereas \\(R^2\\) considers the (normalized) sum of squared differences; thus larger errors contribute relatively more to \\(R^2\\) than to mean absolute error. 10.2 Applied 10.2.1 Question 6 Consider the simple function \\(R(\\beta) = sin(\\beta) + \\beta/10\\). Draw a graph of this function over the range \\(\\beta \\in [−6, 6]\\). r <- function(x) sin(x) + x/10 x <- seq(-6, 6, 0.1) plot(x, r(x), type = "l") What is the derivative of this function? \\[ R'(\\beta) = \\cos(\\beta) + 1/10 \\] Given \\(\\beta^0 = 2.3\\), run gradient descent to find a local minimum of \\(R(\\beta)\\) using a learning rate of \\(\\rho = 0.1\\). Show each of \\(\\beta^0, \\beta^1, ...\\) in your plot, as well as the final answer. The derivative of our function, i.e. \\(\\cos(x) + 1/10\\), gives us the gradient for a given \\(x\\). For gradient descent, we move \\(x\\) a little in the opposite direction, for some learning rate \\(\\rho = 0.1\\): \\[ x^{m+1} = x^m - \\rho (\\cos(x^m) + 1/10) \\] iter <- function(x, rho) x - rho*(cos(x) + 1/10) gd <- function(start, rho = 0.1) { b <- start v <- b while (abs(b - iter(b, rho)) > 1e-8) { b <- iter(b, rho) v <- c(v, b) } v } res <- gd(2.3) res[length(res)] ## [1] 4.612221 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) Repeat with \\(\\beta^0 = 1.4\\). res <- gd(1.4) res[length(res)] ## [1] -1.670964 plot(x, r(x), type = "l") points(res, r(res), col = "red", pch = 19) 10.2.2 Question 7 Fit a neural network to the Default data. Use a single hidden layer with 10 units, and dropout regularization. Have a look at Labs 10.9.1–10.9.2 for guidance. Compare the classification performance of your model with that of linear logistic regression. Note: following the structure of Labs 10.9.1 and 10.9.2, the network here is fit to the Boston data (predicting crim), so the comparison below is with linear regression on the same split. library(keras) dat <- ISLR2::Boston x <- scale(model.matrix(crim ~ .
- 1, data = dat)) n <- nrow(dat) ntest <- trunc(n / 3) testid <- sample(1:n, ntest) y <- dat$crim # linear regression lfit <- lm(crim ~ ., data = dat[-testid, ]) lpred <- predict(lfit, dat[testid, ]) with(dat[testid, ], mean(abs(lpred - crim))) ## [1] 2.99129 # keras nn <- keras_model_sequential() |> layer_dense(units = 10, activation = "relu", input_shape = ncol(x)) |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) compile(nn, loss = "mse", optimizer = optimizer_rmsprop(), metrics = list("mean_absolute_error") ) history <- fit(nn, x[-testid, ], y[-testid], epochs = 100, batch_size = 26, validation_data = list(x[testid, ], y[testid]), verbose = 0 ) plot(history, smooth = FALSE) npred <- predict(nn, x[testid, ]) ## 6/6 - 0s - 61ms/epoch - 10ms/step mean(abs(y[testid] - npred)) ## [1] 2.219039 In this case, the neural network outperforms the linear model, having a lower mean absolute error on the test data. 10.2.3 Question 8 From your collection of personal photographs, pick 10 images of animals (such as dogs, cats, birds, farm animals, etc.). If the subject does not occupy a reasonable part of the image, then crop the image. Now use a pretrained image classification CNN as in Lab 10.9.4 to predict the class of each of your images, and report the probabilities for the top five predicted classes for each image. library(keras) images <- list.files("images/animals") x <- array(dim = c(length(images), 224, 224, 3)) for (i in seq_len(length(images))) { img <- image_load(paste0("images/animals/", images[i]), target_size = c(224, 224)) x[i,,,] <- image_to_array(img) } model <- application_resnet50(weights = "imagenet") ## Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5 ## 102967424/102967424 [==============================] - 1s 0us/step
pred <- model |> predict(x) |> imagenet_decode_predictions(top = 5) ## 1/1 - 1s - 1s/epoch - 1s/step ## Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json ## 35363/35363 [==============================] - 0s 0us/step names(pred) <- images print(pred) ## $bird.jpg ## class_name class_description score ## 1 n01819313 sulphur-crested_cockatoo 0.33546305 ## 2 n01580077 jay 0.18020906 ## 3 n02441942 weasel 0.08320859 ## 4 n02058221 albatross 0.07002056 ## 5 n01855672 goose 0.05195721 ## ## $bird2.jpg ## class_name class_description score ## 1 n02006656 spoonbill 0.840428233 ## 2 n02012849 crane 0.016258685 ## 3 n01819313 sulphur-crested_cockatoo 0.009740722 ## 4 n02007558 flamingo 0.007816141 ## 5 n01667778 terrapin 0.007497459 ## ## $bird3.jpg ## class_name class_description score ## 1 n01833805 hummingbird 0.9767877460 ## 2 n02033041 dowitcher 0.0111253690 ## 3 n02028035 redshank 0.0042764111 ## 4 n02009229 little_blue_heron 0.0012727526 ## 5 n02002724 black_stork 0.0008971311 ## ## $bug.jpg ## class_name class_description score ## 1 n02190166 fly 0.67558461 ## 2 n02167151 ground_beetle 0.10097048 ## 3 n02172182 dung_beetle 0.05490885 ## 4 n02169497 leaf_beetle 0.03541914 ## 5 n02168699 long-horned_beetle 0.03515299 ## ## $butterfly.jpg ## class_name class_description score ## 1 n02951585 can_opener 0.20600465 ## 2 n03476684 hair_slide 0.09360629 ## 3 n04074963 remote_control 0.06316858 ## 4 n02110185 Siberian_husky 0.05178998 ## 5 n02123597 Siamese_cat 0.03785341 ## ## $butterfly2.jpg ## class_name class_description score ## 1 n02276258 admiral 9.999689e-01 ## 2 n01580077 jay 1.388074e-05 ## 3 n02277742 ringlet 1.235042e-05 ## 4 n02279972 monarch 3.037859e-06 ## 5 n02281787 lycaenid 1.261888e-06 ## ## $elba.jpg ## class_name class_description score ## 1 n02085620 Chihuahua 0.29892012 ## 2 n02091032 Italian_greyhound 0.20332782 ## 3 n02109961 Eskimo_dog 0.08477225 ## 4 n02086910 papillon 0.05140305 ## 5 n02110185 Siberian_husky 0.05064517 ## ## $hamish.jpg ## class_name class_description score ## 1 n02097209 standard_schnauzer 0.6361451149 ## 2 n02097047 miniature_schnauzer 0.3450845778 ## 3 n02097130 giant_schnauzer 0.0164217781 ## 4 n02097298 Scotch_terrier 0.0019116047 ## 5 n02096177 cairn 0.0002054328 ## ## $poodle.jpg ## class_name class_description score ## 1 n02113799 standard_poodle 0.829670966 ## 2 n02088094 Afghan_hound 0.074567914 ## 3 n02113712 miniature_poodle 0.032005571 ## 4 n02102973 Irish_water_spaniel 0.018583152 ## 5 n02102318 cocker_spaniel 0.008629788 ## ## $tortoise.jpg ## class_name class_description score ## 
1 n04033995 quilt 0.28395897 ## 2 n02110958 pug 0.15959552 ## 3 n03188531 diaper 0.14018111 ## 4 n02108915 French_bulldog 0.09364161 ## 5 n04235860 sleeping_bag 0.02608401 10.2.4 Question 9 Fit a lag-5 autoregressive model to the NYSE data, as described in the text and Lab 10.9.6. Refit the model with a 12-level factor representing the month. Does this factor improve the performance of the model? Fitting the model as described in the text. library(tidyverse) ## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ── ## ✔ dplyr 1.1.4 ✔ readr 2.1.5 ## ✔ forcats 1.0.0 ✔ stringr 1.5.1 ## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1 ## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1 ## ✔ purrr 1.0.2 ## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ── ## ✖ dplyr::compute() masks neuralnet::compute() ## ✖ dplyr::filter() masks stats::filter() ## ✖ dplyr::lag() masks stats::lag() ## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors library(ISLR2) xdata <- data.matrix(NYSE[, c("DJ_return", "log_volume","log_volatility")]) istrain <- NYSE[, "train"] xdata <- scale(xdata) lagm <- function(x, k = 1) { n <- nrow(x) pad <- matrix(NA, k, ncol(x)) rbind(pad, x[1:(n - k), ]) } arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.413223 Now we add month (and work with tidyverse). arframe$month = as.factor(str_match(NYSE$date, "-(\\\\d+)-")[,2])[-(1:5)] arfit2 <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred2 <- predict(arfit2, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred2 - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Adding month as a factor gives only a marginal improvement in the test \\(R^2\\) of our model (from 0.413223 to 0.4170418), but the ANOVA below shows that the month terms give a statistically significant improvement in fit, and model 2 also has the lower AIC. anova(arfit, arfit2) ## Analysis of Variance Table ## ## Model 1: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility ## Model 2: log_volume ~ L1.DJ_return + L1.log_volume + L1.log_volatility + ## L2.DJ_return + L2.log_volume + L2.log_volatility + L3.DJ_return + ## L3.log_volume + L3.log_volatility + L4.DJ_return + L4.log_volume + ## L4.log_volatility + L5.DJ_return + L5.log_volume + L5.log_volatility + ## month ## Res.Df RSS Df Sum of Sq F Pr(>F) ## 1 4260 1791.0 ## 2 4249 1775.8 11 15.278 3.3234 0.000143 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 AIC(arfit, arfit2) ## df AIC ## arfit 17 8447.663 ## arfit2 28 8433.031 10.2.5 Question 10 In Section 10.9.6, we showed how to fit a linear AR model to the NYSE data using the lm() function. However, we also mentioned that we can “flatten” the short sequences produced for the RNN model in order to fit a linear AR model. Use this latter approach to fit a linear AR model to the NYSE data. Compare the test \\(R^2\\) of this linear AR model to that of the linear AR model that we fit in the lab.
What are the advantages/disadvantages of each approach? The lm model is the same as that fit above: arfit <- lm(log_volume ~ ., data = arframe[istrain, ]) arpred <- predict(arfit, arframe[!istrain, ]) V0 <- var(arframe[!istrain, "log_volume"]) 1 - mean((arpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.4170418 Now we reshape the data for the RNN. n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 3, 5)) xrnn <- xrnn[, , 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) We can add a “flatten” layer to turn the reshaped data into a long vector of predictors, resulting in a linear AR model. model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 1) Now let’s fit this model. model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE) kpred <- predict(model, xrnn[!istrain,, ]) ## 56/56 - 0s - 58ms/epoch - 1ms/step 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 ## [1] 0.412886 Setting aside the month dummy variables in the refitted lm model, both approaches estimate the same number of coefficients/weights for the lagged predictors (15 slopes plus an intercept/bias, i.e. 16): coef(arfit) ## (Intercept) L1.DJ_return L1.log_volume L1.log_volatility ## 0.067916689 0.094410214 0.498673056 0.586274266 ## L2.DJ_return L2.log_volume L2.log_volatility L3.DJ_return ## -0.027299158 0.036903027 -0.931509135 0.037995916 ## L3.log_volume L3.log_volatility L4.DJ_return L4.log_volume ## 0.070312741 0.216160520 -0.004954842 0.117079461 ## L4.log_volatility L5.DJ_return L5.log_volume L5.log_volatility ## -0.039752786 -0.029620296 0.096034795 0.144510264 ## month02 month03 month04 month05 ## -0.100003367 -0.143781381 -0.028242819 -0.131120579 ## month06 month07 month08 month09 ## -0.125993911 -0.141608808 -0.163030102 -0.018889698 ## month10 month11 month12 ## -0.017206826 -0.037298183 0.008361380 model$get_weights() ## [[1]] ## [,1] ## [1,] -0.031145222 ## [2,] 0.101065643 ## [3,] 0.141815767 ## [4,] -0.004181504 ## [5,] 0.116010934 ## [6,] -0.003764492 ## [7,] 0.038601257 ## [8,] 0.078083567 ## [9,] 0.137415737 ## [10,] -0.029184511 ## [11,] 0.036070298 ## [12,] -0.821708620 ## [13,] 0.095548652 ## [14,] 0.511229098 ## [15,] 0.521453559 ## ## [[2]] ## [1] -0.006889343 The flattened RNN has a lower \\(R^2\\) on the test data than our lm model above. The lm model is quicker to fit and conceptually simpler; it also gives us the ability to inspect the coefficients for the different variables directly. The flattened RNN is regularized to some extent, since its weights are estimated by stochastic gradient descent on batches of the data. 10.2.6 Question 11 Repeat the previous exercise, but now fit a nonlinear AR model by “flattening” the short sequences produced for the RNN model. From the book: To fit a nonlinear AR model, we could add in a hidden layer.
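To make the structure explicit (notation mine, a sketch of the model fit below): the flattened linear AR model computes \\(\\hat{y}_t = \\beta_0 + \\sum_{\\ell=1}^{5} \\beta_\\ell^\\top x_{t-\\ell}\\) from the 15 lagged predictors, while the nonlinear version inserts a hidden layer, \\(\\hat{y}_t = w_0 + w^\\top g(W [x_{t-1}, \\dots, x_{t-5}] + b)\\), where \\(g\\) is the ReLU activation (with dropout applied to the hidden units during training).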
xfun::cache_rds({ model <- keras_model_sequential() |> layer_flatten(input_shape = c(5, 3)) |> layer_dense(units = 32, activation = "relu") |> layer_dropout(rate = 0.4) |> layer_dense(units = 1) model |> compile( loss = "mse", optimizer = optimizer_rmsprop(), metrics = "mse" ) history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) plot(history, smooth = FALSE, metrics = "mse") kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 66ms/epoch - 1ms/step ## [1] 0.4271516 This approach improves our \\(R^2\\) over the linear model above. 10.2.7 Question 12 Consider the RNN fit to the NYSE data in Section 10.9.6. Modify the code to allow inclusion of the variable day_of_week, and fit the RNN. Compute the test \\(R^2\\). To accomplish this, I’ll include day of the week as one of the lagged variables in the RNN. Thus, our input for each observation will be 4 x 5 (rather than 3 x 5). xfun::cache_rds({ xdata <- data.matrix( NYSE[, c("day_of_week", "DJ_return", "log_volume","log_volatility")] ) istrain <- NYSE[, "train"] xdata <- scale(xdata) arframe <- data.frame( log_volume = xdata[, "log_volume"], L1 = lagm(xdata, 1), L2 = lagm(xdata, 2), L3 = lagm(xdata, 3), L4 = lagm(xdata, 4), L5 = lagm(xdata, 5) ) arframe <- arframe[-(1:5), ] istrain <- istrain[-(1:5)] n <- nrow(arframe) xrnn <- data.matrix(arframe[, -1]) xrnn <- array(xrnn, c(n, 4, 5)) xrnn <- xrnn[,, 5:1] xrnn <- aperm(xrnn, c(1, 3, 2)) dim(xrnn) model <- keras_model_sequential() |> layer_simple_rnn(units = 12, input_shape = list(5, 4), dropout = 0.1, recurrent_dropout = 0.1 ) |> layer_dense(units = 1) model |> compile(optimizer = optimizer_rmsprop(), loss = "mse") history <- model |> fit( xrnn[istrain,, ], arframe[istrain, "log_volume"], batch_size = 64, epochs = 200, validation_data = list(xrnn[!istrain,, ], arframe[!istrain, "log_volume"]), verbose = 0 ) kpred <- predict(model, xrnn[!istrain,, ]) 1 - mean((kpred - arframe[!istrain, "log_volume"])^2) / V0 }) ## 56/56 - 0s - 133ms/epoch - 2ms/step ## [1] 0.4405331 10.2.8 Question 13 Repeat the analysis of Lab 10.9.5 on the IMDb data using a similarly structured neural network. There we used a dictionary of size 10,000. Consider the effects of varying the dictionary size. Try the values 1000, 3000, 5000, and 10,000, and compare the results. xfun::cache_rds({ library(knitr) accuracy <- c() for(max_features in c(1000, 3000, 5000, 10000)) { imdb <- dataset_imdb(num_words = max_features) c(c(x_train, y_train), c(x_test, y_test)) %<-% imdb maxlen <- 500 x_train <- pad_sequences(x_train, maxlen = maxlen) x_test <- pad_sequences(x_test, maxlen = maxlen) model <- keras_model_sequential() |> layer_embedding(input_dim = max_features, output_dim = 32) |> layer_lstm(units = 32) |> layer_dense(units = 1, activation = "sigmoid") model |> compile( optimizer = "rmsprop", loss = "binary_crossentropy", metrics = "acc" ) history <- fit(model, x_train, y_train, epochs = 10, batch_size = 128, validation_data = list(x_test, y_test), verbose = 0 ) predy <- predict(model, x_test) > 0.5 accuracy <- c(accuracy, mean(abs(y_test == as.numeric(predy)))) } tibble( "Max Features" = c(1000, 3000, 5000, 10000), "Accuracy" = accuracy ) |> kable() }) ## Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz ## 8192/17464789 [..............................] 
- ETA: 0s ... 17464789/17464789 [==============================] - 0s 0us/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step ## 782/782 - 16s - 16s/epoch - 20ms/step Max Features Accuracy 1000 0.86084 3000 0.87224 5000 0.87460 10000 0.86180 Varying the dictionary size does not have a substantial impact on our estimates of accuracy. However, the models do take a substantial amount of time to fit, and it is not clear we are finding the best-fitting model in each case. For example, the model using a dictionary size of 10,000 obtained an accuracy of 0.8721 in the text; the difference between that figure and the estimate obtained here is as large as the differences between the models with different dictionary sizes. "],["survival-analysis-and-censored-data.html", "11 Survival Analysis and Censored Data 11.1 Conceptual 11.2 Applied", " 11 Survival Analysis and Censored Data 11.1 Conceptual 11.1.1 Question 1 For each example, state whether or not the censoring mechanism is independent. Justify your answer. In a study of disease relapse, due to a careless research scientist, all patients whose phone numbers begin with the number “2” are lost to follow up. Independent. There’s no reason to think disease relapse should be related to the first digit of a phone number. In a study of longevity, a formatting error causes all patient ages that exceed 99 years to be lost (i.e. we know that those patients are more than 99 years old, but we do not know their exact ages). Not independent. Older patients are more likely to see an event than younger patients. Hospital A conducts a study of longevity. However, very sick patients tend to be transferred to Hospital B, and are lost to follow up. Not independent. Sick patients are more likely to see an event than healthy patients. In a study of unemployment duration, the people who find work earlier are less motivated to stay in touch with study investigators, and therefore are more likely to be lost to follow up. Not independent. More employable individuals are more likely to see an event. In a study of pregnancy duration, women who deliver their babies pre-term are more likely to do so away from their usual hospital, and thus are more likely to be censored, relative to women who deliver full-term babies. Not independent. Delivery away from hospital will be associated with pregnancy duration. A researcher wishes to model the number of years of education of the residents of a small town. Residents who enroll in college out of town are more likely to be lost to follow up, and are also more likely to attend graduate school, relative to those who attend college in town. Not independent. Years of education will be associated with enrolling in out of town colleges. Researchers conduct a study of disease-free survival (i.e. time until disease relapse following treatment).
Patients who have not relapsed within five years are considered to be cured, and thus their survival time is censored at five years. In other words, we assume all events happen within five years, so censoring at five years is equivalent to not censoring at all, and the censoring is therefore independent. We wish to model the failure time for some electrical component. This component can be manufactured in Iowa or in Pittsburgh, with no difference in quality. The Iowa factory opened five years ago, and so components manufactured in Iowa are censored at five years. The Pittsburgh factory opened two years ago, so those components are censored at two years. If there is no difference in quality, then location, and therefore censoring, is independent of failure time. We wish to model the failure time of an electrical component made in two different factories, one of which opened before the other. We have reason to believe that the components manufactured in the factory that opened earlier are of higher quality. In this case, the difference in opening times of the two locations will mean that any difference in quality between locations will be associated with censoring, so censoring is not independent. 11.1.2 Question 2 We conduct a study with \\(n = 4\\) participants who have just purchased cell phones, in order to model the time until phone replacement. The first participant replaces her phone after 1.2 years. The second participant still has not replaced her phone at the end of the two-year study period. The third participant changes her phone number and is lost to follow up (but has not yet replaced her phone) 1.5 years into the study. The fourth participant replaces her phone after 0.2 years. For each of the four participants (\\(i = 1,..., 4\\)), answer the following questions using the notation introduced in Section 11.1: Is the participant’s cell phone replacement time censored? No, Yes, Yes and No. Censoring occurs when we do not know if or when the phone was replaced. Is the value of \\(c_i\\) known, and if so, then what is it? \\(c_i\\) is the censoring time. For the four participants these are: NA, 2, 1.5 and NA. Is the value of \\(t_i\\) known, and if so, then what is it? \\(t_i\\) is the time to event. For the four participants these are: 1.2, NA, NA and 0.2. Is the value of \\(y_i\\) known, and if so, then what is it? \\(y_i\\) is the observed time. For the four participants these are: 1.2, 2, 1.5 and 0.2. Is the value of \\(\\delta_i\\) known, and if so, then what is it? \\(\\delta_i\\) is an indicator for censoring. The nomenclature introduced here defines this to be 1 if we observe the true “survival” time and 0 if we observe the censored time. Therefore, for these participants, the values are: 1, 0, 0 and 1. 11.1.3 Question 3 For the example in Exercise 2, report the values of \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\), where this notation was defined in Section 11.3. \\(K\\) is the number of unique deaths, which is 2. \\(d_k\\) represents the unique death times, which are 0.2 and 1.2. \\(r_k\\) denotes the number of patients alive and in the study just before \\(d_k\\). Note the first event is for patient 4, then patient 1, then patient 3 is censored and finally the study ends with patient 2 still involved. Therefore \\(r_k\\) takes the values 4 and 3. \\(q_k\\) denotes the number of patients who died at time \\(d_k\\), therefore this takes values 1 and 1. These quantities can be computed by hand, as in the short sketch below, and then checked using the survival package.
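The following helper is a small sketch of my own (not part of the original solution): it computes the unique death times \\(d_k\\), the risk-set sizes \\(r_k\\) and the death counts \\(q_k\\) directly from the observed times and event indicators.

km_summary <- function(y, delta) {
  d <- sort(unique(y[delta == 1]))                     # unique death times d_k
  r <- sapply(d, function(t) sum(y >= t))              # number at risk just before d_k
  q <- sapply(d, function(t) sum(y == t & delta == 1)) # number of deaths at d_k
  data.frame(d, r, q)
}
km_summary(y = c(1.2, 2, 1.5, 0.2), delta = c(1, 0, 0, 1))
# expected: d = (0.2, 1.2), r = (4, 3), q = (1, 1), matching the values above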
library(survival) x <- Surv(c(1.2, 2, 1.5, 0.2), event = c(1, 0, 0, 1)) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 0.2 4 1 0.75 0.217 0.426 1 ## 1.2 3 1 0.50 0.250 0.188 1 11.1.4 Question 4 This problem makes use of the Kaplan-Meier survival curve displayed in Figure 11.9. The raw data that went into plotting this survival curve is given in Table 11.4. The covariate column of that table is not needed for this problem. What is the estimated probability of survival past 50 days? There are 2 events that happen before 50 days. The numbers at risk \\(r_k\\) are 5 and 4 (one observation was censored early on), thus the survival probability is \\(4/5 \\times 3/4 = 0.6\\). Equivalently, we can use the survival package. library(tidyverse) table_data <- tribble( ~Y, ~D, ~X, 26.5, 1, 0.1, 37.2, 1, 11, 57.3, 1, -0.3, 90.8, 0, 2.8, 20.2, 0, 1.8, 89.8, 0, 0.4 ) x <- Surv(table_data$Y, table_data$D) summary(survfit(x ~ 1)) ## Call: survfit(formula = x ~ 1) ## ## time n.risk n.event survival std.err lower 95% CI upper 95% CI ## 26.5 5 1 0.8 0.179 0.516 1 ## 37.2 4 1 0.6 0.219 0.293 1 ## 57.3 3 1 0.4 0.219 0.137 1 Write out an analytical expression for the estimated survival function. For instance, your answer might be something along the lines of \\[ \\hat{S}(t) = \\begin{cases} 0.8 & \\text{if } t < 31\\\\ 0.5 & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] (The previous equation is for illustration only: it is not the correct answer!) \\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < 26.5 \\\\ 0.8 & \\text{if } 26.5 \\le t < 37.2 \\\\ 0.6 & \\text{if } 37.2 \\le t < 57.3 \\\\ 0.4 & \\text{if } 57.3 \\le t \\end{cases} \\] 11.1.5 Question 5 Sketch the survival function given by the equation \\[ \\hat{S}(t) = \\begin{cases} 0.8, & \\text{if } t < 31\\\\ 0.5, & \\text{if } 31 \\le t < 77\\\\ 0.22 & \\text{if } 77 \\le t \\end{cases} \\] Your answer should look something like Figure 11.9. We can draw this plot, or even engineer data that will generate the required plot… plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 31, 31, 77, 77, 100), c(0.8, 0.8, 0.5, 0.5, 0.22, 0.22) ) 11.1.6 Question 6 This problem makes use of the data displayed in Figure 11.1. In completing this problem, you can refer to the observation times as \\(y_1,...,y_4\\). The ordering of these observation times can be seen from Figure 11.1; their exact values are not required. Report the values of \\(\\delta_1,...,\\delta_4\\), \\(K\\), \\(d_1,...,d_K\\), \\(r_1,...,r_K\\), and \\(q_1,...,q_K\\). The relevant notation is defined in Sections 11.1 and 11.3. \\(\\delta\\) values are: 1, 0, 1, 0. \\(K\\) is 2. \\(d\\) values are \\(y_3\\) and \\(y_1\\). \\(r\\) values are 4 and 2. \\(q\\) values are 1 and 1. Sketch the Kaplan-Meier survival curve corresponding to this data set. (You do not need to use any software to do this—you can sketch it by hand using the results obtained in (a).) plot(NULL, xlim = c(0, 350), ylim = c(0, 1), ylab = "Estimated Probability of Survival", xlab = "Time in Days" ) lines( c(0, 150, 150, 300, 300, 350), c(1, 1, 0.75, 0.75, 0.375, 0.375) ) x <- Surv(c(300, 350, 150, 250), c(1, 0, 1, 0)) Based on the survival curve estimated in (b), what is the probability that the event occurs within 200 days? What is the probability that the event does not occur within 310 days? 0.25 and 0.375. Write out an expression for the estimated survival curve from (b).
\\[ \\hat{S}(t) = \\begin{cases} 1 & \\text{if } t < y_3 \\\\ 0.75 & \\text{if } y_3 \\le t < y_1 \\\\ 0.375 & \\text{if } y_1 \\le t \\end{cases} \\] 11.1.7 Question 7 In this problem, we will derive (11.5) and (11.6), which are needed for the construction of the log-rank test statistic (11.8). Recall the notation in Table 11.1. Assume that there is no difference between the survival functions of the two groups. Then we can think of \\(q_{1k}\\) as the number of failures if we draw \\(r_{1k}\\) observations, without replacement, from a risk set of \\(r_k\\) observations that contains a total of \\(q_k\\) failures. Argue that \\(q_{1k}\\) follows a hypergeometric distribution. Write the parameters of this distribution in terms of \\(r_{1k}\\), \\(r_k\\), and \\(q_k\\). A hypergeometric distribution models sampling without replacement from a finite pool where each sample is a success or failure. This fits the situation here, where we have a finite number of samples in the risk set. The hypergeometric distribution is parameterized as \\(k\\) successes in \\(n\\) draws, without replacement, from a population of size \\(N\\) containing \\(K\\) objects with that feature. Mapping to our situation, \\(q_{1k}\\) is \\(k\\), \\(r_{1k}\\) is \\(n\\), \\(r_k\\) is \\(N\\) and \\(q_k\\) is \\(K\\). Given your previous answer, and the properties of the hypergeometric distribution, what are the mean and variance of \\(q_{1k}\\)? Compare your answer to (11.5) and (11.6). With the above parameterization, the mean (\\(n K/N\\)) is \\(r_{1k} q_k/r_k\\). The variance \\(n K/N (N-K)/N (N-n)/(N-1)\\) is \\[ r_{1k} \\frac{q_k}{r_k} \\frac{r_k-q_k}{r_k} \\frac{r_k - r_{1k}}{r_k - 1} \\] These are equivalent to 11.5 and 11.6. 11.1.8 Question 8 Recall that the survival function \\(S(t)\\), the hazard function \\(h(t)\\), and the density function \\(f(t)\\) are defined in (11.2), (11.9), and (11.11), respectively. Furthermore, define \\(F(t) = 1 − S(t)\\). Show that the following relationships hold: \\[ f(t) = dF(t)/dt \\\\ S(t) = \\exp\\left(-\\int_0^t h(u)du\\right) \\] If \\(F(t) = 1 - S(t)\\), then \\(F(t)\\) is the cumulative distribution function (cdf) for \\(t\\). For a continuous distribution, the cdf can be expressed as an integral of the probability density function (pdf) up to some value \\(x\\), i.e. \\(F(x) = \\int_{-\\infty}^x f(t) dt\\). Equivalently, the derivative of the cdf is its pdf: \\(f(t) = \\frac{d F(t)}{dt}\\). Then, \\(h(t) = \\frac{f(t)}{S(t)} = \\frac{dF(t)/dt}{S(t)} = \\frac{-dS(t)/dt}{S(t)}\\). From basic calculus, this can be rewritten as \\(h(t) = -\\frac{d}{dt}\\log{S(t)}\\). Integrating both sides from 0 to \\(t\\) (noting that \\(S(0) = 1\\)) and then exponentiating gives the second identity. 11.1.9 Question 9 In this exercise, we will explore the consequences of assuming that the survival times follow an exponential distribution. Suppose that a survival time follows an \\(Exp(\\lambda)\\) distribution, so that its density function is \\(f(t) = \\lambda\\exp(−\\lambda t)\\). Using the relationships provided in Exercise 8, show that \\(S(t) = \\exp(-\\lambda t)\\). The cdf of an exponential distribution is \\(F(t) = 1 - \\exp(-\\lambda t)\\), and \\(S(t) = 1 - F(t)\\). Hence, \\(S(t) = \\exp(-\\lambda t)\\). Now suppose that each of \\(n\\) independent survival times follows an \\(\\exp(\\lambda)\\) distribution. Write out an expression for the likelihood function (11.13).
The reference to (11.13) gives us the following formula: \\[ L = \\prod_{i=1}^{n} h(y_i)^{\\delta_i} S(y_i) \\] (11.10) also gives us \\[ h(t) = \\frac{f(t)}{S(t)} \\] Plugging in the expressions from part (a), we get \\[\\begin{align*} h(t) &= \\frac{\\lambda \\exp(- \\lambda t)}{\\exp(- \\lambda t)} \\\\ &= \\lambda \\end{align*}\\] Substituting these into (11.13), we get the following likelihood expression: \\[ L = \\prod_i \\lambda^{\\delta_i} e^{- \\lambda y_i} \\] Show that the maximum likelihood estimator for \\(\\lambda\\) is \\[ \\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i. \\] Take the log-likelihood: \\[\\begin{align*} \\log L &= \\sum_i \\log \\left( \\lambda^{\\delta_i} e^{- \\lambda y_i} \\right) \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i \\log e} \\\\ &= \\sum_i{\\delta_i\\log\\lambda - \\lambda y_i} \\\\ &= \\log\\lambda\\sum_i{\\delta_i} - \\lambda\\sum_i{y_i} \\end{align*}\\] Differentiating this expression with respect to \\(\\lambda\\) we get: \\[ \\frac{d \\log L}{d \\lambda} = \\frac{\\sum_i{\\delta_i}}{\\lambda} - \\sum_i{y_i} \\] The log-likelihood is maximized where its gradient is zero; setting this derivative to zero and solving gives the MLE \\(\\hat\\lambda = \\sum_{i=1}^n \\delta_i / \\sum_{i=1}^n y_i\\). Use your answer to (c) to derive an estimator of the mean survival time. Hint: For (d), recall that the mean of an \\(Exp(\\lambda)\\) random variable is \\(1/\\lambda\\). The estimated mean survival time would be \\(1/\\hat\\lambda\\), which given the above is \\(\\sum_{i=1}^n y_i / \\sum_{i=1}^n \\delta_i\\), and can be thought of as the total observation time over the total number of deaths. 11.2 Applied 11.2.1 Question 10 This exercise focuses on the brain tumor data, which is included in the ISLR2 R library. Plot the Kaplan-Meier survival curve with ±1 standard error bands, using the survfit() function in the survival package. library(ISLR2) x <- Surv(BrainCancer$time, BrainCancer$status) plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) Draw a bootstrap sample of size \\(n = 88\\) from the pairs (\\(y_i\\), \\(\\delta_i\\)), and compute the resulting Kaplan-Meier survival curve. Repeat this process \\(B = 200\\) times. Use the results to obtain an estimate of the standard error of the Kaplan-Meier survival curve at each timepoint. Compare this to the standard errors obtained in (a). plot(survfit(x ~ 1), xlab = "Months", ylab = "Estimated Probability of Survival", col = "steelblue", conf.int = 0.67 ) fit <- survfit(x ~ 1) dat <- tibble(time = c(0, fit$time)) for (i in 1:200) { y <- survfit(sample(x, 88, replace = TRUE) ~ 1) y <- tibble(time = c(0, y$time), "s{i}" := c(1, y$surv)) dat <- left_join(dat, y, by = "time") } res <- fill(dat, starts_with("s")) |> rowwise() |> transmute(sd = sd(c_across(starts_with("s")))) se <- res$sd[2:nrow(res)] lines(fit$time, fit$surv - se, lty = 2, col = "red") lines(fit$time, fit$surv + se, lty = 2, col = "red") Fit a Cox proportional hazards model that uses all of the predictors to predict survival. Summarize the main findings.
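As a brief reminder before interpreting the fit (my addition, not part of the original solution): the Cox model represents the hazard for observation \\(i\\) as \\[ h(t \\mid x_i) = h_0(t) \\exp\\left( \\sum_{j=1}^p x_{ij} \\beta_j \\right), \\] so each coefficient below is a log hazard ratio, and the exp(coef) column gives the multiplicative effect on the hazard of a one-unit increase in that predictor.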
fit <- coxph(Surv(time, status) ~ sex + diagnosis + loc + ki + gtv + stereo, data = BrainCancer) fit ## Call: ## coxph(formula = Surv(time, status) ~ sex + diagnosis + loc + ## ki + gtv + stereo, data = BrainCancer) ## ## coef exp(coef) se(coef) z p ## sexMale 0.18375 1.20171 0.36036 0.510 0.61012 ## diagnosisLG glioma 0.91502 2.49683 0.63816 1.434 0.15161 ## diagnosisHG glioma 2.15457 8.62414 0.45052 4.782 1.73e-06 ## diagnosisOther 0.88570 2.42467 0.65787 1.346 0.17821 ## locSupratentorial 0.44119 1.55456 0.70367 0.627 0.53066 ## ki -0.05496 0.94653 0.01831 -3.001 0.00269 ## gtv 0.03429 1.03489 0.02233 1.536 0.12466 ## stereoSRT 0.17778 1.19456 0.60158 0.296 0.76760 ## ## Likelihood ratio test=41.37 on 8 df, p=1.776e-06 ## n= 87, number of events= 35 ## (1 observation deleted due to missingness) The coefficients for diagnosisHG glioma and ki are highly significant: a high-grade glioma diagnosis is associated with a much higher hazard, and a higher Karnofsky index (ki) with a lower hazard. Stratify the data by the value of ki. (Since only one observation has ki=40, you can group that observation together with the observations that have ki=60.) Plot Kaplan-Meier survival curves for each of the five strata, adjusted for the other predictors. To adjust for the other predictors, we use the model fitted above (which includes those predictors) to predict new, artificial data in which ki takes each possible value, while the remaining predictors are held at their mode (for factors) or mean (for numeric variables). library(ggfortify) modaldata <- data.frame( sex = rep("Female", 5), diagnosis = rep("Meningioma", 5), loc = rep("Supratentorial", 5), ki = c(60, 70, 80, 90, 100), gtv = rep(mean(BrainCancer$gtv), 5), stereo = rep("SRT", 5) ) survplots <- survfit(fit, newdata = modaldata) plot(survplots, xlab = "Months", ylab = "Survival Probability", col = 2:6) legend("bottomleft", c("60", "70", "80", "90", "100"), col = 2:6, lty = 1) 11.2.2 Question 11 This example makes use of the data in Table 11.4. Create two groups of observations. In Group 1, \\(X < 2\\), whereas in Group 2, \\(X \\ge 2\\). Plot the Kaplan-Meier survival curves corresponding to the two groups. Be sure to label the curves so that it is clear which curve corresponds to which group. By eye, does there appear to be a difference between the two groups’ survival curves? x <- split(Surv(table_data$Y, table_data$D), table_data$X < 2) plot(NULL, xlim = c(0, 100), ylim = c(0, 1), ylab = "Survival Probability") lines(survfit(x[[1]] ~ 1), conf.int = FALSE, col = 2) lines(survfit(x[[2]] ~ 1), conf.int = FALSE, col = 3) legend("bottomleft", c(">= 2", "< 2"), col = 2:3, lty = 1) There does not appear to be any difference between the curves. Fit Cox’s proportional hazards model, using the group indicator as a covariate. What is the estimated coefficient? Write a sentence providing the interpretation of this coefficient, in terms of the hazard or the instantaneous probability of the event. Is there evidence that the true coefficient value is non-zero? fit <- coxph(Surv(Y, D) ~ X < 2, data = table_data) fit ## Call: ## coxph(formula = Surv(Y, D) ~ X < 2, data = table_data) ## ## coef exp(coef) se(coef) z p ## X < 2TRUE 0.3401 1.4051 1.2359 0.275 0.783 ## ## Likelihood ratio test=0.08 on 1 df, p=0.7797 ## n= 6, number of events= 3 The coefficient is \\(0.3401\\). This implies a slightly increased hazard when \\(X < 2\\), but it is not significantly different from zero (p = 0.78). Recall from Section 11.5.2 that in the case of a single binary covariate, the log-rank test statistic should be identical to the score statistic for the Cox model.
Conduct a log-rank test to determine whether there is a difference between the survival curves for the two groups. How does the p-value for the log-rank test statistic compare to the \\(p\\)-value for the score statistic for the Cox model from (b)? summary(fit)$sctest ## test df pvalue ## 0.07644306 1.00000000 0.78217683 survdiff(Surv(Y, D) ~ X < 2, data = table_data)$chisq ## [1] 0.07644306 They are identical. "],["unsupervised-learning.html", "12 Unsupervised Learning 12.1 Conceptual 12.2 Applied", " 12 Unsupervised Learning 12.1 Conceptual 12.1.1 Question 1 This problem involves the \\(K\\)-means clustering algorithm. Prove (12.18). Equation 12.18 is: \\[ \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = 2 \\sum_{i \\in C_k} \\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\] where \\[\\bar{x}_{kj} = \\frac{1}{|C_k|}\\sum_{i \\in C_k} x_{ij}\\] On the left-hand side, we compute squared differences between each pair of observations (indexed by \\(i\\) and \\(i'\\)). On the right-hand side, we compute squared differences between each observation and the cluster mean. Intuitively this identity is clear (the factor of 2 is present because we count the difference for each pair twice). To prove it, note first that, \\[\\begin{align} (x_{ij} - x_{i'j})^2 = & ((x_{ij} - \\bar{x}_{kj}) - (x_{i'j} - \\bar{x}_{kj}))^2 \\\\ = & (x_{ij} - \\bar{x}_{kj})^2 - 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + (x_{i'j} - \\bar{x}_{kj})^2 \\end{align}\\] Note that the first term is independent of \\(i'\\) and the last is independent of \\(i\\). Therefore, the left-hand side of 12.18 can be written as: \\[\\begin{align} \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k} \\sum_{j=1}^p (x_{ij} - x_{i'j})^2 = & \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p 2(x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{1}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{i'j} - \\bar{x}_{kj})^2 \\\\ = & \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 - \\frac{2}{|C_k|}\\sum_{i,i' \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj}) + \\frac{|C_k|}{|C_k|}\\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\\\ = & 2 \\sum_{i \\in C_k}\\sum_{j=1}^p (x_{ij} - \\bar{x}_{kj})^2 \\end{align}\\] Note that we can drop the term containing \\((x_{ij} - \\bar{x}_{kj})(x_{i'j} - \\bar{x}_{kj})\\) since this is 0 when summed over combinations of \\(i\\) and \\(i'\\) for a given \\(j\\). On the basis of this identity, argue that the \\(K\\)-means clustering algorithm (Algorithm 12.2) decreases the objective (12.17) at each iteration. Equation 12.18 shows that the sum of squared Euclidean distances between all pairs of samples in a cluster is proportional to the sum of squared distances from each sample to the cluster mean. In Algorithm 12.2, updating each centroid to the cluster mean minimizes the sum of squared deviations for the current assignments, and reassigning each observation to its closest centroid can only reduce that sum further. By the identity, each iteration therefore never increases the objective (12.17). 12.1.2 Question 2 Suppose that we have four observations, for which we compute a dissimilarity matrix, given by \\[\\begin{bmatrix} & 0.3 & 0.4 & 0.7 \\\\ 0.3 & & 0.5 & 0.8 \\\\ 0.4 & 0.5 & & 0.45 \\\\ 0.7 & 0.8 & 0.45 & \\\\ \\end{bmatrix}\\] For instance, the dissimilarity between the first and second observations is 0.3, and the dissimilarity between the second and fourth observations is 0.8. On the basis of this dissimilarity matrix, sketch the dendrogram that results from hierarchically clustering these four observations using complete linkage.
Be sure to indicate on the plot the height at which each fusion occurs, as well as the observations corresponding to each leaf in the dendrogram. m <- matrix(c(0, 0.3, 0.4, 0.7, 0.3, 0, 0.5, 0.8, 0.4, 0.5, 0, 0.45, 0.7, 0.8, 0.45, 0), ncol = 4) c1 <- hclust(as.dist(m), method = "complete") plot(c1) Repeat (a), this time using single linkage clustering. c2 <- hclust(as.dist(m), method = "single") plot(c2) Suppose that we cut the dendrogram obtained in (a) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c1, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 0 1 ## 4 0 1 Suppose that we cut the dendrogram obtained in (b) such that two clusters result. Which observations are in each cluster? table(1:4, cutree(c2, 2)) ## ## 1 2 ## 1 1 0 ## 2 1 0 ## 3 1 0 ## 4 0 1 It is mentioned in the chapter that at each fusion in the dendrogram, the position of the two clusters being fused can be swapped without changing the meaning of the dendrogram. Draw a dendrogram that is equivalent to the dendrogram in (a), for which two or more of the leaves are repositioned, but for which the meaning of the dendrogram is the same. plot(c1, labels = c(2, 1, 3, 4)) 12.1.3 Question 3 In this problem, you will perform \\(K\\)-means clustering manually, with \\(K = 2\\), on a small example with \\(n = 6\\) observations and \\(p = 2\\) features. The observations are as follows. Obs. \\(X_1\\) \\(X_2\\) 1 1 4 2 1 3 3 0 4 4 5 1 5 6 2 6 4 0 Plot the observations. library(ggplot2) d <- data.frame( x1 = c(1, 1, 0, 5, 6, 4), x2 = c(4, 3, 4, 1, 2, 0) ) ggplot(d, aes(x = x1, y = x2)) + geom_point() Randomly assign a cluster label to each observation. You can use the sample() command in R to do this. Report the cluster labels for each observation. set.seed(42) d$cluster <- sample(c(1, 2), size = nrow(d), replace = TRUE) Compute the centroid for each cluster. centroids <- sapply(c(1, 2), function(i) colMeans(d[d$cluster == i, 1:2])) Assign each observation to the centroid to which it is closest, in terms of Euclidean distance. Report the cluster labels for each observation. dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) Repeat (c) and (d) until the answers obtained stop changing. centroids <- sapply(c(1, 2), function(i) colMeans(d[d$cluster == i, 1:2])) dist <- sapply(1:2, function(i) { sqrt((d$x1 - centroids[1, i])^2 + (d$x2 - centroids[2, i])^2) }) d$cluster <- apply(dist, 1, which.min) In this case, we get stable labels after the first iteration. In your plot from (a), color the observations according to the cluster labels obtained. ggplot(d, aes(x = x1, y = x2, color = factor(cluster))) + geom_point() 12.1.4 Question 4 Suppose that for a particular data set, we perform hierarchical clustering using single linkage and using complete linkage. We obtain two dendrograms. At a certain point on the single linkage dendrogram, the clusters {1, 2, 3} and {4, 5} fuse. On the complete linkage dendrogram, the clusters {1, 2, 3} and {4, 5} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? The complete linkage fusion will likely be higher in the tree, since single linkage uses the minimum distance between two clusters while complete linkage uses the maximum. However, there is a chance that they could fuse at the same height (so technically there is not enough information to tell); the short check below makes this concrete.
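We can illustrate this with the dendrograms already computed for Question 2 (a small check of my own, reusing c1 and c2 from that answer):

c1$height  # complete linkage fusion heights; expected: 0.30 0.45 0.80
c2$height  # single linkage fusion heights;   expected: 0.30 0.40 0.45

Each successive single linkage fusion occurs at a height no greater than the corresponding complete linkage fusion (0.3 vs 0.3, 0.4 vs 0.45, 0.45 vs 0.8), and the first fusion, between two singletons, occurs at 0.3 under both linkages.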
At a certain point on the single linkage dendrogram, the clusters {5} and {6} fuse. On the complete linkage dendrogram, the clusters {5} and {6} also fuse at a certain point. Which fusion will occur higher on the tree, or will they fuse at the same height, or is there not enough information to tell? They will fuse at the same height (the algorithm for calculating distance is the same when the clusters are of size 1). 12.1.5 Question 5 In words, describe the results that you would expect if you performed \\(K\\)-means clustering of the eight shoppers in Figure 12.16, on the basis of their sock and computer purchases, with \\(K = 2\\). Give three answers, one for each of the variable scalings displayed. Explain. In the first case (raw numbers of items sold), we would expect the clusters to represent low vs. high numbers of sock purchases. When the variables are scaled to have standard deviation one, we would expect the clusters to correspond to whether or not the retailer sold a computer. When computer purchases are measured in dollars (the third scaling), the computer variable dominates the dissimilarity, so we would again expect the clusters to separate retailers by computer sales. To test, we can run the analysis in R: set.seed(42) dat <- data.frame( socks = c(8, 11, 7, 6, 5, 6, 7, 8), computers = c(0, 0, 0, 0, 1, 1, 1, 1) ) kmeans(dat, 2)$cluster ## [1] 1 1 2 2 2 2 2 1 kmeans(scale(dat), 2)$cluster ## [1] 1 1 1 1 2 2 2 2 dat$computers <- dat$computers * 2000 kmeans(dat, 2)$cluster ## [1] 1 1 1 1 2 2 2 2 12.1.6 Question 6 We saw in Section 12.2.2 that the principal component loading and score vectors provide an approximation to a matrix, in the sense of (12.5). Specifically, the principal component score and loading vectors solve the optimization problem given in (12.6). Now, suppose that the M principal component score vectors \\(z_{im}\\), \\(m = 1,...,M\\), are known. Using (12.6), explain that the first \\(M\\) principal component loading vectors \\(\\phi_{jm}\\), \\(m = 1,...,M\\), can be obtained by performing \\(M\\) separate least squares linear regressions. In each regression, the principal component score vectors are the predictors, and one of the features of the data matrix is the response. With the scores fixed, the objective in (12.6) decomposes into a separate sum of squares for each feature \\(j\\): we minimize \\(\\sum_i (x_{ij} - \\sum_{m=1}^M z_{im}\\phi_{jm})^2\\) over \\(\\phi_{j1},...,\\phi_{jM}\\), which is exactly a least squares regression (without an intercept, since the data are centered) of the \\(j\\)th feature on the \\(M\\) score vectors. Performing one such regression per feature yields all of the required loadings. 12.2 Applied 12.2.1 Question 7 In the chapter, we mentioned the use of correlation-based distance and Euclidean distance as dissimilarity measures for hierarchical clustering. It turns out that these two measures are almost equivalent: if each observation has been centered to have mean zero and standard deviation one, and if we let \\(r_{ij}\\) denote the correlation between the \\(i\\)th and \\(j\\)th observations, then the quantity \\(1 − r_{ij}\\) is proportional to the squared Euclidean distance between the ith and jth observations. On the USArrests data, show that this proportionality holds. Hint: The Euclidean distance can be calculated using the dist() function, and correlations can be calculated using the cor() function. dat <- t(scale(t(USArrests))) d1 <- dist(dat)^2 d2 <- as.dist(1 - cor(t(dat))) plot(d1, d2) 12.2.2 Question 8 In Section 12.2.3, a formula for calculating PVE was given in Equation 12.10. We also saw that the PVE can be obtained using the sdev output of the prcomp() function. On the USArrests data, calculate PVE in two ways: Using the sdev output of the prcomp() function, as was done in Section 12.2.3. pr <- prcomp(USArrests, scale = TRUE) pr$sdev^2 / sum(pr$sdev^2) ## [1] 0.62006039 0.24744129 0.08914080 0.04335752 By applying Equation 12.10 directly. That is, use the prcomp() function to compute the principal component loadings. Then, use those loadings in Equation 12.10 to obtain the PVE. These two approaches should give the same results.
colSums(pr$x^2) / sum(colSums(scale(USArrests)^2)) ## PC1 PC2 PC3 PC4 ## 0.62006039 0.24744129 0.08914080 0.04335752 Hint: You will only obtain the same results in (a) and (b) if the same data is used in both cases. For instance, if in (a) you performed prcomp() using centered and scaled variables, then you must center and scale the variables before applying Equation 12.10 in (b). 12.2.3 Question 9 Consider the USArrests data. We will now perform hierarchical clustering on the states. Using hierarchical clustering with complete linkage and Euclidean distance, cluster the states. set.seed(42) hc <- hclust(dist(USArrests), method = "complete") Cut the dendrogram at a height that results in three distinct clusters. Which states belong to which clusters? ct <- cutree(hc, 3) sapply(1:3, function(i) names(ct)[ct == i]) ## [[1]] ## [1] "Alabama" "Alaska" "Arizona" "California" ## [5] "Delaware" "Florida" "Illinois" "Louisiana" ## [9] "Maryland" "Michigan" "Mississippi" "Nevada" ## [13] "New Mexico" "New York" "North Carolina" "South Carolina" ## ## [[2]] ## [1] "Arkansas" "Colorado" "Georgia" "Massachusetts" ## [5] "Missouri" "New Jersey" "Oklahoma" "Oregon" ## [9] "Rhode Island" "Tennessee" "Texas" "Virginia" ## [13] "Washington" "Wyoming" ## ## [[3]] ## [1] "Connecticut" "Hawaii" "Idaho" "Indiana" ## [5] "Iowa" "Kansas" "Kentucky" "Maine" ## [9] "Minnesota" "Montana" "Nebraska" "New Hampshire" ## [13] "North Dakota" "Ohio" "Pennsylvania" "South Dakota" ## [17] "Utah" "Vermont" "West Virginia" "Wisconsin" Hierarchically cluster the states using complete linkage and Euclidean distance, after scaling the variables to have standard deviation one. hc2 <- hclust(dist(scale(USArrests)), method = "complete") What effect does scaling the variables have on the hierarchical clustering obtained? In your opinion, should the variables be scaled before the inter-observation dissimilarities are computed? Provide a justification for your answer. ct <- cutree(hc2, 3) sapply(1:3, function(i) names(ct)[ct == i]) Scaling results in different clusters, and the choice of whether to scale depends on the data in question. In this case, the variables are: Murder numeric Murder arrests (per 100,000) Assault numeric Assault arrests (per 100,000) UrbanPop numeric Percent urban population Rape numeric Rape arrests (per 100,000) These variables are not naturally on the same scale, and the units involved are somewhat arbitrary (so for example, Murder could be measured per 1 million rather than per 100,000), so in this case I would argue the data should be scaled. 12.2.4 Question 10 In this problem, you will generate simulated data, and then perform PCA and \\(K\\)-means clustering on the data. Generate a simulated data set with 20 observations in each of three classes (i.e. 60 observations total), and 50 variables.
Hint: There are a number of functions in R that you can use to generate data. One example is the rnorm() function; runif() is another option. Be sure to add a mean shift to the observations in each class so that there are three distinct classes. set.seed(42) data <- matrix(rnorm(60 * 50), ncol = 50) classes <- rep(c("A", "B", "C"), each = 20) dimnames(data) <- list(classes, paste0("v", 1:50)) data[classes == "B", 1:10] <- data[classes == "B", 1:10] + 1.2 data[classes == "C", 5:30] <- data[classes == "C", 5:30] + 1 Perform PCA on the 60 observations and plot the first two principal component score vectors. Use a different color to indicate the observations in each of the three classes. If the three classes appear separated in this plot, then continue on to part (c). If not, then return to part (a) and modify the simulation so that there is greater separation between the three classes. Do not continue to part (c) until the three classes show at least some separation in the first two principal component score vectors. pca <- prcomp(data) ggplot(data.frame(Class = classes, PC1 = pca$x[, 1], PC2 = pca$x[, 2]), aes(x = PC1, y = PC2, col = Class)) + geom_point() Perform \\(K\\)-means clustering of the observations with \\(K = 3\\). How well do the clusters that you obtained in \\(K\\)-means clustering compare to the true class labels? Hint: You can use the table() function in R to compare the true class labels to the class labels obtained by clustering. Be careful how you interpret the results: \\(K\\)-means clustering will arbitrarily number the clusters, so you cannot simply check whether the true class labels and clustering labels are the same. km <- kmeans(data, 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 0 0 19 ## 3 19 0 0 \\(K\\)-means separates out the clusters nearly perfectly. Perform \\(K\\)-means clustering with \\(K = 2\\). Describe your results. km <- kmeans(data, 2)$cluster table(km, names(km)) ## ## km A B C ## 1 18 20 1 ## 2 2 0 19 \\(K\\)-means effectively assigns cluster 2 to class C, while cluster 1 is a mix of classes A and B. Now perform \\(K\\)-means clustering with \\(K = 4\\), and describe your results. km <- kmeans(data, 4)$cluster table(km, names(km)) ## ## km A B C ## 1 0 7 2 ## 2 18 1 0 ## 3 0 0 18 ## 4 2 12 0 \\(K\\)-means effectively assigns cluster 2 to class A and cluster 3 to class C, while class B is split across clusters 1 and 4. Now perform \\(K\\)-means clustering with \\(K = 3\\) on the first two principal component score vectors, rather than on the raw data. That is, perform \\(K\\)-means clustering on the \\(60 \\times 2\\) matrix of which the first column is the first principal component score vector, and the second column is the second principal component score vector. Comment on the results. km <- kmeans(pca$x[, 1:2], 3)$cluster table(km, names(km)) ## ## km A B C ## 1 0 20 2 ## 2 20 0 0 ## 3 0 0 18 \\(K\\)-means again separates out the clusters nearly perfectly. Using the scale() function, perform \\(K\\)-means clustering with \\(K = 3\\) on the data after scaling each variable to have standard deviation one. How do these results compare to those obtained in (b)? Explain. km <- kmeans(scale(data), 3)$cluster table(km, names(km)) ## ## km A B C ## 1 1 20 1 ## 2 19 0 0 ## 3 0 0 19 On the scaled data, \\(K\\)-means performs essentially the same as in (b): up to the arbitrary cluster numbering, the confusion with the true classes is identical, with two observations misassigned. 12.2.5 Question 11 Write an R function to perform matrix completion as in Algorithm 12.1, and as outlined in Section 12.5.2.
In each iteration, the function should keep track of the relative error, as well as the iteration count. Iterations should continue until the relative error is small enough or until some maximum number of iterations is reached (set a default value for this maximum number). Furthermore, there should be an option to print out the progress in each iteration. Test your function on the Boston data. First, standardize the features to have mean zero and standard deviation one using the scale() function. Run an experiment where you randomly leave out an increasing (and nested) number of observations from 5% to 30%, in steps of 5%. Apply Algorithm 12.1 with \\(M = 1,2,...,8\\). Display the approximation error as a function of the fraction of observations that are missing, and the value of \\(M\\), averaged over 10 repetitions of the experiment. 12.2.6 Question 12 In Section 12.5.2, Algorithm 12.1 was implemented using the svd() function. However, given the connection between the svd() function and the prcomp() function highlighted in the lab, we could have instead implemented the algorithm using prcomp(). Write a function to implement Algorithm 12.1 that makes use of prcomp() rather than svd(). 12.2.7 Question 13 On the book website, www.StatLearning.com, there is a gene expression data set (Ch12Ex13.csv) that consists of 40 tissue samples with measurements on 1,000 genes. The first 20 samples are from healthy patients, while the second 20 are from a diseased group. Load in the data using read.csv(). You will need to select header = F. data <- read.csv("data/Ch12Ex13.csv", header = FALSE) colnames(data) <- c(paste0("H", 1:20), paste0("D", 1:20)) Apply hierarchical clustering to the samples using correlation-based distance, and plot the dendrogram. Do the genes separate the samples into the two groups? Do your results depend on the type of linkage used? hc.complete <- hclust(as.dist(1 - cor(data)), method = "complete") plot(hc.complete) hc.average <- hclust(as.dist(1 - cor(data)), method = "average") plot(hc.average) hc.single <- hclust(as.dist(1 - cor(data)), method = "single") plot(hc.single) Yes, the samples clearly separate into the two groups, although the results depend somewhat on the linkage method used. In the case of average linkage, the disease samples all fall within a subset of the healthy samples. Your collaborator wants to know which genes differ the most across the two groups. Suggest a way to answer this question, and apply it here. This is probably best achieved with a supervised approach. A simple method would be to determine which genes show the most significant differences between the groups by applying a t-test to each gene. We can then select those with an adjusted p-value less than some given threshold (e.g. 0.05). Note that p.adjust() defaults to Holm’s method, which is what the code below uses; passing method = "fdr" would apply a false discovery rate correction instead.
class <- factor(rep(c("Healthy", "Diseased"), each = 20)) pvals <- p.adjust(apply(data, 1, function(v) t.test(v ~ class)$p.value)) which(pvals < 0.05) ## [1] 11 12 13 14 15 16 17 18 19 20 501 502 503 504 505 506 507 508 ## [19] 509 511 512 513 514 515 516 517 519 520 521 522 523 524 525 526 527 528 ## [37] 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 ## [55] 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 ## [73] 565 566 567 568 569 570 571 572 574 575 576 577 578 579 580 581 582 583 ## [91] 584 586 587 588 589 590 591 592 593 595 596 597 598 599 600 "],["multiple-testing.html", "13 Multiple Testing 13.1 Conceptual 13.2 Applied", " 13 Multiple Testing 13.1 Conceptual 13.1.1 Question 1 Suppose we test \\(m\\) null hypotheses, all of which are true. We control the Type I error for each null hypothesis at level \\(\\alpha\\). For each sub-problem, justify your answer. In total, how many Type I errors do we expect to make? We expect \\(m\\alpha\\). Suppose that the m tests that we perform are independent. What is the family-wise error rate associated with these m tests? Hint: If two events A and B are independent, then Pr(A ∩ B) = Pr(A) Pr(B). The family-wise error rate (FWER) is defined as the probability of making at least one Type I error. We can think of this as 1 minus the probability of no Type I errors, which is: \\(1 - (1 - \\alpha)^m\\). Alternatively, for two tests this is: Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For independent tests this is \\(\\alpha + \\alpha - \\alpha^2\\). Suppose that \\(m = 2\\), and that the p-values for the two tests are positively correlated, so that if one is small then the other will tend to be small as well, and if one is large then the other will tend to be large. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that the two p-values are perfectly correlated. If they were perfectly correlated, we would effectively be performing a single test (thus the FWER would be \\(\\alpha\\)). In the case when they are positively correlated, therefore, we can expect the FWER to be less than in (b). Alternatively, as above, FWER = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). For perfectly positively correlated tests Pr(A ∩ B) = \\(\\alpha\\), so the FWER is \\(\\alpha\\), which is smaller than the answer in (b). Suppose again that \\(m = 2\\), but that now the p-values for the two tests are negatively correlated, so that if one is large then the other will tend to be small. How does the family-wise error rate associated with these \\(m = 2\\) tests qualitatively compare to the answer in (b) with \\(m = 2\\)? Hint: First, suppose that whenever one p-value is less than \\(\\alpha\\), then the other will be greater than \\(\\alpha\\). In other words, we can never reject both null hypotheses. Taking the equation above, for two tests, FWER = Pr(A ∪ B) = Pr(A) + Pr(B) - Pr(A ∩ B). In the case considered in the hint Pr(A ∩ B) = 0, so Pr(A ∪ B) = \\(2\\alpha\\), which is larger than the answer in (b). 13.1.2 Question 2 Suppose that we test \\(m\\) hypotheses, and control the Type I error for each hypothesis at level \\(\\alpha\\). Assume that all \\(m\\) p-values are independent, and that all null hypotheses are true. Let the random variable \\(A_j\\) equal 1 if the \\(j\\)th null hypothesis is rejected, and 0 otherwise. What is the distribution of \\(A_j\\)?
\\(A_j\\) follows a Bernoulli distribution with success probability \\(\\alpha\\), since each true null hypothesis is rejected with probability \\(\\alpha\\): \\(A_j \\sim \\text{Bernoulli}(\\alpha)\\). What is the distribution of \\(\\sum_{j=1}^m A_j\\)? It follows a binomial distribution: \\(\\sum_{j=1}^m A_j \\sim \\text{Bin}(m, \\alpha)\\). What is the standard deviation of the number of Type I errors that we will make? The variance of a binomial is \\(np(1-p)\\), so for this situation the standard deviation would be \\(\\sqrt{m \\alpha (1-\\alpha)}\\). 13.1.3 Question 3 Suppose we test \\(m\\) null hypotheses, and control the Type I error for the \\(j\\)th null hypothesis at level \\(\\alpha_j\\), for \\(j=1,...,m\\). Argue that the family-wise error rate is no greater than \\(\\sum_{j=1}^m \\alpha_j\\). In general, \\(p(A \\cup B) = p(A) + p(B) - p(A \\cap B)\\), and since \\(p(A \\cap B) \\ge 0\\), we always have \\(p(A \\cup B) \\le p(A) + p(B)\\), whether or not the events are independent. Extending this to \\(m\\) events (Boole's inequality), the probability of a Type I error in any of the \\(m\\) hypotheses can be no larger than the sum of the probabilities for each individual hypothesis (which is \\(\\alpha_j\\) for the \\(j\\)th), i.e. no larger than \\(\\sum_{j=1}^m \\alpha_j\\). 13.1.4 Question 4 Suppose we test \\(m = 10\\) hypotheses, and obtain the p-values shown in Table 13.4. pvals <- c(0.0011, 0.031, 0.017, 0.32, 0.11, 0.90, 0.07, 0.006, 0.004, 0.0009) names(pvals) <- paste0("H", sprintf("%02d", 1:10)) Suppose that we wish to control the Type I error for each null hypothesis at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? names(which(pvals < 0.05)) ## [1] "H01" "H02" "H03" "H08" "H09" "H10" We reject all null hypotheses where \\(p < 0.05\\). Now suppose that we wish to control the FWER at level \\(\\alpha = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(pvals < 0.05 / 10)) ## [1] "H01" "H09" "H10" We reject all null hypotheses where \\(p < 0.005\\). Now suppose that we wish to control the FDR at level \\(q = 0.05\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.05)) ## [1] "H01" "H03" "H08" "H09" "H10" We reject all null hypotheses where the FDR-adjusted p-value is less than 0.05. Now suppose that we wish to control the FDR at level \\(q = 0.2\\). Which null hypotheses will we reject? Justify your answer. names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "H01" "H02" "H03" "H05" "H07" "H08" "H09" "H10" We reject all null hypotheses where the FDR-adjusted p-value is less than 0.2. Of the null hypotheses rejected at FDR level \\(q = 0.2\\), approximately how many are false positives? Justify your answer. Controlling the FDR at \\(q = 0.2\\) means we expect about 20% of the rejections (here, roughly 2 of the 8) to be false positives. 13.1.5 Question 5 For this problem, you will make up p-values that lead to a certain number of rejections using the Bonferroni and Holm procedures. Give an example of five p-values (i.e. five numbers between 0 and 1 which, for the purpose of this problem, we will interpret as p-values) for which both Bonferroni’s method and Holm’s method reject exactly one null hypothesis when controlling the FWER at level 0.1. In this case, for Bonferroni, we need one p-value to be less than \\(0.1 / 5 = 0.02\\) and the others to be above it. For Holm’s method, we also need the most significant p-value to be below \\(0.1/(5 + 1 - 1) = 0.02\\), with the remaining p-values large enough that no further hypotheses are rejected. An example would be: 1, 1, 1, 1, 0.001.
pvals <- c(1, 1, 1, 1, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 1 Now give an example of five p-values for which Bonferroni rejects one null hypothesis and Holm rejects more than one null hypothesis at level 0.1. An example would be: 1, 1, 1, 0.02, 0.001. For Holm’s method we reject two because \\(0.02 < 0.1/(5 + 1 - 2) = 0.025\\). pvals <- c(1, 1, 1, 0.02, 0.001) sum(p.adjust(pvals, method = "bonferroni") < 0.1) ## [1] 1 sum(p.adjust(pvals, method = "holm") < 0.1) ## [1] 2 13.1.6 Question 6 For each of the three panels in Figure 13.3, answer the following questions: There are always: 8 positives (red) and 2 negatives (black). False / true positives are black / red points below the line respectively. False / true negatives are red / black points above the line respectively. Type I / II errors are the same as false positives and false negatives respectively. How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 1 7 2 0 1 3 0 5 3 2 0 5 How many false positives, false negatives, true positives, true negatives, Type I errors, and Type II errors result from applying the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? Panel FP FN TP TN Type I Type II 1 0 1 7 2 0 1 2 0 0 8 2 0 0 3 0 0 8 2 0 0 What is the false discovery rate associated with using the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.05\\)? The false discovery rate is the expected ratio of false positives to total positives (that is, to total rejections). There are never any false positives (black points below the line), so, using the true positive counts from the table in (a), the false discovery rate for panels 1, 2 and 3 is 0/7, 0/7 and 0/3 respectively, i.e. zero in each case. What is the false discovery rate associated with using the Holm procedure to control the FWER at level \\(\\alpha = 0.05\\)? Again there are no false positives; using the counts from (b), the false discovery rate for panels 1, 2 and 3 is 0/7, 0/8 and 0/8 respectively, again zero in each case. How would the answers to (a) and (c) change if we instead used the Bonferroni procedure to control the FWER at level \\(\\alpha = 0.001\\)? This would equate to a more stringent threshold. We would still call no false positives, so the false discovery rate in (c) would remain zero; the number of rejections in (a) could only decrease, so some true positives might become false negatives. 13.2 Applied 13.2.1 Question 7 This problem makes use of the Carseats dataset in the ISLR2 package. For each quantitative variable in the dataset besides Sales, fit a linear model to predict Sales using that quantitative variable. Report the p-values associated with the coefficients for the variables. That is, for each model of the form \\(Y = \\beta_0 + \\beta_1X + \\epsilon\\), report the p-value associated with the coefficient \\(\\beta_1\\). Here, \\(Y\\) represents Sales and \\(X\\) represents one of the other quantitative variables. library(ISLR2) nm <- c("CompPrice", "Income", "Advertising", "Population", "Price", "Age") pvals <- sapply(nm, function(n) { summary(lm(Carseats[["Sales"]] ~ Carseats[[n]]))$coef[2, 4] }) Suppose we control the Type I error at level \\(\\alpha = 0.05\\) for the p-values obtained in (a). Which null hypotheses do we reject? names(which(pvals < 0.05)) ## [1] "Income" "Advertising" "Price" "Age" Now suppose we control the FWER at level 0.05 for the p-values. Which null hypotheses do we reject? names(which(pvals < 0.05 / length(nm))) ## [1] "Income" "Advertising" "Price" "Age" Finally, suppose we control the FDR at level 0.2 for the p-values.
Which null hypotheses do we reject? names(which(p.adjust(pvals, "fdr") < 0.2)) ## [1] "Income" "Advertising" "Price" "Age" 13.2.2 Question 8 In this problem, we will simulate data from \\(m = 100\\) fund managers. set.seed(1) n <- 20 m <- 100 X <- matrix(rnorm(n * m), ncol = m) These data represent each fund manager’s percentage returns for each of \\(n = 20\\) months. We wish to test the null hypothesis that each fund manager’s percentage returns have population mean equal to zero. Notice that we simulated the data in such a way that each fund manager’s percentage returns do have population mean zero; in other words, all \\(m\\) null hypotheses are true. Conduct a one-sample \\(t\\)-test for each fund manager, and plot a histogram of the \\(p\\)-values obtained. pvals <- apply(X, 2, function(p) t.test(p)$p.value) hist(pvals, main = NULL) If we control Type I error for each null hypothesis at level \\(\\alpha = 0.05\\), then how many null hypotheses do we reject? sum(pvals < 0.05) ## [1] 4 If we control the FWER at level 0.05, then how many null hypotheses do we reject? sum(pvals < 0.05 / length(pvals)) ## [1] 0 If we control the FDR at level 0.05, then how many null hypotheses do we reject? sum(p.adjust(pvals, "fdr") < 0.05) ## [1] 0 Now suppose we “cherry-pick” the 10 fund managers who perform the best in our data. If we control the FWER for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? If we control the FDR for just these 10 fund managers at level 0.05, then how many null hypotheses do we reject? best <- order(apply(X, 2, sum), decreasing = TRUE)[1:10] sum(pvals[best] < 0.05 / 10) ## [1] 1 sum(p.adjust(pvals[best], "fdr") < 0.05) ## [1] 1 Explain why the analysis in (e) is misleading. Hint: The standard approaches for controlling the FWER and FDR assume that all tested null hypotheses are adjusted for multiplicity, and that no “cherry-picking” of the smallest p-values has occurred. What goes wrong if we cherry-pick? This is misleading because we are not correctly accounting for all tests performed. Cherry-picking is similar to repeating a test until, by chance, we find a significant result. "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]] diff --git a/tree-based-methods.html b/tree-based-methods.html index 163bc81..8fed720 100644 --- a/tree-based-methods.html +++ b/tree-based-methods.html @@ -1220,7 +1220,7 @@

      8.2.6 Question 12
      mse <- function(model, ...) {
         pred <- predict(model, College[test, ], ...)