path
stringlengths 7
265
| concatenated_notebook
stringlengths 46
17M
|
---|---|
NeuralNetwork-SpamClassification.ipynb | ###Markdown
Data Loading
###Code
# Read the raw SMS spam dataset; latin-1 handles the non-UTF8 bytes in the file.
data = pd.read_csv('dataset/spam.csv', encoding='latin-1')
# Peek at the first five rows
data.head(5)
# Drop the unnamed filler columns that carry no useful signal, and give
# the remaining two columns descriptive names in one chained step.
data = (
    data
    .drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1)
    .rename(columns={"v1": "label", "v2": "text"})
)
# Peek at the last five rows
data.tail(5)
# Encode the class as an integer tag: ham -> 0, spam -> 1
data["label_tag"] = data.label.map({'ham': 0, 'spam': 1})
data.head(5)
# Report the size of the dataset and the class balance
print(data.count())
data.label.value_counts()
###Output
label 5572
text 5572
label_tag 5572
dtype: int64
###Markdown
Data Preparation Training data
###Code
# Training split: the first 4572 of the 5572 messages.
training_data = data[:4572]
training_data_length = len(training_data)
training_data.head()
###Output
_____no_output_____
###Markdown
Testing data
###Code
# Held-out test split: the final 1000 of the 5572 messages.
test_data = data[-1000:]
test_data_length = len(test_data)
test_data.head()
###Output
_____no_output_____
###Markdown
What is the shape of our input data Training data
###Code
# Print the shape of the training frame, then of its label column.
for shape in (training_data.shape, training_data.label.shape):
    print(shape)
###Output
(4572, 3)
(4572,)
###Markdown
There are 3 features and 4572 samples in our training set Test data
###Code
# Print the shape of the test frame, then of its label column.
for shape in (test_data.shape, test_data.label.shape):
    print(shape)
###Output
(1000, 3)
(1000,)
###Markdown
Develop a Predictive Theory
###Code
import random


def pretty_print_text_and_label(i):
    """Print the label and the first 80 characters of training example *i*."""
    print(training_data.label[i] + "\t:\t" + training_data.text[i][:80] + "...")


print("labels \t : \t texts\n")
# Sample a handful of random training examples to eyeball the data.
# random.randrange(start, stop, step): the varying step values just spread
# the candidate indices differently across the dataset.
# Use training_data_length instead of the hard-coded 4572 so this cell
# keeps working if the train/test split size changes.
pretty_print_text_and_label(random.randrange(0, training_data_length))
pretty_print_text_and_label(random.randrange(0, training_data_length, 4))
pretty_print_text_and_label(random.randrange(0, training_data_length, 50))
pretty_print_text_and_label(random.randrange(0, training_data_length, 100))
pretty_print_text_and_label(random.randrange(0, training_data_length, 200))
pretty_print_text_and_label(random.randrange(0, training_data_length, 500))
pretty_print_text_and_label(random.randrange(0, training_data_length, 800))
pretty_print_text_and_label(random.randrange(0, training_data_length, 1000))
###Output
labels : texts
ham : Sorry me going home first... Daddy come fetch Ì_ later......
ham : I jokin oni lar.. ÌÏ busy then i wun disturb Ì_....
ham : Feb <#> is \I LOVE U\" day. Send dis to all ur \"VALUED FRNDS\" evn me. ...
ham : Pathaya enketa maraikara pa'...
ham : K.then any other special?...
ham : No..but heard abt tat.....
ham : Nice.nice.how is it working?...
ham : But i'll b going 2 sch on mon. My sis need 2 take smth....
###Markdown
It is very easy to distinguish a spam text from a non-spam text (in this case ham). Spam texts occasionally contain words like **free**, **sell**, **promotion**, **deal**, **offer**, **discount**, **lucky** etc. This way we can let our network learn some of the words associated with spam and, based on such criteria, classify a text as spam or not. Theory Validation
###Code
from collections import Counter
import numpy as np
import pprint

spam_counts = Counter()   # word frequencies over spam messages
ham_counts = Counter()    # word frequencies over ham messages
total_counts = Counter()  # word frequencies over the whole training set
spam_ham_ratios = Counter()
pp = pprint.PrettyPrinter(indent=4)

for i in range(training_data_length):
    # BUG FIX: the original compared the *string* label ('ham'/'spam') to 0,
    # which is never true, so every message was tallied as spam and
    # ham_counts stayed empty. Use the numeric label_tag (ham=0, spam=1).
    if training_data.label_tag[i] == 0:
        target_counter = ham_counts
    else:
        target_counter = spam_counts
    for word in training_data.text[i].split(" "):
        target_counter[word] += 1
        total_counts[word] += 1

pp.pprint(spam_counts.most_common()[0:30])

# For words seen often enough, compute the spam/ham frequency ratio.
# The +1 in the denominator avoids division by zero for spam-only words.
for word, count in list(total_counts.most_common()):
    if count > 100:
        spam_ham_ratio = spam_counts[word] / float(ham_counts[word] + 1)
        spam_ham_ratios[word] = spam_ham_ratio

# Map the raw ratios into log space so spam- and ham-indicative words are
# symmetric around 0 (positive -> spam-leaning, negative -> ham-leaning).
# The +0.01 keeps the log argument finite when the ratio is 0.
for word, ratio in spam_ham_ratios.most_common():
    if ratio > 1:
        spam_ham_ratios[word] = np.log(ratio)
    else:
        spam_ham_ratios[word] = -np.log((1 / (ratio + 0.01)))

# words most frequently seen in a text with a "spam" label
pp.pprint(spam_ham_ratios.most_common()[0:30])
# words most frequently seen in a text with a "ham" label
pp.pprint(list(reversed(spam_ham_ratios.most_common()))[0:30])
###Output
[ ('there', 4.6151205168412597),
('he', 4.6151205168412597),
('no', 4.6249728132842707),
('our', 4.6443908991413725),
('one', 4.6443908991413725),
('been', 4.6443908991413725),
('If', 4.6634390941120669),
('No', 4.6728288344619058),
('But', 4.7004803657924166),
('still', 4.7095302013123339),
('text', 4.7184988712950942),
('need', 4.7361984483944957),
('as', 4.7449321283632502),
('only', 4.7449321283632502),
('n', 4.7621739347977563),
("I'll", 4.7706846244656651),
('what', 4.7706846244656651),
('How', 4.7706846244656651),
('then', 4.7874917427820458),
('going', 4.7874917427820458),
('Call', 4.7957905455967413),
('...', 4.8040210447332568),
('time', 4.8202815656050371),
('want', 4.8441870864585912),
('about', 4.8520302639196169),
('send', 4.8675344504555822),
('by', 4.8978397999509111),
('was', 5.0304379213924353),
('?', 5.0498560072495371),
('now', 5.0498560072495371)]
###Markdown
Transform Text into Numbers Neural Networks only understand numbers hence we have to find a way to represent our text inputs in a way it can understand
###Code
# The vocabulary is every distinct whitespace-split token seen in training.
# Iterating a Counter yields its keys, so set() on it is equivalent to
# set(total_counts.keys()).
vocab = set(total_counts)
vocab_size = len(vocab)
print(vocab_size)
###Output
13874
###Markdown
We can see that across our whole dataset we have a total of **13874** unique words. We use this to build our vocabulary vector, with one column per word. Because **13874** can be a large size in memory (a matrix of size **13874 by 4572**), let's allocate the memory once, filled with zeros, and only change its contents as needed later.
###Code
# Pre-allocate the input vector: a single row with one zeroed column per
# vocabulary word. Allocating once up front avoids repeated large
# allocations; the cells are overwritten in place later.
vocab_vector = np.zeros((1, vocab_size))
pp.pprint(vocab_vector.shape)
pp.pprint(vocab_vector)
###Output
(1, 13874)
array([[ 0., 0., 0., ..., 0., 0., 0.]])
###Markdown
Now, let's create a dictionary that allows us to look up every word in our vocabulary and map it to its `vocab_vector` column.
###Code
# Maps each vocabulary word to its column index in the vocab_vector.
# A dict comprehension over enumerate() replaces the manual assignment loop.
# NOTE(review): `vocab` is a set, so the word -> column assignment is
# arbitrary but fixed for the lifetime of this process.
word_column_dict = {word: column for column, word in enumerate(vocab)}
pp.pprint(word_column_dict)
###Output
{ '': 0,
'!': 6573,
'!!': 6076,
'!!!': 2577,
'!!!!': 6201,
"!!''.": 8155,
'!1': 3601,
'#': 6048,
'#150': 12446,
'#5000': 7177,
'$': 3087,
'$1': 1367,
'$2': 13666,
'$350': 7222,
'$5.00': 13068,
'$50': 1484,
'$50...': 7035,
'$700': 11279,
'$900': 4446,
'$95/pax,': 3780,
'%': 6745,
'%.': 6231,
'%of': 2277,
'&': 9799,
'&SAM': 3861,
'&XXX': 789,
'&': 11599,
'>:(': 13352,
'&it': 6281,
'<#>': 13672,
'<)': 6224,
'<3': 8044,
'<DECIMAL>': 11391,
'<EMAIL>': 7317,
'<TIME>': 11333,
'<URL>': 11315,
'&othrs': 10011,
"'": 3155,
"''": 162,
"''OK'',": 9038,
"'An": 10245,
"'Comfort'": 6624,
"'IF": 10045,
"'Luxury'": 6885,
"'MARRIED'": 1036,
"'Maangalyam": 9707,
"'Melle": 13753,
"'Need'": 1538,
"'SIMPLE'": 5834,
"'Uptown": 6183,
"'Wnevr": 9641,
"'anything'": 3581,
"'doctors'": 2147,
"'help'": 8401,
"'hex'": 1645,
"'hw": 13002,
"'its": 5838,
"'ll": 1104,
"'taxless'": 6352,
"'terrorist'": 176,
"'til": 7412,
"'xam...": 5690,
'(100p/SMS)': 11505,
'(10p/min)': 13681,
'(10p/min).': 2569,
'(150p/SMS)': 12227,
'(18': 11759,
'(18+)': 1102,
'(25/F)': 7795,
'(25p),': 2208,
'(32/F)': 4580,
"(Alaipayuthe)'": 13830,
'(And': 1588,
'(Bank': 12208,
'(Book': 12977,
'(I': 7578,
'(More': 6105,
'(No': 7532,
'(Oru': 2896,
'(Send': 2576,
'(That': 13130,
'(Txt': 9687,
'(again),': 10647,
'(an': 6277,
'(but': 3874,
'(come': 7966,
'(flights': 13452,
'(getting': 2541,
"(he's": 2562,
'(kadeem)': 9287,
'(marriage)program': 8665,
'(mob': 2575,
'(not': 1166,
'(or': 6331,
'(preferably': 6104,
'(quizclub': 3055,
'(std': 13486,
'(to': 2786,
'(\x89Û_)': 9415,
')': 697,
'),': 5077,
'*': 3764,
'*****': 7808,
'***********': 8784,
'***************': 7292,
'*****UP': 773,
'*9': 12275,
'*BILLING': 8503,
'*HAVE': 2347,
"*I'm": 11799,
'*I*': 9436,
'*Missing': 752,
'*Number': 12725,
'*Sent:Date': 10496,
'*Wait,U': 12592,
'*a': 8003,
'*adoring': 3588,
'*covers': 2717,
'*deep': 11202,
'*devouring': 10530,
'*exposes': 10300,
'*grins*': 7821,
'*kiss*': 3786,
'*kisses*': 11441,
'*laughs*"': 12663,
'*loving': 2008,
'*passionate': 3305,
'*phews*': 6516,
'*possessive': 3826,
'*pouts*': 9543,
'*pushes': 12200,
'*sighs*': 12244,
'*sips': 10238,
'*smacks': 10839,
'*smiles*': 2780,
'*stomps': 12177,
'*teasing': 11576,
'*turn*': 1729,
'*wicked': 6848,
'+': 13207,
'+123': 10438,
'+447797706009': 9289,
'+449071512431': 1566,
'+CHEER': 13323,
'+std': 11972,
'+å£400': 9840,
',': 7502,
',kerala': 6006,
'-': 6908,
'-)': 4172,
'-Message': 9809,
'-NO': 9948,
'-PLS': 671,
'-Shakespeare-': 10194,
'-The': 9888,
'-apples&pairs&all': 172,
'-msg': 8617,
'-sub': 9529,
'-u': 3891,
'-via': 9376,
'.': 6217,
'..': 13717,
'.."': 13391,
'...': 3672,
'...*grins*...': 4872,
'...*kiss*': 13073,
'....': 3189,
'.....': 3719,
'.........': 4505,
'......forward': 110,
'..?': 5042,
'..Shiny': 10752,
'..it': 6100,
'..now': 7070,
'..water.?': 10261,
'.Call': 11287,
'.Please': 9041,
'.Terry\\""': 11830,
'.after': 1324,
'.be': 4601,
'.by': 10232,
'.dont': 7517,
'.irritating': 10771,
'.lovable.': 1910,
'.nt': 628,
'/': 13198,
'/-': 2155,
'/7': 5954,
'/oz,': 7650,
'0': 12631,
'008704050406': 11589,
'0089(my': 10414,
'01223585236': 4240,
'01223585334': 10703,
'0125698789': 13371,
'02': 7662,
'02/06/03!': 906,
'02/09/03!': 6639,
'0207': 3665,
'02072069400.': 5699,
'02073162414': 1123,
'02085076972': 10927,
'021': 913,
'050703': 10129,
'0578': 5455,
'06.': 4811,
'07/11/04': 3919,
'07008009200': 13300,
'07046744435': 1224,
'07090201529': 8877,
'07090298926': 4751,
'07123456789': 6441,
'0721072': 8807,
'07732584351': 13297,
'07734396839': 4975,
'07742676969': 3896,
'07753741225': 4296,
'0776xxxxxxx': 2581,
'07786200117': 3428,
'077xxx': 9581,
'07801543489': 11678,
'07808': 1430,
'07815296484': 13383,
'07821230901': 10880,
'0789xxxxxxx.': 5385,
'07946746291/07880867867': 10247,
'0796XXXXXX.': 5323,
'07973788240': 8559,
'07XXXXXXXXX': 12754,
'07xxxxxxxxx': 6421,
'0800': 12176,
'08000407165': 9572,
'08000776320': 2929,
'08000839402': 3350,
'08000930705': 9018,
'08000930705.': 8918,
'08000938767': 5472,
'08001950382': 5194,
'08002888812': 8656,
'08002986030': 10058,
'08002986906': 11130,
'08002988890': 3210,
'08006344447': 2902,
'0808': 12290,
'08081263000': 11489,
'08081560665': 13303,
'0825': 1773,
'0844': 8097,
'08448714184': 4221,
'0845': 4283,
'08450542832': 12838,
'08452810071': 12229,
'08452810073': 9866,
"08452810075over18's": 2130,
'0870': 8608,
'0870..k': 7823,
'08700621170150p': 729,
'08701237397': 5776,
'08701417012': 9958,
'08701417012150p': 5598,
'0870141701216+': 12970,
'087016248': 5452,
'08701752560.': 9185,
'087018728737,': 11671,
'0870241182716': 2466,
'08702490080': 2825,
'08702840625': 2015,
'08702840625.COMUK.': 4767,
'08704439680.': 3785,
'08704439680Ts&Cs': 2413,
'08706091795': 12911,
'08706091795.': 247,
'0870737910216yrs': 8041,
'08707500020': 3718,
'08707509020': 11139,
'0870753331018+': 1475,
'08707808226.': 6068,
'08708034412': 2168,
'08708800282': 11549,
'08709222922.': 514,
'08709501522': 13513,
'0871-4719-523': 986,
'0871-872-9755': 8371,
'087104711148': 4555,
'08712101358': 242,
'08712103738': 11334,
'0871212025016': 7308,
'08712300220': 3984,
'087123002209am-7pm.': 4434,
'08712317606': 1448,
'08712400200.': 3635,
'08712400603': 13388,
'08712402050': 1616,
'08712402578': 7590,
'08712402779': 5491,
'08712402902': 475,
'08712402972': 2290,
'08712404000': 8661,
'08712405020.': 12585,
'08712405022,': 2801,
'08712460324': 9276,
'08712460324(nat': 10026,
'08712466669': 4287,
'0871277810810': 3361,
'0871277810910p/min': 3385,
'08714342399.2stop': 38,
'087147123779am-7pm.': 1863,
'08714712379': 10257,
'08714712388': 2704,
'08714712394': 13709,
'08714714011': 6232,
'08715203028': 2340,
'08715203649': 11371,
'08715203677': 13283,
'08715203694': 6997,
'08715500022': 11428,
'08715705022,': 13751,
'08717111821': 4948,
'08717168528': 2339,
'08717205546': 7310,
'0871750.77.11!': 11219,
'08717890890å£1.50': 9970,
'08717895698': 13632,
'08717898035.': 10664,
'08718711108': 6365,
'08718720201': 13331,
'08718723815.': 184,
'08718725756.': 5777,
'087187262701.50gbp/mtmsg18': 5941,
'08718726970': 6324,
'08718726971': 1870,
'08718726978': 9446,
'08718727868.': 1555,
'08718727870': 7959,
'08718727870.': 10923,
'08718730555': 9051,
'08718730666': 96,
'08718738001': 3781,
'08718738034.': 5724,
'08719180248': 8563,
'08719181259': 4712,
'08719181503': 7455,
'08719181513.': 7518,
'08719839835.': 9190,
'08719899217': 8704,
'08719899230': 11231,
'09041940223': 5798,
'09050000301.': 11347,
'09050000460': 9257,
'09050000555.': 2960,
'09050000878.': 3873,
'09050000928.': 13535,
'09050001295': 7614,
'09050001808': 8007,
'09050002311': 5743,
'09050003091': 6107,
'09050005321.': 7493,
'09050090044': 9155,
'09050280520,': 5572,
'09053750005': 12098,
'09056242159': 13061,
'09057039994': 12456,
'09058091854': 7413,
'09058091870': 581,
'09058094454': 9770,
'09058094455': 4768,
'09058094565': 2468,
'09058094594': 1999,
'09058094597': 7804,
'09058094599': 9705,
'09058095107': 11553,
'09058095201': 105,
'09058097189': 1264,
'09058097218': 2746,
'09058098002.': 7334,
'09058099801': 2739,
'09061104276': 11745,
'09061104283': 10244,
'09061209465': 11196,
'09061221061': 9548,
'09061221066': 7887,
'09061701444.': 7142,
'09061701461.': 10821,
'09061701851.': 4778,
'09061701939.': 8834,
'09061702893.': 11285,
'09061743386': 3109,
'09061743806': 6702,
'09061743810': 9363,
'09061743811': 7565,
'09061744553': 4538,
'09061749602': 1258,
'09061790121': 6984,
'09061790125': 6392,
'09063440451': 4743,
'09063442151': 1766,
'09063458130': 868,
'0906346330.': 4024,
'09064011000.': 4553,
'09064012103': 1210,
'09064012160.': 7330,
'09064017305': 2002,
'09064018838.': 4504,
'09064019014': 13532,
'09064019788': 10306,
'09065069120': 5593,
'09065069154': 8348,
'09065171142-stopsms-08': 4572,
'09065174042.': 9403,
'09065394514': 7259,
'09065394973': 12906,
'09065989180': 9479,
'09065989182': 8101,
'09065989182.': 1229,
'09066350750': 1802,
'09066358152': 4393,
'09066358361': 6762,
'09066362206': 8137,
'09066362220': 829,
'09066362231': 12427,
'09066364311': 6963,
'09066364349': 3847,
'09066364589': 9418,
'09066368327': 3805,
'09066368470': 8643,
'09066368753': 9308,
'09066382422': 6160,
'09066612661': 12056,
'09066660100': 11778,
'09071512433': 4071,
'09071517866': 13430,
'09090204448': 7296,
'09090900040': 3488,
'09094100151': 7193,
'09094646899': 768,
'09096102316': 10893,
'09099725823': 4761,
'09099726395': 5128,
'09099726429': 4668,
'09099726481': 9159,
'09099726553': 178,
'09111030116.': 3091,
'09111032124': 7738,
'09701213186': 13053,
'0A$NETWORKS': 8684,
'1': 5854,
"1's": 12562,
'1)': 12402,
'1)McFly-All': 2994,
'1)Unbreakable,': 479,
'1,2': 9193,
'1,His': 5366,
'1-Hanuman': 8887,
'1-month': 9406,
'1-u': 2436,
'1.': 11266,
'1.20': 1951,
'1.50': 6819,
'1.50p': 3190,
'1.50p/wk.': 12688,
'1.5p/min': 6812,
'1.childish': 2407,
'1/08/03!': 10166,
'1/1': 3066,
'1/2': 12268,
'1/2price': 1760,
'1/3.': 2020,
'10': 11084,
'10,000': 10542,
'10..': 4866,
'10.1mega': 621,
'10/06/03!': 10102,
'100': 1919,
"100's": 10557,
'1000': 8339,
"1000's": 365,
'1000s': 2870,
'1013': 2090,
'1030': 8842,
'10:10': 378,
'10:30': 10890,
'10K,': 11274,
'10am': 4606,
'10am-7pm': 7666,
'10am-9pm': 13379,
'10k': 13690,
'10p': 5815,
'10p.': 10732,
'10p/min': 12067,
'10p/min.': 10590,
'10ppm.': 7002,
'10th': 7874,
'11': 12138,
'11.48': 4585,
'1120': 8689,
'113,': 8508,
'1131': 9632,
'114/14': 12481,
'116': 10616,
'118p/msg': 7350,
'11?': 9020,
'11mths': 13229,
'11mths+?': 13295,
'12': 8392,
'12,000pes,': 11237,
'1205': 13143,
'121': 4693,
'1225': 869,
'123': 6845,
'125gift': 5382,
'12Mths': 5354,
'12hours': 11492,
'12hrs': 13790,
'12mths': 6839,
'1327': 2410,
'139,': 10156,
'1405,': 8271,
'140ppm': 6130,
'145': 2758,
'146tf150p': 7604,
'14thMarch.': 9337,
'150': 9346,
'150P': 5159,
'150PPM': 12884,
'150p': 10302,
'150p.': 11722,
'150p/MTmsg': 10623,
'150p/Msg': 8616,
'150p/Msg.': 7709,
'150p/MsgrcvdHG/Suite342/2Lands/Row/W1J6HL': 11697,
'150p/Mt': 836,
'150p/Mtmsgrcvd18': 6463,
'150p/Mtmsgrcvd18+': 11951,
'150p/day,': 11566,
'150p/min': 5471,
'150p/msg': 2389,
'150p/msg,': 12738,
'150p/msg.': 10924,
'150p/rcvd': 513,
'150p/tone': 8457,
'150p/tone.': 4826,
'150p/wk': 5850,
'150pm': 1934,
'150pm.': 4471,
'150ppermessSubscription': 7149,
'150ppm': 7779,
'150ppm.': 11607,
'150ppmPOBox10183BhamB64XE': 1769,
'150ppmsg': 8394,
'153': 12690,
'16': 3200,
'16+': 7012,
'16+.': 4266,
'16+only': 13209,
'16+only!': 5336,
'1680,': 11260,
'169': 1470,
'18': 525,
'18+': 7693,
'18+6*å£1.50(moreFrmMob.': 10659,
'18+only': 11011,
'18+only.': 6129,
'18/11/04': 9024,
'1843.': 2427,
'18p/txt': 8103,
'18yrs': 1056,
'195': 3317,
'1956669': 11882,
'1Apple/Day=No': 10955,
'1Cup': 7039,
'1DA': 12299,
'1ER': 1674,
'1IM': 10516,
'1Lemon/Day=No': 3926,
'1Tulsi': 8263,
'1Winaweek,': 8002,
'1YF': 5716,
'1hr': 13336,
'1hr.': 8768,
'1pm': 1789,
'1st': 9808,
'1st..."': 10919,
'1st4Terms': 12195,
'1stchoice.co.uk': 7641,
'1stone,': 5736,
'1x150p/wk': 4875,
'1x150p/wk.': 2210,
'2': 1368,
'2!': 10220,
'2%': 1016,
'2)': 12222,
'2)Untamed,': 8790,
'2,': 12066,
'2,Police': 10147,
'2-4-1': 983,
'2-Bajarangabali': 4843,
'2-u': 12539,
'2.': 11473,
'2.15': 7352,
'2.30ish': 44,
'2.IM': 9630,
'2.U': 11000,
'2.naughty': 12218,
'2/2': 13254,
'20': 11076,
'20%': 1550,
'20,000': 5287,
'200': 10285,
'2000': 7989,
'2003': 7585,
'2004': 11533,
'2004,': 8874,
'2005.': 8933,
'2006': 7030,
'2007': 10164,
'20M12AQ.': 11581,
'20p': 1911,
'20p/min': 11550,
'21': 10845,
'21/m': 3941,
'21870000>Hi': 10002,
'21st': 6038,
'22': 9106,
'220-CM2': 4086,
'2309.': 6096,
'24': 6173,
'24/10/04': 3654,
'24/7MP': 4623,
'24Hrs': 5090,
'24M': 13271,
'24hrs.': 9100,
'24th': 1175,
'25': 6280,
'25.': 13663,
'250': 5805,
'25p': 4260,
'26.03.05': 11204,
'26th': 2493,
'27/03': 7867,
'27/6/03.': 7664,
'28': 2667,
'28,': 2930,
'28/5': 12786,
'2814032': 2914,
'28days.': 1692,
'28th': 5775,
"28thFeb.T&C's": 9942,
'29/03/05': 5647,
'2?': 8923,
'2C': 13365,
'2EZ': 4224,
'2End': 6486,
'2GETHA!': 9522,
'2KBSubject:': 2814,
'2MORO!': 979,
'2MOROW': 11560,
'2MORRO': 5391,
'2MRW': 6481,
'2NITE': 6647,
'2NITE-TELL': 13472,
'2NITE?': 10369,
'2PX': 4925,
'2StopTx': 1325,
'2U': 2043,
'2WT': 10754,
'2WU.': 8497,
'2channel': 1495,
'2day': 4793,
'2day!': 9860,
'2day,': 644,
'2day.love': 5900,
'2exit.': 7961,
'2geva,': 13604,
'2go': 1335,
'2go,': 4476,
'2go.did': 2482,
'2gthr': 6689,
'2hrs.': 6734,
'2marrow': 2696,
'2moro': 6804,
'2morro': 9207,
'2morrow': 8220,
'2mro': 12484,
'2mrw': 1059,
'2nd': 4276,
'2nhite': 6055,
'2nights': 4781,
'2nite': 3553,
'2nite!.': 13375,
'2nite?': 3080,
'2optout': 6251,
'2optout/D3WV': 12726,
'2rcv': 4160,
'2stop': 464,
'2stoptxt': 997,
'2u': 13685,
'2waxsto': 806,
'2wks': 1003,
'2wks.': 13227,
'2years': 11419,
'2yr': 4696,
'3': 388,
'3!': 10826,
'3)': 7767,
'3)Cover': 11012,
'3)Unkempt.': 7348,
'3,': 6467,
'3,Wife:': 3048,
'3-Maruti': 12148,
'3-u': 8468,
'3.': 12237,
'3.Sentiment': 4931,
'3.U': 6771,
'30': 4380,
'300': 8910,
'300%': 5196,
'300603': 228,
'300p': 2097,
'300p.': 2573,
'3030': 3154,
'3030.': 9113,
'30Apr': 11887,
'30pp/txt': 2229,
'30th': 4171,
'31': 10978,
'31/10/04': 13783,
'3100': 11192,
'310303.': 9577,
'31p.msg@150p': 9143,
'3230': 3504,
'32323.': 9679,
'326': 6664,
'330.': 499,
'3510i': 6598,
'35p.': 13441,
'3650': 1176,
'36504': 7520,
'3680': 4841,
'3680.Offer': 1936,
'373': 11244,
'3750': 9934,
'391784.': 6688,
'3AJ': 2414,
'3D!!B)': 1875,
'3G': 7510,
'3GBP': 10768,
'3Lions': 4928,
'3POUND': 5250,
'3SS.': 6248,
'3UZ': 1753,
'3UZ.': 2784,
'3d': 4972,
'3days': 1477,
'3g': 183,
'3lp': 9880,
'3miles': 6472,
'3mins': 3494,
'3optical': 11780,
'3qxj9': 3707,
'3rd': 5403,
'3rd?': 13168,
'3xå£150pw': 3201,
'4': 7990,
"4'o": 9729,
'4)Press': 12638,
'4*': 5044,
'4*Lux': 8645,
'4-6...': 12251,
'4-Pavanaputra': 1398,
'4-some1': 8430,
'4.': 1326,
'4...': 4533,
'4.15': 10731,
'4.47per': 10149,
'4.49/month': 9597,
'4.50.': 13674,
'4.Cook:': 4333,
'4.rowdy': 3910,
'40': 12893,
'400': 5632,
'400mins...Call': 6182,
'4041': 12767,
'40533': 7790,
'40GB': 10275,
'40mph.': 12762,
'41685': 7553,
'41782': 8567,
'420': 11721,
'4217': 6859,
'42478': 8429,
'430': 12897,
'434': 9485,
'44': 6978,
'4403LDNW1A7RW18': 5657,
'447801259231': 5866,
'448712404000>Please': 1387,
'449050000301': 12239,
'450p': 4603,
'45239': 13421,
'47': 2311,
'4742': 10040,
'4882': 5756,
'4?': 9321,
'4GET': 2177,
'4JX': 2937,
'4T&Ctxt': 7428,
'4U': 12038,
'4U!:': 4620,
'4WARD': 4457,
'4a': 2322,
'4d': 3917,
'4few': 5019,
'4fil': 13543,
'4get': 4655,
'4give': 3241,
'4got,': 2804,
'4goten': 13396,
'4info': 5889,
'4mths': 8368,
'4th': 2191,
'4the': 13679,
'4txt/120p': 12983,
'4txt/̼1.20': 2505,
'4u': 11464,
'4u,': 9909,
'4u.i': 6001,
'4uTxt': 2770,
'4w': 10484,
'4wrd': 9656,
'5': 12801,
'5)': 4310,
'5)Gently': 6929,
'5+-': 261,
'5+3+2=': 933,
'5-Sankatmochan': 426,
'5.': 10561,
'5..': 2204,
'5.15pm': 12308,
'5.30': 1531,
'5.Gardener:': 1500,
'5.ful': 10284,
'5/9/03': 2323,
'50': 3416,
'50%': 6054,
'500': 7253,
'505060': 6977,
'50p': 6395,
'50s?': 10112,
'515': 7632,
'5226': 11491,
'526,': 5435,
'528': 13490,
'530': 11724,
'54': 11424,
'542': 2110,
'545': 10487,
'5K,': 12149,
'5WB': 5923,
'5WE': 3920,
'5WQ': 11027,
'5digital': 10646,
'5free.': 6259,
'5ish.': 7181,
'5min': 8255,
'5pm': 8139,
'5pm.': 3972,
'5th': 2683,
'5we.': 6514,
'5wkg': 5423,
'5years': 3634,
'6': 816,
'6,': 471,
'6-Ramaduth': 2273,
'6.': 11208,
'6...': 12909,
'6.30': 8068,
'6.45pm.': 6690,
'6.House-Maid:': 9734,
'6.romantic': 9491,
'60,400thousad.i': 3321,
'600': 8920,
'6031': 10018,
'60P': 4356,
'60p/min.': 2518,
'61200': 12703,
'61610': 4436,
'62220Cncl': 4458,
'6230': 7420,
'62468': 9433,
'630': 11479,
'63miles.': 12124,
'645': 12619,
'650.': 13247,
'6669': 3834,
'67441233': 4816,
'68866.': 12827,
'69101.': 7287,
'69200': 6287,
'69669': 31,
'69669.': 1389,
'69696': 8239,
'69698': 4835,
'69855,': 1736,
'69866.18': 6301,
'69876.': 7271,
'69888': 9401,
'69888!': 1428,
'69888Nyt.': 6035,
'69911(å£1.50p.': 9477,
'69969': 916,
'69988': 25,
'6HL': 4562,
'6WU': 8690,
'6ZF': 8462,
'6days,': 1942,
'6hrs': 3150,
'6hrs.': 9587,
'6ish.': 5739,
'6missed': 80,
'6months': 11494,
'6pm.': 13733,
'6pm..:-)': 7728,
'6th': 2265,
'7': 10265,
'7+2+5=?????': 4564,
'7,': 2794,
'7-Mahaveer': 1437,
'7.': 2485,
'7.30': 6697,
'7.30ish?': 6049,
'7.30pm': 4911,
'7.8p/min': 488,
'7.Children:': 2940,
'7.shy': 13220,
'7250': 13161,
'7250i.': 8379,
'730.': 11411,
'731': 6854,
'74355': 11362,
'750': 11548,
'7548': 1717,
'7732584351,': 9237,
'786': 12498,
'7876150ppm': 6247,
'7WS.': 10210,
'7am': 8121,
'7cfca1a': 12210,
'7ish,': 2729,
'7ish?': 9391,
'7oz.': 4492,
'7pm': 6816,
'7th': 1319,
'8': 7628,
"8'o": 3976,
'8+6+3=': 8988,
'8-8:30': 10596,
'8.': 9152,
'8.30': 2040,
'8.30.': 10110,
'8.Attractive': 2724,
'8.Neighbour:': 5114,
"80's": 12824,
'800': 6535,
'8000930705': 12076,
'80062': 8771,
'80062.': 11073,
'8007': 3397,
'80082': 9009,
'80086': 8553,
'80122300p/wk': 8853,
'80155,': 10352,
'80160': 61,
'80182': 6670,
'8027': 13658,
'80488': 12293,
'80488.': 13043,
'80488.biz': 12826,
'80608.': 11812,
'8077': 5193,
'80878.': 1322,
'81010': 777,
'81151': 13470,
'81303': 9952,
'81303.': 4727,
'81618,': 2749,
'81618-?3': 8378,
'82242': 3023,
'82277.': 109,
'82277.unsub': 2959,
'82324.': 6983,
'82468': 5292,
'83021.': 2135,
'83049.': 13203,
'83110': 7985,
'83222': 6405,
'83222.': 9702,
'83332.Please': 711,
'83355': 11677,
'83355!': 11631,
'83370.': 13238,
'83383': 6967,
'83383.': 4003,
'83435.': 4627,
'83600': 4397,
'83600.': 8164,
'83738.': 13807,
'84,': 1328,
'84025': 6316,
'84122': 3853,
'84128': 1121,
'84199': 3445,
'84199.': 8788,
'84484': 8023,
'85': 10052,
'85.': 2921,
'850.': 5994,
'85023': 13583,
'85069': 4245,
'85222': 10966,
'85233': 10700,
'8552': 2067,
'8552.': 5248,
'85555': 4932,
'86021': 4913,
'861': 8150,
'86688': 2550,
'87021.': 627,
'87066': 9430,
'87070.': 13262,
'87077': 2935,
'87077:': 8658,
'87121': 5886,
'87131': 6768,
'87131.': 41,
'8714714': 812,
'87239': 895,
'87575.': 10873,
'88039': 5130,
'88039.SkilGme.TsCs087147403231Winawk!Age16+å£1.50perWKsub': 6240,
'88066': 8892,
'88088': 9701,
'88222': 7452,
'88600': 2441,
'88600.': 6713,
'88800': 5006,
'8883': 1751,
'88877': 3589,
'88877>FREE': 6361,
'88888.': 2240,
'89034': 2530,
'89070': 1956,
'89080': 6127,
'89105.': 831,
'89123"': 12215,
'89545': 3507,
'89555': 521,
'89555.': 4516,
'89693': 2483,
'89693.': 11300,
'89938': 6137,
'8WP': 4273,
'8am': 11002,
'8am.': 43,
'8lb': 4987,
'8pm,': 4641,
'8pm.': 4254,
'8th': 6428,
'8th)': 11561,
'8th?': 4362,
'9': 7221,
'9+2+4=': 1933,
'9,': 12115,
'9.': 5265,
'9.funny': 12945,
'9061100010': 4281,
'9153.': 7560,
'930': 6680,
'930.': 5314,
'945+': 9531,
'946': 4994,
'97N7QP,': 1038,
'98321561': 3326,
'9996.': 6144,
'9?': 12449,
'9AE': 808,
'9YT': 13764,
'9am': 2038,
'9am-11pm': 2146,
'9ja': 6264,
'9ja.': 3170,
'9pm.': 1450,
'9t:)': 12712,
'9t;-)': 10354,
'9th': 2538,
':': 1422,
':(': 12924,
':)': 8668,
':)"': 5446,
':)all': 3537,
':)finally': 5054,
':)going': 6032,
':)it': 3978,
':)my': 4451,
':)so': 188,
':)voice': 13272,
':)why': 9457,
':-(': 3449,
':-)': 13107,
':-):-)': 915,
':-):-)."': 11902,
':-)only': 975,
':-)please': 3956,
':-/': 13475,
':-D': 3832,
':-P': 2174,
':/': 10001,
':V': 8327,
':getzed.co.uk': 2594,
':zed': 4563,
';)': 13640,
';).': 3699,
';-(': 894,
';-)': 9410,
';_;': 11834,
';abel': 2597,
'<Forwarded': 2589,
'<UKP>2000': 3116,
'=': 130,
'=)': 4559,
'=/': 5340,
'=D': 8588,
'?': 9877,
'?!': 11047,
'?!\\"': 2762,
'?-': 12779,
'?.': 9698,
'?1,000': 12645,
'?2,000': 12855,
'?350': 9027,
'??': 5368,
'@': 8417,
'@Shesil': 11058,
'A': 4927,
'A,': 3579,
'A-Green': 6798,
'A-ffectionate.': 2169,
'A.': 5125,
'A21.': 2343,
'ABOUT': 7373,
'ABOUTAS': 12189,
'ABTA': 9640,
'ACCEPT': 6685,
'ACL03530150PM': 2750,
'ACTION': 12600,
'AD': 8295,
'ADAM': 12529,
'ADDAMSFA,': 8948,
'AFEW': 7264,
'AFTER': 8670,
'AFTERNOON?': 11150,
'AG': 4809,
'AGAIN': 5625,
'AGAIN!': 11583,
'AGAIN.call': 9293,
'AGAINST': 9175,
'AGE': 3715,
'AGE,': 10190,
'AGES..RING': 10071,
'AGO.': 1074,
'AGO.CUSOON': 7314,
'AIG': 9441,
'AINT': 6258,
'AL!!!!!!!!!': 10446,
'ALERT!': 195,
'ALEX': 649,
'ALL': 3912,
'ALRITE': 10734,
'ALSO': 9161,
'ALWAYS': 12152,
'AM': 6012,
'AM!?!': 10141,
'AM.': 10450,
'AMY': 7528,
'AN': 5068,
'ANAL': 9110,
'AND': 672,
'ANNIE': 4438,
'ANS': 13260,
'ANSWER': 3036,
'ANTELOPE': 13566,
'ANYTHING': 229,
'AOM': 13337,
'AOM,': 4398,
'APRIL.': 9600,
'ARE': 8226,
'AREA.': 1190,
'ARIES': 1749,
'AROUND!': 3674,
'ARR': 6638,
'AS': 4542,
'ASAP': 6974,
'ASAP!': 4161,
'ASKED,': 12678,
'ASTHERE': 3438,
'ASUSUAL!1': 2438,
'AT': 6484,
'AUCTION': 7912,
'AUGUST!': 12366,
'AV': 9227,
'AVA': 5755,
'AVE': 7132,
'AVENT': 1920,
'AWAITING': 4684,
'AWARD!': 5538,
'AXIS': 11571,
'Aah!': 6940,
'Aah.': 4523,
'Aaniye': 13211,
'Aaooooright': 8879,
'Ab..,': 2565,
'Abbey!': 13381,
'Abeg,': 2647,
'Aberdeen,': 4658,
'Abiola': 7905,
'Abiola.': 6622,
'About': 1633,
'Accept': 9716,
'Accident': 1617,
'Account': 4961,
'Accounts': 6554,
'Ache': 10488,
'Acnt.': 342,
'Aco&Entry41': 5970,
'Activate': 58,
'Active': 7010,
'Actually': 13730,
'Actually,': 8749,
'Address': 8195,
'Admirer': 13808,
'Adrian': 5717,
'Adult': 6141,
'Aeronautics': 36,
'Affections...&': 10736,
'Aft': 3940,
'After': 3491,
'AfterNoon.': 12844,
'Afternoons,': 8787,
'Aftr': 2481,
'Again': 12407,
'Agalla': 1462,
'Age': 13794,
'Ages': 12989,
'Ah': 3123,
'Ah,': 1403,
'Ahhh.': 4801,
'Ahmad': 6386,
'Aight': 10231,
'Aight,': 6043,
'Air': 5260,
'Airtel': 171,
'Aiya': 3769,
'Aiyah': 11742,
'Aiyar': 6599,
'Aiyo': 2646,
'Aiyo...': 2320,
'Aka': 8212,
'Al': 7371,
'Aldrine,': 2330,
'AlertFrom:': 8436,
'Alex': 13353,
'Alfie': 13339,
'Algarve': 2690,
'All': 6260,
'Allah,meet': 540,
'Allah.Rakhesh,': 3259,
'Allo!': 3450,
'Almost': 617,
'Already': 13798,
'Alright': 12458,
'Alright,': 853,
'Alright.': 5291,
'Alrite': 2760,
'Also': 3630,
'Although': 4824,
'Always': 6178,
'Alwys': 6618,
'Am': 156,
'Amanda': 13299,
'Amazing': 5198,
'Ambrith..madurai..met': 6343,
'American': 3427,
'Ami': 10012,
'Amrca': 12245,
'Amrita': 9292,
'An': 1490,
'And': 7915,
'Annoying': 1656,
'Another': 10887,
'Any': 11155,
'Anyone': 12197,
'Anything': 5517,
'Anything!': 7431,
'Anything...': 13601,
'Anytime': 8075,
'Anytime.': 10366,
'Anyway': 42,
'Anyway,': 4701,
'Anyways': 8630,
'Apart': 7328,
'Apnt': 8897,
'Apparently': 13133,
'Apply': 2455,
'Apps': 10391,
'Appt': 10416,
'Ar.Praveesh.': 2533,
'Arabian': 8573,
'Arcade': 13465,
'Ard': 6344,
'Are': 13010,
'Argh': 12684,
'Argument': 4197,
'Arms': 11670,
'Arngd': 4203,
'Arrow': 4957,
'Arul.': 5565,
'Arun': 4792,
'As': 6833,
'Ask': 10499,
'Asking': 8694,
'At': 801,
'Athletic': 2588,
'Atlanta.': 1698,
'Atlast': 3214,
'Atleast': 146,
'Auction': 2472,
'Audrey': 1856,
'Audrie': 8166,
'August': 13460,
'August.': 4706,
'Auntie': 11642,
'Available': 11934,
'Ave': 6719,
'Avenge': 7524,
'Awarded': 13276,
'Awesome': 8914,
'Awesome,': 12432,
'Aww': 13759,
'Awww': 9925,
'Ay': 6674,
'Aå£1.50': 2467,
'B': 7014,
"B'day": 12583,
"B'tooth*.": 9260,
'B-Blue': 10492,
'B-day': 12733,
'B.': 2404,
'B/Tooth': 10982,
'B4': 7401,
'B4U': 9282,
'BA128NNFWFLY150ppm': 4537,
'BABE': 13178,
'BABE!': 13736,
'BABE,': 4643,
'BABE..': 9815,
'BABE.SOZI': 6932,
'BABES': 3918,
'BABY!HOPE': 5337,
'BACK': 11876,
'BACKWARDS)': 11454,
'BAHAMAS!': 11622,
'BAK': 5989,
'BAK!': 12238,
'BANG': 7392,
'BANK': 4452,
'BANNEDUK': 4211,
'BATH.': 1481,
'BBC': 9735,
'BBD(pooja)': 12785,
'BBDELUXE': 8530,
'BCK': 4350,
'BCM1896WC1N3XX': 766,
'BCM4284': 8663,
'BCMSFWC1N3XX': 2516,
'BCoz,I': 5937,
'BE': 826,
'BEAUTIFUL,': 8053,
'BED': 12837,
'BEDROOM!LOVE': 11106,
'BEEN': 12642,
'BEFORE': 4503,
'BEGIN': 5228,
'BELIEVE': 1913,
'BETTER': 12749,
'BETTERSN': 3760,
'BIDS': 5594,
'BIG': 3180,
'BIRD': 12660,
'BIRTHDAY': 4531,
'BIT': 9168,
'BLOKE': 11308,
'BLOKES': 10594,
'BLOOD.Send': 6891,
'BMW': 11914,
'BONUS': 1613,
'BORED': 9119,
'BOUT': 5168,
'BOUT!)xx': 4886,
'BOX': 7266,
'BOX139,': 3426,
'BOX385': 505,
'BOX420.': 4556,
'BOX42WR29C,': 11624,
'BOX95QU': 4879,
'BOX97N7QP,': 8173,
'BOYF?': 5489,
'BRAND': 2688,
'BREAK': 5370,
'BREAKIN': 11320,
'BREATHE1': 78,
'BRISTOL': 4325,
'BROKE': 961,
'BROTHER': 13507,
'BSLVYL': 1700,
'BT': 3078,
'BT-national': 1838,
'BT-national-rate': 7530,
'BT-national-rate.': 209,
'BTW': 11330,
'BUDDY!!': 12154,
'BUT': 11101,
'BUTTHERES': 216,
'BY': 7670,
'BYATCH': 1928,
'Baaaaaaaabe!': 1584,
'Baaaaabe!': 8601,
'Babe': 5057,
'Babe!': 13218,
'Babe,': 9708,
'Babe:': 7091,
'Babe?': 7217,
'Baby': 7944,
'BabyGoodbye': 13518,
'Back': 7219,
'Back.': 5996,
'Bad': 5251,
'Badrith': 11039,
'Bahamas': 12004,
'Ball': 12739,
'Balls.': 11108,
'Bam': 5075,
'BangBabes': 7051,
'Bani': 5789,
'Bank': 5974,
'Barbie\\"?': 8712,
'Barkleys': 90,
'Barry': 8803,
'Bbq': 8774,
'Bc': 1416,
'Bcm': 11516,
'Bcoz': 1117,
'Bcoz,': 2860,
'Be': 10830,
'Bears': 11680,
'Beautiful': 5555,
'Beauty': 3701,
'Bec': 5389,
'Becaus': 12615,
'Because': 11547,
'Become': 11641,
'Becomes': 4588,
'Becoz': 13096,
'Been': 6661,
'Beer': 9145,
'Beer-Rs.': 6778,
'Beerage?': 5231,
'Behind': 548,
'Belovd': 336,
'Ben': 3380,
'Best': 676,
'Best1': 2718,
'Beth': 4163,
'Better': 3979,
'Better.': 6404,
'Beware': 7596,
'Beware!': 5110,
'Big,': 4519,
'Bill': 5874,
'Bill,': 12452,
'Bin': 3014,
'Biola': 12820,
'Biro': 8642,
'Birthday': 13056,
'Birthday?': 457,
'Bishan': 12683,
'Bit': 5066,
'Bite': 9573,
'Bits': 8987,
'Black': 8593,
"Black;i'm": 12772,
'Blank': 1195,
'Blank.': 10098,
'Bless': 12221,
'Blind': 1254,
'Block': 2180,
'Bloody': 144,
'Bloomberg': 1444,
'Blu': 4680,
'Blue;u': 11126,
'Bluetooth': 7424,
'Bluetooth!': 10695,
'BluetoothHdset': 5157,
'Bob,': 5286,
'Bognor': 3334,
'Boltblue': 13201,
'Bone': 4597,
'Bonus': 10705,
'Bonus!': 12035,
'Boo': 4437,
'Boo.': 6095,
'Book': 4747,
'Booked': 8049,
'Bookmark': 12088,
'Boooo': 1980,
'Bored': 1551,
'Boss': 4022,
'Both': 1581,
'Bought': 10311,
'Box': 13823,
'Box1146': 1733,
'Box177.': 5800,
'Box326': 4985,
'Box334': 8374,
'Box39822': 12580,
'Box434SK38WP150PPM18+': 1220,
'Box61,M60': 13502,
'Boy': 12191,
'Boy:': 8284,
'Boy;': 954,
'Boys': 1375,
'Boys.': 3774,
'Brainless': 3758,
'Brandy': 5046,
'Bray,': 1669,
'Brdget': 10081,
'Break': 3906,
'Breaker': 1507,
'Brief': 3505,
'Bright': 2037,
'Brilliant.': 13099,
'Bring': 570,
'Bristol': 10033,
'British': 702,
'Britney.': 2354,
'Broadband': 13378,
'Brother': 4099,
'Brought': 3054,
'Brown;i': 5304,
'Bstfrnd': 10339,
'Bt': 7189,
'Btw': 1643,
'Buffy.': 11117,
'Bugis': 5871,
'Building': 7492,
'Bull.': 13329,
'Burger': 2761,
'Burns': 10337,
'Busy': 6381,
'But': 12118,
'But,': 8410,
'Buy': 5410,
'Buzz': 3762,
'Buzz!': 9087,
'Buzzzz!': 11456,
'Bx': 1219,
'Bx526,': 10529,
'By': 7751,
'Bye.': 7192,
'C': 7979,
'C!': 3047,
"C's": 10849,
'C)': 5556,
'C-IN': 3622,
'C-Red': 12845,
'C.': 4032,
'C52.': 4939,
'CAL': 123,
'CALL': 9377,
'CALLIN': 13706,
'CALM': 1523,
'CAMERA': 2961,
'CAMERA,': 2584,
'CAN': 10507,
'CANCEL': 13008,
'CANT': 11620,
'CARD': 8494,
'CARE': 762,
'CAREFUL': 758,
'CARLIE': 3651,
'CASH': 4396,
'CASH!': 1268,
'CASH,': 5041,
'CAT': 142,
'CATCH': 9739,
'CAUSE': 10216,
'CC': 5063,
'CC:': 2666,
'CD': 11168,
'CDGT': 960,
'CDs': 7722,
'CER': 9857,
'CERI': 13052,
'CHANCE': 448,
'CHARGE': 2061,
'CHARITY': 5365,
'CHAT': 4331,
'CHECKIN': 2899,
'CHEERED': 11704,
'CHILLIN': 4656,
'CHIT-CHAT': 11275,
'CHOSEN': 1739,
'CL.': 2964,
'CLAIM': 9243,
'CLAIRE': 10572,
'CLUB': 11198,
'CLoSE': 13501,
'CM': 11588,
'CNN': 1885,
'COLLECT': 1253,
'COLLEGE': 8720,
'COLLEGE!': 9517,
'COME': 8504,
'COMIN': 10107,
'COMPLETELY': 3165,
'CONCERNED': 13796,
'CONTENTION': 10114,
'CONTRACT!!': 10554,
'CONVINCED': 3404,
'COOL...': 9557,
'COS': 1103,
'COUNTINLOTS': 5272,
'CR01327BT': 6531,
'CR9': 4902,
'CRAZYIN,': 562,
'CREDIT': 3236,
'CRISIS!SPK': 6270,
'CRO1327': 3447,
'CSH11': 7227,
'CULDNT': 5841,
'CUM': 13265,
'CUP': 3609,
'CUTE': 8463,
'CW25WX': 8184,
'CaRE': 1569,
'Cab': 3171,
'Cable': 1601,
'Cali': 2699,
'Call': 11497,
'Call,': 7321,
'Call.': 4787,
'Call2OptOut/674&': 6530,
'CallFREEFONE': 12396,
'Caller': 9899,
'Callers.': 10252,
'Callertune': 1724,
'Calls': 818,
'Callså£1/minMobsmore': 9883,
'Callså£1/minMobsmoreLKPOBOX177HP51FL': 7391,
'Callså£1/minmoremobsEMSPOBox45PO139WA': 2109,
'Camcorder': 8821,
'Camcorder?': 10287,
'Camera': 7171,
'Camera!': 969,
'Can': 3801,
"Can't": 11987,
"Can't.": 11145,
'Can.': 9743,
'Can...': 2609,
'Can?': 6211,
'Canada,': 6003,
'Canary': 602,
'Cancel': 8496,
'Cancer.': 12561,
'Cannot': 4890,
'Cant': 8256,
'Captain': 10430,
'Cardiff': 1052,
'Care..:-)': 12228,
'Careful!': 9202,
'Carlos': 11714,
"Carlos'll": 5940,
'Carry': 957,
'Case': 2298,
'Cash': 4222,
'Cash.': 12073,
'Catching': 10748,
'Caught': 3927,
'Cause': 1714,
'Cbe': 1124,
'Celebrated': 11496,
'Celebrations': 6028,
'Centre': 3493,
'Cha': 13643,
'Chachi': 2094,
'Chance': 1410,
'Chance!': 7652,
'Change': 12377,
'Channel': 5456,
'Charles': 9172,
'Chart': 12474,
'Chasing': 11194,
'Chat': 3642,
'Chat.': 7893,
'Chat80155': 6444,
'Cheap': 5371,
'Check': 3790,
'Cheer': 12722,
'Cheers': 2788,
'Cheers,': 2952,
'Chef': 9620,
'Children': 11055,
'Chinatown': 434,
'Chinese': 10964,
'Chk': 1447,
'Choose': 12551,
'Christians': 12381,
'Christmas': 10,
'Christmas!': 12387,
'Christmas!Merry': 6825,
'Christmas...': 9140,
'Ciao!': 7955,
'Cine': 1709,
'City': 2353,
'Claim': 4508,
'Claim,': 3060,
'Claim.': 1314,
'Classic': 11172,
'Click': 7665,
'Clos1': 4999,
'Close': 13608,
'Club': 8518,
'Club.': 7322,
'Club4': 3521,
'Club:': 10084,
'Club>>': 12353,
'Co': 2522,
'Co.': 6469,
'Coca-Cola': 13219,
'Code': 9740,
'Code:': 9431,
'Coffee': 8078,
'Cold.': 1046,
'Colleagues': 13269,
'Colleagues.': 9652,
'Collect': 5808,
'Colour': 1060,
'Com': 11448,
'Come': 6799,
'Companion': 11957,
'Company': 7875,
'Compass': 12714,
'Complete': 5000,
'Concentrate': 6507,
'Confidence:': 3294,
'Congrats': 415,
'Congrats!': 6569,
'Congrats.': 6610,
'Congratulations': 10548,
'Congratulations!': 6756,
'Consider': 13031,
'Contact': 8064,
'Content': 8170,
'Convey': 949,
'Cool': 8037,
'Cool,': 13253,
'Cool.': 3496,
'Cornwall': 3742,
'Correct': 8862,
'Correct.': 12961,
'Cos': 12382,
'Cost': 7231,
'Costa': 2990,
'Costs': 643,
'Costå£1.50/pm,': 4834,
'Cougar-Pen': 2918,
'Could': 1285,
'Courageous': 5095,
'Coz': 5400,
'Coz..somtimes': 11163,
'Cps': 1068,
'Crab': 11875,
'Crack': 7723,
'Cramps': 9465,
'Crazy': 5312,
'Credit!': 5239,
'Croydon': 1207,
'Crucify': 13089,
'Crying': 2408,
'Cs': 7836,
'Ctagg.': 1087,
'Ctargg.': 9268,
'Cttargg.': 6987,
'Ctter.': 4878,
'Cttergg.': 6272,
'Cud': 10371,
'Cup': 6166,
'Cust': 10461,
'CustCare:08718720201': 400,
'CustCare:08718720201.': 2418,
'Customer': 4428,
'Cut': 9218,
'Cute': 12864,
'Cute:': 1017,
'Cutefrnd': 9511,
'Cutter.': 6052,
'Cuz': 11723,
'D': 3378,
'D=': 9186,
'DA': 7913,
'DABOOKS.': 8385,
'DADS': 11785,
'DAMMIT!!': 13432,
'DAN': 12867,
'DARLIN': 1329,
'DAS': 3953,
'DATE': 13834,
'DATEBox1282EssexCM61XN': 9719,
'DAY': 8901,
'DAY!': 1067,
'DAY!2': 9827,
'DAY..U': 6765,
'DAYS': 4307,
'DAY\\"': 3072,
'DD': 108,
'DE': 5740,
'DEAD': 1241,
'DECIDED': 6934,
'DEF': 9696,
'DEFO': 1589,
'DEL': 7612,
'DENA': 8605,
'DETAILS': 1459,
'DID': 4953,
"DIDN'T": 2598,
'DIDNT': 10816,
'DIGITAL': 13443,
'DIRTY': 4013,
'DIS': 4690,
'DLF': 12321,
'DO': 2606,
'DOESDISCOUNT!SHITINNIT\\""': 3610,
'DOESNT': 10201,
'DOGBREATH?': 10983,
'DOIN': 12301,
'DOIN?': 11535,
'DONE': 4926,
'DONE!': 5255,
'DONT': 13458,
'DONåÕT': 1801,
'DOT': 10613,
'DOUBLE': 10976,
'DOWN': 12628,
'DOWNON': 4923,
'DPS,': 7698,
'DRACULA': 486,
'DRAW': 8641,
'DREAMS..': 4082,
'DREAMZ': 13581,
'DRUNK!': 9211,
'DUMB?': 1731,
'DVD': 8113,
'DVD!': 8843,
'Da': 4982,
'Dad': 9080,
'Dad.': 11675,
'Daddy': 905,
'Dai': 9070,
'Damn': 230,
'Damn,': 4053,
'Dan': 5758,
'Dare': 3396,
'Darling': 11911,
'Darren': 11186,
'Darren...': 8779,
'Dasara': 10344,
'Dat': 13816,
'Date': 7382,
'Dates': 11870,
'Dating': 5018,
'Dave': 179,
'Day': 11772,
'Day!': 9975,
'Day,': 210,
'Day.': 12886,
'Day...!!!': 2829,
'Day:)': 2489,
'Day\\"': 9060,
'De': 11668,
'Deal?': 11051,
'Dear': 2596,
'Dear,': 13264,
'Dear,Me': 442,
'Dear,regret': 4177,
'Dear,shall': 857,
'Dear.': 8733,
'Dear...........': 4511,
'Dear..:)': 4464,
'Dear1': 2913,
'December': 3565,
'Deeraj': 8915,
'Def': 2763,
'Del': 13202,
'Delhi': 9016,
'Deliver': 3013,
'Delivered': 4560,
'DeliveredTomorrow?': 13128,
'Delivery': 10255,
'Den': 2068,
'Dependable': 11451,
'Depends': 10277,
'Derp.': 12752,
'Designation': 4121,
'Desires...,': 12097,
'Determined': 2806,
'Detroit': 6239,
'Detroit.': 6975,
'Devils': 10068,
'Dey': 4837,
'Dial': 3974,
'Dick.': 4253,
'Dictionary': 7766,
'Did': 7638,
"Didn't": 1240,
'Die': 2459,
'Die...': 5218,
'Digital': 6246,
'Dileep.thank': 1476,
'Din': 13710,
'Dint': 2534,
'Direct': 11886,
'Dis': 9331,
'Disconnect': 6592,
'Discussed': 5067,
'Diseases': 5592,
'Ditto.': 550,
'Diwali': 385,
'Dizzamn,': 9983,
'Dizzee': 10153,
'Dload': 10753,
'Dnt': 1149,
'Do': 5543,
'Doc': 703,
'Doctor.': 10443,
'Does': 12058,
"Doesn't": 10151,
'Dogging': 13725,
'Doggy': 9623,
'Doing': 4765,
'Doll..:-D;-),': 1568,
'Dom': 10080,
'Don': 13856,
"Don't": 13868,
"Don't4get2text": 3258,
'Done': 7623,
'Dont': 5245,
'DontCha': 2397,
'Don\x89Û÷t': 10403,
'[email protected]': 4472,
'Double': 6327,
'DoubleMins': 7577,
'DoubleTxt': 8982,
'Download': 1696,
'Dozens': 8758,
'Dr': 9669,
'Draw': 10861,
'Draw.': 5374,
'Dream': 1532,
'Drinks': 10729,
'Driver': 9837,
'Drop': 7166,
'Drop,': 13540,
'Duchess': 2525,
'Dude': 3333,
'Dude.': 2456,
'Dun': 13363,
'Dunno': 3531,
'Dunno,': 7159,
'Dvd': 10317,
'E': 8799,
'E-namous.': 10609,
'E-ternal.': 9011,
'E.G.23F.': 6120,
'E14': 1621,
'E=': 5780,
'EACHOTHER.': 3369,
'EASTENDERS': 11838,
'EAT': 12276,
'EH74RR': 9449,
'ELLO': 12944,
'ENC': 7276,
'END': 8419,
'ENG': 6210,
'ENGLAND': 5477,
'ENGLAND!': 4551,
'ENJOY': 7059,
'ENJOYIN': 7991,
'ENTER': 2185,
'ENTER.': 2585,
'ENTRY': 8114,
'ESPECIALLY': 6882,
'EURO': 4814,
'EURO2004': 5360,
'EVE': 2293,
'EVE.': 4282,
'EVEN': 1055,
'EVENING*': 3352,
'EVERY': 8395,
'EVERY1': 6072,
'EVERYTHIN': 3752,
'EVONE': 8112,
'EXACT': 5518,
'EXAMS': 574,
'EXETER': 6357,
'EXORCIST,': 3284,
'EXP.': 5934,
'EXPLAIN': 13290,
'EXPLOSIVE': 932,
'EXTREME': 7372,
'Each': 6743,
'Early': 13197,
'Earth...': 10912,
'Easter': 4410,
'Easy': 3724,
'Eat': 13791,
'Echo': 2171,
'Eckankar': 12937,
'Edison': 12165,
'Ee': 12564,
'Eek': 886,
'Eek!': 1689,
'Eerie': 3897,
'Egbon': 7208,
'Eh': 10425,
'Eire.': 5763,
'Either': 4300,
'Ela': 4728,
'Elvis': 8063,
'Em,': 5791,
'Email': 8287,
'Emily': 2234,
'Emotion': 12302,
'En': 7449,
'End?': 4566,
'Energy': 9498,
'Energy!': 4394,
'England': 8415,
'Enjoy': 7218,
'Enjoy!': 1761,
'Enjoy,': 8525,
'Enna': 8276,
'Er': 9680,
'Erm': 9513,
'Erm.': 10031,
'Erm...': 2188,
'Err...': 590,
'Erutupalam': 5576,
'Ese': 6629,
'Especially': 8159,
'Euro': 2470,
'Euro2004': 9794,
'EuroDisinc': 13170,
'Europe': 8303,
'Eve': 978,
'Even': 9736,
'Evening': 10411,
'Evenings': 4662,
'Ever': 8383,
'Ever.': 1065,
'Every': 11482,
'Everyday': 11104,
'Everyone': 1860,
'Everything': 6493,
'Everytime...': 4402,
'Evn': 897,
'Evr"': 8257,
'Evrey': 10960,
'Evry': 9704,
'Ew': 12325,
'Exactly.': 10088,
'Exam': 13525,
'Excellent!': 1965,
'Excellent,': 5616,
'Executive': 1716,
'Exorcism': 7099,
'Expected...': 7494,
'Expecting': 1339,
'Expires': 12994,
'Explain!': 450,
'Express': 11567,
'Expression': 13012,
'Extra': 12796,
'Eyes': 12502,
'F': 3121,
'F=': 13506,
'FA': 11996,
'FANCY': 13183,
'FANTASIES': 6222,
'FANTASTIC': 11616,
'FANTASY': 10555,
'FAR': 10533,
'FEEL': 10030,
'FFFFUUUUUUU': 9659,
'FIELDOF': 5654,
'FIFA': 5589,
'FIFTEEN': 12341,
'FILTH': 11141,
'FINAL': 7584,
'FIND': 7183,
'FINE': 10266,
'FINEST,': 1888,
'FINISH': 7730,
'FIRST': 12775,
'FIT': 615,
'FLAG': 5856,
'FLOWER': 8757,
'FM...you': 5143,
'FML': 11896,
'FONE': 10859,
'FONE,': 10397,
'FOR': 11220,
"FR'NDSHIP": 2213,
'FRAN': 4675,
'FRANYxxxxx': 8079,
'FREE': 2461,
'FREE!': 8260,
'FREE-Nokia': 7721,
'FREE.': 13827,
'FREE2DAY': 5731,
'FREE>Ringtone!': 1296,
'FREE>Ringtone!Reply': 8228,
'FREEFONE': 3696,
'FREEMSG:': 6755,
'FREEPHONE': 3592,
'FRESHERS': 2315,
'FRIEND': 10243,
'FRIENDS': 12455,
'FRIENDSHIP': 1180,
'FRND': 3930,
'FRNDSHIP\\""': 9927,
'FRNDS\\"': 13615,
'FROM': 10489,
'FUCK': 11750,
'FUCKED': 8107,
'FUCKIN': 4068,
'FUCKING': 4202,
'FUDGE': 3845,
'FUNNY!': 11357,
'Face.:)': 3389,
'Facebook?': 12357,
'Fails': 12593,
'Fair': 10431,
'Faith': 4735,
'Fancy': 6684,
'Fantasy': 377,
'Fat.': 1035,
'Fear': 3347,
'Feb': 872,
'Feel': 11052,
'Feeling': 8536,
'Feels': 10687,
'Fffff.': 10271,
'Ffffffffff.': 8856,
'Fifty?': 5547,
'Fighting': 12095,
'Filling': 1869,
'Filthy': 9086,
'Final': 3330,
'Finally': 4132,
'Find': 6544,
'Fine': 8698,
'Fine.': 10996,
'Fingers': 667,
'Finish': 10049,
'Finished': 11210,
'First': 9789,
'Flag': 10463,
'Flight': 447,
'Flirt!!': 4785,
'Flung': 12114,
'Fml': 13596,
'Foley': 6085,
'Follow': 2368,
'Food': 6861,
'Fools': 1665,
'Football': 12069,
'For': 368,
'Force.': 3937,
'Forever': 13315,
'Forevr...': 11787,
'Forgets': 2393,
'Forgot': 9111,
'Found': 1834,
'Fr': 8612,
'Fredericksburg': 9191,
'Free': 8039,
'Free!': 5827,
'Free,': 10903,
'Free-message:': 9836,
'FreeMSG': 2552,
'FreeMsg': 2783,
'FreeMsg:': 11856,
'FreeMsg:Feelin': 8802,
'FreeMsg>FAV': 3193,
'Freemsg:': 2047,
'Fresh': 3020,
'Fri': 7832,
'Friday': 12501,
'Friday,': 5711,
'Friday.': 1100,
'Friends': 12895,
'Friends..!!': 9248,
'Friendship': 4823,
'Friendship,Mother,Father,Teachers,Childrens': 12647,
'Friendship\\"...': 11001,
'Frnd': 7718,
'From': 5735,
'From:': 867,
'Fuck': 10309,
'Fuck,': 7265,
'Fun': 6513,
'Funny': 11749,
'Future': 205,
'Fwiw': 13829,
'Fyi': 11648,
'G': 12059,
'G.': 13351,
'G.B.': 3628,
'G.W.R': 11336,
'G2': 9817,
'GAIL': 4054,
'GANG': 3506,
'GAS': 5499,
'GBP': 8827,
'GBP/week!': 2126,
'GBP1.50/week': 10479,
'GBP4.50/week.': 12782,
'GBP5/month.': 2065,
'GENDER': 1636,
'GENT!': 3968,
'GET': 10019,
'GETTIN': 10154,
'GHOST,': 7563,
'GIFTS!!': 1141,
'GIMMI': 11982,
'GIRL': 2611,
'GIRL,': 10724,
'GIRL.': 7879,
'GIRLS': 1797,
'GIVE': 4670,
'GM': 4430,
'GM+GN+GE+GN:)': 4661,
'GMW': 8488,
'GO': 10686,
'GO!': 11539,
'GOD': 3733,
'GOD,I': 11688,
'GOIN': 8292,
'GOING': 474,
'GONE(U': 1030,
'GONNA': 1949,
'GONNAMISSU': 6522,
'GOOD': 1082,
'GOODEVENING': 6255,
'GOODFRIEND': 397,
'GOODMORNING': 4600,
'GOODMORNING"': 5861,
'GOODMORNING:)': 3157,
'GOODNIGHT': 5707,
'GOODNOON:)': 3759,
'GOODTIME!OLI': 11873,
'GORGEOUS': 3567,
'GOSS!x': 2578,
'GOT': 4083,
'GOTTA': 13479,
'GOWER': 10050,
'GR8FUN': 7624,
'GRAVEL': 3824,
'GREAT': 3247,
'GROW.RANDOM!': 10142,
'GSOH?': 13085,
'GUARANTEED': 12916,
'GUARANTEED!': 9731,
'GUARANTEED.': 6869,
'GUD': 1266,
'GURL': 5872,
'Gam': 8576,
'Game': 3335,
'Games': 8731,
'Games,': 1393,
'Gamestar': 6645,
'Ganesh': 10678,
'Gary': 11716,
'Gay': 3329,
'Gaze': 4036,
'Gd': 6698,
'Ge:-)..': 3991,
'Gee,later': 11102,
'Gee...': 12418,
'Geeee': 13181,
'Geeeee': 418,
'Genius': 91,
"George's": 7430,
'Germany': 12737,
'Get': 650,
'Gettin': 1010,
'Gibbs': 3348,
'Gift': 4000,
'Gimme': 10253,
'Gin': 600,
"Girl'": 1349,
'Girls': 8307,
'Give': 12789,
'Glad': 8509,
'Gn"': 9197,
'Gnarls': 3900,
'Go': 7845,
'GoTo': 6241,
'God': 9149,
"God's": 11746,
'God,': 5465,
'God.': 11398,
'Gods': 309,
'Goin': 12728,
'Going': 738,
'GoldDigger': 3523,
'Good': 11288,
'Good!': 1810,
'Good.': 11469,
'Goodmorning': 13208,
'Goodmorning,': 3250,
'Goodmorning,my': 5992,
'Goodnight!"': 10387,
'Goodnight,': 10547,
'Goodnight.': 8202,
'Goodnoon..:)': 5980,
'Goodo!': 1005,
'Google': 5812,
'Gopalettan.': 1596,
'Gosh': 1849,
'Gossip,': 8602,
'Got': 9373,
'Got\\"': 5315,
'Goto': 7346,
'Gotta': 198,
'Gr8': 631,
'Grahmbell': 1833,
'Granite': 12641,
'Gravity..': 8708,
'Great': 7663,
'Great!': 10799,
'Great.': 3839,
'Green;i': 10701,
'Greetings': 940,
'Grl:': 5093,
'Grumpy': 4343,
'Guaranteed': 8767,
'Gud': 7153,
'Gudni8': 10786,
'Gudnite"': 594,
'Gudnite....tc...practice': 5701,
'Gudnyt': 1527,
'Guess': 6965,
'Guessin': 13160,
'Guide': 11615,
'Guild.': 560,
"Gumby's": 3467,
'H&M': 5395,
'H*': 3636,
'HAD': 2656,
'HALF': 10644,
'HAPPY': 3185,
'HARDCORE': 12345,
'HARDEST': 6354,
'HARRY,': 13591,
'HATES': 7058,
'HAV': 7281,
'HAVE': 611,
'HAVENT': 6267,
'HAVENTCN': 10150,
'HAVIN': 8741,
'HAVING': 5177,
'HCL': 12096,
'HEART': 5302,
'HELL': 11369,
'HELLO:\\You': 1861,
'HELLO??': 6808,
'HELLOGORGEOUS,': 12347,
'HELP': 155,
'HER': 8849,
'HEY': 2958,
'HG/Suite342/2Lands': 11899,
'HG/Suite342/2Lands/Row/W1J6HL': 1921,
'HI': 4415,
'HIDE': 10802,
'HIM': 1676,
'HIS': 6503,
'HIT': 12063,
'HIYA': 1715,
'HL': 3869,
'HMM': 12566,
'HMV': 4829,
'HMV1': 12041,
'HOLIDAY': 3687,
'HOLIDAY?': 10797,
'HOME': 12002,
'HOME.': 10132,
'HONEY?DID': 13305,
'HONI\\""': 4049,
'HOPE': 579,
'HOPEU': 11123,
'HORO': 294,
'HOT': 7996,
'HOTMIX': 11109,
'HOUSE': 3287,
'HOW': 10262,
'HOWDY': 6425,
'HOWS': 11823,
'HP20': 12698,
'HRS': 9025,
'HTTP://WWW.URAWINNER.COM': 5091,
'HUN!': 390,
'HUN!LOVE': 625,
'HUNNY': 8445,
'HUNNY!HOPE': 3680,
'HUNNY!WOT': 9966,
'Ha': 8686,
'Ha!': 7023,
'Ha.': 5092,
'Ha...': 5017,
'Habit': 11711,
'Hack': 11064,
'Had': 5702,
'Haf': 9723,
'Haha': 259,
'Haha,': 4959,
'Haha..': 7226,
'Haha...': 2515,
'Haha...take': 2854,
'Hahaha..use': 6436,
'Hai': 12167,
'Haiyoh...': 11520,
'Haiz...': 5693,
'Half': 6740,
'Hallaq': 5742,
'Halloween': 10447,
'Handset?': 677,
'Hang': 4935,
'Hanging': 9303,
'Hanumanji': 1984,
'Happy': 3000,
'Happy..': 2957,
'Happy?': 60,
'Hard': 320,
'Hardcore': 1725,
'Hari-': 11538,
"Harish's": 421,
'Has': 6004,
'Hasbro...in': 9296,
"Hasn't": 12466,
'Hav': 5939,
'Have': 13587,
"Haven't": 9535,
'Havent': 7475,
'Having': 12590,
'Havnt': 485,
'He': 7537,
"He's": 3298,
'Head': 11593,
'Headin': 10567,
'Headset': 12851,
'Healer': 1306,
'Hear': 11783,
'Heart': 10130,
'Hearts..': 6188,
'Hee': 246,
'Hee..': 6849,
'Hee...': 9213,
'Heehee': 11523,
'Height': 8330,
'Helen,': 8210,
'Hell': 10937,
'Hello': 12563,
'Hello!': 11793,
'Hello,': 1108,
'Hello-': 12512,
'Hello.': 3059,
'Hello\\"': 6506,
'Helloooo...': 10635,
'Help': 13549,
'Help08700621170150p': 1546,
'Help08714742804': 7890,
'Help08718728876': 1107,
'Help:': 8197,
'Help?': 5904,
'Helpline': 2070,
'Here': 8432,
'Hero,i': 12846,
'Heroes,': 9514,
'Hey': 13819,
'Hey!': 8921,
'Hey!!!': 4072,
'Hey,': 11841,
'Hey.': 10471,
'Hey..': 12280,
'Hey...': 12661,
'Hey...Great': 2886,
'Hhahhaahahah': 3147,
'Hi': 9362,
'Hi!': 4764,
"Hi'": 2701,
'Hi,': 1683,
'Hi.': 7184,
'Hi:)cts': 6363,
'Hi:)did': 4175,
'High': 6367,
'Him': 3583,
'Hint': 7599,
'His': 10046,
'History': 2833,
'Hiya': 9628,
'Hiya,': 9117,
'Hiya.': 6204,
'Hlp': 2060,
'Hm': 10073,
'Hmm': 8208,
'Hmm,': 6519,
'Hmm.': 6615,
'Hmm...Bad': 3009,
'Hmm...my': 5156,
'Hmmm': 4349,
'Hmmm,': 13631,
'Hmmm..': 4628,
'Hmmm...': 613,
'Hmmm...k...but': 4079,
'Hmmm:)how': 7398,
'Ho': 3314,
'Ho.': 5257,
'Hockey.': 2433,
'Hogli': 5961,
'Hogolo': 3316,
'Hol': 1380,
'Holder': 1111,
'Holder,': 13548,
'Holding': 413,
'Holiday': 5427,
'Hols.': 9478,
'Home': 11853,
'Homeowners': 1435,
'Honestly': 8333,
'Honey': 10602,
'Honeybee': 12572,
'Hope': 10650,
'Hopefully': 11,
'Hoping': 844,
'Horrible': 12201,
'Hospital': 11574,
'Hospitals': 7332,
'Host-based': 11893,
'Hot': 29,
'Hotel': 7274,
'Hottest': 11626,
'House-Maid': 5764,
'How': 9368,
"How's": 11190,
'How.': 2756,
'How...': 3653,
'How?': 1953,
'Howda': 2275,
'However': 9629,
'Hows': 8267,
'Howz': 7515,
'Hrishi': 9333,
'Hrishi.': 7846,
'Hubby': 407,
'Hugs': 13060,
'Huh': 8522,
'Huh...': 3789,
'Huh?': 12071,
'Hui': 5033,
'Hungry': 193,
'Hurry': 177,
'Hurt': 10121,
'Hurts': 13153,
'Hyde': 13667,
'I': 10892,
"I'd": 4646,
"I'll": 2695,
"I'm": 5586,
"I'ma": 4040,
"I've": 8928,
'I,': 9335,
'I-ntimate.': 305,
'I.': 6124,
'I.ll': 4510,
'I?': 981,
'I?This': 3102,
'IAS': 12550,
'IBH': 9660,
'IBHltd': 603,
'IBN.': 3043,
'ID': 12972,
'IDPS': 6154,
'IF': 7774,
'IFINK': 2970,
'IG11': 9576,
'IJUST': 7446,
'IKEA': 9026,
'IKNO': 6988,
'IKNOW': 8875,
'IL': 13665,
'ILL': 6949,
'IM': 1233,
'IMF': 12503,
'IMIN': 6298,
'IMPORTANT': 11703,
'IN': 10491,
'INC': 1008,
'INDIA': 11653,
'INFORMATION': 6538,
'INK': 8748,
'INR': 5050,
'INSTEAD!': 13777,
'INTELLIGENT': 8926,
'INTERFLORA': 10746,
'INTERVIW': 10941,
'INTO': 8474,
"INVITED'": 5849,
'IP4': 9579,
'IQ': 9609,
'IQ.': 2496,
'IS': 8781,
'ISH': 1211,
'ISNT': 3381,
'IT': 11595,
'IT+BOTH': 8519,
'IT.(NOW': 6655,
'IT.,': 13036,
'IT?T.B*': 4753,
'IT?xx': 4148,
'ITS': 1974,
'ITXT': 6578,
'IVE': 12848,
'IWANA': 13653,
'Iam': 8056,
'Ibiza': 584,
'Ice': 9279,
'Icic...': 4165,
'Id': 5139,
'Idea': 1193,
'Identifier': 12153,
'Idk': 8971,
'Idk.': 5268,
'If': 12606,
'Ill': 7284,
'Ilol': 5136,
'Im': 9844,
'Imagine': 5326,
'Immediately.': 7301,
'Important': 9396,
'Imprtant': 12910,
'In': 13358,
'Including': 3905,
'Inclusive': 3936,
'Incorrect?': 12682,
'Incredible': 6475,
'Independence': 8711,
'India': 1556,
'India.': 10929,
'India..!!': 7734,
'Indian': 13325,
'Indians': 13141,
'Indians.': 10942,
'Indyarocks.com': 3181,
'Inever': 5492,
'Infact': 3663,
'Inform': 9343,
'Information': 367,
'Insha': 3745,
'Instant': 10711,
'Intelligent': 3975,
'Interflora': 13236,
'Invaders': 999,
'Invest': 9621,
'Iraq': 6881,
'Irritates': 1629,
'Is': 11751,
'Isaiah.=D': 9233,
'Islands': 10697,
"Isn't": 11258,
'It': 3858,
"It'll": 6491,
"It's": 1987,
'It,,s': 12269,
'It..': 6020,
'It...!..!!': 4695,
'It?': 8031,
'Italian': 9928,
'Itna': 1395,
'Its': 8743,
'Itz': 9603,
'It\x89Û÷s': 3136,
'ItåÕs': 496,
'Ive': 5386,
'Iwas+marine&thatåÕs': 10061,
'Izzit': 8840,
'I\x89Û÷ll': 4543,
'I\x89Û÷m': 9366,
'IåÕLLSPEAK': 1587,
'IåÕm': 6207,
'IåÕve': 4286,
'J': 2878,
'J!': 6468,
'J89.': 8179,
'J?': 3549,
'JAN': 5377,
'JANE': 9256,
'JANINExx': 12414,
'JANX': 4595,
'JAZ': 1451,
'JD': 8600,
'JEN': 9945,
'JESS': 9234,
'JJC': 88,
'JSCO': 11605,
'JSco:': 13750,
'JULY': 12028,
'JUS': 4077,
'JUST': 3120,
'Jackpot!': 813,
'Jada': 6529,
'James': 13735,
'James.': 12443,
'Jamster!Get': 542,
'Jamz': 12835,
'January': 6284,
'January..': 3529,
'Japanese': 6212,
'Jay': 4166,
"Jay's": 5467,
'Jealous?': 7661,
'Jen': 13775,
'Jenny': 2600,
'Jeremiah.': 13435,
'Jeri': 9889,
'Jerry': 5799,
'Jersey': 3599,
'Jesus': 10534,
'Jez': 13189,
'Jia': 9993,
'Joanna': 10790,
'John': 10630,
'John.': 11273,
'Join': 3618,
'Join.': 2605,
'Joke': 12271,
'Jokes!': 2599,
'Jokin': 4389,
'Joking': 12106,
'Jon,': 3791,
'Jones!': 11816,
'Jordan': 9244,
'Jordan!': 5115,
'Jordan!Txt': 9459,
'Jordan,': 239,
'Jorge-Shock..': 4748,
'Jos': 9214,
"Joy's": 4184,
'Jst': 6220,
'Jstfrnd': 8242,
'Jsut': 4865,
'July': 62,
'July.': 13267,
'June': 11175,
'Junna': 5507,
'Jus': 4021,
'Just': 11859,
'Juz': 7283,
'K': 2661,
'K,': 9562,
'K.': 5100,
'K..': 8118,
'K...': 8780,
'K...k...when': 393,
'K...k...yesterday': 9861,
'K...k:)why': 7548,
'K..give': 12522,
'K..i': 673,
'K..k...from': 10767,
'K..k..any': 5031,
"K..k..i'm": 5124,
'K..k:)how': 13610,
'K..k:)where': 1503,
'K..u': 11534,
'K.:)do': 2642,
'K.:)you': 8060,
'K.i': 98,
'K.k..how': 11903,
'K.k:)advance': 2850,
'K.k:)apo': 11591,
'K.k:)when': 1864,
'K.then': 5308,
'K52.': 12181,
'K61.': 5154,
'K718.': 6758,
'K:)all': 8841,
'K:)i': 3669,
'K:)k..its': 8124,
'K:)k.are': 4924,
'K:)k:)good:)study': 12800,
'K:)k:)what': 11408,
'KATE': 13394,
'KAVALAN': 5458,
'KEEP': 2363,
'KING': 8032,
'KL341.': 492,
'KNACKERED': 4979,
'KNOW': 11105,
'KNOWS': 10999,
'KR': 1525,
'Kaiez...': 5258,
'Kallis': 6793,
'Kanagu': 8158,
'Karaoke': 10524,
'Kate': 12506,
'Katexxx\\""': 1206,
'Kath.': 114,
'Kay...': 1025,
'Keep': 13508,
"Ken's": 5631,
'Kent': 1640,
'Kerala': 10709,
'KeralaCircle': 8591,
'Keris': 12857,
'Kicchu': 2697,
'Kick': 13462,
'Kids': 12316,
'Kidz,': 5803,
'Kind': 10685,
'Kinda.': 5761,
'Kindly': 3468,
'King': 3837,
'Kingdom.': 8590,
'Kit': 323,
'Knock': 7505,
'Know': 8428,
'Kusruthi': 13483,
'L': 11158,
'L-oveable.': 7921,
'L8R.\\""': 4031,
'L8TR': 13414,
'LA1': 9114,
'LA3': 13187,
'LA32WU.': 9285,
'LADIES': 12968,
'LAPTOP': 5201,
'LAST': 12462,
'LATE': 1542,
'LATER': 3614,
'LAY': 10454,
'LCCLTD': 7923,
'LDN': 1505,
'LDN.': 5705,
'LE': 6888,
'LEKDOG': 8153,
'LES': 4127,
'LET': 5715,
'LETS': 5083,
'LIFE': 5857,
'LIFT': 4051,
'LIKE': 1179,
'LIKELY': 9345,
'LIKEYOUR': 4854,
'LIKING.BE': 9950,
'LILY': 1516,
'LISTEN': 4454,
'LISTENING': 13600,
'LITTLE': 7057,
'LIVE': 5918,
'LIVE.': 10332,
'LKPOBOX177HP51FL': 12022,
'LMAO': 1058,
'LOCAXX\\""': 5751,
'LOG': 12948,
'LOL': 9255,
'LONG': 5814,
'LOOK': 221,
'LOOKIN': 12658,
'LORD': 3285,
'LOST': 6591,
'LOT.': 4583,
'LOTR': 9411,
'LOTS': 3323,
'LOVE': 11409,
'LOVE..!': 2364,
'LOVE?:-|': 1218,
'LOVEJEN': 10521,
'LOVEME': 11537,
'LS1': 11776,
'LS15HB': 11944,
'LS278BB': 9395,
'LST': 3037,
'LUCKY': 308,
'LUCY': 6078,
'LUCYxx': 13514,
'LUTON': 11582,
'LUV': 150,
'LUV!': 712,
'Lacs.there': 2027,
'Lancaster,': 8476,
'Landline': 1806,
'Landline.': 9435,
'Langport.': 6568,
'Lara': 12559,
'Large': 6275,
'Last': 9732,
'Later': 1713,
'Latest': 6122,
'Laughed': 6065,
'Laughing': 4651,
'Ldn': 2877,
'Ldn,': 5383,
'LdnW15H': 6893,
'Leaf/Day=No': 5256,
'Leanne.what': 8992,
'Leave': 7108,
'Leaving': 10470,
'Left': 6309,
'Lemme': 11476,
'Lessons.': 8154,
'Let': 2841,
"Let's": 7859,
'Lets': 10546,
'Libertines': 6176,
'Life': 10384,
'Life..': 8361,
'Life\\"': 3793,
'Lifpartnr': 13770,
'Like': 12111,
'Lil': 3212,
'Limited': 1096,
'Lindsay': 8441,
'Line': 3573,
'Linerental': 3315,
'Lion': 13374,
'Lions': 6374,
'Litres': 8377,
'Live': 4035,
'Liverpool': 11295,
'Living': 11154,
'Lk': 7299,
'Lmao': 6193,
'Lmao!nice': 7930,
'Lmao.': 2815,
'Loads': 9444,
'Loan': 5141,
'Loans': 11146,
'Log': 6855,
'Logon': 1848,
'Lol': 2412,
'Lol!': 551,
'Lol.': 11681,
'Lolnice.': 9469,
'Lololo': 11435,
'London': 9307,
'Long': 1664,
'LookAtMe!,': 10774,
'LookAtMe!:': 10294,
'Looks': 2967,
'Lool!': 11060,
'Loosu': 13359,
'Lots': 11942,
'Lovable': 13468,
'Love': 9061,
'Love!!': 9464,
'Love.': 10858,
'Love:': 13757,
'Loved': 1591,
'Lovely': 2121,
'Lover': 10979,
'Lover.': 891,
'Loverboy': 13558,
'Loves': 1143,
'Loving': 8713,
'Low-cost': 1619,
'Loyalty': 7797,
'Ltd': 8477,
'Ltd,': 7922,
'Ltd.': 8687,
'Ltd.å£1,50/Mtmsgrcvd18+': 7188,
'LtdHelpDesk:': 899,
'Luck!': 3179,
'Luckily': 12542,
'Lucky': 10240,
'Lucozade': 6192,
'Lucy': 1162,
'Lul': 8010,
'Luv': 3670,
'Lux': 4666,
'Lvblefrnd': 11483,
'M': 11885,
'M.': 2802,
'M221BP.': 9048,
'M26': 13138,
'M263UZ.': 11769,
'M39M51': 1690,
'M6': 420,
'M95.': 12507,
'MAD1,': 3107,
'MAD2': 3346,
'MAHAL': 3057,
'MAKE': 7277,
'MALL': 10838,
'MAN': 9326,
'MAN!': 7570,
'MAN?': 6,
'MANEESHA': 2218,
'MARCH': 7572,
'MARSMS.': 8465,
'MAT': 6014,
'MATCH': 11182,
'MATCHED': 5384,
'MATE': 6606,
'MATE!': 236,
'MAYBE': 13831,
'ME': 13866,
'ME,': 2111,
'ME.': 13392,
'ME...': 7725,
'ME?': 6574,
'MECAUSE': 8131,
'MEET': 3409,
'MEETIN': 8249,
'MELNITE': 6477,
'MEMBERS': 101,
'MEREMEMBERIN': 4859,
'MESSAGE': 704,
'MESSED': 11396,
'MF': 12450,
'MFL': 4976,
'MIDNIGHT': 2721,
'MILLIONS': 7755,
'MIN': 13047,
'MINE': 11594,
'MINI!!!!': 13534,
'MINUTES': 11519,
'MISS': 11385,
'MISSED': 610,
'MISSIN': 7180,
'MISSY?': 12348,
'MITE': 10223,
'MIX\\"': 8294,
'MK45': 7128,
'MMM': 3838,
'MMSto': 10640,
'MO': 11413,
'MO...': 12787,
'MOAN': 9878,
'MOB': 9646,
'MOBNO': 9034,
'MOMENT': 7750,
'MON.L8RS.x': 4305,
'MONKEESPEOPLE': 13095,
'MONKEYAROUND!': 5442,
'MONO#': 1378,
'MONOC': 9348,
'MOON': 9157,
'MORAL:': 10789,
'MORE': 1521,
'MORNING': 166,
'MORNING!': 3130,
'MOST': 2510,
'MOVIE': 8190,
'MP3': 1767,
'MR': 12113,
'MR!ur': 10513,
'MRNG\\"."': 363,
'MSG': 7793,
'MSG*': 3115,
'MSG:We': 3555,
'MSGS': 10213,
'MTALK': 2563,
'MUCH': 1901,
'MUCH!!I': 7407,
'MUCHXXLOVE': 1333,
'MUM': 4760,
'MUNSTERS,': 10517,
'MUSIC': 1205,
'MUST': 10677,
'MY': 4849,
'MYSELF': 10780,
'Ma': 473,
'Macedonia': 13862,
'Macha': 11760,
'Madam,regret': 7046,
'Made': 9139,
'Mah': 7854,
'Mailbox': 9130,
'Maintain': 13194,
'Make': 6930,
'Makes': 12082,
'Making': 6268,
'Male': 4321,
'Mallika': 4904,
'Man': 12186,
'Manchester.': 13860,
'Mandan': 2206,
'Mandy': 3576,
'Many': 11752,
'Map': 11443,
'Map..': 6948,
'March': 7785,
'Maretare': 74,
'Mark': 3867,
'Mark.': 3461,
'Marley,': 12013,
'Married': 8651,
'Mathe': 11263,
'Mathews': 6851,
'Matrix3,': 10972,
'Matthew': 1077,
'Max10mins': 11129,
'May': 2941,
'May,': 2632,
'Mayb': 5935,
'Maybe': 5574,
'Maybe?!': 3152,
'Me': 1943,
'Me,': 1009,
'Means': 948,
'Meanwhile': 3443,
'Meat': 5330,
'Meds': 11720,
'Meet': 2003,
'Meet+Greet': 1826,
'Meeting': 493,
'Melle': 8443,
'Men': 8310,
'Merry': 344,
'Message': 3095,
'Message:some': 2197,
'Messages': 7173,
'Messages.': 8235,
'Messaging': 10716,
'Met': 8495,
'Michael': 12533,
'Midnight': 3382,
'Might': 7700,
'Mila,': 2194,
'Milk/day=No': 9328,
'Min': 4441,
'Mind': 3098,
'Mine': 3076,
'Minimum': 9610,
'Minnaminunginte': 11925,
'Mins': 7395,
'Misplaced': 1287,
'Miss': 5878,
'Missed': 9402,
'Missing': 12843,
'Missing*': 898,
'Mittelschmertz.': 2066,
'Mm': 6711,
'Mmm': 9474,
'Mmmm': 12376,
'Mmmm....': 5025,
'Mmmmm': 11017,
'Mmmmmm': 10396,
'Mob': 6018,
'Mob!': 12823,
'Mob?': 13216,
'MobStoreQuiz10ppm': 6605,
'MobcudB': 2143,
'Mobile': 10435,
'MobileUpd8': 9194,
'Mobiles': 8637,
'MobilesDirect': 9461,
'Mobileupd8': 13813,
'Moby': 4686,
'Moji': 6611,
'MojiBiola': 10932,
'Mom': 7971,
'Moment': 2189,
'Moms': 2248,
'Mon': 13250,
'Monday': 4023,
'Money': 4989,
'Monthly': 6227,
'MonthlySubscription@50p/msg': 2951,
"Moon's": 2302,
'Moral': 2324,
'Moral:One': 9134,
'Moral:\\Dont': 13134,
'More': 5295,
'Morning': 10297,
'Morning.': 13741,
'Mornings': 7777,
'Most': 251,
'Mostly': 12039,
'Mother': 13225,
'Motivate': 2700,
'Motorola': 13626,
'Motorola,': 588,
'Movie': 1460,
'Mr': 2335,
'Mr.': 8205,
'Ms.Suman': 2705,
'Msg': 7116,
'Msg*': 4320,
'Msg150p': 9552,
'Msg:': 3291,
'Msgs': 993,
'Much': 12068,
'Muhommad,': 13454,
'Multiply': 3473,
'Mum': 3400,
'Mum,': 459,
'Mumtaz': 5407,
"Mumtaz's": 13277,
'Murdered': 3673,
'Music': 5175,
'Must': 9916,
'Muz': 952,
'Mwahs.': 13692,
'My': 9404,
'MySpace': 12459,
'Mystery': 9795,
'N': 11015,
'N-Gage': 7672,
'N-atural.': 2370,
'N-oble.': 10348,
'NAKED...!': 5505,
'NAME': 2513,
'NAME1': 7247,
'NAME2': 1511,
'NBME': 9614,
'NEED': 12084,
'NEEDS': 6543,
'NEFT': 4011,
'NEO69': 8994,
'NETHING': 12162,
'NEVA': 4892,
'NEVER': 10268,
'NEW': 10237,
'NEWQUAY-SEND': 10480,
'NIC': 3053,
'NICE': 9853,
'NICHOLS': 6976,
'NIGHT': 2974,
'NIGHT"': 1379,
'NITE': 11528,
'NITE+2': 4499,
'NITW': 8675,
'NO': 13614,
'NO-434': 4990,
'NO-440': 10372,
'NO.': 2928,
'NO.1': 11879,
'NOK': 5514,
'NOKIA': 8422,
'NOKIA6600': 13326,
'NOT': 3455,
'NOTHING': 12397,
'NOTHING!': 3541,
'NOW': 3232,
'NOW!': 11968,
'NOW!REPLY': 1673,
'NOW.': 3859,
'NOW?': 1608,
'NRI': 6103,
'NTT': 1085,
'NUMBER': 3915,
'NUMBER-SO': 11513,
'NVQ,': 9932,
"NY's": 8384,
'NY-USA': 6109,
'Nah': 11849,
'Nah,': 9473,
'Nan': 11326,
'Nasdaq': 12472,
'Natalja': 7745,
'National': 12475,
'Nationwide': 3843,
'Natural': 3412,
'Nature': 11696,
'Near': 6945,
'Need': 11506,
'Neshanth..tel': 6973,
'Netcollex': 2707,
'Network': 12912,
'Network.': 12625,
'Networking': 12438,
'Neva': 6155,
'Never': 8070,
'Neville?': 4637,
'New': 1840,
'Neway': 9767,
'Newport': 4445,
'News!': 3515,
'Next': 11213,
'Ni8;-)': 13306,
'Nic\\""': 1650,
'Nice': 4498,
'Nice.': 1708,
'Nice.nice.how': 10365,
'Nick,': 6189,
'Nickey': 11839,
'Nigeria.': 5120,
'Nigh': 10907,
'Night': 12549,
'Nights.': 797,
'Nikiyu4.net': 10162,
'Nimbomsons.': 2966,
'Nimya.': 2006,
'Nimya..pls': 1994,
'Nite...': 9501,
'No': 5895,
'No!': 1926,
'No,': 12999,
'No.': 12694,
'No..but': 5062,
'No..few': 7500,
'No..he': 11090,
'No..its': 7860,
'No..jst': 10462,
'No1': 6676,
'No:': 10274,
'No:-)i': 7960,
'No:81151': 12706,
'No:83355!': 2508,
'NoWorriesLoans.com': 7814,
'Nobody': 9421,
'Noice.': 2249,
'Nokia': 6311,
'Nokia,': 2457,
'Nokia/150p': 13070,
'Nokia6650': 4292,
'Nokias': 10259,
'None': 4694,
'None?': 6956,
'Nookii': 1894,
'Nooooooo': 4513,
'Nope': 10063,
'Nope.': 4821,
'Nope...': 7169,
'Norcorp': 6266,
'Normal': 7582,
'Normally': 6306,
'Not': 10717,
'Nothin': 9747,
'Nothing': 8804,
'Nothing,': 6414,
'Nothing.': 6915,
'Nottingham': 7358,
'November': 877,
'Now': 10948,
'Now!': 10161,
'Now!4T&': 4501,
'Now,': 10725,
'Now.': 973,
'Now?': 2789,
'Nowadays': 4721,
'Nt': 6923,
'Nt.': 692,
'Number:': 11243,
'Nurungu': 10478,
'Nutter.': 1159,
'Nvm': 12997,
'Nw': 412,
'Nyt.': 918,
'Nyt.EC2A.3LP.msg@150p': 11805,
'O': 11233,
'O.': 9727,
'O2': 1436,
'O2FWD': 11978,
'O:-)': 7516,
'OF': 2906,
'OFF': 13605,
'OFFICE': 2663,
'OFSI': 180,
'OFTEN': 1409,
'OH': 9519,
'OJA': 12574,
'OK': 12936,
'OK,': 12604,
'OK?': 13020,
'OK?TAKE': 6190,
'OKDEN': 8277,
'ON': 8433,
'ONCALL.': 4984,
'ONE': 2181,
'ONE.': 4466,
'ONLY': 5362,
'ONLYFOUND': 1625,
'ONTO': 12170,
'OPT': 3375,
'OR': 6027,
'ORANGE': 7984,
'OREOS': 5466,
'OTBox': 13341,
'OTHERWISE': 13575,
'OUR': 2362,
'OUT': 3227,
'OUTL8R': 9749,
'OVER': 1464,
'OVERDOSE': 8420,
'Obviously,': 12868,
'Ocean..:-D': 7400,
'Of': 1062,
'Off': 345,
'Offer': 9231,
'Offer!': 440,
'Offer:': 8403,
'Offer:The': 13694,
'Oh': 312,
'Oh,': 2241,
'Oh.': 2,
'Oh...': 6954,
'Oh...i': 5746,
'Oh:)as': 11915,
'Oi': 12030,
'Oi.': 4786,
'Oic': 4219,
'Oic...': 6450,
'Ok': 885,
'Ok,': 8607,
'Ok.': 9728,
'Ok..': 5781,
'Ok...': 4404,
'Ok.ok': 9580,
'Ok?': 4570,
'Okay': 6453,
'Okay,': 655,
'Okay.': 9919,
'Okay...': 6714,
'Okey': 7647,
'Okie': 11463,
'Okie..': 569,
'Okie...': 12305,
'Okies...': 72,
'Ola': 1472,
'Old': 5984,
'Olol': 11107,
'Omg': 10044,
'Omw': 7555,
'On': 11709,
'Onam': 3685,
'Once': 2712,
'One': 6692,
'Onion-Rs.': 3295,
'Only': 12903,
'Only1more': 8718,
'Onum': 11080,
'Ooh,': 819,
'Oooh': 5097,
'Oooooh': 13418,
'Oops': 5166,
'Oops,': 7409,
'Oops.': 4271,
'Open': 6457,
'Opinion????': 1946,
'Opt': 7645,
'Opt-out': 11693,
'OptOut': 5550,
'Optout': 1473,
'Or': 10611,
'Orange': 11800,
'Orange.': 12896,
'Orange;i': 85,
'Oranges': 10506,
'Orchard': 7706,
'Order': 4379,
'Order,': 9029,
'Organizer': 13210,
'Other': 12107,
'Other..': 7282,
'Other...': 5658,
'Others': 13473,
'Otherwise': 5453,
'Our': 6596,
'Over': 7075,
'Oyea.': 11652,
'Oz?': 13557,
'P': 8764,
'P.': 2872,
'P.S': 6449,
'PA': 12303,
'PARIS': 3629,
'PARIS.FREE': 4118,
'PARK': 12578,
"PARTNER'S": 12774,
'PARTY': 428,
'PASS': 7406,
'PC': 9466,
'PC.': 1635,
'PERIL': 4268,
'PERSON.': 3265,
'PHONE': 10455,
'PHP': 12784,
'PHP.': 6736,
'PIC': 4952,
'PICK': 5949,
'PICS': 837,
'PICSFREE1': 3852,
'PIN': 8061,
'PISS': 244,
'PIX': 11500,
'PIX!': 11490,
'PLAY': 8186,
'PLEASE': 3431,
'PLEASSSSSSSEEEEEE': 11455,
'PLUS': 13278,
'PO': 2446,
'PO19': 9599,
'POBOX': 1752,
'POBOX114/14TCR/W1': 11427,
'POBOX84,': 11660,
'POBOXox36504W45WQ': 4391,
'POBox': 8606,
'POBox334,': 4759,
'POBox36504W45WQ': 2945,
'POBox365O4W45WQ': 6817,
'POBox84,': 1948,
'POD': 2078,
'POINT': 10518,
'POKKIRI': 1486,
'POLY': 3911,
'POLY#': 2856,
'POLY3': 5643,
'POLYC': 1028,
'POLYPHONIC': 6831,
'POOR': 6139,
'PORN': 1327,
'POST': 11071,
'POSTCARD': 3620,
'PREMIER.': 13294,
'PRICE': 2313,
'PRIVACY': 4102,
'PRIVATE!': 7175,
'PRIZEAWAITING': 10404,
'PROBLEM.': 13858,
'PROBTHAT': 8594,
'PROPERLY': 6828,
'PS': 8015,
'PT2': 8998,
'PUB': 939,
'PX3748': 4548,
'Pa.': 7906,
'Package': 10106,
'Pain': 10448,
'Painful': 2519,
'Panasonic': 12691,
'Pandy': 6470,
'Pansy!': 11618,
'Parents': 4517,
'Parents,': 19,
'Paris.': 13248,
'Part': 7434,
"Party's": 13125,
'Pass': 6079,
'Passwords,ATM/SMS': 5347,
'Pathaya': 1427,
'Payee.': 12340,
'Peace.': 6938,
'Peaceful': 1118,
'Penny': 11250,
'People': 12723,
'Per': 8744,
'Peripherals': 3835,
'Person': 5794,
'Personality': 10131,
'Pest': 883,
'Pete': 12792,
'Pete,is': 8777,
'Petey!noiåÕm': 13660,
'Petrol-Rs.': 7220,
'Pg': 12967,
'Ph:08704050406)': 2730,
'Pharmacy': 13142,
'Phil': 12488,
'Phoenix': 7557,
'Phone': 3478,
'Phone,': 7677,
'Phone?': 5240,
'Pic': 11462,
'Pick': 6013,
'Pics': 12807,
'Pie': 6557,
'Piggy,': 8047,
'Pin': 11797,
'Pink;u': 8168,
'Pl': 5123,
'Platt': 3111,
'Play': 9876,
'Player': 13110,
'Playin': 1873,
'Please': 5741,
'Please.': 6505,
'Pleasure': 7770,
'Pls': 6410,
'Pls.': 13399,
'Pls.i': 9169,
'Plus': 9575,
'Plyr.': 3550,
'Plz': 139,
'Plz.': 3128,
'PoBox1,': 11831,
'PoBox12n146tf15': 2082,
'PoBox12n146tf150p': 170,
'PoBox45W2TG150P': 4733,
'PoBox75LDNS7': 6686,
'PoBox84': 4591,
'PocketBabe.co.uk': 11038,
'Points.': 8283,
'Police': 13087,
'Police.': 3621,
'Polo': 9359,
'Poly': 10279,
'Poly/200p': 11901,
'Polyphonic': 3996,
'Polys.': 3344,
'Poop.': 1320,
'Pose': 7122,
'Potter': 4248,
'Pound': 12711,
'Poyyarikatur,kolathupalayam,unjalur': 13873,
"Prabha..i'm": 12605,
'Prabu': 5412,
'Prakesh': 2117,
"Prashanthettan's": 11229,
'Pray': 2127,
'Prayrs..': 3885,
'Premarica.kindly': 10209,
'Prepare': 11238,
'Presleys': 7737,
'Press': 8354,
'Prey.': 2891,
'Price': 4900,
'Princess!': 12289,
'Princess,': 5718,
'Printer': 543,
'Prize': 6197,
'Prize,': 13109,
'Prize.': 1381,
'Pro': 4401,
'Probably': 9867,
'Probably,': 3340,
'Problms': 1234,
'Prof:': 11339,
'Prof:????': 369,
'Promo': 12214,
'Promo"': 8654,
'Promotion': 5877,
'Props?': 6790,
'Proverb:': 12621,
'Provided': 6738,
'Psychiatrist': 659,
'Psychic?': 10633,
'Psychologist': 10148,
'Ptbo': 2142,
'Pub': 5490,
'Purity': 480,
'Purple;u': 6091,
'PushButton': 4047,
'Put': 334,
'Putting': 4258,
'Q': 4195,
'Q!': 7302,
'Q?': 311,
'QUITE': 2355,
'QUITEAMUZING': 4461,
'Qatar': 11585,
'QlynnBV': 273,
'Que': 2383,
'Queen,': 9542,
'Queen?': 944,
'Ques-': 10827,
'Question': 1371,
'Questions:': 6320,
'Quick': 7139,
'Quite': 5595,
'Quiz': 4269,
'Quiz.': 10751,
'Quiz.Win': 6033,
"Quote''": 13188,
'R': 8980,
'R*reveal': 5274,
'R836.': 2257,
'RALLY': 10476,
'RANDOM!': 8820,
'RANG': 3086,
"RCT'": 8679,
'READ': 13122,
'REAL': 9931,
'REAL1': 7297,
'REALISE': 6589,
'REALITY': 5732,
'REALLY': 4383,
'REBEL!': 3026,
'RECD': 1163,
'RECEIVE': 8054,
'RECPT': 5817,
'RED': 53,
'REPLY': 12257,
'REPLYS150': 5048,
'REVEAL': 6117,
'REVISION?': 5052,
'RG21': 1501,
'RGENT!': 12441,
'RIGHT': 9666,
'RING': 1983,
'RINGS:RETURN': 1440,
'RINGTONE': 4494,
'RITE': 9965,
'ROMCAPspam': 1063,
'ROSES': 7268,
'RP176781.': 5275,
'RSTM,': 2016,
'RT-KIng': 11690,
'RTM': 3804,
'RTO': 9142,
'RUDI': 285,
'RV': 152,
'Racal,': 12959,
'Rain': 3500,
'Raining!': 2425,
'Raj': 5285,
'Raji..pls': 6753,
'Rajnikant': 11373,
'Rally': 6400,
'Randy,': 6747,
'Ranjith': 6951,
'Rate': 5163,
'Rate.': 9084,
'Raviyog': 12010,
'Reach': 10728,
'Reaction': 2314,
'Read': 11666,
'Reading': 8134,
'Ready': 1631,
'Real': 10292,
'Really': 12558,
'Really...': 2586,
'Really?': 9873,
'Realy': 5137,
'Reason': 5566,
'Reason,': 1639,
'Reckon': 11234,
'Records': 12223,
'Red': 2932,
"Red;i'm": 7368,
'Ref:9307622': 26,
'Reference': 8147,
'Reflection': 11392,
'Refused': 1816,
'Registered': 12552,
'Remember': 11249,
'RememberI': 8900,
'Remembered': 8216,
'Remembr': 8332,
'Remembrs': 9791,
'Remind': 2843,
'Reminder:': 6870,
'Reminding': 3166,
'Remove': 13841,
'Rent': 6148,
'Rental': 6565,
'Rental?': 2200,
'Reply': 2374,
'Requests': 8599,
'Resend': 12635,
'Reverse': 4249,
'Rgds': 8903,
'Right': 1851,
'Rightio.': 516,
'Ring': 772,
'Ringtone': 8848,
'Ringtone!From:': 1661,
'Ringtone.': 5796,
'Ringtones': 967,
'Rock': 7004,
'Rodds1': 5345,
'Rodger': 3702,
'Rofl': 13384,
'Roger': 8538,
'Roger?': 8338,
'Romantic': 12236,
'Ron': 2198,
'Rose': 11394,
'Row/W1J6HL': 784,
'Row/W1JHL': 3306,
'Rs': 9023,
'Rs.': 9831,
'Rs.5': 5590,
'Rum': 7936,
'Ryder': 2864,
'S': 7551,
"S'fine.": 1223,
'S.': 8986,
'S....s...india': 13397,
'S...from': 5397,
'S...i': 5513,
'S..antha': 3772,
'S.I.M.': 8835,
'S.i': 3444,
"S.i'm": 1029,
'S.s:)i': 5579,
'S3XY': 4618,
'S89.': 4616,
'S:)but': 11871,
'S:)no': 9382,
'S:)s.nervous': 4820,
'S:-)if': 3775,
'S:-)kallis': 5459,
'SAD': 12524,
'SAE': 1297,
'SAE,': 5498,
'SAID': 12768,
'SAM': 8268,
'SAME': 1671,
'SARY!': 1990,
'SAT': 6864,
'SAT.LOVE': 4724,
'SAT?SOUNDåÕS': 8883,
'SAW': 1360,
'SAY': 9199,
'SCARY': 8965,
'SCOTLAND': 8513,
'SEE': 5434,
'SEE..NO': 7993,
'SEEING?': 9103,
'SEEMED': 8638,
'SELFINDEPENDENCE': 13332,
'SEND': 6780,
'SENDS': 609,
'SENT:)': 1289,
'SERIOUSLY.': 1641,
'SERVICES': 2674,
'SERVICES.': 10698,
'SEX': 10831,
'SF': 8145,
'SHEFFIELD!': 512,
'SHESIL': 11743,
'SHIT.': 1299,
'SHITIN': 7045,
'SHOP': 190,
'SHOULD': 1554,
'SHOW': 12603,
'SIB': 10625,
'SIM': 12144,
'SING': 300,
'SIR.': 7735,
'SITUATION,': 6296,
'SIX': 4813,
'SK3': 10125,
'SK38XH.': 7536,
'SK38xh,': 10280,
'SLAP': 4594,
'SLEEP..SWEET': 7013,
'SLEEPING': 13162,
'SLEEPINGWITH,': 3640,
'SLO(4msgs)': 10632,
'SMILEY': 690,
'SMS': 11138,
'SMS,': 5335,
'SMS.': 4111,
'SMSSERVICES.': 8598,
'SN': 5381,
'SNAP': 5290,
'SNORING.THEY': 7911,
'SO': 8752,
'SOFA': 12818,
'SOIREE': 6257,
'SOMEONE': 11865,
'SOMETHING': 9559,
'SOMETHING?': 7511,
'SOO': 970,
'SOON': 3945,
'SOON.C': 11136,
'SOONLOTS': 12281,
'SORRY': 9455,
'SORTED,BUT': 11187,
'SOUNDING': 5512,
'SP': 11069,
'SP:RWM': 13499,
'SP:Tyrone': 1185,
'SPAM': 12384,
'SPECIAL': 5527,
'SPECIALE': 1279,
'SPEEDCHAT': 7918,
'SPEEDCHAT,': 5158,
'SPJanuary': 7982,
'SPK': 10465,
'SPOOK': 13844,
'SPORT': 1820,
'SPORTSx': 5651,
'SPTV': 6424,
'SPUN-OUT': 12326,
'ST': 12639,
'STAPATI': 2369,
'STAR': 13101,
'STARS': 4515,
'START': 1938,
'STATION.': 7901,
'STIL': 5460,
'STILL': 10442,
'STOP': 8515,
'STOP,': 4819,
'STOP.': 8830,
'STOP2stop)': 5967,
'STOP?': 4265,
'STOPBCM': 7363,
'STOPCS': 8343,
'STORE': 5976,
'STORES': 12363,
'STRIKE': 13281,
'STU': 4285,
'STUDENTFINANCIAL': 3088,
'STUDY': 10138,
'STUPID': 6086,
'STaY': 11143,
'SUBPOLY': 3689,
'SUE': 4678,
'SUM': 8738,
'SUMMER': 2487,
'SUMTHIN?xx': 2755,
'SUNDAY': 5526,
'SUNDAY..:)': 10969,
'SURE': 4752,
'SURPRISE': 1528,
'SUZY': 2685,
'SW7': 2709,
'SW73SS': 13357,
'SWAP': 12595,
'SWEET': 2504,
'SWITCH': 6634,
'Sac': 8653,
'Sad': 8935,
'Saeed': 11431,
'Safe': 11269,
'Sagamu': 6732,
'Said:': 10273,
'Salad': 2740,
'Salam': 7713,
'Sale!': 11732,
'Sam': 1904,
'Same': 5146,
'Same,': 5559,
'Same.': 5912,
'Sankranti': 12517,
'Sara': 1969,
'Sarcasm': 6235,
'Saristar': 9503,
'Sary': 7782,
'Sat': 8082,
'Saturday!': 8611,
'Saturday,': 2954,
'SavaMob': 316,
'SavaMob,': 5317,
'Save': 6904,
'Say': 113,
'Says': 3966,
'Scared': 8844,
'School?': 9977,
'Schools': 7484,
'Science': 13747,
'Scoring': 10191,
'Scotch': 2327,
'Search': 8120,
'Second': 7411,
'Secret': 8318,
'Secured': 2901,
'See': 5920,
'See?': 6959,
'Seem': 851,
'Send': 2982,
'Sender:': 9627,
'Sender:Name': 10453,
'Sending': 5171,
'Sends': 11798,
'Sent': 5478,
'Senthil': 7831,
'Sept': 2831,
'Serena:)': 7251,
'Serious?': 7613,
'Service': 597,
'Services': 4707,
'Set': 4368,
'Seventeen': 2768,
'Sex': 1590,
'Sexy': 1995,
'Sez,': 1152,
'Sh!jas': 1876,
'Shahjahan': 13214,
"Shahjahan's": 10671,
'Shall': 3245,
'Shampain': 1879,
'Shant': 4440,
'Shb': 632,
'She': 12211,
"She'll": 8199,
"She's": 2693,
'She.s': 6928,
'Sherawat': 4252,
'Shhhhh': 11628,
'Shifad': 10078,
'Shijas': 10314,
'Shinco': 12747,
'Shit': 3882,
'Shivratri': 7482,
'Shola': 7262,
'Shop': 11021,
'Shopping': 8583,
'Shopping?': 6132,
'Shoranur': 6261,
'Short': 4200,
'Shoul': 13385,
'Should': 10218,
'Show': 1386,
'ShrAcomOrSglSuplt)10,': 7041,
'Shuhui': 8528,
'Si': 5666,
'SiPix': 11504,
'Sian...': 10917,
'Silent': 13009,
'Simple': 8636,
'Simply': 4344,
'Simpsons': 7238,
'Since': 2507,
'Sinco': 705,
'Sindu': 3277,
'Single': 1931,
'Single?': 10527,
'Singles': 7747,
'Sir': 3650,
'Sir,': 11659,
'Sir,I': 3627,
'Sirji.I': 13407,
'Sister': 2919,
'Sitting': 391,
'Siva': 1960,
'SkilGme.': 5180,
'SkillGame,': 10823,
'SkillGame,1Winaweek,': 1445,
'Sky': 4185,
'Slaaaaave': 4683,
'Sleep': 5024,
'Sleep.': 1076,
'Sleeping': 3362,
'Sleepwell&Take': 6555,
'Slide': 10985,
'Slow': 10936,
'Slowly': 8826,
'Small': 10901,
'SmartCall': 6715,
'Smile': 5790,
'Smile,D': 4608,
'Smiling!!': 12579,
'Smith': 8238,
'Smith-Switch..': 5910,
'Snd': 2254,
'So': 11636,
'So,': 12958,
'Sol': 3571,
'Sold': 9223,
'Solve': 3233,
'Some': 4431,
'Somebody': 612,
'Someone': 5580,
'Something': 6971,
'Sometimes': 5952,
'Somewhr': 4535,
'Sonetimes': 3231,
'Sony': 7938,
'Sony,': 3681,
'SonyEricsson': 12371,
'Sorry': 761,
'Sorry!': 5837,
'Sorry,': 13867,
'Sorry,in': 13144,
'Sorry.': 4061,
'Sort': 9845,
'Sos!': 8740,
'Soul': 9900,
'Sounds': 12102,
'Soup': 1044,
'Space': 13386,
'Spain': 11564,
'Spanish': 9868,
'Speak': 11201,
'Speaking': 6011,
'Special!': 9972,
'Spiral': 4709,
'Spl': 11956,
'SplashMobile:': 10381,
'Spoke': 523,
'Spook': 4133,
'Spoons': 7649,
'Spose': 4478,
'Sppok': 1678,
'Spring': 9221,
'Sprint': 13270,
'Sptv:': 11617,
'Squeeeeeze!!': 7155,
'Squishy': 12394,
'St': 11603,
'St,': 7374,
'Staff': 8479,
'StarWars3,': 10615,
'Start': 5670,
'Starts': 332,
'Statement': 5830,
'Stay': 1703,
'Staying': 12422,
'StdTxtRate.': 8376,
'Stereophonics,': 8760,
'StewartSize:': 8487,
'Still': 4884,
'Stockport,': 3802,
'Stop': 10653,
'Stop2': 3796,
'Stop?': 5840,
'Stop?txt': 10801,
'Storming': 13850,
'Stream': 1292,
'Street': 9560,
'Strip': 8688,
'Strokes!': 6683,
'Strong-Buy)': 10122,
'Student:': 167,
'Studying.': 13745,
'Stupid': 12951,
'Stupid.its': 2449,
'Stylish': 11203,
'Stylist': 13071,
'Sub.': 6116,
'Subs': 5359,
'Subscriber': 1677,
'Subscriptn3gbp/wk': 8551,
'Such': 1298,
'Suganya.': 5859,
'Sugar': 4845,
'Suite': 13447,
'Sullivan': 11438,
'Summer': 13196,
'Summers': 7753,
'Sun': 7019,
'Sunday': 4335,
'Sunscreen': 11773,
'Sunshine': 13716,
'Sup,': 13786,
'Super': 8805,
'Superb': 5428,
'Suprman': 4730,
'Sure': 11342,
'Sure!': 4152,
'Sure,': 372,
'Surely': 9204,
'Sux': 2805,
'Sweet,': 7980,
'Sweetest': 10595,
'Sweetheart': 12819,
'Sweetheart,': 3538,
'Swtheart': 2388,
'Symbol': 5012,
'Sympathetic': 1170,
'Syria': 13244,
'T': 765,
'T&C': 1815,
"T&C's": 9667,
'T&C:': 7610,
'T&Cs': 7739,
'T&Cs/stop': 13428,
'T&CsBCM4235WC1N3XX.': 6656,
'T&CsC': 7077,
"T's": 9910,
"T's&C's": 3714,
'T-Mobile': 5411,
'T-Mobile.': 5266,
'T-ruthful.': 5355,
'T.': 5634,
'T91.': 7932,
'TA': 4529,
'TAJ': 3744,
'TAKE': 8555,
'TALKBUT': 6556,
'TALKED': 5087,
'TALKIN': 3688,
'TALKING': 6175,
'TAMPA': 2424,
'TAROT': 9427,
'TBS/PERSOLVO.': 11252,
'TC': 9832,
'TC-LLC': 3820,
'TCR/W1': 2664,
'TCs': 7935,
'TCs,': 2156,
'TEL': 7877,
'TELL': 12866,
'TELLMISS': 13416,
'TEX': 2793,
'TEXD': 793,
'TEXT': 10810,
'TEXT?': 11739,
'TEXTBUDDY': 7957,
'TEXTPOD': 7602,
'TEXTS!': 2024,
'TH': 10473,
'THANKS': 2998,
'THANX': 13533,
'THANX4': 11863,
'THASA': 4888,
'THAT': 7579,
'THATåÕS': 5659,
'THATåÕSCOOL': 4417,
'THE': 12822,
'THE!': 12834,
'THEACUSATIONS..': 1675,
'THEMOB': 10330,
'THEN': 476,
'THERE': 13314,
"THERE'S": 9681,
'THERE.': 4062,
"THESE...your's": 6435,
'THEW/END...': 2613,
'THING': 7293,
'THINK': 2242,
'THIS': 3836,
'THNQ': 863,
'THO!': 10586,
'THOSE': 12447,
'TIME': 1918,
'TIME.': 1662,
'TISSCO,Tayseer.': 8812,
'TITLE': 11909,
'TO': 12332,
'TOBED': 2352,
'TODAY': 9181,
'TODAY,': 1334,
'TODAY..': 4571,
'TOMORROW': 13017,
'TOMORW.': 10844,
'TONE': 13593,
'TONE.': 1405,
'TONES!Reply': 4291,
'TONES2U': 2953,
'TONEXS': 7104,
'TONIGHT!': 12213,
'TONITE': 2499,
'TOOL!': 3354,
'TOP': 4657,
'TOPLAY': 13427,
'TOTAL': 11075,
'TOTALLY': 9978,
'TOWN': 1295,
'TOWN.DONTMATTER': 7531,
'TOWN?': 3821,
'TROUBLE?': 3776,
'TRUBLE': 7257,
'TRUE': 9591,
'TRY': 8356,
'TS&Cs': 4621,
'TULIP': 9787,
'TV': 1684,
'TV.': 2289,
'TWILIGHT': 32,
'TXT': 12477,
'TXT:': 930,
'TXTAUCTION': 266,
'TXTAUCTION!': 3248,
'TXTAUCTION!Txt': 619,
'Ta': 11782,
"Ta's": 6203,
'Ta-Daaaaa!': 12378,
'TaKe': 3282,
'Take': 10676,
'Takecare..:)': 11272,
'Takin': 2479,
'Talk': 12157,
'Tata': 6322,
'Tayseer,TISSCO': 3,
'Tb': 10145,
'Teach': 11963,
'Teacher': 9397,
'Team.': 5247,
'Tear': 9964,
'Tease': 8540,
'Tee': 6847,
'Tel': 7760,
'Telephonic': 695,
'Teletext': 5367,
'Tell': 1991,
'Ten': 10409,
'Tenants': 2081,
'Tenerife': 6586,
'Tension': 4358,
'Teresa.': 3374,
'Terms': 3292,
'Tessy..pls': 5813,
'Test': 6735,
'Text': 11256,
'Text82228>>': 1261,
'Text:': 3931,
'TextOperator': 10624,
'Thank': 12241,
'Thanks': 10472,
'Thanks.': 10377,
'Thanku': 9903,
'Thankyou': 9201,
'Thanx': 6488,
'Thanx.': 9516,
'Thanx..': 547,
'Thanx...': 11177,
'That': 7733,
"That'll": 13117,
"That's": 6153,
'Thats': 10239,
'That\x89Û÷s': 12744,
'ThatåÕs': 13547,
'The': 6706,
'TheDailyDraw)': 313,
'TheMob': 3922,
'TheMob>': 7195,
'TheMob>Yo': 5733,
'Them': 6347,
'Then': 13342,
'Then.': 13058,
'Theoretically': 9995,
'Theory:': 9263,
'There': 10921,
"There're": 6567,
"There's": 8717,
'These': 13037,
'They': 13491,
"They're": 12042,
'Theyre': 7634,
'Thgt': 7095,
'Thing': 3430,
'Things': 1941,
'Think': 6608,
'Thinkin': 13074,
'Thinks': 7539,
'This': 3735,
'Thk': 942,
'Thnx': 10550,
'Those': 12036,
'Though': 2617,
'Thought': 12315,
'Thought-': 7286,
'Thout': 1155,
'Thts': 2680,
'Thurs': 2570,
'Thurs,': 10598,
'Thursday': 10079,
'Thx.': 12633,
'Thy': 7983,
'Tick,': 567,
'Tickets': 143,
'Tiger': 8974,
'Till': 12805,
'Tim': 3833,
'Time': 10636,
'Tired.': 5111,
'Tis': 2543,
'Titles:': 11961,
'Tiwary': 13564,
'Tmorrow.pls': 12043,
'Tmr': 3965,
'Tmrw.': 1831,
'TnC': 8703,
'TnCs': 2567,
'To': 6601,
'Toa': 13688,
'Today': 10202,
"Today's": 9386,
'Today-sunday..sunday': 4636,
'Todays': 8289,
'Toledo.': 4038,
'Toll': 7508,
'Tom,': 1609,
'Tomarrow': 10722,
'Tomorrow': 689,
'Tomorrow?': 11120,
'Tone': 12263,
'Tones': 404,
'Tones,': 456,
'Tonight?': 8406,
'Too': 4596,
'Took': 11296,
'Top': 1222,
'Total': 7501,
'Touch.': 4966,
'Tour.': 6335,
'Toxic': 4737,
'Trackmarque': 6520,
'Traditions....': 153,
'Traffic': 11632,
'Transaction': 13568,
'Trav,': 9814,
'Travel': 1431,
'Treat': 1031,
'Triple': 6455,
'True': 10424,
'True.': 1413,
'Truro': 12956,
'Trust': 6452,
'Truth': 7027,
'Try': 5501,
'Try:WALES,': 289,
'Trying': 5127,
'Ts&Cs': 2687,
'TsCs': 5748,
'TsCs087147403231Winawk!Age16': 8128,
'TsandCs': 7383,
'Tsunamis': 11932,
'Ttyl': 6510,
'Ttyl!': 7249,
'Tues,': 9943,
'Tuesday': 2572,
'Tuesday.': 9319,
'Tuesday?': 6582,
'Tunde,': 11162,
'Tunji,': 6057,
'Turns': 1758,
'Twiggs': 5065,
'Twinks,': 4041,
'Twittering': 2494,
'Two': 13726,
'Txt': 606,
'Txt250.com': 10083,
'Txt:': 11943,
'TxtNO': 2049,
'Txting': 5650,
'Txts': 12554,
'Txts!': 4453,
'Tyler': 9902,
'U': 4590,
'U!': 4146,
'U!U': 10775,
"U'll": 4159,
"U're": 12169,
"U've": 13781,
'U,': 10241,
'U-find': 6289,
'U.': 164,
'U..': 10566,
'U...': 7974,
'U.CALL': 3056,
'U4': 8402,
'U?': 5824,
'U?????': 727,
'UIN': 12183,
'UK': 11524,
"UK's": 11629,
'UK,': 9224,
'UK-mobile-date': 3470,
'UK.': 4993,
'UKs': 10863,
'UNBELIEVABLE.': 12740,
'UNDERSTaND': 2151,
'UNLIMITED': 9203,
'UP': 1592,
'UP!': 11568,
'UP+NOT': 8655,
'UP.YEH': 3706,
'UP4': 13417,
'UPDAT': 7191,
'UR': 13544,
'UR...': 2820,
'URE': 10930,
'URFEELING': 4599,
'URGENT': 5725,
'URGENT!': 3110,
'URGENT!!': 1290,
'URGENT!:': 322,
'URGENT,': 10628,
'URGENT.': 11298,
'URGOIN': 554,
'URN': 6177,
'URSELF': 13476,
'US': 5825,
'USED': 181,
'USER': 3213,
'USER.': 1337,
'U\\"': 5887,
'Ubi': 3368,
'Ugadi': 13126,
'Ugh': 9153,
'Ugh.': 3342,
'Uh,': 5508,
'Uhhhhrmm': 1169,
'Ujhhhhhhh': 9803,
'Uks': 8359,
'Ultimately': 5696,
'Umma': 545,
'Umma.': 10188,
'Ummma.will': 12525,
'Ummmmmaah': 11706,
'UnSub': 443,
'Uncle': 13436,
'Under': 7522,
'Understand.': 8253,
'Unfortunately': 10991,
'Uni': 5993,
'United': 7826,
'University': 7105,
'Unless': 4963,
'Unlimited': 3124,
'Unni': 6252,
'Unsecured?': 2675,
'Unsub': 6669,
'Unsubscribe': 2095,
'Until': 406,
'Up': 838,
'Upd8': 12541,
'Update': 3041,
'Update_Now': 3140,
'UpgrdCentre': 10432,
'Ups': 1927,
'Ur': 10940,
'Urgent': 11495,
'Urgent!': 9854,
'Urgh,': 11181,
'Us': 7496,
'Use': 11390,
'Usf': 5081,
'Usher': 7179,
'Usmle,': 2145,
'Usually': 8243,
'Uve': 519,
'U\x89Ûªve': 1136,
'V': 3089,
'V,': 11918,
'V-aluable.': 7365,
'V.': 11657,
"VALENTINE'S": 1316,
'VAT': 7558,
'VIDEO': 11872,
'VILLA': 13398,
'VIOLENCE': 10343,
'VIOLET': 5564,
'VIP': 11821,
'VPOD': 11676,
'VU': 12078,
'Valentine': 1830,
'Valentines': 233,
'Valid': 12755,
'Valid12hrs': 558,
'Values...,': 11937,
'Vatian': 3971,
'Vegas': 947,
'Velly': 8169,
'Verify': 6326,
'Very': 4860,
"Vettam)'": 8219,
'ViVa..': 882,
'Video': 10359,
'Videophones': 11061,
'Vijay': 13367,
'Virgin': 1979,
'Voda': 6675,
'Vodafone': 6942,
'Vodka': 12692,
'Voila!': 286,
'Vote.': 9423,
'Voucher': 2491,
'Vouchers': 12686,
'Vry': 9344,
'W111WX': 7963,
'W14RG': 5765,
'W1A': 7816,
'W1J': 9823,
'W4': 13503,
'W45WQ': 11370,
'WA14': 11480,
'WAIT': 10270,
'WALES': 12965,
'WANNA': 11174,
'WANNATELL': 2131,
'WANT': 13334,
'WAP': 1791,
'WAS': 11557,
'WAY': 682,
'WAY2SMS.COM': 8227,
'WC1N3XX': 483,
'WE': 2164,
'WEEK': 5832,
'WELL': 744,
'WELLDA': 12375,
'WEN': 1390,
'WENT': 13098,
'WENWECAN': 5454,
'WERE': 11171,
'WEREBORED!': 13749,
'WERETHE': 5,
'WHASSUP?': 13007,
'WHAT': 9606,
'WHEN': 8828,
'WHERE': 6023,
'WHICH': 3010,
'WHILE': 8776,
'WHITE': 1023,
'WHO': 11432,
'WHORE': 12546,
'WHY': 6238,
'WIFE': 11665,
'WILD': 9089,
'WILL': 7981,
'WILL!!': 2615,
'WIN': 8922,
'WIN:': 6577,
'WINNER!': 9761,
'WINNER!!': 8959,
'WISH': 6221,
'WISHING': 11289,
'WITH': 5662,
'WITHOUT': 7162,
'WIV': 8248,
'WK': 4114,
'WK.': 13619,
'WK?': 13743,
'WKEND': 8913,
'WKENT/150P16+': 892,
'WOKE': 3825,
'WOMAN': 6046,
'WOMEN.': 13541,
'WON': 2995,
'WON!': 8,
'WONT': 984,
'WORDS': 1116,
'WORK': 12398,
'WORLD': 13415,
'WORLD...!!!': 11312,
'WORRIED!x': 13617,
'WORRY.': 10117,
'WORRY.C': 573,
'WOT': 1358,
'WOTU': 293,
'WOULD': 8839,
"WOULDN'T": 11725,
'WOW!': 5801,
'WRC': 2443,
'WRK!\\""': 3989,
'WRK.I': 2557,
'WRLD.': 5691,
'WRONG!!TAKE': 11532,
'WTF.': 3160,
'WWW.ASJESUS.COM': 9236,
'Wa,': 3557,
'Wa...': 13147,
'Waaaat??': 1269,
'Wah': 9052,
'Wah,': 9314,
'Wah...': 12062,
'Wahleykkum.sharing': 13695,
'Wait': 8623,
'Wait,': 4664,
'Wait...should': 11941,
'Wait.i': 9318,
'Waiting': 6358,
'Wake': 6336,
'Wan': 3203,
'Wan2': 12469,
'Wana': 4998,
'Wanna': 2356,
'Want': 2874,
'Wanting,': 11606,
'Waqt': 12888,
'Warm': 64,
'Was': 4920,
'Wasted...': 8213,
'Wat': 4593,
"Wat's": 7353,
'Watch': 12184,
'Watching': 522,
'Wating': 5792,
'Watr/Day=No': 7646,
'Wats': 4046,
'Waves': 3158,
'We': 9564,
"We'd": 6362,
"We'll": 5402,
"We're": 13035,
"We've": 3415,
'WeBeBurnin': 1754,
'WeLL': 5445,
'Webpage': 3890,
'Wed': 707,
'WeddingFriend': 9162,
'Wednesday.': 2236,
'Weekly': 132,
'Weightloss!': 9686,
'Weiyi': 8937,
'Welcome': 5570,
'Welcome!': 7201,
'Welcome.': 557,
'Well': 3533,
'Well,': 7129,
'Well.': 12656,
'Welp': 7569,
'Wen': 3386,
'Wendy': 5475,
'Went': 2616,
'Were': 9879,
'Westlife': 12794,
'Wet': 8996,
'We\x89Û÷re': 577,
'What': 3288,
"What's": 1579,
'Whatever,': 10553,
'Whats': 1002,
'Whatsup': 245,
'What\x89Û÷s': 11278,
'When': 4448,
"When're": 5186,
"When's": 6741,
'When?': 11056,
'Whenevr': 8133,
'Whenevr..': 9278,
'Where': 10808,
"Where's": 13616,
'Wherevr..': 12913,
"Wherre's": 8018,
'Which': 2447,
'Who': 7404,
"Who's": 10511,
'Whom': 5760,
'Whos': 2548,
'Why': 10393,
'Why.': 7556,
'Why:-)': 920,
'Wicklow,': 1841,
'Wife': 13846,
'Wife,': 10706,
'Wife.how': 4676,
'Wil': 3164,
'Will': 7763,
'Win': 7692,
'Wine': 7540,
'Wings': 1780,
'Winner': 4673,
'WinnersClub': 6138,
'Winning': 1184,
'Wins': 4528,
'Wire3.net': 8981,
'Wish': 1080,
'Wishing': 8543,
'With': 11701,
'Wk': 10963,
'Wkly': 9251,
'Wld': 10684,
'Wn': 3819,
'Woke': 8649,
'Won.': 9598,
'Wondering': 9372,
'Wont': 9898,
'Woo': 3446,
'Woodland': 9235,
'Woods': 13776,
'Woohoo!': 8795,
'Words.Evry': 8085,
'Work.': 12272,
'Working': 5076,
'World': 6868,
'World*': 850,
'World:-)..': 4844,
'Worth': 8572,
'Wot': 5752,
'Wotz': 5205,
'Would': 3216,
'Wow': 3101,
'Wow.': 8861,
'Wright': 9642,
'Wtf.': 10464,
'Wun': 12286,
'Wylie': 5551,
'X': 3607,
'X.': 2649,
'X2': 6106,
'X29.': 321,
'X49.': 535,
'X49.Your': 2380,
'XCHAT,': 566,
'XCHAT.': 7919,
'XCLUSIVE@CLUBSAISAI': 9492,
'XMAS': 4614,
'XX': 9206,
'XXUK': 6982,
'XXX': 5222,
'XXX.\\""': 2250,
'XXXMobileMovieClub:': 10603,
'XXXX.\\""': 7843,
'XXXXX': 8813,
'XXXXX.\\""': 7926,
'XXXXXX': 4029,
'X\\""': 6143,
'Xavier': 7928,
'Xmas': 4757,
'Xmas...': 10562,
'Xmas?': 8633,
'XoXo': 4270,
'Xuhui': 1434,
'XxX': 13761,
'Xy': 13176,
'Y': 6992,
'Y87.': 6382,
'Y?': 6742,
'Y?WHERE': 6263,
'YA': 8233,
'YEAH': 13412,
'YEAH,AND': 9069,
'YEAR': 3153,
'YES': 10713,
'YES!': 11924,
'YES-434': 12130,
'YES-440': 12260,
'YES.': 10569,
'YEST': 1829,
'YESTERDAY': 1432,
'YET?': 11152,
'YM': 7048,
'YMCA': 3957,
'YO': 12930,
'YOU': 10324,
'YOU!': 11322,
"YOU'RE": 7597,
"YOU'VE": 5973,
'YOU,': 12718,
'YOU,CLEAN': 12333,
'YOU.': 12141,
'YOUR': 4558,
'YOURJOB?': 936,
'YRS': 3647,
'Ya': 4544,
'Ya!': 487,
'Ya,': 6303,
'Ya:)going': 8667,
'Yahoo!': 12674,
'Yalru': 5959,
'Yar': 11974,
'Yavnt': 337,
'Yaxx': 4937,
'Yaxxx': 6090,
'Yay!': 2233,
'Yeah': 8102,
'Yeah,': 9098,
'Yeah.': 3902,
'Year': 6773,
'Year!': 13310,
'Year,': 10418,
'Year.': 1804,
'Years': 12460,
'Yeesh': 3934,
'Yellow': 6350,
'Yelow;i': 7050,
'Yen': 2451,
'Yep': 8453,
'Yep,': 2830,
'Yep.': 9693,
'Yes': 4346,
'Yes!': 11644,
'Yes,': 4206,
'Yes.': 12060,
'Yes..': 9253,
'Yes..but': 1415,
'Yes..gauti': 1844,
'Yes..he': 13769,
'Yes.he': 13030,
"Yes.i'm": 3018,
'Yes.mum': 10208,
'Yes:)from': 11536,
'Yes:)here': 928,
'Yes:)sura': 8165,
'Yest': 2955,
'Yesterday': 5511,
'Yet,': 8404,
'Yetunde': 11368,
'Yetunde,': 11970,
'Yo': 9570,
'Yo!': 82,
'Yo,': 810,
'Yogasana': 13438,
'You': 10490,
"You'd": 6451,
"You'll": 12659,
"You're": 8699,
"You've": 127,
'You?': 6802,
'Youi': 5417,
'Your': 605,
'Yours': 4125,
'Yourself': 3071,
'Yoyyooo': 5622,
'Yummmm': 10702,
'Yummy': 9710,
'Yun': 5704,
'Yunny': 11225,
'Yunny...': 4002,
'Yup': 11345,
'Yup,': 1078,
'Yup.': 6552,
'Yup...': 1478,
'Yupz...': 9468,
'Z': 2166,
'ZOE': 7123,
'ZOUK': 10651,
"Zaher's?": 11382,
'Zindgi': 10076,
'Zogtorius.': 7998,
'[colour=red]text[/colour]TXTstar': 1866,
'[sic]': 11037,
'[\x89Û_]': 7303,
'\\': 11647,
'\\"': 9187,
'\\"\rham"': 7927,
'\\".': 10074,
'\\"1.U': 10670,
'\\"Best': 6627,
'\\"Enjoy\\"': 3526,
'\\"GUD': 1232,
'\\"How': 10482,
'\\"Margaret': 7882,
'\\"She': 12380,
'\\"This': 7550,
'\\"VALUED': 241,
'\\"You\\"': 7685,
'\\"life': 12473,
'\\"morning\\"': 6294,
'\\"our': 6464,
'\\"welcomes\\"': 6380,
'\\"with': 3436,
'\\3000': 5537,
'\\A': 9544,
'\\ADP\\""': 10198,
'\\AH': 4439,
'\\ALRITE': 4776,
'\\Are': 1570,
'\\Aww': 5530,
'\\BOO': 3617,
'\\Be': 694,
'\\Because': 4168,
'\\Boost': 10601,
'\\CAN': 6253,
'\\CHA': 4120,
'\\CHEERS': 3962,
'\\Crazy\\"': 5215,
'\\Divorce': 5560,
'\\EY!': 5773,
'\\GOODMORNING': 8833,
'\\GRAN': 11400,
'\\Getting': 10053,
'\\Gimme': 3782,
'\\HELLO': 13687,
'\\HELLO\\"': 11403,
'\\HEY': 1344,
'\\Happy': 10599,
'\\Hello-/@drivby-:0quit': 6960,
'\\Hey': 8876,
'\\Hey!': 7561,
'\\Hi': 1109,
'\\Hurt': 8978,
'\\I': 1565,
'\\Im': 6359,
'\\Its': 5919,
'\\KUDI\\"yarasu': 9798,
'\\Life': 8275,
'\\ME': 9037,
'\\MIX\\"': 3251,
'\\Miss': 8320,
'\\NONE!NOWHERE': 11836,
'\\Nver': 10954,
'\\Oh': 11662,
'\\Our': 3938,
'\\POLYS\\"': 11990,
'\\POWER': 9661,
'\\Pete': 1544,
'\\Petey': 5921,
'\\SHIT': 11270,
'\\STOP': 1113,
'\\SYMPTOMS\\"': 5408,
'\\Si.como': 13450,
'\\Sometimes': 13847,
'\\Speak': 6652,
'\\Sweet\\"': 12352,
'\\The': 3139,
'\\Ur': 4074,
'\\VALENTINES': 11985,
'\\Walk': 4169,
'\\Wen': 12331,
'\\What': 11551,
'\\X\\"': 13478,
'\\YEH': 11569,
'\\alright': 7876,
"\\don't": 7873,
'\\find': 3141,
'\\get': 5246,
'\\go': 1147,
'\\how': 1332,
'\\julianaland\\"': 4969,
'\\paths': 5059,
'\\smokes': 2205,
'\\song': 9834,
'\\suppliers\\"': 10289,
'\\the': 4509,
'\\usf': 9059,
'\\what': 1,
'\\wow': 13589,
'\\wylie': 8580,
'^': 4630,
'____': 12480,
'a': 12669,
'a-?': 8296,
'a30.': 3336,
'aathi..love': 2977,
'aathi..where': 12420,
'abi': 7018,
'abi!': 8697,
'abi.': 6045,
'ability': 2050,
'abj': 2767,
'able': 5151,
'abnormally.': 13018,
'about': 2983,
'about!': 319,
'about,': 11262,
'about.': 10504,
'about...': 5481,
'about?': 3151,
'above': 7202,
'abroad': 1120,
'absence....': 4828,
'abstract?': 7953,
'abt': 10115,
'abt...': 8621,
'aburo.': 9483,
'abuse???': 12526,
'abusers': 9486,
'ac': 6135,
'ac/W/icmb3cktz8r7!-4': 8221,
'ac/smsrewards': 5628,
'academic': 13515,
'acc': 11544,
'accenture': 3865,
'accept': 12760,
'access': 1923,
'access!': 11097,
'accessible.': 13232,
'accidant': 12093,
'accident': 5016,
'accidentally': 3161,
'accommodation': 8400,
'accomodate.': 520,
'accomodations.': 8066,
'accordin': 12037,
'accordingly.': 12298,
'accordingly.or': 258,
'account': 13094,
"account's": 4006,
'account,': 10199,
'account..': 5010,
'account?': 12354,
'accounting': 968,
'accumulation.': 2774,
'ache': 11843,
'achieve': 12440,
'acid!': 9697,
'acknowledgement': 2115,
'across': 4400,
'act': 8437,
'actin': 6867,
'acting': 3274,
'action': 9076,
'action.': 6540,
'activate': 1167,
'activate,': 12500,
'activities': 5131,
'actor': 9339,
'actor.': 4189,
'actual': 9448,
'actually': 2861,
'ad': 11467,
'ad.': 10158,
'add': 6903,
'added': 3401,
'addicted': 11235,
'addie': 13464,
'address': 7587,
'address.': 5353,
"address.u'll": 9043,
'address?': 1992,
'adewale': 11933,
'adi': 6764,
'adjustable,': 11351,
'admin': 13498,
'administrator': 8245,
'administrator..': 6031,
'admirer': 3083,
'admirer.': 8484,
'admit': 1966,
'admit,i': 10781,
'adore': 7667,
'adoring': 6512,
'ads': 12493,
'adult': 10144,
'adults': 5187,
'advance"': 8858,
'advance.': 5653,
'advance..': 12963,
'advance.."': 7280,
'adventure?': 3879,
'advice': 9754,
'advice.': 5749,
'advise': 11737,
'advising': 7654,
'aeroplane.': 2898,
'affair?': 7588,
'affairs': 9420,
'affection,': 12988,
'affectionate': 11153,
'affidavit': 7167,
'afford': 7544,
'afghanistan.': 8050,
'afraid': 4807,
'africa?': 4167,
'african': 11962,
'aft': 8325,
'aft.': 4411,
'aft?': 6274,
'after': 3105,
'after.': 2978,
'afternon,': 7592,
'afternoon': 13590,
'afternoon,': 13084,
'afternoon.': 12003,
'aftr': 7298,
'again': 12185,
'again!': 13062,
'again*loving': 5195,
'again,': 8622,
'again.': 4876,
'again..': 10626,
'again...': 7355,
'again:)': 7908,
'again?': 10323,
'against': 11949,
'agalla': 3656,
'age': 4303,
'age.': 6293,
'age16': 12014,
'age16.': 5906,
'age16.150ppermessSubscription': 8526,
'age23,': 13699,
'agency': 5640,
'agent': 11115,
'agents': 10133,
'ages': 10638,
'ages,': 9261,
'ages.': 11998,
'agidhane..': 11200,
'aging': 3909,
'ago': 9305,
'ago"': 7076,
'ago,': 3366,
'ago.': 8581,
'ago...': 7089,
'agree': 7852,
'agree.': 3955,
'ah': 7376,
'ah,': 12075,
'ah...': 13480,
'ah...gee..': 8038,
'ah.now': 13649,
'ah.the': 1294,
'ah?': 1770,
'ah?sen': 3300,
'ah?what': 1174,
'aha.': 10778,
'aha:-.': 2925,
'ahead': 13289,
'ahead:': 8387,
'ahold': 13159,
'aid': 7830,
'aids': 159,
'aight': 10535,
'aight.': 13453,
"ain't": 10984,
'aint': 7535,
'air': 3357,
'air1': 5449,
'air?': 6723,
'airport': 9563,
'ak': 2304,
'aka': 4338,
'al': 5225,
'album-quite': 3135,
'album.': 5913,
'alcohol,': 1128,
'alcohol.': 7742,
'alcohol?': 13588,
'alert.': 814,
'alerts': 151,
'alex': 8088,
"alex's": 5573,
'alfie': 6962,
'algebra': 4482,
'algorithms': 8473,
'ali': 12665,
'alian': 8180,
'alibi.': 8489,
'alive.better': 7552,
'alive?': 192,
'all': 6233,
"all's": 4554,
'all,': 13395,
'all.': 13713,
'all?': 12832,
'allalo': 9017,
'allday!': 3045,
'alle': 937,
'allow': 10004,
'allowed': 1412,
'allows': 8931,
'almost': 4629,
'alone': 5021,
'alone.': 966,
'alone?': 6654,
'alone\\""': 212,
'along': 8884,
'along.': 10835,
'alot': 3017,
'already': 3746,
'already,': 2653,
'already.': 9645,
'already...': 8449,
'already..sabarish': 8691,
'already?': 5633,
'alright': 12150,
'alright,': 8329,
'alright.': 11423,
'alright.Okay.': 13111,
'alrite.': 4056,
'alrite.have': 1352,
'also': 11020,
'also,': 467,
'also.': 4649,
'also..': 6943,
'also...': 173,
'also.or': 13676,
'alter': 7903,
'alternative.hope': 7710,
'although': 593,
'alwa....!!:)': 6187,
'always': 4235,
'always,': 10588,
'always?': 9422,
'alwys': 1502,
'am': 8864,
'am,': 2428,
'am.': 9341,
'am.x': 3240,
'amazing': 11971,
'ambitious.': 11852,
'american': 10807,
'amigos': 2014,
'amk.': 8792,
'ammae....life': 12091,
'ammo': 5023,
'among': 7126,
'amongst': 11578,
'amore': 7951,
'amount': 12451,
'amplikater': 4262,
'ams': 6288,
'amt': 7256,
'amt.': 3554,
'an': 11183,
'ana': 5902,
'analysis': 5892,
'anand': 12355,
'and': 12523,
'and,': 13174,
'and/or': 13727,
'anderson': 3257,
'andrews-boy': 9812,
'andros': 12518,
'angels': 8554,
'angry': 2450,
'animal!': 5342,
'anjie!': 175,
"anjola's": 9049,
'anna': 770,
'annie!': 4008,
'anniversary': 12596,
'annoncement.': 6069,
'announced': 7765,
'announcement': 13355,
'announcement.': 2183,
'annoyin!': 11683,
'annoying': 8372,
'anonymous': 13457,
'anot...': 1517,
'another': 7141,
'another.': 11744,
'ans': 13097,
'ans.': 2671,
'ansr': 10739,
'answer': 269,
'answer.': 996,
'answer:': 8466,
'answer\\"': 7595,
'answered': 1050,
'answered:': 12409,
'answerin': 8237,
'answering': 13843,
'answers': 1307,
'answers.': 5669,
'answr.': 7272,
'anthony.': 4791,
'anti': 7387,
'any': 1530,
'any,': 11818,
'anybody': 7581,
"anybody's": 7872,
'anybody.': 4308,
'anyhow': 5665,
'anymore': 4477,
'anymore.': 6245,
'anymore...': 12465,
'anymore?': 6679,
'anyone': 11337,
'anyone.': 7074,
'anyone?': 13864,
'anyplaces': 12054,
'anythin': 13408,
'anything': 3410,
'anything!': 10097,
"anything's": 2271,
'anything,': 3950,
'anything.': 12892,
'anythingtomorrow': 8254,
'anytime': 5695,
'anyway': 544,
'anyway,': 8017,
'anyway.': 12708,
'anyways,': 13788,
'anywhere': 6118,
'anywhere.': 1847,
'apart': 792,
'apartment': 12160,
'apartment.': 9122,
'apes,': 12156,
'apeshit': 4328,
'aphexåÕs': 5338,
'apologetic.': 9784,
'apologise': 8529,
'apologise.': 848,
'apologize,': 12196,
'apology': 9340,
'app': 1383,
'app?': 5673,
'apparently': 12695,
'appeal': 11814,
'appear': 1854,
'appendix': 3967,
'applebees': 2630,
'application': 8119,
'apply': 7683,
'apply.': 13387,
'apply.2': 4139,
'apply:': 13212,
'applying,': 3114,
'appointment': 8138,
'appointment.': 13440,
'appointments': 12408,
'appreciate': 7304,
'appreciate.': 7506,
'appreciated': 7309,
'appreciated)': 3886,
'approaches': 7079,
'approaching.': 2089,
'appropriate': 8176,
'approve': 12328,
'approx': 13729,
'apps': 7052,
'appt': 3448,
'appt.': 13861,
'april': 11004,
'april.': 12813,
'aproach': 5480,
'apt': 4650,
'aptitude': 11459,
'ar': 12753,
'ar.': 1638,
'ar...': 12624,
'ar?': 8797,
'arab': 7898,
'archive.': 2268,
'ard': 3218,
'ard...': 65,
'are': 9010,
'are.': 922,
'area': 6955,
'area!': 13463,
'area.': 10974,
'area?': 2025,
"aren't": 5864,
'arent': 12974,
'arestaurant': 4186,
'aretaking': 4586,
'argentina.': 3655,
'argh': 6073,
'argue': 7405,
'arguing': 9565,
'argument': 13437,
'arguments': 11024,
'arise,': 735,
'arises': 2336,
'arithmetic': 2104,
'arm': 1898,
'armand': 3370,
"armand's": 11784,
'armenia': 11255,
'arms': 9454,
'arms...': 12798,
'arng': 7977,
'arnt': 2524,
'around': 6597,
'around.': 8011,
'around?': 8973,
'aroundn': 10619,
'arrange': 5409,
'arrange.': 5833,
'arranging': 971,
'arrested': 7839,
'arrival': 4962,
'arrive': 4524,
'arrived,': 6823,
'arsenal': 11131,
'art': 7776,
'artists.': 8953,
'arts': 5607,
'arty': 9081,
'arun': 6894,
'as': 3034,
'as?': 8270,
'asa': 1276,
'asap.': 1408,
'asap?': 1509,
"ashley's": 9212,
'ashwini': 3281,
'asia.': 2637,
'ask': 5352,
'ask.': 12155,
'ask...': 13340,
'ask:-)': 13637,
'askd': 13106,
'asked': 6909,
'asked.': 5482,
'asked:': 4702,
'askin': 2341,
'asking': 974,
'asks': 4579,
'asleep': 10796,
'asleep?': 642,
'aspects': 1110,
'ass': 2619,
'ass!': 11389,
'ass!!': 2741,
'ass*': 4115,
'ass?': 14,
'asshole..................': 11826,
'associate.': 8167,
'assume': 11639,
'assumed': 6877,
'asthma': 11088,
'astne': 693,
'astoundingly': 5219,
'asus': 4345,
'at': 248,
'at!': 6402,
'at,': 10883,
'at/your': 9689,
'at?': 10714,
'atHome': 2140,
'ate': 7393,
'ate?': 6180,
'atlanta': 2660,
'atm': 2103,
'atrocious..': 1809,
'attach': 13361,
'attached': 8970,
'attack,': 10592,
'attempt': 10364,
'atten': 8057,
'attend': 8997,
'attended': 2452,
'attending': 2079,
'attitude': 1041,
'attraction.i': 3766,
'attracts': 1019,
'attributed': 5388,
'auction': 1893,
'auction.': 9658,
'auction..': 9361,
'auction:)punj': 2124,
'audiitions': 13527,
'audition': 2317,
"audrey's": 10312,
'aunt': 1959,
'aunties': 9990,
'aunts': 5185,
'aunty': 6659,
'aunty!': 10512,
"aunty's": 2421,
'aust': 9633,
'authorise': 3512,
'auto': 11586,
'autocorrect': 7886,
'av': 6744,
'availa': 9317,
'available': 5351,
'available!': 2337,
'available..i': 8585,
'available.they': 3481,
'available?': 11429,
'avalarr': 4502,
'avatar': 3921,
'avble.': 9365,
'ave': 11633,
'avent': 3999,
'avenue': 12349,
'avin': 3359,
'avo': 4215,
'avoid': 163,
'avoiding': 9196,
'avoids': 3814,
'await': 6269,
'awaiting': 7939,
'awake': 13350,
'awake?': 6912,
'award': 6234,
'award!': 9748,
'award.': 8458,
'awarded': 10171,
'away': 10631,
'away,': 10099,
'away.': 3548,
'awesome': 2947,
'awesome,': 6617,
'ax': 679,
'ayn': 6007,
'b': 2728,
"b'day.": 12704,
'b,': 11543,
'b4': 10699,
'b4190604,': 9922,
'b4280703.': 3678,
'ba': 4131,
'babe': 11226,
'babe!': 12815,
'babe,': 11475,
'babe.': 1634,
'babe?': 4581,
'babes': 11352,
'babies!': 9663,
'baby': 3392,
'baby!': 2514,
'baby,': 10758,
'baby.': 12513,
'baby?': 12874,
'babyjontet!': 3414,
'babysit': 8481,
'babysitting': 4423,
'bac': 3913,
'back': 4355,
'back!': 1198,
'back,': 6886,
'back.': 5908,
'back..': 7156,
'back...': 9245,
'back?': 4412,
'back?.a': 8483,
'backdoor': 8796,
'bad': 7473,
'bad!': 270,
'bad,': 13108,
'bad-': 6360,
'bad.': 9683,
'badly': 8427,
'bag': 7591,
'bag..i': 6636,
'bags,': 7566,
'bags.': 6318,
'baig': 3474,
'bak': 5085,
'bakra': 4672,
'bakrid!': 834,
'balance': 6999,
'balloon!': 5894,
'bambling': 13370,
'band': 1642,
'bandages': 2988,
'bang': 280,
'bangb.': 9917,
'bank': 1474,
'banks': 5804,
'banned': 3929,
'banter': 9665,
'bao': 10296,
'bar': 5509,
'barcelona.': 4018,
'bare': 2243,
'barely': 6546,
'bari': 8355,
'barolla..': 5073,
'barred': 13752,
'bars': 10318,
'base': 1284,
'bash': 1586,
'basic': 13092,
'basically': 9539,
"basket's": 9273,
'basketball.': 8657,
'basq!ihave': 717,
'bat': 6681,
'bat:)': 3933,
'batch': 2962,
'batch!': 3810,
'batchlor': 3595,
'bath': 13620,
'bath:-)': 8631,
'bathe': 3311,
'bathe.': 10855,
'bathe...': 2769,
'bathing': 11311,
'bathing...': 5204,
'bathroom,': 6417,
'batsman:-)': 9002,
'batt': 12759,
'battery': 3268,
'bay.': 3224,
'bb': 1114,
"bb's": 3204,
'bb.': 6640,
'bc': 1178,
'bcaz': 4170,
'bck.': 7242,
'bcoz': 9826,
'bcum': 12616,
'bcums': 9856,
'bday': 3848,
'bday,': 1053,
'be': 4078,
'be.': 6633,
'be...': 1278,
'be?': 6775,
'beach': 2212,
'beads': 5173,
'bear': 6810,
'bears,': 10475,
'beauties': 2113,
'beautiful': 2387,
'beautiful,': 1438,
'beautiful,May': 11215,
'bec': 13609,
'because': 4037,
'becausethey': 10690,
'become': 1977,
'becoz': 2044,
'becz': 9124,
'bed': 2912,
'bed!': 9437,
'bed,': 995,
'bed.': 11822,
'bed...': 12653,
'bed?': 2566,
'bedrm-$900...': 12982,
'bedroom': 6161,
'bedroom...': 10388,
'beehoon...': 9985,
'been': 7754,
'been?': 59,
'beer': 2471,
'beers': 11113,
'befor': 9820,
'before': 5324,
'before.': 1783,
'before.went': 11845,
'before?': 1567,
'beforehand.': 9636,
'beg': 9378,
'beggar.': 3829,
'begging': 1303,
'begin': 4718,
'begin.': 2644,
'begins': 7640,
'begun': 4050,
'behalf': 2136,
'behave': 5727,
'behind': 3935,
'bein': 7523,
'being': 5987,
'believe': 5869,
'believe.': 13327,
'belive': 7841,
'bell': 13456,
'bellearlier': 9490,
'belligerent': 5164,
'belly': 8791,
'belongs': 9208,
'belovd': 3552,
'beloved': 951,
'ben': 2394,
'bend': 13165,
'beneath': 217,
'beneficiary': 716,
'benefits': 7509,
'beside': 6809,
'best': 9605,
'best.': 8647,
'best:)congrats...': 11128,
'best?': 8510,
'best?rply': 9626,
'bet': 252,
'beta...': 2751,
'betta': 11728,
'better': 4857,
'better.': 2812,
'better?': 2579,
'between': 5344,
'beverage': 1763,
'bevies.waz': 7969,
'beyond': 2848,
'bf': 5380,
'bf...': 3794,
'bfore': 9364,
'bhaskar,': 8438,
'bhayandar': 11804,
'bian': 4117,
'biatch!': 7878,
'bid': 2521,
'bid,': 2282,
'bids': 6476,
'big': 9745,
'big!': 4479,
'big,': 2571,
'big..|': 7005,
'big?': 1129,
'bigger': 4485,
'bigger?': 4697,
'biggest': 12350,
'bike?': 9639,
'bill': 11857,
'billed': 8071,
'billion': 13512,
'bills': 6795,
'billy': 7586,
'bilo': 4827,
'bimbo': 12267,
'bin': 5022,
'bird!': 5101,
'birds': 3619,
'birds...': 8217,
'birla': 5474,
'birth': 2931,
'birthdate': 9907,
'birthday': 4880,
'birthday.': 12745,
'birthday...': 2986,
'birthday?': 5348,
'bishan': 1508,
'bishan?': 7197,
'bit': 2603,
'bit.': 13838,
'bit...': 9215,
'bitch': 1598,
'bitch.': 2391,
'bitch..........': 1115,
'bitch?': 7137,
'bite': 8408,
'bites': 4800,
'bk': 10345,
'black': 4145,
'black..and': 1672,
'blackberry': 10301,
'blah': 2173,
'blah...': 260,
"blake's": 13536,
'blame': 6299,
'blank': 763,
'blank?': 4164,
'blanked': 13517,
'blanket': 182,
'blankets': 2911,
'blastin..': 12799,
'bleak,': 13630,
'bleh.': 7526,
'bless.get': 4097,
'blessed': 5015,
'blessing': 11148,
'blessings!': 9753,
'blimey,': 10444,
'block': 13032,
'blocked': 1630,
'blog.': 8719,
'blogging': 4831,
'bloke': 9432,
"bloke's": 13345,
'blonde,': 2344,
'bloo': 9,
'blood': 9066,
'blood,blood': 8067,
'bloody': 7847,
'bloomberg.com': 3840,
'blow': 6454,
'blown': 8285,
'blue': 10070,
'bluff': 3582,
'blur': 7668,
'blur...': 4443,
'board': 7318,
'boat': 5880,
'boat.': 13737,
'body': 1811,
'body?': 7240,
'boggy': 3304,
'bold': 4214,
'bold.': 158,
'bold2': 6737,
'bollox': 4010,
'bonus': 1127,
'boo': 6053,
'book': 12024,
'book...': 6094,
'book?': 536,
'booked': 4893,
'bookedthe': 6788,
'booking': 9967,
'books': 3457,
'books,': 3598,
'bookshelf': 5172,
'boost': 5617,
'booty': 9310,
'bootydelious': 3064,
'borderline': 5968,
'bored': 7200,
'bored.': 5678,
'bored...': 423,
'borin': 11590,
'boring': 11679,
'boring.': 7394,
'boring?!': 904,
'born!': 13509,
'borrow': 9838,
'borrow?': 3283,
'boss': 9510,
'boss?': 4642,
'boston': 2851,
'boston,': 7267,
'boston.': 6083,
'bot': 3924,
'both': 12021,
'both!': 4233,
'both.': 7291,
'bother': 1832,
'bother,': 3419,
'bother?': 13120,
'bothering': 9929,
'bottom': 10322,
'bought': 6969,
'boughtåÓbraindanceåÓa': 57,
'bout': 3075,
'bowa?': 10196,
'bowl': 2910,
'bowls': 860,
'box': 11185,
'box245c2150pm': 781,
'box334sk38ch': 8825,
'box403': 11897,
'boy': 2430,
'boy?': 4898,
'boye': 13840,
'boye,': 6653,
'boye.': 6822,
'boyfriend': 5519,
'boys': 11327,
'boytoy': 2196,
'boytoy!': 8008,
'boytoy.': 3177,
'boytoy?': 10091,
'bpo..': 1202,
'brah,': 5011,
'brain': 10095,
'brains': 4589,
'brainy': 10514,
'brand': 5785,
'brand?': 6662,
'bras': 6093,
'brats': 2429,
'braved': 8478,
'bread': 5155,
'breadstick.': 5431,
'break': 4870,
'break,': 9764,
'break.': 6490,
'break:).': 8916,
'breakfast': 6794,
'breaking': 5443,
'breath': 11323,
'breathe': 325,
'breather.': 4665,
'breeze...': 8814,
'breezy': 9780,
'bribe': 4967,
'bridge': 134,
'bridgwater': 9096,
'brief': 2030,
'bright': 13466,
'brighten': 3958,
'brilliant': 2517,
'brilliant...1thing.i': 1346,
'brilliantly.': 5484,
'brin': 12536,
'bring': 1021,
'bringing': 9849,
'brings': 3406,
'brisk': 10584,
'brison': 10597,
'bro': 8098,
'bro.': 8991,
'broad': 9551,
'broke': 12064,
'broken': 9992,
'bros': 3923,
'brothas': 4877,
'brothas?': 13842,
'brother': 7021,
'brother.': 13612,
'brothers': 1013,
'brought': 6703,
'brownie': 13641,
'brownies': 2178,
'browse': 10072,
'browser': 3818,
'browsin': 1146,
'bruce': 5497,
'brum!': 11179,
'bruv!': 1397,
'bruv,': 10037,
'bsn': 9247,
'bsnl': 7789,
'bt': 12547,
'btw': 7673,
'bucks': 10495,
'bucks,': 13157,
'bucks.': 11159,
'bud': 10884,
'buddys.': 349,
'budget': 1457,
'buen': 720,
'buff': 2072,
'buffet': 1506,
'buffet...': 11091,
'bugis': 9158,
'bugis.': 12720,
'build': 1171,
'building': 2029,
'bullshit': 7290,
'bunch': 10015,
'bundle': 7463,
'bunkers': 4762,
'buns': 4910,
'buns!': 4840,
'burden': 3371,
'burger': 13348,
'burgundy': 4965,
'burial.': 2297,
'burn,': 13076,
'burning': 7305,
'burnt': 10251,
'burrito,': 10914,
'bus': 9336,
'bus!': 7674,
'bus.': 9914,
'bus..': 654,
'bus8,22,65,61,66,382.': 3126,
'bus?': 3364,
'buses': 3743,
'busetop': 4861,
'business': 1580,
'busty': 7386,
'busy': 5064,
'busy.': 10853,
'busy.i': 145,
'but': 8888,
'but,': 444,
'butt': 9802,
'butt.': 1480,
'butting': 24,
'buttons': 11437,
'buy': 8800,
'buy.': 2716,
'buy...': 4264,
'buy?': 213,
'buyer': 11452,
'buyers.': 3767,
'buying': 13854,
'buying...': 2011,
'buying?': 787,
'buzy': 7659,
'buzz': 5557,
'bx420-ip4-5we.': 10879,
'bx420.': 13027,
'by': 5567,
'by,': 8012,
'by?': 7107,
'bye': 11499,
'bye.': 5138,
'byåÓleafcutter': 7361,
'b\x89Û÷ham.': 13801,
'båÕday': 9550,
'båÕday,': 10825,
'c': 865,
"c's": 13800,
'c,': 8475,
'c...': 10866,
'cThen': 1119,
'cabin': 9920,
'cafe': 2031,
'cage': 6492,
'cake,': 8455,
'cake...n': 11453,
'cakes.': 9720,
'cal': 4862,
'cal.': 1728,
'cal;l': 12664,
'calculated': 6244,
'calculation.': 9350,
"cali's": 3223,
'calicut': 9064,
'calicut?': 12778,
'california.': 7036,
'call': 2558,
'call,': 6371,
'call,coz': 6528,
'call.': 8578,
'call.drove': 10655,
'call.urgent.': 4539,
'call09050000327': 10094,
'call09050000327.': 8288,
'call2optout/!YHL': 1865,
'call2optout/4QF2': 3145,
'call2optout/F4Q=': 1882,
'call2optout/HF8': 7214,
'call2optout/J': 13754,
'call2optout/LF56': 9959,
'call2optout/N9DX': 5901,
'call2optout=J5Q': 135,
'call:': 6312,
'callback': 9120,
'callcost': 6202,
'calld': 5112,
'called': 8664,
'caller': 1520,
'callertune': 10374,
'callin': 7945,
'calling': 779,
'calling.': 12960,
'calling.Forgot': 3266,
'callon': 10143,
'calls': 3239,
'calls!': 11360,
'calls.': 10712,
'calls..messages..missed': 8033,
'cam': 117,
'cam,': 3856,
'camcorder': 7429,
'camcorder.': 11035,
'came': 401,
'camera': 10745,
'camera.': 3192,
'camera/video': 7194,
'camp': 12230,
'campus': 262,
'campus.': 5867,
'campus?': 356,
'can': 7015,
"can't": 11259,
"can't,": 7288,
'can,': 5985,
'can.': 7695,
'can...': 445,
'can.dont': 12504,
'can?': 9894,
'canada': 2488,
'canal.': 13150,
'cancel': 8142,
'cancel,': 870,
'canceled,': 1280,
'cancelled': 5280,
'cancer.': 7467,
'canlove': 876,
"cann't": 8930,
'cannot': 10025,
'cannot?': 13624,
'cant': 12188,
'cant.': 4716,
'cantdo': 7003,
'can\x89Û÷t': 9508,
'canåÕt': 9383,
'cappuccino': 5663,
'cappuccino*': 12833,
'caps.': 1978,
'captaining': 10913,
'car': 2650,
'car...': 439,
'car..ente': 6536,
'car.so': 919,
'car?': 9309,
'card': 12793,
'card!': 4839,
'card.': 1248,
'cardiff': 13700,
'cardin': 10152,
'cards': 1852,
'care': 2551,
'care!': 2227,
'care.': 13524,
'care.umma': 4853,
'care:-)': 1148,
'careabout': 4549,
'cared': 2869,
'cared,': 5034,
'career': 3269,
'careful': 11230,
'carefully:': 9154,
'careless.': 133,
'cares': 8072,
'caring': 8857,
'caring,': 4084,
'carlin': 11869,
'carlos': 4435,
'carlos,': 2549,
'caroline': 5303,
'carpark.': 12467,
'carry': 1374,
'carryin': 1603,
'cars': 10104,
'cars.': 10539,
'cartons': 7245,
'cartoon': 8632,
'cartoon,': 13662,
'case': 2764,
'case.': 10579,
'case...': 11078,
'cash': 6952,
'cash,': 1304,
'cash-balance': 4033,
'cash-in': 8116,
'cash.': 7016,
'cashed': 3995,
'cashto': 8941,
'casing': 5422,
'cast': 925,
'casting': 8614,
'castor?': 8261,
'casualty': 8418,
'cat': 6668,
'catch': 3546,
'catches': 10811,
'catching': 70,
'categories': 6317,
'caught': 10395,
'cause': 9763,
'causes': 6487,
'causing': 11407,
'cave\\".': 4794,
'caveboy': 5263,
'cbe': 2395,
'cc100p/min': 764,
'ccna': 11977,
'cd': 5270,
'cedar': 5671,
'celeb!': 8794,
'celeb!4': 4188,
'celebrate': 8957,
'celebrate.': 5297,
'cell': 498,
'cell?': 9374,
'census.': 5845,
'center': 12724,
'centre': 8890,
'century': 4995,
'cereals.': 11034,
'certainly': 13404,
'certificate': 2501,
'cha': 7055,
'chad': 13839,
'chain': 7366,
'chain.': 8685,
'challenge': 5597,
'challenge?': 2224,
'challenging': 10642,
'champ': 8442,
'champlaxigating,': 12314,
'champneys.': 9182,
'chance': 13485,
'chance,': 11422,
'chance.': 7519,
'chances': 12548,
'change': 10820,
'change,': 656,
'changed': 7432,
'changed!': 5436,
'changed.': 5875,
'changes': 6290,
'chaps': 1989,
'character': 4887,
'character?': 2405,
'charge': 2039,
'charge)': 10905,
'charge.': 2338,
'charged': 6384,
'charges': 2766,
'charity': 8046,
'charity.': 8480,
'charles': 12876,
'charlie': 1944,
'charming': 5637,
'chart': 4447,
'charts': 5623,
'chase': 1988,
'chasing': 6498,
'chat': 8214,
'chat!': 6092,
'chat,': 921,
'chat..': 1800,
'chat...': 2163,
'chat.\\""': 5719,
'chat?Im': 9416,
'chatter!': 3196,
'chatting': 5487,
'chatting!': 11098,
'cheap': 12334,
'cheap:-)': 2926,
'cheaper': 10525,
'cheaper,': 12205,
'cheaper.': 7345,
'cheat': 10891,
'cheating.': 253,
'chechi': 9765,
'chechi.': 1291,
'check': 1369,
'check.': 3498,
'check?': 13731,
'checkboxes': 9923,
'checked': 11009,
'checking': 13137,
'checkup': 7635,
'cheek': 318,
'cheers.': 10747,
'cheery': 3842,
'cheese': 2601,
'cheese.': 7124,
'cheesy': 6017,
'cheetos': 11257,
'chennai': 3502,
'chennai:)': 923,
'chennai:)because': 3536,
'chennai:)i': 11727,
'chennai:-)': 1822,
'cheque.': 1747,
'cherthala.in': 11966,
'chest': 3572,
'cheyyamo?and': 2052,
'chez': 8035,
'chg': 6827,
'chgs': 1007,
'chicken': 4231,
'chickened': 5165,
'chik.': 8898,
'chikku': 9617,
'chikku,': 7637,
'chikku..': 12919,
'chikku..:-)': 2871,
'chikku..ali': 6125,
'chikku..going': 10848,
'chikku..il': 4763,
'chikku..k,': 2350,
'chikku..simple': 9555,
'chikku..wat': 8353,
'chikku:-)': 13402,
'chikku:-):-DB-)': 8022,
'chikku:-);-)B-)': 10386,
'child': 10290,
'childish': 11685,
'children': 9132,
'chile,': 12424,
'chill': 4130,
'chillaxin,': 1889,
'chillin': 2669,
'china': 1208,
'china.': 3495,
'chinchillas': 662,
'chinese': 8241,
'chinese.': 8517,
'chinky': 10612,
'chiong': 6196,
'chip': 4181,
'chocolate': 9166,
'choice': 9097,
'choice.': 4929,
'choices': 3367,
'choose': 2582,
'choose.': 7324,
'chords': 360,
'chores': 6482,
'chosen': 3969,
'chrgd@50p': 10335,
'christ': 1236,
'christmas': 4014,
'christmas.': 2686,
'christmassy': 13852,
'chuckin': 6329,
'church': 8162,
'church..': 8824,
'cine': 13135,
'cine...': 3068,
'cinema': 52,
'cinema,': 6626,
'cinema.': 3137,
'citizen': 1794,
'city.': 8521,
'citylink': 6349,
'claim': 8460,
'claim,': 9994,
'claim.': 9891,
'claimcode': 9209,
'claims': 4565,
'claire': 9313,
'clarification': 7451,
'clash...': 2689,
'class': 10779,
'class,': 3489,
'class.': 6695,
'class:-)': 9776,
'class?': 10256,
'classes': 8627,
'classes,': 13026,
'classmates?': 3471,
'claypot': 33,
'cld': 2731,
'clean': 6064,
'clean...': 13792,
'cleaning': 7032,
'clear': 5235,
'clear,': 10363,
'cleared': 509,
'clearing': 6332,
'clearly.': 4289,
'clever': 4455,
'click': 6169,
'cliff': 8934,
'cliffs': 3197,
'clip': 8025,
'clock': 12946,
'clock,': 945,
'clock.': 4795,
'clocks': 9136,
'close': 8424,
'close.': 11294,
'close?': 13855,
'closeby': 4182,
'closed': 13309,
'closed,including': 4891,
'closed.': 9589,
'closer': 1488,
'closer..': 10093,
'closes': 1331,
'closes?': 11246,
'closingdate04/09/02': 122,
'cloth': 4213,
'clothes': 8838,
'clothes,': 2473,
'cloud': 7234,
'club': 2673,
'club!': 12404,
'club,': 12795,
'club4mobiles.com': 1461,
'clue,': 9228,
'cm': 1986,
'cmon': 2773,
'cn': 5421,
'cnl': 267,
'coach': 1098,
'coast...': 6433,
'coat': 13011,
'coaxing': 7513,
'coccooning': 3697,
'cochin': 8099,
'cock': 2654,
'cock!': 6341,
'cocksuckers.': 10975,
'coco': 1867,
'code': 11654,
'code:': 9357,
'coffee': 6159,
'coherently,': 2826,
'coimbatore.': 7102,
'coin': 9611,
'coins': 1835,
'coins.': 6321,
'coins???': 10062,
'cold': 9601,
'cold!': 2858,
'cold,': 929,
'cold.': 5683,
'collages': 3569,
'collect': 1524,
'collected': 9327,
'collected.': 8909,
'collecting': 5955,
'collecting.': 10134,
'collection': 2398,
'collection.': 2506,
'colleg?': 5926,
'college': 12947,
'college.': 3734,
'college?': 12032,
'college?xx': 648,
'color': 11277,
'colour': 8493,
'colours': 2800,
'colours!': 12489,
'comb': 4734,
'combination!': 13577,
'combine': 11358,
'come': 9963,
'come.': 9012,
'come...': 7815,
'come..til': 13301,
'come:)i': 13152,
'come?': 7085,
'comedy..cant': 9128,
'comes': 9596,
'comes!': 11527,
'comes,': 12591,
'comes.': 10693,
'comes..': 11967,
'comes?': 8435,
'comfey;': 11099,
'comin': 11570,
'comin.': 11630,
'comin...': 2033,
'coming': 9670,
'coming.': 5473,
'coming.tmorow': 9512,
'coming?': 13642,
'comingdown': 4901,
'comment': 5591,
'commercial': 4173,
'commercial?': 7060,
'commit': 10776,
'common': 7042,
'community': 3755,
'community.': 9960,
'comp': 1649,
'comp.ofstuff': 12492,
'companies': 2526,
'company': 1014,
'company!': 10160,
'company.': 6858,
'company?': 6906,
'compare': 9721,
'competition': 11135,
'complain': 12360,
'complaining': 3209,
'complaint': 1361,
'complementary': 4803,
'complete': 5118,
'completed': 9984,
'completely': 1595,
'completes': 8194,
'completing..': 9288,
'complexities': 4392,
'complimentary': 6291,
'comprehensive': 357,
'compromised': 7469,
'compulsory': 782,
'computational': 199,
'computer': 13683,
'computer..': 4983,
'computer?': 2927,
'computerless.': 3605,
'computers?': 374,
'comuk.220cm2': 7333,
'conacted': 741,
'concentrate': 5036,
'concentrating': 303,
'concentration': 4293,
'concern': 9148,
'concert': 10918,
'concert?': 4899,
'conclusion': 1396,
'condition': 8351,
'conditions': 13765,
'conditions,': 12741,
'conditions?': 800,
'conducts': 2545,
'conected.': 6366,
'conference': 6501,
'confidence': 3006,
'confidence,': 4237,
'configure': 9595,
'confirm': 5133,
'confirm.': 6074,
'confirmd': 6947,
'confirmed': 11325,
'confirmed...': 2278,
'conform': 1526,
'conform.': 2361,
'confused': 10720,
'confused...': 11029,
'confuses': 12534,
'congrats': 1964,
'connect': 5267,
'connected': 4805,
'connection': 5415,
'connections': 12873,
'connections,': 6128,
'cons...': 7203,
'consensus': 11694,
'consent': 2540,
'conserve': 5620,
'consider': 6420,
'considering': 1658,
'consistently': 3299,
'console.': 7616,
'constant': 5433,
'constantly': 3093,
'contact': 9936,
'contacted': 6215,
'contacts': 10719,
'content': 7883,
'content,': 7028,
'content.': 13190,
'contents': 6088,
'continent?': 746,
'continue': 2411,
'contract': 5462,
'contract.': 6059,
'contribute': 1877,
'control': 5426,
'convenience.': 10248,
'conversations': 4366,
'converted': 7580,
'converter': 6746,
'convey': 2883,
'conveying': 4625,
'convince': 887,
'convincing.': 11160,
'convincing..just': 5213,
'cook': 12445,
'cooked.': 4323,
'cookies': 4129,
'cookies.': 7828,
'cooking': 10382,
'cooking.': 2439,
'cool': 9971,
'cool!': 4298,
'cool,': 1785,
'cool.': 4299,
'cooped': 4909,
'cooperative': 10819,
'copied': 879,
'copies.': 8373,
'coping': 3146,
'copy': 7606,
'corect': 3485,
'cornwall.': 5049,
'corporation': 1407,
'corrct': 10920,
'correct': 13322,
'correct.': 10782,
'correct.!': 6935,
'correction.': 2396,
'correction?or': 9160,
'corrupt': 7423,
'corvettes': 13366,
'cos': 5045,
'cos...': 12856,
'cosign': 11999,
'cost': 12311,
'cost.': 4016,
'cost?': 4541,
'costing': 835,
'costs': 4779,
'costs,': 13356,
'costumes.': 8906,
'costå£3.75max': 5214,
'cough': 1795,
'coughing': 10304,
'could': 9901,
"couldn't": 11888,
'couldnåÕt': 12190,
'count': 725,
'countin': 8533,
'country': 989,
'country.': 9890,
'coupla': 11206,
'couple': 1051,
'courage': 13744,
'course': 11399,
'course:)': 10981,
'course?': 13230,
'courtroom': 12192,
'cousin': 12359,
'cousin...': 4547,
'cover': 8683,
'coveragd': 7115,
'covers': 2046,
'coz': 8024,
'cozy.': 3159,
'cr': 11314,
'crab': 13001,
'crab..': 9005,
'crammed': 7275,
'cramps': 11122,
'crap': 13707,
'crash': 12940,
'crashed': 8450,
'crashing': 3849,
'crave': 6062,
'craving': 11070,
'craziest': 12101,
'crazy': 9741,
'crazy,': 2305,
'crazy..': 2009,
'cream': 9733,
'created': 12748,
'creative.': 317,
'creativity': 6364,
'credit': 7209,
'credit!': 11764,
'credit,': 10537,
'credit.': 2865,
'credit?': 5432,
'credit\\""': 10226,
'credited': 790,
'credits': 287,
'credits!': 6098,
'credits,': 5288,
'creep': 6926,
'creepy': 8311,
'cres,ubi': 1499,
'cricket': 12671,
'cricketer': 8027,
'crickiting': 297,
'cried': 639,
'crore': 284,
'crore\\"': 11329,
'cross': 2437,
'crowd': 2077,
'crucial': 5585,
'cruise': 10082,
'cruisin': 3332,
'crushes..!!!;-)': 3807,
'cry': 6902,
'cry.': 4782,
'cry...': 3328,
'cst': 7656,
'cstore': 13123,
'ctla': 4874,
'cuck': 3721,
'cud': 12406,
'cuddle': 5420,
'cuddle!': 13522,
'cuddled': 7736,
'cuddling': 12478,
'cuddling..': 11317,
'cudnt': 13215,
'cultures': 8893,
'cum': 1084,
'cum.': 10167,
'cum...': 203,
'cum?': 4385,
'cumin': 6786,
'cup': 6494,
'cupboard': 9841,
'cuppa': 9270,
'curfew.': 2905,
'curious': 866,
'current': 11240,
'currently': 674,
'curry': 1256,
'curtsey?': 4093,
'cust': 926,
'custcare': 2651,
'custom': 13872,
'customer': 1257,
'customer,': 9624,
'customer.': 7679,
'cut': 7958,
'cut.': 11332,
'cut?': 833,
'cute': 11702,
'cute:)': 2737,
'cutest': 470,
'cutie.': 10574,
'cutting': 10410,
'cuz': 4124,
'cya...': 4017,
'cyclists': 1293,
'cysts.': 2267,
'd': 1883,
'da': 9786,
'da,': 10422,
'da.': 636,
'da..': 6727,
'da..al': 12763,
'da..always': 8181,
'da..please:)': 3987,
'da..today': 13296,
'da.i': 5043,
'da:)': 4294,
'da:)good': 8439,
'da:)he': 3558,
'da:)how': 7417,
'da:)i': 34,
'da:)nalla': 13793,
'da:)urgent:)': 1345,
'da:)whats': 11447,
'da:-)': 10421,
'da:-)i': 12957,
'dabbles': 884,
'dad': 12092,
'dad.': 3015,
'daddy': 2884,
'daddy...': 7904,
'daily': 13500,
'daily.': 794,
'daily:)': 908,
'damn': 1924,
'dancce': 900,
'dance': 7869,
'dancin,': 3090,
'dancing': 292,
'dane': 8570,
'dang!': 187,
'danger.': 7306,
'dao': 8200,
'dare': 12471,
'dark': 4687,
'dark.': 11927,
'darker': 3514,
'darkness,': 3235,
'darlin': 1112,
'darlin..': 7320,
'darling': 8983,
'darlings': 3065,
'darlings!': 12216,
'darren': 10578,
'dartboard.': 11917,
'dat': 13124,
"dat's": 4066,
'dat.': 9472,
'dat..': 12457,
'dat?': 8516,
'data': 5975,
'date': 8077,
'date.': 6171,
'date?': 3417,
'dates': 13282,
'dating': 3132,
'dating?': 2245,
'dats': 2034,
'dawns': 5708,
'day': 1267,
'day!': 1836,
'day!2find': 4340,
'day,': 5140,
'day,has': 2813,
'day.': 1088,
'day.!': 5178,
'day..': 8818,
'day...': 10665,
'day..\\"': 3451,
'day:)except': 10787,
'day?': 10911,
'day\\"': 9582,
'days': 6650,
'days.': 7427,
'days...': 7724,
'days.he': 5960,
'days.so': 10197,
'days.will': 4669,
'days.̬n': 425,
'daytime': 13708,
'daywith': 11767,
'de': 10228,
'de.': 3307,
'de.am': 3690,
'de:-)': 1196,
'de:-).': 5816,
'dead': 9807,
'dead!Well': 13006,
'dead...': 10298,
'dead?': 5787,
'deal': 13177,
'deal...Farm': 1341,
'deal:-)': 5437,
'dealer': 12553,
"dealer's": 3238,
'dealing': 5688,
'deals': 12405,
'dear': 10735,
'dear"': 11572,
'dear,': 494,
'dear-loving': 11930,
'dear.': 1996,
'dear..': 9419,
'dear.."': 4001,
'dear...i': 6587,
'dear..i': 9568,
'dear.Rakhesh': 12599,
'dear.take': 10357,
'dear?': 13205,
'dear?y': 2535,
'dearer': 8405,
'dearly': 7339,
'dearly.': 9242,
'death': 45,
'debating': 227,
'dec': 2376,
'decide': 4487,
'decide...': 6351,
'decided': 6342,
'deciding': 5357,
'decimal': 9240,
'decision': 11555,
'decision,': 7285,
'decision?': 15,
'decisions': 10219,
'deck': 8148,
'decking?': 8342,
'decorating)': 6087,
'dedicate': 8157,
'dedicated': 7972,
'deduct': 2176,
'deep': 8860,
'deepak': 3337,
'def': 6874,
'defeat': 9532,
'definite': 8863,
'definitely': 13401,
'definitly': 10272,
'degree.': 9369,
'degrees.': 4067,
'dehydrated.': 9091,
'dehydration': 10815,
'del': 93,
'delTOMORROW': 2435,
'delay.': 12652,
'delayed': 6216,
'delete': 9726,
'deleted': 3652,
'delicious': 2620,
'deliver': 2845,
'delivered': 503,
'delivery': 7399,
'delivery.': 9768,
'deluxe': 8989,
'dem!!!': 624,
'dem.': 11280,
'demand': 94,
'den': 10212,
'den,': 1981,
'den...': 7809,
'dengra.': 2331,
'dent!': 6990,
'dentist.': 6807,
'denying': 9968,
'department': 11316,
'department,': 5903,
'dependents': 11609,
'depends': 6070,
'depends.': 13022,
'deposit': 4873,
'deposited': 2144,
'depressed': 1187,
'depressed.': 12431,
'depression': 11195,
'der..': 7818,
'derek': 8381,
"derek's": 12545,
'describe': 6134,
'description': 10536,
'desert': 12649,
'deserve.': 9712,
'desparate': 10694,
'desparately': 2987,
'desperate': 4229,
'despite': 7727,
'dessert.': 3871,
'destination': 4311,
'destiny': 10203,
'detail': 4974,
'detailed': 8629,
'details': 9013,
'details!': 13393,
'details.': 2604,
'details..i': 10558,
'details:)': 13484,
'determine': 1827,
'deus': 3661,
'develop': 8143,
'developed': 7840,
'developer': 6998,
'dey': 9290,
'dey,hope': 3666,
'dey.i': 1454,
'dha': 6838,
'dhina': 9077,
'dhorte': 12143,
'di': 9075,
'dial': 6385,
'dialogue': 1952,
'diamonds': 2187,
'diamonds.': 4179,
'dice,': 11299,
'dick': 963,
'dick!': 4019,
'dict': 12265,
'dictionary.': 4442,
'did': 11151,
"did'nt": 965,
"did't": 12029,
'did.': 3631,
'did?': 1213,
'diddy': 12121,
"didn't": 9703,
"didn't.": 10843,
'didnt': 3851,
'didnt.': 6429,
'didntgive': 10931,
'didn\x89Û÷t': 7347,
'didnåÕt': 6133,
'die': 9790,
'die.': 3800,
'die...': 8191,
'died': 12270,
'died,': 10375,
'died.': 12535,
'diesel': 8716,
'diet.': 1654,
'diet?': 11161,
'dieting': 11417,
'diff': 10090,
'differ': 13444,
'differ.be': 7090,
'difference.': 13302,
'different': 11074,
'different.': 11993,
'different?': 13762,
'difficult': 6002,
'difficult..': 327,
'difficulties,': 1353,
'dificult': 6044,
'digi': 6682,
'digital': 2633,
'digits)': 6581,
'dignity': 13274,
'dime': 2798,
'dimension': 8819,
'din': 5836,
'dine': 1458,
'dined': 8451,
'dinero': 8904,
'ding': 5582,
'dinner': 9785,
'dinner.': 12881,
'dinner.msg': 6071,
'dinner?': 13307,
'dint': 7307,
'dippeditinaDEW,': 13249,
'direct': 8537,
'directly': 4123,
'directly.': 7422,
'director': 6635,
'directors.': 6751,
'dirt,': 6995,
'dirtiest': 2828,
'dirty': 2992,
'dis': 9674,
'dis,': 2556,
'disappeared,': 10077,
'disasters': 5488,
'disastrous': 11302,
'disc': 11584,
'disclose': 4098,
'disconnected': 903,
'discount': 6403,
'discreet': 4949,
'discuss': 10777,
'discussed': 4109,
'disk...you': 10118,
'dislikes': 11222,
'dismay': 708,
'dismissial': 11319,
'display': 1252,
'distance': 10246,
'distract': 1819,
'disturb': 11232,
'disturbance.might': 1788,
'disturbing': 4640,
'divert': 7114,
'division': 13552,
'division,': 5506,
'divorce.': 500,
'diwali': 9178,
'dl': 11067,
'dled': 13103,
'dnt': 268,
'do': 3097,
'do!': 4775,
'do,': 3074,
'do.': 11406,
'do..': 1092,
'do.Interested?': 5734,
'do?': 2005,
'dob': 324,
'dobby': 3290,
"dobby's": 10434,
'doc': 11377,
'dock,': 4609,
'docs': 7143,
'doctor': 5626,
'doctor.': 8100,
'doctor?': 10137,
'documents': 7205,
'dodda': 4598,
'dodgey': 11042,
'does': 8925,
'does,': 2287,
"doesn't": 13528,
"doesn\\'t": 11955,
'doesnt': 13740,
'doesn\x89Û÷t': 13173,
'doesnåÕt': 4191,
'dog': 13338,
'dogg': 9301,
'doggin': 11754,
'dogging': 5766,
'doggy': 3712,
'dogs': 8967,
'dogwood': 12370,
'doin': 5698,
'doinat': 950,
'doing': 9428,
'doing.': 9897,
'doing?': 6803,
'doing?how': 1214,
'doke.': 131,
'dollar': 11992,
'dollars': 13474,
'dollars.': 3005,
'domain': 1718,
'don': 5441,
"don't": 10451,
"don't,": 7965,
'don,t': 8952,
"don\\'t": 9471,
'done': 1647,
'done!': 10607,
'done,': 5028,
'done.': 8144,
'done...': 13064,
'done/want': 2539,
'done?': 9325,
'donno': 11682,
'dont': 9742,
'dont.': 13859,
'donyt': 9507,
'don\x89Û÷t': 3709,
'donåÕt': 4347,
'dooms.': 7172,
'door': 3908,
'door,': 12134,
'door:)': 3637,
'door?': 5486,
'doors': 8309,
'dorm': 2274,
'dorm.': 10024,
'dose': 2536,
'dosomething': 3191,
'dot': 441,
'double': 6314,
'double-faggot': 3100,
'doubles': 12938,
'doubt': 987,
'doug': 13597,
'dough': 10042,
'down': 6925,
'down.': 12300,
'down...': 10174,
'down:)': 6613,
'down?': 12313,
'download': 13559,
'download,': 11579,
'downloaded': 7575,
'downloads': 6931,
'downs': 13714,
'downstem': 10667,
'dr.': 8610,
'drama': 8562,
"dramastorm's": 6149,
'dramatic.': 12027,
'drastic.': 7810,
'draw': 8316,
'draw.....Please': 5002,
'draws!': 9019,
'dream': 10600,
'dream.': 2460,
'dream.love': 2862,
'dreams': 7096,
'dreams,': 11910,
'dreams.': 10876,
'dreams....u': 13531,
'dress': 6145,
'dressed': 7233,
'dresser': 10123,
'drink': 12618,
'drink,': 2500,
'drink.pa': 7525,
'drinkin': 2497,
'drinkin,': 3925,
'drinking': 4360,
'drinks': 4220,
'drive': 11920,
'drive\\"': 207,
'driver!': 8500,
'drivin': 8502,
'drivin...': 13377,
'driving': 1439,
'driving,': 13652,
'driving...': 1971,
'drizzling': 11383,
'drms': 5318,
'drop': 6337,
'drop.': 3591,
'dropped': 1738,
'drops': 4110,
'drove': 13773,
'drpd': 5350,
'drug': 2703,
'drug.': 6162,
'drugdealer': 1263,
'drugs': 12258,
'drugs!': 5703,
'drum': 2822,
'drunk': 3949,
'drunk!': 6142,
'drunkard!': 13319,
'drunken': 8211,
'drvgsTo': 700,
'dry': 10738,
'dryer': 901,
"dsn't": 3462,
'dt': 5726,
'dual': 12667,
'dub': 13567,
'dubsack': 5953,
'ducking': 1793,
'dude': 6774,
'dude!': 9170,
'dude,': 9549,
'dude.': 5942,
'dudes': 10814,
'due': 2949,
'duffer': 3880,
'dun': 12476,
'dungerees': 5004,
'dunno': 13704,
'dunno...': 7833,
'duo': 1808,
'durban:)': 8995,
'durham': 11068,
'during': 809,
'dusk': 13362,
'dust?': 2544,
'duvet': 3377,
'dwn': 8696,
'dying': 4034,
'e': 8104,
'e-mail?': 11356,
'e-threats.': 13561,
'e.': 506,
'e.g': 12117,
'e.g.23G.': 5911,
'each': 2677,
'ear': 12570,
'earlier': 2796,
'earlier-we': 2560,
'earlier.': 2999,
'earlier...': 6396,
'earliest': 9724,
'early': 8710,
'early,': 10759,
'early.': 11484,
'early..': 4677,
'early...': 1182,
'early?': 4142,
'earn': 10822,
'earning': 7794,
'ears': 2735,
'earth': 12756,
'earth&sofa': 5211,
'easier': 7642,
'easier.': 2771,
'easiest': 8577,
'easily': 1606,
'east': 11245,
'easter.': 6019,
'easy': 7464,
'easy,': 5687,
'easy.': 2842,
'eat': 5931,
'eat.': 12915,
'eat?': 1712,
'eaten': 11434,
'eaten?': 5604,
'eatin': 1429,
'eatin,': 9388,
'eating': 11613,
'eating.': 9192,
'eating?': 2321,
'ebay': 11755,
'ec2a.': 8613,
'ecstasy?': 5310,
'edge.': 8702,
'edhae,': 11133,
'edition...': 8523,
'edrunk': 1906,
'education': 10872,
'educational': 1774,
'edukkukayee': 8426,
'edward': 7798,
'edwards': 2659,
'ee': 12094,
'eerie': 11623,
'eerulli': 982,
'effect...': 3356,
'effects!': 4420,
'efficient...': 2778,
'eg': 254,
'egg': 2528,
'egg-potato': 10023,
'egg.': 10639,
'eggs-pert': 3831,
'ego': 6563,
'eh': 5947,
'eh?': 2887,
'eight': 12611,
'eight,': 7481,
'eighth': 9648,
'eighth?': 5179,
'eightish': 10308,
'either': 12735,
'either.': 5611,
'el': 830,
'ela': 1657,
'elaborate': 7223,
'elaborating': 5535,
'elaine,': 1173,
'elama': 10614,
'elaya.': 13213,
'eldest': 7844,
'elections': 11008,
'electricity': 11969,
'elephant': 3817,
'elliot': 3226,
'else': 6042,
'else,': 5325,
'else.': 6140,
'else....': 7507,
'else?': 13413,
'elsewhere': 8009,
'elsewhere.': 630,
'elsewhere?': 9179,
'em': 2276,
'email': 10867,
'email.': 5262,
'email?': 13119,
'emailed': 8358,
'embarassed': 1452,
'embarassed,': 13285,
'embarassing."': 5249,
'embarrassed': 9766,
'embassy': 1628,
'emergency': 3061,
'emerging': 4980,
'emigrated': 1172,
'employee': 3646,
"employer's": 7630,
'empty': 4230,
'en': 408,
'end': 13206,
'end,': 10092,
'end.': 11601,
'ended': 3099,
'ended.': 11860,
'ending': 1364,
'endowed.': 13560,
'ends': 1286,
'ends.': 9825,
'ends..': 11093,
'enemies': 12630,
'enemy': 10575,
'enemy..': 13853,
'energy': 6996,
'energy.': 10721,
'energy..': 8673,
'energy\\"': 4521,
'engaged': 9249,
'engalnd!': 1597,
'engin?': 9561,
'english': 4771,
'enjoy': 4626,
'enjoy!': 6760,
'enjoyed': 6709,
'enjoying': 8514,
'enketa': 5224,
'enough': 9953,
'enough,': 7619,
'enough.': 10888,
'enough?': 8832,
'enter': 6328,
'entered': 4917,
'enters': 7851,
'entertain': 13429,
'entertaining': 1651,
'entey': 13724,
'entire': 7881,
'entirely': 11939,
'entitled': 8700,
'entrepreneurs': 11756,
'entropication....': 5080,
'entry': 13580,
'enuff': 2719,
'enuff..': 7788,
'envelope': 10970,
'envelope,': 10742,
'envelope.': 8993,
'envy': 11395,
'epi,': 9271,
'epsilon': 12204,
'equally': 1301,
"er'ything!": 5753,
'ericson': 2777,
'ericsson': 13572,
'errors': 9226,
'ertini': 2753,
'eruku.': 13131,
'erupt,': 446,
'esaplanade': 6899,
'escalator...': 5541,
'escape': 7967,
'escape.': 10320,
'ese': 8739,
'eshxxxxxxxxxxx': 10818,
"espe'll": 549,
'especially': 1039,
'especially.': 1768,
'esplanade..': 8870,
'esplanade?': 11847,
'essay': 2837,
'essential': 7769,
'establish': 9500,
'eta.': 10136,
'etc': 7740,
'etc.': 6021,
'ethnicity': 12087,
'europe?': 6147,
'evaluation': 8635,
'evaporated': 1618,
'eve': 1812,
'eve.': 7425,
'eve...i': 6950,
'eve?': 3543,
'eveB-)': 6050,
'eveB-).': 9447,
'even': 2946,
'evening': 7460,
'evening!': 6840,
'evening.': 4497,
'evening?': 9962,
'evenings': 2972,
'event': 845,
'event?': 8258,
'events': 8224,
'eventually': 4622,
'ever': 13821,
'ever!': 6478,
'ever!!': 11086,
'ever,': 13571,
'ever:': 3765,
'every': 4075,
'everybody': 5113,
"everybody's": 3684,
'everyboy': 7885,
'everyday': 8927,
'everyone': 13015,
'everyone.': 10951,
'everyone...': 9230,
'everyones': 988,
'everyso': 4339,
'everything': 9387,
'everything.': 3952,
'everything\\".': 2184,
'everywhere': 3016,
'everywhere...': 13364,
'evey': 8556,
'eviction': 4825,
'evil': 5584,
'evn': 8019,
'evng': 2885,
'evo.': 8006,
'evry1': 8020,
'evrydy': 6445,
'ex': 13083,
'ex-wife': 7426,
'ex...': 8582,
'ex?': 5108,
'exact': 8706,
'exactly': 12662,
'exam': 8375,
'exam,': 5147,
'exams': 283,
'exams,': 51,
'excellent': 3062,
'excellent.': 5098,
'except': 1510,
'excited': 1858,
'exciting': 691,
'excuse': 11440,
'excused.': 7732,
'excuses': 5927,
'excuses)': 12123,
'exe': 2759,
'executive': 4846,
'exercise,': 8724,
'exeter': 3167,
'exhaust': 11065,
'exhausted': 7729,
'exhausted.': 8115,
'exhibition...': 8949,
'exist,': 10182,
'exmpel:': 5419,
'expect': 12168,
'expecting': 6779,
'expensive': 9664,
'expensive.': 11731,
'experience': 12717,
'experience.': 13102,
'experiment.': 10291,
'expert': 3625,
'expired': 7040,
'expired..so': 5522,
'expiry.': 12000,
'explain': 7752,
'explicit': 13185,
'explicitly': 4375,
'expression?': 5009,
'ext.': 4731,
'exterminator': 8393,
'extra': 13680,
'extra,': 8801,
'extract': 4912,
'ey?': 11989,
'eye': 12386,
'eyes': 2333,
'eyes.': 8357,
'eyes...': 1828,
'eå£nd': 4682,
'f': 8990,
'fab': 11874,
'faber': 7917,
'face': 10347,
'face.': 9035,
'face...............asssssholeeee................': 2426,
'facebook': 3587,
'facebook,': 12907,
'facebook.': 2056,
'fact': 11381,
'fact,': 2722,
'fact.': 10563,
'fact:': 2261,
'factory': 6658,
'facts': 6167,
'faded': 8000,
'faggy': 8122,
'faglord': 10494,
'failed': 4833,
'failed.': 6496,
'fainting': 11502,
'fair': 8714,
'faith,Evening': 8831,
'fake': 6208,
'fake..my': 3863,
"fakeye's": 12859,
'fal': 10038,
'falconerf': 9463,
'fall': 799,
'fallen': 2329,
'falling': 12780,
'falls': 5014,
'falls.': 10938,
'fals': 13115,
'famamus....': 12182,
'familiar': 6250,
'family': 69,
'family.': 8911,
'family..': 5007,
'famous': 8626,
'fan...': 402,
'fancied': 11212,
'fancies': 5243,
'fancy': 5577,
'fans': 1181,
'fantastic': 1242,
'fantasy': 1340,
'far': 2382,
'far!': 1868,
'far.': 1354,
'far...': 2924,
'farm': 2979,
'fast': 3395,
'fast!': 12100,
'fast.': 8094,
'fast...': 3425,
'fast.pls': 10163,
"fast?'": 68,
'faster': 6040,
'fastest': 4418,
'fat': 3038,
'fat!': 4818,
'fat...': 2299,
'fated': 2511,
'father': 6776,
'father.': 8411,
'father:': 12806,
'fathima': 791,
'fathima.': 7608,
'fats...': 871,
'fatty': 2342,
'fault': 10021,
'fault&al': 7842,
'fault&fed': 5737,
'fault,': 9480,
'fault.': 13121,
'fav': 7144,
'fave': 4348,
'favor.': 2866,
'favour': 1614,
'favour.': 5001,
'favourite': 11946,
'favourite.': 759,
'fb': 9700,
'fb.': 5476,
'fear': 9266,
'feathery': 4388,
'features': 6937,
'feb-april': 6063,
'feb.': 9884,
'february': 11471,
'february.': 11643,
'fedex': 7988,
'feel': 8557,
'feel.': 9713,
'feel..': 6378,
'feel?': 2944,
'feeling': 13537,
'feeling.': 4978,
'feeling.wavering': 2676,
'feels': 9631,
'fees': 12783,
'feet*': 6355,
'feet,': 11261,
'fell': 451,
'fellow?': 12790,
'felt': 331,
'female': 1814,
'feng': 2076,
'festival': 10679,
'fetch': 5655,
'fetch?': 11364,
'fetching': 8816,
'fever': 6413,
'fever,': 4081,
'fever:)': 13857,
'few': 8634,
'few\\"': 6315,
'fidalfication?': 10850,
'field': 6641,
'field.': 8141,
'fiend/make': 7273,
'fifteen': 2203,
'fifth': 13811,
'fight': 5933,
'fighting?': 257,
'fightng': 10034,
'fights.': 3497,
'figure': 9524,
'figures,': 13865,
'figures...': 4850,
'figuring': 2442,
'file': 7600,
'files': 4312,
'fill': 2019,
'filled': 13661,
'fills': 11747,
'film': 533,
'film?': 5863,
'films': 5648,
'films.': 1734,
'filthy': 10039,
'filthyguys.': 7931,
'final': 5532,
'finalise': 10236,
'finally': 5523,
'finally...': 3585,
'finance.': 66,
'financial': 10994,
'find': 1691,
'find.': 7541,
'finding': 9367,
'finds': 11796,
'fine': 12364,
'fine!': 3757,
'fine!\\"': 6108,
'fine,': 5642,
'fine.': 12808,
'fine.,': 40,
'fine..': 11964,
'fine..absolutly': 100,
'fine.Inshah': 7748,
'fine:)': 572,
'fine:)when': 10946,
'fingers': 11651,
'fingers..?': 9987,
'finish': 4745,
'finish?': 8386,
'finished': 4711,
'finished.': 7625,
'finishes': 13055,
'finishing': 4156,
'fink': 9833,
"finn's": 1318,
'fire.': 9164,
'firefox.': 6460,
'fireplace.': 12715,
'fires.Are': 5768,
'first': 6632,
'first.': 10881,
'first..': 532,
'first...': 6933,
'first?': 6562,
'fish': 5601,
'fishhead': 6271,
'fishrman': 5990,
'fit': 6418,
'fiting': 4030,
'five': 6824,
'five.': 10177,
'fix': 2638,
'fixed': 8947,
'fixedline': 2349,
'fixes': 7117,
'flag': 9078,
'flaked': 1040,
'flaked.': 12109,
'flaky': 9358,
'flame.': 1720,
'flash': 2817,
'flash.': 12356,
'flat': 326,
'flat,': 775,
'flatter': 3977,
'flavour': 1823,
'flea': 2628,
'fletcher': 12249,
'flies': 8902,
'flight': 6152,
'flight!': 9711,
'flights': 8762,
'flim.': 9941,
'flip': 7337,
'flippin': 5281,
'flirt': 5621,
'flirt,': 4450,
'flirt?': 3578,
'flirting': 6005,
'floating': 10096,
'flood': 3970,
'floor,': 1015,
'floor?': 7230,
'floppy...': 4914,
'florida': 4951,
'flow': 5873,
'flower': 3217,
'flower...': 11542,
'flowers': 9996,
'flowing': 5709,
'fluids': 5925,
'flurries': 10165,
'flute': 9752,
'fly..': 1494,
"fly..i'm": 9074,
'flying': 5652,
'flyng': 12980,
'fml.': 11193,
'fo': 8412,
'folks': 8772,
'follow': 5440,
'followed': 12904,
'followin': 147,
'following': 201,
'fond': 9644,
'fondly': 3649,
'fone': 1922,
'fone...': 12085,
'fones,': 2490,
'food': 1235,
'food,': 10293,
'food.': 3981,
'food...': 1540,
'food?': 8894,
'fool': 2648,
'fooled': 11472,
'foot': 5656,
'football': 7147,
'footbl,crckt..': 10606,
'footie': 2645,
'footprints': 11952,
'footprints?': 8281,
'footy': 2231,
'for': 5951,
'for!': 8336,
'for.': 12582,
'for?': 6837,
'force': 481,
'forced': 12788,
'foreign': 12254,
'forever': 2071,
'forever!!!': 13255,
'forever!\\""': 3562,
'forever,': 3645,
'forever....': 13647,
'forevr': 11758,
'forfeit...': 3458,
'forget': 5957,
'forgets': 2587,
'forgive': 2401,
'forgiven': 7457,
'forgiven!': 5747,
'forgiveness?\\"': 1781,
'forgot': 9504,
'forgot.': 9297,
'forgot..': 5624,
'forgotten': 10895,
'form:)clark': 5161,
'formal...': 11282,
'formally.Pls': 11719,
'format': 5107,
'formatting...': 4796,
'forms': 12607,
'forth': 599,
'fortune': 9058,
'forum': 8117,
'forums': 2727,
'forums.': 1099,
'forward': 7351,
'forwarded': 10971,
'forwarding': 3705,
'forå£38': 4272,
'found': 11808,
'four': 1189,
'four,': 11807,
'fourth': 13226,
'fox': 10593,
'fps': 3411,
'fr': 2458,
'fraction': 12292,
'frank...good': 5190,
'frauds.': 1571,
'freak': 5277,
'freaked?': 2973,
'freaking': 6209,
'freaky': 7237,
'free': 8360,
'free!': 1026,
'free,': 8729,
'free-send': 3472,
'free.': 3388,
'free..': 10682,
'free...': 13668,
'free?': 3393,
'freedom': 653,
'freefone': 3944,
'freek': 1463,
'freely,': 7246,
'freezing': 12243,
'fren': 13671,
"fren's": 7038,
'french': 8486,
'frens': 10055,
'frens...': 11216,
'frequently': 1489,
'fret.': 1373,
'fri': 13505,
'fri!': 10957,
'fri.': 4520,
'fri?': 10440,
'friday': 9777,
'friday.': 6254,
'friday.hope': 1727,
'friday?': 10358,
'fridays': 92,
'fridge!': 18,
'fried': 2379,
'friend': 12295,
"friend's": 13234,
'friend,': 6878,
'friend-of-a-friend': 9499,
'friend.': 10159,
'friends': 6620,
'friends,': 5319,
'friends.': 5058,
'friends..': 8127,
'friendsare': 12854,
'friendship': 11079,
'friendship:': 880,
'friendships': 3490,
'fring': 12309,
'fringe': 8069,
'frm': 8558,
'frnd': 6700,
'frnd,': 6631,
'frnd..': 1699,
'frndZ': 6678,
'frnds': 7135,
'frnds..': 6900,
'frndship': 2909,
'frndshp': 12764,
'frndsship': 3740,
'frnt': 5398,
'fro': 1757,
'frog': 5802,
'from': 7937,
'fromm': 11354,
'front': 10328,
'frosty': 13655,
'frwd': 9672,
'frying': 12219,
'ft.': 11044,
'fuck': 9997,
'fucked': 6286,
'fuckin': 11975,
'fucking': 4945,
'fucks': 12680,
'fuelled': 2132,
'fujitsu': 6279,
'fujitsu,': 2631,
'ful': 9475,
'fulfil': 4070,
'full': 9101,
'full..': 5332,
'fullonsms.com': 7410,
'fumbling!': 592,
'fun': 265,
'fun.': 2453,
'fun...': 8855,
'fun?': 2087,
'function': 2367,
'functions?': 11066,
'fundamentals': 11953,
'funeral': 9156,
'funk': 8397,
'funky': 4488,
'funny': 9476,
'funny...': 10658,
'funs': 6980,
'furniture': 5032,
'further': 76,
'fusion': 399,
'future': 890,
'future.': 11363,
'fyi': 9982,
'g': 13116,
"g's": 8209,
'g,': 8644,
'g.': 10452,
'g696ga': 5945,
'g?': 11414,
'gET': 11169,
'ga.': 13445,
'gain': 7549,
'gained': 721,
'gal': 893,
'gal.': 13789,
'galileo': 2502,
'gals': 11635,
'gals..': 4426,
'game': 11558,
'game!': 3556,
'game,': 9216,
'game.': 5916,
'game?': 9254,
'games': 2114,
'games!': 11457,
'games,': 5822,
'gandhipuram': 3460,
'ganesh': 12950,
'gang': 645,
'gap': 8871,
'gaps': 1487,
'garage': 1191,
'garage.': 1151,
'garbage': 9604,
'gari': 6415,
'garments': 4804,
'gas': 9036,
'gate?': 555,
'gauge': 9417,
'gautham': 743,
'gautham..': 5539,
'gautham?': 3823,
'gave': 2948,
'gay': 11565,
'gayle.': 4705,
'gays': 10205,
'gaytextbuddy.com.': 1132,
'gbp/sms': 10618,
'gd': 7068,
'gd&the': 9951,
'gd,now': 12890,
'gd.': 2252,
'gd...': 3985,
'gd?': 11663,
'ge': 8596,
'gei.': 4950,
'general': 13804,
'generally': 4645,
'genius': 1370,
'gentle': 13100,
'gentle.': 5119,
'gentleman': 8109,
'genuine': 10906,
'gep': 2790,
'ger': 11698,
'get': 4610,
'get!': 3508,
'get.': 3280,
'get?': 1090,
'getiing': 1071,
'geting': 828,
'gets': 7228,
'getsleep': 4486,
'getstop': 9021,
'gettin': 410,
'gettin.': 6759,
'getting': 4802,
'getting?': 13016,
'gf': 12050,
'gf?': 5944,
'ghodbandar': 7112,
'gibbs.': 5710,
'gibe': 13293,
'gift': 11738,
'gift.': 13607,
'gift?': 12246,
'gifted': 8273,
'giggle': 4353,
'gigolo?': 3623,
'gimme': 6051,
'girl': 1158,
'girl,d': 12046,
'girl.': 478,
'girl..!': 3435,
'girl??': 5200,
'girlfrnd': 13492,
'girlie': 11695,
'girls': 13200,
'girls.': 9862,
'gist': 807,
'giv': 9643,
'give': 8569,
'give.': 1406,
'given': 405,
'gives': 8416,
'gives.': 13129,
'giving': 9044,
'glad': 11674,
'glad,': 8747,
'glad.': 11465,
'glands': 8615,
'glasgow!': 4113,
'glass': 10428,
'glo': 9269,
'global': 8340,
'glorious': 9275,
'glory,': 12008,
'gloucesterroad': 299,
'go': 4044,
'go,': 1048,
'go.': 6541,
'go..': 9040,
'go...': 4644,
'go..a': 931,
'go.wait': 13019,
'go2sri': 11602,
'go?': 7403,
'goal': 839,
'goals': 10474,
'goals/team': 1446,
'gobi': 1962,
'god': 10836,
'god!': 7956,
"god's": 786,
'god.': 2432,
'god..': 10439,
'god..taken': 9846,
'god.You': 4852,
'god.not': 8798,
'goes': 7390,
'goggles': 347,
'goin': 6447,
'goin,': 3032,
'goin...': 11672,
'goin2bed': 2907,
'goin?': 9456,
'going': 6733,
'going,': 13822,
'going.': 7101,
'going?': 8280,
'gold': 12710,
'golden': 3437,
'gon': 13814,
'gona': 237,
'gone': 10282,
'gone.': 1572,
'gong.': 11223,
'gonna': 12232,
'gooD': 5729,
'good': 4278,
'good,': 8188,
'good.': 6179,
'good..': 3994,
'good...fine': 13828,
'good...no': 2416,
'good.environment': 2652,
'good:)dhanush': 537,
'good:)when': 2792,
'good?': 4386,
'goodies!': 7186,
'goodmate': 1281,
'goodnight': 4122,
'goodnite': 9007,
'goods.': 1312,
'google': 4318,
'gorgeous': 10770,
'gossip': 12253,
'got': 8076,
'got.': 10069,
'got...': 6897,
'gota': 11205,
'gotany': 10466,
'goto': 12707,
'gotta': 11378,
'gotten': 11905,
'gotto': 4647,
'govt.instituitions': 4374,
'gprs': 7773,
'gpu': 12282,
'gr8': 2325,
'gr8.': 1735,
'grab': 881,
'grace': 11790,
'graduated': 8319,
'gram': 8815,
'grams': 626,
'grand': 7031,
'grandfather': 1574,
'grandma': 1317,
'grandmas.': 7456,
'granted': 3208,
'graphics': 11082,
'grasp': 12163,
'grateful': 1011,
'grave': 4749,
'gravy.': 2225,
'gray,': 9177,
'great': 8491,
'great!': 11085,
'great,': 7474,
'great.': 11041,
'great..bhaji': 4895,
'great.bye': 4250,
'greatest': 12901,
'greatly': 4681,
'greatness.': 6377,
'greece.': 13677,
'green': 3394,
'green,': 7848,
'greet': 7571,
'greeting..': 5605,
'greetings': 2836,
'grief': 10643,
'grinder?': 9805,
'grinule': 10179,
'grooved': 3884,
'groovy.': 10176,
'groovying': 6170,
'ground.': 9299,
'ground.amla': 3603,
'group': 2624,
'grow.': 12015,
'growing': 10645,
'grown': 6857,
'grr': 9088,
'grumble': 5037,
'gt': 3959,
'gua...': 6060,
'guaranteed': 8763,
'guaranteed!': 6407,
'gucci': 8548,
'gud': 10036,
'gud..': 12672,
'gud..k,': 11545,
'guess': 13223,
'guess,': 12952,
'guess.': 7897,
'guess...': 11401,
'guessed': 3063,
'guesses': 3659,
'guessing': 719,
'guide': 8789,
'guilty': 12470,
'guilty...': 7705,
'guitar': 8040,
'guoyang': 10973,
'guy': 7933,
"guy's": 12277,
'guy,': 10925,
'guys': 991,
'guys.': 3459,
'gv': 4116,
'gving': 8293,
'gym': 3272,
'gym!!!': 10657,
'gym.': 13139,
'gym?': 8345,
'gymnastics': 12575,
'ha': 10949,
'habba..hw': 3225,
'habit..': 8945,
'had': 13287,
'had.': 5183,
'had..ya': 10649,
"hadn't": 8595,
'haf': 11866,
'haha': 13304,
'hai': 3297,
'hai..........': 12917,
'hail': 13835,
'hair': 10570,
'hair.': 3777,
'haircut.': 9816,
'hairdressers': 6905,
'half': 2399,
'half-8th': 7857,
'half8th': 531,
'hall': 3040,
'halla': 197,
'ham': 13112,
'hamper!!!': 11828,
'hamster': 6471,
'hand': 455,
'hand,': 4740,
'hand..': 3980,
'hand:)': 13728,
'handed': 12839,
'handing': 11981,
'handle': 8321,
'hands': 4373,
'hands!': 12817,
'hands...': 7056,
'handset': 1740,
'handset?': 7067,
'handsome': 4986,
'handsomes': 3387,
'hang': 8734,
'hanger': 3888,
'hangin': 12581,
'hanging': 4043,
'hanks': 6163,
'happen': 10904,
'happend': 13618,
'happend?': 12320,
'happened': 11114,
'happened?': 4149,
'happenin': 3322,
'happening': 11881,
'happening,': 13771,
'happening?': 3286,
'happens': 4992,
'happier': 6842,
'happily': 7783,
'happiness': 9056,
'happiness.': 12178,
'happiness..': 1400,
'happy': 12206,
'happy!': 11699,
'happy,': 13446,
'happy.': 1188,
'happy..': 2288,
'happy...': 3676,
'happy?': 10254,
'hard': 4076,
'hard!': 12971,
'hard.': 8388,
'hard...': 5259,
'hardcore': 12233,
'harder': 13756,
'hardly': 11328,
'harlem.': 12816,
'harri': 9399,
'has': 12928,
'has,': 3308,
"hasn't": 1973,
'hasnt': 7711,
'hate': 2266,
'haul': 5223,
'haunt': 1846,
'hav': 13057,
'hava': 6648,
'have': 8125,
'have.': 11655,
'have?': 9722,
'haven': 1483,
"haven't": 1871,
'havent': 4671,
'havenåÕt': 10851,
'havin': 2211,
'having': 10549,
"havn't": 5826,
'hdd': 5788,
'he': 2377,
'he!': 8266,
"he'll": 3747,
"he's": 2640,
'he...': 7065,
'head': 4738,
'headache': 10837,
'headin': 1648,
'heading': 13,
'heads': 11517,
'headstart': 6229,
'head\x89Û_': 1217,
'heal': 458,
'hear': 11596,
'hear,': 11900,
'hear.': 8823,
'heard': 10962,
'hearing': 6781,
'heart': 5972,
'heart,heart': 1135,
'heart-': 12770,
'heart.': 10648,
'heart..': 136,
'heart....gn': 8866,
'hearted': 2714,
'hearts,not': 5612,
'heater': 4883,
'heater?': 249,
'heavy': 1585,
'heavy...': 9534,
'heavy?': 11531,
'hectic': 2853,
'hee': 10785,
'hee.': 13186,
'held': 9281,
'held?': 6551,
'helen': 13279,
'hell': 3711,
'hell,': 4280,
'hella': 12417,
'hello': 7225,
'hello,': 11621,
'hello-ed': 102,
'help': 5907,
'help,': 2179,
'help.': 760,
'help:)': 2285,
'help:08700469649.': 12991,
'help:08712400602450p': 9462,
'help?': 7929,
'helpful': 9220,
'heltini..Iyo': 7902,
'hen': 6276,
'hence': 6308,
'hep': 8603,
'her': 9413,
'her,who': 4615,
'her.': 13224,
'her...': 9033,
'her.I': 6016,
'her.love': 12880,
'her.my': 7731,
'her.she': 13715,
'her.will': 10128,
'her:': 4971,
'her?': 7825,
'here': 7863,
'here!': 7078,
'here,': 7454,
'here,remember': 12070,
'here.': 4225,
'here..\\""': 4729,
'here.pls': 552,
'here:)': 4790,
'here>>': 1912,
'here?': 7976,
'heron': 11468,
'herself': 10497,
'hes': 4208,
'hesitant': 12074,
'hesitate.': 11442,
'hesitation': 3042,
'hex': 1937,
'hey,': 4244,
'hey.': 3271,
'heåÕs': 11386,
'hi': 9079,
'hi.': 12841,
'hidden': 3039,
'hiding': 6961,
'high': 9239,
'high,': 9280,
'high.': 11512,
'highest': 11460,
'hilarious-also': 12809,
'hill': 1772,
'hill.': 7384,
'hill.....': 10054,
'hills': 7820,
'hillsborough': 1274,
'him': 11116,
'him!': 5078,
'him.': 10043,
'him..': 4429,
'him:)then': 5298,
'him?': 8297,
'himself.': 10744,
'himso': 1385,
'hint': 4713,
'hip': 7688,
'hire': 3903,
'his': 11488,
'his/her': 9045,
'history': 2153,
'hit': 1857,
'hit.': 4490,
'hitman': 5307,
'hits': 12330,
'hitter.anyway': 10103,
'hittng': 9047,
'hiya': 9940,
'hlday': 4065,
'hme': 10803,
'hmm.': 5515,
'hmmm': 4856,
'ho': 10812,
'ho.': 13088,
'hockey': 2916,
'hogidhe..chinnu': 11891,
'hol': 5697,
'hold': 7858,
'holding': 12842,
'hole,': 3211,
'holiday': 5578,
'holiday,': 12136,
'holiday.': 12499,
'holiday..so': 2732,
'holla': 2392,
'holla?': 9583,
'hollalater': 9782,
'hols': 10351,
'holy': 7749,
'home': 12929,
'home!But': 4810,
'home,': 7248,
'home.': 2867,
'home..': 8086,
'home...': 10338,
'home.check': 7950,
'home.left': 13075,
'home.love': 6206,
'home.wot': 12425,
'hon': 7349,
'hon!': 13040,
'honest.': 8946,
'honesty': 946,
'honey': 12990,
'honey.': 12531,
'honeymoon': 12863,
'hont.': 7811,
'hoo': 8413,
'hook': 4607,
'hooked': 9488,
'hoops': 4574,
'hop': 295,
'hope': 7025,
'hope,afternoon': 9546,
'hope?': 1680,
'hoped': 5928,
'hopeful...': 12020,
'hopefully': 5770,
'hopefully,': 9793,
'hopes': 10510,
'hoping': 1940,
'hor...': 7564,
'horniest': 12203,
'horny': 1215,
'horny,': 732,
'horrible': 6024,
'horse': 8036,
'hospital': 12434,
'hospital.': 9822,
'hospitals': 10519,
'hostel': 1093,
'hostel.': 1721,
'hostile': 3492,
'hot': 4719,
'hot,': 12247,
'hot.': 13554,
'hotel': 13772,
'hotels': 4698,
'hour': 11575,
'hour?': 4316,
'hourish?': 3278,
'hours': 4744,
'hours.': 12048,
'house': 1917,
'house,': 306,
'house.': 3942,
'house?': 3657,
'houseful': 4232,
'housewives!': 2332,
'housework': 6379,
'housing': 2420,
'how': 5375,
"how're": 1351,
"how's": 724,
"how've": 7489,
'how?': 2139,
'howard': 8984,
'howard?': 2195,
'however': 13569,
'hows': 13156,
'hp,': 436,
'hppnss,': 7676,
'hr': 10608,
'hrs': 13455,
'hrs.': 10610,
'html': 11189,
'http://alto18.co.uk/wave/wave.asp?o=44345': 5614,
'http://careers.': 1807,
'http://doit.': 6242,
'http://gotbabes.co.uk.': 1362,
'http://img.': 9062,
'http://tms.': 12426,
'http://wap.': 7682,
'http://www.bubbletext.com': 2503,
'http://www.e-tlp.co.uk/expressoffer': 734,
'http://www.e-tlp.co.uk/reward.': 6729,
'http://www.urawinner.com': 821,
'http://www.wtlp.co.uk/text.': 353,
'huai': 3692,
'hubby': 5035,
"hubby's": 5055,
'hudgi': 5418,
'hug': 4632,
'hug..': 9930,
'hug?...': 11184,
'huge': 6114,
'hugging': 10692,
'hugh': 10392,
'huh,': 1632,
'huh...': 10368,
'huh?': 5134,
'huiming': 126,
'hum': 11995,
'humans': 9991,
'hun': 4470,
'hun!': 1401,
'hun-onbus': 1449,
'hundred': 5828,
'hundred.he': 7949,
'hundreds': 13051,
'hungover!': 10683,
'hungry': 5853,
'hungry.': 3169,
'hungry...': 9797,
'hunks': 9647,
'hunny': 5088,
'hunt': 12902,
'hunting': 9976,
'hurricanes': 10500,
'hurried': 10047,
'hurry': 1343,
'hurt': 6873,
'hurt,': 1426,
'hurt..': 1456,
'hurt...:-)': 13313,
'hurting': 7215,
'hurts': 5082,
'husband': 468,
'husband.': 10587,
'hussey': 4336,
'hustle': 7161,
'hut?': 3770,
'hv': 11552,
'hvae': 13136,
'hw': 13261,
"hw'd": 3113,
'hw,keep': 5986,
'hyde': 9538,
'hypertension': 9262,
'hypotheticalhuagauahahuagahyuhagga': 8146,
'i': 11425,
"i'd": 5252,
"i'll": 12304,
"i'm": 4915,
"i've": 12777,
'i,m': 9300,
'i.e.': 11415,
'i.ll': 12829,
'iPOD': 8624,
'iPod': 788,
'iPod!': 10857,
'iZ': 3310,
'ibm,': 9998,
'ibuprofens': 3052,
'ic.': 9737,
'iccha': 3440,
'ice': 2217,
'ice.': 767,
'icicibank.com': 11880,
'icky': 10577,
'icon': 13553,
'id': 6297,
'id,': 6785,
'id.': 11627,
'idc': 994,
'idea': 12025,
"idea's?": 4592,
'idea,': 13758,
'ideal': 8852,
'ideas.': 6115,
'ideas?': 10467,
'idiot': 9109,
'idiot?': 7134,
'idk': 89,
'idu': 13603,
'ie': 11921,
'if': 5774,
'if/when/how': 5675,
'iff': 1518,
'ignorant.': 12804,
'ignore': 10868,
'ignoring': 10634,
'ikea': 11241,
'il': 6150,
'ill': 12001,
'ill!': 12202,
'ill?': 9811,
'im': 1091,
'image': 12757,
'images': 7118,
'imagination..My': 12412,
'imagine': 12882,
'imma': 559,
'immed.': 7062,
'immediately': 6398,
'immunisation': 3844,
'imp': 12049,
'imp.': 1626,
'impatient': 3731,
'important': 7487,
'important.': 1667,
'importantly,': 3309,
'imposed': 255,
'impossible': 8246,
'impossible.': 8367,
'imposter': 482,
'impress': 11729,
'impressed.': 4387,
'impression': 10870,
'impressively,': 3730,
'improve': 3312,
'improved': 6710,
'in': 9675,
'in!': 7254,
'in!!': 3779,
'in,': 11507,
'in.': 5969,
'in...': 6442,
'in.our': 8671,
'in2': 5883,
'in:': 4327,
'in?': 12491,
'inc': 2141,
'inc)': 10029,
'inch': 1249,
'inches': 86,
'inches...': 13742,
'incident.': 4259,
'incident..': 7024,
'incident?': 4357,
'include': 10874,
'include:': 13023,
'includes': 5610,
'inclusive': 7838,
'incomm': 5358,
'inconsiderate': 5470,
'increments.': 30,
'inde': 11481,
'indeed': 12643,
'independently': 10652,
'india': 11309,
'india.': 8912,
'indian?': 8677,
'indicate': 3907,
'individual': 10950,
'individual.time': 302,
'infernal': 4144,
'info': 11125,
'info!': 6123,
'info:': 12922,
'info:www.100percent-real.com': 7157,
'[email protected]': 1723,
'[email protected]': 10829,
'info@vipclub4u.': 10997,
'inform': 382,
'information': 12135,
'information,': 10740,
'information.': 5571,
'informed': 11466,
'informed.Rgds,Rakhesh,Kerala.': 1134,
'infra': 2221,
'infront': 1018,
'ing': 10227,
'ingredients!': 3050,
'initiate\\"': 13042,
'inlude': 4361,
'inmind': 6199,
'inner': 7150,
'innings': 878,
'innings.': 2583,
'innocent': 7648,
'innu': 10048,
'inperialmusic': 1666,
'inpersonation': 10412,
'insects': 37,
'inside': 235,
'inspection': 861,
'inst': 11855,
'install': 5759,
'installation': 11349,
'installing': 5997,
'instantly': 2754,
'instead': 5644,
'instructions': 5922,
'instructions.': 2209,
'insurance': 7438,
'insurance.': 6628,
'intelligent': 8511,
'intelligent,': 4257,
'intend': 4234,
'intention': 7359,
'intentions': 2237,
'interest': 10987,
'interested': 3383,
'interested.': 5430,
'interesting.': 5293,
'interesting..:)': 8431,
'interfued': 3519,
'internal': 7504,
'internet': 5897,
'internet.': 9393,
'internet/service': 7462,
'interview': 6657,
'interview?': 873,
'interviews.': 6532,
'intha': 7813,
'into': 10762,
'into:': 2112,
'intrepid': 12577,
'intro': 2752,
'intrude': 119,
'invention': 11167,
'invest': 10319,
'investigate': 5313,
'invitation': 10710,
'invite': 6025,
'invited': 13118,
'invited?': 3732,
'inviting': 10833,
'invnted': 7207,
'invoices': 383,
'involve': 6223,
'involved': 10622,
'iouri,': 5638,
'ip': 497,
'ip4.': 7148,
'ipad.': 4633,
'ipaditan;)': 11766,
'ipads': 7176,
'iphone': 10329,
'ipod.': 12234,
'irene.ere': 11928,
'iron': 8055,
'ironing': 6356,
'irritated': 977,
'irritating': 495,
'irritating.': 8421,
'is': 10869,
'is!': 3176,
'is,': 47,
'is-': 389,
'is.': 7756,
'is..': 852,
'is/are': 10509,
'is:': 8539,
'is?': 13286,
'is?!': 9039,
'isComing': 5685,
'iscoming': 12468,
'ish': 8721,
'ish!': 9349,
'ish?': 13080,
'ishtamayoo?happy': 10433,
'islands,': 9839,
"isn't": 5848,
'isnt': 3559,
'isn\x89Ûªt': 7545,
'isn\x89Û÷t': 6871,
'isnåÕt': 4842,
'issue': 663,
'issue.': 9127,
'issues': 7716,
'issues..': 701,
'it': 8315,
'it!': 12145,
'it!!': 6325,
"it's": 802,
'it)': 2559,
'it*': 5948,
'it,': 12878,
'it,LEAVE': 8004,
'it,U': 1308,
'it,pls,pls': 9673,
'it.': 12716,
'it..': 5546,
'it...': 6249,
'it....': 11638,
'it..\\"': 3542,
'it..just': 50,
'it..let': 6783,
'it..mail': 4330,
'it.may': 10016,
'it.or': 3638,
'it:)': 10526,
'it?': 4469,
'italian': 2839,
'itcould': 10415,
'items': 9725,
'iter': 3613,
'itried2tell': 10538,
'its': 4246,
'itself': 83,
'itself!': 4322,
'itself.': 2199,
'itself..': 1391,
'itwhichturnedinto': 11514,
'it\x89Û÷s': 9332,
'itåÕs': 1615,
'ivatte': 6593,
'ive': 12291,
'iz': 13045,
'izzit': 3961,
'izzit?': 8222,
'i\x89Ûªm': 7069,
'iåÕd': 1288,
'iåÕm': 6548,
'j': 3129,
'jabo': 11350,
'jack': 9757,
'jacket': 4871,
'jackson': 8565,
'jacuzzi': 9881,
'jaklin': 13573,
'jamster': 10795,
'jamster.co.uk!': 4741,
'jan': 11221,
'janarige': 7593,
'jane.': 11393,
'january': 4685,
'jap': 596,
'jason': 8272,
'java': 10006,
'jay': 2084,
"jay's": 13805,
'jaya': 11634,
'jaykwon': 2045,
'jazz': 9751,
'jb': 2936,
'je': 7999,
'jealous': 2096,
'jeans': 12033,
'jeans...': 5192,
'jeetey': 553,
'jelly': 9115,
'jenne.': 1157,
'jenny': 11178,
'jess': 7986,
'jesus.': 9602,
'jesus..': 5806,
'jet': 1126,
'jetton': 7542,
'jewelry,': 10875,
'ji': 9440,
'jiayin.': 6673,
'jide': 2253,
'jiu': 2463,
'jo': 7154,
'job': 12634,
'job!': 4798,
'job,': 5862,
'job.': 7490,
'job..': 1968,
'job...': 12486,
'job:)': 9354,
'job?': 12127,
'jobs': 6434,
'jocks': 7871,
'jod': 7573,
'jog.': 8619,
'jogging,': 2256,
'john': 1259,
'johnåÓ-sounds': 608,
'join': 5700,
'join...': 13723,
'joined': 2161,
'joined.Hope': 1850,
'joining': 2017,
'joke': 10791,
"joke's": 11959,
'joke--thet': 1491,
'joke.': 11848,
'jokes...': 5079,
'jokin': 3466,
'joking': 11813,
'joking.,': 2956,
'jolly': 6642,
'jolt': 7968,
'jontin': 10341,
'jot': 9063,
'journey': 1310,
'journey!': 11170,
'joy': 5754,
'joy\\"..': 2692,
'joys': 575,
'jp': 13739,
'js': 4027,
'jst': 2625,
'juan': 11019,
'juan.': 3864,
'judgemental....i': 11811,
'juicy': 10620,
'jules': 11379,
'juliana.': 5261,
'jump': 5668,
'june..': 11436,
'jungle': 1164,
'jurong': 6797,
'jus': 11763,
'just': 6534,
'just..': 4715,
'justbeen': 13114,
'justify': 5544,
'juz': 4703,
'k': 8542,
'k.': 8262,
'k...': 12077,
"k...i'm": 1548,
'k.good': 10980,
'k.i': 4847,
'k.k..where': 3510,
'k:)but': 8846,
'k?': 2849,
'kaaj': 2310,
'kadeem': 5575,
'kaitlyn?': 13670,
'kalaachutaarama..!!': 1604,
'kalainar': 2351,
'kalisidare': 12651,
'kallis': 6489,
'kalstiya..then': 7072,
'kama': 4416,
'kanji': 5135,
'kano': 4799,
'kano.,il': 12830,
'kano..whr': 13077,
'kano:-)': 7367,
'kano;-)': 11239,
'kappa': 8609,
'karaoke': 6572,
'karo': 5599,
'kaypoh..': 3421,
'kb': 956,
'ke': 7534,
'keen': 379,
'keep': 8750,
'keeping': 1186,
'keeps': 4773,
'kegger)': 3954,
'keluviri...!': 3303,
'kent': 11684,
'kept': 4381,
'kerala': 10935,
'kettoda': 7868,
'key': 2004,
'keys': 8187,
'keys?': 13818,
'keyword': 4484,
'kfc': 12284,
'kg': 9083,
'khelate': 13251,
'ki': 12146,
'kick': 2920,
'kickboxing': 12942,
'kickoff!': 1900,
'kicks': 11006,
'kid..': 13496,
'kidding': 4243,
'kidding,': 9933,
'kids': 13497,
'kids.': 2123,
'kids?': 1485,
'killed': 1165,
'killing': 7236,
'kills': 8742,
'kilos': 8722,
'kind': 11563,
'kind.': 13654,
'kinda': 12367,
'kindly': 13550,
'king': 6687,
'kintu': 7884,
'kip': 6409,
'kisi': 2772,
'kiss': 8745,
'kiss*': 9775,
'kiss,': 8889,
'kisses*': 2099,
'kissing!': 13431,
'kittum': 3868,
'kitty': 5176,
'knackered.': 4708,
"knee's*": 10334,
'knew': 7478,
'knew,': 13288,
'knickers': 9274,
'knocking': 11180,
'know': 11922,
'know!': 5981,
'know,': 8950,
'know,yetunde': 8434,
'know.': 2272,
'know..he': 3122,
'know..wait': 7861,
'know:)this': 6119,
'know?': 3551,
'knowing': 12175,
'known': 9405,
'knows': 4090,
'knw': 12012,
'ko': 713,
'kochi.': 4217,
'kodstini': 9921,
'kodthini!': 684,
'konw': 11140,
'korche': 12322,
'korean': 5769,
'korli.': 12986,
'korte': 2811,
'kothi': 3857,
'ktv': 3327,
'kuch': 11253,
'kvb': 4918,
'kz': 6722,
"l'm": 8778,
'l8': 13826,
'l8r': 9730,
'l8r.': 524,
'la': 7807,
'la,': 2855,
'la.': 2975,
'la...': 13259,
'lab': 3641,
'labor.': 8469,
'lac': 641,
'lacking': 6319,
'lacs..thats': 9554,
'laden.': 222,
'ladies': 5167,
'ladies.': 154,
'ladies?U': 10170,
'lady': 11276,
'lady!': 9585,
'lady.': 2757,
'lag': 9788,
'lag.': 3144,
'lage.': 1549,
'lager.': 1037,
'laid': 12274,
'laid?want': 6862,
'lakhs:)': 12496,
'lambda': 5630,
'lambu': 5613,
'land': 2917,
'landline': 1543,
'landline,': 2000,
'landline.': 5116,
'landlineonly': 7824,
'landlines!': 9030,
'landmark,': 4518,
'lane': 12379,
'lane.': 11063,
'langport?': 2691,
'language': 11829,
'language.': 6416,
'lanka?': 5369,
'lanre': 2417,
'lap': 13241,
'lapdancer.': 4534,
'laptop': 6704,
'laptop...': 7397,
'laptop?': 4605,
'lar': 3892,
'lar.': 2134,
'lar..': 3662,
'lar...': 452,
'laready': 9821,
'largest': 7174,
'lark.': 9028,
'lasagna': 8407,
'last': 11645,
'lastest': 817,
'lasting': 7477,
'late': 4777,
'late!': 7567,
'late,i': 9370,
'late,so': 8091,
'late.': 2708,
'late...': 9750,
'late?\\""': 8149,
'lately': 2215,
'latelyxxx': 12679,
'later': 11304,
'later,': 11018,
'later.': 7416,
'later..': 10353,
'later...': 4919,
'later?': 3563,
'later?\\""': 8822,
'latest': 6560,
'latest,': 1105,
'latests': 9961,
'latr': 7229,
'laugh': 8409,
'laugh!': 2678,
'laugh,': 8051,
'laugh?': 6101,
'laughing': 11477,
'laundry': 8836,
'laurie': 13636,
'lautech.': 3737,
'lavender?': 12771,
'law': 1891,
'law,': 7362,
'laxinorficated': 714,
'laying': 4946,
'lays': 6964,
'lazy': 9835,
'lazy.': 12629,
'lazy...': 6393,
'le..u': 2775,
'lead': 3624,
'lead:)': 7263,
'leadership': 1730,
'leads': 11825,
'league': 4766,
'learn': 803,
'learned': 10627,
'least': 9121,
'least,': 2618,
'least5times': 6219,
'leave': 2657,
'leave...': 1342,
'leave?': 10101,
'leaves': 2498,
'leaving': 3717,
'leaving?': 414,
'lect': 4421,
'lect.': 11380,
'lect...': 5296,
'lecture': 13520,
'lecture,': 3964,
'left': 4611,
'left,': 8938,
'left.': 5401,
'leftovers': 1122,
'legal': 5811,
'legal!': 1529,
'legal.': 11734,
'legs': 2069,
'leh': 5722,
'leh.': 12009,
'leh...': 5174,
'leh...Haha': 10842,
'leh?': 5771,
'lei': 13072,
'lei.': 7707,
'lei..': 11025,
'lei...': 11761,
'lei?': 10407,
'lemme': 7007,
'length': 4007,
'lengths': 355,
'lennon.': 7323,
'leo': 1896,
'leona': 2226,
"leona's": 11686,
'leona?': 2262,
'leonardo': 12226,
'less': 11958,
'less.': 4933,
'lesser': 103,
'lesson': 5299,
'lesson,': 6815,
'lesson...': 5329,
'lesson?': 12825,
'lessons': 1004,
'lessons..': 9692,
'lessons...': 2303,
'lessons?': 3349,
'let': 8907,
"let's": 529,
'lets': 8851,
'letter': 3883,
'letters': 4635,
'level': 5089,
'liao': 5635,
'liao,': 874,
'liao.': 13595,
'liao..': 6602,
'liao...': 6080,
'lib': 2694,
'lib.': 502,
'library': 204,
'library?': 7775,
'lick': 3391,
'licks': 3988,
'lido': 2626,
'lido.': 4480,
'lido?': 6168,
'lie': 10755,
'lies,': 1737,
'life': 12836,
'life!': 12516,
'life!?This': 12654,
'life,': 4624,
'life.': 2868,
'life.and': 12454,
'life.you': 12979,
'life:': 7829,
'life?': 12108,
'life?\\"': 527,
'lifebook': 8349,
'lifeis': 9055,
'lifetime': 1787,
'lifetime!': 12072,
'lift': 9495,
'lift...': 9760,
'lifted': 7006,
'lifting': 3186,
'light': 11710,
'lighters': 11344,
'lightly,': 3148,
'lik': 10737,
'like': 13167,
'like.': 2294,
'like?': 5229,
'liked': 430,
'likely': 3339,
'likes': 4568,
'lil': 6564,
'lim': 7064,
'limits': 4868,
'line': 2295,
'line--you': 640,
'line.': 6835,
'line:': 2119,
'linear': 5965,
'lined': 5884,
'linerental': 12693,
'linerental,': 5744,
'lines': 7892,
'lingerie': 9116,
'lingo.': 3035,
'link': 9954,
'link,': 10952,
'links': 2365,
'linux': 11384,
'lion': 9144,
'lionm': 9312,
'lionp': 1212,
'lip': 4042,
'lipo.': 723,
'liquor?': 8865,
'list': 3206,
'listed': 4085,
'listen': 12657,
'listen.': 6302,
'listener': 5924,
'listening': 10457,
'listening2the': 4058,
'listn': 4648,
'literally': 7127,
'little': 6411,
'little.': 12622,
'live': 3982,
'live,': 5071,
'live.': 3479,
'live..': 11786,
'live:)': 9123,
'lived': 4275,
'lives': 12636,
'lives.': 8715,
'living': 9379,
'lk': 4119,
'll': 3704,
'lnly': 5322,
'lo': 5461,
'load': 6789,
'loads': 7113,
'loan': 2316,
'loan.': 12393,
'loan?': 1976,
'loans': 728,
'lobby': 4938,
'local': 9520,
'local.': 3273,
'location': 3612,
'locations': 5531,
'lock': 5554,
'locks': 3527,
'lodge': 4573,
'lodging': 10958,
'log': 5858,
'logged': 1047,
'login': 912,
'login=': 6726,
'logo': 780,
'logo&pic': 10477,
'logo/pic': 1194,
'logoff': 5387,
'logos': 3483,
'logos+musicnews!': 1145,
'loko': 12240,
'lol': 4546,
'lol!': 12530,
'lol.': 12217,
'lonely': 375,
'lonely,': 854,
'long': 3748,
'long!': 6426,
'long,': 4297,
'long.': 12018,
'longer': 8089,
'longer.': 4550,
'loo': 7780,
'look': 3195,
'look.': 12090,
'looked': 6731,
'lookin': 2881,
'looking': 1255,
'looks': 12358,
'looovvve': 9619,
'loose': 8905,
'lor': 2537,
'lor,': 13439,
'lor.': 10756,
'lor..': 13078,
'lor...': 10788,
'lor?': 3524,
'lose': 10185,
'lose.': 12528,
'losers': 3889,
'loses': 9755,
'losing': 1384,
'loss': 12065,
'lost': 9955,
'lost,': 13240,
'lost.': 7182,
'lot': 219,
'lot.': 2260,
'lot..': 3590,
'lot...': 3811,
'lot..will': 9946,
'lot.will': 10663,
'lotr': 1199,
'lots': 1277,
'lots.': 8765,
'lotsly!': 5406,
'lotta': 7708,
'lotto': 4552,
'lou': 10173,
'loud.': 10772,
'loud..': 1653,
'lounge': 10186,
'lounge?': 7697,
'lousy': 7448,
'lousy,': 980,
'lov': 12122,
'lovable': 11717,
'love': 3513,
'love!': 10295,
'love!!': 7110,
'love,': 12261,
'love.': 1880,
'love..': 10794,
'love...': 2819,
'love.rakhesh': 7279,
'love:': 5189,
'loved': 4199,
'lovely': 5103,
'lovely,': 4183,
'lover': 1465,
'lover!': 1755,
'loverboy': 6394,
'loverboy!': 4151,
'loverboy.': 106,
'lovers': 13235,
'lovers...': 11005,
'loves': 595,
'lovin...': 4699,
'loving': 11525,
'loving,': 8073,
'lovingly': 3260,
'low': 2904,
'lower': 855,
'loxahatchee': 4604,
'loyalty.': 8919,
'lt': 5333,
'lttrs,': 10847,
'lubly!': 6056,
'luck': 2495,
'luck!': 8282,
'luck.': 409,
'luck.2': 7084,
'lucky': 751,
'lucky.': 3754,
'lucky;)': 13127,
'lucozade.co.uk/wrc': 6067,
'luks': 9949,
'lunch': 13369,
'lunch.': 8806,
'lunch...': 2698,
'lunch:)you': 11965,
'lunch?': 840,
'lunchtime,': 4319,
'lunsford': 1600,
'lush!': 2319,
'luv': 811,
'luv!': 1957,
'luv!..': 4836,
'luv,Night': 8464,
'luv.': 22,
'luvd:*': 6777,
'luvs': 9105,
'luxury': 13368,
'lv': 8092,
'lyf': 13451,
'lyfu': 9688,
'lying.': 7671,
'lyk': 1216,
'lyrics..': 12650,
'm': 12520,
'm)': 11707,
'm...': 2465,
'm100': 564,
'm8?': 2478,
'm8s.': 8204,
'ma': 11906,
'maaaan': 2064,
'mac': 9222,
'mac.': 13082,
'machan.': 13623,
'machi?any': 8847,
'machines': 12538,
'macho': 12519,
'mack': 12435,
'macleran': 8960,
'macs': 12389,
'mad': 7690,
'mad."': 13824,
'mad.then': 11867,
'madam': 3081,
'made': 3575,
'madodu,': 6195,
'madoke': 189,
'madstini': 10087,
'mag': 11511,
'maga': 10286,
'maga..': 9935,
'maga?': 7862,
'magazine,': 8456,
'maggi': 5881,
'magic.': 3998,
'magical': 5999,
'magicalsongs.blogspot.com': 11945,
'mah': 2591,
'mah,': 104,
'mah.': 13471,
'mah..': 11072,
'mah...': 8189,
'mahal': 7497,
'mail': 10260,
'mail.': 1070,
'mail?': 13803,
'mails': 1137,
'mails.': 13625,
'main': 3990,
'major': 3234,
'make': 13806,
'makes': 12973,
'makiing': 11529,
'makin': 12850,
'making': 2639,
'malaria': 1670,
'malarky': 889,
'male': 2108,
'mall?': 4481,
'man': 7715,
'man!': 3939,
'man,': 6769,
'man.': 678,
'man...': 5914,
'manage': 7626,
'management': 5629,
'manda...': 7278,
'mandara': 12362,
'manege': 12323,
'mango': 6749,
'maniac?': 7744,
'manky': 13163,
'manual': 11587,
'many': 10568,
'map': 3423,
'mapquest': 7485,
'maps': 3031,
'maraikara': 13330,
'march': 6561,
'march.': 888,
'mark': 11770,
'market': 13346,
'marketing': 795,
'marking': 2734,
'marrge..remembr?': 9908,
'marriage': 4815,
'marriage.': 7696,
'married': 4679,
'married.': 13646,
'marry': 11508,
'mary': 9371,
'mas': 8505,
'masked': 12080,
'massage....tie-pos': 12429,
'massages.': 1424,
'massive': 7232,
'masteriastering': 10398,
'masters': 10902,
'match': 3700,
'matches': 1644,
'matches.': 10691,
'mate': 1706,
'mate!': 10773,
'mate!!': 1144,
'mate,': 1786,
'mate.': 3660,
'mate...': 9394,
'mates': 11214,
'mates,': 277,
'mates.': 4064,
'math.': 10436,
'mathematics.': 1576,
'matra': 10793,
'matric': 4922,
'matter': 3070,
'matter.': 3465,
'matter..msg': 431,
'matter?': 7029,
'matters': 7093,
'matured': 10193,
'maturity': 5930,
'max6/month': 9584,
'maximize': 13449,
'maximum': 2900,
'maxå£7.': 6156,
'may': 5132,
'may.': 6787,
'mayb': 1997,
'maybe': 364,
'mb...': 8335,
'mc?': 2012,
'mca.': 75,
'mcat': 11111,
'mcat.': 12199,
'mcr': 6883,
'me': 1575,
'me!': 7385,
'me!!': 2608,
'me!;': 3668,
'me,': 6784,
'me.': 1620,
'me..': 10503,
"me..!''": 11950,
'me...': 3534,
'me..."': 2100,
'me....': 11121,
'me.........': 6278,
'me...ummifying...bye.': 12006,
'me..i': 10656,
'me..ok,': 1467,
'me..so': 9042,
'me.I': 1160,
'me.\\"...': 3108,
'me.i': 5553,
'me.need': 5860,
'me.she': 7896,
"me:)i'm": 6728,
'me?': 9518,
"me?'": 11083,
'me?Are': 11986,
"me?Don't": 823,
'me?like': 7239,
'meal': 469,
'meal.': 13442,
'meals': 7529,
'mean': 5649,
'mean,': 3695,
'meaning': 12879,
'meaning:::::': 13233,
'meaningful': 12953,
'meaningless': 10342,
'means': 11912,
'meant': 8485,
'meant.': 1975,
'measure': 11846,
'meatballs': 489,
'med': 665,
'medical': 11254,
'medicine': 1559,
'medicine.': 13400,
'meds': 12609,
'mee..': 2334,
'meet': 13308,
'meet,itz': 5209,
'meet.': 3809,
'meet?': 2889,
'meetin': 4960,
'meeting': 4756,
'meeting,': 2207,
'meeting.': 2255,
'meeting?': 4069,
'meetins': 13192,
'meets': 4026,
'meh': 2791,
'meh.': 4012,
'meh...': 1610,
'meh?': 4087,
'mei,': 10945,
'meive': 12976,
'mel': 1605,
'melody!': 5099,
'melt': 11127,
'member': 7856,
'members': 4422,
'membership': 13046,
'memorable.': 12758,
'memory': 798,
'men': 12403,
'men.': 11404,
'mens': 7119,
'mental': 4364,
'mental:)': 438,
'mention': 5680,
'mentioned.tomorrow': 67,
'mentionned': 4496,
'menu': 10760,
'menu.': 13312,
'meow': 2574,
'meow:-D': 6696,
'merely': 10674,
'merry': 5181,
'mesages': 10127,
'mess': 8759,
'message': 3539,
'message,': 9855,
'message.': 11664,
'message..': 5203,
'message..Its': 2055,
'message..no': 8885,
'message.it': 1512,
'message.pandy': 4557,
'message?': 7607,
'messaged': 2895,
'messages': 8160,
'messages,': 5217,
'messages-Text': 8084,
'messages.': 2882,
'messages..im': 7458,
'messenger': 12081,
'messenger.': 4048,
'messy': 7300,
'messy...': 20,
'met': 4981,
'met.': 976,
'method': 6437,
'mgs': 81,
'mi': 9306,
'mia': 9824,
'mid': 11087,
'middle': 6677,
'midnight': 7880,
'midnight,': 2419,
'mids': 6651,
'might': 718,
'miiiiiiissssssssss': 10908,
'miles.': 2390,
'milk': 5977,
'milk..': 3516,
'millers': 11374,
'million': 6533,
'milta,Zindgi': 5104,
'min': 6089,
'min.': 8492,
'min..': 740,
'min..stand': 6075,
'min:)': 9779,
'minAPN': 2216,
'mind': 3373,
'mind,': 7396,
'mind.': 13258,
'mind...': 9744,
'mind?': 2283,
'minded': 13686,
'mindset.believe': 12198,
'mine': 3866,
'mine&all': 7289,
"mine's": 2723,
'mine.': 10221,
'minecraft': 9988,
'mini': 3365,
'mins': 6136,
'mins!': 7106,
'mins&100txt/mth': 7615,
'mins.': 10065,
'mins?': 10402,
'mint!': 5645,
'minus': 11580,
'minute': 2799,
'minute!': 6911,
'minute.': 1905,
'minutes': 8093,
'minutes,': 7973,
'minutes.': 1425,
'minuts': 264,
'miracle': 10696,
'mirror': 3773,
'misbehaved': 7243,
'mising': 7158,
'miss': 8766,
'miss.': 13516,
'miss.take': 13066,
'misscall': 804,
'missed': 7757,
'missin': 6718,
'missing': 6423,
'missing*': 4500,
'missionary': 6814,
'missions': 774,
'misss': 9586,
'missunderstding': 8328,
'mist,': 8951,
'mistake': 8459,
'mistake.': 3728,
'mistakes': 4190,
'mistakes?': 5279,
'mite': 4855,
'mitsake..': 9567,
'mj': 215,
'mjzgroup.': 12318,
'ml': 13257,
'mnth': 1365,
'mo': 7026,
'mo,': 12369,
'mo.': 5646,
'mo?': 6391,
'moan': 8291,
'moan?': 3855,
'mob': 7791,
'mob.': 7771,
'mob?': 5876,
'mobile': 1271,
'mobile!': 12388,
'mobile.': 3256,
'mobile?': 13424,
'mobiles': 748,
'mobiles.': 11598,
'mobilesvary.': 10502,
'mobs': 10896,
'mobsi.com': 1583,
'moby': 6372,
'moby.': 13836,
'mode': 3899,
'mode.': 3094,
'model': 4239,
'model..sony': 56,
'modl': 4196,
'module': 7033,
'modules': 348,
'mofo?': 6717,
'moji': 4147,
'mokka': 13482,
'molested&someone': 3648,
'mom': 8737,
"mom's": 518,
'moment': 6446,
'moment,': 11007,
'moment.': 9842,
'moments': 8250,
'moms': 6852,
'moms.': 1133,
'mon': 5562,
'mon.': 1064,
'monday': 11016,
'monday!': 9267,
'monday...': 1748,
'monday..nxt': 12962,
'mone,eppolum': 2148,
'moneY...as': 5226,
'money': 1020,
'money!!!': 9258,
'money,': 3963,
'money.': 10233,
'money...': 10544,
'money?': 4459,
'monkey': 6466,
'monkeys': 3763,
'mono': 6600,
'monos': 561,
'monster!': 5142,
'month': 526,
"month's": 335,
'month.': 12390,
'month.not': 11861,
'monthly': 10008,
'months': 1842,
'months?': 5932,
'mood': 5865,
'mood.': 13013,
'moon': 11913,
'moon.': 2816,
'more': 1272,
'more!': 507,
'more,': 598,
'more.': 10540,
'more?': 776,
'more\\"': 7375,
'morn': 9756,
'mornin..': 263,
'morning': 3503,
'morning,': 661,
'morning.': 10437,
'morning..': 5399,
'morning.:': 5253,
'morning.take': 7017,
'morning:)': 8785,
'morning?': 7669,
'morphine': 1872,
'morphine.': 4507,
'moseley': 10493,
'most': 623,
'mostly': 2137,
'mother': 4020,
'motherfucker': 7151,
'motivating': 12342,
'motive': 2846,
'motorola': 12264,
'mouth': 8305,
'mouth,': 8723,
'move': 2908,
'move,': 6527,
'move.': 5609,
'move...': 12335,
'moved': 10000,
'moved,': 6994,
'moves': 339,
'movie': 2621,
'movie,': 278,
'movie.': 11687,
'movie...': 1784,
'movie..wat': 7316,
'movies': 7111,
'movies.': 433,
'moving': 11335,
'mp3': 10206,
'mr': 4561,
'mrng': 13746,
'mrng!': 7768,
'mrng"': 11649,
"mrng''": 186,
'mrng.': 9133,
'mrng:-)"': 13105,
'mrt': 1564,
'mrt?': 7943,
'ms': 517,
'msg': 10531,
'msg.': 10832,
'msg..': 4313,
'msg...': 12987,
'msg/subscription.': 9892,
'msg:': 11049,
'msg?': 10877,
'msg@å£1.50rcvd': 338,
'msging': 10989,
'msging...': 3024,
'msgrcvd18+': 5242,
'msgs': 4341,
'msgs,': 3263,
'msgs.': 911,
'msgs:D;):': 10564,
'msgs@150p': 7772,
'msn.': 5772,
'mt': 6431,
'mt..': 11089,
'mths': 7388,
'mths?': 3798,
'mu': 6693,
'mu,': 6724,
'mu?': 7895,
'much': 3547,
'much,': 10899,
'much.': 8045,
'much...': 7940,
'much.i': 2032,
'much/impede': 4378,
'much:)': 13551,
'much?': 12814,
'much\\"': 737,
'muchand': 13222,
'mudyadhu.': 12159,
'multimedia': 9896,
'multis.': 137,
'mum': 2150,
"mum's": 55,
'mummy': 7568,
'mundhe': 2042,
'murali': 2400,
'murder': 9547,
'murdered': 1079,
'murderer': 7954,
'murderer,': 10458,
'mus': 12464,
'mush!': 435,
'mushy...': 13629,
'music': 7488,
'music,': 7691,
'musical': 13390,
'must': 8111,
'must.': 2765,
'musthu': 7803,
'mutai': 12140,
'mutations.': 771,
'muz': 4634,
'mw': 9167,
'my': 1302,
'my-tone.com/enjoy.': 1022,
'mylife.': 2984,
'mymoby.': 3182,
'myparents': 11715,
'mys': 9232,
'myself': 3597,
'myself.': 1376,
'n': 10846,
'na': 3894,
'na-tuition..': 1192,
'na.': 10075,
'naal': 8782,
'nachos.': 9828,
'nag': 1000,
'nagar': 10086,
'nahi': 12361,
'nails': 2838,
'naked': 4216,
'naked.': 10910,
'nalla': 6473,
'nalli': 2744,
'name': 3722,
'name.': 5392,
'name...': 5510,
'name.my': 10189,
'name?': 12016,
'named': 3092,
'names': 2834,
'nammanna': 13778,
'nan': 5372,
'nange': 4201,
'nanny.': 9324,
'nannys': 8829,
'nap': 2402,
'nap..': 7210,
'narcotics': 11118,
'naseeb': 2795,
'nasty': 13738,
'national': 12011,
'nattil': 5667,
'natural': 3594,
'natural?': 9882,
'nature': 6766,
'natwest.': 4869,
'naughty': 504,
'nauseous.': 9467,
'nav': 13481,
'nb.': 9819,
'nb...': 7476,
'nd': 6796,
'ne': 9655,
'near': 12776,
'nearby': 6800,
'nearer...': 8917,
'nearly': 5069,
'necesity': 398,
'necessarily': 13774,
'necessary': 4198,
'necessity': 9926,
'neck': 5464,
'necklace': 4660,
'ned': 490,
'need': 6986,
'need.': 9470,
'need...': 8279,
'need....|': 7603,
'needa': 5106,
'needed': 395,
'needed!': 6901,
'needed.Salary': 13044,
'needing': 2306,
'needle': 1027,
'needs': 10389,
'needy': 10331,
'neglect': 7627,
'neglet.': 2942,
'neighbor': 10953,
'neighbors': 10214,
'neither': 5003,
'nelson': 6730,
'net': 1694,
'net..no': 12687,
'netflix': 10750,
'network': 9107,
'network.': 13280,
'networks': 11176,
'neva': 4930,
'never': 12998,
'nevering..': 5327,
'nevr': 10204,
'new': 4210,
'newest': 7761,
'news': 350,
'news.': 8223,
'news...Hype': 2894,
'news.By': 10116,
'news:': 6829,
'newscaster': 1388,
'newspapers': 4832,
'next': 10576,
'next.': 13523,
'next:-)..': 7822,
'ni8': 465,
'ni8"': 12410,
'nice': 3756,
'nice!': 10559,
'nice.': 6966,
'nice...': 8620,
'nice:)': 191,
'nicky': 8013,
'nig': 11486,
'nigeria': 7684,
'nigeria.': 11947,
'night': 6346,
'night!': 4906,
'night"': 985,
'night,': 11306,
'night.': 7835,
'night...': 722,
'night.nobody': 6300,
'night?': 11485,
'nighters.': 7925,
'nightnight': 9684,
'nights,': 13648,
'nights.': 10313,
'nights...Excellent': 11165,
'nigpun?': 6401,
'nigro': 12891,
'nike.': 8440,
'ninish,': 2159,
'nino': 5504,
'nipost': 7381,
'nit': 7617,
'nit.': 13718,
'nite': 9685,
'nite!': 10426,
'nitro': 5879,
'nitros.': 12255,
'nitz...': 8198,
'no': 4830,
'no!': 8058,
"no's..": 1704,
'no,': 6607,
'no.': 13268,
'no..': 1359,
'no...': 1560,
'no:)this': 13795,
'no?': 3729,
'no?!listened2the': 11376,
'nobbing': 8334,
'nobody': 12338,
"nobody's": 2523,
'noe': 5676,
'noe...': 2568,
'noe?': 13519,
'noisy.': 466,
'nokia': 8470,
'nokias': 2520,
'noline': 10604,
'non': 2480,
'noncomittal': 12985,
'none': 5306,
'noon': 11614,
'noon..': 7994,
'nor': 13373,
'nora': 7787,
'nordstrom': 1095,
'norm': 5349,
'norm150p/tone': 7160,
'normal': 10681,
'normally': 8954,
'northampton': 12119,
'nos': 13242,
'nose': 3841,
'nosh': 5148,
'nosy': 10234,
'not': 9264,
'not!': 5536,
'not,': 12700,
'not.': 10089,
'not...': 13570,
'not?': 4406,
'note': 9783,
'note:': 753,
'notebook': 3112,
'notes': 6813,
'nothin': 11242,
'nothing': 11217,
'nothing,': 1594,
'nothing.': 12104,
'notice': 6993,
'notice.': 3577,
'notifications,': 12172,
'notified': 9657,
'notixiquating': 3119,
'noun': 2385,
'novelty': 2193,
'now': 13785,
'now!': 5782,
'now!"': 5448,
'now!SKY': 4138,
'now!SavaMob-member': 7444,
'now!Send': 9615,
'now!T&Cs': 4958,
'now!Use': 13755,
'now!nyt.': 12810,
'now!still': 5061,
'now,': 2220,
'now.': 1719,
'now..': 2074,
'now...': 2296,
'now.i': 7601,
'now.onion': 6131,
'now?': 12978,
'now?:)': 11994,
'now?can': 9014,
'nowadays:)lot': 4205,
'nt': 5378,
'nt.swt': 6518,
'nt?': 8545,
'nte.:-': 10009,
'ntwk': 6989,
'nuclear': 2238,
'nudist': 3751,
'nuerologist.': 10704,
'num': 6389,
'num.': 10865,
'number': 5072,
'number!': 10456,
'number,': 629,
'number.': 11898,
'number?': 4977,
'numbers': 10126,
'numbers,': 54,
'numbers.': 8136,
'nursery!': 437,
'nurses': 11199,
'nus': 13155,
'nusstu...': 5600,
'nvm,': 4584,
'nw': 2301,
'nw,': 4582,
'nxt': 13420,
'nxt?': 8728,
'nyc': 5030,
'nyc,': 2943,
'nydc': 2781,
'nyt': 6876,
'nyt"': 6583,
'nyt..': 3398,
'nyt:-*': 11247,
'nz': 9574,
'o': 13657,
'o2.co.uk/games': 9810,
'oath': 859,
'obedient,': 11050,
'obese.': 12918,
'obey': 4514,
'objection.': 4717,
'oblisingately': 8625,
'oblivious': 7445,
'obviously': 8850,
'obviously,': 11667,
'occasion': 1514,
'occupied,': 13158,
'occupy': 8977,
'occur': 3325,
'occurs': 3019,
'oclock': 2469,
'october.': 11862,
'odalebeku:': 5191,
'odi': 7726,
'odi:-)': 3545,
'of': 8264,
'of,': 5301,
'of.': 6791,
'of?': 7354,
'off': 6830,
'off!': 11449,
'off,': 3360,
'off.': 2053,
'off:)': 4100,
'off?': 7889,
'offc': 2083,
'offcampus': 1776,
'offer': 8761,
'offer,': 6752,
'offer:)': 4060,
'offered.': 9217,
'offering': 3106,
'offers': 2486,
'offers.': 3270,
'office': 10723,
'office.': 7022,
'office..': 6200,
'office..understand?': 11412,
'office.thenampet': 3002,
'office:)whats': 5750,
'office?': 2743,
'official': 6991,
'officially': 4212,
'offline': 675,
'ofice': 5797,
'ofice...got': 6525,
'often': 1492,
'often,': 1203,
'ofå£2000': 9001,
'ogunrinde,': 6750,
'oh': 8452,
'oh!': 3749,
'oh.': 9515,
'oh...': 6537,
'oil...': 3486,
'ok': 8423,
'ok!': 2824,
'ok,': 5839,
'ok.': 13024,
'ok.,': 7335,
'ok..': 670,
'ok...': 11458,
'ok....take': 13469,
'ok..come': 2026,
'ok..then..whats': 8652,
'ok.varunnathu': 3033,
'ok:-)': 8043,
'ok?': 10283,
'okay': 11809,
'okay!': 4638,
'okay.': 13151,
'okay...': 4326,
'okay?': 13732,
'okie': 3932,
'okie...': 4104,
'okmail:': 7164,
'ola?': 13034,
'olage': 10882,
'olave': 6921,
'old': 12344,
'old.': 7636,
'ollu.but': 699,
'olowoyey@': 10552,
'olympics.': 7651,
'omw': 11810,
'omw,': 3275,
'on': 11923,
'on!': 4468,
'on)': 3338,
'on,': 5051,
'on-edge': 3162,
'on.': 11048,
'on..': 12392,
'on...': 11355,
'on:)it': 7418,
'on?': 9173,
'once': 4944,
'once.': 1541,
'once?': 9046,
'ondu': 10967,
'one': 2748,
'one!!': 12129,
"one's?": 211,
'one,': 13529,
'one,ta.': 3228,
'one.': 7660,
'one...': 2682,
'one?': 5005,
'ones': 6946,
'ones,': 6621,
'ones..': 8962,
'oni': 6146,
'oni...': 5414,
'online': 1925,
'online,': 12166,
'online.': 6439,
'online?': 5793,
'online?why?': 1547,
'onluy': 12939,
'only': 7440,
'only!': 5767,
'only.': 10360,
'only..': 783,
'only...': 11526,
'only..bettr': 8746,
'only.don': 5603,
'only:)': 6575,
'only:-)..': 6879,
'only?': 4263,
'onto': 3319,
'onwards': 2098,
'open': 5468,
'open.': 1792,
'open?': 9863,
'opened': 12588,
'openin': 9375,
'opening': 11983,
'operate': 8177,
'operator': 3686,
'operator.': 3175,
'opinion': 13182,
'opinions': 586,
'opinions.': 8247,
'opponenter': 432,
'opportunity': 2803,
'opportunity.all': 6523,
'opportunity.pls': 10860,
'opposed': 5991,
'opps,': 4691,
'opt': 9165,
'opted': 13376,
'optimistic': 1853,
'optin': 9662,
'option': 5807,
'optout': 8589,
'or': 4576,
'or2optout/HV9D': 9311,
'or2stoptxt': 12532,
'oral.': 10565,
'orange': 12713,
'orc': 5661,
'orchard': 6228,
'orchard.': 4352,
'order': 9540,
'order,': 5962,
'ordered': 11444,
'ore': 3828,
'oredi': 1744,
'oredi..': 9352,
'oredi...': 12601,
'oredi?': 5038,
'oreo': 5393,
'organise': 4714,
'orig': 4108,
'original': 9351,
'orno': 2091,
'ors': 9390,
'oru': 12421,
'os': 5040,
'oscar.': 3199,
'oso': 7461,
'oso,': 12996,
'oso.': 6701,
'oso...': 79,
'oso?': 5545,
'other': 5674,
'other!': 11059,
'other.': 7802,
'others': 6502,
'others..': 2980,
'otherwise': 10733,
'otside': 2492,
'ouch': 11418,
'our': 4414,
'ours:)so': 9484,
'out': 8680,
'out!': 5829,
'out,': 10662,
'out--have': 3454,
'out--if': 2384,
'out.': 5893,
'out..': 281,
'out...': 12699,
'out?': 13702,
'outage.': 1282,
'outages': 13263,
'outbid': 2923,
'outdoors.': 1001,
'outfit.': 2284,
'outfor': 12173,
'outgoing.': 4602,
'outrageous.': 12287,
'outreach.': 4137,
'outside': 1685,
'outsider': 1183,
'outstanding': 6338,
'outta': 1955,
'ovarian': 5278,
'over': 5070,
'over!': 12828,
'over,': 5966,
'over.': 10305,
'over..': 310,
'over?': 8942,
'overa': 2023,
'overemphasise.or': 10362,
'overheating': 2711,
'overtime': 1930,
'ovr': 7799,
'ovulation': 6440,
'ow': 2852,
'owe': 11765,
'owl': 3815,
'own': 5608,
"own--you've": 6782,
'own.': 4783,
'owned': 5835,
'owns': 5188,
'owo': 7746,
'oz': 7380,
'p': 387,
'pa': 5995,
"pa'": 10933,
'pa,': 5641,
'pa.': 955,
'pa..': 9176,
'pa...': 10427,
'pa?': 11292,
'paces,': 5689,
'pack': 9174,
'pack.also': 8300,
'package': 4742,
'packing': 12099,
'packs': 9429,
'padhe.g.m.\\""': 9389,
'page': 8512,
'page.': 13067,
'page...': 3898,
'pages': 8880,
'pages..': 6480,
'pages?': 12026,
'pai': 1230,
'paid': 8096,
'paid.': 6036,
'pain': 8676,
'pain.': 9536,
'pain.it': 2476,
'pain?hope': 6853,
'painful': 12089,
'paining': 8809,
'painting': 6015,
'painting?': 12383,
'pale': 2857,
'palm,': 12116,
'pan': 4973,
'panalam...but': 4009,
'panic': 6588,
'panicks': 1305,
'panren': 6030,
'panties?': 11673,
'pants.': 9286,
'pap': 13487,
'papa': 6008,
'paper': 7764,
'paper,': 9330,
'paper.': 12103,
'papers': 8062,
'paperwork.': 6772,
'paracetamol': 6483,
'parachute': 8244,
'parade': 8265,
'paragon,': 3464,
'paragraphs?': 8185,
'paranoid': 563,
'parantella': 5738,
'parchi': 5013,
'parco': 13204,
'parents': 11166,
"parents'": 2075,
'parents.:)i': 174,
'parish': 13578,
'park': 7329,
'park.': 301,
'park...': 12781,
'park.6ph': 6225,
'parked': 2154,
'parkin': 12900,
'part': 2554,
'part!': 7443,
'part.': 2263,
'part..': 4324,
'participate': 12497,
'particular': 7605,
'particularly': 8365,
'partner': 785,
'partner?': 12411,
'partnership': 6542,
'parts?': 5823,
'party': 3535,
'party:-)': 6667,
'paru..': 7145,
'pases': 3355,
'pass': 8161,
'pass.They': 371,
'passable.': 1939,
'passed': 2051,
'passes': 8531,
'passion': 11991,
'passionate': 11359,
'passport': 10792,
'password': 1778,
'past': 5998,
'pataistha': 6625,
'patent.': 12811,
'path': 9986,
'pattern': 12373,
'patty': 10871,
"patty's": 11878,
'pay': 2991,
'pay.': 5917,
'pay?': 11791,
'payasam': 13563,
'payback.': 13231,
'payed.': 8575,
'paying': 7061,
'paying,': 11416,
'payment': 484,
'payment.': 4700,
'payments': 5936,
'payoh': 4848,
'paypal.': 9353,
'pc': 1042,
'pdate_Now': 9850,
'peace..': 10589,
'peaceful': 3703,
'peak!': 6307,
'pears': 160,
'peeps': 5682,
'pehle': 13644,
'pei': 5494,
'pen': 9195,
'pence': 5109,
'pending.': 3242,
'pending.i': 9714,
'penis': 5677,
'penis.': 8090,
'people': 11577,
'people!\\"': 6612,
'people,': 7643,
'people.': 7547,
'peoples': 12419,
'per': 12927,
'per..': 7498,
'percentages.': 10897,
'perf?': 1504,
'perfect': 11792,
'perfect!': 12565,
'perform,': 8105,
'performance': 8817,
'performed?': 8869,
'perhaps.': 12248,
'period': 2186,
'period..': 5721,
'permanent': 910,
'permission.': 9887,
'permissions': 10798,
'perpetual': 11023,
'persevered': 5784,
'person': 4419,
'person!': 7196,
'person.': 3163,
'person.Meet': 7103,
'person2die': 10761,
'personal': 680,
'personally': 5855,
'persons': 9099,
'persons..\\"': 11343,
'perspective': 12899,
'pesky': 1862,
'petrol': 13825,
'pg': 13039,
'pg..': 1818,
'ph:08700435505150p': 97,
'ph?': 3792,
'phasing': 11522,
'phd,': 2890,
'philosophical': 8939,
'philosophy': 11218,
'philosophy.': 2167,
'phne,': 77,
'phone': 8969,
'phone!': 11541,
'phone!!': 10007,
"phone's": 9071,
'phone,': 8074,
'phone.': 2546,
'phone...': 3172,
'phone750': 9219,
'phone:)': 11521,
'phone?': 696,
'phoned': 5596,
'phones': 7063,
'phones.': 10763,
'photo': 1057,
'photos': 3452,
'photos.': 8966,
'physics': 4395,
'piah': 2627,
'pic': 964,
'pic.': 12273,
'pic?': 515,
'pic?Txt': 13734,
'pick': 5540,
'pick.': 8106,
'picked': 2440,
'picking': 10585,
'pickle': 8390,
'pics': 824,
'pics,': 5233,
'pics.': 3727,
'picture': 472,
'pictures': 13722,
'pictures,': 8366,
'pie!': 5714,
'pie.': 10543,
'piece': 2787,
'pieces': 1250,
'pierre': 1061,
'pig': 8304,
'pilates': 2963,
'pilates...': 477,
'pile': 9409,
'pillows': 10211,
'pimples': 9118,
'pimples..even': 6944,
'pin': 4881,
'pin?': 11324,
'pink': 2893,
'pints': 6285,
'piss': 13246,
'pissed': 3482,
'pissed.': 3691,
'pix': 1622,
'pix?': 13014,
'pixels,': 538,
'pizza': 9057,
'pizza.': 10267,
'pizza...': 11802,
'place': 6720,
'place,': 6927,
'place.': 453,
'place..': 7450,
'place.No': 12399,
'place?': 5390,
'placed': 1902,
'placement': 11889,
'places': 5379,
'places.': 10672,
'plaid': 9438,
'plan': 9137,
'plan!': 8648,
'plan.': 688,
'plan?': 2022,
'plane': 5328,
'plane..': 13237,
'planet': 1932,
'planet.I': 11211,
'planned': 3084,
'planned.': 10909,
'planned?': 10581,
'planning': 6181,
'plans': 10429,
'plans.': 11405,
'plans?': 271,
'plate.': 6126,
'play': 12174,
'play!!': 6408,
'play,': 7336,
'play.': 10977,
'play?': 3675,
'played': 8564,
'player': 4370,
'player.': 578,
'player.why': 9690,
'players': 17,
'playing': 3973,
'playng': 5528,
'plaza': 6236,
'pleasant': 731,
'please': 3593,
'please!': 1414,
'please.': 12594,
'please....': 7470,
'please?!': 11762,
'pleased': 1745,
'pleasure': 8730,
'pleasure!': 13659,
'pleasure...': 6022,
'plenty': 8770,
'plenty.': 2013,
'plm': 8083,
'ploughing': 11757,
'pls': 3694,
'pls.': 7910,
'pls:-)': 9638,
'plum': 12433,
'plumbers': 1423,
'plumbing,remixed': 12696,
'plural': 6387,
'plus': 4302,
'plz': 4194,
'plz.': 2128,
'pm': 4284,
'pm.': 2827,
'pmt': 5821,
'po': 12685,
'pocked': 5227,
'pockets:)': 2636,
'pocy': 4739,
'poem:': 1204,
'poet': 7360,
'point': 4619,
'point!': 7379,
'point,': 3194,
'point?': 3643,
'points': 646,
'points.': 6646,
'poker': 11806,
'poker,': 11119,
'poking': 11228,
'pole': 3784,
'pole.': 12329,
'police': 9533,
'politicians.': 9334,
'poly': 7168,
'poly.': 4399,
'polyH': 635,
'polyPH': 13780,
'polypH': 1652,
'polyphonic': 5153,
'polys': 6603,
'polys:': 4309,
'pongal.': 3626,
'pongal?': 206,
'pongal?do': 290,
'ponnungale': 11402,
'poo': 745,
'pookie': 3771,
'pool': 3008,
'poop.': 8837,
'poor': 2059,
'poortiyagi': 664,
'pop.': 12,
'pop?': 1899,
'popcorn': 6863,
'popped': 4107,
'popping': 6649,
'porn!': 9090,
'porridge,': 10769,
'portal.': 8252,
'portege': 12734,
'posh': 3616,
'posible': 7924,
'position.': 12926,
'position?': 8290,
'positions': 250,
'possession': 9771,
'possessiveness': 4808,
'possibility': 13079,
'possible': 2106,
'possible,Hope': 12933,
'possibly': 9618,
'post': 4530,
'post,erode': 13812,
'postal': 13845,
'postcode': 3632,
'posted': 3787,
'posting': 5102,
'postponed,': 5502,
'posts': 5457,
'posts..': 8810,
'potato': 6524,
'potential': 5241,
'pouch': 10385,
'pouch?': 5496,
'pound': 5086,
'pounded': 10100,
'pounds': 6922,
'poured': 3264,
'pours': 4788,
'power': 10235,
'ppl': 11691,
'pple': 9241,
'pple...': 11445,
'pple...$700': 12875,
'ppm': 194,
'ppm150': 12514,
'ppt150x3+normal': 12242,
'pract': 3143,
'practical': 9073,
'practical.': 8014,
'practice': 2381,
'practicing': 11421,
'practicum': 9147,
'practising': 12151,
'praises': 13810,
'praps': 11346,
'prasad.': 11433,
'pray': 9494,
'pray!': 6767,
'prayers': 9298,
'praying': 4403,
'praying.will': 13195,
'pre': 6880,
'pre-book': 1221,
'predict': 7471,
'predicting': 10155,
'prediction.': 3376,
'predictive': 4157,
'prefer': 10169,
'prefer...': 11430,
'prem': 4577,
'premium': 5757,
'prepaid': 2635,
'prepare.': 7270,
'prepared': 7914,
'prepared.': 149,
'prepayment.': 9252,
'prescribed': 10057,
'prescripiton': 13004,
'prescription.': 9425,
'presence': 9590,
'present': 5121,
'present.': 1741,
'presents.': 11562,
'press': 8218,
'pressies': 3960,
'pressure': 6376,
'prestige': 8639,
'pretend': 847,
'pretsorginta,': 6566,
'pretsovru': 6716,
'pretty': 13343,
'pretty.': 5396,
'prevent': 5728,
'previews.': 4612,
'previous': 8646,
'previously': 2880,
'prey': 6890,
'price': 9616,
'price!': 12179,
'price,so': 3207,
'price.': 9612,
'price...': 1563,
'prices': 4936,
'priest': 7778,
'prin': 12761,
'prince': 10727,
'princess': 1275,
'princess!': 8206,
'princess.': 12871,
'princess?': 7258,
'printed': 9439,
'printing': 9556,
'prior': 13059,
'priority': 3424,
"priscilla's": 6461,
'privacy': 5619,
'private': 8867,
'prix?': 3993,
'priya': 7165,
'prize': 6916,
'prize!': 587,
'prize.': 8461,
'prize.To': 12317,
'prizes': 10288,
'prizesWith..': 1775,
'prob': 8172,
'prob.': 5469,
'prob..': 3184,
'prob...': 2222,
'probably': 6763,
'probably,': 7644,
'problem': 13562,
'problem!': 12923,
'problem,': 12766,
'problem-free': 7319,
'problem.': 2922,
'problem..': 12193,
'problem..but': 7000,
'problem.i': 8566,
'problem:-)': 9578,
'problem?': 11092,
'problematic': 7125,
'problems': 2286,
'problems.': 11733,
'problum': 9342,
'probs': 9979,
'process': 13086,
'process.': 10885,
'process.Excellent': 1154,
'process.networking': 10408,
'process:)its': 341,
'process?': 1356,
'processed': 4897,
'processed...': 12487,
'prods': 3604,
'products': 4140,
'professional': 12131,
'professors': 6158,
'profile': 4095,
'profiles?': 756,
'profit': 5588,
'profit.': 10995,
'programs': 7402,
'progress.': 7235,
'project': 8695,
'project.': 346,
'projects.': 107,
'prolly': 13748,
'prominent': 9322,
'promise': 7472,
'promise.': 11248,
'promised': 1033,
'promises': 9006,
'promoting': 1623,
'promptly': 6438,
'prompts.': 8182,
'prone': 11268,
'proof': 3584,
'proove': 7853,
'proper': 5117,
'properly': 12969,
'property': 12259,
'propose': 7546,
'propsd': 13779,
'pros': 13146,
'prospects': 4096,
'prospects.': 11997,
'protect': 7543,
'provided': 6214,
'provider': 8081,
'province': 10854,
'proze': 10757,
'prsn': 12463,
'ps3': 2607,
'pthis': 10459,
'pub': 3429,
'pub!': 8709,
'pub?': 12840,
'pub??': 12568,
'public': 9796,
'publish': 8156,
'pudunga': 1929,
'pull': 449,
'pulling': 121,
'pulls': 4465,
'pump': 3799,
'punch': 3951,
'punish': 7639,
'pura': 10583,
'purchase': 7997,
'purchases': 8754,
'pure': 6792,
'purpose': 7598,
'purse': 2702,
'push': 7080,
'pushes': 1498,
'pussy': 10249,
'put': 12416,
'puts': 3878,
'puttin': 4493,
'putting': 8640,
'puzzeles.': 116,
'puzzles': 9969,
'på£3.99': 46,
'qatar': 6397,
'qet...': 11774,
'qi...': 11851,
'qing': 11197,
'quality': 7479,
'quality.': 778,
'queen.': 8108,
'queen?': 7870,
'question': 3713,
'question(std': 11028,
'question.': 5606,
'question..': 1443,
'question:': 12732,
'questioned': 3708,
'questions': 10010,
'questions.': 4128,
'quick': 3007,
'quickly': 6368,
'quiet': 2312,
'quiet.': 8095,
'quit': 7109,
'quit.': 10124,
'quite': 2172,
'quitting': 12337,
'quiz': 12992,
'quiz,': 9067,
'quizzes': 3827,
'quote': 6547,
'quoting': 9412,
'r': 7657,
'racing': 754,
'radiator!': 5964,
'radio': 4193,
'raed': 4689,
'rael': 5152,
'raglan': 6084,
'rahul': 7130,
'raiden': 3341,
'railway': 12023,
'railway.': 48,
'rain': 3453,
'rain,': 11948,
'rain.': 13029,
'raining': 2264,
'raining?': 6910,
'raise': 4527,
'raised': 1536,
'rajas': 3606,
'rajini': 8391,
'rakhesh': 5818,
'raksha': 3881,
'ralphs': 200,
'ran': 11348,
'random': 9338,
'random.': 7049,
'randomlly': 13689,
'randomly': 13574,
'randy.': 9818,
'rang': 4797,
'range': 3104,
'range.': 13041,
'raping': 12005,
'rate': 276,
'rate)': 4838,
"rate)T&C's": 2531,
'rate.': 3353,
'rate..': 4174,
'rates': 4495,
'rather': 333,
'ratio': 11173,
'rawring': 9651,
'rayan': 168,
'rays': 3830,
'rays.': 8579,
'rcb.battle': 2529,
'rcd': 11771,
'rcv': 4425,
'rcvd': 7622,
'rcvd.': 3221,
'rd': 7992,
'rd.': 9315,
'rdy': 1796,
're': 633,
're-schedule.': 1961,
're-send.': 10445,
're-sub': 8968,
're.': 8301,
'reach': 5929,
'reach?': 7311,
'reache': 13638,
'reached': 13696,
'reached.': 12646,
'reaching': 13311,
'reaching?': 7121,
'reacting': 11935,
'reaction': 11031,
'read': 10192,
'read...': 13712,
'readers!': 13797,
'readiness.': 5819,
'reading': 12494,
'reading.': 749,
'ready': 934,
'ready!': 3202,
'ready,': 875,
'ready.': 7,
'ready...': 1049,
'ready.all': 291,
'real': 11597,
'real,': 411,
'real.': 7970,
'realised': 3525,
'reality': 5779,
'realize': 6619,
'realized': 7178,
'realizes': 13651,
'really': 3476,
'really,': 12697,
'really?': 13360,
'really??': 11827,
'realy': 9864,
'reason': 161,
'reason..': 12773,
'reasonable': 231,
'reasons': 10310,
'reboot': 5660,
'rebooting': 1225,
'rebtel': 3522,
'rec': 10707,
'recd.': 2876,
'recd@thirtyeight': 10573,
'receipt': 7453,
'receipts\x89ÛÓwell': 3633,
'receive': 10184,
'receive..': 10355,
'receivea': 11837,
'received': 13782,
'receiving': 12521,
'recent': 6499,
'recently': 10551,
'recently.': 4228,
'reception': 1311,
'recession': 12620,
'recharge.': 1156,
'recharge..Rakhesh': 1562,
'recharged': 1377,
'recieve': 3012,
'reckon': 11387,
'recognise': 11110,
'recognises': 2981,
'record': 3615,
'record:)': 9460,
'recorded': 9323,
'records': 13081,
'recovery': 6705,
'recovery,': 6448,
'recreation': 10400,
'recycling:': 1607,
'red': 11149,
'red,red': 10990,
'redeemable': 8882,
'ref': 11817,
'reference': 4889,
'references..': 226,
'reffering': 403,
'refilled': 1886,
'reflex': 9304,
'reformat.': 7136,
'refreshed': 565,
'refund': 5946,
'refunded.This': 1843,
'refused?': 2281,
'reg': 9400,
'regard': 12423,
'regarding': 11984,
'regards': 2048,
'register': 2561,
'register.': 13530,
'registered': 2670,
'registration': 4274,
'regret': 10749,
'regretted': 12180,
'regular': 9847,
'rejected': 1697,
'related': 10886,
'relation': 7326,
'relation!': 2592,
'relation..': 4306,
'relationship.....its': 8550,
'relatives': 3025,
'relax.': 307,
'relaxing': 13711,
'released': 7888,
'reliant': 2360,
'relieved': 2371,
'religiously.': 4822,
'relocate.': 8979,
'reltnship..!!': 1024,
'rem': 6672,
'remain': 1573,
'remains': 3021,
'remb...': 2725,
'remember': 1075,
'remember.': 3222,
'remembered': 9102,
'remind': 12560,
'reminded': 8193,
'reminder': 3753,
'reminding': 12925,
'reminds': 4039,
'removal': 8132,
'remove': 7655,
'remove.': 5720,
'removed': 4491,
'removed.': 10501,
'renewal': 9265,
'renewed': 5706,
'renewing': 3253,
'rent': 3276,
'rent,': 10523,
'rent.': 11842,
'rental': 12461,
'rental:': 3904,
'rental?': 13347,
'renting': 4432,
'rentl.': 7260,
'repair': 8727,
'repairs': 6887,
'repeat,': 2192,
'repent': 841,
'replacement': 10113,
'replacement.': 7294,
'replacing.': 4045,
'replied': 12306,
'replied:': 13434,
'replies': 8701,
'reply': 8669,
'reply-': 298,
'reply.': 5521,
'reply..': 7633,
'reply...': 5463,
'reply.Be': 6034,
'replying': 1140,
'replying.': 5891,
'report': 6186,
'report.': 11077,
'report?': 6708,
'reppurcussions.': 7034,
'representative': 10481,
'republic': 8026,
'request': 12847,
'requests': 5212,
'requests.': 858,
'required:)': 11461,
'requirements': 6375,
'requires': 1558,
'research': 7020,
'research?': 2821,
'reservations.': 9054,
'reserve': 6388,
'reserved': 10361,
'reserves': 12164,
'reset': 4575,
'residency': 2745,
'resizing': 7224,
'reslove.': 1246,
'resolution.': 12750,
'respect.': 510,
'responce..what': 10998,
'respond': 2736,
'responding': 10680,
'response': 8527,
'responsibility.': 11291,
'responsible': 2088,
'rest': 4372,
'rest!U??Wud': 8326,
'rest,Wish': 3946,
'rest:-).': 279,
'restaurant.': 8963,
'restaurant..': 11824,
'restock': 5363,
'restocked': 13410,
'restrict': 8317,
'restrictions,': 10399,
'resubbing': 2527,
'resubmit': 9031,
'result': 11669,
'result.': 12509,
'results': 8286,
'resume': 13175,
'retard': 11510,
'retired': 6165,
'retrieve': 5027,
'return': 8944,
'returned': 2326,
'returning': 10486,
'returns': 13063,
'reunion?': 7094,
'revealed.': 3407,
'review': 9126,
'revision': 6865,
'reward!': 7934,
'rewarding': 8313,
'rhythm': 3029,
'rhythm.': 359,
'rice,': 1321,
'rice.': 4473,
'rich': 1552,
'riddance': 589,
'ridden': 11290,
'ride': 8751,
'right': 13405,
'right!': 2641,
'right,': 3487,
'right.': 11611,
'right..': 1561,
'right?': 3570,
'rightly': 11768,
'rights': 1201,
"riley's": 2969,
'rimac': 1244,
'ring': 11877,
'ring.': 3877,
'ring...': 10109,
'ringing': 1859,
'ringtone': 4750,
'ringtone!': 5334,
'ringtone-get': 7185,
'ringtoneking': 11478,
'ringtones': 2634,
'ringtones,': 3379,
'ringtones.': 12319,
'rinu': 9521,
'rip': 8682,
'risk': 5404,
'risks,': 8535,
'rite': 4192,
'rite...': 10336,
'rite?': 11124,
'river': 11251,
'road': 6826,
'road.': 13833,
'road...': 11142,
'roads': 5615,
'roads!RVx': 8174,
'roads.': 5232,
'roast': 1239,
'roast.': 1153,
'rob': 3313,
'robs': 7837,
'rock': 13093,
'rock,': 2997,
'rock.': 8447,
'rocks': 7377,
'rocks...': 9200,
'rofl': 11960,
'roger.': 13494,
'role': 4238,
'roles': 12266,
'rolled': 7364,
'romantic': 9800,
'romantic!': 5664,
'ron': 2623,
"ron's": 8140,
'room': 1313,
'room.': 9858,
'room:)': 10333,
'room?': 4092,
'roomate': 6850,
'roommate': 8736,
"roommate's": 2036,
'roommate,': 9426,
'roommates': 6066,
'rooms': 5672,
'ros': 12137,
'rose': 3677,
'rough': 1097,
'round': 10140,
'round,': 1759,
'round.': 843,
'rounder:)so': 3658,
'rounds': 1611,
'route': 9772,
'row': 4101,
'royal': 992,
'rpl': 376,
'rply': 10862,
'rr..': 3296,
'rs': 10350,
'rs.': 9398,
'rs..': 12307,
'rs..i': 4226,
'ru': 9956,
'ru?': 10992,
'rub': 3205,
'rubber': 3501,
'rude': 13433,
'rude,': 4522,
'rugby': 1034,
'ruin': 864,
'ruining': 9865,
'rule': 6979,
'rule.': 5762,
'rules': 9151,
'rummer.': 13179,
'rumour': 1300,
'run': 13000,
'run.': 9694,
'running': 9283,
'running.lets': 7701,
'runs': 8574,
'rupaul': 3803,
'rush': 12587,
'rush,': 13149,
'ryan': 9184,
's': 9443,
's,': 6616,
's.': 7133,
's..first': 10591,
'sOOn': 10715,
'sachin': 11487,
'sachin.just': 1357,
'sack': 4722,
'sacked': 4424,
'sacrifice': 240,
'sacrifice.': 5254,
'sad': 2797,
'sad,': 5356,
'sad.': 10017,
'sad..': 12158,
'sad...': 13555,
'safe': 7315,
'safe.': 8547,
'safely': 5778,
'safety': 9505,
'said': 2406,
'said,': 7138,
"said,''": 12889,
'said:\\if': 9180,
'sake?!': 7341,
'salary': 5958,
'salary.': 3750,
'sale': 11286,
'sale.': 10441,
'sales': 3499,
'sales/pee': 8783,
'salon': 3399,
'samachara': 9205,
'samantha': 8341,
'sambar.life': 157,
'same': 12931,
'same,so': 11700,
'same.': 5870,
'same..': 12415,
'samus': 1803,
'sandiago': 2873,
'sang': 224,
'sao': 4997,
'sapna': 7054,
'sar': 9497,
'sarcasm..': 962,
'sarcastic': 4150,
'sariyag': 10661,
'sat': 7527,
'sat.': 1878,
'sat...': 8344,
'sat?': 10841,
'sat?Ì_': 2085,
'satanic': 1493,
'sathy': 1577,
'sathya': 12995,
'sathya.': 6304,
'satisfied': 11713,
'satisfy': 3530,
'satsgettin': 12598,
'saturday': 917,
'saturday.': 3403,
'saucy': 13349,
'save': 4223,
'save.': 427,
'saved': 3679,
'saved!': 5416,
'saves': 10621,
'saw': 6334,
'say': 8552,
'say,': 9384,
'say.': 2346,
'say...': 2054,
'say/ask': 4409,
'sayin': 10560,
'saying': 9762,
'says': 2423,
'sayy': 9905,
'sc.': 941,
'scallies,': 4112,
'scammers': 11815,
'scarcasim': 9758,
'scared': 4433,
'scared!': 4371,
'scary': 118,
'scary,': 9530,
'scenario': 12147,
'scenery': 11718,
'sch': 12675,
'sch.': 6832,
'sch...': 1945,
'schedule': 5444,
'school': 13221,
'school.': 4055,
'school?': 6243,
'schools.': 5122,
'science...': 7043,
'scold': 11367,
'scorable': 7987,
'score': 7295,
'scores': 9813,
'scotland': 4652,
'scotland.': 6218,
'scouse': 12047,
'scrappy.': 4882,
'scratches': 11474,
'scratching': 11112,
'scream': 10817,
'scream.': 9528,
'screamed': 11147,
'screamed,': 2681,
'screaming': 4158,
'screaming..': 3520,
'screen': 3948,
'screwd': 5149,
'scrounge': 10250,
'scrumptious...': 10060,
'sculpture': 13678,
'sd': 9829,
'sdryb8i': 12966,
'se': 9138,
'sea': 314,
'sea*': 1372,
'sea,': 1125,
'search': 12803,
'search:)': 9895,
'searching': 12921,
'season': 12537,
'season.': 7241,
'seat': 3602,
'sec': 7199,
'sec,': 1881,
'sec?': 11353,
'second': 11365,
'second,': 3028,
'second.': 5221,
'secondary': 4367,
'seconds': 13585,
'seconds.': 9989,
'secret': 9843,
'secret.': 7356,
'secretary': 10066,
'secretary,': 4141,
'secretly': 8251,
'secs?': 8707,
'section': 6226,
'sed': 8755,
'see': 583,
'see!': 4301,
"see's": 12670,
'see,': 5305,
'see.': 13691,
'see..': 10419,
'seeing': 1711,
'seekers.': 12584,
'seeking': 3187,
'seem': 10020,
'seemed': 12212,
'seemed.': 1355,
'seems': 736,
'seen': 11209,
'sef': 3422,
'seh': 13028,
'sehwag': 11236,
'seing': 8192,
'selected': 2118,
'selected?': 9558,
'selection': 5843,
'self': 13316,
'self.': 3103,
'selfish': 5238,
'selflessness.': 585,
'sell': 11267,
'selling': 11033,
'sells': 2378,
'sem': 7343,
'semester': 3384,
'semester.': 11095,
'semester?': 8236,
'semi': 1743,
'semiobscure': 7073,
'sen': 11053,
'send': 12019,
'send,': 13863,
'send.': 7916,
'sending': 11907,
'sending.': 4369,
'senor': 7269,
'senrd-dnot': 1805,
'sense': 9939,
'sense,': 12648,
'sense.': 4059,
'sense?': 13594,
'senses.respect': 8793,
'sensible,': 9637,
'sensitive': 652,
'sent': 2542,
'sent.': 8808,
'sentence': 2993,
'senthil.hsbc': 13317,
'sept.': 9650,
'september': 12602,
'series': 13154,
'series.': 352,
'serious': 13820,
'serious.': 7465,
'serious?': 6806,
'seriously': 739,
'seriously..': 8499,
'served': 8448,
'server': 8726,
'service': 3509,
'service.': 13851,
'services': 686,
'services.': 4261,
'serving.': 8908,
'set': 10056,
'setting': 12482,
'settings': 6939,
'settings.': 7212,
'settle': 1637,
'settled': 12614,
'settled?': 366,
'settling': 12802,
'seven': 3051,
'seven.': 6237,
'several': 9613,
'sex': 4337,
'sex.': 3862,
'sexiest': 6443,
'sextextuk.com': 9414,
'sexual': 3178,
'sexy': 10405,
'sexy!': 4536,
'sexy!!': 13239,
'sexy,': 3219,
'sexy...': 7920,
'sexy?': 4613,
'sh': 11338,
'sha': 5339,
'sha!': 2938,
'sha.': 9566,
"shade's": 7100,
'shadow.': 730,
'shag?': 4688,
'shagged': 13488,
'shakara': 7658,
'shake': 733,
'shaking': 7717,
'shall': 11801,
'shame': 3127,
'shame!': 8030,
'shangela.': 7120,
'shanghai': 6841,
'shaping': 5533,
'share': 12034,
'share...': 9653,
'shared': 6462,
'sharing': 7978,
'shattered.': 7704,
'shaved': 10281,
'shd': 4955,
'she': 6872,
"she'll": 3318,
"she's": 9774,
'she.s': 7800,
'sheet': 9553,
'sheets': 384,
'sheets.': 7071,
'sheffield': 12731,
'shelf:)': 2808,
'shell': 1967,
'shelves': 8662,
'shes': 4506,
'shining': 3254,
'ship': 4525,
'shipped': 13511,
'shipped.': 822,
'shipping': 11040,
'shirt': 6595,
'shirt.': 13621,
'shirts': 12689,
'shit': 7369,
'shit"': 9545,
"shit's": 2903,
'shit,': 10939,
'shit.': 6305,
'shit....!!\\"': 10230,
'shite.': 13546,
'shitload': 13344,
'shitstorm': 13050,
'shld': 10417,
'shldxxxx': 8314,
'shock': 12644,
'shocking': 10135,
'shoes.': 7620,
'shoot': 13003,
'shop': 6892,
'shop.': 11283,
'shop..': 13273,
'shopping': 5847,
'shopping!': 3096,
'shopping.': 4905,
'shopping?': 10376,
'shore': 12125,
'shore..The': 2107,
'short': 10928,
'short.': 12914,
'shortage': 9947,
'shortcode': 8382,
'shorter': 9150,
'shortly': 340,
'shortly.': 2776,
'shorts...': 10325,
'shot': 12194,
'should': 7559,
'should.': 10175,
'shoulders': 5896,
"shouldn't": 5795,
'shouldn\x89Û÷t': 12225,
'shouted': 2223,
'shouting..': 6811,
'shove': 1907,
'shoving': 6805,
'show': 12617,
'showed': 591,
'shower': 5160,
'shower!': 12288,
'shower,': 225,
'shower.': 715,
'showered': 11439,
'showers': 11753,
'showing': 11779,
'showrooms:)city': 8278,
'shows': 3475,
'shows...': 10571,
'shrek': 11604,
'shrink': 218,
'shuhui': 10222,
'shuhui.': 13871,
'shuhui?': 4241,
'shun': 8042,
'shut': 12453,
'shy': 4908,
'shy!': 12017,
'si.': 11100,
'sian': 9487,
'sick': 10813,
'sick.': 7899,
'sick?': 9526,
'sickness': 1914,
'side': 2001,
'side,': 12279,
'side.': 616,
'sigh*': 6339,
'sight': 3528,
'sight,': 2832,
'sight..': 4780,
'sign': 8507,
'sign,': 2129,
'signal.': 12086,
'significance?': 3850,
'significant': 6256,
'signin': 1197,
'signing': 8399,
'signing,': 6671,
'siguviri': 5271,
'silence': 13848,
'silence...\\"': 7574,
'silent': 5105,
'silent.': 7864,
'silently': 330,
'silly': 8302,
'silver': 6549,
'sim': 914,
'simonwatson5120': 10889,
'simple': 8123,
'simple..': 2643,
'simpler,': 11224,
'simply': 2915,
'simulate': 9537,
'since': 8232,
'since.': 6594,
'sing': 8298,
'singing': 10195,
'single': 11094,
'single?': 8240,
'singles': 5364,
'sink': 6553,
'sip': 7312,
'sir': 2292,
'sir,': 2279,
'sir.': 4376,
'sis': 1081,
'sis.': 13682,
'sister': 5978,
'sister.': 9316,
'sister..': 6097,
'sit': 8672,
'site': 6313,
'sitll': 710,
'sitter': 2062,
'sittin': 9918,
'sitting': 6330,
'sitting.': 12485,
'situation': 8524,
'situation:': 3082,
'six': 1243,
'size': 909,
'sized': 3343,
'skateboarding': 6041,
'skills': 1663,
'skinny': 6895,
'skins': 6699,
'skint': 9904,
'skip': 10321,
'skirt...': 10373,
'sky': 11284,
'skye': 11919,
'skype': 9329,
'skype,': 10064,
'skype.': 6459,
'skyped': 13191,
'slap': 9682,
'slave': 4091,
'slave,': 7414,
'slave.': 5169,
'slave?': 1054,
'sleep': 8756,
'sleep!': 7948,
'sleep,': 10689,
'sleep.': 3044,
'sleep..': 1790,
'sleep...': 9718,
'sleeping': 7891,
'sleeping,': 11980,
'sleeping.': 8955,
'sleeping..and': 223,
'sleeping?': 12346,
'sleeps': 3085,
'sleepy': 2840,
'sleepy.': 380,
'slept': 9284,
'slice': 11297,
'slice.': 1330,
'slices': 13539,
'sliding': 1535,
'slightly': 148,
'slip': 6037,
'slippers': 63,
'slippery': 1837,
'slo': 12943,
'slob-': 2580,
'slots': 3073,
'slovely.': 8350,
'slow': 12831,
'slow.': 5294,
'slower': 2158,
'slowing': 6739,
'slowly': 7758,
'slowly.?': 12797,
'slurp!': 10401,
'small': 1419,
'small,': 3261,
'smart': 12860,
'smart..Though': 8005,
'smarter': 4004,
'smash': 5182,
'smashed': 1089,
'smear': 3720,
'smell': 7011,
'smells': 827,
'smeone': 370,
'smidgin.': 12220,
'smile': 13627,
'smile*': 9295,
'smile,': 5503,
'smile.': 5988,
'smile..:)': 392,
'smile.:-)': 12637,
'smiled': 5888,
'smiling': 13598,
'smiling.': 7801,
'smiling:-)': 1323,
'smiling?': 10263,
'smoke': 12769,
'smoke,': 328,
'smoked': 10183,
'smokes': 9407,
'smokes,': 9171,
'smokin': 7703,
'smoking': 666,
'smoothly.': 1742,
'sms': 13228,
"sms'd": 8940,
'sms-08718727870': 4073,
'sms.': 6340,
'smsing': 9913,
'smth': 11936,
'smth.': 9924,
'sn': 11313,
'snake': 9085,
'snake.': 5331,
'snappy': 12161,
'snatch': 11600,
'snickering': 9360,
'snogs!': 2971,
'snogs.': 3928,
'snow': 7097,
'snow,': 202,
'snow.': 1441,
'snowball': 5495,
'snowboarding': 13635,
'snowman': 12898,
'so': 668,
'so,': 128,
'so.': 13703,
'so...not': 3736,
'so.so': 10157,
'so:)': 7331,
'so?': 6707,
'soc...': 1247,
'sochte': 10805,
'social': 5029,
'sofa': 1471,
'sofa.': 10944,
'sofa..': 9347,
'soft': 10641,
'software': 7053,
'soil..': 4968,
'soiree': 10605,
'soladha.': 11904,
'solve': 8312,
'solved': 7436,
'solved!': 10356,
'some': 658,
'some1': 11894,
'somebody': 8571,
'someday': 8178,
'someone': 8048,
"someone's": 11375,
'someone.': 10515,
'someplace,': 1887,
'somerset...': 12385,
'somethin': 5882,
'somethin.': 10180,
'somethin...': 5842,
'something': 13266,
'something!': 2307,
"something's": 5438,
'something,': 8370,
'something.': 256,
'something...': 6623,
'something?': 4304,
'sometime!': 6110,
'sometime.Rakhesh,visitor': 13422,
'sometimes': 6539,
'sometme': 2835,
'somewhat': 620,
'somewhere': 2149,
'somewhere.': 9163,
'somewhereSomeone': 4,
'somone': 3568,
'somtimes': 5170,
'sonathaya': 2726,
'song': 9893,
'song..:-)': 2328,
'songs': 12207,
'soo': 1348,
'soon': 3477,
'soon!': 4317,
'soon,': 12120,
'soon.': 9769,
'soon..': 2809,
'soon...': 11191,
'soon?': 10824,
'sooner': 10460,
'sooner.': 3324,
'sooner?': 2462,
'sooo': 3249,
'soooo': 5982,
'sooooo': 9886,
'sophas': 13768,
'sore': 1238,
'sore!': 8520,
'sore.': 10668,
'sore...': 7442,
'sorrow,': 5244,
'sorrows.I': 2318,
'sorry': 11022,
'sorry,': 1732,
'sorry-i': 13217,
'sorry.': 10264,
'sort': 4153,
'sorta': 11144,
'sorted': 12294,
'sorting': 6474,
'sorts': 1682,
'sory': 5207,
'soryda..realy..frm': 9053,
'soul': 6333,
'sound': 6834,
'sound..': 8065,
'sound...': 1161,
'sound?': 5282,
'sounds': 6712,
'soundtrack': 3434,
'soup': 9593,
'soup.': 10468,
'source': 5885,
'sources': 5413,
'south': 13033,
'southern': 8454,
'souveniers': 115,
'space': 5084,
'spaces.': 9608,
'spageddies': 12666,
'spanish': 10367,
'spare': 10934,
'spares': 7131,
'spark.': 6521,
'spatula': 9355,
'speak': 5810,
'speak,': 10764,
'speaking': 9032,
'special': 7894,
'special-call': 4754,
'special.': 11854,
'special?': 1779,
'specialisation.': 2934,
'specialise': 8975,
'specially': 13538,
'specific': 3301,
'specify': 12444,
'specs.': 4126,
'speechless': 7213,
'speechless.': 12505,
'speed': 4136,
'speeding': 13787,
'speling': 6559,
'spell': 4817,
'spelled': 35,
'spelling.': 1935,
'spend': 9677,
'spending': 2989,
'spent': 3778,
'spiffing': 11366,
'spile': 12821,
'spin': 2230,
'spirit.': 7008,
'spk': 7206,
'spl..wat': 3405,
'splendid': 9022,
'splleing': 4704,
'spoil': 1686,
'spoiled?': 10617,
'spoilt': 5534,
'spoke': 8568,
'spoken': 9093,
'sponsors': 1593,
'spontaneously..': 2747,
'spoon': 3320,
'sporadically': 8886,
'sport': 6082,
'sport,': 815,
'sports': 4103,
'spot': 3198,
'spotty,': 11650,
'spouse': 8873,
'spreadsheet': 9668,
'spree': 10965,
'spree,': 2715,
'spring': 9065,
'springs': 5786,
'spys': 5311,
'sq825,': 10390,
'squatting': 13622,
'squid!': 1522,
'srs': 10580,
'srsly': 6913,
'srt': 13132,
'sry': 1482,
'st': 4736,
'stable': 3797,
'stadium?': 2175,
'staff': 8269,
'staff.science.nus.edu.sg/~phyhcmk/teaching/pc1323': 12139,
'stagwood': 6390,
'stairs': 1418,
'stalk': 9792,
'stalking': 11081,
'stamped': 5568,
'stamps': 8592,
'stand': 10242,
'standard': 4916,
'standing.': 5450,
'standing...|': 417,
'stands,': 1915,
'star': 4025,
'staring': 8811,
'starring': 6844,
'stars': 2782,
'starshine!': 7499,
'start': 11207,
'start,': 5938,
'start.': 4332,
'start.i': 13799,
'start?': 5684,
'started': 13633,
'starting': 2444,
'starting.': 8597,
'starts': 8234,
'starve': 8021,
'starving.': 8446,
'stash': 3358,
'stated': 11840,
'statements': 13802,
'station': 7554,
"station's": 9277,
'station.': 634,
'status': 3683,
'stay': 3125,
'stay.': 234,
'stayed': 11916,
'stayin': 165,
'staying': 8896,
'stays': 6458,
'std': 11795,
'steak': 4405,
'steal': 3229,
'stealing': 12083,
'steam': 11137,
'steamboat?': 4247,
'steed': 846,
'steering...': 6497,
'steps.': 534,
'stereo': 9830,
'sterling': 10051,
'steve': 7204,
'steve,like!': 685,
'sth...': 528,
'stick': 7370,
'stick.': 11540,
'sticky': 2706,
'stifled': 12250,
'still': 4667,
'still,': 3331,
'still.maybe': 13425,
'still?': 8414,
'stitch': 4653,
'stock': 2484,
'stocked': 12511,
'stomach': 5915,
'stone': 10926,
'stoners': 13403,
'stones,': 5905,
'stones.': 2431,
'stool.': 12894,
'stop': 511,
'stop.': 9738,
'stop..': 3876,
'stop...': 1702,
'stopCost': 11470,
'stop\\"': 8932,
'stopped': 9957,
'stopped.': 750,
'stops': 4934,
'stoptxtstopå£1.50/week': 7459,
'store': 358,
'store.': 11692,
'store.like': 6725,
'stores': 6194,
'stories': 11781,
'story': 6917,
'story.': 125,
'story:-': 726,
'str': 5868,
'str*': 4942,
'str8': 1895,
'straight': 4692,
'straight,': 8259,
'strain': 1722,
'strange': 8666,
'stranger': 9974,
'stranger!!saw': 9183,
'stranger?': 1782,
'street': 10269,
'street,shall': 3518,
'stress.': 8472,
'stressed': 5439,
'stretch...': 6295,
'strewn': 1813,
'strict': 6345,
'strike': 4178,
'strings': 6456,
'stripes': 9453,
'strips': 361,
'strong': 9691,
'strong.': 3916,
'strong:)': 10959,
'strongly': 9072,
'strt': 5963,
'strtd': 3067,
'struggling': 5581,
'sts': 12626,
'stubborn': 9380,
'stuck': 1627,
'studdying': 10013,
'students': 4569,
'students.': 5485,
'studio': 7435,
'study': 8895,
'studying': 11789,
'studying.': 1764,
'studying?': 6112,
'studyn': 13664,
'stuff': 12128,
'stuff!"': 10014,
'stuff,': 13091,
'stuff.': 12479,
'stuff42moro': 546,
'stuffed': 12278,
'stunning.': 5524,
'stupid': 3644,
'stupid.': 4382,
'style': 11705,
'style?': 8976,
'styling': 1655,
'sub': 10675,
'subject': 8130,
'subletting': 6292,
'submitted': 1086,
'submitting': 2779,
'subs': 7491,
'subscribe': 7408,
'subscribed': 2269,
'subscriber': 7714,
'subscriber,': 6310,
'subscribers': 9493,
'subscription': 796,
'subscriptions.': 2448,
'subscrition': 3413,
'subtoitles': 1903,
'success': 5971,
'success.': 6113,
'successful': 220,
'successfully': 2844,
'successfully.': 2610,
'sucker.': 5956,
'suckers.': 7447,
'sucks': 9885,
'sucks,': 6265,
'sucks.': 11103,
'suddenly': 1209,
'sue.': 9527,
'suffer.': 9272,
'suffers': 3408,
'sufficient': 4088,
'sufficient,': 6558,
'sugar': 10483,
'sugardad': 13766,
'suggest': 1012,
'suite:': 5424,
'suitemates': 7576,
'suits': 10172,
'sum': 1131,
'sum1': 4774,
'summer': 4354,
'summon': 709,
'sumthin!': 5237,
'sun': 3402,
'sun.': 12883,
'sun...': 87,
'sun0819': 6580,
'sun?': 856,
'sunday': 6754,
'sunday..': 1874,
'sunday?': 4732,
'sundayish,': 10326,
'sunlight.': 2375,
'sunny': 5899,
'sunoco': 7909,
'sunroof': 990,
'sunshine!': 5783,
'sunshine.': 10532,
'suntec': 2063,
'suntec...': 8087,
'sup': 3726,
'super': 7719,
'superb': 2733,
'superior': 4363,
'supervisor': 4407,
'supervisor.': 13613,
'suply': 73,
'supose': 5026,
'supplies': 12439,
'supply': 2170,
'support': 9198,
'support...very': 5602,
'supports': 8681,
'suppose': 12885,
'supposed': 4489,
'supreme.': 2710,
'sura': 1453,
'sura,': 3808,
'sure': 1420,
'sure,': 12736,
'sure.': 5316,
'surely': 6875,
'surf...': 4080,
'surfing': 7211,
'surgical': 4996,
'surname': 657,
'surprise': 862,
'surprised': 1659,
'surprised,': 7678,
'surrender': 1363,
'surrounded': 5639,
'survey': 11775,
'survey...': 8735,
'surya': 3914,
'sutra': 8872,
'sux': 958,
'suzy': 10139,
'svc.': 9717,
'swalpa': 11515,
'swann': 11372,
'swatch': 13495,
'sway': 5500,
'swear': 1693,
'sweater': 8163,
'sweatter..': 5276,
'sweet': 4769,
'sweet!': 2415,
'sweet,': 8546,
'sweet...': 1142,
'sweetie': 10660,
'sweets': 13426,
'swell': 9678,
'swhrt': 1824,
'swimming': 1839,
'swimming?': 7702,
'swimsuit': 1417,
'swing': 8891,
'swiss': 12932,
'swollen': 2190,
'swoop.': 2454,
'swt': 13145,
'swt.': 6821,
'syd': 4390,
'syllabus': 13582,
'symbol': 12852,
'synced': 3667,
'system': 1687,
'systems.': 13656,
't': 953,
"t's": 3262,
't-shirt.': 1963,
'ta': 11868,
'table': 12436,
"table's": 13489,
'tablet': 7618,
'tablets': 9129,
'tablets.': 10878,
'tackle': 7441,
'tacos': 4770,
'tactful': 3243,
'tactless': 4532,
'tag': 1421,
'tagged': 2512,
'tahan': 4342,
'tai': 11054,
'tait': 5056,
'take': 4746,
'take?': 12053,
'taken': 8753,
'taken...Only': 9778,
'takes': 4545,
'takin': 4204,
'taking': 4094,
'talent.': 5569,
'talent?': 6571,
'talents!': 10420,
'talk': 4209,
'talk.': 1399,
'talk?': 508,
'talking': 1433,
'talks?': 11835,
'tall': 3345,
'tallent': 8364,
'tampa': 924,
'tank': 4063,
'tank.': 5563,
'tap': 12729,
'tape': 12007,
'tariffs': 8398,
'tariffs.': 13815,
'tarpon': 12586,
'taste': 1245,
'tat': 10522,
'tat..': 1710,
'tattoos?': 6369,
'tau': 757,
'taught': 11013,
'taunton': 1855,
'taunton.': 11509,
'taxes': 10528,
'taxi.': 5129,
'taxi?': 12105,
'taxt': 1998,
'taylor': 1993,
"taylor's": 9188,
'tb': 9112,
'tb,': 7686,
'tc': 9709,
'[email protected]': 3215,
'tea': 8225,
'tea.': 5429,
'tea/coffee?': 4242,
'tea?': 4512,
'teach': 8347,
'teacher': 11748,
'teacher...': 12623,
'teaches': 424,
'teaching': 3351,
'team': 1138,
'team..': 10187,
'teams': 12400,
'tear': 8773,
'tear....\\""': 4207,
'tears.': 1950,
'teasing': 4943,
'tech': 11450,
'technical': 9852,
'technologies': 1404,
'teenager': 959,
'teeth': 12701,
'teeth?is': 902,
'teju': 1043,
'tel': 11301,
'tel,': 11708,
'telephone': 12668,
'tell': 5679,
'tell.': 9104,
'telling': 2823,
'tells': 112,
'telly': 3049,
'telly?': 12544,
'telphone...': 10059,
'telugu': 4290,
'telugu..thts': 2358,
'temp': 7975,
'temper': 11788,
'temple': 2897,
'ten': 11929,
'tendencies.': 5529,
'tensed': 5943,
'term': 4864,
"term's": 11307,
'terms': 6770,
'terms.': 6663,
'terrible': 8650,
'terrible.': 6406,
'terrific': 4456,
'tescos': 7357,
'test': 3887,
'test.': 12031,
'test:-).': 3131,
'test?': 9676,
'testing': 8215,
'tests,': 13275,
'tests.': 12045,
'text': 3372,
'text!': 13256,
'text.': 2985,
'text?': 2622,
'textand': 95,
'textbook': 6399,
'texted.': 9999,
"textin'.": 6465,
'texting': 9571,
'textoperator': 3872,
'texts': 4483,
'texts,': 4218,
'texts.': 10217,
'texts/weekend': 3469,
'tgxxrz': 11281,
'th': 13148,
'than': 12705,
'than.': 11988,
'thandiyachu': 7687,
'thangam,': 12110,
"thangam.it's": 138,
'thank': 9869,
'thanks': 5561,
'thanks.': 2629,
'thanks...': 6836,
'thanks2.': 9525,
'thanksgiving': 7849,
'thanksgiving,': 1442,
'thanx': 4639,
'thanx!': 10765,
'thanx...': 7609,
'thanx.xx': 3220,
'that': 4288,
'that!': 10215,
"that'd": 8299,
"that'll": 4106,
"that's": 3133,
'that)': 5074,
'that,': 416,
'that.': 4427,
'that..': 3442,
'that...now': 601,
'that.dont': 6427,
'that.i': 11318,
'that2worzels': 6511,
'thats': 8471,
'that\x89Û÷s': 13784,
'thatåÕs': 13870,
'the': 8331,
'the4th': 3463,
'theKingshead': 11546,
'theater': 5273,
'theatre': 6818,
'theatre.': 381,
'their': 5686,
'theirs?': 9424,
'them': 5126,
'them!': 7712,
'them.': 13409,
'them...': 8532,
'them..:-P': 2386,
'them?': 6970,
'themed': 2679,
'then': 5220,
'then!': 8016,
'then,': 1539,
'then.': 2422,
'then...': 12709,
'then.will': 5361,
'theory': 9000,
'there': 1688,
'there!': 13510,
"there're": 5320,
"there's": 1897,
'there,': 2246,
'there--': 1260,
'there.': 9502,
'there...': 4663,
'there..do': 9859,
'there.goodnight': 11741,
'there.xx': 84,
'there?': 13556,
'theres': 3560,
'these': 7827,
'thesedays': 3574,
'thesis': 11036,
'thesis!': 1982,
'thesmszone.com': 1985,
'they': 7865,
"they'll": 4526,
"they're": 11341,
'thia': 12339,
'thin': 5525,
'thing': 5202,
'thing!': 12187,
'thing,': 556,
'thing.': 7325,
'thing...': 4885,
'thing....how': 9874,
'thing?': 4005,
'things': 3432,
'things,': 10968,
'things.': 4896,
'things..': 6517,
'things...': 1646,
'things?': 5745,
'think': 8346,
'think.': 3813,
'think?': 6941,
'thinked': 13611,
'thinkin': 13639,
'thinking': 8584,
'thinking.': 5405,
'thinks': 6102,
'thinl': 9759,
'thirunelvali': 3246,
'this': 13243,
'this,': 6694,
'this.': 13493,
'this...': 429,
'this:)don': 6843,
'this?': 7140,
'thk': 13291,
'thk.': 13634,
'thk...': 6373,
'thkin': 8541,
'thm': 10541,
'thm.': 3788,
'thnk': 4408,
'thnk.': 1072,
'tho': 9302,
'tho!': 5909,
'tho)': 1251,
'tho-': 8363,
'tho.': 4351,
'those': 5373,
'those!': 4154,
'those?': 11305,
'thot': 12812,
'though': 12351,
'though!': 13038,
'though,': 7675,
'though.': 12079,
'thought': 13809,
'thought,': 11890,
'thought-': 3289,
'thought:': 6184,
'thoughts': 12527,
'thoughts.I': 5627,
'thousands': 6000,
'thread': 2366,
'threats': 13817,
'three': 6801,
'threw': 604,
'thriller': 3439,
'throat': 2720,
'throat.': 1315,
'through': 2021,
'throw': 7866,
'throwin': 6121,
'throwing': 11844,
'thrown': 7762,
'throws': 1756,
'thru': 2357,
'thru.Respect': 9482,
'ths': 7344,
'tht': 5060,
'thts': 2035,
'thuglyfe': 1916,
'thurs': 1668,
'thurs...': 12235,
'thursday': 461,
'thus': 8196,
'thx': 12727,
'tick': 10340,
'tick,': 10327,
'ticket': 3983,
'ticket,': 13719,
'tickets': 1455,
'tickets.': 1534,
'tiempo': 2092,
'tight': 4894,
'tightly.': 13542,
'tigress...': 12171,
'tihs': 3738,
'tiime': 10105,
'til': 1466,
'till': 5234,
'time': 5341,
'time!;-)': 9912,
'time,': 1578,
'time-hope': 8878,
'time.': 8490,
'time...': 1970,
'time..dhoni': 2247,
'time.Your': 5583,
'time.you': 10111,
'time?': 11730,
'time?)': 12368,
'times': 9458,
'times.': 11501,
'times:)lil': 5208,
'times?': 2564,
'timi': 419,
'timin': 7855,
'timing': 282,
'timing.': 10961,
'timings': 2976,
'tips': 2028,
'tired': 8936,
'tired.': 4015,
'tiring': 11003,
'tirunelvai': 10469,
'tirunelvali': 10669,
'tirupur': 11612,
'tirupur.': 1817,
'tis': 4334,
'tiwary': 896,
'tiz': 4567,
'tkts': 5730,
'tlk': 13166,
'tm': 972,
"tm'ing": 13049,
'tm.': 3420,
'tming': 3870,
'tmr': 11820,
'tmr..': 6643,
'tmr...': 7900,
'tmr?': 9872,
'tmrw': 8151,
'tmw,': 8956,
'to': 5483,
'to,': 12112,
'to.': 13298,
'to:': 6884,
'to?': 11940,
'toClaim.': 6230,
'toDo': 169,
'toa': 10413,
'tobacco...': 3586,
'today': 11931,
'today!': 10637,
'today!From': 10916,
"today's": 10864,
'today,': 2160,
'today.': 3943,
'today..': 3986,
'today...': 5283,
'today..but': 7680,
'today.Good': 111,
'today.\\":-)"': 13048,
'today.do': 8587,
'today.he': 7947,
'today:)': 4236,
'today?': 12612,
'todays': 5216,
'tog': 1032,
'together': 4867,
'together.': 2879,
'together...': 10032,
'tok': 11559,
'token': 6889,
'toking': 11062,
'tol': 2888,
'told': 3895,
'told..she': 6504,
'tolerance': 11976,
'tom': 386,
'tomarrow': 6914,
'tomeandsaid,THIS': 12743,
'tomo': 2434,
'tomo!': 13849,
'tomo,': 7081,
'tomo.': 7621,
'tomo?': 12252,
"tomo?Can't": 3252,
'tomorro': 9291,
'tomorrow': 11530,
'tomorrow,': 1283,
'tomorrow.': 4227,
'tomorrow..': 6526,
'tomorrow.call': 3511,
'tomorrow/today?': 5493,
'tomorrow?': 12296,
'tone': 1382,
'tone!': 11803,
'tone,': 1972,
'tone.': 4723,
'tones': 13104,
'tones2you.co.uk': 1701,
'tonght': 9806,
'tongued': 10168,
'tonight': 8337,
'tonight"': 2219,
'tonight,': 129,
'tonight.': 12677,
'tonight?': 2595,
'tonights': 9804,
'tonite': 2102,
'tonite!': 927,
'tonite...': 4947,
'tonite.busy': 12372,
'tonite.things': 4135,
'tonite?': 1537,
'tons': 2464,
'too': 11610,
'too!': 2658,
'too,': 3390,
'too.': 13586,
'too...': 5264,
'too?': 3156,
'took': 12870,
'took.': 11303,
'tookplace': 7419,
'tool?': 6644,
'tooo': 9506,
'toot': 1468,
'toothpaste': 7152,
'tootsie': 10784,
'top': 10406,
'topic..sorry': 10743,
'topped': 274,
'toppoly': 618,
'tops': 9294,
'tor': 2509,
'torch': 4772,
'torch.': 12285,
'torrents': 10629,
'tortilla': 5844,
'tosend': 13335,
'toshiba': 12044,
'toshiba...': 5199,
'toss': 12905,
'tot': 12040,
'totally': 1469,
'totes.': 8501,
'touch': 4143,
'touch.': 9699,
'touched': 6660,
'touched...': 3723,
'tough': 9229,
'tough,': 13406,
'toughest': 4970,
'tour': 11164,
'tour?': 11908,
'towards': 1705,
'town': 12655,
'town,': 362,
'town.': 11858,
'town?': 3027,
'tp': 12283,
'track': 11264,
'trade': 8604,
'train': 9981,
'training': 614,
'training.': 12765,
'training:-)': 8324,
'trainners': 3183,
'trains': 4295,
'tram.': 1545,
'transaction?': 10840,
'transcribing.': 4758,
'transfer': 8207,
'transfer?acc': 9108,
'transfered': 12627,
'transferred': 13721,
'transfr': 2018,
'transfred': 2590,
'transport': 7521,
'trash.': 8775,
'trauma': 7995,
'travel': 5694,
'travel.': 1045,
'traveling?': 6866,
'travelling': 943,
'treat': 8943,
'treat.': 11892,
'treat?': 935,
'treated': 2996,
'treated?': 12849,
'trebles!': 8868,
'tree': 12401,
'trek': 12887,
'trends': 12374,
'trial': 2532,
'tried': 6761,
'trip': 1338,
'trip.': 3947,
'trips?': 12052,
'trishul': 13579,
'triumphed.': 5096,
'tron.': 12543,
'trouble': 11032,
'troubleshooting:)': 6985,
'trouser': 7583,
'truck': 3540,
'true': 2742,
'true.': 11014,
'true..': 13193,
'true...': 8674,
'true..k,Do': 12746,
'truffles': 4028,
'truffles.': 6198,
'truly': 9210,
'truly..': 10370,
'trust': 4578,
'trusting': 13684,
'truth': 8618,
'try': 12640,
'try!': 1624,
'try.': 10498,
'try:-)': 12495,
'tryin': 10804,
'trying': 6262,
'tsunamis': 3142,
'tt': 11833,
"tt's": 13628,
'ttyl': 11777,
'ttyl!': 842,
'tue': 7037,
'tues': 10505,
'tuesday': 13720,
'tuesday.': 1909,
'tui': 13477,
'tuition': 8544,
'tuition...': 13245,
'tul': 11735,
'tune': 8786,
'turn': 12324,
'turned': 11973,
'turning': 6353,
'turns': 10378,
'tuth': 3783,
'tv': 4725,
'tv.:)lol.': 13698,
'tv/': 10898,
'twat': 2939,
'twelve!': 11397,
'twelve,': 12975,
'twenty': 1513,
'twice': 3639,
'twice-': 5020,
'twins': 9523,
'two': 10394,
'two!': 7098,
'two.': 10520,
'two?': 5236,
'txt': 3244,
'txt>': 2348,
'txtX': 11030,
'txtin': 9496,
'txting': 373,
'txts': 2101,
'txts.': 10800,
'txt~journey': 10741,
'tyler': 3693,
'type': 3795,
'type.': 10582,
'type...': 5184,
'type..lyk': 5548,
'types': 8924,
'typical': 23,
'u': 1226,
'u!': 5820,
'u!xxxx': 9509,
"u'll": 10988,
"u're": 4587,
"u've": 11592,
'u,': 7538,
'u.': 4162,
'u..': 11446,
'u...': 2547,
'u....': 13599,
'u..if': 5890,
'u.:-)': 11493,
'u.Othrwise': 7421,
'u.So': 10379,
'u:': 3237,
'u?': 3566,
'u?\\"\rham"': 4991,
'ubandu': 10383,
'ubi': 5394,
"ugo's": 9141,
'uh,': 7819,
'uk': 6981,
'uk!': 3484,
"uk's": 4540,
'ultimatum': 1599,
'um': 9911,
'umma': 3875,
'un': 7378,
'un-redeemed': 12993,
'unable': 6009,
'unbelievable': 9246,
'unclaimed!': 7187,
'uncle': 6205,
'uncle.': 2120,
'uncles': 12861,
'uncomfortable.': 11661,
'unconditionally': 2259,
'unconscious': 12721,
'unconsciously': 21,
'unconvinced': 539,
'uncountable': 10730,
'under': 10556,
'underdtand': 576,
'understand': 5979,
'understand"': 7514,
'understand.': 1884,
'understanding': 6856,
'understanding.': 5950,
'understood': 8964,
'underwear': 4720,
'underwear.': 12872,
'undrstnd': 7001,
'undrstndng': 1958,
'unemployed': 12862,
'unemployed,': 1394,
'uneventful': 7170,
'unfolds.': 9320,
'unfortunately...': 4474,
'unfortuntly': 2372,
'unhappiness!': 12515,
'unhappy': 6479,
'uni': 9622,
'uni?': 8126,
'uniform?': 651,
'unique': 7962,
'unique&I': 7812,
'univ.': 11010,
'university': 12365,
'unknown': 8274,
'unless': 1771,
'unlimited': 8999,
'unlimited...': 4659,
'unmits': 6604,
'unnecessarily': 6111,
'unrecognized.': 2041,
'unredeemed': 8498,
'unsold': 13526,
'unsold.': 8659,
'unsold.mike': 9938,
'unsold.now': 4812,
'unsubscribe': 5150,
'unsubscribe.': 2409,
'unsubscribed': 3816,
'until': 4858,
'unusual': 9937,
'up': 4449,
'up,': 6570,
'up.': 7389,
'up..!': 11132,
'up...': 9135,
'up?': 13675,
'update': 4187,
'update:': 12869,
'upgrade': 2152,
'upgrading': 580,
'upload': 10258,
'upping': 13021,
'upset': 12126,
"upset--it's": 12573,
'upset.': 12673,
'upset.i': 4105,
'upstairs': 2133,
'upstairs.': 8229,
'upto': 2968,
'ur': 3564,
'ure': 11057,
'urgent': 622,
'urgent.but': 7786,
'urgently..its': 13005,
'urgnt': 4365,
'urgnt,': 12610,
'urgnt.': 647,
'urination': 3710,
'url': 8845,
'urmom.i': 7741,
'urself': 2300,
'urself.': 6630,
'us': 2859,
'us,': 5230,
'us.': 2593,
'us..': 6972,
'us.GET': 9607,
'us.LET': 9541,
'us:)no': 5713,
'us?': 10278,
'usb': 9189,
'usc': 2875,
'usc.edu': 6010,
'use': 11134,
'use.': 10028,
'use?': 1954,
'used': 463,
'used.': 10085,
'useful': 2165,
'useful?': 3997,
'useless': 8725,
'user': 11340,
'user.': 9592,
'usf': 3433,
'usf,': 11046,
'using': 13065,
'usps': 2073,
'usual': 582,
'usual...': 12133,
'usual..iam': 13169,
'usually': 5479,
'uterus.': 9654,
'utter': 2058,
'uttered': 998,
'uup': 769,
'uv': 9452,
'v': 12336,
'v.pist': 12310,
'v.tired': 7437,
'va?i': 3003,
'vaazhthukkal.': 11573,
'vague.': 5636,
'vaguely': 7784,
'vale': 8692,
'vale...': 11157,
'valentine': 71,
'valentine?': 9450,
'valentines': 1750,
'valid': 296,
'valuable': 3117,
'value': 10022,
'value-Morning': 13380,
'valued': 10003,
'valuing': 5321,
'varaya': 10726,
'vargu': 10423,
'various': 11819,
'vary': 3544,
'vary).': 28,
'vasai': 5309,
'vava': 3022,
'vday': 4277,
've': 12142,
'vegas': 5831,
'vegetables.': 6609,
'veggie': 12954,
'vehicle': 9434,
'velachery:)': 2602,
'venaam..': 6953,
'venugopal': 5587,
'verified': 13292,
'verify.': 8467,
'verifying': 13837,
'version': 6099,
'version:': 1777,
'versus': 2239,
'very': 3293,
'vewy': 1557,
'via': 7082,
'vibrate': 13382,
'vibrator': 10035,
'vic': 7086,
'victors': 13419,
'vid': 9594,
'video': 272,
'video.': 1150,
'video/pic': 4413,
'videochat': 9489,
'videosound': 11689,
'videosounds+2': 11388,
'view': 9569,
'vijay': 1262,
'vikky': 2122,
'vikky,': 1582,
'vikky..': 7759,
"vikky..i'm": 1168,
'vill': 1227,
'village': 8859,
'village,': 8482,
'violated': 9015,
'violated.': 8425,
'violence': 10181,
'virgins': 7483,
'virtual': 13760,
'visa': 9588,
'visionsms.com': 12681,
'visit': 10207,
'visit.need': 8958,
'visiting': 3480,
'visitors,': 12597,
'vital': 7631,
'vitamin': 5343,
'vivek..': 1695,
'vl': 422,
'vodafone': 2228,
'vodka...': 8705,
'voice': 3077,
'voice.': 6273,
'voice...': 3812,
'voice.....': 11026,
'voicemail.': 3173,
'volcanoes': 10783,
'vomit': 5197,
'vomitin.': 11426,
'vomiting': 13320,
'vote': 13180,
'vote?': 7340,
'voted': 9092,
'voucher': 12312,
'voucher,': 4921,
'voucher.': 6637,
'vouchers': 9238,
'vouchers!': 5094,
'vouchers-Text': 607,
'vry': 7699,
'vs.': 9915,
'vth': 4617,
'w': 12442,
'w/c': 4988,
'w/question.': 2477,
'w1t1jy': 462,
'w8in': 1237,
'wa.': 5809,
'wad?': 2612,
'wadebridge.I': 3682,
'wahala.': 9944,
'wahay.': 4462,
'waheed': 1497,
'waheeda': 9131,
'waht': 12571,
'wait': 238,
'wait,': 907,
'wait.': 8001,
'wait?': 12297,
'waited': 1762,
'waitin': 7342,
'waiting': 9851,
'waiting..\\"': 6061,
'wake': 13565,
'wake,': 1231,
'wake.': 13411,
'waking': 7190,
'waliking': 5008,
'walk': 8769,
'walkabout': 1707,
'walked': 3664,
'walkin': 3517,
'walking': 5284,
'walking..': 9445,
'walks': 2214,
'wall?': 3854,
'wallet': 11646,
'walls': 2553,
'walmart': 12256,
'walmart.': 10708,
'walsall': 4057,
'wamma': 3608,
'wan': 7338,
'wan...': 120,
'wana': 6907,
'wanna': 1553,
'wanna?': 2555,
'want': 2665,
'want.': 11954,
'want2come.': 9068,
'want?': 2010,
'wanted': 681,
'wanting': 8444,
'wants': 3992,
'wap': 275,
'wap.': 3893,
'warm': 7087,
'warm.': 9225,
'warm?': 2738,
'warming': 5047,
'warned': 2359,
'warning': 13113,
'warranty.': 3860,
'warwick,': 12231,
'was': 10688,
'was.': 8693,
'washob': 4256,
"wasn't": 10449,
'wasnåÕt': 6432,
'waste': 5039,
'waste.': 1094,
'wasting.': 6323,
'wat': 9381,
"wat'll": 11608,
"wat's": 11498,
'wat,': 7044,
'wat.': 6860,
'wat..': 141,
'wat...': 637,
'wat?': 10346,
'watch': 6508,
'watch.': 4940,
'watch...': 2965,
'watches': 1519,
'watchin': 3001,
'watching': 4941,
'watchng': 11938,
'water': 13592,
'water,': 7952,
'water.': 8034,
'watevr': 13389,
'wats': 4279,
'watts.': 3138,
'waves': 8052,
'way': 2807,
'way!': 6820,
'way&this': 6419,
'way,': 10766,
'way.': 5376,
'way?': 7088,
'wc1n3xx': 7066,
'we': 354,
"we'll": 1892,
"we're": 13693,
"we've": 2093,
'weak': 13584,
'weakness': 1273,
'weaknesses': 9649,
'wear': 13767,
'wearing': 7629,
'wearing?': 315,
'weaseling': 8129,
'weasels...': 6430,
'weather': 6383,
'weather.': 1821,
'web': 1660,
'web2mobile': 12555,
'webadres': 6579,
'website.': 6283,
'website..now': 11321,
'website?': 12955,
'wed': 12791,
'wed,': 3230,
'wed.': 13467,
'weddin': 6077,
'wedding': 12490,
'wednesday': 12437,
'wednesday,': 3030,
'wednesday.': 2373,
'weds': 8029,
'wee': 8028,
'weed': 9259,
'weed-deficient': 6958,
'week': 3671,
'week!': 13333,
"week's": 4956,
'week,': 5289,
'week-stop': 13054,
'week.': 351,
'week.:/': 2182,
'week.|': 13321,
'week?': 140,
'week??': 5618,
'weekdays': 7439,
'weekend': 3698,
'weekend!': 10545,
"weekend's": 12632,
'weekend.': 7942,
'weekend?': 4314,
'weekends': 11926,
'weekends?': 5681,
'weekly': 8152,
'weekly!': 13869,
'weeks': 1825,
'weeks!': 7817,
'weeks>': 9094,
'weigh': 8899,
'weighed': 1402,
'weight': 13328,
'weight!': 6550,
'weight,': 1130,
'weight.': 638,
'weight...': 11736,
'weight...Haha...': 13324,
'weird': 6029,
'weird,': 11658,
'weirdest': 568,
'weirdo?': 8549,
'weirdy': 8230,
'welcome': 2655,
'welcome.': 12730,
'welcome...': 13184,
'well': 4180,
'well!': 460,
'well!!': 9392,
'well,': 9706,
'well.': 13705,
'well...': 9875,
'well..:)': 1392,
'well.you': 49,
'well?': 2713,
'welp': 1515,
'welp?': 6509,
'wen': 5144,
'went': 1083,
'wer': 10380,
'were': 7694,
'were/are': 7806,
"weren't": 288,
'wesley': 6614,
'wesleys': 1336,
'west': 11265,
'western': 9980,
'westonzoyland,': 12865,
'westshore': 660,
'wet': 11832,
'wet.': 9848,
'wet?': 5723,
'wetherspoons?': 12908,
'we\x89Û÷ll': 2232,
'we\x89Û÷re': 12132,
'weåÕve': 5549,
'what': 3596,
'what!': 4784,
"what's": 5162,
'what,': 1746,
'what...': 3532,
'whatever': 11637,
'whatever,': 2474,
'whatever.': 11794,
'whats': 2933,
'wheel': 7255,
'wheellock?': 11864,
'when': 12327,
'when,': 13504,
'when...': 8352,
'whenever': 6896,
'where': 10943,
"where's": 8972,
'where.': 6920,
'where?': 2251,
'where?btw': 11045,
'whereare': 4674,
'wherever': 9634,
'whether': 2280,
'which': 2684,
'while': 11227,
'while,': 7261,
'while,&': 2475,
'while.': 6185,
'whillTake': 8308,
'white': 10224,
'white.': 7796,
'whn': 16,
'who': 4134,
"who's": 1602,
'who.': 12556,
'who?': 2007,
'whole': 1533,
'whom': 12719,
'whos': 6919,
'whose': 1726,
'whr': 6918,
'why': 501,
'why?': 7792,
'wi': 3004,
'wid': 7480,
'widelive.com/index.': 4907,
'wif': 1612,
'wife': 5983,
'wife,': 2080,
'wife.': 6039,
'wife....': 3255,
'wifes.': 7252,
'wifi': 1101,
'wihtuot': 832,
'wikipedia.com': 10027,
'wil': 6936,
'wildest': 8135,
'wildlife': 571,
'will': 4377,
'will!': 4710,
'will,': 12934,
'willing': 11850,
'willpower': 6191,
'win': 11043,
'win150ppmx3age16': 4155,
'wind': 11712,
'wind.': 7562,
'window:-)': 12576,
'windows': 8396,
'windows,': 10307,
'winds': 7092,
'windy.': 9050,
'wine': 3302,
'wined': 9408,
'wining': 8985,
'winner': 12589,
'winner!': 13423,
'wins': 11883,
'wins.': 1139,
'winterstone': 7834,
'wipro': 10809,
'wipro:)you': 8322,
'wisdom': 10947,
'wisdom..': 8586,
'wise': 12262,
'wish': 4654,
'wish!': 7468,
'wish.': 10178,
'wisheds.': 6691,
'wishes': 3188,
'wishin': 10041,
'wishing': 9870,
'wishlist': 10718,
'wiskey': 13521,
'wit': 13252,
'with': 5425,
'with:': 12508,
'withdraw': 7611,
'wither': 9773,
'within': 11188,
'without': 4964,
'witin': 6968,
'witot': 2202,
'witout': 2818,
'wiv': 11331,
'wizzle': 805,
'wk': 10986,
'wk!': 12877,
'wk,': 698,
'wk.': 10834,
'wkend': 5851,
'wkend.': 849,
'wkg': 1411,
'wkly': 4755,
'wknd': 12391,
'wks!': 11271,
'wlcome': 2672,
'wld': 12964,
'wml?id=1b6a5ecef91ff9*37819&first=true18:0430-JUL-05': 6412,
'wnt': 10276,
'wo': 10349,
'woke': 10915,
'woken': 4789,
'woman': 13171,
'womdarfull': 4359,
'women': 2244,
'won': 3046,
'won!': 1890,
"won't": 3600,
'won.': 3149,
'wondar': 1845,
'wondarfull': 5206,
'wonder': 2258,
'wonder...': 3716,
'wonderful': 12984,
'wonderful.': 12395,
'wondering': 3456,
'wondering,': 11554,
'wonders': 6348,
'wont': 3279,
'wont.': 1177,
'woould': 6545,
'woozles': 6495,
'word': 12224,
'word).': 10856,
'word.': 8732,
'word.....not': 8201,
'word:': 7533,
'word:COLLECT': 7653,
'word:START': 5712,
'words': 6846,
'words,': 2308,
'words-': 8628,
'words.': 12949,
'words...': 1679,
'words....': 8203,
'work': 11619,
'work,': 3725,
'work,Love': 9008,
'work.': 5692,
'work..': 12510,
'work...': 2345,
'work?': 5542,
'workAnd': 6748,
'workage.': 4267,
'workin': 683,
'workin.': 6957,
'working': 11740,
'working!': 8678,
'working.': 1309,
'working?': 1799,
'workout': 4052,
'works.': 2138,
'world': 11884,
'world,': 5852,
'world,may': 12567,
'world.': 11726,
'world....gnun': 8175,
'world:).very': 4467,
'worlds': 7244,
'worried': 6047,
'worried.': 5053,
'worries': 9801,
'worries,': 9871,
'worries.': 3134,
'worry': 7964,
'worry,': 7486,
'worry.': 8110,
'worry..': 12430,
'worry...use': 4329,
'worrying': 8961,
'worse': 4251,
'worse,': 747,
'worst': 13461,
'worth': 11361,
'worthless': 4255,
'wot': 7594,
'wot?': 4631,
'woul': 39,
'would': 6213,
'would,': 11895,
"wouldn't": 5898,
'wow': 2668,
'wow.': 10067,
'wrecked.': 1073,
'wrench': 11518,
'write': 329,
'writhing': 2057,
'wrking': 396,
'wrks': 491,
'wrnog..': 9356,
'wrong': 2785,
'wrong!!': 7781,
'wrong,': 2201,
'wrongly': 2309,
'wrote.': 3739,
'ws': 8080,
'wt': 7907,
'wtc': 2235,
'wtf': 4384,
'wth': 11410,
'wthout': 6721,
'wud': 11420,
"wudn't": 4176,
'wuld': 9004,
'wuldnt': 12051,
'wun': 10229,
'www.07781482378.com': 9003,
'www.4-tc.biz': 6585,
'www.80488.biz': 1228,
'www.Applausestore.com': 3741,
'www.B4Utele.com': 12557,
'www.Idew.com': 4444,
'www.Ldew.com': 8881,
'www.Ldew.com.subs16+1win150ppmx3': 6282,
'www.Ldew.com1win150ppmx3age16': 9095,
'www.SMS.ac/u/bootydelious': 13697,
'www.SMS.ac/u/nat27081980': 232,
'www.areyouunique.co.uk': 13832,
'www.bridal.petticoatdreams.co.uk': 4903,
'www.clubzed.co.uk': 11310,
'www.cnupdates.com/newsletter.': 12935,
'www.comuk.net': 5145,
'www.dbuk.net': 7495,
'www.flirtparty.us': 7720,
'www.fullonsms.com': 394,
'www.getzed.co.uk': 6898,
'www.ldew.com': 2105,
'www.movietrivia.tv': 4806,
'www.music-trivia.net': 8306,
'www.orange.co.uk/ow': 13164,
'www.phb1.com': 4726,
'www.regalportfolio.co.uk.': 10508,
'www.ringtoneking.co.uk!': 12061,
'www.ringtones.co.uk,': 10108,
'www.rtf.sphosting.com': 10900,
'www.smsco.net': 13318,
'www.t-c.biz': 2662,
'www.textcomp.com': 7466,
'www.textpod.net': 7805,
'www.tkls.com': 9973,
'www.txt-2-shop.com': 13284,
'www.txt43.com': 3580,
'www.txt82228.com.': 6422,
'www.txttowin.co.uk': 1798,
'www.win-82050.co.uk': 6500,
'x': 12981,
"x'mas.": 938,
'x-net': 10316,
'x.': 13576,
'x\\""': 10922,
'xafter': 3267,
'xam': 742,
'xavier': 7198,
'xin': 2291,
'xmas': 12343,
'xmas!': 10993,
'xx': 6164,
'xxSP': 8059,
'xxx': 9906,
'xxx.': 3611,
'xxx\\""': 10806,
'xxxmobilemovieclub.com?n=QJKGIGHJJGCBL': 8929,
'xxxxxxx\\""': 185,
'xxxxxxxx': 9146,
'xxxxxxxxxxxxxX': 6026,
'xy': 530,
'xy?': 2847,
'y': 7216,
"y'day": 8183,
'y.': 1066,
'ya': 13354,
'ya!!': 8369,
'ya,': 3079,
'ya.': 12941,
'ya...': 7327,
'yahoo': 6590,
'yalrigu': 7433,
'yam': 2445,
'yan': 9635,
'yay': 4954,
'yay!': 10894,
'yck': 12483,
'yck?': 8380,
'yeah': 7743,
'yeah!': 5346,
'yeah,': 6370,
'yeah?': 9481,
'year': 2892,
'year,': 7083,
'year.': 8323,
'year?': 3058,
'years': 669,
'years!': 13199,
'years,': 3901,
'years.': 1069,
'years?': 6081,
'yelling': 7850,
'yelling.': 12209,
'yellow': 12055,
'yep': 3768,
'yer': 9746,
'yes': 5520,
'yes,': 9781,
'yes.': 4475,
'yest': 7313,
'yest...': 13069,
'yest?': 8560,
'yesterday': 12613,
'yesterday!': 3069,
'yesterday,': 1106,
'yesterday.': 5447,
'yesterday?': 6485,
'yet': 3118,
'yet)': 9625,
'yet,': 3441,
'yet.': 8561,
'yet..': 6058,
'yet...': 5558,
'yet?': 12608,
"yetty's": 1496,
'yetunde': 7946,
'yi': 13602,
'yifeng': 11293,
'yijue': 10225,
'yijue,': 3822,
'yijue.': 12057,
'yijue...': 7163,
'[email protected]': 4089,
'ym': 12853,
'yo': 7589,
'yo,': 10673,
'yo-Here': 6515,
'yo.': 687,
'yo?': 11656,
'yoga': 10200,
'yoga...': 755,
'yogasana': 12676,
'yogasana?': 2403,
'yor': 12751,
'yorge': 3363,
'you': 4315,
'you!': 9715,
'you!To': 3761,
"you'd": 2614,
"you'ld": 1350,
"you'll": 7503,
"you're": 7009,
"you've": 3168,
'you,': 243,
'you,so': 1006,
'you.': 11096,
'you..': 6584,
'you...': 5552,
'you....dear.with': 13025,
'you...money': 7512,
'you.:)k:)where': 10828,
'you.my': 208,
'you.thats': 9671,
'you/carlos': 3418,
'you:)any': 12569,
'you:-)': 12702,
'you:-.': 1347,
'you?': 6757,
'you?how': 3806,
'you?when': 6576,
'youPhone': 3174,
'your': 10299,
'your"': 13459,
'youre': 1200,
'yourinclusive': 8362,
'yours': 5451,
'yours,': 3846,
'yours.': 2950,
'yourself': 5300,
'yourself,': 13673,
'yourself.': 1479,
'yourself...': 6151,
'youuuuu': 825,
'youwanna': 10120,
'you\x89Û÷ll': 1908,
'youåÕre': 4863,
'yowifes.': 11156,
'yr': 8231,
'yrs': 8389,
'yrs.': 2116,
'ystrday.ice': 7047,
'yummy': 12448,
'yun': 2863,
'yuo': 10852,
'yuou': 7689,
'zac': 12742,
'zealand.': 13372,
'zed': 6924,
'zhong': 9695,
'zoom': 10119,
'zyada': 12428,
'|': 2270,
'||': 9082,
'~': 12920,
'\x89Û_': 10654,
'\x89Û_.': 9451,
'\x89Û_Thanks': 1947,
'\x89ÛÏ': 13448,
'\x89ÛÏHarry': 8534,
'\x89ÛÒ': 2086,
'Ì_': 13645,
"Ì_'ll": 820,
'Ì_.': 8660,
'Ì_...': 706,
'Ì_?': 13090,
'ÌÏ': 10303,
"ÌÏ'll": 214,
'å£1': 11979,
'å£1,500': 2125,
'å£1.50': 5516,
'å£1.50.': 10485,
'å£1.50/msg': 2157,
'å£1.50/msg.': 27,
'å£1.50/week.': 4851,
'å£1.50/wk."': 7941,
'å£1.50ea.': 1681,
'å£1.50perWKsub': 1270,
'å£1.50pm': 541,
'å£1.50pmmorefrommobile2Bremoved-MobyPOBox734LS27YF': 13701,
'å£1/minMobsmoreLKPOBOX177HP51FL': 12413,
'å£10': 454,
'å£10)': 13763,
'å£10,000': 10956,
'å£100': 12540,
'å£100,000': 7250,
'å£1000': 5210,
'å£1000.': 6666,
'å£12': 13669,
'å£1250': 2810,
'å£1450': 1265,
'å£150': 4460,
'å£1500': 13650,
'å£1million': 1765,
'å£2,000': 6174,
'å£200': 304,
'å£2000': 11640,
'å£250': 11556,
'å£250k': 5269,
'å£3': 13140,
'å£3.00': 13172,
'å£3/wk': 6157,
'å£33.65': 124,
'å£350': 9125,
'å£350!': 1366,
'å£4.50.': 3011,
'å£400': 11625,
'å£48,': 8854,
'å£5': 8171,
'å£5/month': 196,
'å£50': 99,
'å£50-å£500.': 343,
'å£500': 10005,
'å£500.': 9442,
'å£5000': 10315,
'å£5000,': 4463,
'å£5000.00': 9250,
'å£50award.': 11503,
'å£54.': 13545,
'å£6': 5846,
'å£600.': 6172,
'å£71.': 3561,
'å£75,000.': 7415,
'å£79': 2162,
'å£800': 10666,
'å£900': 9385,
'å£s': 7681,
'åÈ10': 10146,
'åÐ': 7146,
'åÒHarry': 13606,
"åÒIt's": 12858,
'åÔMORROW.': 6665,
'åÔrents': 8506}
###Markdown
We are going to use the count of words as the input to our neural network. The `vocab_vector` will have columns for all the words in our training data in the form of `{key: value}` i.e `{word: count}` as held by the `word_column_dict` python `Dictionary`. The individual word counts in any particular text is updated from 0 to a number based on a word's total count in any single text.This means that the words with a higher count might have a higher weight in determining whether a text is a spam or not.
###Code
def update_input_layer(text):
    """Refill the global bag-of-words vector with word counts for ``text``.

    Side effects:
      - pretty-prints ``text`` (``pp`` is presumably a ``pprint`` printer
        configured earlier in the notebook -- not visible here)
      - zeroes and repopulates the module-level ``vocab_vector`` using the
        module-level ``word_column_dict`` (word -> column index) mapping.

    NOTE(review): a word absent from ``word_column_dict`` raises KeyError.
    """
    pp.pprint(text)
    global vocab_vector
    # clear out previous state, reset the vector to be all 0s
    vocab_vector *= 0
    for word in text.split(" "):
        # increment the count in the column assigned to this word
        vocab_vector[0][word_column_dict[word]] += 1
# Demo: populate the input layer from a randomly chosen training text.
update_input_layer(training_data["text"][random.randrange(0,4572,4)])
###Output
'Can you open the door?'
###Markdown
Build the SpamClassificationNeuralNetwork
###Code
import time
import sys
# Let's tweak our network from before to model these phenomena
class SpamClassificationNeuralNetwork(object):
    """A minimal two-layer neural network for spam/ham text classification.

    The input layer is a bag-of-words count vector over the training
    vocabulary, the hidden layer is linear (no nonlinearity), and the single
    output unit applies a sigmoid.  Training is plain per-sample stochastic
    gradient descent.
    """

    def __init__(self, training_data, num_hidden_nodes = 10, num_epochs = 10, learning_rate = 0.1):
        """Build the vocabulary from ``training_data`` and initialize weights.

        Parameters
        ----------
        training_data : pandas.DataFrame
            Must contain a ``text`` column (raw messages) and a
            ``label_tag`` column (0 = ham, 1 = spam).
        num_hidden_nodes : int
            Width of the hidden layer.
        num_epochs : int
            Number of full passes over the training set.
        learning_rate : float
            SGD step size.
        """
        # set our random number generator so weight init is reproducible
        np.random.seed(1)
        # keep a reference so train() does not depend on a module-level global
        # (the original read the notebook-global `training_data` instead)
        self.training_data = training_data
        # pre-process data: builds self.vocab and self.word_to_column
        self.pre_process_data(training_data)
        self.num_features = len(self.vocab)
        # bag-of-words input vector, zeroed and refilled for every sample
        self.vocab_vector = np.zeros((1, len(self.vocab)))
        self.num_input_nodes = self.num_features
        self.num_hidden_nodes = num_hidden_nodes
        self.num_epochs = num_epochs
        self.num_output_nodes = 1
        self.learning_rate = learning_rate
        # Initialize weights: input->hidden and hidden->output
        self.weights_i_h = np.random.randn(self.num_input_nodes, self.num_hidden_nodes)
        self.weights_h_o = np.random.randn(self.num_hidden_nodes, self.num_output_nodes)

    def forward_backward_propagate(self, text, label):
        """Run one SGD update on a single (text, label) pair."""
        ### Forward pass ###
        # Input Layer: bag-of-words counts for this text
        self.update_input_layer(text)
        # Hidden layer (linear -- no activation)
        hidden_layer = self.vocab_vector.dot(self.weights_i_h)
        # Output layer: sigmoid squashes the logit into (0, 1)
        output_layer = self.sigmoid(hidden_layer.dot(self.weights_h_o))
        ### Backward pass ###
        # Output error and its gradient through the sigmoid
        output_layer_error = output_layer - label
        output_layer_delta = output_layer_error * self.sigmoid_derivative(output_layer)
        # Backpropagated error - to the hidden layer
        hidden_layer_error = output_layer_delta.dot(self.weights_h_o.T)
        # hidden layer gradients - the hidden layer has no nonlinearity, so its
        # delta equals the backpropagated error.  (Fix: this was previously set
        # to output_layer_error, a 1x1 scalar, which broadcast one identical
        # update across every hidden unit instead of per-unit gradients.)
        hidden_layer_delta = hidden_layer_error
        # update the weights - with gradient descent
        self.weights_h_o -= hidden_layer.T.dot(output_layer_delta) * self.learning_rate
        self.weights_i_h -= self.vocab_vector.T.dot(hidden_layer_delta) * self.learning_rate
        # count the prediction as correct when it lands on the right side of 0.5
        if(np.abs(output_layer_error) < 0.5):
            self.correct_so_far += 1

    def sigmoid(self,x):
        """Logistic activation: maps any real x into (0, 1)."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self,x):
        """Derivative of the sigmoid, expressed in terms of its output x."""
        return x * (1 - x)

    def train(self):
        """Train for ``num_epochs`` passes, printing running stats per sample."""
        for epoch in range(self.num_epochs):
            self.correct_so_far = 0
            start = time.time()
            for i in range(len(self.training_data)):
                # Forward and Back Propagation on one sample
                self.forward_backward_propagate(self.training_data["text"][i], self.training_data["label_tag"][i])
                # +0.001 avoids division by zero on the very first sample
                samples_per_second = i / float(time.time() - start + 0.001)
                sys.stdout.write("\rEpoch: "+ str(epoch)
                                 +" Progress: " + str(100 * i/float(len(self.training_data)))[:4]
                                 + " % Speed(samples/sec): " + str(samples_per_second)[0:5]
                                 + " #Correct: " + str(self.correct_so_far)
                                 + " #Trained: " + str(i+1)
                                 + " Training Accuracy: " + str(self.correct_so_far * 100 / float(i+1))[:4] + "%")
            print("")

    def pre_process_data(self, training_data):
        """Build ``self.vocab`` and the word -> column index mapping."""
        vocab = set()
        for review in training_data["text"]:
            for word in review.split(" "):
                vocab.add(word)
        self.vocab = list(vocab)
        self.word_to_column = {}
        for i, word in enumerate(self.vocab):
            self.word_to_column[word] = i

    def update_input_layer(self, text):
        """Refill ``self.vocab_vector`` with word counts for ``text``.

        Words not seen during training are ignored rather than raising
        KeyError.  (Fix: the original looked words up in the notebook-global
        ``word_column_dict`` instead of ``self.word_to_column``.)
        """
        # clear out previous state, reset the vector to be all 0s
        self.vocab_vector *= 0
        for word in text.split(" "):
            column = self.word_to_column.get(word)
            if column is not None:
                self.vocab_vector[0][column] += 1
# Build the classifier from the training set (this constructs the vocabulary)
# and run SGD training; per-epoch progress is printed to stdout.
nn = SpamClassificationNeuralNetwork(training_data, num_epochs = 10, learning_rate=0.01)
nn.train()
###Output
Epoch: 0 Progress: 99.9 % Speed(texts/sec): 2107. #Correct: 3296 #Trained: 4572 Training Accuracy: 72.0%
Epoch: 1 Progress: 99.9 % Speed(texts/sec): 2165. #Correct: 3887 #Trained: 4572 Training Accuracy: 85.0%
Epoch: 2 Progress: 99.9 % Speed(texts/sec): 2187. #Correct: 4042 #Trained: 4572 Training Accuracy: 88.4%
Epoch: 3 Progress: 99.9 % Speed(texts/sec): 2136. #Correct: 4139 #Trained: 4572 Training Accuracy: 90.5%
Epoch: 4 Progress: 99.9 % Speed(texts/sec): 2157. #Correct: 4192 #Trained: 4572 Training Accuracy: 91.6%
Epoch: 5 Progress: 99.9 % Speed(texts/sec): 2181. #Correct: 4251 #Trained: 4572 Training Accuracy: 92.9%
Epoch: 6 Progress: 99.9 % Speed(texts/sec): 2144. #Correct: 4290 #Trained: 4572 Training Accuracy: 93.8%
Epoch: 7 Progress: 99.9 % Speed(texts/sec): 2137. #Correct: 4314 #Trained: 4572 Training Accuracy: 94.3%
Epoch: 8 Progress: 99.9 % Speed(texts/sec): 2158. #Correct: 4353 #Trained: 4572 Training Accuracy: 95.2%
Epoch: 9 Progress: 99.9 % Speed(texts/sec): 2150. #Correct: 4381 #Trained: 4572 Training Accuracy: 95.8%
|
tutorials/Tutorial 5. Adversarial Autoencoder.ipynb | ###Markdown
Tutorial 5: Adversarial Autoencoder**Author** - [Yatin Dandi](https://yatindandi.github.io)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/torchgan/torchgan/blob/master/tutorials/Tutorial%205.%20Adversarial%20Autoencoder.ipynb)In this tutorial we will explore **Adversarial Autoencoders** (AAE), which use Generative Adversarial Networks to perform variational inference. As explained in [Adversarial Autoencoders (Makhzani et. al.)](https://arxiv.org/pdf/1511.05644), the aggregated posterior distribution of the latent representation of the autoencoder is matched to an arbitrary prior distribution using adversarial training.The tutorial helps you with the following:1. General workflow for implementing GAN models with inference networks using **TorchGAN**2. Implementing custom losses with a nonstandard training loop This tutorial assumes that your system has **PyTorch** and **TorchGAN** installed properly. If not, the following code block will try to install the **latest tagged version** of TorchGAN. If you need to use some other version head over to the installation instructions on the [official documentation website](https://torchgan.readthedocs.io/en/latest/).
###Code
# Import torchgan if it is already installed; otherwise pip-install the latest
# tagged release into the current interpreter and import it afterwards.
try:
    import torchgan

    print(f"Existing TorchGAN {torchgan.__version__} installation found")
except ImportError:
    import subprocess
    import sys

    # Install into the exact interpreter running this notebook.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "torchgan"])
    import torchgan

    print(f"Installed TorchGAN {torchgan.__version__}")
###Output
_____no_output_____
###Markdown
IMPORTS
###Code
# General Imports
import os
import random
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from IPython.display import HTML
# Pytorch and Torchvision Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as T
from torch.optim import Adam
import torch.utils.data as data
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchvision.utils as vutils
# Torchgan Imports
import torchgan.models as models
import torchgan.losses as losses
from torchgan.trainer import Trainer
# Set random seed for reproducibility: seeds both Python's `random` module and
# PyTorch's global RNG so weight init and sampling are repeatable run-to-run.
manualSeed = 999
random.seed(manualSeed)
torch.manual_seed(manualSeed)
print("Random Seed: ", manualSeed)
###Output
_____no_output_____
###Markdown
LOAD THE DATASET We make the following transforms before feeding the **MNIST Dataset** into the networks: 1. The default size of MNIST is $1 \times 28 \times 28$. However, by convention, the default input size in **torchgan.models** is a power of 2 and at least 16. Hence we shall be resizing the images to $1 \times 32 \times 32$. One can also **zero-pad** the boundary, without any noticeable difference. 2. The output quality of GANs is improved when the images are constrained in the range $(-1, 1)$. The images are normalized with a mean and standard deviation of **0.5**, thereby constraining most of the inputs in the range $(-1, 1)$. Finally, the **torchgan.trainer.Trainer** needs a **DataLoader** as input. So we are going to construct a DataLoader for the MNIST Dataset.
###Code
# MNIST training set, resized from 28x28 to 32x32 (torchgan models expect a
# power-of-2 size >= 16) and normalized with mean/std 0.5 so pixel values land
# mostly in (-1, 1).
dataset = dsets.MNIST(
    root="./mnist",
    train=True,
    transform=transforms.Compose(
        [
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5,), std=(0.5,)),
        ]
    ),
    download=True,
)
# Mini-batches of 64, shuffled each epoch -- this is what the Trainer consumes.
loader = data.DataLoader(dataset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
GENERATOR MODEL The AAE Generator corresponds to the encoder of the autoencoder. It takes as input an image in the form of a torch Tensor of size $batch\ size \times 1 \times 32 \times 32$ and outputs a tuple of reconstructed images of size $batch\ size \times 1 \times 32 \times 32$ and the encoding of size $batch\ size \times \ encoding \ dims$. The sampling of images is done by feeding the noise through the decoder. Unlike GANs, AAE allows inference of latent encodings from images which can be used for various downstream tasks. The distribution of the sampled encodings from the dataset (the aggregated posterior) is encouraged to match the prior distribution through adversarial training. Here we have implemented the deterministic version of the autoencoder which gives similar results to the variants with gaussian/universal approximator posterior as described in Adversarial Autoencoders (Makhzani et. al.) **For encoder**:1. **Channel Dimension**: $input \ channels \rightarrow d \rightarrow 4d \rightarrow 16d \rightarrow \ encoding \ dims$.2. **Image size**: $(32 \times 32) \rightarrow (16 \times 16) \rightarrow (4 \times 4) \rightarrow (1 \times 1)$**For decoder (sampling)**:1. **Channel Dimension**: $\ encoding \ dims \rightarrow d \rightarrow 4d \rightarrow 16d \rightarrow input \ channels$.2. **Image size**: $(1 \times 1) \rightarrow (4 \times 4) \rightarrow (16 \times 16) \rightarrow (32 \times 32)$LeakyReLU is used as the default nonlinearity in both the encoder and the decoder as Relu kills most of the gradients. One can easily change the nonlinearity of the intermediate and the last layers as per their preference by passing them as parameters during initialization of the Generator object.
###Code
class AdversarialAutoencoderGenerator(models.Generator):
    """AAE generator: a convolutional encoder/decoder pair.

    ``forward`` in training mode returns ``(reconstruction, encoding)``;
    in eval mode the input is treated as a latent code and only decoded.
    ``sample`` maps a latent vector through ``decoder_fc`` and the
    transposed-conv decoder to an image.
    """

    def __init__(
        self,
        encoding_dims,
        input_size,
        input_channels,
        step_channels=16,
        nonlinearity=nn.LeakyReLU(0.2),
        # NOTE(review): the default LeakyReLU module instance is created once
        # at definition time and shared by every layer/instance that uses the
        # default -- harmless for a stateless activation, but worth knowing.
    ):
        super(AdversarialAutoencoderGenerator, self).__init__(encoding_dims)
        # Encoder: first conv halves the spatial size (stride 2), then each
        # block quadruples the channels and divides the size by 4 (stride 4)
        # until the feature map is 1x1.
        encoder = [
            nn.Sequential(
                nn.Conv2d(input_channels, step_channels, 5, 2, 2), nonlinearity
            )
        ]
        size = input_size // 2
        channels = step_channels
        while size > 1:
            encoder.append(
                nn.Sequential(
                    nn.Conv2d(channels, channels * 4, 5, 4, 2),
                    nn.BatchNorm2d(channels * 4),
                    nonlinearity,
                )
            )
            channels *= 4
            size = size // 4
        self.encoder = nn.Sequential(*encoder)
        # Project the flattened 1x1 feature map to the latent code.
        self.encoder_fc = nn.Linear(
            channels, encoding_dims
        )  # Can add a Tanh nonlinearity if training is unstable as noise prior is Gaussian
        # Project a latent code back to step_channels values, viewed as a
        # (step_channels, 1, 1) map that seeds the decoder.
        self.decoder_fc = nn.Linear(encoding_dims, step_channels)
        decoder = []
        size = 1
        channels = step_channels
        # Decoder mirrors the encoder: each block upsamples 4x, the final
        # transposed conv upsamples 2x and maps back to input_channels.
        while size < input_size // 2:
            decoder.append(
                nn.Sequential(
                    nn.ConvTranspose2d(channels, channels * 4, 5, 4, 2, 3),
                    nn.BatchNorm2d(channels * 4),
                    nonlinearity,
                )
            )
            channels *= 4
            size *= 4
        decoder.append(nn.ConvTranspose2d(channels, input_channels, 5, 2, 2, 1))
        self.decoder = nn.Sequential(*decoder)

    def sample(self, noise):
        """Decode a batch of latent vectors into images."""
        noise = self.decoder_fc(noise)
        # reshape (batch, step_channels) -> (batch, step_channels, 1, 1)
        noise = noise.view(-1, noise.size(1), 1, 1)
        return self.decoder(noise)

    def forward(self, x):
        """Training mode: reconstruct x and return its encoding too.
        Eval mode: x is assumed to already be a latent code to decode."""
        if self.training:
            encoding = self.encoder(x)
            # flatten the conv feature map before the fully-connected layer
            encoding = self.encoder_fc(
                encoding.view(
                    -1, encoding.size(1) * encoding.size(2) * encoding.size(3)
                )
            )
            return self.sample(encoding), encoding
        else:
            return self.sample(x)
###Output
_____no_output_____
###Markdown
DISCRIMINATOR NETWORK The AAE discriminator is trained to classify an input noise vector as being real(sampled from the prior) or fake (generated by the encoder). It is thus a simple MLP which outputs the logits to be used with the minimax loss.For reasons same as above we use a **Leaky ReLU** activation. The encoding size is halved successively till it reaches the threshold of 16. The last linear layer subsequently converts this to real output for the logit.
###Code
class AdversarialAutoencoderDiscriminator(models.Discriminator):
    """AAE discriminator: an MLP over latent codes.

    It outputs a single logit per code, classifying whether the code was
    drawn from the prior ("real") or produced by the encoder ("fake").
    The width is halved layer by layer until it reaches 16 or less, then a
    final linear layer maps to one output.  Only the hidden layers (not the
    first one) use batch normalization.
    """

    def __init__(self, input_dims, nonlinearity=nn.LeakyReLU(0.2)):
        super(AdversarialAutoencoderDiscriminator, self).__init__(input_dims)
        width = input_dims
        # First layer: plain linear + activation, no batch norm.
        layers = [nn.Sequential(nn.Linear(width, width // 2), nonlinearity)]
        width //= 2
        # Keep halving with batch-normalized blocks while wider than 16.
        while width > 16:
            half = width // 2
            layers.append(
                nn.Sequential(nn.Linear(width, half), nn.BatchNorm1d(half), nonlinearity)
            )
            width = half
        # Final projection to a single logit (no activation -- used with
        # BCE-with-logits).
        layers.append(nn.Linear(width, 1))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Return the real/fake logit for each latent code in the batch."""
        return self.model(x)
###Output
_____no_output_____
###Markdown
LOSS FUNCTIONS The loss function of the autoencoder consists of two terms:1. The reconstruction error: Mean squared error between the input image and the reconstruction.2. The negative log likelihood of the generated noise (encodings) w.r.t to the discriminator.The loss function of the discriminator consists of the minimax loss with the encodings generated by the encoder as the fake samples and the noise generated by the prior as the real samples.
###Code
class AdversarialAutoencoderGeneratorLoss(losses.GeneratorLoss):
    """Autoencoder (generator) objective: reconstruction + adversarial term.

    The total loss is ``0.999 * MSE(reconstruction, input)`` plus ``0.001``
    times the BCE-with-logits loss that rewards the encoder when the
    discriminator labels its codes as prior samples.
    """

    def forward(self, real_inputs, gen_inputs, dgz):
        """Combine the reconstruction error with the adversarial logit loss."""
        reconstruction_term = F.mse_loss(gen_inputs, real_inputs)
        adversarial_term = F.binary_cross_entropy_with_logits(
            dgz, torch.ones_like(dgz)
        )
        return 0.999 * reconstruction_term + 0.001 * adversarial_term

    def train_ops(
        self,
        generator,
        discriminator,
        optimizer_generator,
        real_inputs,
        device,
        batch_size,
        labels=None,
    ):
        """Run one optimization step for the encoder/decoder pair."""
        optimizer_generator.zero_grad()
        reconstructions, codes = generator(real_inputs)
        discriminator_logits = discriminator(codes)
        total_loss = self.forward(real_inputs, reconstructions, discriminator_logits)
        total_loss.backward()
        optimizer_generator.step()
        return total_loss.item()
class AdversarialAutoencoderDiscriminatorLoss(losses.DiscriminatorLoss):
    """Discriminator objective: prior samples are "real", encoder codes are "fake".

    Standard minimax loss: the mean of the BCE-with-logits terms for codes
    sampled from the Gaussian prior (target 1) and codes produced by the
    encoder (target 0).
    """

    def forward(self, dx, dgz):
        """Average the real-sample and fake-sample BCE-with-logits terms."""
        real_term = F.binary_cross_entropy_with_logits(dx, torch.ones_like(dx))
        fake_term = F.binary_cross_entropy_with_logits(dgz, torch.zeros_like(dx))
        return 0.5 * real_term + 0.5 * fake_term

    def train_ops(
        self,
        generator,
        discriminator,
        optimizer_discriminator,
        real_inputs,
        device,
        batch_size,
        labels=None,
    ):
        """Run one optimization step for the discriminator."""
        # Encode the batch; the reconstructions are not needed here.
        _, codes = generator(real_inputs)
        # One prior sample per input, drawn from a standard Gaussian.
        prior_samples = torch.randn(
            real_inputs.size(0), generator.encoding_dims, device=device
        )
        optimizer_discriminator.zero_grad()
        prior_logits = discriminator(prior_samples)
        code_logits = discriminator(codes)
        batch_loss = self.forward(prior_logits, code_logits)
        batch_loss.backward()
        optimizer_discriminator.step()
        return batch_loss.item()
# Loss objects handed to the Trainer (one generator loss, one discriminator
# loss).  NOTE: this rebinds the name `losses`, shadowing the
# `torchgan.losses` module imported above -- fine here because the module is
# not referenced again after this point.
losses = [
    AdversarialAutoencoderGeneratorLoss(),
    AdversarialAutoencoderDiscriminatorLoss(),
]
###Output
_____no_output_____
###Markdown
OPTIMIZERS AND HYPERPARAMETERS The models, their corresponding optimizers and other hyperparameters like the nonlinearities to be used in the intermediate layers are bundled in the form of a dictionary and provided to the trainer for instantiation. The dictionary specifies the models that are to be trained, the optimizers associated with said models and learning rate schedulers, if any1. "name": The class name for the model. Generally a subclass of the ```torchgan.models.Generator``` or ```torchgan.models.Discriminator```2. "args": Arguments fed into the class during instantiation, into its constructor 3. "optimizer": A dictionary containing the following key-value pairs defining the optimizer associated with the model * "name" : The class name of the optimizer. Generally an optimizer from the ```torch.optim``` package * "args" : Arguments to be fed to the optimizer during its instantiation, into its constructor * "var": Variable name for the optimizer. This is an optional argument. If this is not provided, we assign the optimizer the name ```optimizer_{}``` where {} refers to the variable name of the model. * "scheduler": Optional scheduler associated with the optimizer. Again this is a dictionary with the following keys * "name" : Class name of the scheduler * "args" : Arguments to be provided to the scheduler during instantiation, into its constructor
###Code
# Model/optimizer configuration consumed by torchgan's Trainer: for each model
# it specifies the class ("name"), its constructor kwargs ("args"), and the
# optimizer class + kwargs.  Both models share a 128-dim latent space.
network = {
    "generator": {
        "name": AdversarialAutoencoderGenerator,
        "args": {"encoding_dims": 128, "input_size": 32, "input_channels": 1},
        "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
    },
    "discriminator": {
        "name": AdversarialAutoencoderDiscriminator,
        "args": {"input_dims": 128,},
        "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
    },
}
###Output
_____no_output_____
###Markdown
TRAINING THE ADVERSARIAL AUTOENCODER Next we simply feed the network descriptors and the losses we defined previously into the Trainer. Then we pass the **MNIST DataLoader** to the trainer object and wait for training to complete.---Important information for visualizing the performance of the GAN will be printed to the console. The best and recommended way to visualize the training is to use **tensorboardX**. It plots all the data and periodically displays the generated images. It allows us to track failure of the model early. *NB: Training the models is quite expensive. Hence we will train the models for **10** epochs if a GPU is available, else we will be training for only **5** epochs. We recommend using the **GPU runtime** in Colab. The images will not look even close to realistic in **5** epochs, but it shall be enough to show that the model is learning to generate good quality images. If you have access to powerful GPUs or want to see realistic samples, I would recommend simply increasing the **epochs** variable (to around **200**) in the next code block.*
###Code
# Pick the device and epoch budget: 10 epochs on GPU, 5 on CPU (training on
# CPU is slow, so the notebook keeps the CPU run short).
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    # Use deterministic cudnn algorithms
    torch.backends.cudnn.deterministic = True
    epochs = 10
else:
    device = torch.device("cpu")
    epochs = 5

print("Device: {}".format(device))
print("Epochs: {}".format(epochs))

# Build the Trainer from the network/loss definitions above and run it over
# the MNIST DataLoader; sample_size controls the image grid saved per epoch.
trainer = Trainer(network, losses, sample_size=64, epochs=epochs, device=device)
trainer(loader)
###Output
_____no_output_____
###Markdown
VISUALIZING THE SAMPLES Once training is complete, one can easily visualize the loss curves, gradient flow and sampled images per epoch on either the **TensorboardX** or **Vizdom** backends. For the purposes of this tutorial, we plot some of the sampled images here itself.*NB: It is highly recommended to view the results on TensorboardX or Vizdom if you are running this tutorial locally*
###Code
# Grab a batch of real images from the dataloader
real_batch = next(iter(loader))

# Plot the real images (left panel) as a 64-image grid; make_grid returns a
# CHW tensor, so transpose to HWC for matplotlib.
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(
    np.transpose(
        vutils.make_grid(
            real_batch[0].to(device)[:64], padding=5, normalize=True
        ).cpu(),
        (1, 2, 0),
    )
)

# Plot the fake images from the last epoch (right panel), loaded from the
# per-epoch PNGs the Trainer wrote under trainer.recon.
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(plt.imread("{}/epoch{}_generator.png".format(trainer.recon, trainer.epochs)))
plt.show()

# Build an animation stepping through the sample grid saved after each epoch.
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [
    [plt.imshow(plt.imread("{}/epoch{}_generator.png".format(trainer.recon, i)))]
    for i in range(1, trainer.epochs + 1)
]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)

# Play the animation inline in the notebook
HTML(ani.to_jshtml())
###Output
_____no_output_____ |
testing_notebooks/results.ipynb | ###Markdown
Real SNR
###Code
# Real-SNR results.  Each variable holds three slots per model variant; only
# one slot is populated per run -- presumably one slot per test condition,
# with 0 meaning "not measured yet" (TODO confirm against the plotting code).
e1_wav_0 = [0, 0, 0]
e1_wav_1 = [0, 0, 0]
e1_wav_3 = [0, 4.66e-3, 0]
e1_L256 = [4.54e-3, 0, 0]

e2_wav_0 = [0, 0, 0]
e2_wav_1 = [0, 0, 0]
e2_wav_3 = [0, 3.60e-3, 0]
e2_L256 = [3.78e-3, 0, 0]

R2_wav_0 = [0, 0, 0]
R2_wav_1 = [0, 0, 0]
R2_wav_3 = [0, 1.49e-2, 0]
R2_L256 = [1.86e-2, 0, 0]

# big models -- same metrics for the larger model variants
e1_big_wav_3 = [4.65e-3, 0, 0]
e1_big_L256 = [5.05e-3, 0, 0]

e2_big_wav_3 = [3.59e-3, 0, 0]
e2_big_L256 = [5.05e-3, 0, 0]

R2_big_wav_3 = [1.48e-2, 0, 0]
R2_big_L256 = [2.06e-2, 0, 0]
###Output
_____no_output_____
###Markdown
Flat SNR
###Code
# Flat-SNR results.
# Densities:
# s
# As above, three slots per variant; here only the middle slot is populated
# -- presumably a different test condition than the Real-SNR runs (TODO
# confirm which slot maps to which density).
e1_wav_0 = [0, 3.54e-3, 0]
e1_wav_1 = [0, 3.39e-3, 0]
e1_wav_3 = [0, 4.33e-3, 0]
e1_L256 = [0, 3.35e-3, 0]

e2_wav_0 = [0, 3.06e-3, 0]
e2_wav_1 = [0, 2.63e-3, 0]
e2_wav_3 = [0, 3.10e-3, 0]
e2_L256 = [0, 2.70e-3, 0]

R2_wav_0 = [0, 1.27e-2, 0]
R2_wav_1 = [0, 1.08e-2, 0]
R2_wav_3 = [0, 1.13e-2, 0]
R2_L256 = [0, 1.18e-2, 0]
###Output
_____no_output_____ |
plate_recognizer/notebooks/mnist_data_test.ipynb | ###Markdown
Clustering on MNIST. Here is the result of clustering experiments on MNIST data. I wanted to see how robust the clustering algorithm is for the data. I was hoping to see that most of the images would cluster into 10 groups. Unfortunately, this is not what I see. The images were too similar and the clustering algorithm could not reliably group them into neat categories, whether you use the cosine or Euclidean distance method. The lesson? You need to know the dataset well and apply what works for the given dataset. Method Definitions Feature Map Clustering
###Code
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.preprocessing import image
from sklearn import preprocessing # to normalise existing X
# #Calculate similar matrics
# def cosine_similarity(ratings):
# sim = ratings.dot(ratings.T)
# if not isinstance(sim,np.ndarray):
# sim = sim.toarray()
# norms = np.array([np.sqrt(np.diagonal(sim))])
# return (sim/norms/norms.T)
def get_feature_maps(input):
    """Push an image batch through a randomly initialised VGG16 backbone.

    NOTE(review): the parameter name shadows the builtin ``input``; it is
    kept unchanged to preserve the function's public interface.

    :param input: image batch of shape (n, height, width, 1) -- presumably
        single-channel, judging by the ``input_shape`` below; confirm.
    :returns: ``(feature_maps, model)`` -- the conv-stack activations for
        every image, and the Keras model that produced them.
    """
    # VGG16-style channel preprocessing of the raw batch.
    vgg_input = preprocess_input(input)
    # include_top=False drops VGG16's three dense classifier layers;
    # weights=None leaves the convolutional filters randomly initialised.
    model = VGG16(weights=None, include_top=False,
                  input_shape=(input.shape[1], input.shape[2], 1))
    # Activations of the final conv block for each image in the batch.
    feature_maps = model.predict(vgg_input)
    return feature_maps, model
# #Calculate similar metrics
# features_compress = features.reshape(len(y_test), 7*7*512)
# sim = cosine_similarity(features_compress)
# model_vgg16, feature_maps = get_feature_maps(X)
from sklearn.cluster import KMeans
def get_clusters(X_train_pca, n_clusters=None):
    """Fit K-means on (PCA-reduced) data.

    :param X_train_pca: array-like of shape (n_samples, n_features).
    :param n_clusters: number of clusters.  Defaults to the module-level
        constant ``K`` (the previous implicit global read) so existing
        callers keep their behaviour, but it can now be passed explicitly.
    :returns: ``(fitted_kmeans, kmeans)``.  Note that ``KMeans.fit`` returns
        the estimator itself, so both elements reference the same object;
        the pair is kept for backward compatibility with existing callers.
    """
    if n_clusters is None:
        n_clusters = K  # fall back to the notebook-level constant
    # random_state=0 makes the clustering reproducible across runs.
    kmeans = KMeans(n_clusters=n_clusters, random_state=0)
    X_train_pca_clusters = kmeans.fit(X_train_pca)
    return X_train_pca_clusters, kmeans
def get_feature_map_clusters(X, K):
    """
    Cluster images by their VGG16 feature maps.

    param X: input data (batch of images)
    param K: intended number of clusters.  NOTE(review): this argument is
        not forwarded -- get_clusters() reads the module-level constant K
        instead, so the two only agree when the global K matches.
    returns: the (fitted_kmeans, kmeans) tuple produced by get_clusters().
        (The earlier "plots the frequency histogram" note was stale; no
        plotting happens in this function.)
    """
    X_fm, _ = get_feature_maps(X)
    # L2-normalise the flattened feature maps so Euclidean K-means
    # approximates clustering by cosine distance.
    X_fm_normalized = preprocessing.normalize(X_fm.reshape(len(X_fm), -1))
    return get_clusters(X_fm_normalized)
def to_cluster_idx(bins, labels):
    """Map each cluster id in ``bins`` to the sample positions holding it.

    :param bins: iterable of cluster ids (typically ``range(K)``).
    :param labels: array of per-sample cluster labels.
    :returns: dict ``{cluster_id: np.ndarray of sample indices}``.
    """
    return {cluster_id: np.nonzero(labels == cluster_id)[0]
            for cluster_id in bins}
def to_clusters_dict(X, y, X_clusters, K):
    """Partition X and y into per-cluster dicts keyed by cluster id 0..K-1.

    :param X: samples, indexable by an integer index array.
    :param y: labels aligned with X.
    :param X_clusters: fitted clustering result exposing ``labels_``.
    :param K: number of clusters.
    :returns: ``(X_dict, y_dict)`` mapping cluster id -> member samples/labels.
    """
    index_map = to_cluster_idx(range(K), X_clusters.labels_)
    X_dict = {cluster_id: X[index_map[cluster_id]] for cluster_id in range(K)}
    y_dict = {cluster_id: y[index_map[cluster_id]] for cluster_id in range(K)}
    return X_dict, y_dict
###Output
_____no_output_____
###Markdown
PCA Clustering
###Code
from sklearn.decomposition import PCA
def to_pca(X_train):
    """Flatten images to vectors, project to 2 PCA components, scatter-plot.

    NOTE(review): the scatter colours come from the module-level ``y_train``
    global rather than a parameter -- confirm callers always pair the passed
    X with that global's labels.

    :param X_train: image batch; reshaped to (n_samples, n_pixels).
    :returns: the 2-D PCA projection, shape (n_samples, 2).
    """
    X_train_flatten = X_train.reshape(X_train.shape[0], -1)
    # no-op expression left over from interactive inspection
    X_train_flatten.shape
    pca = PCA(2)
    X_train_pca = pca.fit_transform(X_train_flatten)
    # plot the 2-D projection along the way
    plt.figure(1)
    plt.clf()
    plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1], c=y_train, cmap="Paired")
    plt.colorbar()
    return X_train_pca
###Output
_____no_output_____
###Markdown
Plotting
###Code
import matplotlib.pyplot as plt
def plot_images(X, y, limit=10):
    """Show up to ``limit`` images from X, each titled with its label from y.

    The subplot grid is 10x5, so ``limit`` must not exceed 50.

    :param X: image batch, indexable as X[i].
    :param y: labels aligned with X, used as subplot titles.
    :param limit: maximum number of images to draw (default 10).
    """
    fig = plt.figure(figsize=(20, 40))
    # Draw at most `limit` images, fewer if the batch is smaller.
    end_id = min(len(y), limit)
    for i in range(end_id):
        # add_subplot() makes the new axes current; its return value is unused.
        fig.add_subplot(10, 5, i + 1)
        plt.axis('off')
        plt.title("{}".format(y[i]))
        # clip to [0, 1] so imshow treats the data as valid float pixels
        plt.imshow(np.clip(X[i], 0, 1))
def plot_cluster_histogram(X_clusters, K):
    """Bar-plot how many samples landed in each of the K clusters."""
    # bins=range(K + 1) yields one unit-wide bin per cluster label 0..K-1.
    histo_x, bins = np.histogram(X_clusters.labels_, bins=range(K + 1))
    plt.bar(bins[:-1], histo_x, align='center')
def plot_pca_clusters(X_train_pca, kmeans):
    """Visualise K-means decision regions over a 2-D PCA projection.

    Colours every point of a dense mesh by its predicted cluster, overlays
    the data points, and marks each cluster centroid with a white cross.

    :param X_train_pca: 2-D projected samples, shape (n_samples, 2).
    :param kmeans: fitted estimator exposing predict() and cluster_centers_.
    """
    # kmeans, X_train_pca_clusters = get_clusters(X_train_pca)
    # Step size of the mesh. Decrease to increase the quality of the VQ.
    h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].
    # Plot the decision boundary: assign a colour to each mesh point.
    # Pad the data range by 1 on each side so no point sits on the border.
    x_min, x_max = X_train_pca[:, 0].min() - 1, X_train_pca[:, 0].max() + 1
    y_min, y_max = X_train_pca[:, 1].min() - 1, X_train_pca[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Obtain labels for each point in mesh. Use last trained model.
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(2)
    # plt.clf()
    plt.imshow(Z, interpolation="nearest",
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired, aspect="auto", origin="lower")
    plt.plot(X_train_pca[:, 0], X_train_pca[:, 1], 'k.', markersize=2)
    # Plot the centroids as a white X
    centroids = kmeans.cluster_centers_
    plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=169, linewidths=3,
                color="w", zorder=10)
    plt.title("K-means clustering on the digits dataset (PCA-reduced data)\n"
              "Centroids are marked with white cross")
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xticks(())
    plt.yticks(())
    plt.show()
###Output
_____no_output_____
###Markdown
Executions Load MNIST data & rescaleLoad MNIST data and rescale from 28x28 to 32x32 so that they can be processed by VGG16
###Code
from keras.datasets import mnist
from skimage.transform import rescale
import numpy as np
# Target side length: MNIST's 28x28 digits are upscaled to 32x32 for VGG16.
IMAGE_SIZE = 32
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Normalise pixel intensities from 0..255 to 0..1.
X_train, X_test = X_train / 255.0, X_test / 255.0
# rescale to 32x32 to be processed by VGG16
X_train_rescaled = np.array([rescale(x,
                                     IMAGE_SIZE/X_train.shape[1],
                                     anti_aliasing=False) for x in X_train])
###Output
_____no_output_____
###Markdown
Visualize the data a bit
###Code
plot_images(X_train, y_train)
plot_images(X_train_rescaled, y_train)
###Output
_____no_output_____
###Markdown
Use feature map to get clusters
###Code
K = 10
X_train_fm_clusters, kmeans = get_feature_map_clusters(X_train_rescaled, K)
X_train_fm_dict, y_train_fm_dict = to_clusters_dict(X_train_rescaled, y_train, X_train_fm_clusters, K)
kmeans
plot_cluster_histogram(X_train_fm_clusters, K)
###Output
_____no_output_____
###Markdown
Use PCA to get clusters
###Code
skl_digits_pca = to_pca(X_train_rescaled)
skl_digits_pca_clusters, skl_digits_kmeans = get_clusters(skl_digits_pca)
plot_pca_clusters(skl_digits_pca, skl_digits_kmeans)
plot_cluster_histogram(skl_digits_pca_clusters, K)
skl_digits_pca_idx = to_cluster_idx(range(K), skl_digits_pca_clusters.labels_)
# X_train_pca_dict, y_train_pca_dict = to_clusters_dict(X_train_rescaled, y_train, skl_digits_pca_clusters, K)
# from collections import Counter
for key, idx in skl_digits_pca_idx.items():
print(key, len(idx))
cluster0 = X_train_rescaled[skl_digits_pca_idx[0]]
cluster0.shape
from scipy.spatial.distance import cdist
def find_duplicates(X_train_pca, threshold=0.001):
    """Locate near-identical rows (pairwise distance below ``threshold``).

    :param X_train_pca: 2-D array of embeddings, one row per sample.
    :param threshold: distance below which two rows count as duplicates.
    :returns: ``(to_remove, dupes)`` -- ``dupes[i]`` lists every row index
        within ``threshold`` of row i (always including i itself), and
        ``to_remove`` keeps the first index of each duplicate group while
        marking the rest for deletion.
    """
    # Full pairwise Euclidean distance matrix -- O(n^2) memory.
    distances = cdist(X_train_pca, X_train_pca)
    # For each row, every index that is (near-)identical to it.
    dupes = [np.flatnonzero(distances[row] < threshold).tolist()
             for row in range(distances.shape[0])]
    to_remove = set()
    for group in dupes:
        # keep group[0], drop the rest (no-op for singleton groups)
        to_remove.update(group[1:])
    print("Found {} duplicates".format(len(to_remove)))
    return to_remove, dupes
# to_remove = find_duplicates(skl_digits_pca)
pca_data = skl_digits_pca[skl_digits_pca_idx[0]]
to_remove, dupes = find_duplicates(pca_data)
for d in dupes:
if len(d) > 1:
print(d)
to_display = [224, 6658, 5289, 6506]
plot_images(X_train_rescaled[to_display], y_train[to_display])
skl_digits_pca = to_pca(X_train)
dupes
X_test.shape
X_train_pca_dict[0]
###Output
_____no_output_____ |
semana05-16-10-2020/.ipynb_checkpoints/parte01-regressao-linear-checkpoint.ipynb | ###Markdown
Andrew Ng’s Regressão Linear - Solução Pythônica Regressão Linear com uma variável Notação- **m** = número de exemplos treináveis;- **x's** = dados de entrada / características;- **y's** = dados de saída / valor esperado;- **h** = hipótese do valor de y; ![ciclo de vida da regressão linear](Imagens/ciclo-de-vida-regressao-linear.png) ![determinando a hipótese](Imagens/determinando-hipotese.png)
###Code
# importando as bibliotecas do python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# importando os dados
# Load Andrew Ng's ex1 dataset (city population vs. food-truck profit).
data = pd.read_csv('ex1data1.txt', names=['População', 'Lucro'])
# preview the first 5 rows
data.head()
plt.figure(figsize = (20,10)) # set the figure dimensions
X = data['População'].values # population column
Y = data['Lucro'].values # profit column
m = len(Y) # number of training examples
plt.scatter(X, Y, color = "red", marker = "*") # plot the raw data
# label the axes and title the chart (labels kept in Portuguese)
plt.xlabel('População da Cidade')
plt.ylabel('Lucro da População')
plt.title('Plotando os Dados de Treinamento')
print(X)
print(Y)
X = np.append(np.ones([m,1]), X.reshape(m,1), axis=1) # prepend bias column x0 = 1 (design matrix becomes m x 2)
print(X)
Y = Y.reshape(m,1) # reshape to an m x 1 column vector
Y.shape
teta = np.zeros([2,1]) # zero-initialised parameter vector (intercept and slope)
teta
###Output
_____no_output_____
###Markdown
![objetivos da regressão linear](Imagens/objetivos-regressao-linear.png)
###Code
def Custo(x, y, teta):
    """Mean-squared-error cost J(teta) for linear regression.

    :param x: design matrix, shape (m, n).
    :param y: target column vector, shape (m, 1).
    :param teta: parameter column vector, shape (n, 1).
    :returns: scalar cost J = (1 / 2m) * sum((x.teta - y)^2).
    """
    n_amostras = len(y)          # m, the number of training examples
    residuos = x.dot(teta) - y   # prediction error h(x) - y
    return 1/(2*n_amostras)*(np.sum(residuos**2))
custo = Custo(X, Y, teta) # Chama a função que calcula o custo e a imprime
print(custo)
###Output
32.072733877455676
###Markdown
Usando o Gradiente Descendente para minimizar o custo ![gradiente descendente](Imagens/visualizando-gradiente-descendente.png) ![agoritmo do gradiente descendente](Imagens/gradiente-descendente-algoritmo.png) ![derivadas do gradiente descendente](Imagens/gradiente-descendente-derivadas.png) ![taxa de aprendizado](Imagens/taxa-de-aprendizado.png)
###Code
interacoes = 1000 # número de interações
alfa = 0.01 # taxa de aprendizado
def GradienteDescendente(x, y, teta, alfa, interacoes):
    """Batch gradient descent for linear regression.

    :param x: design matrix, shape (m, n).
    :param y: target column vector, shape (m, 1).
    :param teta: initial parameter vector, shape (n, 1).
    :param alfa: learning rate.
    :param interacoes: number of iterations to run.
    :returns: ``(teta, historico_custo)`` -- the fitted parameters and the
        cost recorded after every update step.
    """
    historico_custo = []
    m = len(y)
    for _ in range(interacoes):
        erro = x.dot(teta) - y                     # h(x) - y
        teta = teta - (alfa/m) * (x.T.dot(erro))   # simultaneous update of all parameters
        historico_custo.append(Custo(x, y, teta))  # track cost after the step
    return teta, historico_custo
# obtendo os parâmetros da equação da reta e o histórico do custo em cada etapa de treinamento
novo_teta, historico_custo = GradienteDescendente(X, Y, teta, alfa, interacoes)
novo_teta
historico_custo = np.array(historico_custo) # transformando a lista em um array numpy
historico_custo.shape
# definindo a dimensão do gráfico
plt.figure(figsize = (20,10))
# plotando os dados x e y no gráfico
plt.scatter(X[:,1], Y, color = "red", marker = "*") # plotar os dados
plt.plot(X[:,1], np.dot(X, novo_teta), label = "Previsão")
plt.xlabel('População da Cidade')
plt.ylabel('Lucro da População')
plt.title('Plotando os Dados de Treinamento')
# colocando a legenda no gráfico
plt.legend()
# colocando uma grade ao gráfico
plt.grid(True)
# removendo a moldura do gráfico
plt.box(False)
def predicao(x, teta):
    """Print the profit prediction for one feature vector.

    The printed message scales x[1] by 1000 and the raw prediction by 10000,
    matching the notebook's units.  Always returns None; the result is only
    printed, never returned.

    :param x: feature vector [1, population_feature].
    :param teta: fitted parameter vector from gradient descent.
    """
    pred = np.dot(x, teta)
    entrada = np.array(x)
    habitantes = entrada[1] * 1000
    lucro = pred * 10000
    print('Para uma população de {} habitantes, teremos {} $ de lucro'.format(habitantes, lucro))
    return None
predicao(([1, 3.5]), novo_teta)
predicao(([1, 7]), novo_teta)
plt.figure(figsize = (20, 10))
plt.plot(historico_custo)
plt.ylabel('Custo J')
plt.xlabel('Número de Interações')
plt.title('Minimizando o custo usando gradiente descendente')
###Output
_____no_output_____ |
systemIdentification/system_identification.ipynb | ###Markdown
Nonstationarity Idea 1: Assuming a linear drift- Add time to the set of features- Let linear regression learn the parameter for the drift (shift/time) Idea 2: Assuming some kind of drift or seasonality- Stationarize time series using Difference Transform (https://machinelearningmastery.com/remove-trends-seasonality-difference-transform-python/)- https://pypi.org/project/stationarizer/ Idea 3: No assumptions about the nature of the nonstationarity- Train initial model, measure R^2 on test set- Monitor R^2 for all observations that were added later- If R^2 becomes too low, train a new model using the most recent observations, monitor R^2 for all observations that were added later
###Code
def load_data(path):
data = pd.read_csv(path).dropna(subset=['Shop'])[INPUT_FIELDS + [OUTPUT_FIELD]]
data['beta'] = data.apply(lambda row: row['PROVIDED_INTERFACE'] + (0 if row['PROVIDED_INTERFACE'] >= 2 else 1), axis=1)
data['Linear'] = data['RELIABILITY'] * data['CRITICALITY'] * (data['PROVIDED_INTERFACE'] + data['REQUIRED_INTERFACE'])
data['Saturating'] = data['RELIABILITY'] * \
data['CRITICALITY'] * \
data[' PMax'] * \
np.tanh(data['alpha'] * \
data['In Use REPLICA'] / data['LOAD']) * \
(data['PROVIDED_INTERFACE'] + data['REQUIRED_INTERFACE'])
data['Discontinuous'] = data['RELIABILITY'] * data['CRITICALITY'] * (data['REQUIRED_INTERFACE'] + 1) * data['IMPORTANCE'] * data['beta'] * data['PROVIDED_INTERFACE'] - 10 * data['ADT']
data['Combined'] = data['RELIABILITY'] * data['CRITICALITY'] * data['IMPORTANCE'] * data['beta'] * data[' PMax'] * np.tanh(data['alpha'] * data['In Use REPLICA'] / data['LOAD']) * (data['REQUIRED_INTERFACE'] + 1) - 10 * data['ADT']
return data
data_nonstationary = load_data('../data/TrainingmRUBiS_Theta0.01_NonStationary.csv')
data_nonstationary
sn.scatterplot(x='Combined', y='OPTIMAL_UTILITY', data=data_nonstationary)
sn.scatterplot(x='Combined', y='OPTIMAL_UTILITY', data=data)
X_nonstationary = data_nonstationary[INPUT_FIELDS + ['beta', 'Linear', 'Saturating', 'Discontinuous', 'Combined']].drop(['IMPORTANCE', 'CONNECTIVITY', 'REQUIRED_INTERFACE'], axis=1)
y_nonstationary = data_nonstationary[OUTPUT_FIELD]
run_ridge_online(X_nonstationary, y_nonstationary)
run_ridge_online(X, y)
###Output
_____no_output_____ |
diabetescsv-regression-aks/diabetescsv-regression-aks.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/training/train-within-notebook/train-within-notebook.png) Train and deploy a model_**Create and deploy a model directly from a notebook**_------ Contents1. [Introduction](Introduction)1. [Setup](Setup)1. [Data](Data)1. [Train](Train) 1. Viewing run results 1. Simple parameter sweep 1. Viewing experiment results 1. Select the best model1. [Deploy](Deploy) 1. Register the model 1. Create a scoring file 1. Create the environment configuration (yml file for Conda and pip packages) 1. Deploy the as web service on Azure Kubernetes Service (AKS) 1. Test the Web Service (run and HTTP methods) 1. Clean up--- IntroductionAzure Machine Learning provides capabilities to control all aspects of model training and deployment directly from a notebook using the AML Python SDK. In this notebook we will* connect to our AML Workspace* create an experiment that contains multiple runs with tracked metrics* choose the best model created across all runs* deploy that model as a serviceIn the end we will have a model deployed as a web service which we can call from an HTTP endpoint --- SetupCreate an Azure Machine Learning servcie in Azure, and launch the studio. Create a Workspace, a Compute Instance (VM) and a new Notebook running on that VM as a compute target. This example was forked from https://github.com/Azure/MachineLearningNotebooks, and further developed to present an end-to-end example. For this notebook we need the Azure ML SDK and access to our workspace. The following cell imports the SDK, checks the version, and accesses our already configured AzureML workspace. 
See more detail on [Git Integration](https://docs.microsoft.com/en-us/azure/machine-learning/concept-train-model-git-integration:~:text=Azure%20Machine%20Learning%20provides%20a%20shared%20file%20system,work%20with%20Git%20via%20the%20Git%20CLI%20experience) if you need to upload this notebook in AML.
###Code
import azureml.core
from azureml.core import Experiment, Workspace
# Check core SDK version number
print("This notebook was created using version 1.0.2 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
print("")
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep='\n')
###Output
_____no_output_____
###Markdown
--- DataWe will use the diabetes dataset for this experiment (https://aka.ms/diabetes-data). The dataset consists of 8 baseline variables for n=10000 diabetes patients: Pregnancies, PlasmaGlucose, DiastolicBloodPressure, TricepsThickness, SerumInsulin, BMI, DiabetesPedigree, and Age. The dataset has one dichotomous outcome variable: Diabetic.
###Code
from azureml.core import Dataset
import pandas as pd
from sklearn.model_selection import train_test_split
# load the diabetes dataset from the same folder where this notebook is located
print("Loading Diabetes Data from the CSV file...")
dataset = pd.read_csv('./diabetes.csv')
# Separate features and labels
X, y = dataset[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, dataset['Diabetic'].values
# Split data into training set and test set (80% Training and 20% Testing)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)
data = {
"train":{"X": X_train, "y": y_train},
"test":{"X": X_test, "y": y_test}
}
print ("Data contains", len(data['train']['X']), "training samples and",len(data['test']['X']), "test samples")
###Output
_____no_output_____
###Markdown
--- TrainLet's use scikit-learn to train a simple Ridge regression model. We use AML to record interesting information about the model in an Experiment. An Experiment contains a series of trials called Runs. During this trial we use AML in the following way:* We access an experiment from our AML workspace by name, which will be created if it doesn't exist* We use `start_logging` to create a new run in this experiment* We use `run.log()` to record a parameter, alpha, and an accuracy measure - the Mean Squared Error (MSE) to the run. We will be able to review and compare these measures in the Azure Portal at a later time.* We store the resulting model in the **working** directory, which is automatically captured by AML when the run is complete.* We use `run.complete()` to indicate that the run is over and results can be captured and finalized
###Code
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
import joblib
# Get an experiment object from Azure Machine Learning
experiment = Experiment(workspace=ws, name="train-diabetes-regression")
# Create a run object in the experiment
run = experiment.start_logging()
# Log the algorithm parameter alpha to the run; where alpha is between 0 and 1
run.log('alpha', 0.03)
# Create, fit, and test the scikit-learn Ridge regression model
regression_model = Ridge(alpha=0.03)
regression_model.fit(data['train']['X'], data['train']['y'])
preds = regression_model.predict(data['test']['X'])
# Output the Mean Squared Error to the notebook and to the run
print('Mean Squared Error is', mean_squared_error(data['test']['y'], preds))
run.log('mse', mean_squared_error(data['test']['y'], preds))
# Save the model to the working directory
model_file_name = 'diabetesregressionmodel.pkl'
joblib.dump(value = regression_model, filename = model_file_name)
# upload the model file explicitly into artifacts
run.upload_file(name = model_file_name, path_or_stream = model_file_name)
# Complete the run
run.complete()
###Output
_____no_output_____
###Markdown
Viewing run resultsAzure Machine Learning stores all the details about the run in the Azure cloud. Let's access those details by retrieving a link to the run using the default run output. Clicking on the resulting link will take you to an interactive page presenting all run information.
###Code
run
###Output
_____no_output_____
###Markdown
Simple parameter sweepNow let's take the same concept from above and modify the **alpha** parameter. For each value of alpha we will create a run that will store metrics and the resulting model. In the end we can use the captured run history to determine which model was the best for us to deploy. Note that by using `with experiment.start_logging() as run` AML will automatically call `run.complete()` at the end of each loop. This example also uses the **tqdm** library to provide progress-bar feedback.
###Code
import numpy as np
from tqdm import tqdm
# list of numbers from 0 to 1.0 with a 0.10 interval
alphas = np.arange(0.0, 1.0, 0.10)
# try a bunch of alpha values in a Linear Regression (Ridge) model
for alpha in tqdm(alphas):
# create a bunch of runs, each train a model with a different alpha value
with experiment.start_logging() as run:
# Use Ridge algorithm to build a regression model
regression_model = Ridge(alpha=alpha)
regression_model.fit(X=data["train"]["X"], y=data["train"]["y"])
preds = regression_model.predict(X=data["test"]["X"])
mse = mean_squared_error(y_true=data["test"]["y"], y_pred=preds)
# log alpha, mean_squared_error and feature names in run history
run.log(name="alpha", value=alpha)
run.log(name="mse", value=mse)
# Save the model to the outputs directory for capture
joblib.dump(value=regression_model, filename=model_file_name)
###Output
_____no_output_____
###Markdown
Viewing experiment resultsSimilar to viewing the run, we can also view the entire experiment. The experiment report view in the Azure portal lets us view all the runs in a table, and also allows us to customize charts. This way, we can see how the alpha parameter impacts the quality of the model
###Code
# now let's take a look at the experiment in Azure portal.
experiment
###Output
_____no_output_____
###Markdown
Select the best model Now that we've created many runs with different parameters, we need to determine which model is the best for deployment. For this, we will iterate over the set of runs. From each run we will take the *run id* using the `id` property, and examine the metrics by calling `run.get_metrics()`. Since each run may be different, we do need to check if the run has the metric that we are looking for, in this case, **mse**. To find the best run, we create a dictionary mapping the run id's to the metrics.Finally, we use the `tag` method to mark the best run to make it easier to find later.
###Code
runs = {}
run_metrics = {}
# Create dictionaries containing the runs and the metrics for all runs containing the 'mse' metric
for r in tqdm(experiment.get_runs()):
metrics = r.get_metrics()
if 'mse' in metrics.keys():
runs[r.id] = r
run_metrics[r.id] = metrics
# Find the run with the best (lowest) mean squared error and display the id and metrics
best_run_id = min(run_metrics, key = lambda k: run_metrics[k]['mse'])
best_run = runs[best_run_id]
print('Best run is:', best_run_id)
print('Metrics:', run_metrics[best_run_id])
# Tag the best run for identification later
best_run.tag("Best Run")
###Output
_____no_output_____
###Markdown
--- DeployNow that we have trained a set of models and identified the run containing the best model, we want to deploy the model for real time inference. The process of deploying a model involves* registering a model in your workspace* creating a scoring file containing init and run methods* creating an environment settings file describing packages necessary for your scoring file* creating a deployment configuration (for AKS in this example)* deploying the model and packages as a web service on an AKS cluster Register a modelWe have already identified which run contains the "best model" by our evaluation criteria. Each run has a file structure associated with it that contains various files collected during the run. Since a run can have many outputs we need to tell AML which file from those outputs represents the model that we want to use for our deployment. We can use the `run.get_file_names()` method to list the files associated with the run, and then use the `run.register_model()` method to place the model in the workspace's model registry.When using `run.register_model()` we supply a `model_name` that is meaningful for our scenario and the `model_path` of the model relative to the run. In this case, the model path is what is returned from `run.get_file_names()`
###Code
from azureml.core.model import Model
# View the files in the run
for f in best_run.get_file_names():
print(f)
model_path = "diabetesregressionmodel.pkl"
model_name = "diabetesregressionmodel.pkl"
model = Model.register(model_path = model_path,
model_name = model_name,
tags = {'area': "diabetes", 'type': "regression"},
description = "Ridge regression model to predict diabetes",
workspace =ws)
###Output
_____no_output_____
###Markdown
Once a model is registered, it is accessible from the list of models on the AML workspace. If you register models with the same name multiple times, AML keeps a version history of those models for you. The `Model.list()` lists all models in a workspace, and can be filtered by name, tags, or model properties.
###Code
# Find all models called "diabetesmodel" and display their version numbers
from azureml.core.model import Model
models = Model.list(ws, name=model_name)
for m in models:
print(m.name, m.version)
###Output
_____no_output_____
###Markdown
Create a scoring file Since your model file can essentially be anything you want it to be, you need to supply a scoring script that can load your model and then apply the model to new data. This script is your 'scoring file'. This scoring file is a python program containing, at a minimum, two methods init() and run(). The init() method is called once when your deployment is started so you can load your model and any other required objects. This method uses the get_model_path function to locate the registered model inside the docker container. The run() method is called interactively when the web service is called with one or more data samples to predict.Important: The schema decorators for pandas and numpy are required to implement the automatic swagger schema generation for input and output variablesAfter a successful run of the this script, the score.py file be created in the working folder
###Code
%%writefile score.py
import json
import pickle
import numpy as np
import pandas as pd
import joblib
from azureml.core.model import Model
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
def init():
    """Web-service start-up hook: load the registered model once per worker."""
    global model
    # Resolve the registered model file inside the deployed container.
    model_path = Model.get_model_path('diabetesregressionmodel.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)

# Example input/output used only to auto-generate the swagger schema.
input_sample = pd.DataFrame(data=[{
    "Pregnancies": 0,
    "PlasmaGlucose": 171,
    "DiastolicBloodPressure": 80,
    "TricepsThickness": 34,
    "SerumInsulin": 23,
    "BMI": 43.51,
    "DiabetesPedigree": 1.21,
    "Age": 21,
}])
output_sample = np.array([0])

@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score one request; ``data`` arrives as a DataFrame per the input schema."""
    try:
        result = model.predict(data)
        return result.tolist()
    except Exception as e:
        # NOTE(review): failures are returned as a plain string with a normal
        # response, so callers cannot distinguish errors by status code.
        error = str(e)
        return error
###Output
_____no_output_____
###Markdown
Create the environment settings The environment settings will also be exported into a yml file (myenv.yml) to verify the conda and pip packages.The yml file will be in the working folder for this deployment (but it is not needed - for verification only)This step will create the python environment with the required conda and pip packages/dependencies. And then, it will create the inference configuration that will build the Docker container based on the scoring file and the environment configuration. The Docker image is transparent and will be created and registered behind the scenes with the AzureML SDK.
###Code
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig
env = Environment('deploytocloudenv')
env.python.conda_dependencies = CondaDependencies.create(conda_packages=['numpy','scikit-learn'],pip_packages=['azureml-defaults','inference-schema[numpy-support]'])
inference_config = InferenceConfig(entry_script="score.py", environment=env)
with open ("myenv.yml","w") as f:
f.write(env.python.conda_dependencies.serialize_to_string())
###Output
_____no_output_____
###Markdown
Verify the myenv.yml file in the working folder to ensure it contains the exact following configurations
###Code
# DO NOT RUN THIS STEP - for verification only
# Conda environment specification. The dependencies defined in this file will
# be automatically provisioned for runs with userManagedDependencies=False.
# Details about the Conda environment file format:
# https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually
name: project_environment
dependencies:
# The python interpreter version.
# Currently Azure ML only supports 3.5.2 and later.
- python=3.6.2
- pip:
- azureml-defaults~=1.6.0
- inference-schema[numpy-support]
- numpy
- scikit-learn
channels:
- anaconda
- conda-forge
###Output
_____no_output_____
###Markdown
Provision the AKS Cluster
This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.
This step will take a few minutes. If it fails, try again or create the AKS cluster in the studio
###Code
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your AKS cluster
aks_name = 'aks-cluster'
# Verify that cluster does not exist already
try:
aks_target = ComputeTarget(workspace=ws, name=aks_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# Use the default configuration (can also provide parameters to customize)
prov_config = AksCompute.provisioning_configuration()
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
name = aks_name,
provisioning_configuration = prov_config)
if aks_target.get_status() != "Succeeded":
aks_target.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Deploy a web service with the model to the AKS Cluster
###Code
from azureml.core.webservice import AksWebservice
# Set the web service configuration (using default here)
aks_config = AksWebservice.deploy_configuration()
# # Enable token auth and disable (key) auth on the webservice
# aks_config = AksWebservice.deploy_configuration(token_auth_enabled=True, auth_enabled=False)
%%time
import datetime
aksservice_name ='aks-service'
print(str(datetime.datetime.now()))
# Create the webservice using all of the precreated configurations and our best model
aksservice = Model.deploy(workspace=ws,
name=aksservice_name,
models=[model],
inference_config=inference_config,
deployment_config=aks_config,
deployment_target=aks_target)
# Wait for the service deployment to complete while displaying log output
aksservice.wait_for_deployment(show_output = True)
print(aksservice.state)
print(aksservice.get_logs)
###Output
2020-12-13 19:18:36.103198
Tips: You can try get_logs(): https://aka.ms/debugimage#dockerlog or local deployment: https://aka.ms/debugimage#debug-locally to debug if deployment takes longer than 10 minutes.
Running.....
Succeeded
AKS service creation operation finished, operation "Succeeded"
Healthy
<bound method Webservice.get_logs of AksWebservice(workspace=Workspace.create(name='wstw', subscription_id='61489282-e75d-4996-b2ef-3126311e55e6', resource_group='tw'), name=aks-service, image_id=None, compute_type=None, state=AKS, scoring_uri=Healthy, tags=http://23.101.156.108:80/api/v1/service/aks-service/score, properties={}, created_by={'hasInferenceSchema': 'True', 'hasHttps': 'False'})>
CPU times: user 710 ms, sys: 302 ms, total: 1.01 s
Wall time: 33.1 s
###Markdown
Obtain the Swagger URL if successfully deployed
###Code
aksservice.swagger_uri
###Output
_____no_output_____
###Markdown
Test web service - run method
Call the web service with some dummy input data to get a prediction.
###Code
import json
# Raw dataset (Actual Diabetic is 1)
test_sample = json.dumps({"data": [{
"Pregnancies": 9,
"PlasmaGlucose": 103,
"DiastolicBloodPressure": 78,
"TricepsThickness": 25,
"SerumInsulin": 309,
"BMI": 29.58,
"DiabetesPedigree": 1.28,
"Age": 43,}]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aksservice.run(input_data=test_sample)
print(prediction)
# Raw dataset (Actual Diabetic is 0)
test_sample = json.dumps({"data": [{
"Pregnancies": 0,
"PlasmaGlucose": 171,
"DiastolicBloodPressure": 80,
"TricepsThickness": 34,
"SerumInsulin": 23,
"BMI": 43.51,
"DiabetesPedigree": 1.21,
"Age": 21,}]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aksservice.run(input_data=test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Test web service - HTTP request
Alternatively you can construct a raw HTTP request and send it to the service. In this case you need to explicitly pass the HTTP header and the API key.
###Code
# if (key) auth is enabled, retrieve the API keys. AML generates two keys.
key1, key2 = aksservice.get_keys()
print(key1)  # NOTE(review): printing secrets into notebook output is risky
print(key2)
# construct raw HTTP request and send to the service
import requests
import json
# Raw dataset (Actual Diabetic is 1)
test_sample = json.dumps({"data": [{
    "Pregnancies": 9,
    "PlasmaGlucose": 103,
    "DiastolicBloodPressure": 78,
    "TricepsThickness": 25,
    "SerumInsulin": 309,
    "BMI": 29.58,
    "DiabetesPedigree": 1.28,
    "Age": 43,}]})
# The service expects a raw bytes body, not a str
test_sample = bytes(test_sample,encoding = 'utf8')
# If (key) auth is enabled, don't forget to add key to the HTTP header.
# Azure ML key auth uses the 'Bearer <key>' scheme even for plain keys.
headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + key1}
response = requests.post(aksservice.scoring_uri, test_sample, headers=headers)
print("prediction:", response.text)
###Output
_____no_output_____
###Markdown
Clean up Delete the AKS web service and the registered model to stop the compute and any associated billing.
###Code
%%time
# Tear down the deployed web service and unregister the model to stop billing.
aksservice.delete()
model.delete()
###Output
_____no_output_____ |
Input_utils.ipynb | ###Markdown
Binary vector generator Version 1 [Type checking](http://stackoverflow.com/questions/9225679/how-to-have-different-input-types-for-the-same-function)
###Code
from scipy.special import comb
import numpy as np
def how_many(max_n = 6, length = 16):
    """Count the binary vectors of a given length with an allowed number of ones.

    Args:
        max_n (int or list): if an int, count vectors containing 1..max_n ones;
            if a list, count vectors whose number of ones appears in the list.
        length (int): vector length.

    Returns:
        int: total number of such binary vectors.

    Raises:
        TypeError: if max_n is neither int nor list.
    """
    if isinstance(max_n, int):
        indexes = range(1, max_n + 1)
    # BUG FIX: this was a second independent `if`, so int inputs fell
    # through to the `else` branch and raised TypeError.
    elif isinstance(max_n, list):
        indexes = max_n
    else:
        raise TypeError("how_many(x,y) requires x to be either list or int")
    rows_n = 0
    for i in indexes:
        rows_n = rows_n + comb(length, i, exact=True)
    return rows_n
def binary_vectors(length = 16, max_n = 6, one_hot = False):
    """Build every binary vector of the given length with at most max_n ones.

    Enumerates all 2**length integers, converts each to its binary
    representation, and keeps the rows whose number of ones is within range
    (the all-zero vector is therefore always included).

    Args:
        length (int): vector length (number of columns).
        max_n (int): maximum number of ones allowed per vector.
        one_hot (bool): accepted for API compatibility but unused here;
            labels are produced separately (see find_labels).

    Returns:
        ndarray of shape [how_many(max_n, length) + 1, length] (the +1 is
        the zero vector, which how_many does not count).
    """
    # BUG FIX: columns_n was hard-coded to 16, ignoring the `length`
    # parameter, which broke every non-default call.
    columns_n = length
    # numbers of rows before filtering: all possible binary vectors
    rows_n = 2 ** columns_n
    # location matrix
    locations = np.zeros((rows_n, columns_n))
    # populate the location matrix
    for i in range(rows_n):
        bin_string = np.binary_repr(i, length)
        # Convert the '0'/'1' text into a numeric vector. np.fromstring on
        # text is deprecated; frombuffer on the encoded bytes is the
        # supported equivalent.
        bin_array = np.frombuffer(bin_string.encode('ascii'), 'u1') - ord('0')
        locations[i, :] = bin_array
    # Extract vectors within range (at most max_n ones)
    locations = locations[np.sum(locations, axis=1) <= max_n]
    return locations
# The 50.000 inputs
# Repeat the matrix 4 times and cut the excess
# inputs = np.tile(locations,(4,1))
# inputs = inputs[0:50000,:]
# labels = np.sum(inputs, axis=1).reshape(50000,1)
# First we store the
# print("vector {} has label {}".format(inputs[2532,:], labels[2532,:]))
###Output
_____no_output_____
###Markdown
Binary vector generator Version 2 - via Itertool
###Code
# def binary_vector_2(rows_n = [2,4,6,8,10], columns_n = 10):
# rows = how_many(rows_n, 10)
# index = 0
# locations = np.zeros((rows, columns_n))
# for i in rows_n:
# for bin_string in kbits(10,i):
# bin_array = np.fromstring(bin_string,'u1') - ord('0')
# locations[index,:] = bin_array
# index = index+1
# return locations
# inputs = binary_vector_2()
# labels = find_labels(inputs, one_hot=True)
# #dataset_ver = Dataset(inputs, labels)
# #pickle_test(dataset_ver)
# inputs.shape
import numpy as np
import itertools
from scipy.special import comb
def kbits(n, k):
    """Enumerate all binary strings of length n containing exactly k ones.

    Args:
        n (int): total string length (set cardinality)
        k (int): number of '1' characters (subset cardinality)

    Returns:
        list of str: one string per way n chooses k, in the order produced
        by itertools.combinations.
    """
    return [''.join('1' if pos in chosen else '0' for pos in range(n))
            for chosen in itertools.combinations(range(n), k)]
def binary_vector_2(rows_n = [2,4,6,8,10], distribution=[45], columns_n = 10):
    """Matrix of binary vectors sampled per numerosity.

    For each numerosity rows_n[i], draw distribution[i] binary vectors of
    length columns_n containing exactly rows_n[i] ones, sampled with
    replacement from all possible such vectors.

    Args:
        rows_n (list of int): numerosities (number of ones per vector).
        distribution (list of int): how many vectors per numerosity; a
            single value is broadcast to every numerosity. Passing
            comb(columns_n, row) draws as many vectors as exist (with
            replacement, so not necessarily all distinct).
        columns_n (int): vector length.

    Returns:
        ndarray of shape (sum(distribution), columns_n)
    """
    rows_n = np.array(rows_n)
    # Cast to int: scipy.special.comb returns floats, and float sizes are
    # deprecated/rejected by np.zeros and np.random.randint (this was the
    # source of the VisibleDeprecationWarning in the notebook output).
    distribution = np.array(distribution).astype(int)
    assert np.all(rows_n > 0)
    assert np.all(distribution > 0), "Distribution values must be positive. {} provided".format(distribution)
    if len(distribution) == 1:
        distribution = np.repeat(distribution, len(rows_n))
    assert len(distribution) == len(rows_n)
    rows = int(np.sum(distribution))
    locations = np.zeros((rows, columns_n))
    # Number of distinct vectors available per numerosity.
    cluster_size = comb(columns_n, rows_n).astype(int)
    index = 0
    for i in range(len(rows_n)):
        # BUG FIX: kbits was called with a hard-coded length of 10,
        # ignoring the columns_n parameter.
        all_vectors = kbits(columns_n, rows_n[i])
        # Sample vector indices with replacement.
        take_this = np.random.randint(cluster_size[i], size=distribution[i])
        for vector_index in take_this:
            bin_string = all_vectors[vector_index]
            # np.fromstring on text is deprecated; frombuffer is equivalent.
            bin_array = np.frombuffer(bin_string.encode('ascii'), 'u1') - ord('0')
            locations[index, :] = bin_array
            index = index + 1
    return locations
###Output
_____no_output_____
###Markdown
Accumulator Inputs
###Code
import numpy as np
class accumulatorMatrix(object):
    """Matrix whose rows encode accumulated numerosities.

    Number k is represented by k * times leading ones. If zero is True,
    the all-zero vector is included as the first row.

    Args:
        max_number (int): the greatest number to be represented
        length (int): row length; defaults to the minimum compatible length
        times (int): how many ones encode one unit
        zero (bool): whether the zero vector is included

    Attributes:
        outputs (int, ndarray): the (max_number [+1]) x length matrix
    """
    def __init__(self, max_number, length=None, times=2, zero=False):
        self.max_number = max_number
        self.length = length
        self.times = times
        self.zero = zero
        if not length:
            self.length = self.times * self.max_number
        # length must be exactly times * max_number
        assert self.max_number == self.length / times
        if self.zero:
            # One extra row so rows encode 0..max_number.
            self.max_number = self.max_number + 1
            offset = 0
        else:
            offset = 1
        self.outputs = np.zeros((self.max_number, self.length), dtype=int)
        for row in range(self.max_number):
            # Row encodes the number (row + offset) with leading ones.
            self.outputs[row, :self.times * (row + offset)].fill(1)

    def shuffle_(self):
        """Shuffle the rows of outputs in place."""
        np.random.shuffle(self.outputs)

    # def unshuffle(self):
    """We want to access the random shuffle in order to have the list
    http://stackoverflow.com/questions/19306976/python-shuffling-with-a-parameter-to-get-the-same-result"""

    def replicate(self, times=1):
        """Stack the matrix on top of itself the given number of times."""
        self.outputs = np.tile(self.outputs, [times, 1])
import warnings
def accumulator_matrix(max_number, length=None, times=2, zero=False):
"""
Generate a matrix which row vectors correspond to accumulated numerosity, where each number
is coded by repeating 1 times times. If zero = true, the zero vector is included.
Args:
max_number (int): the greatest number to be represented
length (int): vectors length, if not provided is computed as the minimum length compatible
times (int): length of unity representation
zero (bool): whether the zero vector is included or excluded
Returns:
outputs (int, ndarray): max_number x length ndarray
"""
warnings.warn("shouldn't use this function anymore! Now use the class accumulatorMatrix.",DeprecationWarning)
if not length:
length = times * max_number
assert max_number == length/times
if zero:
max_number = max_number + 1
add = 0
else:
add = 1
outputs = np.zeros((max_number, length), dtype=int)
for i in range(0,max_number):
outputs[i,:times * (i+add)].fill(1)
return outputs
# np.random.seed(105)
# Weights = np.random.rand(5,10)
###Output
_____no_output_____
###Markdown
Label the data
###Code
def find_labels(inputs, multiple=1, one_hot=False):
    """Derive numerosity labels from binary row vectors.

    A row's label is its number of ones divided by the length of the unity
    representation.

    Args:
        inputs (int, ndarray): row samples
        multiple (int): length of the unity representation
        one_hot (bool): return one-hot rows instead of integer labels

    Returns:
        labels: integer vector, or a (n_samples, max_label + 1) one-hot matrix
    """
    ones_per_row = np.sum(inputs, axis=1)
    labels = (ones_per_row / multiple).astype(int)
    if not one_hot:
        return labels
    n_samples = labels.shape[0]
    encoded = np.zeros((n_samples, np.max(labels) + 1))
    encoded[np.arange(n_samples), labels] = 1
    return encoded
###Output
_____no_output_____
###Markdown
Create dataset ** Namedtuple **
###Code
from collections import namedtuple
def Dataset(inputs, labels):
    """Bundle inputs and labels into a named tuple.

    Args:
        inputs (array): sample data
        labels (array): corresponding labels

    Returns:
        Dataset named tuple with fields `data` and `labels`
    """
    make = namedtuple('Dataset', ['data', 'labels'])
    return make(inputs, labels)
###Output
_____no_output_____
###Markdown
Pickling
###Code
from collections import namedtuple
Dataset = namedtuple('Dataset', ['data', 'labels'])
#data_verguts = Dataset(inputs, labels)
import pickle
def pickle_test(Data, name):
    """Append a pickled copy of Data to '<name>.pickle'.

    Args:
        Data: any picklable object
        name (string): output file name, without the .pickle extension

    Note: the file is opened in append mode ('ab'), so repeated calls add
    successive pickle records rather than overwriting the file.
    """
    # Context manager guarantees the handle is closed even if dump raises.
    with open(name + '.pickle', 'ab') as f:
        pickle.dump(Data, f)
#pickle_test(data_verguts, "verguts")
# # Test opening the pickle
# pickle_in = open("Data.pickle", "rb")
# ex = pickle.load(pickle_in)
# ex.labels[25]
###Output
_____no_output_____
###Markdown
We now pickle the named_tuple cfr. [When to pickle](http://stackoverflow.com/questions/21752259/python-why-pickle) See http://localhost:8888/notebooks/Dropbox/Programming/Jupyter/Competitive-Unsupervised/NNTf.ipynb for creating a pandas dataframe out of the namedtuple http://stackoverflow.com/questions/16377215/how-to-pickle-a-namedtuple-instance-correctly https://blog.hartleybrody.com/python-serialize/ Simon and Peterson 2000, Input Dataset The dataset consists of vectors of length 16, with vectors of length 6 as labels, one-hot encoded. 50,000 input patterns are generated: a numerosity in range(6) is picked randomly, then locations are randomly selected. Verguts and Fias: Inputs Uniformly distributed input The outlier 5 is represented only 10 times; this allows the net to see it a reasonable number of times, but not too often, considering that it can only have one shape.
###Code
# numerosities to generate (number of ones per input vector)
rows_n = [2,4,6,8,10]
#comb(10, rows_n)
# draw as many vectors per numerosity as exist: distribution = C(10, k)
inputs = binary_vector_2(distribution = comb(10, rows_n))
# each unit is coded by 2 ones, so the label is (count of ones) / 2
labels = find_labels(inputs, multiple=2, one_hot=True)
count = 0
for i in inputs:
    print(count, i, int(np.sum(i)/2), labels[count])
    count +=1
###Output
/home/clint/.local/lib/python3.5/site-packages/ipykernel/__main__.py:56: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
/home/clint/.local/lib/python3.5/site-packages/ipykernel/__main__.py:62: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
###Markdown
Accumulator inputs - Verguts & Fias Numerosity from 1 to 5, where unity is represented by 3 repeated ones (e.g. 2 is represented as [1,1,1,1,1,1,0,0,0,0,0,0,0,0,0]). No zero vector.
###Code
# numerosities 1..5, each unit coded by 2 ones; no zero vector
inputs = accumulatorMatrix(5, times=2).outputs
labels = find_labels(inputs, multiple=2, one_hot=True)
Dataset = namedtuple('Dataset', ['data', 'labels'])
verguts2004 = Dataset(inputs, labels)
# persist the dataset to disk for reuse in the training notebooks
pickle_test(verguts2004, "verguts_accumulator")
verguts2004.labels
###Output
_____no_output_____ |
notebooks/Patterns exploration.ipynb | ###Markdown
Patterns processing. This notebook reads in the patterns data and: 1. Filters down to Philadelphia zipcodes based on a (currently hard-coded) list. (Note: I have not yet run this using this list; previously it filtered by the state and city columns in the data.) 2. Concatenates the files. 3. Writes the result to philly_patterns.csv in the processed data folder. It takes a long time to run and the resulting dataset is very large, so it is worth thinking about ways to cut down the data.
###Code
MONTH_LEVEL = True
DAY_LEVEL = False
import pandas as pd
from safegraph_py_functions import safegraph_py_functions as sgpy
import os
from dotenv import load_dotenv, find_dotenv
# find .env automagically by walking up directories until it's found
dotenv_path = find_dotenv()
# load up the entries as environment variables
load_dotenv(dotenv_path)
ROOT_DIR = os.environ.get("ROOT_DIR")
os.chdir(ROOT_DIR)
from src import DATA_DIR
raw_data_dir = DATA_DIR / 'raw'
# Read in all patterns files in the monthly-patterns folder
patterns_path = raw_data_dir / "monthly-patterns-2020-12"
files = [f for f in patterns_path.glob("**/*.csv.gz")]
norm_files = [f for f in patterns_path.glob("**/normalization_stats.csv")]
philly_places = pd.read_csv(DATA_DIR / 'processed' / 'philly_places.csv.tar.gz', low_memory = False)
for col in ['valid_from', 'valid_to']:
philly_places[col] = pd.to_datetime(philly_places[col], format = '%Y-%m-%d')
keep_cols = ['safegraph_place_id', 'location_name', 'street_address',
'city', 'region', 'postal_code', 'safegraph_brand_ids', 'brands',
'date_range_start', 'date_range_end', 'raw_visit_counts',
'raw_visitor_counts', 'visits_by_day', 'poi_cbg', 'visitor_home_cbgs',
'visitor_daytime_cbgs', 'visitor_work_cbgs',
'distance_from_home', 'median_dwell',
'device_type']
# for files with information disaggregated at the state level, keep only the country-wide info
def keep_total_level(norm_stats):
    """Reduce state-disaggregated normalization stats to country level.

    If a 'region' column is present, keep only the 'ALL_STATES' rows and
    drop the column; otherwise return the frame unchanged.

    Raises:
        ValueError: if a 'region' column exists but contains no
        'ALL_STATES' rows.
    """
    if 'region' not in norm_stats.columns:
        return norm_stats
    country_rows = norm_stats['region'] == 'ALL_STATES'
    if not country_rows.any():
        raise ValueError('no region named "ALL_STATES"')
    return norm_stats[country_rows].drop(columns=['region'])
def filter_to_philly(file):
    """Read a patterns CSV and keep only the Philadelphia rows.

    Zip codes are parsed as integers by read_csv, so they are left-padded
    back to five digits (relevant e.g. for MA zip codes like 0191X) before
    selecting the Philadelphia '191xx' range.

    Args:
        file: path or file-like object readable by pandas.read_csv

    Returns:
        DataFrame: rows whose padded postal_code starts with '191'
    """
    df = pd.read_csv(file)
    df['postal_code'] = df['postal_code'].apply(lambda z: ('00000' + str(z))[-5:])
    philly_mask = df['postal_code'].astype(str).str.startswith("191")
    return df.loc[philly_mask]
def get_places(df):
    """Attach place metadata valid for the frame's month.

    Parses date_range_start, takes the first row's start date (one date per
    file is expected), and left-merges the rows of the module-level
    `philly_places` table whose validity window covers that date.

    Args:
        df (DataFrame): patterns rows for a single month

    Returns:
        DataFrame: df joined with the currently-valid place records
    """
    df = df.reset_index(drop = True)
    df['date_range_start'] = pd.to_datetime(
        df['date_range_start'].apply(lambda x: x[:10])
    )
    if len(df['date_range_start'].unique()) > 1:
        # BUG FIX: the message previously referenced a global `file`
        # variable that only happens to exist when called from the read
        # loop; report the offending dates instead.
        print('More than one date in {0}!'.format(df['date_range_start'].unique()))
    file_date = df.loc[0,'date_range_start']
    # Places whose validity window covers this month's start date.
    current = (philly_places['valid_from'] <= file_date) & (philly_places['valid_to'] > file_date)
    current_places = philly_places[current]
    df = df.merge(current_places, on = 'safegraph_place_id', how = 'left')
    return df
def get_norm_stats(df, norm_df):
    """Join normalization statistics onto day-level rows.

    Derives year/month/day columns from the `date` column (added to df in
    place) and inner-merges norm_df on them.

    Args:
        df (DataFrame): day-level rows with a datetime `date` column
        norm_df (DataFrame): stats keyed by year/month/day

    Returns:
        DataFrame: df merged with the matching normalization rows
    """
    date_parts = df['date'].dt
    df['year'] = date_parts.year
    df['month'] = date_parts.month
    df['day'] = date_parts.day
    return df.merge(norm_df, on=['year', 'month', 'day'])
def explode(df):
    """Expand month-level rows into one row per day.

    visits_by_day holds a JSON-style list of per-day visit counts;
    sgpy.explode_json_array unpacks it into one row per day, and a concrete
    `date` column is then derived from date_range_start plus the day offset.

    Args:
        df (DataFrame): month-level patterns rows

    Returns:
        DataFrame: day-level rows with `day_visit_counts` and `date` columns
    """
    # The visits_by_day column contains a list of integers.
    # This explodes that list so we get one row per day.
    df = sgpy.explode_json_array(
        df, array_column ='visits_by_day', value_col_name='day_visit_counts',
        place_key='safegraph_place_id', file_key='date_range_start', array_sequence='day',
        keep_index=False, zero_index=False)
    df['date_range_start'] = pd.to_datetime(df['date_range_start'])
    # `day` is 1-indexed, so day 1 adds a zero offset to the month start.
    temp = df['day'].apply(lambda x: pd.Timedelta(x-1, unit='D'))
    df['date'] = df['date_range_start'] + temp
    return df
# Country-level normalization stats across all files, with integer date keys.
norm_stats = pd.concat([keep_total_level(pd.read_csv(file)) for file in norm_files])
norm_stats['year'] = norm_stats['year'].astype(int)
norm_stats['month'] = norm_stats['month'].astype(int)
norm_stats['day'] = norm_stats['day'].astype(int)
# HK: I only downloaded patterns data from 2019 onwards due to memory constraints
norm_stats = norm_stats[norm_stats['year'] >= 2019]
if MONTH_LEVEL or DAY_LEVEL:
    # Filter each monthly file to Philadelphia and attach place metadata.
    philly_patterns = []
    for file in files:
        print("Reading {}".format(file))
        df = filter_to_philly(file)
        df = get_places(df)
        philly_patterns.append(df)
    if MONTH_LEVEL:
        # One row per place per month.
        pd.concat(philly_patterns).to_csv(
            DATA_DIR / 'processed' / "philly_patterns.csv.tar.gz", index=False
        )
    if DAY_LEVEL:
        # One row per place per day, joined with normalization stats.
        philly_patterns = [get_norm_stats(explode(df), norm_stats) for df in philly_patterns]
        pd.concat(philly_patterns).to_csv(
            DATA_DIR / 'processed' / "philly_patterns_exploded.csv.tar.gz", index=False
        )
###Output
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/03/06/02/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/03/06/02/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/03/06/02/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/03/06/02/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/04/08/06/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/04/08/06/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/04/08/06/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/04/08/06/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/02/04/06/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/02/04/06/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/02/04/06/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/02/04/06/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/01/06/10/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/01/06/10/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/01/06/10/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2021/01/06/10/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2020/12/04/04/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2020/12/04/04/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2020/12/04/04/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns/2020/12/04/04/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/03/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/03/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/03/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/03/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/04/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/04/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/04/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/04/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/05/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/05/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/05/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/05/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/02/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/02/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/02/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/02/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/11/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/11/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/11/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/11/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/10/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/10/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/10/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/10/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/07/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/07/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/07/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/07/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/09/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/09/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/09/patterns-part4.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/09/patterns-part2.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/08/patterns-part3.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/08/patterns-part1.csv.gz
Reading /Users/hannahkronenberg/SafegraphCOVIDPhilly/src/../data/raw/monthly-patterns-2020-12/patterns_backfill/2021/04/13/10/2019/08/patterns-part4.csv.gz
|
src/zaubacorp.com/basic_data.ipynb | ###Markdown
> Collect data from Zaubacorp
###Code
from selenium.webdriver.support.ui import WebDriverWait
from tqdm.notebook import tqdm_notebook
from selenium.webdriver.common.by import By
from IPython.display import display
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import json
import time
import sys
import re
# chromium
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
# Zaubacorp login
url = 'https://www.zaubacorp.com/user/login?destination=node'
driver = webdriver.Chrome('chromedriver',options=chrome_options)
driver.get(url)
def login(cuser_text, pass_text, captcha_text):
    """Fill and submit the Zaubacorp login form in the global webdriver.

    Args:
        cuser_text (string): username
        pass_text (string): password
        captcha_text (string): solved captcha value
    """
    form_values = {
        "edit-name": cuser_text,
        "edit-pass": pass_text,
        "edit-captcha-response": captcha_text,
    }
    for element_id, value in form_values.items():
        driver.find_element(By.ID, element_id).send_keys(value)
    driver.find_element(By.ID, "edit-submit").click()
# captcha location from webpage: the login form poses a "Math question"
# (e.g. "3 + 4 ="); strip the label characters to recover the expression.
t = driver.find_element(By.XPATH,"//div[@class='form-type-textfield form-item-captcha-response form-item form-group']")
variables = ''.join(list(filter(lambda x:len(x)!=0,re.split(r'[Math question *]*',t.text)))[:-1])
# ans -> captcha ans
# SECURITY NOTE(review): eval() executes arbitrary text scraped from the
# page; if the site changes or is malicious this runs untrusted code.
# Consider a restricted arithmetic parser instead.
ans = eval(variables)
# login (NOTE(review): credentials are hard-coded; move to env vars)
login('prince404','PRINCE@123',str(ans))
print(driver.current_url)
# wait till it loads
WebDriverWait(driver=driver, timeout=10).until(
    lambda x: x.execute_script('return document.readyState') == 'complete'
)
print(driver.current_url)
time.sleep(7)  # extra fixed delay for post-login redirects to settle
print(driver.current_url)
def display_all(data):
    """Render every element of data in the notebook, preceded by its index.

    Args:
        data (sequence): items to render with IPython display
    """
    for index, item in enumerate(data):
        print(index)
        display(item)
def load_page(link):
    """Navigate the global webdriver to a page and wait until it is loaded.

    Blocks until document.readyState reports 'complete' (up to 10 s), then
    sleeps a further 3 seconds to let dynamic content render.

    Args:
        link (string): page URL
    """
    driver.get(link)
    WebDriverWait(driver=driver, timeout=10).until(
        lambda x: x.execute_script('return document.readyState') == 'complete'
    )
    print(driver.current_url)
    time.sleep(3)
def get_emailetc(d_company):
    """Handles extraction of Email, Address and Website, with noise removal.

    Parses the page's body text between the 'Email ID:' and
    'Director Details' markers; relies on the positional layout of the
    non-empty lines in that region.

    Args:
        d_company (dictionary): dictionary containing company details,
            updated in place
    """
    all_text = driver.find_element(By.XPATH,"/html/body").text
    # Everything of interest sits after the 'Email ID:' label ...
    a = all_text.split('Email ID:')
    if(len(a)>1):
        # ... and before the 'Director Details' section.
        a = a[1].split('Director Details')[0]
        # split to get other details
        a = a.split('\n')
        a = list(filter(lambda x:len(x)>0,a))
        if(len(a)>2):
            # check if its an email
            if('@' in a[0]):
                d_company['Email']=a[0].strip()
            # update address in the dictionary
            # NOTE(review): assumes the address is the 4th non-empty line;
            # silently skipped when absent.
            try: d_company['Address']=a[3].strip()
            except: pass
            temp = a[1].split(' ')
            # remove noise ("Click ...") and update Website if present
            if('Click' not in temp):
                ot_temp = a[1].split(':')
                if(len(ot_temp)>1):
                    d_company['Website']=ot_temp[1].strip()
def basic_details(data, d_company):
    """Extract basic company details from the details table, dropping noise.

    Args:
        data (list): basic details in tabular (key, value) format
        d_company (dictionary): dictionary of company details, updated in place
    """
    rows = np.array(data)[0]
    for row in rows:
        key, value = row[0], row[1]
        # Skip placeholder / missing values.
        if value == '-' or value == '' or str(value) == 'nan':
            continue
        if key == "Activity":
            # Remove everything after the "Click to view ..." noise.
            fragments = [part for part in value.split('Click') if part]
            if fragments:
                # Also drop the deep description in square brackets.
                main = [part for part in fragments[0].split('[') if part]
                if main:
                    d_company[key] = main[0].rstrip()
        else:
            d_company[key] = value
def pre_data(data, d_company, val):
    """Store previous CINs / previous names from a single-column table.

    Args:
        data (list): table scraped from the page
        d_company (dictionary): company details, updated in place
        val (string): key under which the list is stored
    """
    rows = np.array(data).tolist()[0]
    # A lone "... found ..." row means the section is empty (e.g. "Not Found").
    if len(rows) < 1 or (len(rows) == 1 and 'found' in rows[0][0].split(' ')):
        return
    d_company[val] = [row[0] for row in rows]
def establishments(data, d_company):
    """Record the establishments owned by the company.

    Args:
        data (list): table scraped from the page
        d_company (dictionary): company details, updated in place
    """
    rows = np.array(data).tolist()[0]
    # A lone "no ... found" row means there are no establishments.
    if len(rows) < 1 or (len(rows) == 1 and 'found' in (rows[0][0].lower()).split(' ')):
        return
    estab_dict = {}
    for idx, row in enumerate(rows):
        entry = {}
        if len(str(row[0])) > 0 and row[0] != '-' and row[0] != '':
            entry['Establishment Name'] = row[0]
        if len(str(row[1])) > 0 and row[1] != '-' and row[1] != '':
            entry['City'] = row[1]
        if len(str(row[2])) > 0 and str(row[2]) != '-' and str(row[2]) != '' and row[2] != float('nan'):
            entry['Pincode'] = row[2]
        if len(str(row[3])) > 0 and row[3] != '-' and row[3] != '':
            entry['Address'] = row[3]
        estab_dict[idx] = entry
    d_company['Establishments'] = estab_dict
def charges(data, d_company):
    """Record the charges (borrowings) lodged against the company.

    Also accumulates the total borrowed amount and the charge count.

    Args:
        data (list): table scraped from the page
        d_company (dictionary): company details, updated in place
    """
    rows = np.array(data).tolist()[0]
    # A lone "... found ..." row means there are no charges.
    if len(rows) < 1 or (len(rows) == 1 and 'found' in str(rows[0][0]).split(' ')):
        return
    charges_dict = {}
    total_amount = 0
    for row in rows:
        entry = {}
        if len(str(row[1])) > 0 and str(row[1]) != '-' and str(row[1]) != '':
            entry['Creation Date'] = row[1]
        if len(str(row[2])) > 0 and str(row[2]) != '-' and str(row[2]) != '':
            entry['Modification Date'] = row[2]
        if len(str(row[3])) > 0 and str(row[3]) != '-' and str(row[3]) != '':
            entry['Closure Date'] = row[3]
        if len(str(row[4])) > 0 and str(row[4]) != '-' and str(row[4]) != '' and str(row[4]) != 'nan':
            entry['Assets Under Charge'] = row[4]
        if row[5] != float('nan') and str(row[5]).isnumeric():
            total_amount += int(row[5])
            entry['Amount'] = row[5]
        if len(str(row[6])) > 0 and str(row[6]) != '-' and str(row[6]) != '':
            entry['Charge Holder'] = row[6]
        # Entries are keyed by the charge identifier (first column).
        charges_dict[row[0]] = entry
    d_company['Charges'] = charges_dict
    d_company['total Charges/Borrowing Amount'] = total_amount
    d_company['Number of Charges'] = len(rows)
def persecution(data, d_company):
    """Parse the prosecutions table filed against the company.

    Args:
        data (list): table scraped from the webpage (as produced by
            ``pd.read_html``); each row is expected as [serial, defaulting
            entities, court name, prosecution section, date of order,
            status].
        d_company (dict): company details dictionary, updated in place with
            'Persecution' and 'Number of Persecutions'.
    """
    temp = np.array(data).tolist()[0]
    # Bail out when the page reports that no prosecutions were found.
    # str() guards against non-string cells (e.g. NaN), which previously
    # crashed on .lower(); this matches the guard style of charges().
    if len(temp) < 1 or (len(temp) == 1 and 'found' in str(temp[0][0]).lower().split(' ')):
        return
    # Column index -> output key for every optional field in a row.
    fields = (
        (1, 'Defaulting Entities'),
        (2, 'Court Name'),
        (3, 'Prosecution Section'),
        (4, 'Date Of Order'),
        (5, 'Status'),
    )
    # all prosecution details keyed by row position
    p_d = {}
    for i in range(len(temp)):
        t_d = {}
        for col, key in fields:
            cell = str(temp[i][col])
            # skip blank and '-' placeholder cells
            if len(cell) > 0 and cell != '-':
                t_d[key] = temp[i][col]
        p_d[i] = t_d
    d_company['Persecution'] = p_d
    d_company['Number of Persecutions'] = len(temp)
def cur_directors(data, d_company):
    """Record the company's current directors in *d_company*.

    Args:
        data (list): table scraped from the webpage; ``data[0]`` holds the
            rows, each expected as [serial, name, designation,
            appointment date].
        d_company (dict): company details dictionary, updated in place with
            'Current Directors'.
    """
    rows = np.array(data[0]).tolist()
    # Nothing to record when the table is empty or reports "not found".
    if len(rows) < 1 or (len(rows) == 1 and 'not' in (rows[0][0].lower()).split(' ')):
        return
    # Column index -> output key for each director attribute.
    columns = ((1, 'Name'), (2, 'Designation'), (3, 'Appointment Date'))
    directors = {}
    for row in rows:
        # Only genuine data rows carry a decimal identifier in column 0.
        if not str(row[0]).isdecimal():
            continue
        entry = {}
        for col, key in columns:
            value = str(row[col])
            # skip blank and '-' placeholder cells
            if len(value) > 0 and value != '-':
                entry[key] = row[col]
        directors[row[0]] = entry
    d_company['Current Directors'] = directors
def fetch_data(link):
    """Scrape one company's detail page on zaubacorp into a dictionary.

    Args:
        link (str): URL of the company's detail page.

    Returns:
        dict: company details collected by the section parsers, or None
        when the page did not load fully / the company was not found.
    """
    # Load the company page in the (module-level) selenium driver.
    load_page(link)
    # Grab the rendered HTML and parse it.
    page = driver.page_source
    soup = BeautifulSoup(page, 'html.parser')
    # Every detail section on the page is rendered as an HTML table.
    temp_data = soup.find_all('table')
    # Fewer than 5 tables means the page did not load fully or the
    # company does not exist.
    if len(temp_data) < 5:
        print("Company Not Found!")
        return
    # dictionary accumulating all the data of the company
    d_company = {}
    # Tables 0 and 3-6 hold key/value basic details.
    basic_details(pd.read_html(str(temp_data[0]), header=None), d_company)
    basic_details(pd.read_html(str(temp_data[3]), header=None), d_company)
    basic_details(pd.read_html(str(temp_data[4]), header=None), d_company)
    basic_details(pd.read_html(str(temp_data[5]), header=None), d_company)
    basic_details(pd.read_html(str(temp_data[6]), header=None), d_company)
    get_emailetc(d_company)
    # Optional sections: many companies lack these tables, so failures are
    # deliberately best-effort. The previous bare ``except:`` also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    try:
        pre_data(pd.read_html(str(temp_data[1]), header=None), d_company, 'Previous Names')
        pre_data(pd.read_html(str(temp_data[2]), header=None), d_company, 'Previous CIN')
    except Exception:
        pass
    try:
        # Trailing tables hold establishments, charges and prosecutions,
        # addressed from the end of the page.
        establishments(pd.read_html(str(temp_data[-1]), header=None), d_company)
        charges(pd.read_html(str(temp_data[-2]), header=None), d_company)
        persecution(pd.read_html(str(temp_data[-3]), header=None), d_company)
        cur_directors(pd.read_html(str(temp_data[7]), header=None), d_company)
    except Exception:
        pass
    return d_company
# Load the company links collected earlier.
with open('./temps/missing_links.json', 'r+') as jsonfp:
    company_links = json.load(jsonfp)
print("Length of Data :", len(company_links))

# Scraped details per company, keyed by the company identifier.
company_data = {}
count = 0  # retained for notebook compatibility (unused below)
# Iterate over every company in the loaded data; errors on one page are
# printed and skipped so a single failure does not abort the whole run.
with tqdm_notebook(total=len(company_links)) as pbar:
    for i in company_links:
        try:
            json_data = fetch_data(company_links[i]['link'])
            company_data[i] = json_data
        except Exception as e:
            print(e)
        pbar.update()

# Dump the scraped data; the context manager guarantees the file is
# closed even if serialisation fails (previously open/close by hand).
with open("./missing_links.json", "w+") as out_file:
    json.dump(company_data, out_file, indent=4)
###Output
_____no_output_____ |
project/code/RelationshipAndCommunity.ipynb | ###Markdown
Map each role to the platform it posts on
###Code
# Build one Role -> Post_on sheet per entity, then combine and export them.
from pandas.core.frame import DataFrame
import pandas as pd

# Constructing each frame from a record list in one call (with the original
# 1-based index) replaces the previous row-by-row ``.loc`` inserts on an
# empty DataFrame, which are slow and error-prone. Data, index and the CSV
# output are identical.
df_1 = pd.DataFrame(
    [{'Role': 'GAMER', 'Post_on': 'Reddit'},
     {'Role': 'GAMER', 'Post_on': 'Twitter'}],
    index=[1, 2],
)

df_2 = pd.DataFrame([{'Role': 'GAMECONTENT', 'Post_on': 'Twitter'}], index=[1])

df_3 = pd.DataFrame([{'Role': 'GAMEDEVELOPER', 'Post_on': 'Instagram'}], index=[1])

# Combine the three Role/Post_on sheets into a single relationship table.
relationship = pd.concat([df_1, df_2, df_3])

# Save all the data to relationship_final.csv, dropping the index column.
relationship.to_csv("relationship_final.csv", index=False)
###Output
_____no_output_____
###Markdown
Read all three related CSV files and combine them into a single table.
###Code
# Read the three per-entity CSV files produced earlier, in order.
csv1, csv2, csv3 = (
    pd.read_csv(name)
    for name in ('GameCompanyFinal2.csv', 'GameContent2.csv', 'Gamer_Final2.csv')
)
###Output
_____no_output_____
###Markdown
We now have the relationships for all three entities: game content, game developer, and gamer.
###Code
# Stack the three entity tables into a single community table and export
# it to community.csv without the index column.
frames = [csv1, csv2, csv3]
community = pd.concat(frames)
community.to_csv("community.csv", index=False)
###Output
_____no_output_____ |
nlp_with_python_for_ml/Exercise Files/Ch05/05_04/End/05_04.ipynb | ###Markdown
Building Machine Learning Classifiers: Building a basic Random Forest model Read in & clean text
###Code
import nltk
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
import string
# English stop word list and Porter stemmer used by clean_text() below.
stopwords = nltk.corpus.stopwords.words('english')
ps = nltk.PorterStemmer()
# Load the SMS corpus (tab-separated) and name the two columns.
# NOTE(review): read_csv's default header consumes the first data row as a
# header before the rename — confirm the file has a header row.
data = pd.read_csv("SMSSpamCollection.tsv", sep='\t')
data.columns = ['label', 'body_text']
def count_punct(text):
    """Return the percentage of punctuation characters in *text*.

    The ratio is taken over non-space characters only, rounded to three
    decimals before scaling by 100.

    Args:
        text (str): message body.

    Returns:
        float | int: punctuation percentage; 0 for empty or all-space
        input, where the previous implementation raised ZeroDivisionError.
    """
    count = sum(1 for char in text if char in string.punctuation)
    non_space = len(text) - text.count(" ")
    if non_space == 0:
        # Guard: an empty or whitespace-only message has no measurable text.
        return 0
    return round(count / non_space, 3) * 100
# Feature engineering: message length excluding spaces, and the
# punctuation percentage computed by count_punct().
data['body_len'] = data['body_text'].apply(lambda x: len(x) - x.count(" "))
data['punct%'] = data['body_text'].apply(lambda x: count_punct(x))
def clean_text(text):
    """Lower-case, strip punctuation, tokenise, stem and de-stopword *text*.

    Args:
        text (str): raw message body.

    Returns:
        list[str]: Porter-stemmed tokens with English stop words removed
        (uses the module-level ``ps`` and ``stopwords``).
    """
    # Remove punctuation while lower-casing every character.
    text = "".join([word.lower() for word in text if word not in string.punctuation])
    # Raw string fixes the invalid escape sequence '\W' (a warning on
    # modern Python); the pattern itself is unchanged.
    tokens = re.split(r'\W+', text)
    # Stem each token, dropping English stop words.
    text = [ps.stem(word) for word in tokens if word not in stopwords]
    return text
# Vectorise the raw text with TF-IDF, using clean_text as the analyzer.
tfidf_vect = TfidfVectorizer(analyzer=clean_text)
X_tfidf = tfidf_vect.fit_transform(data['body_text'])
# Join the engineered numeric features with the (densified) TF-IDF matrix.
# NOTE(review): .toarray() materialises the sparse matrix — memory-heavy
# for large corpora.
X_features = pd.concat([data['body_len'], data['punct%'], pd.DataFrame(X_tfidf.toarray())], axis=1)
X_features.head()
###Output
_____no_output_____
###Markdown
Explore RandomForestClassifier Attributes & Hyperparameters
###Code
# Inspect the RandomForestClassifier API: its attributes/methods, and the
# default hyperparameters of a freshly constructed instance.
from sklearn.ensemble import RandomForestClassifier
print(dir(RandomForestClassifier))
print(RandomForestClassifier())
###Output
['__abstractmethods__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__setstate__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_abc_cache', '_abc_negative_cache', '_abc_negative_cache_version', '_abc_registry', '_estimator_type', '_get_param_names', '_make_estimator', '_set_oob_score', '_validate_X_predict', '_validate_estimator', '_validate_y_class_weight', 'apply', 'decision_path', 'feature_importances_', 'fit', 'get_params', 'predict', 'predict_log_proba', 'predict_proba', 'score', 'set_params']
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
###Markdown
Explore RandomForestClassifier through Cross-Validation
###Code
from sklearn.model_selection import KFold, cross_val_score
# n_jobs=-1 uses all available CPU cores when fitting the forest.
rf = RandomForestClassifier(n_jobs=-1)
k_fold = KFold(n_splits=5)
# 5-fold cross-validated accuracy over the combined feature matrix;
# the bare expression displays the per-fold scores in the notebook.
cross_val_score(rf, X_features, data['label'], cv=k_fold, scoring='accuracy', n_jobs=-1)
###Output
_____no_output_____ |
Modulo3Capsula2_abiertaUGR.ipynb | ###Markdown
![cabecera_slide_abiertaugr_bigdata.jpg](data:image/jpeg;base64,/9j/4QAYRXhpZgAASUkqAAgAAAAAAAAAAAAAAP/sABFEdWNreQABAAQAAABkAAD/4QN1aHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/PiA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjYtYzE0OCA3OS4xNjQwMzYsIDIwMTkvMDgvMTMtMDE6MDY6NTcgICAgICAgICI+IDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+IDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIiB4bWxuczpzdFJlZj0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlUmVmIyIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ9InhtcC5kaWQ6MDc4MDExNzQwNzIwNjgxMTgwODNGRUVENjUwRjYwQTciIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6MEQyNzI4OTdBREYxMTFFQUExNEZBNzM5OTkwRjA1RjIiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6MEQyNzI4OTZBREYxMTFFQUExNEZBNzM5OTkwRjA1RjIiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoTWFjaW50b3NoKSI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOjY4MGM3YWMxLWViY2UtNDlkYS05MDEwLWY2YmMyZTAyZWEwZCIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDowNzgwMTE3NDA3MjA2ODExODA4M0ZFRUQ2NTBGNjBBNyIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pv/uAA5BZG9iZQBkwAAAAAH/2wCEAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQECAgICAgICAgICAgMDAwMDAwMDAwMBAQEBAQEBAgEBAgICAQICAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA//AABEIAUcD6AMBEQACEQEDEQH/xAEFAAABBAMBAQEBAAAAAAAAAAAHBQYICQMECgIBAAsBAAICAwEBAQEAAAAAAAAAAAUGBAcCAwgAAQkKEAABBAEDAgQDBgQCBwMEACcBAgMEBQYREgcAITFBEwhRIhRhcTIjFQmBkUIWoVLwscHRMyQXYnJD4fGCUzQlGAqSomODsnOTRGQ1JjYnwtKjVFYZlKRVdZV2lqa2N0dXZ3coKREAAgECBAMFBAYGBgYDCwEZAQIDEQQAIRIFMUEGUWEiEwdxgTIU8JGhsUIjwdFSYhUI4XKCsjMW8ZKiQ7MkU3M0wtJjk2R0tCVVdReDo1SUxDVlJicJwzY309TkRWYY4uNEhIUoOP/aAAwDAQACEQMRAD8A6RcMgRnK6vkTEawK6tZnTv8A3oyyhvZGB/8AWTZCkND/AL5Pl10fJIyxBY/8V6Bfb2+4VPuxwPHbxPePJMP+Xjq7d4ByX2sa
L78Jduh21lyJj+jj8t5bzmgASkrJIQkdglttGiUjySAOiFsywRiNclUUGAV4Hu52nkzkckn38vdy7sIf9uh3XVHy+Z7D7dO2vj1MF4Vyrge23a+WWG5b4u2pBHpjQg6aAfDT+OmvUqC+YGtcDrvalI4YCuT4S24lxXpD+rwH3/x6YrPceWFK+2ugJpliMuV4o2wpz8vQany0/wBD0zW12WwpXdmErXhgWxqpDNnECmxtTJbc+4NH1T/gjoi0tYzTs+/AoQ+IA40EwmiEnaO4B/n3+7rMyNwqcZJGo4jHxcJruNun3eI+7y6x1sMxjekajjhPciFCiBqR38f/AD9bFlrxxmUQdvfjYZio26kdzrpqf9g1+HWPmHV+7jXJEhBGFltpoNxHO3ypWyrx8Wl9v5JWOtiyE1Hbgc8INRhdZW02k6AeGvx1/n93XwsxGeInlU4Y8OSWVakDw07ajUfy79Y5nLGapTMZ4zwp/ovsvx3NjzDzbralHsFtqC0bvDVJI7/EajrF0JUq3wkY8CFkDx5MD9P6cPqycZYiofj9os5sTYydddjbpIWwSP6oshK2z8SjXz6Fw1acq3xrkf1+8Z4ZZlEdgGTJXzHsPL3HLGziT6IzjlorQLCjDhD4vuo/PeA+EdhWgP8AmX9nWG5gtF5S+0+zs9+PnTsIS6E79tB3nn9Q+/Bch2LUhJQToUga6gjXTx6R7iB0NcXLZXMNwNNCCMaE5+OkkLcAJ8u/b/TTr0UbHMY3TSxpkSAcadS4z+sQFa6gPqUD/wB1h5Wunb4dSHQ+Sw50/SMQRL+epFOP6DhrS32vmSk6jcT/AB3En+B6LwR8O3CtczaSc8NySplxJUSDqSD938PHokiachgBNMWGXbjdsvTnVGOWyPncipdxiwV/V6tUEO1bjh11Bcp5DKQfMoPw6+RApLLFwBo49jfF/tA4xnk8y3inrUisbe1fhP8AqkY01xmylKtvcj7tNf5a69bgSDTliOxUjvw3rWEChRT27d/s8Ot8T0NDiLOmoGnHDVYiqAWFE6A6D/ePs6mFhWowNWM50x+UwO4A63qx4nEGRXqQOGNMRvz0aDt31/h/r6keZVe/EULVqHjjYejEjUA669tf8dPj1oDCufDE3S1OzCRMjEHTQ+R/8nbrJTqyxg45E4240PVv8Cj8dO3b7NetbccSEyXMGmNaTB/LPyqHfvr/AL+slGfLGLEaedcNeRXnedAf9PLTqWr5CuILqSSRwxrGteCQv03AjyUUqCf5kaa9fda1oMY6W9+MZr1Ea6Ejw+P+z7OvupR7cfNDVqceDXkE/KfHy8P4Ajr2sew4+eW1c6Y9pgHTsk/4j/ydYmRQeOeMxEfsxsJgqHfadf4nt1rLqRTG4RkdlcbbUDuNRofj/T4fDxHWovThU4kJGOOQwrR4pbKdU9ifxJ7g/wAetfmVOXHsxt8mhry7RhyxWdyRuT208QPEfaPs6xZ65A42IhBxtJifMPMDz7fy+/qI7DBS3QjjTCgwwEK1A+Y9tBqe3+n8OoLvXBNF9xw+Khso27e47bkHuNewHbXsdP8Az9DLlwag/XgvaxEUHBvpwwSK1hLzeg11/wAqtNQPE7SR8PL+Y6DTyFDU4YbaBWWnIfXhwNsrBRu3go7IfQNXGwf6FpOqXWleaTr9h6gFwa0pQ8V5HvHYe8e/BiG3IILewEZke3tHd9RxtIbdCwUgBZPZCVEsSUp7EtHUFK9P6DotPlqO3WlgCM66Rz5r7e7v4HngtEhU+Gmo8vwuP3T29qnxDl2YVPSIbOxPZQ+dpWnbXxI1Gnf+R+w9R2Go58RwP0+g78EUXSKp8NM1Pf8AT3+3GsinVIcKmgS2T+An8JHiBr30063IGPH4hz7cQLtkgjLR5xHiOY+n07cSCpfbzns3Cm85RTIRjDoK/rVutl70Eq2qkiPruU2D21+PQ99922LcP4W0v/Pfs5/VXt54jf5e6gm2c9SRWz/wMMQZiRTI0JoT
WgOVeFcMidiE6I2tmKlmBEV/xX5DyRIlaeawjcpKD5JGnRBpFbNqlu7gMaLG8UjSpon2n/ThhyqSI28oPTVOEeKIrBI+0BTpA8vh1GkZqZDDLbhHzrljSVErkKSGoTz3j80l8gfxQ0EjTqGSx44KxRqTQDGZZfaSlEZiJF7/AImmElen/fWFK8vj1pNDmxOCMcbDIDGxE+udX+bIec0PmpW37tAQB1GlKrkKYnwxOfiwUMVtLPHbKtvql1UayrJLcqM6Cr8SCNzS9CNzTqdUqHmk9BNwggv7WSyuhWCRSD+gjvHEd+GPbpZ7G5jvLY6Zo2BB7+w9xGR7sTMzSXV8iYZU8yY1GQuywu8pbzNaVv8A9jKl2DMYXaWTTaCFqhTIXqKWR8u4b/Aq28/75s88K3HTV8KpPC6RNyaorHTvVwPZw4Y6K2De7e/gh3qA6XikBlXmtRpkB/dZWP2GvZXv7/MHrk5zOzfGW1Jo7+3tihO0JEa6r5mltFUG/k9OQX2pjQ82JafgeuYumtwSHdLmwUFE8xqKeVDmP+6H7pA5Y6M2oG/6da2ejXNqQw/qns+0e0YgPWVgmTILO3c2+80HNO21AWkuE/BIQD0zkVvAudNX2YIFAduZxx0H6+WHpi1RW5RnlBX20tyvq52QlhUplYacHqodcjRUPK1QyJshpLO4+G/ojZvbbjucMVwxSJ5qEjLiDQe8invwu7p87s2zTzWaB544AaEVFAQGanPQpLU7sTB5Js4HF+EyWMfpI0SkjsogwqdiOlMZL78lCCxMKkqcUkSX0rcWolS06anXqwt7ubXYNnKW0SCBQAq0y1Ehc68c8yTmcLfTm3y3svzU8rTX8zankJqzClQQeymQAyHAYgxXXdfMuFWF9EisvLlF9uRWV8OGuKpS94DbbTKQkI17HuT569VGZLO5FbiNCx7AB9dKfbh+aK+jGmJ30gcyT9P0YISszU1esWjWVXtpJbV6kWxW68uXBQlwqbZWZZWELbX3HpAJOnl4dSJZ4XlWV5JC6gBTqPhA4AZ5ADhTGpYLnyWt/LiEDAgrpFDXjXtr9uDlQ8y3b0dbUaxtH5ziGmEPuhALSW20IS448EJU6Ut6BKdPAaE9j1NTcQxrm0h5nuyz+rEL+HeUgiVQkA5DIZ1NB78SJwTL7qXIjplyH33VoQ6HVqKlg70pUCT4aFYI/j0as9wcSAg+MZ4Wt72uPyvOGRBp9lQcGqVTRnn4E1lpESRZplic20NrK/T9ItS22vwtLdeKkq00SopJ0116NXMcb6JVGiSWtQOGXBqd5xC2u5mkt2E3iCEAHn7D7MSixDmWvqMQRj64SlyGo6oyVJTqlWoKPsCjr8ema1360jtQjV8xRT9GAVxst38wTGAYmbjXgDnmO7Auh1gsp70hhSWXH3nXiVD8tn1CUh0jxSGQouH7EHoAwFxOZVIDM31Vz+7DXaxaFjtR8AAH1ccOKVHpVZVdZNbPN0uCcd4jQRXJc1RYW5NuICLWwZYcP5W5mrhwUuKTqpKpCwNVaAs23q7MsVutWelO3IUAp2kkn28cDd7EEl491O+mztoaEnJQDRjn20ouK5+XuSZvMuaOXgQ7FxqpQutxSsUktoj1yVJCpjjIJDcqw2JUR3KG0oR32km+umduTarIR8bh83Pf2ewfaanHJnX++y9R7l5q1WwhBSJeFF5tTtbj7ABywwFQilOgQfD4fZ01o9eJxWkkRrnxwgy6z1O41B1+BH+H2dS45KYHyw1zpRsZscjWFVe19pWSJEGfXyEyo0uMtTbrLrQKkqQ4k6g/6+tzFXjKNQoRQg88QtDRuHFQymoIyIIzqDyPfgl5Sq/5LkPXmQ2Mq3ySOwksOS3C4uTAQdEnRXb8nTVKf6ld/Dx9aQxWqiC3UJb1oaCgB7B7eZ5Y039zJcVurp2kujnVjqJ7yT2cu2nZhtUOCPyFqcWlWqidSrUkq1OuuuupPRtToUAchhSuL+PMOc+3twrWuGKb
bUktAEDsrb389e/8OtocnAYXSNJVeGBzJozGdO5GuuoCQNN3jqo+AQj7evGtDT6frODsT1UBhyyH05Y11RAlOoOnkVgfDt6bQ01JB8+tNDWh+necEA+Vfp7BhJkMH8AHkSG9ewJP43ln/V1tQZ6vt/VjTPL+GmfZ3d+ExxkpPjuX/n0Py6d9rY/p7efU2Oh45L2frwBuSwbtPb+gY1FRtfHy8+/if5nrfXsxAY1zNcsaT8XaknT4DT+P8CRoOpMbAnuxCmXOoFMN6Q0ELPbt8NO2uvfT7NepQaq0PHEUx0qTl+jHv0Hm0B1TLjaFDRKlNqSk/cdND15XByGZxgY+3HhCTrqSraSO6vw6ny8O5+7rarAih44jyQ9nAYUWWkaj00KUfNatAkfHQEEdvt1P2dYMW5/UMYLEBwwtx4JJSQhTiz31J7DX49+4P2/DqLIxpmaDBG2jOqgzOG7ex1eod/zBIICE+H+ry18ut1oc8vrx83Onl0PAClP6f1YQ2o/5ZKz6epJAA1Vp9w/29GlYV7cIdyhJPIY3AylSflSNP86j3BH2+X8Otuo1zOIZUjhl34Spbfp/OrVZ0J8dE/7z/h1vQ6shkMQ5F0+I5j7MNebJcII1SE+BAAAGv2eZ6lIqjP8AFjRRjn+HswjIC1Eq/pHYD4/b18YgHI54++WWzxsAL7dvE/Dr4rEZDhjzRUFcKKV7UeAACf8AT49fK59+MdH141yCrVStNT4aA+HX1iBkMxj5oyzwVMD4wyvNApylhvvIQCQW2lq8fPUAjoZfblZ2Qrcsqg9pwV27a7+/almjOR2A/ox+yjCcpxSZ9LdR5DHfQb21pOg+G4AHU/DrK2u7S6TXbsrL3Y+XdlfWkhjuVKPXgaj78JTS3m0gBKhoD3Kvm/n36zKqcaAzpgj8K8SZbzXat0GNONJksQGnVlbqUdkspPie2p08OhG87zYbBZC8vqiKoGQrhj6d6c3jq/dztW0LruzqNKgZDjmcNbPsNyzjnI7LHbYj62qe2u+mrfoO+h7ajTt36nbfd2e52qXdvnFIKjAvc7PctmvZdvuwFuYXKsOwj2YaiLx1aQt1xzdp3BJ01/n216lm3QcMQxduVqxzGEeTZSHXCvcoDwSAetqQJwAxHku5Sa1yxgTYvdvzFfcT4HrMQL2Y0G7lrXPG4zYvA/j8fPx17/fpr17yk7MYtPK2YOFNu3fA/Ert9vl/EHrEwpjDz560rljcaunB/wCIrX+Pb4+B7d+sDAOVMZiZ+dTh3Y0m0yGYqJDfYjMRmFTbS1mrLVZS1rX/AB7Gyf01bab10QgauPOEIQCo9RbpobWMO4JLGiqBVmY8FUc+88AMzlibYw3V7KY4yFRRqd2yVF5sx+4DNjkM8EFzN0yG42LYqiUjHm5LSgp5Gyzyu2/4SLi2SkkoRqSIkTUoitnvq4VKMGOw0s17e0N1Tl8Ma/sr/wB03Fj3UGC024lo127bg62IYHPJ5n4eZJ/3CcEHaxJxiub5S5UOijSN0WA+t2xlIOqJU9DZM+UFa/PHhNILLPiCAtQ/F1It7caTcOKOwoB2LyHtPE+4csR7q5ZStshqiGpPa3M+xR4R7zzw1nsmkSJD7ylqSl9ZUlBJ/LbACGWxqewbZSlP8OpQtkVQOzEQXchPHHlVup2vs2VOnQIiyBqdf+HJQ0tXj/6t49a2iCyKQO0fZjekzlCpJ5ffgczpAKlakq7+OnY/f1KWmMWVqVPDDjxiI3kU+BCmAhyOtKUKB0XPgtncqDqfGQwNSyT3UnVH+XSNcSfLozpwP2Ht9nb9eJdrAJnVJMiOfaOz29n1dmD/AMzcW4LU4dSXGMy0JsZbTZdjeolSlEoBJ0GhAP8APpb2bdNwuL2WC7SkSHI0w173s+12thDc2b/nOM1PEf6frxDaVAfir1caUADruA1A7+J06bAwPA4TniOmhGeN+BY+gQddCP8AyeXWziMBbi11k4UZFqX+xJPbQAntqe33a9fK
CmNUFmVauEdTBlKCUjXcdDp9/frA0XPByJcgBgrYligfU3q3r4aDTz/0+/oZdXWhcF7S0DkDjiQ9DhyGghQa+7t9uvw79Ldzf5EE4abPayaVGDPj2PIXIix1I0S680k/xWncf/cel+6vDpL8wMNlht4DhCMiRh4OVYU+87tGrjzjnh/mWSP5DqAJzQDlTBcWYB1U4kk+/Cm3GS0PwjQjuPj2060M2o1xLjQKMfFxWwCoBPfvqPh9/WOog4z0ilRjLHf+gxWhqh8kmwhwbWxHfchpTA/S4i+2oKWll9Q+LidfDqNbp5jmb8CjSv8A3R/R7jgnuTeRH8qP8SRtb+zPQvuFWP8AWHZjJCZ9RQ1GpVoB/p9vUhzpGWBsMetqYcJgIS2Bp30Gp7eP+vqN5hrgibVQlO7CRLrApKtRqPIkDx8tfh1uSYjEKWzUg9mBlkdYlLbiNoJIPlqNv+g6MWc5qD2YWdxtFAK04/T6e3EXc3pgQ6oJAHcjtpoQCdPgOnGwnrTFfbhb6WIPD9OIzWbKY0txeqQWmJ7g+zbCk6E66eCtOmWMll+r7xhUlAQ5cc8D9M5CDtK06f6v9nl1N8skVoaYiiTOhxnL6T3CgfMnXxH3dYkYz1qM65YwqdSR+IfzH3fb14KaVxi0px9S6lvT5066fEf+br7p5Y+CQUzNTjP9UgxXRuALTzbuoP8AS4C2rv8AeB19VWrXGp3Fff8AT7MePqiU9nAPLt/v6+gHGI0E+3GByQ4Bqlep/wBY6+gnnyx8MY4DGsie6hfc6d/LzPh/q63CunGlowDXDyr7GXZwF1berkmM59ZARqTvQ6W2p8cfYPkfA8glw9RmjSOTzzkGFD7uB/R9WJSyPLB8uK1U1Uf3h+n6zhXVbLjPMx4zg9CvR6Dax4PO67pL+mni69qR9gHWp4PMUlhm32DsxthuGt5B5ZyX7+ZwuRMxlR1lailR08T21+zoTcbSkmS1GGSy6ilt21sa/pxrP5g5KdUpatup0AGoH8Ovke0iNaDGU3UnnyFmrxwuUWSD6+CFLBIcfOvmdsKUvx18Rp1HuNuOlqDs+8YmWm+oWUV8Wf8AdOGc5fFaz84UCpXj376n7fh0USz0jhywvzbsHfL4cYlWRUkq3jv/AIeX+HW1YCDSmeIr3ysKjDkxZ1dhHv6PcSqdATc16T/+NMf9R9SU/wDakVL7+vmfRSPLrRcr5Zjn/ZbSfY+X2MB9eM7SXz1ltuJdda/1o6n7VLfVj2ibqEBR0O0efjqB20+PX0x8csYCfhXGhYyApJCSNdO/8fs16yjTOpx55qjCJFG9KlEjxPl5a+HW9sssRY21KT342PSSQT2/kOs1YjEd11VOPMeIlchAJ17eGmnfrMsdOWNAA1DDoapfVA0SD5+GvWpSScfJ51TCRcUnpEjb8PLz+zw63x8O7ET5lWORxoNxPTbHbsfAE6H+I1161ufFgpAQy17cJ77PyHv466jx+7x8R1uQ59+NMoI+vCElhBkthY1b9RBcHjqjcN2n8OpDfCWHGmNKMpNOOeJO5M5xqvjSM1CQwbosJ1CUp9UO7Ru3KHiNw7fDpZt13IbiTIf+Xw0XTbYdrAiA8+nvriKOxI7a6dyPLplIoKjC2KVpjzsRr3Ph92vWsmgAGMgorXH3YnXt3B+B/wDP1ichwzxmFGMiGgT2P8D2P3fDrBn7sbFj+vBNwXjDIc6eLNPEce2jUlKSdftB07a9Dr3crezTzLhgq4Lbftd1uEnlWyFm9mM2Xcd32CzBGt4zjKtdPzE6Aj7dRtUOtdtf297H5sDAjEqbbbiwl8q5Uq/YRhDipa1AH5Z/mgn7e5Ke/wB463ayO/78YLAo4ZH7P6MK5i9gdu0q8FeKT9xHY/6+tbODw4Y3xIUNWGePSY4Chr8vwPcgkeRPl1Ec8cEoYwxoMOuuOxSdeygAQRpqPj9ih0KmzqRwwXgWlBgjVbze1IUUpPbRxP4CfIKABLZ/w6Dz689NT3c/
6fvwzWgQgVy7+WHkw4jRIeHcjsvsex7d/EKBH+h6GMCc094+n09mGKAKoCSj3/TiPpnjZDASflKVNrIKka/IvTwUnw2qHx8R/h17XXuccO0d3ePp34lrbhc1oY24jkf1H7vswqwauxt3vRr4zskp/E72ShhI8TKfVtbCB/mP8etYaOMfmEAV4dvs/ViQUd8oqmnPmP63aO/68EGqg43SOo+ufTkFoNP+RhLLdUwsaf8AsRKHzSlBX9Kfl+3rYouLj4B5cXac29w5YEX7QRAmQ+ZJTgD4feeeJWVvK+VScFRg6noyKVSEIVHZYS2Wojit4hNrHdLI0108eg3+XdtXdRu2km8HMnnw1U7cQL3rjqR+mm6TWamxs9TGFHCtdNaV0k50rxwM7mhiPsqW1oNEnVpR1WO3kT4joyHINGHvwsWepSCpPswE7aobQ6shOmhIPbQ+PgesJ6U5Z4ftqZ2pq4YbTkEbwlIToO/h8PMdCnNMONuooBzx7NWXNNNAfLtp38+3n1EeWnHBeGGueFKFVBJ29idRuVr2+7+HQ+aaueC9vb1oBh8wYKENhOg7aHv0ImmIOD9vbACgGHbgVjb02WQY9VKdaj5C83j91CT88a2qLNf0syFKjL1aeS6w6QNR8pOo6Cb9b293trtMoLxeNGoCVYHIj38ueC+33c21yyXEJ8BhcMDXSw0nJqUy59o44V+YKCBybT5lQRYsdmU8upmMVsZJU1QZpWUjKWKRDwbSyXbegSGDtJC5Jb3fMjQcJ+pmxXfQXW6z3A/5O5IIkAOln4kV4BwpIdeRIIyx1d6U9Y2m/wC2wbpDqS3kVo3RqatIYxl6VJ8ssNUbHMr34qug0yqxV166Nj1cxLSwlY2r3Op9FO1PY9nXx93RcXKSQfPRkafLNPaRl9pxasFuIbz+GPmpcU7xWv3DDQfppzUZ0ta7B6LzSgv01IU07sCkrBBCkep2OoPw6W33GNFZSfECD9WWGptvSWRW0ihqD2Zj+jBqgczzbWniYvyVWsXdWuOYK5sWa1As7Jwp9GAu3d+nlKlqjLbTr6QQp4f8Td0fXr9rm2G27rGLiHTp1fCTl4dZPGlRU5V51phJuPT2fb5WuunLr5YFtXluvmIo4ssYBBUHsqdP4aYFdhS8cR5gZsH8xxiap5Laoko1NgtlH5aFqdqG46ZrKUKBUQt1glIG1OnfpdO47fO9IhPF4qGulguefIE040B9hwYjh3SJB54s5hSvg1qxyyzJIFe8HvwZsH4dRkbCrDH6TKshx+MpLT+QM1JZirebCFS/p2z6inGmfEDVS9uhWBrp1K0B5SLaRpIR+Irpr20GeXZU40TXyQrpmWOOYj4dVfZX6ezEp8H4OxlxaHI0qRLjpbDhQqIhiU0NwCUu7lqQtRA1KkfKNem3bbC0dqu8jJ3AD3Gv6MLF3uN0ylWijjmr+0WB7xkPqxIOvwGpoIjMqHXumQy4g+pIWltnY2olxDiEJ3vIdSdPxAp01Tp01xwWdtGJIo2LAggscsuRHMH7KYW7mK5vHKTynyiOA/QeRH288epLkqdKU8PkG4ABpHpobQgj02GUf+Gw2OwHj5nuetEt088ms8e7gB2AcgMb4beK3jEKr4R9KnvOHAzAKFt6DsSlaR49lAK/wCusm8JoPbjFFDNqPswp5LkdThOJ22UXM0VtbDr5Vhazyy++K+igoUZ091mM268ptOhT8qSSkq06lxPmHFakZ0ByA+JjQV0jmeQBwRsrcLDJNNQJQgE0HuFSPExoqiviY0HHEM+e8/yTLMlpcemOOU+H3NWjKFUcGSl+rsblq4tcdaulTGQk2EWfS4/CkxwoqbaS9onv36tLpiGa13W4s7vQZrfQqlcxRl1alPMMCCG/Zpikeqt8s+oem7Td9oMg2+7aVirjS2pCqUkSp0yKQVZcwCDhrQaAssoUyfUQUggeKtAPj59XLbSeAUxz5fW58wnljO5CQdRpoR2IPYj+Hn0TjkOAE0HI
UxouVyT301+Hx+B6mo1eOBk0WnlhRqqppCXJDiQdUqS0gj/iFI+Yq+DSfP8AzEaDz6khiToX3939P3YFyxgDWR207/6P9GHfRQyy7Cmp7vIWoKJ8FpJO5tXltKe2ngOikSKylPw0wm7lLKh1cWrgzRqSAylFjFaQY8sb1IAH5Lx/GjTwAKut0bM35TnxL9oxX+6vKh86Opjb7DhCyFiEWFax0BQB1JHypGn4ld/HTwHUpFbVQHL7cRdvq0lXJr93t7+7Ef7ivbU86dvZRJ08FK89XCOyW/s63sNI+n2d+Hi0Gugbh9/ee7uwzJMVCQpW7TTwVtHYf5WUeQ08+o5qDSnu/WcGQFpqrTv/AEAfpw15DQOoA0TrqQPMg+Kj5nrfGprXngbcNTMZLhOXHB8PE6+f/n6kr9mB0mkio440HGlJ+P8Ap/v63054HtWtDxxqOIJBGunbsf8Af59ZqaezGh4zz4YSkNIMuMp1IU2h9suAjts3DcCfDw62l8suzGHlDVmMsSbza247l4HVwamGyLhLaEurCEpAVtG4lWmqu+p79A7KPcVvneZvyOWDu5SbY23JHAoM/biOzkGJt10S4pPfTTRtJ01+bTTt0wIzjjkMKzotKDM4xNxW1LSkkEBQAS2SkDXv4jTQfcP49bTJQeHLvxp0AHLPDxrKptfZSiEJH4ELB/gTqR2/j1Anm/1u04LWdvmCfhHZhAu6ppTqtrhQCSO5BJBP/uWp/gOt1rIRnxONG5oq1HBcfKTArfJJ0Snxursby3mbhFra2IuZNf2J3LUhhsfKhCRqVKISnzPU+W+htojPdOkcK8WY0A95woNZyXEwhtUkkuHNAqqXZj2Kqgk+4Y1kYBfPZU3h8uLKqbr60QJUCzYdiS4cjUBSJDDuikKTr28j4gnx63/PwLam8Rg0GnUCpBBHcRxxCWymmuhZ6dM5fQVYFSGrQhgaFSOYIqMOzmHgfJeJzXt3b7DqLKOh5otkLIC0BSSQkkgEHqJsnUVnvSu9pXwNQ1ywV6k6R3LpqVIdyUBpEDClDkRkcRuepwXFb3gASddBoP8AHpjE5HLPCn8sOZ+zHkV0NvQKf8Ow1IH+rz6+CR6ZccZeQg542W4lcnuVbvLuonrweThyx4RRU8WMgZhK7Ia36d9Qk6fcSdOvpaSuZxh5URNAM8K0WvYWN62fl7EjalPYeOnmetLSMMgcbUgTjTw4sT9rfMuCYMwqLcx46VNtlKQ6WgknTQdynXUdVr1p0/uW7ANascXB6ZdZbL0veO24xB6ig4fqwDfdVyfTZderex6NHDZcUoFJ3DuddAO3YdMHSG03G22QiumJfCr17v1rvm7SX1jGqQs2QxDQTLKWSkubUeYSgDX/AM3ToqoudMVw7MxoprgicR8j5TxnZRLnG7FyDI+jZaeWnXapJaRrqBpqft6G7nttnutp8teIHjNMjgvtO57lsm4/PbdM8NyhNGXI58cEHKcpZzdM27sZf117N3LfcWfnUSe+mvcpTr4dR7W1Fiq28S6bdRQDkMSr26G4l7qdy945JZjmSTxJOANKjrYWpCtBqe2hGhGvik/x6NowYVwuSRFTQY0VIHgfA9tD4g/b1kT2Y1NGK58MfG4xUr5B4/yH+s9uvF6ccfPJLZ0yxtfROJGvb7uvnmDlj6ICuZ4YwqC0HQgj4dvHrKvZxxiUNT2YXsborDI56okRbEZiMyqZaWk1RbrqavbP51hYPeCW0DshA+d1eiUgk9abm4jto9b1JJoqj4mPJR+k8AMziTZ2ct5LoQhVUVZjkqKOLMfuHEnIYdVzkMJUJGL4wl6Pi8V9EiRJfSG7HKrJobRc3AT3Sw33+ki/8OOg6kFZJ6iQW7+Z83dUN2RQAZrGp/Cvf+03EnuwQmuIjF8lZVFipqSfilYfjfu/YXgo78K1Gv8ASYIsyds+wQ7GqBro4xHA9KdbJHilWhLLB/zlSh+Dr5L+a/lH/DXNu88l/Se6g54+xKYk84f4
jAhe4c2/QPeeWNeT/wAtBkPDs5LWmEz9jCAHJSh9hRtR/wCl1IVqkDszxEeIceRyw1XXl+AJ089fj93W7L348sYHLG1XKekOSIwJKpEGY0kAeKgwtxvwHYhxA60zgKA3IEYmwJU07R9PtxrJrlOBJX86z8yU6fIkaa/N8fHrUXzPZiR5Y4YWIyv09KXmlFDzakuJeB2ltaDqgt+BBSRqOsCoc6T8J5YzB8sVHEc8KdtbWFtHNm9KdeU2UidGcUVNsqUoBua034JZkKP5mmgQ6dfBXb5FDHF+WBTsP6Pdy7sYyyyTHWST2j9I/T34aEqYhSfmSNADuGm5J0+H9Q/x62aaHH0NUYbz8WNI1cZV6a/ig6pJ+BHx62K7LkeGNDwxuew4TVw5bJ17uJ18Uanx/wCye/l9vWzzFPtxqFvow9MagKkvt6pJGoB1GpHfU6g/b1Gml0qaYlQRFmz5nErsMoQlLRUjyB8Nfu08dfH/AB6VNwueIGG/a7SrCuJE01Q2Wkq2gEAa9vsHfw8OlO4nNacsP1raJpGHzTwEtPF0Duwy+8PiClpSUfAdlrHQyeWooeZGDlrCASRTJT+r9OFZERt1pOmoX59/P4EaefUfzCD3Ymi3V1AAzwnzGC0k6g+Gn8+t8bVOWIk0ZjwlhZ02nuD/AKafd1tIxGDZ0PDCBUzZFxHi2Uo6vzGGpDu0aISpxCSltCR2S22nRKR5JA6+oqxQrGvAADG26le5vJJpD42c/f8AcMEGujhtAWR30Gn2AjuT9/UOVqmnLE62i0jUeJGFjuoadu3WkVwQPiFMYXQlKFKUB2Gv3/Dr6Ca41uAFqcD+8i+sFaeJB18O+un+o9E7aTTTC3fQF66eP0+gwAcwpkuMvFSeyUq1Onj28O/3dNNhcUIAwhbraeEsBwxCzM4iGXbRQG0IhSUjt4F11iPp/wDXunizbUq9tRiu75NLtQYBC/xnt59HFNAK8cBzma4yNr2nQnt/q+/+fXjw1YyHZjeRort8e32a9YEgHLjjIcM8bCmkqB7d9P8AT+A6wGVO3Hqd2MLaBueZPb1mXEjt4qR+Yk+Guvy9ZitQRXGmShFew48xQFo0Pc9jp9/x06xk+3G1U1Goxu+mND28O+o6150ryxt0fXhOks6KJGo8wPP/AE1162Rns488a3XlhyY7JXUq/WdoU9Gc9GEhWu1595BRKCif6BDWtB/+GdfLgCRfJ5EZ+7h9ufux8gYxP5wpUGg7zz+zL34VZMZMeQUsqUqM8luTDcV4uRJCQ4wSfDelKtqh5KSR5dao5KirfEMj7R9Ptxm8OlqL8BzHs+n24wenvUoeXcn/AFafx6+Fjx54xaPwnHwxgrU7e48PLy7eOmuvXtVMavLY5UONqoZW3aRNwI+Wcex/y1s4/d1jKVMJI45f3hjdbo6zjjSjf3ThBDavEJPj2/n1N1AccQ/LPGhxvNNKKOw8e/j/AKutLsK+zGaqR78OLHprlPa1tolBWa6YzLW2P/FYQrSXHPxEmGpxs/YvqJOgmheLkyke/kfcaHE20dre4ScCuhgado5j3rUYc+SVArLufCZO+Oh4vwnQdUvQZaRKhug/5Vx3Rp1FtpRNAsh+KlD3EZEfWMTby2EF08X4A1VP7pzU/UcNKWhfzakg6fy+74dSVIxFZDQ9mMUVtYR5+f8AHxI6+uc8fI46ISQcbqQdh111/j3P8Pj19FK5Y0MDTvxuVyCZaQU9gBp/A6/7OsjTTXEVwQ2CxUxklLfy9tNTr9nl93WK0zwv7jIRw54ScjYa1V8umh0Hn/Hw8B1JjpSpxAtpHLe/DClpCUEBIP4vA9/LqK58WHexTwCorUYbzyiEAf4Hv9w+PbrfGcY3AIGrnhOW3t+c+J0J1+zy7+XUwEUpyOIA8OeNRTiydFE7VHUAk6DTy0B6wKrQ4kKQcxwwnPpIUTp4+P8Av8vHrEcKY2MMb9dUrnJK92xAOg1Hc/H7NNOo8kmnLG+GDzPZjzOr
nIK0hR+RX4V9+5Hx6+CXWO/GxoNBFcgRjXbQtw7QkrJ/y+P3ntrp9/WLMBnXG2NCcqfViwv2gcnYvgMwm9bjuJCknSQkaE6abfmGmgPn1XvWm0Xe7W2i1Yh+7Fq+m3UW39NbmtzfoJIxyI/XhT91Oc4jyLYlyiajtrUVKT6KQkJ1HkoADvp1o6S2692i18q6JOJ/Xe77d1JurXligjUmtAPpmcQaMCRDWErSSB2Go8fuPh3HTp5qsMsV+YHQ0YYVYqyE6Akgn8J7pOnh2PY9eYhs+ePBGT4RhTbabUew9JX+VXzNK+zU6qRr9uv8OoUrMo7V+36fVghboK9n07cbzDa2lDt6ZI12rOrTg8tiwVaa/YeoErhu8d3Ee3BeGMk1+/gffh0V8lTagDubUdBsWRov4bVn5XB/j0OmGoVGY7Ry93LB2zGhgGybs7f14ftU3LmlKIralNk9/VJQzrr32qIJB+xOo6GysFJZj4u7j/Thit0d/CgqnYeHu7D7MPpiPTUw32slc6SBqmrYVoN3bT1VjVLae3n30+HUBnkk+AUHb9PuwXhSGDiSx/Z+n3j7MeJ2UTrBsRGVJroCRqmDC/KTtA0H1Ck6Lk9vMnrZDEiHVTU3af0dmMZpGlFF8IHIZf6cZsfVumMo10Tu9RWn4SlPclJ8tdO46IiXStTgJcW2saafq936sHGjuAWpa9+g+VSQD4JSQAND9nWiUgaRgQ22atTUxuTLz1fwrI08CD4eXbTt1pJAGeN1rtPjBAyGGXZzY8nUOq2unwdT5+OgcA8uokr0y5YcrGz0gUGG0WlJdAWnUeSwfkI+w9CppQeHDDXaWpGbY3UtAnsSPtHn5aD7uhsknbg5BBx7MKkZpKSNQe3fTofK5ODlvCFpXhhfaBIQEg6A69j2P2dh9nQ6V6HuwahgLUJ4Ye+BxFO5nQLZSC8zNEpsLOiA7HQt1sqJ8gtI16EbkxezMY/Eyj21YffjdcrFFYzyS10LC9SONCKfp9+BfPzefi/JmQVFW4idWWFPRPusreXpK+msbQTMgeQtQDci5ulOhhaPzEMMtqB1I654/mLtoL7b4xMSywtWg/ZfWpc95ehUjMaBi5/QqxaW2jMy+WZZGjbKmg6EaKIU/wCiiVdVeLOwpkcDPnrB4b8FjkrFGVvU2SPIFu0y3+bT36CZVlDmttDRlMtTYeb7bSSoJGgTrzzsW7eTbHZbxqzLQKTlqGbVHfwqOR7sdMQJM8qmTK/tRTudOCsO2gJr/pxFQssWLDkJ10MKXGkCO4Vp+QhBdQ2tPZRRvb7KAPWyaMCVmBB1KcvtqMO0NwLmFWoVkVhq9vA/fjYsGYrtXVsY4pnArSJX/R3FhCpmL1F9KLjhVcN3Zdbvah59laUKYSXGUlG5ATqR0tmQRmk6lnrWp7K9nDLhljKa0u3kaja4CclrpoOzLI551OefHDGRgUqZKYlMX9RZ2pU01JbSm7M6ahTmyLLDP6Y+++/uIaXpuP4D5k9T1vPOUGMEzCgIAqSMgCAMzyBy7MDjbPbK3nJot6E1JAC8yCTQAcxU9uJ0cYMZPhv0GJxeQXa+JKZbdvKZuZPjoqbN+PHcnRXoqGEt7yHNupKVqKNqgCOm63QWskMJmXVIoLjh5Tc1cdor+jCpeBbm3e9FuZNFdBAB1qCQpU8wePZQ1xMrEa+NRONynZHqstjRgqbDDElBQUpWdSpTiCFfhBGvmPLp5sw1mSZMo6ZHkw5EdowqTKbtFZRRiMxzU8wezDwsblFg0GkJS2w2dQnTuvt2Ur7Ps6ITX7XKBfhQHHyOBYDU5vTCCPQU4gfINSO3y99PHsND1ijxggVFca31EkmuFxhH1Ba2hSUICkLWlOpS3qlWqf8AMpQUEoHmpQHUp3qFpmR9Bj1rbGeXy1oOZPIDmTgCe8K1s8e4cXHrn2Ytjlea4Lj8Zp9akQJKf1qDJZxec6kFYrLr00wpBAOvrKURoT1PhnltgjW7Bbhp40Un4WZn
ULG37kn+GT+8Scat2kt760uLNlY7bHayltIq4VEYtMo5vER5qA81pxOI6ZVTVrWNcRTaR6TIo0Y5a19CZhKpsCjZv7Cxr6Kbr86ZeNptFVzqT3S5EI8NOrO2XyYd7k8hi1tJbQMgPxKgBVUb96OnlHvjxRtx5110yEvYxHusO4XKzhfgaZtBeVP3Lg/8wp4Ulwt08hbTTYHdG0aoIOnlpp8NR1bdnJVBxrior+HTIQcL648acO2rTpHw0P8AuUOi0cjDAGe2U+3Ca9VvMauOpKmgSE7QfzlDTVA07pSP6leX3nqYk/4V+L7u/wDUMCZbYjxsPD9/d+s/pxmQlxKFhQ7hoAgDQJ3EjakDsAkDsOp8MiqAMA7q3diWGF2rcAjFOp3tr1T2O7vpqPu7/wAuikUoBr+HCrfWJcUp4jh+UuQoipMGS5rHlfIFk/8ACe8Eqb17dz4q8B1Nekg81MmX7u/9WE+ba6MY3zRvsPd39pw1MjtnGHXI7p02khKh8wUD4Fsf1rI8/DojA6Oobu+le7A5Nre3egHbT+jtPfgU2U5C9511VqTs8dD/AJnFa/Or7PDqTpD+zt/VicpeEZfEOX6+37sMabIKlKJUT469tBof5Dt1j5I5cMbTdyAVepOG6894jQ+Onx8/H+HWawkYxa8VhTkcau4eev8AEaef8utojamITXKfDwxrOgKPyjU+fh2+89bAMqNjQ3iNV4401tgAj8R8wPwg/Aq8+vU+nPHxiAT2j6sajcb1nAe69hBAA2NjU+Z7A6dZUplw+/GsurcDUD6sKbjYWtCB82zwSAQgEjuCdB4fw6+qCueNcjB8uOPxYCklKjuGnytoGgAP2j4Hr2sg1+04xKg9+XAY8RoJK0pcUAnUfIgfb4kjz68ZKio+3GKRDVQnPsGHzHjtx46lISEgNnvr8xOnmdOh8mpnzwctkCpkMDewC3ZJHzFSnOwGvhr4+ff7Oi1sKCuVKYWt0Ysxz5/T34lB7cuSZXBuUv8AIy6ti3jKon6hVW8sMyZjMuS0A/DlFKzC2upIC9p9XQ6dgCQPVG0RdS2H8IaRo6yBtQz0kDgR+L2cvbjd0b1PedEb7H1TbRxSzxqyLHJWjh6AmozSlMmGde7DU5gz6dy7yLcctQktYzkDrsVtyjS8n6ZhmuQI8KRHsSlordkISN63flLh0KkdtZ+xbbFsO1xbIxM1qikBzxNcyCM/qHLkcD+o93uOqt8uep2CQblcy+Y8a5Ip4DQePACpapJzJFcBDM+QMwzKUhnKp0yQ9XJEdDEnehyOEjRIU2v5gdPA+BHcajo/Y2NlZKfk0VUY1y54Xdx3HcdwcfxCR3lQUAY5jsGeBm9BRIU4UK2rT2IJPj8SD4g9Eg9MCTGGGeWEZ2plqcADSgknu5oSgfHy+HWwSrTjjDyXrlwwps0wG0LTvV5AAaa6eaR4+HWPm/VjaIlrTi2FER2Yw1c01H4UDvpp9n9R6wDFuHDGenR7cJs+2LKFJQAn4AEbvgO3gOtqRajjVJLpGQ92GszMkqlBxLrqCT4JWoDT7dPEjqQQAtOeIWZbUTnh0iO7PSFPLJ0T3UskqUPsJ8uo1dByxLqHFBSoxhdihtsCOkJQr8R76q+74J7+HWzXn4saDHQVSgGNaExtqo4A7/SMn7vykdYK1QDjdIlC3bU/fjDFfdivBYKtvn9/huGn+rrcdJWhxDXUjcMsK76EzAELSU7/AMK0nsFeOoI8Nfh1r1CM1HLElYTONNM8Jq6t9tQC9f8AsqIO1Q+/x3dYG4HLEuPaXGbj34cVXWeoQktkknw+7Tv8etDy864kG3WIaaYXZlCptneEfKBr4f6vLrKOYE0PHAW6Vg1V+HCBW4vNvp6ocQtRmI7apVnZyjsgVEFv/iy5ruoASlIOxGu5xXYdbprpLdNb5scgo4sewfp7MfbS1kvG8tKKoFWY8EHaT9w4k5DG7dXkAwk4vjCHo+MR30vSZLqdk/KbJvsLW1I0IjIO
v0sb8DSe5G7w1wwvr+auyDdEUAHCNf2V7/2jxJxIupk8sWVqNNmDUng0jftt/wByvAYW8axOPYaOTCpERpky7CQnxYht6b9g83nVEIbH9S1Ada7i7aPJM2rQDtP6uZxutLJZM3+ClSewfTIYwzXFyrNbiGlMN6txoUXT5Y0RkelEit+GobSfmP8AUsqV4nr7H4I6VqeJPaTxOM3TXISBQDIDsHIfTnXH29BDgjI7pgtoj/L4KePzyV6fEuqKfuT1nA4pq7c/1Y1Sx8hy+hw0bCTBqKyxuriXHq6ipgy7O0sprqWIsGur47kqbMkvL0Q1HixmVLWpWgSlJPl1lNcwwRNPOwWJFLEk0AAFSSeQAqScfbazubu4jtbZDJcyuqIqglmZiAqqBmSSQAOZxE/29++bi7n3koYRxngnLs/9OddkW+Xz8dxiBiFXWNreajWFlPVmbtrGZuXGtkRj6IzHVK7spS26W0HYvUPauq799t2m2vX0AlpGSMRKorRmPmlgGI8I0aj+yKMRcXV/ox1F6f7NHvfUd9tcfmMoSBJZmuJGNNSIvy4QmMGsjeZ5aj8ZLIGnEzH/ABhY2hham3EjsdW1KRsJHj3T36eSM8ueKoU5Z8BxwnzhvPYaJHiB2H2a6fHrai0xFkcn2YwpeXFR6qAk6BSVNrALbrS0lDjLif6mnEEgj4HrzAMaYyQkGvPDWsUoYKHmNyoMgrLBUdy2FJ0LsN4nxcYJ7H+tGivjpkpLZH4x9vf7/vx56rTT8J+lPd92G4+lQcLrCik67iAfHz8PD+HW1aUo2NLAk6kyONqFaqS6hD6N6VEDUDTufiPD+XWLR5eHGSTkGjcMSS48qq20U2QEpWdO4ISrU6diNdegG4zSRKcMO3QxTOF54lfQY09GQgtgOoSBppoFH4ajwPSTdXYYmuRxYO37cyLqGa4JUFHoJCVJKT5gjQ9/Eafw6Eu2rDLAmjlTDviISmNMcA/EhllP/eW56h0P/da8Oh7mrge0/T68GogBGe+g/T+jH5lWxW0+B/8AguvNmMbUoppj1YNBbJUBqfP79fH+PXozRsfLqINGTzphoOoKVHt/5P5fb1N44AupBwiYWzrSVJUPCFFP3/lI/wB3Xx2ogp2Y3hP+ZYH9o/fgkxz208v4/Z5eHUJuGC68B2UxuJ1Hfy8/u6xpjaMsJE6YwHWo5fZS88l1bLCnEB55DJR6622id7iGvVTuIB27hr4jrNaAgH4j+jGiYsyllBKilT2V4fXTCRIWwVNx1uMpkPodWwwtxCXXks7A8tpskLcSz6idxAITuGviOt6uFbSTniC8TPGXC5DiaZCvD+jAbz5KIsZ0D/Ird38yP9B0xbUSziuEjflWKM0xVPyfz9wtT5XdYLa8pYLW5qiXWVC8WmZLVx7xNhZSIUmHCNa7ITJEiU080UJ26qDiSPEdNsHUewWl2u3XN5bJfhgvltIofUwGkaSa1NRQc6jCbN0R1luG2tvVltd9LtJjZ/OSF2j0JXW2sLpoulqmuVDXDaWdVH7z08AUAHPFenjht5TmGKYNVG9zLIqfFqYSGYirW8sI1bBTJklQYYMmU4216zuw7U66nQ/DqHuG5bftNsbzc5ooLUMBqdgq1PAVJpU8sFdn2PeOobwbdsdrPd35Ut5cSM76V4tpUE0FRU4X8ByrCORat26wfLKPL6mNPdq5Vnj1nFtYUezYjxZj0B2REcdbRKbjTmXCgncEOpJGih1CsN423doWudqmiuLcPpLRsGUMACVJHOhBp3jEneOnN86culseoLW4s7x4xIEmRo2KEsocBgCV1KwB7VI5YX72dV4zX2VxfWUKopaiI9YWdtYyGokCBAjILsmZMlPKQ1HjsNpJUpRAA8epVxeW9pbtd3TrHbRqWZmNFUAZkkmgAHbwxCs9vvNxvI9usInmvZnCpGgLO7E0CqBmSeAAzJw3sLznjzkhMqXx9nGN5pHqJEZizfxq3h27MF6WlxTDMpcJ11LK
322llIUQSEk9Qtt37at4R5NquIbhEIDeW6sATwrQmhNMEN96T6h6cZIt/s7mylmUlBNG0ZYKRUqGAqASASO3DgDSosp1hY0UhxaD/BR0Pby7dFWbUtRgJHVaYHuec3cQ8WutxeQeRMWxae6yJDdXYWjJuHI6tdshFPH9e0VHWUkJWGdqiNASegG69T7BsbCPdryCCUioVmGunaEFWp30phv2HobrDqtTN05tt3d24OkukZ8sH9kyGiA92qoGeNzB+TuO+VWXnuOc0xvMQwtpEpqjtYsyXCVIJSwJ8FDgmwQ8UnZ6zaN+06a6HqVtG+7NvMbS7TdQ3Cp8WhwSvZqA8S1zpUDEDqLpTqbpaZYOorC6s5JPg82NlV6cdDEaW01FdJNOeHbmOV4tg1S5b5ZkFTjeNUhjxZV1dTY9dWtypkhuOlb8uQttlCpc11LaNT8xKQPHqTeX9ltlo19uMscFtUVd2CqKmgqTkKk0HaTgdtezbnvt+m1bLbzXV+wOmOJS7tpBZiFWpNACT2AVON7AeQcH5Qx2ZY4FluP5jDxycqLInY7aRbWNHRISzIkQXXobjqEPw3ZjbuwnUNydfDqDY7vtm6Brja7iG4t9WktGwYBwK0JBIB0kZezBHd+nd+6edbTqCzubO80a1SZGjYxliuoBgCVDAioyrXsw2cr534UwG3exzNOUsFxi/jNR5EiovMjrK6xaZltJfjOuRJMht5LchlYUkkaEHUdQ73qfp7bLg2u4XttBdAAlHkVWAIqMiQcxwwU2noPrHfbFdw2Xa7+7sXYgSRQu6EqaEalUioORzywv5nybx/xzAi2meZfQYjWzZgr4k7IbOLVxJM5TL0kRGXpjjTbkgsR1r2g67Uk+A6m7juu27REs+6TxW8DNpDSOFBahNASQK0BNOwYE7L09vnUdw1psFnc3tzGmtlhjaRlSoXUQoJAqQK9pAwr4FyBhudMRMgw3IqjJ6N1N0yxb0k2PYV7z8StnNSWWpUZbjK1sO/KoA/Kex69bX9luln81t0sc9qWADIwZahgDmKjLge/GG4bLumw7kdv3q2ntb9ULGOVCjgMhIJVgCARmMs8Cyp574ev8lRhtJyVhdrlbkqbCbx6DkFZIuFzK1mQ9YRRXtvqkmRCahuqcRt1QG1a+HWq26l6eu70bbbXttJuJZl8tZFL1WpYaa1qACSKZUOJN70F1nt21nfL7a7+HZgiuZ3hdYgrlQjayNOliyhTWhqKYI9vl9DjVPPvsgs4NPTVUZcuytLCQ3FgwIjYHqSZUh0paZZbB7qUQAOiV3cQWVu93dOkdrGpLMxoFA4kk5Ad+AW3bfebleRbft8Uk19MwVI0Us7seCqozJPIDCbx/zDxnyQ5Zf2Bm+MZiKUQv1b+3beDbfp36j9X9AJv0bzv0/wBZ9A96e7Tf6S9PA9DNv3rad4D/AMJuYLny6avLdX06q6a0JpWhp20ODu89K9SdMtF/mGxu7Hz9Xl+fE0evRp16dQFdOpa04ah2jGxf+7L24Ro9bDtebOMoGQ485Y4jfVc3L6aPYxXaKUpqJ9VGdlpebWxHUIy9w1S6wpJ0I6Anqvpi0up7abcLRGD1IMqAq3B1IrkQwJI78NSenHX24bdbXtrs+5SRPEAGFvIQ6U1ROpC5qyEUbgaYYU73Ze2twKW3zjxeSSewzKkJOvw/5vrevWvSXA7lZU/65P141N6WepGg6dj3Sv8A5tL/AN7gvY5llDkdJXX9BawLqktoqJlZa10hqVBnRXAfTkRZDKlNvMueSkkg9MNvcQX0KXVo6yWzqCrKaqwPAgjIjvwl31lebVcS7fuUTwX0T6HRxpdGHFWU5gjmDhYdt4rLLjrrrbbTTanHXFqCUIbbBWtayeyUoSNT1JoFGpslAwO0tIQiZuTQAdp5YZHHPPXDvId+5Q4NyVhOWXTcN2wVV0F/W2c9MCM4y0/LVGivuOiOy7JbSpZGgKwNe/Qmw6i2HdpfldsvLa4uQpYr
HIrNpFATQEmgJAJ7xg5vvQ/WPTtr8/v+2X1nYlwgkmheNCxBIUMygVIUkDjQHEq6uY0EpJUNAnv4f7/h0ZVDSmKs3GRS9AcsBXlnm7ivjORXR+QuQMSwx+5TKdq2slvIFQuwahKZRLXDTNeaMhEZUlsLKddpcTr4jqDuO+bNsuhd2uoLZpAdPmOqatNKkaiK0qK07Rg/0n0b1X1UJJem9uvL+OAqJDBE8gQtUqG0g01aWpXjQ9mATO93HtqWkhHOHFy/+7mVIfH4aS+gJ606S1f/AFTsv/HJ+vFrW3pV6kJHRti3WtP/AJ2l/wC9wt4bzRxZyM/Oj4HnuKZhJq2GpFgxj13BtXILEhxTbD0tMR5wsIdW2oIKvEg6eHRjat+2fd5GTarqC4dACwjdXKg8CQCaAnAHqDo/qfpuJJeoNvu7KGViEM0TxhiBUhSwFSARUDhUY3srz/FMOqZGQ5dkNRjVBDcYak291PYr4DDsp5EaM27KkLQyhT77iUJBOpUQB0Zvb+w2u0a93OaOC1UgF3IVRU0GZyzJoM+OFzatl3bf9wXbNkt5rrcXDFY4kLuQoLMQqgk0UEnsAwnYxyFhud1f69heSUuU0v1T0H9VorCPYwPq46WlPxxJjLcbLzIdSVJ11G4dYWO57du1v83tc0VxbVK6kYOtRxFQaVFRUcsbt22LeenLz+Hb9az2d9oDeXMjI+k1o2lqGhoaHnTCjc5JTUdTYXd3Yw6mnqoj860s576I0KBCitqekS5Uh0pbYYZaSVLUogADXr7c3EFnC91dssdtGpZmbJVAzJJJoAKcezGG32N3uV1FYbfG817K4RI0Gp3ZjQKqjMknIAcTgd0/u39uEVpUd/mrjTaDqhYy6m8D5f8AsV0pS9adIsarudl/45P14sS29LvUeOofYt1p/wCbS/8Ae4flJy9xxyih3/p7m+KZgiCCqW3j1/V278X5koCpbEGS89FQSsaFaUg7hp4joltm87Rumo7ZcwXGnj5civT2gEke/AbfemOotiZF32wvLPX8PnQvGG/ql1APurT3Y2MhzXEMApncizTIqjFqdh5iO/b3U5iur2ZElfpxmlyZLiGUrfcO1I3Dcew627juVnt1ubq/ljhtAQCzsFUE5AVOWZ4Y07LsW5b1eLYbPbzXO4MCRHEpdyFzJCrU5DM04DG7hHKmF8gVkm2wDLKHLauLNcrZlnjdrFsY7E9tiNKXDeeiOOJbkojy2llJ77XEnz6i2G57busJuNsningVtJKMHAagNCQSK0INONDidu/T+9dP3K2e+2k9rcugcLKjRuUJKhgGAJFVIr2gjljVyfnzjbjWdCi8gciYrij1gy6/Xxsou4VW5MYjrS2+5FEx1r1kMuOAKKfAkfHqFuu8bNtRVNzuYbeSQErrdV1AcSATmBgvsHTHUm/o77DZXV5DEQH8qNpApIqASoNKgGlaHjgmnkbCXMRGcyMhp4+Iqo28mOSvzWP0BWOvQ02LNyLMq+mFauAtLyXtxQWyFa6dYNeWyWn8RMiHb/LD+ZUadFNWuvDSRnWtKY3RbVfybh/B/Il/i3neSYdJL+aG0mPTSusN4dNK1yw2cH5g4h5MlWEbjrkPEMxl1LDMmxZxm9gXKq+PIcW3HdmIgvPLYaedbUlClaakH4daNv3/AGzdWaPbbiG4ZACwRgxFeBNDzxL3npDf+n0SXerK6tI5CQpljZAxGZA1AVIrnThXGpL9wPClVlycAs+VcBh5qbWHRDFpGTVbV4bixcYagVgrXJCZYmy3JTYab2hSytOmuvWmbqLYkvP4fJdwLf6wvll1D6jQKumtamooOdRjfbdGdUzbZ/GYdvvH2ry2k85YnMehQSz6wKaVANTWgocGpqSlqLImvOsRKyHHdlTZs5aWa+PGYQp15515zRAQ22gknxAHj1ncuiVdzSlTXmAPp7MabCCSZljiFSxAApkSTQe8/XhE4z509vnI1rIpMF5ZwLObuFXu28uoosjr
rl9mujyYkJ6c1XQpDsz6dqXPYbLru1tK3Ujd3HS1Bvm37lMYNvuIZpQuqkbqWC1AqQOVSAadoFMPd30tvOx24ud5s7i3tmYIDNGyoXIJCqWHxUViAeQJrlg1WWYMV0GTKXLj0VTBjuyJljYyo8VMeGwgrekuz3VNxYcRptJUo7glKR3V1IZYlUyykaRmTwA7a1+84jxPNI6wQKxcmgXizE8AtONeQ54i/D97ntMfuhQo9wnGS7Fa0pDrmSRm6tTjj3opAyF7bQKJc+Eojb834e/QA9TdOvL5IvLfX/XFP9b4ftw5DofrBLf5lttvPLpX/DOsc/g+P7PtyxLGvmx5rEeVFkMyosllqREksPIdZkMPoS6y/GkNlbbzTzagpKgSlQII7dFwagMuYPMZgj6cxheIIJVwQ4NKHIgj9Pdh9VKvSjuvdgpwhlJ008fxlSe206eY6+azUD6e4/ox4QhuPHnl9/68O+DNLTLze4j/AJdR1/qG3Q9/ALT2+/rBpBUHv+nsxsFqDyzp9PbjUVauK+UOHQ+YPj9xHWqWcnE23sgMzwxh9b1T8x7HXtrr/P4DXodLIxwft7YUAAw0c75b4y4eqoF5ytnuJ4Bj9nYJqK+zzG8r6CvmWi40iYIEWXYvMMuSzEiOuBsEqKG1HTQHoPeXttar5ly6RqTQFiAK+/nhk27bLy+fyrKJ5WC1IUE0FQK5VyqcD6P7z/Zm8tst+6vgNreQnZI5PxNnYVEAbluWaG0pBPcqISB3JHQSTe9sOQni/wBYYaIOm96UVa1nB/qH9WJcYzHpspqIGR43fVOSY/asIlVl5QWUG3prKK5+CTAs65+TCmR16dltrUk/HrU96jrWMgqeeJUVhJG5WVSrjiDl9nHCXyTyPxJwjS12Qcv8k4bxpRWlmmmrrfNchrcerp1suJInJrY0qyfjsvTVQ4brobSSottqOmgPQu4vI4s5WCg9uWDlrYTTtpiQswHAAnL3YJnBuX8a8qVSeQONc9xzOMMfiZJCay7ELeHeVAfrq+e3ZIiWEB16K5LhuNFtQSolLnY9+hs1zHcGFVYeWZkzGfwsCfqxju9tPZbXOwiLXFEUKRxZmFK93b3Yr69xvOXG3FvNmSyuUM/xDjfH28VrIzDeS3cOnkPym51PPhUFUwtwTbabW16UqcQwlakuessgJBPVBerdjd7zfXdtYwyzXPkRkKoJpoljKqByIUknhWrdmOj+gLmPbOhNmsbpo1t4LyVjLkHlklglE00j8ShZtCDgqqgGZx74W943BvJUyVVcW8sYVyJDvIr7+SYDHvGUZJ9GxsQixh0dh9LcNu1hZLiJAYKW9wSs6KGvNm67Jvu0PDd7zZzwPFNqWQqdBIpRS61WrAEEVr2Yta1k23eIym23MckoSmkMNa1rqOk+Ioa8aZcD3mqqn1tNYWKKuPiuTUK7hcO3iSobMyNBsS0y45CmANrk4/ZFh9KlbR6DmuoPfXpz2HquNJ5LK6htmmD/AJiFQ6moHiGVVrX4lyrxzwJ3Dp2ZBHeBroaYqoysyyaKnlUCRQQQK+IAU5Uw45GP8Qy2lWUvjyFUFIKnWo8iyXD1+YatfSvOsrCjroUnQeenRyf/ACdOnnXm3BacdBYr9QPdifaXu9yDyrPcJyB2ldY55hhqFO8YhvnXvM9vHF2TWGHVvJvF3HF7Rvrj3EO4yahqcmaE2NHkNRp0d6Q3MZR9C+hTHq6rCHAvvqCA/wArubr5/Su0zJYyVpKsZJcVIIBA4ClAa1Gdc8SpP4H8ww6l3FZrpaflSzDShpUeEmgrkTl9mMnEvuZ9uFvkbVZQ+4LivLb+xeSqspq/kDGpl3ZvPqLiEMwv1Iy59kdSHG20rUCCdNCD1CHTm8bSpvL21uBCDXU0bZVzqxpSvbXn7sEJOoNp3MraWdxbmQimlXXllRRXMdlMWO4xyA84GApwKjI0UmI4oLa+GikKJB3Dsejdhv8AcaRGxDW44Kcx9RwDvdpWpZPD
MfxDjhdzfmzjLjLEbHOOR7fFcGxKukQmrLJ8nsotFj8B+yltQa+O9ZzXWYcf6ybIQ00kkFbiwB3PTJZXQv38q2tleZs6KpP1Ach3dmFm5tri1HmXNzoiGVSQO7nzw7+I+ZuHuVcKl8l4vyhxpccX1js9iwybEMhp72rEiqbZdsYk2+r5D8CC7FakJK2i76p3D5QejaN8irNugitYVTUQQAwXtZvwg50FanliHFbNfSqNtaW7uWagpUrXuUcaczTLAqX+4J7Ea6ytK+092Ht/qp1fLeiTIM/kvFq+dCmV7zkd+DKhS7BmVElxiktONOISttxJSoAgjqZHbXV2i3EELGF0BUhTQqcwRlwIONk19bWitYtKiyq5EgLCuoZEHPgGB+lMQf8Af/77fZ/m/EuKUOFe6HhnI5KORqKzs28T5Fxq1uK6LDS8+q2Yjw57rifopCUuJURt9RCB59B+o9s3WXa0igguGb5yEny0YuqgsS4AH4CAR3gVxN6a3Haot0eS4mtgPk5wBIwEbFgqhGJP4wSCOOkseWDHgPKPFPuE4pqsl4wzrEc0k47kt3X5p/ZtzBt6ysyp1nGxaPsrgOvNRK/I3Fm1jN66pRLUkgKQrq0bW5EW9QTSLouZbZhIKUXzI5DrZB+y5YSr2eZTiDimILOdNkutvesltDdK0MhNXMEsZMSSH/pIQpgft8oMMmGEHA+buAM2zZ7i7EeZuOMl5GhyLeHKwily6oscnjSseL6bxh6njSnJrTtUqM4JAKB6ZQQrTTq0rDcYmoiupenCor9WK13Ta7hAZXRhF+1Q0z4Z4lBEwt90b9ykoT4kdyonulCT/mPmfId+jYvRQAfF9M8LL2DVJPw4UFY7LbUht9oOtAbUgJ0KEJ77R46/7epUNwvI+L7ziBPaH8Q8IGEmZQNLafeaCUKLiUbdNEkJ1PhoNpOnRGO4IpgPPZqa5Z/TjhuqhGMp1JISFI0UD2108NdPwoJPh4q6Iw3JOWdfp9K4B3digJJ4c/p+jnhuSZpSVoKgdB3JOn3biD8oHkkd+jEFyQB9Poe/Cre7epJp9Pb2ezGrOm/rEAo3kzoSdu/tueYHZJSB4BPmPh93U6OQxtX8DZ078CpLdWWgB1jn2jAulvON7grxBIIOoPn2/h0QE9Rga1uFbhnhsyZK06666nz17af6DrakwOIkltQZYSVPlRGn8h5/+bqUswpnwxAktwfhBrjyuSyNCs6qHilJA07f1qJ0SP49SUlFO76cMD5LVueZH0440HbFk9gpJGo/CdGx5fMSQVH+PWYKn2/bjHySB+rh/TjUcsGSPxheg8ddrQI+PmrTz62qg9n34iSBgaHh9mPTM9tCCoqSvQ9gNENj+Og179bPLrwy+/GlqgAnOmPBuWg4ncsE/wCVPyo/idPEdeMJ7Mu04+F8+PuxufrLJCQCEg6jQaAH7/DU/f1r8k+/HjJ+GuWPlll2O4vTWeSZFaQaaio4MiyuLezfaiV1ZAhtqelTZsp5SWo8aM0kqWtRASkak9Q7l47aFridlSBAWZiaAKOJJPADtwR2+2lv7mO0tY3ku5GCqiirMxNAABmSTkKccCJz3se1FUV1I9wvEJWRoB/fNBqf4fW69LLdUdN6wRfWtP8ArF/Xiwo+gusliI/hd/Wn/Qv+rD+435C425Sju5Hhea4tm2PVz6GZUjEr+tuw/PWCtmqUutlPqivuJGrvqbVIb8BqRoctN0s76HVts0UwOWpGVgveaE0I5Dtwhb5sO5bVP5W9W1xbPSoWRGjZ+5QwFQeZGXLjggcl8g4lx5jb+Q8gZNRYhQRJERd1d3thFqaWBIkPMwK2CqVJcajsNIfeRHZSSNyyNO56899aWEBvLyRIrZaDU5oMzQEk82J+0DASDZdy3vcV27bIJbi+YE+XEpZiQKkKoBNEQcuAFcC7HOZONuS02FvxXnmK5xAqpv0c+fjFtBvILMqQwh4181cN15kF2OsKU2rxQdfh1N2rctv3
aJnsZo54lNCUYNpPEZg5HmMRN76f3vpuZIN5tbi0uHUsomRkLLWhIDAVWuXZjHnHIXH2FY6vKeQckosTxOI81Cds8iuYNRJopj+70IdfMsXmv1eDLIPoRz6i9RsACtD1LutxtNoiM9/NFBH+07BUf21IAf2ZnsPDETbdk3PqK6Fns9rPd3B/BEjPInaQADWPvOS8yOOA7iXuR9tnIFy1j+B804DlF7IedZgV0e3RXWlk60RvZg1dumDNmPJB1AYS8laQVIUpIJEHbesOn92n+WsryB7kmgTUAxP7oNNX9muDm8+mfWnT1ob/AHbbbuKyABMhjJRQeBcrqEfZ4yM8q1pg0puY7WqFoS8PA6AagfDU9umPQzZjLCYrKBQgEYFNf7guCLnKP7IpeWcBlZs5Zy6VOJxclrHr5VtBW+3Nq/05EhUoTIi4zgcbCdyCg6jt0Gi6k2OW9/hsV3bvfayvliRS+pa1XTWtRQ1FKihwzz9C9WW21nfJ9tvY9p8tZPNaFxHoahV9ZAGk1FDWhqO3GhyPzXxbxjIgwM/zvE8Nm2jL0muZyS8gVTs5iOtLT70RMx5oyG2XHEhRTqASNepl7vuz7Sypul1Bbs4JUSOqagOJGojhzwM2npHqbqJJJNgsLu9SIgOYY3kClhUBtINCQDT2Y9Qsjqchrq68pbCHbVFvBi2dZaQXm5UKxr5zKJMOZEkNKWy/FkMOpWhSSQpKtR0ctZ4bmBLm3ZZLeRQyspqGVhUEEZEEZg9mFi+srqxvJbK+jeK9hdkdHBVkdSQyspoQVIIIPPLDHTznwzAy9vB7Lk3CImZrtYVG3icjIK1q+Xc2LjDMCsFYuQJRmzHZTaWm9u5ZWkAdx0Jm6l2CO+/hUl5bLuWtU8oyKH1sQFXTWuo1FBxNRhjg6E6vuNo/j0O2Xz7KImlM4hcxeWgJd9YGnSoU6jWgoa8MEHOuVsA46qoNtneXY9htZMmJroc3ILOJVRpU70HpCYjb8txttx8sR1rCB32IJ8utl/um3bRGJ90nit4WbSGkYKC1CaAkjOgJp3YhbL05vfUU7Wmw2lxeXKJqZIUaRgtQNRCgkLUgV7SO3Aqb93vtt7tq5u4y299D/d9LoP8A8L79Cj1n0l/7Ssf/AByfrwyj0u9Rxl/A91/+hpf+9w4sO9wfDedPw8ZwfkvCcpvF131aqmiyGtsrIR4rbQkuqiRZDjqWmC4kKURoCR1J2/qPYdzm+W2+8tp7nTXSkisaDiaAk0HPEPeehur9kga+3nbb20sdYXzJYnRNTVotWAFTQ0w5ctz7CsBp3MhzbJqPFKRlaGl2d/ZxKuIXnNfSjtOy3WUvSXSDsbRuWs9gD1Nv91sdstzc380UNuPxOwUVPKp4k8gMziFs/S+6b5diy2m3mubwioSJGdqDiSFBoBzJyHPDX489z/t65GtUYviPK+G3N7JdWxAqk2rcKynSGjopmsiWIiP2avNIjpc3JBI1AJAKz6v6d3WcW1he28lwTQKGAZiP2QaFv7Ncs8OG4emnWHT9sb7dtsu4bRQCZChKKD+2y1Cf2iM8jmcSAbtIpUYc3bqOyVnQEjyVu8j0bIb4kwAVUYaG44Ezvur9tWO2s2ptucOL4FnVTZVdYQpeY0keZBnQn3I0yFMYclpcYkxZDakOIUApK0kHw6XperemoZGgmv7RJUYqymVAQwNCCK5EEEHswwv6bdcXkCXNptG4SQSKGRlgkIZWAKsCFoQRQg8wcsSA9v8Azz7XeauWcM4+HN/GtlX3FpHRcxanOsdFmutCgHURGv1BMl4uObUKLKVqQFanTUHqHd9W7W9jMdjurW43URny41kVizcgFBqfdgfH6adTWl5A/U237jZbAZl8+doJFEcVfG2orQELUivZXliVPvlx7grAnsTx7gjIcdbgzYk+3zOhpr1mbAdbjJQayyftVrcYdfUAolhTwGnzadtetXQO49R39tPe9VxOkyMBG7LpIX8Q08gP2qYlep+x
9E7Vu8O2em1381tLQhpQHMg84kBQsjAFyy5stSFNAONMVU4Tzxw9yBdDH8J5KwzKbwR3Zn6RQ5DW2s9cZhbTbr6I8OQ6tbTS3kBSk6pBUO/TfYdSbBuk/wArtt5bT3GktpSRWbSKVNAa0Fc8J+79D9Y9PWov982y+tLLWE1ywuiamrpUMwAqaGgrU0OJYR8kbq69GPAJddQtuTbvNq13ywN0eBqCdWq9CvmHgXif8vUlofNk+Y4clHdzPv8AuwNSYQxi3pU8W9vIe77/AGYGOZ87cQ8a3EJHJfI2FYPPlwnbKqr8nyCsp5Fg026Y6X4rE19lb7CJPZSkggEEdC9y3rZ9oKwbldQQPIKgSOqkjmRUg05YYtm6U6m6lR7rYtvu7yCNtLGKJ5ArEVAJUEA0z9mH1XT6jI6auyOnsYdzUXtbBuqm1r5DcqBZ1lpGanQLGHKaUpqTEnRX0OtuJJStCgQdD1Pt7iOeFJ7dw9vIoZWBBVlYVUg8wRQg88CLywnsriS0vY2jvInZHRhRkdSVZWBzDKwIIOYIzxA/3/w8lyXiqqwKr5X4r4exPMr5msznKeRsotMdlT6qMEzkY3jLUGolxrJdglhx6W07Ji+qxHDJJaedKUf1GivLvZ02+O8s7GynkCyyTyMhZRnoQBSGJoSwLLULp4MaW36Jy7ZtvU0m9T7ZuW7bpaQF7eG1hSVVc1XzZSzgoFqFRlR6M2v4lUEpe3LAuDuAuC62Zx9kVBJ4/i18nKL7k5yfXqiZG7EC27jLbe6ZX9GI7BiLQgBZZiMNBtJ0QSS/TVj0/wBN9Or/AAuWM7YELvPVaSEfFIzjKmVONFAoMhha653XrTrnrNzv8Ey760iwxWgVqwgkaIUjPiBOoHhqdiWOZw5J3u+9r6Zsox+f+JlMKeU6kjN6E71PAPKOomnUBayPvHW6PrPpPQNW5WVaf9Kn68RpPS/1EDHy9j3Sn/m0uf8As4TT7tvbETqefOJju11/+bmh7fw+t63f506S/wDaVl/45P140D0u9Rh/+o90/wDoaX/vcOPI+fOEcYpccusg5WwOlp8ygvWeJ2dnktXDhZHXx1MJfm08l+QhqfGZVJb3LbKkjePiOpVx1JsFpFFc3d7bRwTrqjZpFAdRSpUk+ICozHaMD7LofrDcLm4stv2y+mu7Rwk6JC7NExrRZAASjGhoDStD2YcOP5BR5RUQ7mhsod/jF/GamQbSpkNzYUuMrcY1nXSWitl7adSlSSUrTqPA9GLee3vLdLy0kWSFxVHU1VgewjiD2j24XL6zvNrvJdv3CKSG7jbTJG4KujDkVOYYcwc+WGByFyVx1w+ll7kzOcXw9maFLrRd20WFItWE6f8AMVcFbhnWDQ1G/wBJtfpnsrQ9Q9z33ZtnjEm6XUNuG4B3Ck040WtTTnQHBDYuluqOpZmg6fsLq9KU1GKNmVa8CzfCteWog8acMN/j/mjh7laYtnjfkbE8rnMNfVv1VbasfrLMVJQFyl00j0LVEZClgKcLIQFEAnXrTtfU2w73WParuCeQCpVWGsDtKGjU76UGJO/9E9YdKgS9Q7bd2kDNQPJGfLLZkKJBVC2RNA1SM+GJy8VbQ80pSjqkjaEkg9vM/eetG61CHGvajWUV7cSRyz3F8GcJN0UTmPlvj3jSTfx5cihZzfKajHXbdiuXGbnO16LKTHVKbiOS2g4U6hBcSD49VXu99Y2MgF3NHEXrTUwFacaV40yxe/TOy7vu8DPtlrNcJHQMY0Z9Na0rQGlaGle/Eh6i1oslqqy6qZkSyqreBDtauyhOofh2FbYR25cGdDkNlTb0aXFdS42tJKVIUCOtKsSodDVCAQeRB4e3E1401mGVdMqkqwORBBoQewg5YcaIZbgtpaXuS/KcWAo99rSEtpGo8QFKPWsvWQlhwH343rCEiGk5Ek/oxpuNrbV8yVJII8R5+Pb49bAQ3DhjAilAeONtSgtHhqFAAgf4nt1gKg0xmx1LTDVnsFta
uxA11SSD3Hh8OpkbVXAW4j0PSmXHCVjKEtUtUEjQJgxu3/vlGvWGZQY2yLS5fuY/fh5x/Ef6fcetTYmxsCMZH3QhJSD3I8Ph/wCfr6q/VjGaQLkOOKv8ns844c92nKPK2S8Jcycv12T4xitFxXfcZVFhlELFsdjV0f8Auyhn1USQiHXuT71lt1IeSHCttx1sEPrV0nTyXO29QT7hPa3NwkkaLE0QLBFA8akA0FWoc+wkcTi0bGLbuoOiLLZbPcbCwlgmle5S4dY2kcsfKdWIq2lCQaZUIB+EYafNHImbe4SfxbXYf7b/AHDcfZ3iHIdLfUPIGb43ZYZQYrVmTG/uSRMmfVJgykSoEZCS08ApSUkNkqUW3NN9c3m+S28drZXkV2k6ssjqUVBUaiTWhqOR93YZWzWG19IW99Pue77Vc7bNZujwRSLLJK1D5YVaahRjxGVczkKiVPKGRthp5CXR4K7BX+rXwPV4bPakUJGOSuoL0MStcscT/v8AJjyPeVzROjPusSGsgxyRHksOKZeYeZw7GVtPMutlK2nWloBSpJBSRqD1zl6is8XXF6yEq6vGQQaEERRkEHjUHnjvH0RSOf0m2qOVQ0TwzggioINxMCCDkQRkQcji8fg7kRrljibA+QG1NqkZDQRXbRDSgpDF7DK66/jJICdEx7mG+hPYfKB2HXYXSm9J1D07abwtNU0IL04B18Mg9zqwx+cPqB0xJ0b1nuPTjV8u2uWEdeLRNR4W5/FEyH2nFZ37nvJQkWfH/EkN0FNaw/nV+gakCZNEmlx1rUL2oejxET1rSUlWyQ2QQCdaK9ed8Dz2fTsRyQGeT2tVIx7QPMJFK0ZT7er/AOUvpUxWm5dZ3C0aVhaQn91dMsx7wWMIBrSqMM+Um/2qH2Gvb5mQdVov/rJkKgNNdQcI48A0+3UHo/6IBj0ncU4fxGT/AIMGEf8AmsKD1Esg3/sWH/0m8wQf3EM8Xi3tny6LHkfSS83tcfwuGtKtHVtSp6bm1jpBSoKEqio5TS9dPy1q0IOnR71b3A7d0RPEraZbmSOId9W1sPeiMD3E88Kf8uWzDevVS0mdNVvYwzXDDkNK+XGT/VlljYd4FRSuII/tSZkiq5Y5DwiQ4W4+XYTGuWtVAJdscPtUoYa2kjc59BkktaTodAhXx6qj0P3Dyd8u9tPC4tww/rRNQD/Vkb6jjob+a3Zfm+k9u3wfFZ3rRnuS4SpP+tCg94xbv7iM7c4x4t5D5IhNNLnY/iU60qkPJKmF3LjAhVIkIAJWx+qvNb0+adRqPHroTqPdn2Lp283VADLBAzLXhrpRK92oivdXHG3Q3TsfVnWe3dPykrbXd3GrkZHy66pKfvaA1DyOOf326+3fNPeVm+e29znxqF1IhXGWZbbV0jJbWfbZE/PFcw1AFhUNPeumrklSlSmksoaSlCFDsnlPpDpDc/Ubcru5uLryzHpeWVlMjM0hbSAupAa6G/EAoAABHD9CPUn1J2P0U2PbrKy2/wA5Zi0dvbxuIY0jhCayX0SEUMiUARixYlmBzOrluN8kexD3FUy6y/TYWdAirySptYCXayFmGI2T70ebXWMFxc1UaNZKgSoUloqe9JxouNrKktudatysd39L+roxFL5kkYSRWWqCaJjQqy50DaWRh4qEVByBxv2Xdum/Xv04mNxbmKCcyQuj0ka2uEUFXRwF1MgdJEYBdQbSwALLi3r3+yWLD2fZbasBYYtH+PrCOHBtc9GZlVDIa3jU6L9Nwaj49dBerE6XHp9cTR/A7QN7jKhH3444/l5tXs/WOztZf8SNbtTThVbeUGnvGBX+1JaCq4fz2Qob2hyfLTIaB7vRXsRxtmUz9pWws6fBWh8ulv0QXX01doPiN8ae3yo6fbh2/mqJTrrbn5fwoA94M89fsxAf9xxO33U5egLDqE0OHei6nul5hWPw3I7yfsdZWlX8eqv9XTq61mPPyYfr0DF+fy4Jo9LbVeP/ADVz7x5pofqxNH9z
qYqVw9ge4khPJUc99dNf7XyP/f1anrmCOnLQHj86P+FJjn/+U8qetdwp/wCym/48GD3+2XRXVt7cqBytgyJTce45DUtTaVFIIcsdRqARr36K+k00UXQsAkYD8+T/AIuF3+YuCWX1WuzGCaWcH/AxRpYZjacce4W5zar3pssT5ZvLhDIcLX1AgZTNclV7q06kMT4wWw6PNtxQ8+ucJN0m2XrGTdrb/Ft9wdxyqBKxKnuYVU9xOO3INhtep/TSHp69p8vebNFESRXSWt1CuB+0jUdexlGL5vcbcw8k9q/JeRVDwmU19xjIuayY2pDjUmus4MebDkIW2paFIdjvpUCFEd+xPXW/Wt1DfdA317bsGt5rAuhHAqyhgcsswcfnT6XWN1tXq/tW13yFLy33ZYnU8VeNyrAg0IoQRmK92IcftMoWs8+lAUdo4sJ2jX/+o2n+rqpvQb/9a/8A9t/9MY6D/m8oP8vE/wDl/wD9JYhRU8Qvc8e8TLOJ2bz+23st5R5YQm5/TP1kwlUz+W5CQK39RqfqlSRUlkD6hvaXN3fTaavbZxv/AF9cbOZfJE+4XC69OvTR5Grp1LXhT4hi+16nPRvo7Z9Trb/Mm02aybytfl69UcKU16H001V+A8Kc64mU5+1Ns26c8klXkeLSkg/A/wD0RT1aY9Aia13alP8AyX/9Ixz8f5w1GX+Xs/8Az/8A/QsWf8U4O5xXxrhfHwtTepxCihUn6t9D+mfXmKkp+p+g+ssPpA5r+D13dP8AMerw6f2f+CbNbbR5nmm3iCa9OnVTnpq1PZqPtxyj1n1OOqupb7qPyvl/nbhpfL169Go/Dr0pqp26V9mHNkE4op7dISo/+yuf4ny+ke8vI9FpoSbWQ1/3bfccLdjOPnoc/wDep/eGKOv2uXFNe5SwUnx/6ZZMO3wN3iuvXKfocmvrKRf/ACGX+/Fj9Bv5uZPK9LImrT/1tB/wrjHSjX2C0MLUTponz+7y79ddiDH5UXt4plIJxRF+77LMvJeECTrsqc+/hum4p/8AU9c2fzBJoudq7DHcffDj9Cv5HZPM2zqI/wDhrL+7c4FHG37cQ5C49wrPDzIacZhjVPkP6X/07/UP042sJqYYf1399Qvq/p/V2+p6LW7TXaPDoVsfood62e13Ybl5ZuYEk0fL6tOtQdOrz1rSvHSK9gw7dVfzUr0z1PuHTh2LzvkbyWDzPndGvy3K6tHyj6dVK6dTU4VOJm+2f2sH2y22X2TXIJzM5XW1lcpn+1Bjn0IrpUmT6nq/3LffU+t9Rt27W9umup10Fs+n3pv/AJJu7i4N58156KtPK8vTpJNa+bJWteGXDHPHrJ65j1SsbKyXbfkDZyyPq+Y8/XrVV008iHTTTWtWrXlgN/uS5y7D4vw3C2ZJZcyrLV2cplKu8qrxeucLrK0lJHpJtLmI6TqDvbTp216XvXrcTa9O2m1o1HubosRzKRKaj2a3Q+0Dvw5fyg7MNw613LqB11RWNgI1PJZbhxQjvMcUq8xRjzphsftlZko45yhgqnFp+guqbLIje75Vm4guU9itKddQpv8AQooUdO4WPh1A/l9vxLY7jtDcY5UmHf5ilG+ry0+sYNfzj7Sbfddl6jUZTW81sx7PKcSpX2+dJT2HE4/cjKdV7f8AmMHwVxvliT4+dPKHx8terZ6+jA6M3Q8/kZv7hxzx6QTk+puwLyO62w/+arikj2w+2ce4+bmEM5scN/tSPSP+oMb/ALiM/wDWXbNrbs/X6L6X6b9O111c37/6dO/JPQPQf+eZLqP5v5X5ZUP+F5mrWWFP8SOlNPfWvKmP0V9YPV0elENhMdv+f+eeVaef5GjyhGa/4M2rV5n7tKc65LPMvDfI3so5Lwm9os1TOflofu8Ryypiyad51yqkMM2lXa1S5UxCU+nLaD7H1EiPIjSAlROq0D71L01u/prvVtcW1zrLVeKVQUJ0EBlZKt2jUuplZWoeYx86G656b9cel72zvrExqhEd
xbyMJABIpMbxyaVrmraW0IyulQMgTYd7yeQ2OS/ZpVZpFZERGXM8cZA/CSoqTClWD8OVKhBRUrd9HLWtvXU67derk9Qr8bv6XpuyjStx8tJTsLFSR7jUe7HNPo1tD9N+vMvTrtqa0N7Fq/aCKwVqfvCh9+Iy/tn8qHHOQ8q4wnyiivzmoFzTNLeSkJyTGkOuPsxWlkAu2FC++46U6qKYKO2gJCH6Lb0bfd59ilakV1HrQE/7yPiAO1oyxNOUY5Ytz+Z7pj53py06rt0JnsZvKlIH+5moFZj2JMEVa5VlOdTQrP7orxey/iYlROmOZKPmHca2dd4+OvUn1wFNw2/Kn5Mn95cQv5Vm1bPu5/8AKYf7j4nBZT1f/c+BH3LTr7U6VspWNUqA4xgJBR9+nT3cL/8AatB/+sif+jriqbOQf/HlkNQf80Scef8Azj4if+0UmUc55kMZQbSnFsWU+6shLLbYt7Lu7r4j4DqtvR0qL2+1Z/lR+34m4Yur+ZUMdr2rTw+Ymr2fAnHEOfejYyaz3fcvWtROUxNrs1r7CusYZLK482HU00iNLiqHdtbEhsKQfIpB6SeuJJIusryVCVlWZSDzBCqQa9oOLR9K4Ybj002y3lVXt3tWVgRkys8gII7CCRjpDxHl+HzL7Zqzk2saafk5ZxbYzrRbamXIlTft0kyDktXBhMqUw2Kq9jSY+9Wrn5fcJOqR0LZX67x02u5oQFltiSONGCkONX7rAimXDHG247M3TPW8mxSqS1tfKqk5Exl1aNtPDxRlWrnWuWWKTP2kFKR7kcyKAVH/AKKZH2Gp7f31xufAdz4dUt6V0/zBPXh8k/8AxYcdRevoJ6PtdIqf4nH/AMC4wRv3Zue8qsM6oeAKq0lV+FUVBVZTlNdFk+mjIMltn5EisatkNne9Coq2Oy9HZWQkyJKnVIJbYWmV6n7xO+4Js0TEWyRh3A/GxJpq7QoAIHaa8gcQPQvpy0j2iTqWdA19JK0cZI+BFADFa8C7EhiM9KgA5sDiyD9nnkui4dn57/1Tx+Xn9TjMjJLHjZOMy2YGsKA7YzKWDm6r1YlWyGmi2jfVsx1v/L6qUaOGDcenF9BtxuzOhulTUYwppkKlderjy+GlfrwVtPWrarveV28Wki2Dy6FmLitCaBjHoyHM+MkDkTlgx/szc+ZPZXub+3bILSXaUUPHF5/grc15yQaFcC0q6bIqWC46pa2q6wTcxZTUZOjTLjD60gKeWTN9Od4uC8u0SsWiCeYlT8NCAyjuNQQOAIJ54GesvTlokdv1FboFuGl8qWmWuqsyMe0jSyk8SCoPAY6LkD0ksM99rSNyiPxb1f4Ht1ZxfiRijY0rSvDtxvMOqU6Up00W24keQHynuR4oP2jt1HeU0z+n68EYYAM+P0+zH6M0t0pSkFZOg008T8VBOv8AMdR5JKYIQwVPCv04/wBOCXjmCzbRaCptQQdCRofA+X3d/HoTc3yRDjnhhs9uklOlQdOKdv8A24LxBGOe1HhaRsCXHufoLB0010/6dZ44Qf4o6rvqu9+ZtkUcBJX/AGWxb3Qm3m0vJJCKVhp/tKcVw+zT9lef7wPbFifuHrvcbEwSdlszL4MTCZ3FD17DivYrlNtjSVSMsY5Gq3i1YGr9YlNUSzv2aOabiuWu1/MwCbzKE1y014GnGv6MON7vfyVy1uYtSrTPVTiAeGnv7can7Y3LPNPsu9/CvaNnlrIaxLLc8veLOQcOTKlWWOx82ixZzOK5viodaa+ndn2cWK39YhtpM2ol7n0EtsKZy2+aazvPlmPhJII5V5EfTMYx3a3ttw2/51B41UMDzpzB/VyIy54sh/8Abg2SHfbBw22kDRPPUNQIOoP/AND3Ok6+H29S96JMC146/wBBxB6eULcvT/o/0jEov2SMoqcP/bJ/ue3ZK4GGO845pZ6uNNJehV+TONx44W6pttIcLEreVqCAkakgAnqFt8mmaEtmqMxA7SAzH7l+vGjqyKWa0MEJ
KtPcxIW7F0Gp91frpjlzxPG+Y/3Pfdjm93kmXRazIcmav+QMlu7REizr8MwyHbQoMOloKplcQyotXKv4cGHGDkcOKcLrrgWpxxSN1j1PD0ts1x1BcxmVlaukHTqdjQAtQ6R2tQ0AyByGLM6d2OTer6HaLZhHGEABOYVFoMlqK0rwqK5knicbvu49luf+xq5wbLqbktWTV9naus0WZUddMwjIaDKKmNHsA0qEzd3LkRSmnFKjSGJqyr0V70tnaFKnRXqBtnqElzts9p5M8aAvE7CVHRsjQ6VrQ0DAqOIpXOjB1P0pfdHXCTw3HmIJXVZFBjYMhIBpqalQKghjzB7+kX2M8wUXJHtGc53u8q+g5Ly9wqyKA64mL9dnlRYrx6XYRKxgIgyMbuGsbfeMdxJSy7tA/Ekdfmx653nV/R3rkOmdkjkjsoJFnjuENa2ksYZklY5iSOVggp8SZnmcXp0vd3nVllt91NGDF8sY20igJjZlcnsYtVhTLS1OWCrxn7lLP9eeostxZDodkyECdj4WzHlR3XSpl+XTrKmmkOtOIVq0UI89Ort2H1OlgeOHdIWmSQ01RjxA0qdQHLiMNm/+mVrPbC+2yYRyooNHIqCMqK/M15Gp78cx/vpxqBzH+5xneE0c5VRE5L5V4kwyJZCs+pVVvZVjPH2PuzRU/U1/1aoMuapfoesz6pTt9RO7cO1ujtwguejLbcdvBkhMMjKvwklXeq8MvECOB9+OUOqLKa26onsb9wsglRWY50BVKMc86KQeI92H57xP2rLL2scaZByPS87Y7yonDrKtiZdjreKIxS3rodlZx6ZFhWCJmeZotvo7Gcx9Q2oRvSYUt3cQ2UkTs3qVtW59Q/5ZnVIdzJZQBMsgLqCxXILnQGnHPKmeC26+n+6bdsh6gi8yTbwFYkxMngYgBsy2VSK8MjWuLnf2Y/c5bcs+27JajlCbOyHIOEMjRRHJX0JsbZ/A51THscekXb0nR2ZKqnmZsT6gqWtUdhj1dV6rWsdZbJZbPupv7eH/AJGSMyOqU8BBo7U5IcjwpUmlMHuld1ut025bKaWt4sojQvXx1FVXVzfIinGgBzzwGP3zPeLRZf7d+OuDMCdtYtfl3JaMkvZL0ViuYvKLA6iYE164qWfVks/3BkddLLilJIciN6agq6y9LOqtv6n3G7j2qN/k7SJauV0qWkYhQO06UfPh9mMvUbpy/wCnNvtzubxi7uZDRAdRCooLEnkAzLl/TiFv7SXIKLDj3nXhi6srp+oasa/NaLFokyT9BZXmV1P9qWklmtbdSw7bKRjlY2lxxCkgBIPgOq6/mVG/We4bLf7SPNt5vOgaMmieZ4fLdhzyd+GeQGLZ/lpuNnl2verDcAiXEXlzCSg1eUyusig8cjGppwq2Jicy/wDtvbJ/tLlzn+d7tPpJzOP59zBNwf8A6ECR9NJbrrbM5OKnJRzKwHVsvJVD+tFendp6v0419Pq8enOo2Ta7OxkgpJHBFGTr5qqqTTR3VxQnUOwxy7neX0U1Y5J5pANFMi7MB8XZzxz7e2jgB33F5nkOINZR/aZoMNkZcqw/QzfiSGMnxXGhBMUW9MWA4vJw76vqL0LWzYd+5JHrbqs9H7Qm6C3+ZLzrHo8zy/iR2rq0P+xSmnnxyoR/R3TA6s3R9tM/y+iBpNWjzK6WRdOnWnHVWteXDs6tfYJwpiXsF9pHOWbZPyQjNcbtRL5JXcSsTXisapjHhrKJlLCkx2siyd1145nh36fIIU2pp0A7SFA9Cf8AMkO4XG07rCpSK7yXOpKyrGV5ChWTzI3XkyV55ZSbLdWMm6bLIQz22bGlKNDI4bmaq0TRyoa5qxHEGvMl7F+Yp+A++bgPle/sJEmVYcvwYmU2y1IS+7H5MkTMQymzkFPpNndCyuS86BoFDUad9OrPtJTDcpIOIP35fpwibhAs9lJCeBX7sx92P6FGOPx5yUemEbE6bQFDt3B8/EnxJ8z05rIaV5nFcSRUahHh
wQUV0VxC/VYHZPZRHiT4aHTvr1sWd1pnjTLao/IYbFvhrD0ZKoqdilLW4dAPEHsT5Hv5nsOiUF+wajnL6fTvwGvNsBFUFDgNXlDIhvFD7J0JUkEAkHUHukkfL4d1Hv8ADpgtbhJBVfp7f1YU720eI0cZfT6VwHLmqfaccDeqghSvy9O6Rr5fE/ae/RyCQUwsXUGZUduGWZLsKUh1IILatFJIOi0n8SD9hH+PRFH1ChOF+eFlbVTGO6goeSmZFTq08kL7DuPin4lSCf4jqRFNTwk54g3EBI1KK4YcqL+IlJ7f0p0J/idSEjqSJuw4HmA56hn2YQ1teISnVPcHTsnX/tL8Tr9nW9GNcziJJFn4R+r3nnjTapZNm6tCT6bTQKlHbtQB9idNVdutwuBGAef24jm2aQ0/D9n9OGHk8WXSPJSpRWyonatXyIH3J8T36nW03mjL4vrOBd0hgIBpTt4YZrly4VpBUteh/qJCU6eYSPH+PRCMsMuH34EzSJUHifpyx8lXryUISVnxCtB2GmnhoNAB1KhOeIMrgjLkfplhNVduqKe51+8jT7epasMQ2z554WGbRbiE7laqOh18xpp3B7dz14BSa8saHdgtCeeBb7rJ6Fe1fntpPcr4kzcKPj3NBN7ntrqelfrGMf5X3Fv/ACOX+4cPfprcV672cczuMA/+aLjnX9nvtGHuusM8iO8hjjyLgkGhnypysU/ulMxq7etmAhLIyXHCw8yusAQkKcLpc/oCSTzd0Z0b/m97iMXHy7QBCPy/M1a9X76Upp458fr7V9TfUxfTiKzlez+bW7aUH87ytHlhD/0UmrVroB4aU51yUMuxnl/9vH3AYte4rlrc+XFahZDRXkKPIg1GX0SJvpWuN5NSOSJTfpOPRS3Jil5/YhbL7biXC2pGzeNp3j0536Mxyh9SBldQVWRK0dHU15ihFTkVYEGlI3TnUHTHrb0jOlxblNMhjkjYqzwSAao5Y3AHIgq1FqQ6MCtdVzf7jXI8LPPZHlFzA9QQsj/6X3MMP6peMWflOOWTRcQoJUlex4btQDr5dXD6gqkvQct3H/huIGHsaRCPsOOYPRYPaer1vt03/aImvEc9rJDKD9oOI/ftC2LEPinldDxSCvkKCoFR8v7bhA6Akd9R0M9GULbPdkf/AD0P7i4YP5onVepttrx+Qb/ivivfmHI+Q/fB7vJGFwLhtqHJyvIcS4/g2Elw0WMYljibCVOs0MRkuBcufV0bthLUgLckPkNhZbQ0EV1vE259fdbHbYpAFMzxQhj4I449RZqDmVQu1KkmgrQCl39MWuw+j3pUN8uISZRaxT3LKB5k002kIhJp4VeVYkrQKtWI1FquT3Qft/ZR7a+Pazlan5Gi55RRritqMjDeOv4hdYta2KnjVymWBf5A3Y1b0iOltElDzLqXlpHpDuRI6u9ObzpKyXc47pZ4Q4BIQxspPAjxvXOgOYIJBoRUiJ6cet22eou6vsM1g1pdNEzKDIJkcL8St+XGVJUkjwspAYEg0Btd9gPL9vzX7d6uXkFm/Y5ngt3OwS3dnOf8zkEerhVtjTWyZjij9RYqprVlh/1VepIfjrdKipZHV2+mfUF1v3TKvfEvdW8hhZ+JbSFZS3MnSwBbiSCTnnjln116OsOkeupIdqVY7C9hW5SIZKmtnSRU5AB0ZlTIKrKq5CmKfuInXGf3CkPALQ4jnfkY6EFDiFGzysEKB0UlQ17g9x1S3TihvVhV5fxO4/vS46j60On+XliMqbFZ/wB2DB0/dZnOzcx4fLpKi1jOTpCj4ndaVh/jpp00eusfl7ht9OcMv95cIX8p0nmbPvJPEXMH9yTFl/7WjHHnLvsJzq3sc3+k5g9vfIDeJHEpM6M69a4DlMMXuF3DUYufWNtGam2rWwUemEVQ2qJ3JSY9Jes9xu44el5YddpAjDzc6qCxZQ3cKlV7gOzCp/Mb6abVtt1P13b3YjvL2VP+WIADsEVZHQ8SSQHf95jXjimj
khbiP3GoK1p0dR7hOLlFPh3TdYiQP8OkneAD6yoBwO8Wv9+HFtdNZfyzyn/9nL//AIdziZv7ok1cnhzAUKQpITyWwrVWvj/a2SDQak/HqyvXWPR05aGtf+dH/CkxRv8AKfJr623ECtBtTf8AHgxFP29/t9vc8cU4/wAnjlVWLN3su7i/o4wP9d+l/RrmbUFz9R/vKn9f6gw/U09BGzdt1Vpqa06V9L06m2WLd/n/ACGkZxo8jXTSxX4vOStaV+EUrTPF5eoPrzJ0L1RP05/CPmkhWM+b815VfMjV/g+XkpTVT4zWlcuGJt+2v2Pte3rkRjkYcpKzBS8csaj9FGD/ANv7TbCG4JIsf7vu9fpvp/wegN+v4k6d7T6K9Ln6X3Ybx875wMLJo8nR8VM9Xmvwpwp78UF6n+vkfXnT7dNfwv5RluUk8z5nzfg1Cmn5ePjXjqypwOK3OUrzPveN7pncOh2yER38lvcWweHOkOKpcdxjH0zpM6xQzHDgXJmVtM7OlKQFuPvaNhexDQRTm9Tbn6hddHbIZAEM8kUIY+COOPUWag5sqF2pmxyrQCnTXS9tsfpD6VLvdzEWlW0inuWUDzJZpdIVKmmSvIsSVoFWrEai1Vr3HeyHJfbxh0DkSvzyPmNQxawIFotuifxezpZs1Tn6fLjoF1dImRfqWkoLgdZdQ4tGjZGqkyusfTK86Q29d3iu1uIBIqtRDGyE/Cw8b6hXKoIIJGVMxC9OPW/bfUTeH6euLB7O6aJnSsomSRVprUny4yrUJNCrKQDVgaA2x+xLmLIubPb/AFsnJJj9nluC3MvCLK1krK5lxHrYddYVFjMeUVOSJi6mzZZeeWS4+/HW4slSyTcvpnv8++9MpJesXureQwsx4tpCsrHtOlgCTmSCTxxzX63dK2vSfXEkO2II9vvIVuERclQuzK6KOQ1ozKoFFVgoyGKacd4dRz/7z8h4ik5XHwZrMuW+UI8vLJtabhijYqpmV30qW7WCxqVTj6NUpAbEhpRUoaEn5TzzJtJ33rm42lXMZmvrgBgusghpGHh1LWtKfEKVrypjsFepF6S9K7PqN4hMlttdmxQv5YIZIUPj0PSmqvwmtKc64N3u1/bqyH2vYW1yPScmtciY9Cn17dsp3EH8Iu6SNbTG4dFaLhIybK2CiZLcaBQZLbzPrNHadx2nOqPTW+6Z2o71HcrPBG6q4CFGUsaAqdThgGIUmqmpyBAJwo9AeuW19d7+OmZ7B7S7mjdoiZRMkgRdTK35cZRigZgKMCFNSCQDOj2Y8y5JzF7cbNnJZUi0yXj963xmXayll6Zb1zNRHnV86Y8tS3H5aYEwMuLUd7q4pWolSyTdfpr1Fcb/ANIub5i99bF4WY5s4ChkYnmaNpJOZKknMk45d9cei7Po/wBSIl2lFj2m/WO4SNRRY2MhSRFAFAupdYAyUOFFAAMU0+2Dl2XwZztx1yTHWymLTXaYd0iUkOw3KC8YepblcqMsKalJhQZ65LaFggPsNqGikpUOaukd3/gfUdpuLsVt1lCyUJH5b+F/qUlqdoHtx3H6j9N/5s6J3DZEVWu3gLw1AP50f5kVK8NTqFJGeliMwSD1UU13Wz3UF4SKxbhDi5kVZnQ3S4dynFRnl+sAsnUlDgHfw67dfWFqpDjly+o8Psx+VgjjZtLhkfmRmPeOPtzxzSe/blVrlj3OZ/PgS0TqHDX2uOcdktpdSh6BiTkiPYyWy6tRVHnZNInyGSAlJZdR2J1Urjb1F3r+N9WXMy/4EB8lPZGSG9tZC5BHIj24/TL0T6X/AMq+ndjbSCl3dA3Un9aYAplxBEIiUg1IYH2DpF9tUpS/bT7fWUnaEcG8TJIB+btgNB3PfwPl11H0kB/lfbT/APW+3/4SY4H9RM+vd9p/7YvP/SZMVGfu+5wt/I+HuN2ZZDddS3+bWcJKtfVcuZzFFRyXgU9jHRR2CW9Fd/WXqPw9U563biWubHalbJUeVh26iEQn2aHA9pry
x0x/KrsoSx3bqGRc5JYrdG7AimSQD2+ZET/VFOeDn+3+wvnj2Lci8Gm3VRvRJmf8fi3VDFqKerzWCm/j2aKz66vMxCLDIJf5Prs71Nq+ca69G/TlR1D6e3OwmTy3DTQBqatIlGsNpqK0LtlUVpxGFf1sc9G+slj1esQmjZLa6KatGtrdvLKF9LUJWJPFpagYZHhiEPux/bs/9pg4qY5OTzD/AH02/fU1GKY8ff2wUqthZ7pBsP73yDX6ZVbps9Ab9/4k6d6/6v8ATP8AyptB3U3vnkTKmjydHxVz1ea/CnCnvxcHpt66f/ELqQdPja/lAbd5fM+Z834Co06Pl4+NeOrKnA4Q/aN7B0e6fji7z9XLX9hmnzaxw79I/sT+5/qRX0WN3f6l+of3njvo+r/cPpej6C9vo7t537U4dFem/wDnDapNz+d+X8u4aLT5PmV0pG+qvmpSuulKcq1zy2+qfrf/APDTqCHYv4X875tmk+v5nyaapJY9GnyJa08qurUK6qUyqST+5Tx2OJMH9ovGQuRkX9kYLm9B+tiu/Sf1P6OVh6fq/wBN+usvovV1/wCH9Q7p/mPRT1V2z+C7fsm06/M+Xt5U1U06qGLPTVqV7Kn24Xv5et+PVG89VdReT5Hzl7by+Xq16NQuDp16U1U7dK17MXC+0lmsoPZbxRl1mFmNR8Ps5BMCDp/ydRWS7F/QhKtFKYjq07H7urn6RvhZ9DWM7/4cVirH2KpJ+7HLvqXtb7l6rbrZxGk8+6tGvtdwo+045+OI+OOTf3D/AHJXUK8zWNT29pVXGXXmQW0eXaw8axismQ4MGnoqhp+IlcaLOuokSLHL8VpKXC4pzdrv5w2rb929SuqJPmp9Mzq0juQWEcakKFRKioUsqqtVFKmtePbHVXUHTfoD6dwSWVmZbWKRLeKJWEZmndWcvLIQ1C4jd3fS5qAAtKUy+5z2ycj+xLkrAbGszpdoqzblX+EZzU1zuN2Me3xyVFYuIEqodsLhEeRA/UIxWn6iQxIjyglWoK2xn1V0vuXp5u1tPa3XmFhrimRdBDIRqBXU1Kal/EQwah5jEf0y9S9g9dOmr+2vtv8AIMTCK5tZHEylJQxjYSBI6htD08CsjpUfhbHUF7aOQl8h8U8YcmORkV72bYdj+QzILaypiHYzoDK7OKwtSUqcjx55cS2ogFaEgkAnTrpnbdwO+7Jbbmy6XngRyOxitSB3A1APZjhTqDZB0p1Tf7AjmSKzvJI1Y5FkViFJHIlaVHI1GKGf3puVFZ17rqfDIs/16vinjHGaNcFCwtqLkOTOzcytZR+QFMqZT3NWhYClJCI6PBW7rmr1MuDJ1H8pXKCJQR2M1XP1qV+oY7o/l/sBB0INxZaPeXLsD2pHSID2B1k95Pdjpa/al5ERyt7CPb1ardUqdimKP8Z2Da1ha46uN7WdiFW2SFHRLuP1UN1A/pQ4kadum3pm6NxsVu3NU0f6hKj7AMVf6g7YLHrG+TICSXzQe3zQHP8AtMQe8YsgeU5ERDbBJDccOEH/ADPLU6T8PAjoytHq3f8AdlhVLNEqitQFH25/pwrxpceWlKHUp3DTXt30+I+4DrS6NHmOGJUcscuR44VodZHXuLak669kq0IHwPc69+tEkzLx4YmQ2yPXSRhFyCtCWFhTe1afmQrTTXXXUa/A9SLaarZHLA7cLUBDUeIHL6dmBnj/AP8AKis0/wDsON5f+8kf49Th8Ar2YET1+ZenacOZt5LKdSe+miR9v+4dYkahjYsgRKnGAu+orXXUnuT1kKjuxHLajXtxUhzpkeFp9y/JVZ7oeUeW+McFgUmH/wDRBjCJWVQMbua+RVh3L5ji8Xpbt2TZtXWxC1rbCtSptatrTCRX26T2o3uePfZ7iC0Cp5GguFII8Z8Cmp1fq5DF69N2e4HpCzm6MsrG83JpJfm/OETSKwakQHmOgC6KmgPYRmWOAdlGa8MVFthsn2uc383ZryScxomW6W7ss5n0U2he
kFu7bu4uS41TRFV5iq1WdywhIKlJCRvRjFLtfzNuOmbu8n3drhAIz5hUqTRtQZANNOJrlxOWY2T23UZsb5vUHbNrtOmFspWaYCBZFkC1j8sxyu2vVkBQVJAFTkZL59mBlOv6OnupX9Xn9+vw66jsLPy1GWOB9xvjIxJPPHJd714U6+94HKVbWx3Jllb5Ji9fAitlIdlTZ2LYzFiR0FakoC3n3EpGpA1Pcjrk71GgluPUC9toFLTPLEqgc2aKMAe8nH6OeiN1BZ+jm13t0wS2it7h3Y8FVLidmJpyABJpibn7YPJSZuL5/wAW2UvReOTmMypEvuj5Ki3SmBeNMhR/JiV9jEYeV/T6k5R7EnW1vQzfde33ewzN4oXEyVP4H8LgdgVgp9r459/mw6V8jeNt6utk8N1GbaUgf7yM64ie1nRmUc6RAYrX56zSx5n5c5U5IgsvS6Vu0L0eQlRLMHFYMyvxTG3V+oUBsymjF1Qkal15StNNyuqU6u3SfqbqK/3qIFrUSZHksSlYoz7xpyHMk9px1L6dbDa9C9F7R0vOVS+MNCvNrh1e4nApx0nzMz+FQK8Bi3f9rRP/ANADMFf/AN4sgA//AFK4+/39X/6Gn/7Ergf/AFxk/wCDBjjr+a8V9RbL/wByw/8ApN5gOfutZkoL4k49YcbCduQZnaskpU7vJi0lC4kBW5tsAWIOo0WdND8p6XPXjcT/AOr9oUin5kzDnyRD/wAT2+7Dt/KTsgpvHUcinV+TbRnlTxSyjvP+DTsz7RiOXH1C77XffDxnj0lyRHhsy8Dpp763FhuU3yPg9VU3EmR6qgkxYt9fvrWD8jS4+qB8iNE7arQ9EeptlaMSsQaBCeR+YhVHJryEjsTyBXLgMWZ1DuC+qnoVue5IFacpdSqKCq/J3Ukkaig+JoYlAPFg/iPiOLiPe+Av2ocsrHctUEdlR/7JyCnWj/A9X/6jj/7CNyP/AIEf31xxz6It/wDbW2QDh8y3/CkxBP8AaMirlL5+bbBU4V8ToQkf1KWeSQkfdr1WPoIwX+Kk8P8Alf8A6YxfP83q6j06o4n5/wD+ksWb8u+0vgbmzJIGRclYH/c97R4/FxiNZjJ8ypdK2JYWlqiOYuPZDUwXQiyuZK/UU0XSHNpUUpSE2rvvRvTPUl2u473beddqgQHzJUogLMFokijIsxqRXPjQCnPPSXqb130RYPs3S998rt7zNKy+TbyVkZURm1TRSNmsaCgYLlUCpJxHj9xGvj1XtIzCuhtejDgTMBhRWd7jnpR4uV0TDDfqOqcdc2NNgaqUVHTUknpd9VAkfQNzDGKRq0AA7hKlO85duHf+Xwyz+rtjczHVK6XbMaAVLW8pOQoBUmuQA7sRl/bMs6qLxJnUWfdVVY45yO+6hqfZQ4TriDjOOoDiESX2lqb3JI1A01B+HS36K3tra9OXSzyRoxvSaMwBI8qPkTh8/mh2u/v+srB7WGaSMbYoJRGYV8+Y0qAc6Z0xCj9wRcdfuWyP6WfEsmEY5h6G5UKUzMYKRRRiloPx1uNksJIRprqAkDqsfVi4huetJpoGVozFFmDUVCCuYxev8vNpc2Xpja290jpKLi4yYEGhlNMjnw4Yn1+6TSyKvhnAFvtKaJ5OjtaKSU6k4rkqu2vwCerV9cJVk6ctNP8A8+j/AIUmOe/5U4jF1vuIIIP8Lb/jwYsD/Zt5ExjDvaFChXEFD8ude8juMvbEqWAmbLSEHUE7dP8AX1o9PNuub3oy1eF9KrcSVH/yuNfrlultYeqF/HcJqLWcNP8AxAGOX/lWJLu+ZeWE1cR2U4rPOSrb0GE7lt19Zb31xYSCkaaMwayI684f6W2yfLrn/doZZd7vI4wWcXE5NOxGdmPsCgk9wx2l05cwW/Sm2TTsEjaztFBP7UiRoi+1nZVHeRi0XhTkhGf/ALdHNONzpHrXnFeLX+Nuhfp+sqhmtfqmNSFBGm1ltlx+E3qASIJJJPfq8Ont+bdPSPcLCU1u
LKCSI8K+WRqjPsAJQf1McqdadIJsP8xuz7xAumz3S7hnFK081TomGfMkLIf+s5cMKv7PMZMge4okA+mOJNARrrv/AOp2v/wPWj0Lah3Tv+W/+mMbv5uEDjp8nl89/wDSeIw8BXUbGf3J2byXIjxItRzly1LedlvNR43pMqzfcy86+pDSEyR+X8x779OlDpzyj6rkTMqxm/uwSSABUTAGp7DiyeuBP/8Au6r8sjyTDZ9uIVQWZqNakgAVJqAa92L6sqynBIGQ2EaJmGLuwvqPqYTrd9UrQ7CmBMmOoFMtSezbumnlpp11zabnt8tsjvPAJKUPjXiMjzx+cV9se8xXjxR2l0YgaqRFJmpzGenvxvLW2U6dj9w16JLGRywCeUEU5YamR6fo1wR4/pVht7DxMV4Dy8esrhf+WkB/6NvuOPWb/wDPQkc5k/vDFIf7Wjfqe5awT46cYZOdPjpd4r/v65I9Cv8A8NJP/MJf78WP0O/nGNPSaL/3xb/8G5x0ihsNQSSNCrtr5dh5duuwAc6DH5MS+ObnSuKGv3bjrknCv/5KzsD/AO/MW65o/mH/AO0bT/1dx98OP0e/kWBG1dR1/wCnsv7lzibXt4yjFoft94YYkZHQxpTPG2JNvx3rivaeadTTRUrQ80uQlbbiFDQpUAQerX6Gv7BOkdsR54VcWMNQXUEEIuRBOKH9WNp3h/UvfpIbW5aJt1uiGWJyCDK1CCFoR7MjgqJmwLZtcmssIVkwh1TS34MpiYyl1KEuKaU5HW4gOBDiSRrqAoHzHT3bzwXCF4HR1BpVSGFcqiorwyyxVF7aXdm4iu45IpStQrqVJBqKgMAaVBFeFQcU3++eXI5A9xvHvF0N4bYcDGaBKUbVPM3mcXgL6joTpurnIBShQB1BPcKHXLXrLM++9e2HTsR8KJFHlxElxJn/ALJjoPfwOP0A/lgtIukfR/d+trlTqkluJzXgYbOHwj3SCepB7uIOMPtOUOJPeflXGxLsavspmf4DFaW4tTK2qqa7eUMhZfWVrVIjUCEsuK1cP1Gn9autfpm3+V/Vi52DNYJGubcCppRGMkZNe1YxpJz8VOZxK9dUbr7+Xax6vUK95CljesQBUGVBDOooKAK05LqKL+XX8Ixd/wAncaY/kPtF902RXNq1Cs8b4U5BtaqEtYC5UqJjs55llAJGpcWgD+PV2eqG4XFt0vewQRlo5LWRWI5Aoan3Y5M9C9utrz1E2m4nkCyQ7jbsqnmRKpoPbin39r1IVc8yEjXSswj7gDLyfXX+XVRegH/ad0/6uD75cdP/AM4QrZbB/wBdd/3bfGr+55lNXZZBxPi0afHk2uNQMysLOI0+269AYyF3Fm61MptCithyQKN1SQvQlAB00IJj+vV3ayXm3WMTqbqJJmdQalRIYgtRyroYivLPEr+UTbb+HbN63W4jdbG4ltY4nIIDmEXBk0k5MF81ASMgcuIw4uXa2VVft6YHDmev9R+mcfzCmSoqeQ1aWabOM0de6G2o0tCUIPdCAE+XRbqa3ltvRKzim1a/Ltmz40dtQHsAIAHIZYBdC3cN9/NJuVxb6fK869Tw8CY4zGx9pZSSeZJPPFafHGUXfGeY4PybAjyEs0WUtyI7yUpDViqnNc/fVCFrBSVP1Ns2074FKJQIIJBFA7LfXWx7la75EDpinqD+1o0mRM+1HAPcwx171PtNh1Vsl/0pcMuu4tCCOaeZrEMhA7JIyy9pjNQRlifn7l1hFubfhG9rnRJqrvC7m1q5iR+XLgTpdTLiyGj4KQ4w+lQI1Hfq1/Wq4hurnbLiAhoZLZ2U9qsUIPvBxz9/LDZ3G32W+WV2pS5hvY42B4hkWRWHuIIxYNY1zaf27DKajvSF/wDtJtK45IlERorBXxjA3iKlwpXKeQCdu0EFX2dPty3/ANq5QSB/6lThx/7OuKis/LHrywzLnqiT/wBMbEaf2bqihnWnuIsJrql5FW0/GDWO1ilOGLYM2F1lKbpyXHQQ
H24DEZpxO46BRA77uqt9JJZk3O7RQPIaBdR5ghvCB7an6sXn/MbDHLtO1mpMwupAFpkVMY1Mf6pC0HPViC3vZqLe495nMFFAiO2N3bZ1V1dfAhMBL02xsKmjiwYUSMjQepIfeQ22geZA6Uus1afq67jhBLvMoA4kkqoA95xZfpjIlr6c7dLcMFjjtmZjwAAdyT3ADFh37XHJxyLhLnvhewkBybhlDe5xjTLhbS6cfv6mXBvY8fbtcVHq7yO08srB+ey0100AfPTfetWy3mzSk6olaRP6rqQw9zCvtfFSetvTHl9UbZ1PbqNFw6QykV+ONgUJ5VZCVA7I8An9nmIiZ7ms3aX5cGZM4PmKDuTn3GQBSR/V83S96XsV3+Yj/wCc35V/3kWHL16XV0hbD/65x93+4uMDf90+OYvvEzFklR24rgZ1Xpu0VjMIjXTsfHx6H+oZ1dTyEU/w4+H9XBr0ar/kWDVWvnTf3zjq1l1cS1gTqucyZEGyiSYE2MVusKeiTGFx5LIcaW28gONOEbkKSoa9iDoer1mZZEKP8JBB7x7sxjlS3DQyLLEfzAwI7iMwaHL9eAdwL7J/bTwFmjuecT8ajFMndpZtDIs1Zhn99up50iFLmRDCyXKbmvQXpFcyr1AyHU7NEqAKgVqz6e2baZjc7fD5cxUrXW7ZGhpRmYcQOWHfdOsOpd/tBY7xc+dbBw4Xy4k8QBANURTwJyrTPEwVFLhKkEblEkoV27eACVeB7fHToizFfi4YERRK1NP1HFc1B70c7rv3BnvZ7nuG41UY3dxJEnjzMK826La7+qxhGU0yZqZU16vDbzESdBdUhtG6fHCUAA6FQbqCZOoG2a5RBAVJRxWreHUK505MPaMWSvSNrL0knUllLI1wrASRmlF8WhqUFeasP3Tgg577wuQsU/cF4Q9knF2CYXlMzPWsWm53kGSP3n12KxLB29yHJVVkOolssuv0fG1IbRtLwCXXXUpUUN6r6Gbx1FNDuibdbKrKVBYmtRxJGXYor78HNg6QtrnY5N5vHdWViEUUo1KAE17XNMuQxf3X08Cmjp+RCAhOpPYafHXT/wA3Qq4u5JmOeDNnYRQKAAMsc7n/ALcc3MOd7XeF4cdaVOM+4GE8vbp+EcdZ834Dt4r8u3QDeA3yyk8Nf6Dhw6cZPm3VeUf/AHQwxP2tvet7beF/YdxrhPJPOnG+F5HjtpyZNtcYucngMZHFjWPIGSWsRX6A247byFzIUpDrSG2VrdQpJSDqOpG2XNpFYqJXQOK5EivE8uOI+82V9cbm7QxO0RC0IGXwjnwxVHwzPX70P3g6bkbjept7DF5vPUXlVmWliTEdh4Nxk/BnwskuUqSldVHs00ETcy9t1kTW4p1W4EkQri53Tzk+DXX3Dn9mGFozZ7N5EhGvy9PvbiO/ifqri2j/ANuCsdmVXtP4WmySoIf5/gMtJWhYJCuOs9cKgop2Afl6Aa6nyGg627pcJIBEM3DZ9gy4E9pxE2VFSZqEfB2945cffwHDBZ/afpZ8r9lvnefGYcfcewr3C1cdSUFRaZRHzS1nekojQLRGhLI0PZTg6iWjjXpB+GKUn3jT9xy78fOoFJuLCMVobnWQOdNKio7Kip7gcUdfssNJf9zfJjKk7vV9vWQtJ+bTapzlfhxsLKf6wjdrp5ePl1TXrQ4XoaVT+O5gX63499MXF6aIX6shA5I59tBw7vb7ueL8uXPb/wAK+4VVNVc34QvO8YoMmfvYtUjI8qxlxDr/AK0OS41MxS8oZjjjkBxSEpccW2lWh08+uTenerd76WvHvdmmMLyjS/gjeqaq0AkRwOWYAPfTF/dR9PWG+NJa38fmKkzlfE6gNVhU6GUn2HL34RbLgzijg+ix7COC6mTG4yx552JGZlSLua64u8eduZ1g8vIrS4skqOQTHwQ5IUlv1VentbCUhJ9SdwvOotwO+Xkpub06FaTy1QsAqqKqgVVAoBw5Z8cNfQG3W22WX8LjjWAD
WyJrZwpJJIDMSxrx+oY3KuH9HYJkVVNXv2K6epImJZdQlKTE9NwrbQ4ULR+XqEHtrpr0oWjywwQfKa/NkeQkjkNRH2Uw2hvMRorpyIPMYaTTtqM/fjnh5OVKb/dnxVU35ZSfc17fFvaJKBuFlxuoEJ11SCnQj4dfor6PySH0n26WUt5ny85JPHKebHF3qlHG3qBuEUQHlmSIAcs4YsXOfugVsOJw17mNYLaJjcNCn3G4qGUttryHH1RHAUrB9RSnCFkgkntqdNeucen7ixj9ereyQE3ovXLUUCgMNVrn3nlXgMxnjoLeEupPRWWbV/y5skpUk1o4DDh3ZcB92IgfsVJUjjv38zAtTf0+JcKxUObSpttVjI5aCwvQK0DoiDUbTu2DXwHXR/q/NJadLXN1F/iCznWv9YxUHs50xQnpZGtx1XZW0grH87E9P6iTZ07c+PLENv3Gpc3kb3UcYcOwJaH0wKjEMcYSxscdYyDkS+C3uyVHTfWPVpQ2pKVDTXwUOq9/l/UbJ6abl1XdBqvNPL4sh5VrHQAexxKCR7OIOHz1qcb76h7f01a0VQkMeXES3MuZP9jyiB7TnXBX/alq6zi791qDw9dKXFpLC/5WwaHGkLU5ElSMSi3GY4aqQZCypf1j+IR0sOK1cUZAT/4iun/d4bfrboXaOoLgK8kYt7s5D42hKOuXZJJUgZVTuGE7b5rjorrbd9gt9SRubi0AqaaBMro2eZrGnhJzo/ecdpXuPShPto9wISj0QODuWEhOpUARgN+TqfH5yTofuHUPanAuIx++v3jGV+pEEhOfgb7jjg2/a3YD/MPL6lbgmNwDZyVOIIC2gjmHhhIdRr/WlSxpp31PW71wIXotXbJRexVPZVZFB+sjHvRrPrHR+JrSUAcifCaHuyxdz70uS2sN/bH9wdY6pMK1zK6wDCayHIcZSY+RKzaLGyxDcZToWUZRgJdtWEt66PuyXhq3u6F9AxybtZbYJAQttdu0i5eGWOJgTzok5EFwORMklPiGPdfn+G75dSRMGFzaKqNyeGRgyDve3Pn27cwqQg5g452eSPbXa8Y+0T2n+6WM9ZIXz5nHPde3OYU/HapW+J77EMexlMWQ2pAamTbVm7eQtH5msZWpAQjS8wx1kV4UxVjxgRKx4tX6hl+vHcd7ZeV0cqcM8WclRZPyZ1gOJ5Y4Py0lqTd0kKwmxVpaJbbehzH3GnEjshaCkeHTjY3AmjBPHFdbpaGGRlGQBy7xiS/9+OREstEkgE6k66aDyOp7+P8Av6LCFXFeeAHnyRtp/Dimv38/u38/e1/3N4j7eOE+CsS5fm5Xx/imR1EJ+HmtrmNtkeSXWTVho6ekxSYl2zWU0jZZbaYW8pa1AA6DoDfXU9pci3jXUSARxrmTll7MNG2bfbbhZNdTuUAYgnKgAAzqfbhQ9pf7uWV8t+4XHPav7x/bNe+2XlPPWGP7DmW0LLKCPbW05uY9UVVviGd0tbkVI1kYhLZrZyJEtiVM0Z2o3BfUiw3mZZxBOpjc8OI91D9CcQt26Zge1a6tZBLEOPDIdoIyNOYw8/3ZPdpeewvDeNcixDGcdyrJeR8ysqiPVZNIsWIDdBR0bs26sI6KuRElLlR7GdXNAlfppQ+rVJJSQx3/AFM+3WqPEqvKzUoa5ACpOXfTCftPRcW83skVw7xwIldSgVJLAAZ8qVwV8Zz+q5H4lxblOmba+myzjqmzyuZCipIau8cj30dhwpUFaNiQELGoUNCDoR062t2JrRLuP4XjDD3iuK0v9uaC/ksJcnjlKHt8Laa4oW49/dq95HIcG0lcfe0aDyPSUkiOm/lYNi/KWSsVBfQ4+lmfYUf6rGqnpUZhxSPWT3CSoJUEnpDt+td7u6vbWQlVeOgSNT2kVp78WjeemPS1gRHe7oYJHB0iRokJ5ZBqEivGmLGvZn7zcT942GZFaRcbfwjOcImwYGaYbInfqkeILVEpdRc01iIkFydVWiq6Sj03WW5EV+O4
24FI9J5536b6ii363ZwpjuYiA61rx4EGgyNDyqCCOwmrutejbjpK7jR3E1jMCY5ANNdNNSstT4lqM6kEEEGtVAS973vsm+1HkjiHCq3GKC9g5gw9eZvKt3p4sKPGRe19THlU0eBIYZEpxliyVq+HEqWygAABW6N1F1W2w3ltboqOklS5NaquoCopx/Ec+wYndGen8fVu3Xt7JJJHJDRYgAtGfSzENUZD4OGeZ7sWNQ7j6H/moyESWn0BYUFBaXG1p3JWhadUFKknUEdj09aFlAqcu7FTs7QsSBn34hZ72+d73hnhvJOUaKkq7ewoJ+PRY9ZcGUmA8i5voFQ8p1cN5iRvZamFSdFfiA17dQd/3aTpzY5dytlWSVCgCtWh1OFPChyrXBLpDp6DrXqmDY7ySSGCRZCWSmoaI2cU1AjMimEH28Z1Z808PYDyXcV0Cqs8wrH58qvrC+qDFWzaT4IRHMpx6QUFuIFHconUny06M9O7rLu+y2+5zKqSSpUgVoMyMq+zCt1p09D071PebHbu8kFvIFDNTUQVVqmgA4nswU8wdgYrjd9ktkAa/HaW0vJyypKAIdPBfsJSisghADMdR10IHj0WlvFtbeS5fKONGY+xQSfuws222yXt9DYw186eVI19rsFGXPM4gx7MfdHae52qzpzIKGkx69w2xpkfQ0js1cZ+pvI00w5ShYyJDxfTMq5CVbTtCdngT3VOhOs5uq4bg3UaRXMDrktaFXBoc6nirA+7FkerPprb+nt3ZixmmnsruN/FIFqHjK1HhAFNLqRzrXkMTlCfTAAH3jXXx+7QdP0btU4p5xywCfdE/Yf+03c6xmwpxtXFebOLSB3Qw3RTFPOKOhCUIR8fMgDuR0D6wZf8qbgTx+Tl+vQcNnpsjf8AxA2YCv8A9Urf6hIuKjf2z+TuPuN7XmFef5zjeFR7etwtNacjuYdQ3ZPQJOTqkJiGY60mQ5ERLQVAa7fUGviOqW9I942raJ75tzuIYBIsQXzGC1oZK0rxpUVpjp3+Y7pvqHqKz2pNhtLm7MUlwXEMbOV1CHSTpBpWhpXjQ4bH7h3LuCc18p8e41xHZJzdnF6FypmXVMzJkRrzL8ntm3XKmjJaSu1ZgsR4rDbzIU286pQbKtNSO9TN/s+pt7t7fZ289YkKAqCQzOwoq5eLgMxWpagrTBb0H6P3ToTpa+vepl+Ue4lEpWQgFI4kNXkz8AzbJqEKmo0riZ3u7rZGNextrD30rS5i+O8RY48hbvrFLlHY4xWrBdJV6yvUinVep3Hv1avXto1h6Y/Iv8cMVqh55o0SnPnwxz36P7ku8+vX8WjzjurjcJRlTKSOdwacuPDlwwyv2tS8njXkn01KAOdRdQDoncMfg6E/dr0M9EiBsl5X/wCeh/wxhg/mqUt1Vtf/ALvb/jNiG/skUUe+nCF+YyHlc9//ANAs96rXoD/8ZEH/AF1z/wAGbF6esI/+0hdj/wAmsf8A0m1xax+5TKKvarkre7QuZPhm8JOm9AvGFBKwD3SFJB+8dXL6sAf5MmPLzov74xzL/LzUeqFsP/Jrj/hHA6/aVq3ZvBfIz+wLjN8svMulKtVMOLxHFyyp5AOrbT51ShfhuSRqDp0E9Fplj2C6UmjG8JHf+VHWneOzDP8AzQ2zTdY2DDNP4YoPd+fNSvt5HuxAbguqFn+5NDqHnXGxI9wPJUZx5CfWcQU2WXbnNqiPV2lGpGoJH29VvsEoj9UhMOA3K4P2y4vHq+3M38v5tqmp2OzFefwwYkV+8RjBxjN+EmAW3WJOJ5Y4zLZUVMyg3cVSVKRuAWhSCdFJUAUk+fiWX1suhdX1g3AiGQEdniXCR/K1ZfJbTu6fha5gIPb4JMR9/bq5AsuLfcnT4RfKk1VRy9RwMfkRZBSy0udb10bKeP7RaVaKX+oJkoZjaHRSLPXQ9tF/0t3iTY+qktLiqwXkQQg04sokibtz4Dt18Oxv9fum4Oq/T+Tc7Kkl3tc5lBFa6VYw
3Kf2aamrwMXHt2uS65X/AN0/q6xf5qnPcvxFFI07rD9/haANNB+IOdfN4nA9WFuOAG6Wx+pov1Yz6XtHk/l5ayObvsN6vt1JOP04sE/d/wAJax3gPjK0bCkGTy1FiKbUNCNcOyt7X+bXTp6v7t89s9vDx03YP/zNx+nFdfy6dKnY+oby9YUL2BSntlib9GJIfto4JMuvYvxtbR2CQ7dciJS7tJCvRzzIWtAe+mikdTfS/coYNjgt5Dnqk+2RjgL6/wCw3N11Jc38C5FIs/ZEg/RiRqeP7YMMpUwQW4rAUdh8Q0nvrp8errjv4BGtDlTtxyfcbXdfMOGB1aj9+Oa/2XxVPe+fD4v9X9x8sgj7WsFz1ZH/ALp1yh6fNT1Kgb/w1z/wZsfot6wgj0QuxzFtY/8ApNri0X9x1lDPtayUDuoZPhoV28P/AGdM9XN6tknoyavDzov74xzZ/L2Y/wD4l2uk5/LT/wDCOPP7M+BWuX8McmzYq2YVVUcpPu3VxLCjDrYYxLF1KWsDu/JeB2ssp1W652Hn0p+kd7FadPXCsC0z3pCqOLHy48u4dp4AYdP5kbOa66xsilFgTbFLseCjz5uPaTwVeJOK9+AfoGP3PnSzHcmQGOd+bVQ2JiEhxxtlHIKoBmtp1TqhSUKdSO3YgdI3SwdvVJQxo5vrmtPZNWn6MWx195f/AO7+2kEx/wAJsaV4/FbUr+nFov7l2XVFR7Uc+pLiyjC7zi2w6BRR5MlpFjZzoGbY9ktkuNGUr1pDUWspnlOFI2tApBI1ANzeq13bW/RdxbSMqzTNEqLUVOmVHNBxNFUk09+OZ/5e9svbv1Ps76FHe2tY7h5XoSFD28sS6jwGp5FAB48uGI/ftQ4NYz+DOZckW3J/TpmWS4DS3dfo22qLDo0q2lsoOgB9K5bQ6vwWUIT4pICp6Qa4Om7+d6+XJNpXsJWMVp3+IAnuHZh+/mTMV11ttFoukyxWoZjzAeZqaj2DQSByqTzxRJCqrGxjW0uFFckRqKvbtbd5G3bBrnrWso2pT25QPprtrmKwNoJ3vJ7aakc9pFJIrugqsa6m7hqVa/6zKPfjsyW4hgeOKVgHmcog/aYI8hA/sIzZ8h20x0McYc7NRfYAedpU0P32DYhOwuYXng44vO6V5jF8bROX861v3Ts2tludt2yUVH49dSbR1uE9Of4zIwN7bW5iNTmZU/Ljr3sSjH+tjgTqT0u831sPS8KkbbfXonFBQLbyVml0jsjAlQf1QMc9UqvtRBi381iSYVxOs40axkKK/r59cmC/aALWouuuMfqrBWs9ipzTUkK05deKfy1upAfLkZgGP4itC3flqFT3+3HfEU9r5zWEJXzoUQlB+FX1BO4A6GoOQHChFevz2u2zkz24e36JNixrCPG4Q4pjt+ugtyWm2sDoUpQxNYLchASkaAakDTrszpVAnTG2shIY2Fue7/CTlwx+YfqCS/Xm9hgGQbveceP/AGiTgRnikD3C0Uf3S/uUP8XsyExaBq9qsBYL8kkxKvCsWcusqjypsMKcA/WY1mN6AHGm1AH50FXVEdRwnqz1P/hchrCHWLw/sxIXcZH9oPWlCO4jHXHRFyPTr0EHUES6bponuPFmNc8ojhOY4aDFStQTnwOCP+0JmsnGeUuZeKLQvxX7zGIGQIhPnVuLbYNdO09iwE7lJaluR8pJc2jRaIvzH5E9E/Rm8a33a92eaqtJEHoeTRNpYf1j5nLiF7hgF/M9tcd705tfUttRo4p2jLDiUuEDqSf2QYcq8C+XxHE0v3Y1IX7SoCk6ap5MxpjUaaEIj3DqT27d/VPTt6uinSTf+dRfc+Kq/luofUZTX/8AoJ/vjwG/2ppCmPbtmRSraf8ArRkR8T/9o/HI/wAdOveiIr0rcA8P4g//AAYMbP5qWI9QbKn/ALGi/wDSbvAF/dzmKl3PBJUSS3U8gDv/ANqZiP8Au6WPXUUudt/qT/fFh7/lLYtZ75XlLa/3bjFjPCOT
iJ+3lQwvT3H/ANpvvo47n+vE7hBP8AerC2KDV6awt/8AWtj/APM2xSvV9xT1zuY6Z/x9P+OmKpf2jpn0XuRzRzX8fCWRtEa6ag53xsvT/wB06qP0RFeq7j/3fJ/xoMdCfzdIX9NrJR/7bh/9FvMXwc0cE8Fe5qDjB5kwsZdY4Gi6/QUnJctx8sxr4VIt3kDFr6k+tfcFFFCg/wCqUpa1RtJXuvnf+k9k6ieL+NQefHFq8vxyJpL6dX+G6VDaV48KZUqccadB9e9X9Brcf5UvPlHuvL878qGXzBFr8v8AxopNJTzH+Gla51oKLeHxMX41xzHMDwmB+kYni8CPV0taJs+wECvjahhgTrSVNsJIbT/U884s+aj0Xsdqttvso7GzTRaxIFRak0UcqsSx9pJOIu6b/f71uU277nL5u43MhkkfSq6mY1J0oFUexVA7sc2U/GZPu+90HurykFU9qlw3njk+mZhqWn62FgdTIp8AiByO4r1Vl01adQr03nEgK/LWU9ckXFq3VfVG7XCeJY47mZac1iGmIZf2O4nuOP0bs9wj9OfT/p2xkGh5p7C1fV+FrgiS4OfAD83lUDvGLwv/AG3j5aNhxZzpwvKlOuPYjntDntXHfc3BFfntIaOazXhS1LRHi2GDpddbSA2l2ZvA3OrPRboKfzbG4tSc45Aw9jimXvSvv78KPrTaG23qx3JQNFxC0Rp+1G1RXvIloDxotOAx0e3W4yHAkfKkJbT9zaQgD/Dp2gyUV9uKgum1OdPDh9WG/GkuNuhPYEH/AA/0PUp0BWuIUMrI4GHO3Pks7XGlqOp7jv8Ay/h1EMaNk2CizupDIcKTlkuWhAkp3ajRQV20H2fHrWItBquJD3BmX8wYFtIAKeuPhpCjHT/30nqePhHswBucp3P7x+/GZyQSvz08B9nf/DrcBTLEJnzz4Y3WDrpr5jx+z7esH7sbE40xX3yLy3zhyJzTybxNxRN4txai4gh4qq1kchVM67t8ltcpqlWiZtXDYDjceBWsbmSoICklQWpag8hDaz5+87pvM+2bWbaNLYJXzQWLFxWoA5AZcPvysUWPSnTvStn1B1GL+eXcGlCC3ZUWNYm0kMx4sxzpXOhFBpJMUuT8+574vcrbbNLrim/pp13ApbGtxSjm0Vq81PdLXrwXFoaDzsVXzbSFjTxTpqQxNP1X0msG5bg23zbe9xHG6IjJIQ5pVCeJHGmfsphVg2/059R2u9l2RN6td5is5p45ZpElgUxLqpIASQr8K+HuNaA6VxauypCtVnuo+J+J+Hx066AiiCjIcscivKZGzPDHPHzkrd+4SpRP/wDOHibv9zWEjX/DrkfqoU9YKf8A1xtPugx+jfp//wD82f8A+F3H77rDM5pOTe173K8vQcPIrY9/Ay2BWaoKWf7R5Np3pDIgqaLZS5QvWKRHWPwSoCQrcAoEb1Kb7oXrbcI9v8CzJKF7PKuUJGmn/RlvCeTRitc8HehhtXqx6XbNPvX5sttJbs+efzFjIFOutcplQ6xzSY0pUEL+K8amj9inKnJ02OUzM65DwulqHVjXXH8UufTdfYVp8iZl9OktODzMNJ8h1JsNl+V9LL/e5B+ZdXcKIf8AwcT0JHtdmB/qDEDd+qPn/X/aOlYGrBYbbcySD/w1xFUA/wBWJUYdnmHtxYR+1srb7f8AMP8A/MWQH/8AYrj0dW36GU/yncV/9oyf8G3xzv8AzW//AIxLP/3LD/6VeYiB7lZDnNP7gOPYU0x9XX1GTcc4EU7nF76qC9EvsnccR2LTcJ+2sNwSdChrd2KjpXvWZPUfq1DtajVFHPbwc/hUiSSvZpLycOQrzxcnpiq9Ffy73O/O2i4ntby6HD/EcNDAB2lhHDSvNqcBhw/uf0UnHOZOMuQqx1yFLuMREZiQ0AFt2+F3zsxua2spIL7bF9GHnoGk9vjL9b7V7LqKx3eAlZJLegI5PC+oN7aSL9QwO/lX3CLdOi916culDwQ3mog8DHcx
BSp7iYX/ANY4sL9zOTRs39j3IOXQtioWUcc4jlMMo3bQzczsdnISAvVadqJQBB+YHse/Vr9b3ke5em1zuUX+HPZRyD2OY2H2H24519Ktrn2P1xsNluAfOtNznhavbEsyH6yOIyPLEef2UaV22sPcStlJW5GTxG2wkDU/Uy/+p6Gl6eYbS0o9Vd6JTrDHuurgflvs+YxfP81tq9zJ0/oFWHz31n5OmOjmFwHZP1YkhpXqFG7UpJJKgSdSPt6tifqa2WbRWoxzZadF3slv52mgp2YqX/dUwWVjHtO5EW8kgNXWCpUSCPmXmtEgAE+P4uk/1J3Bbvou5CcC8X/FQ4tf0L2l9v8AU+xEmTCO4/4EmKfPZT+3LlvvRxHIsux7kmhwiPjuWLxR6Jb0FjbuSH0VFXbmWh2FOiobbLdolG0gnVJOvfqkOnejpeobCS/jnSIRylNJUknwq1ciP2vsx1L156q2nQu7wbTPZyXDzW4lDK6qAC7pQgqf2Ca9+AH7xfa7e+z3m+34SyLJ63MLKqocZvjeVVfKrIj7GTVTNqwymHLfkPNrjod2KJWQojUaeHQDedsO0X7WLOJCoBqBTiK8M+GHPpLqROq9kj3qOFoEkZgEZgx8LFa1AHGleGL8v36cDRiHCXGa/pBHcHNjcBxSE6IXtwrMHAUkDuFBoEfZ1bXqRuPzvT1staj5lT/8zfHM38v2zvtvWu4M66W/h7L3ZTxfqwwv2x2HR7WMaeKD6arjkcpXpqCpNnJQRr5aE9WD6TuB0XCvPz5P+IcVT/MWjH1Rumpl8pD/AMEYqk9suP1eW+/p7FryK3Npclyf3C0NvDcCS3KrLfAuT4E6OoLStG16NIUnuCO/VOdK28N56ltaXKhreWe9RgeatFcAj3g46d9Qr662v0JTcrJil5b2m1SIw4q6XNmykewgHA6x6+s/bXlHug4OytbyYuUYTm/GstZjrSy7kFHIcscMvg2NXRHtWmHGY6gFJ9O0StWiQVpE211N0pd7v09dk+XNbywHLIuhrE/sYVA7pKnLMMV/Y23qJtnTXWm2hTLbXlvdqK5iOQBbiOvCqEgtwNYSBnkbEv2X4qpR9yQSkKKP+jx0+/8A6p/7urC9En0fxOv/AJN/9/xSv81sfmfwHu+e/wDpPFblzxba8y+8HkHi+lsIFRaZRzHyrFiWFmmQqDFVX3eUWzhkJiNvSCFs16kjak/Moa9teq4OzTdRdcXGz27rHNPfXADNXSKNI2dKngtMsXinVFt0V6TWfU15FJNbWm02ZZEoHbVHDGANRAyLg5ngMSvP7TvLf6Z+qJ5K4+U0iycrH2xGyT1GHkx2pLC1/wDss2lqS078h8dyVDy6fx6G715vlG+tK6dQyk4VIP4eXPFOn+bPpXyfPG1biVDlT4oag0B/b4EHLF2zTRCu7Z07+WunkD11Xr51x+eXlHVWmRxqXlaH6O3AaO41k86hPkIjvn8D1puJgLaTP/dt/dOJFtGPnYKjxecn94Yoz/acY+o9z9ojbu04pypWn3X2Ij/b1yd6GmnWUh/8hk/vxY/QT+co09JIf/fFv/wbnHSvOqnvpUpSjb8moG06nUdv49+uuFnUHjXH5WRWjyPWmRxQH+73EXEyHg5LiSla6vkAnXXQ6TMS0018u/XNn8wMgkutrI4COf74sfo1/JFD5O19QgihM1n/AHbnAH4+/bc5M5DwLEc/rs7wuDX5hj1VkcOFMj3ypcWLbQ2pjLMhTFe4wXm0OgK2qKdR2PS1s3ozu+87Xb7rDeWqRXMKyBWD1AYAgGi0qK8ssWZ1N/NF0x0x1Be9PXW2X8k9lcyQs6mLSzRsVJWrg0JFRXOmLIvav7f8h9vPHd1hWR21VfTrPNbHJmplM3NRFbizaLHKpEZYnMMO/UJdpVqJAKdq099deugPTbpO66L2GXa7yaKaWS7aUFKgANHEgHiANaxk9mYxxv64+pG3+qPVtvv+2W09rbw7cluVlKliyTXEhYaC
RQiUDM1yOXDFbXGoXzL+4XLugwZFbV57ktx6gUtaW6nAK6XW47MWTopHrSKmCAn8KFuBPcDvRWzSHqb1qe+IrDHeSvXP4LdSkTe8pHlyJpwx1v1VEvQX8q0e1BtN1NtdvFTIVkvZFknTsNFlmz5hScicevdE5/0X989Fnza1RIT9vx1yA+lKVbf01pUSmv2TtG9xqwTRSw5p3IdUAQfDHr+T/LXq1FvStpjaS2uD/VFI5B7G8t6+04z9Gof89/y43HS8ih50gvrJf651SwHuKGaPTXLwgnFw3uRXIT7bec3WXltsv8V5kFJacPpvNLopf4tp0WlSf8OuiOvyjdG7mcj/AMjNT/UOOKfSBHj9UNg1VB/i1sP/AJquOengDgnlPm5/LGeMbitqn8aiVcm3RYXtjRqmN2K7BEJqMuBDlIkOJXBc1DpbSncO/c6cfdHdJ9QdUvcLsEqRtAqF9UjR1DatIGkGvwnjSmP0z9SvUTo/oCOzfq6CWaO7eQRaIY5dJTRrJDstMnX4ak0OWQwue1bjHFOTucI+IclO2LaY0a3tRTqUGnr69o5DEmXR2rr6vqm2Vw25TsgNj1lhhSdyNSoEfTjp/bOoOr12zqAyAKrvo4GSSMgmNyc6U1lqeI6SKjM4DetnWO+dHenD770gsRLvFH5vEQQzKyrNGB4SQxjVCfCNYNGyGLQPfbXute2a/cjxCzXxL7D2UKba9KM0kWzLTTLSUpShKUjQJSBoAOr89Y/LToWWNaCk0ICjgAHGVOWOQ/5avMk9VreZ6sTbXJLHMkmNiSScySeeK+ca4viZp7DcrzSG2HMm4z56urZCENl2Q5ilthfHNZkLLYSjVttuQqJMdUVbQ1CVqPMUPZbI25emE+4xLWax3N3rTPy3ht1kHsHgc9yHHWu59UrsnrxabLOxFpumxRR5mi+dHcXjwk9pI8yNe+QUwFuS+VjyFxhwbjM5x1y64sp8sxCQ46XCX6Ry1r7LGXmzqWUsxq58wkpToofR6qHzJUpa3fef4ps23WkhrcWaSxHL8GpWjP8AqnR2+DPkS89OdMHYept73GIAWW5SwTrTlIEdJgedS48zs/MyORAvccgmR+3RMnzFrWzG9o9Y3EStZO55HFsBtBQCdA1HKkgfFWg66DvXp6Yoi8Tssdf/AKHXHHO3p/8Ab2kc8P8AM8v/AKY/3Yjr+yDDjzeR+dGn9uoxDD1tbhrq4Lq2Og/7wGn8eqp9MnZJ7wr/ANGn3ti/fXlEe02vXw8+X+6mAxyvHYV+8ZQRm0pMdfu64HZCSBsUheTceJUkp8NDuOvQHcmb/PIZvi+di+9MOGyIo9JjGvw/wu4H+zLhLzSGP2+f3DeRqFxhyDxtaOZbTsNMxSIi+KuXqKS/RllttKVSIuFT7CN6pZTuVIplpSgn5DiJP8s9TycVtW1DL/o5Rl7lJH+rjLyR130FCTR9wj8tszn58DDV75FBpXlICTzw4/2Zm0ue5/PAry4EyhSe2vzDkPi0Dt/Hol6aEjfpaf8Azo3d/vIsBvXIBukrcH/2jHyr/ubjAu/dZCx7yszCyCoYrgI1TroR/bMLTx8O3UL1BNepZP8Aq4/7uC3o+NPQ8Iy/xpv75xaT+4p7NfcTzzybXcp8Q5bjtDheKcQxKm6g2GZ5HQWDttRZBnGRWMmNW1VPOivJdqraMhC1uoWpxBSQEpBLd1dsW77nere2EipbpAFILspqGdiaKCDkRnXFdenPVnTux7W+17pA8l3LdllKxo4CskSAEswI8Sk0pQA15nEIv2cMpya392F1Et8hvLWJ/wBG8wdEaytrGdHQ6nIsKQl4NSJDraHUIcUAoDUBRHn0q9DXE77ywkd2HkNxJP4k7cWB6qWlrF00jQxRq3zaCoUA/DJzAx1INRlvHa356ePdP8FDt/PTq2WemZxQcUPZinf913ju/wCFM09p/vwx2BLekcVcm45i+X/RR1KfkVcG3Xm+KMS5CSGWa6UYFzAdL35T
irFpoqG8JVW3WJEV3bbpB/ixNpanMV1D/uhn24uj06BuLC82K6/wZkLKDwBI0tTv+Ain7JODR+0BWse573ye+P8AcFsY+/GUX83jfiR+Ul2ORWXsqOYTioz4Ehu2x3i7E6SI+pz0kn9Vc2tjXa0s2sjXl9NfNnqNB9PYBh1v4U27a7bakyCrVvbz+tix92L3885B2KciQ3PAlIA7FR8NfsSOmKC2qNTYUri7z8qPIY5zf35J8id7cOJXH1lX/wBHCIQCfDXA82/hr0I6iULaoB/0n6Dhl6TNbuT/AKr/ALpcQg9lX7PVV7w/bfjPMsXmy3wfJMpmZXBi0xwiJkNJDfx3J7SgYU+8Mjpp77U1ut3r2lBaUvsFhOig9vtqT2onLlWNcqZZH24PXm8SWt4bcRhkFM60OYB7CMP/APan5O5C9iv7it/7KuQqXHbCqz/kOz4oyqzh0NfJvYeWVEecvBcpx66TDbvbLHL9xphKayU+IqI1n9SGm5CFpci27vHKbdGADGmoCvvFPs4jH3dNE+3/AD8QqyLqALaQQeIJPDtqM8qA54sz/wDbkyXEd9pvBjEWZOmLR7hK5Un6gNMx2HE8a8hIbZYjMfkocSj8Z1OnYfHr7dNHpEcJYoDmTzPbgX06l013Jc3KRIGQhdNSStVNSxz014DIc8PH9snktjEf2Icybghl2whVPuzZsoxSC6tq3hZHCq0DsDrOmWaGknvoELPYDoNJcrYLcXcg8BSNR3lnCgDvLaR7DXBobfNd9RW6rWhUkHkAFYuT/URWY99BxOKG/wBk1JPuj5IXqdGvbxk7pA0+Yp5Q4h2A6/Bwg/eOqn9c30dD1rT/AJyL+7If0YuT0kjWTrKJWGXln3eNAfsNPfiOHuc9p3ui9puP0GS8o57Ck12SXruPV/8AameZXayBPar3rJSpCJ9ZUNojqjMK0Ula1bu2nn0U6P626M63u5bLZrRllhiDt5kESihIWgKs1TU8Ke/EXq/o/qzoph/GbhSzS6Py5ZGqaFq+JVyI59uL/P2wo9llnsJwNEmTJn2lla8jB2zsn3ZslTsbPr9MRJekLcdcLaGUpAJ0CRoOuaPXG3jbrC+sLVERWih4ACn5SEaQMq1z9uLo9KLhk2Gzvblmch5eJJP+I44/ZizLiHihWQssfQMPOy3Eeno0Ap4NM7FqK9dqUtth4g69h0j9F9KncILKSQnLzTIQPiBfUKjlxrhv6k3o2Fw5koEBqOQzy/Rjlq90+Nrxr97GJjb6VNLg+6L20R3Eq2rUj12+JnvL5VaJf/l13N0lZJYdCw2Uf+GlvMB7C8h/TjlLqi7G4dXTXY/3k0f91B+jF5/7tS6Sv9v/ALrq043HExyhgLj3La1LfG/IseWC8CFJKt6SCQRp2+HXJdhcXUH8xtraaT8u13XUBy8hqgnuPacdD39rJN6LSXizHQtoAYz3SKMvdnwpiuH9hBiA7xr+4B+oqlIiqp/bxGdXFDanWW35PNi1SWm3flcfjKYC0DxOhHnqOhvW2Yw9KSn8Jglr9cVCO8GhHspwrinPSWN5OqoPKAMgmUivA+GQUPca0P14gXxa3K5y/dEsbyMP1uupOVctvBYPIWpD1FxtGmVGM28hla97Qlu09cfS1IbW8EDUDurdS16N/l4Symcm9nsoI2PAtLeSrJOPqklNeJAJ4nDT083+a/XJrxU0W0V1M4oahUtYmjhNcvxRxAcgSBww8/cfcyPat+6NxfzeFP0lL/e/EHMDi0oc2vYpBu2MTzVkbEeq/Dt28StmHtmq1JdWlJCvA/6Kyy7t6Sw2MwrJCbmAE86SOyn+zrCj+qMCvWFI9v8AVCa9jbwyi3lanKsaq3+toLH+tjuA9y4SfbJz642pK23eEOWnG3EKCkLaGAXYSpK06pWhQPYjsevm3yEXcVeBlX7xiLe+KCXs8s/cccKv7SNe5Y8xc9tNpCiz7Yr6QdddEhPNfBTYWSPw+mt0K/hp0T9cAT0DMwFS
tzAQOZOugp31IPuxD9H5NHXEHYYZh7tBP6Ke/Bt/da5GejcWcEcPtrlKMy+v82tVSSoKmwMTrk49hNiop1bkOoczO/rzuPqMN1zae+/sE9BvPl6fknudQmQJEK5holLNGa8zGWkgPMiNa8Bgt60LHF1AscQTy21SeE/BI+lZU08g4SKevCsrU4nE6fexxB9X+wl7dIr+OpxjIfbJk/D+Q3UNlL7hda5NprGBdy3i+FqjqvMp5QbekBJCPq4ZCdEkJLX6fdVv1HJuJcgxxbrdRRkGoMQbXC3tMZrTvwn9R7Ku2bfa6WLMbdGavFXOUiexWpTnSlcTy/ZIsoHOv7e+CR4d1HZzjhzKM34uuDOjyGYtg1Cuf7uxiFNm/wDsKy/Dw/MoESM8hP8Aw46UOakFXVrWlzJbyk6h5XZQgj2HgRzI4g4r+92+K+h8ApdL+8tGH9X4gRwrwPtriyOZUSI8p6DOiKjS4+xtyM6kE6K/C6laFKbeZc8ULQVIUPAnputbxZFDIQR21xX93tskTlJVKuDmCKHHNt712XIn73/s/bQNHG6rhctgdu6c0z/TTT7R0OunDb1Cx7F+84L2KFOmrlRxq/3Lgh/ubZM1m37lv7Y2L4vOj2WcYxyXx9KtIFW59RbUcaTzZh86sm2Yihx6BHjtUk2Tuc0DLLDjqglGqj7dlX5+FYzV6j3eIf04+bA7fwm6eYERaSKnn4TUD6x9eE397DG773de/fir2t4eta18Te1TmDlZx1hSFLby9eGZ5yG3TPtqDiW4t2zxrjkP1AC5rZHRICUqMbcNVzdLAPwoT76E/bQfXibtBSzsnuz+OVV91QK+7U31YOf7a2Xq5J/bmwiw9duTaceQuQeMrv0QdY6sblzp1DHcSSopdZw27rNdToQdQADoLB6VvvP2ZYnPijDL9VaD6iMVL11tfy3UjTxiiSsj/WBqP+sDiuv9lzljjjjLj73BPZ/nmG4Wy9kmGzm15Vk1JjxciQ6a/wDqZTH6vNiF1iOVDepAISSAe/QzoS5tbe1uTcyRxjWp8TBcgD2kYLeq1jfXl9ZCyhlmPluPAjPmWWgyB44cH7UsV7OPc372+SMXWt3jy3vrBVbPbiOR4E5zJ+RckvsbXHDiGy24zSxnVloAKbbfTuCdU6yuiSJN4v7qLO1ZzQ0yNXYr/s8u/A/1QXyem9o2+fK/SMVFakaYkVq+1sq8yPbiHPv/AKK75490/ukn1ylrhe2fifGvTU2UrbltU1/hsW/hPE7y0qLLzq5kJCfEQPmKSojoV1VG+8b3fSRf4dlbJTvoyagfZrc/2cMPQMsfTnS+1wT083c72QHu1LJoI7aiKIf2+dMXu+1HOhyd7aOE81ckNypltx7QRbV9G7Y5e0cUY9kBAUVEEXdVIBBJII01Pj1b/Tt78/strdHNmhWp/eUaW/2gcc6dY7YNr6mvrACiJcvpB5Ix1p/sMMRf/dFjBv2fciLAHa6wLv5983oh3/n0L6/avSlwP3ov+IuDHpFHp9QbMn9if/gyYcHsNYUPaVwe8llThGMziNqFKOv9yXY8gSdfh0Y6JP8A9itmK/7o/wB5sKnqop/+IG5t/wCHH/DTGp+4Rmq8G9pfK01uM8xMySursHheq2tltw5bZxam1RvISSpGPuzHEga7lIAPYkjDrq/+S6UumBGuVRGP/lGCt/slj7sbPSfaDuvqFt6uCIoJDO2XAwqXX65Ag9+K3P2/sRvuGfdXF4ouUOA8w8AYlm1RFlbWJEiyssWoM+SyjulrSoYXdRiO5Uljdr46oXp+JOn+p/4dM1I7uwjkHbqKJJT+z+Yvuxb3rI0XWHQY3q1Ws23bvNCacNAkkgr2+OkD/wBqlMX3tcfT/Wa+p+hj73EIQ19Yyt1a1nskJRqQf9D1egvFpVan3Y5Iexkr4qD34EHvExurqPar7g4sO0gPSEcSZu9YPRG3njKeZoJi2oLD+0ITFjrHzK/rX38AOlvqmeSXpq/Z1IHy
stAeXhOftw6en9tFB15tCK6k/wARgrSuZ8xcvZ9+Odz2I+zbFfdxO5QRlvJFrx3A47r8UmNPVGOxsilW72SzLqN9MliTZVqWFRkVBWCCrdqR27dUD0b0onVU00LStG0QQigBrq1VrXs0j68dh+qfqPP6d21pcQ2yXHzJlFGYrQxiPSBQH4tZr2AYe/uV4Bvv2wvcXx5PwzkPEedMaybDIuZY5lD2JRaZx6DLspdZkWH3dDYTslmYfmFR9C2TMgy1q+mmsrbd0ckR0/RFvHp71FFdvGrMjHSWUFZEBo1Ca6SRlqXxLXI046be+6b9cOhp9vWSWEyIolRHYPbzEak1UKiVQc9LAo9MwCARPb39Utm57O7rLjBcTSZIzxte1U9Ojkd+FdZDj86Gr1UEpQ4piSnVJ0IPbq9vUi+tr7oCe4gbJ/IYA5GjSIfuOOT/AEN2u62r1itLS5Wjw/NxkjMVWCZTQ+0YDf7VzJVxTya5tJCeQYiNQNdNcbgkj+Q6Ceif/wBRrwf+VD/hrhv/AJpRXqjbTz+Qb/jNiEXsrJT75sLI/wDti5X/AMcEz0dVx0D/APjHg/665/4M2Lv9X/8A8SV3/wCbWP8A6Ta4tO/cjb09rWSLOu45Phuv/wCemfLy6uT1Xb/7DJgOHnRf3xjmb+XsU9T7U/8Ak1x/wjhH/ZsfbhcS8hTHFpUk8pSYzsRwb2JrC8SxcuxpLf8A4jSwfvSe4II6WfSMF+m7pBx+cJrzB8uPMYdv5kCI+trCU/8As1RQ8CPPmyOIO+2ZqJK/dhp2/T9GG77mOVdjS1bi2ybLN1NNKX/UUp0Gvn1WW1s6eoBbi4vp/rrJi+N9WOX0aVaUiO02uXYNMNBiXP79UKFD5F9u/wBElKUOYTnBWEjQb03lINdPAHQ9FPUyWSW6tfM4iN/vXC/6EQQw7duHk00maL+6+Iu++jhCx4gwT2De53DW364cie2vgc2Vi0whX0HJXHvHWFTKm0ekJQGyu2x56GIyF7l6VTvcpAAAb6rwLt2723hl+VgBIHCSJF0k9+kKB/Vw69KIJzvPT+4DXatuF2yqTkYriWQuo7tZcn/rMDTG+QqXmz9y7hTkagaUxWcge5L23WwhOIWhdfNsMh46atqlwLKty6q2D8ZSkqWhZa3JUpJCjGn3Ibj1Um6L/vLmF/YapUe4inuwTstkGy9EPsX4IbSeMd4IkofeDX34uo/fopI1V7Y+HVMpSHF85REKIA10/sHNlEa/eOmLrS4knsI9fDzh/dbCv6f2kNruMoj+LyD/AH1xTn7cv2r/AHKe6Himg5i44t+KYmKZHLu4dexlOU3tXcpdoLibRzjJhwcSto7SFTICy2Q+oqbIJAJ0C3t3S257naLe2xi8liQNRIORI5KeY7cM289ebFsW4Ntt753zKBSdKqR4gGGZYciOWL1v2vf2+eXPaZG51b5js+P5rvIv/S9WMpw+8tLn00YmOQzdCz/U8foxF9X+54no+mXd+1zXbtG6yeiNs3Dpw3D3JQrN5dNJJ+HXxqB+0KYoP1a37Zetvko7ESCS0M2rzFC/4nlUpRmr/hmvDlih32KRWXP3F8QiPAFoZZzg2QfD8rjzksp/gFIHSn0GxHqBbuOPm3H/AApcWV6voP8A4N3iHgILP7Li2xah+51RPD2o5tYRIi1RK3K8ETOlJGjEdUvIGWorSnD2VIkKJKWwSooSpWmiT1dHqtMv+TJEY+NpYqDmaOCfcO3tyxy5/LzFI3qfA6KfKS3n1HkKxmgr2nkOPE8BjS/ZRyMRPb5yjSypyo9a3zLNu5LZGjDZGDYhGVMfA7vutoZ2MIOui1naNVHpV9JbbXslzOi1l+aKg8/gjNO7tPsw9fzJ3nl9W2Fs7ERfw8PTlXzphU9uQoK8KntxSnf8fZVzF7zuR8B46mxafJ8s5s5aboJllYy6qLD9C/yq1c+psa+NLlxh+nw3EattqKlHTTQk9VW+2X269Yz7ZYOqX0l5OFYs
VAKs7HNQSMgeAx0Zb75tfT/plZ75u8bSbXDtloXRVV2IZIlACsQpozDiRwrhD5F4Pyri33EU3DXuBv3a9xN7hMHJMsrbB6+jRsRydytlu3lHPukQRMjwK6ydP5iW0NyGXEKGqFDrRf7Pdbf1Cm0dQSkfmRq8gYtSNyCWUvSoAJ40AIPZiZtHUtjvPRsvUfR8CsDDM0ULKIyZowwCOseqhZlHAklSpBzGOwjjH2/Y7wlwi1xNgdUazHajGbIzC+UvWMguxZMyRIsZexH1VnbTHFSpa9qU7lBKUpQEpT01FBtmx7ULDb1020MbKg5kkHU7HmSa1PbwyoMcD3+47z1X1C277w3mX9zMjOeAVQQEjUZ6VVQAB2DMkkk8y37WXCsX3Dcwc4cPykNqGa+1XlarhuuhBTBuHbnBhQ2id6VoDlTeGNJSSCApoHrmzpG3S7vLmyk4TWUiewkoQfcQD7sdx+qV/NtO02G7QfHa7tBIR2qFlDr/AGlJU9xOIVr5M5AxHi/N/btNSutx+z5JpssyOrktOM2cDJsNgXuPS6p0BQSI8p+YwuU0tKiJFXHKSnasKDDcb2326XZSaWzzq7DnqQMtPYaio7VGG07HtV7vlt1YAGvorR4o2HAxysjhh3gBgpH4ZGrxFJae+LhVfA3GPs7weZG+lvHOKshyHKG1D85GT5Nc1t/dRXl6D1FVcqyMNJ/9XHSPLpu6y23+E7Ts9iRSVbaRn/ruyuwPsJp7AMVp6V9Qf5n6j6n3dG1WrX8McXZ5USPGhH9YLrPexxfv7bFRMe9rPCV7OUG4MDgDjS5lu6b/AEosTjqlnS3NuoKvTabUdPs66C6euli6WsJJDSNNvgJ9ghT9Axx31pZPdeoG7wQis0u83SqP3muXA+04pd/bNrrDlr3f8kcu20P54GP5zmsx/e4+iJkmfZFHiNxg8rapxb8G2sdqljVSWlHTXwpz0u17n1pc7zMMxHLIedHmcCle8M/HsOOmvX0xbB6X2PTFs1Q0tvCBSmqK2jJJpyoyRZDtHLDZw60ke3j90G0jRJjlbBtOWMhpFtoBbaXVctRH5NJBdQEgKixpmTQlp1+XVlKiduvXzbmTYPV1oWNLeW9dCOVLkEoPYGkQj2A42bwj9Y/y4Jcouq8g2uOQHmGsWCyMK82SGQEfvEccT+/dGvv1b2puNSIkUS2uUsOdEuMCwVoXX5GlxLsdBDCiohPzAA6jqzPWKLy+kaqTo+ZjFD/a54on+WmfzfUjSyjX8hPmMuacuGGD+1NAdme3fMvp3oqnk80ZEPpHHksyFJ/sbjohxr1drTgUSRpuB1HWj0ScL0pcBgafxCTP/wCRgxN/mnjMnqDZ6SK/waLKuf8A2m7xHz92th+PfcINyGXWFiqz35HUKQf/AGMxTuNeygfiNR0s+upBuNtKmo0T/fFh6/lMRks98DCh821/u3GJ1cPr19g9APAD2+Xg+/8A+Ze2/l1ZnT//AOLGKn/spv8AhNijesR/9va4P/7QR/8AHTFY/wC1SVD3DZkU+I4ZyH+A/vfjvv1TfoeK9WXH/u6T/jQY6W/muUN6d2Vf/bUP/o13joGMt5ktutKUh1ohSF6/hI8vtGvj9nXUuitQfhx+f48GYyIwJudcuGBcPcmcjQ07XMewzI7FlhKloSzeorJAqWPUQCtqPNs3GkpWAdm77Og3UW4HZ9hvdwP+JBayOvKrBTpHd4qD31w0dF7KOpOsds2Yf4N1fQxvzohkHmEDgQE1GnOlOeKzf2dcPakXPNuZWMFL9e/SY/gLa3ArZKj3L1jZ5DCGh2lKWYMErHjo4nTz6ob0W2oXC7lfyrWPRHCD26tTOPqCV9uOuP5puoGs22TaLd6S+bNcsBy0aEib62lp7Dhe/Zhyd7hP9xbKuF7GxWIuVVHJfGzkd4KQ3NyLjm3OTw5i0pCQiYxV4hZIRu0TpJWnTcU9I/Sldr6ludpc8RLF7XjbI+2it9eLK9UdO/8Ap7YdTxKK
I1tckjlHOlCPYWkj+oHhXHZzHbRPjqcUdSoqWD95I/j26sd/ynoOGOerYi4j1kipwgPRkMSRqBqCft/h9xHUlSWXLhjBlCyV542jMbQAnuQSfDy/n1h5ZxI+aVBppljyqToojTtt11J7ff180Yy87xUPCmGPVLUaiuSlJ/8AYKNqfD/wkdSYxRQe7A67J85h+8fvxtojrcJUSBp2/l9nWZamIwjZsKTIjsuMtyF7WytAdVroAkkAjrWdTAlRnyxuAiRgsh8PP2YqG/cXx21Zz61y2w9tOE5LgUCDQxqXl+Bk+RQ8gmxDSV67RjKqjFcrjOFFPcrktMvSq4qREZQUObN2ta9QW141097d7es9ogH5lXHhoK6/LYGimuZGQHGlcdC9DX20x7dDtG2b5JZ7nMzUt9MLgtqNPL86Nlq4pkGFWNCK0xArAcaq7exh3mJYzxbLVAkxX3AnJc9lzq3R1JMhFTfemDKY2ktqcbKQsApPgrpq6H2KLcL2LctltNmmeF0Y6p7tnjz+LypKAstKqSpANCDwOEz1T6ruNn2qfZOqdx6mtormKRF0Wm3LFN4SNBng1EI9aOFYMVJBBzGJTyICFkrGupPkT/h8eupVJpQ9mOCdFKlcc93OzXpfuGFo6/8A8XuIie/f8yNg6/H/ANPrkHqvP1iP/vK0+6DH6Penx/8A9bAf/rJuP33WJcfuQ+3fM85ncb8hcdYbkWYXKWpuF5FBxejsr2xbhJcct8enyY1XHkuMQI7785p2Q4lLaFOtJKhqB0++tHSm4brNZ7vtNvLPc0MMgjRnbT8cbEKCQoOsFjkKqCcVD/LD6hbPsFtufTnUV5b2diWW5haeVIk1UEcyKXIBdgIiqKSSFcgZHD794PGzHE3sJp8AjekU4qrjurlutJSlEq1Fmw9czgE6DWwt3n3z8S4epvX+2R7J6WptEfCD5dSe1tY1t/aclvfgR6O79N1V/MBN1HPWt3846g8VjMbCNP7EYVB3DGt+2CtuL7c85sJDiWYcLlvJpEl5euxlmPgvH77zq9ATtbaSSfu6z9D5Ei6QupHNEW/kJPYBDASca/5q4Jbj1IsIoRqkfZ4FUcyxursAe+uKrMKwLkn3U83Zs/x16TeUXM/KuRnn7S0cqv0+ul37RcUqewmSpl1p+8YZSEnT5tAdNOqJ27bN4656nun2ggXsry3FWYrpUyD8QrQguoFPZjrXet+6a9J+hLCLqQM21wR29mBHGJNbrCaeAlQQREzEn2kYefOntD9wnEGGIzzkyRW2mPQ7SJU+tDyiXfyIEi23padUzJjtiPFediobWsK7uKbBB1GhHqfoHqzYNu/im9MklqrhcpWcqW50IFASACe2mA3Qfq/6edYb0dh6Yjlh3CSJpKNAsSuI+IqrGrAMSARwDYs14gqMv5q/aa5ftqCu/VUcOcf5BX5jMQ4021W1eK5bXWkMLC1JH1EfFZUVZRqVrCdwB3AdWAnUFtN6M/L3MgF0Fa3A5kpIGUU5UiK1+vmMUzc9KXVj/M9HcWcLtayyLeE0yVHgKyuT2GcOB3mnKuFr9gmdUt5f7iq6bJjpnSIvEdjEhLWkSHoda/yTHsJjTR+ZbMN+1jIcUOyVPoB/EOln0ulKwblEh/NZYCBzIHnAn2DUK+0Yef5grUzXOxzupMCNdqTTIM3yxUV5EhGI9hxNH9zn93HnD2h+4Ol4Y4AjcP3uPxuKMWvsxGYY1cZBc1Ob3l3li3qtUmpy6jZixziLFRKQy4yXR9SVlRStIArqXf77bd0+Xt9BogJqCSGJPYRyoffg50F0VtO99Pi9vhKpaRlXSQAUAUVzU1OrUK91OWCd+8EHbv8Ab0yjKZUVDFlYNcNWdqlqOuO2zY2mX4k9MaSy4pTjKUSpCkhCiVJA0J16Zupp5W6NlWbJm8o0/tpiv+gbe1j9VLUWuaKLkV7vKlAzwFP/AG38hMOe3bluSIZmzlc5S2IzICQAEYFhLhcccWUttNJLncqO
n3nof0BIU2WcFgsfzBJJ/qJw78TvXSOJurLJihknNgAqgVr+bL9mKqv3v0Tm/wBwPPUWBiF9OC8XhIhSRLZQ0cQgltHrBDY9RIPzADQHpJ6weOTfHaKujQnEUPD2nFvekqXCdFwrcqFkE0uQ7NZpi63/ANuOaS0i+1Dgy1sWIO933ARITsqFKElLq08c566zu1bbVuU0hQ/99/b0b6vuYJLFIoS1BMDQin4W7z9DhR9Kts3G03ua5voghazK6ga1IkjI5DlX6sDf9pvE4lz7GMWluNJU8b/lRveB8yQMheQO+nbse3Vj+mt68HTkKA+Hzm/vnFF+vlnHceoF01Kt8pF7R+WuKRvaY16H7kkBgf8Ag8kc7td//eeKclI7/wAukXoo19UUbtubv/hT4u71TXT/AC/SKOVhtv2T2mCP+7LxAvGeVsS5fr4ZbquS6P8ASLp9mPo0MsxBuPFS/JkIVp6tljciIhpKkhShAdUFK0IQS9Ytm+V3iHeYh+XdR6XoP95HQVJ7WQqAP3Dx5Af5YuqDuHTFz0vcN/zFhNrjBP8AuZqkhQeSShySDT81RQczj+ynLEVXuUKk6hY4c+8bf+qnh/Pol6LIXG50/wDJ/wD7/hf/AJq30fwGv/lv/wBKYih7emUzP3PVNEEpe5z5u7efdjkJQ/1dLXTB0eqgI5X9z902H7r0a/5dyOR2ew++2x02VeI/VxrqmQ0VLtq76qGANf8A2aUxcmxgPHRTsNUhHxJAHXUc11odJickah/qtkftpj8+obHWklvzlSq0/aSrD36dXtphKqOO5UnauQwtIV30KTp8QNNO+n+vqRLuCrkpzwLTbST4607MOey47SKa6QWD+XSWiydvhtgvad/s06hXG4f8pIa/gP3HG63sl/iEIA4SLy/eGOcb9l6pRc+7y2iLGoTw3mL+mmupbyPCR/qX1zP6PT/L9VSP/wCRyD/bjx3h/NzALj0thjOY/i8H/CuMdXkjCo7gBWlCQAAB/Lx10PXSrbmRkK4/Ouy2ZD4qY5vv3zqNqlyv27JZA2v0fJR1HmWp2E/6vU6oL1muTcz7eTySb748d5/ykWgtbDfAvBpbT+7cfrxat7PuMDce0z262AZ0+r4dwN8k+K/Ux6CvcO3mD1bHR26iHpjb4ifhtIv7gxzl6pbU0vqHvkunJt0uP+K2FLnnH2OKuI+UORZTCktYPguUZKClI9R2TU08uXCjtFW1HrS5jbbTYUUp3rGpA79Ht06kXbtoudwr/gwO47yqkqB3k0A/RhT6Z6Ubfup9v2NFr83eQxE50CvIoYnuVSSachlXHLJ7X/aVz37pV8hW3Cwr9/G0Sjl5bZ2mQyKFbKMrcuU1jTEpmPIckyJpoJSlIJTqGie/XIXTewbx1BcyDaXVJ0pqZnKV11pmASa0Ncfp1171x0r0PZQf5mieS0nLaESJZQPKC1JViAANage3CV7hPbFzbwREx+85YVCmMX8uZTV02HfSb1bEivaTMMSS5JYZXGS6iQtbSQSFbXD207z+q+jeoumo4bvfGSRJWKKyyGShUVoSQKVqSPYcDfTr1P6I66mudu6TSSGS3VZHR4VhBDHTqUKSGIIAY5UquLoLfN057+3taZUXESJdt7d7VizeaKS2q5qcbk0l6oBOoRpcV742dygjaTqOuk5txO7+ksu4MQZH2d9RH7axFH/21OXLhjhm12D/AC3/ADHQbOqlYYupYygPHypJ1li/+ZuufPjgA/sacfY1ynz3mOB5jf8A9sY1ft4Wxa3O9DRjtNvZQ4hlD7ujTDklQ2JUrsCeqm9Idwutq27fNxsojPdxQQMqD8RrL2ZmnHLHQ38zu3Wm8bn0ptO4XIs7C4vblJJyKiJStvVjXLuqchWpywLv3FuIqH9v390TLY3H1pGynAajMMR5bxWQJDNj+pY3mUSLZ5dQTSwpMYrYvHLmrIac2+i2lSSgnahFk3S/2jq+HqW5ga1mlnFyUAI8Luwk0hswHPmA
DkDQUplbm37Ns3VHplc9EWF9HudtbWrWAnqpBkhjQwFilVZowYS5HxMCTWuckP3AsieyX2wW1rGmMu0s64wmbXtQWwzXrjSrOO/FfZaSlJ2uMuBQKhroeug/Vprd+g5JYDqV5YWDdoLAg+/HIP8ALvHdQerkNtdDRLFBdIy/ssqMCKDmCCMDD9u2gr8p9tPJGN28cSqnIORcupLSOoBQegWuB4RCltEKCh87D6h4dugHo/aw33RN5ZXI1W817MjDtVoIAfsOHD+ZC/utq9Uds3SybReW22W8iHsdLu6ZT7iBinrPMPs+P81yrCLhKxY4rfWdHJWpksCQa+U4w1NaaK3NI85lKXmiFKCm3EkEggnnbddum2ncp9suP8aCVkJpSuk0DAZ5MKEZnIjPHZ/T+823UOyWm+WdPl7u3SUCtdOtQSpNBmhqrZChBBAOWOie6lOD9uSPDVtS457SKN91COwbit8XQREbPf8AFIXufX5lRT5addK3Kj/4ZBv/AKzJ9fy61/Vjh6xJPro45/5nk+r5x/ocRk/ZQfWxyHzktslK04fiLiCnx3NXdorT49x1VvphT5i9rw8uP+82L59ejSy2z/r5f7qYCnLUoo/d4p5rZO5Hux4RkoI8QUZJgLqQPhtI6B7koPXoXl89D96YadjkI9H2l/ENpuT9Sy4mb+9lxIu9puL/AHCV0X1JNLIe40y99mMFumqslSr3Epct9CgtmLXWjdixqtKkqdsGk7knQLP+oe10gh3OMZofLfLkc1JPYDUe1hwwneivUOq7utgmbJ1E0YJ/EtFkAHMspU5ckOR5RX/ZhSpfujzpKRr/APQFyjXT4f8AULi3oT6cPo3yVv8AyRv+JFhm9bF19LWy/wD1xj/4M+B9+7bXSYHvMyZx9ottWWEYDOhL3IIfjJpE16nUpSpSkATIDqNFaK1RrpoQTB681HqF2PONKfVT7xgr6SFf8mxIpzWeUHuOqv3EHHT7yHkWOw+F82zc39UnDnuOL+9ayVc1lNQumlY5KlRbFE4qDK4smO8hTatfn3ADUkDq3bu5g+QknLAQ+Ux1VyppJrjnXbrC6G7Q2mhjc/MKumh1agwBFONa45z/ANk6E/J93GSSUsLciwuE8uXMf2n0o4kZVgkdj1FjQBTrywEgfMe500BIqToUN/GHYA0Fu1f9ZMdF+qhX/LkaEjUbtKe5JPu7cddWG4o7bWCQ0jczuTrp33HXXalaexB+0dWZd3KxR1OKP2+0aaULgTfuz4JQr/bO9zEW4rmJi67F8YvoHqb0Lh3FPnuKTK2bFcZWhxt2O8jQ6Ha42pTawpta0muN7nNzbyaswB+kYujpa0+Tu4dOTE0PsIOWIm/s1WNXin7cnHKamIzCn5PmvKd/kMhhAS5Z2zea2WOR5kopAU4+iix+FGCjqfSjoHgB1hsMANmr9pP3kfoxJ6ouWXcnirkFX7QDifynpMx1b8kFzcTpqSSka+CVDXT+PR1yiiimhwvQIx8bCtfpxxSz++7WJR7UeJbMbif/AGoKuh906bQvjvP3dNR8pJ9H7+lfqB9UCKf2/wBBw6dLoFu37fL4c/iX7MWF/scRKev/AG4uHLyXYIrFLvuW1Tp8muMlmI0zylljYW2+4tDDS0MtblKOpA+7ofa52q+c4S2FeRqczz4U7vrxC6ha6l3OW2toTM5C0AkC/hB8QBDH7sc+ljkGP86/vv1OTcYZHKvscke8TD7aqySClEcXNfxfNppF7KiLjvtNuUtinDZaUPNrIfgrDiQrcEkZcSKbhni+GuWQHDuGQ92Gywt5I9mjt7xFWXy6MoJcAmppVqk0rz592LZP/bilCx7QeFVuhAWfclEbGxO0KSjjLPVA6eGoDgBI8fPv1pEmuMV41P2Y2wppvyVFE8kfeP1YHn7fMays/wBlvk2PWJUuUJHNNRHiIdabM2RbQsqAU648UtstwIjr8hKlEAutJGuvVZ9Zb29sWt1ySOeGuf7VCxPYEiMs
leZjAGeLQ6X2iG5lt7hl1StDcU7uSgdpkkEcfYA5rQVxV3+xy/Tq93mY01tZxK53JOBMsqKhEl5DTtlYx8/4vv3q+ClZHryxTUkuRsHcMsOK8Enob61WjXnSCR0Pki9jLkfhXRIKn3kL7SMbvTO4Ft1G0lfzPlnCjtbUhoPYAT7sTt/fxn49A4m4FxZqbBbvXeQby4jVvrtCyl0kLGnoMq1ETX1xCYnT2Wi4RtLjmgOuuiN6HWSwbxevbI3yi2yrrpkWZwQK9tFY9vPhiwfWTfF3fZ7IXdP4qJzWh4oEIrTjkSO6pxML9oHHpbXsL4elS4zrH6ha8kzYaXW9pfiL5KylhqShKu/pOrjq2k6bkjcNUkE196w6JOv71lNSEgBHf5MeXtz92DPpwJI+k7RWBGt5CK8wZXzHdi0rE51rgy7SPFD0aJOlxpX1kQD6hDCisSK5SVED6ZUptSVhBCilSD5dVttG+XPTaSsR/wAus2qudSrChj7NOoVyzNRyw8bnt0e8unm0NxGrIVPCoOT17acCcqg9uORb3Uzv1f8AfGhy928yPdh7ZGiobxqWVcSRSPnUVggtaePj59dvdD7oN39Prfc1AUSW82Q4DTJKuXP8OOWOqrM2PV01qa1WWLj+8iN+nF3P7weQUdX7YvcdaWr7MY5H+l4zQqcWtl6yuH8womGocVlzamW6GGHnVNt6qaYYccV8qFHrkvpS4m3n+YS3a3t5HRbmVvMBcKqwwsHc5aKAnyxT8bKtanHRnUSR7X6PzRTyBSbeNQp0klpHXSozrWniP7oJpQYrt/ZSSxx97Lf3Sec7R1xFZivHmI2EVoMhH1Nlx1ifKWRIgRZRfSlybaTcpgsIbUG0ocUglZCzs6u642qPqaROnW06Whq9SfgklUZ0zA/Kb29mWfP/AEfuL9P/APr9a+CUhcvxJExFK5E1kHs7c8qePaL7P/cL7wMoySs4DEBd9jS6Ju4nWuSSceUHctftG65pFgzHkOuuSjTSXHASPkaUo66dEuq+rOnumpLLbd5ieZ752WGNI1kH5WjUSpIAC+YufIHA/pzp/e97S63Da5ViFogaV2kZDR9VACASS2hsueCn74v25/dr7KaXAsu9x7+O21XmtjaYrjtpj+aTsy+glUzCbhVXOdnQIS65qU3PfejISVJcU2+rQEEqM7Puu13uq325fLCCpXSFGeVQBl7fdgXudhuFtpuL5tZc01aixy5EnPHapwvyojnb9qDHOUHH48+wyD2W5HEyGTHKFx38uxri+8xfNilKNyWduWUs1JaJJaKdhJKSekL5VLbqLyADRboUB7C1R9hw5rObjYjOT4jAa+0LQ/aMck/7J9e3ac+e4WC4nUPe0vKQlWhKW3E848AKbWsJ+Yp3gDt31PUz1dRJOiJ43/FNCB3HzBSuNHpdI0XWVvKv4UkPtGgin24Cfv5FrzB72ovEeJb5VnBk4BxBjkSY8pqAcmyeYxNcaa2+sYzC8iy8tPK2bt6FHRQCdYfpVbQbF0B/EJqrA7z3DDmFUlSQOxvLLgVp4q8ScTfU65l3rrg2cNGmRIYVp+JmAYVPaPMCE/u9gGJDZL+zj+4tFwzIJVjkuH32OV3G95yNKx+Ly3f2ibrHcBiVtrKrK+ofqEwrG5r0z4pixVbUh1aQlSSlW0l0z6h9F9QXctnshK3CKZG/LCVAOgmo4kHLtp3YV7vpve4IXe4IKRy+WRrJox1EZdh0Nn2gdoxYz/7bG5/YXr/un4BZt5FfKjtYDzXjKKx+O3ZelCessJz1b0OSl1q0rXhOx1CgUj6dwAgkujR+uHu0KfLatOrxBaVp7DUe33YAQR2b6zchPN00QsSACe0qQeQ+3HV9d1dPcyIuPX7TrjzCQuPYKpnIs+OHCsrcrrKMr6cICxq5HdSpo6+HcdELS9iLBLZ6TcWUqRXtrTIHvGVcQbyyeWI/xCMBOCyI4OfLNjUj9GOMv93/AIKrOVP3eeCODb3I5ESi5F45
4nx2XkVFHR+pwau3zTkVpyS3EntOR27GOltWqVBbfgruDp1MmK3l6iHIGimntPDA6OKTadvlbJipLDsIoP1Ysf8AaD+1P7fvZ3ySxy9RWea8nZ9WR5EbFrfNf0dMDEROjPwrCwoqqnrITf61MgSnI6pUh18tsLUllLalLUpksdptrd/MqWkplWmXsphI3bf768iEBCpDxIFfFTtJJyr9OGKl8Q/cc4i4K/dh93nug5gxPPOQaWzg5jwhiFVhzGOzHg1jdjhmCs3Mv+4LyijIqpOL8eOoabQVuKE7VYBCiQD3UUO5SysCVqVFKcqD9GGqKwuJ9kt7eMqHoHNa8wWpkDn4vsxIT9iXMaPKsF98PCdEp2NQ1MqLy1gUS5eaatnaK6rshxS2/UIjUiY2iXBg0dGl5Tb76fVeI3kBKlFemr4W5lhFdBzHbzH6sAutdsN2tvcNTzFGk9mRBH2k4qq9oHs3xP3Ie1P3e8hqYsnOUeIaustuOzCnSktyHIFRbZPbVCqlsiJYyL2DSLiNFwFTbjwKNFAdadn2iDcdpu5irG8iAKEHsBJFOBrSmJPUXUN1s2/7fbBkG3TkiQEZ5kKDq4jSTXvpni7f9pDLOL53spqLWriVWK3OC3+W0vKf6NAbYXa2dctFxHyW7cKTKmTJmHToO59anBvYUhspS2G0WD0bdQPsarGqIyMwcjKrDPUe0lStT7uWKf8AUqxu06qczPJIkqIYwTWinIqvIAOGoMuNTxqaR+EPePw1iqffHP5bxDkPI773Zs5NBppOPN488xjUTJ3M9mTX7dyxvKp12ULDK4q0iOkpQIXykagBE27f7OD+INeJIz3wYeELkG11rVhzYcOzFq7z0luV3/B026SFI9rKE6iw1FPLApRWyoh4/tYtL/Z3z+PkPtevMPkQWJlhxvyPdwWW1yl7k0WTxIOSQXVsjb6QduJdikDwV6ZIOuoFhenV2Z9la2LUMMzAexqMD9ZbFOes+3ra9TpfBarc2ykn95CUI7/CEPvwQP3UJ5f9mXJLKa+FFSLnj4qUy2r1UkZ3QaAuKJ0107/Z0V68SnS1wak+OP8A4i4Bek7169tBpA/Lm4D/AMC+DJ+3BHsJns54Cjx9Ag4tP0LbDZX/APPRffic2lRJ16ldJMqdM2bHj5R/vNhf9S9f+fNxC8POHL9xcQq/fMvk43hXt840sJk7bl2XZHnds1G9NxyLW4fXQqCI99O8phLz8tWZSjHSVpbKoy9xHynpP9R9yR7W2278LSl2p2KNI+vWaezPFjehG0SjcNw3k0Lx26xJWtCZG1nMVoB5Sg5VowpzxB/N/eBw/wAo+9v2q8v8Y47meH0+HuYbxplbOZJpYjox57ILCndsIkimu7pp0RscyyUhz1fSJDKE6lJ1SHn6psr7qrb91s43jEOiJtdB4SxWooSMlduNOAw22fp/u20+nm9dP7rLDO9z5txH5es+MIr0OpVObxKRSvEnjx6YbhuLjgVHbhuuWr7exbjgJNfHcB+QKVrpMkIPzEfgT28SeugIXMwqT+WPt/oGOM7hPJNKESfcP14iN7vJElftV9wKAUsNDiTOQUNpCdw/QZgIUT8xB/x6E9VEf5bv/wDzST+4cMXp6n/2dbOSOG4wf8RcVK/s3HZd+4F1YK2WKbjp5SPJbqZmZJYB/is9Vt6PZ3F/Tjoi++TF6fzOkLY7QTwElz/dhwLf3Y8rpbnlnj/H4c9iZd45ilnJyGPHUhxNa7e2jbkCHIcQpQbmFivU4po6LQ042ojRY60esN5bTbraWcLBpYYG1ActTDSD30UkjkCDzxO/lm269t+ndw3K4Qra3N0gjJy1+WhDEV4qC4APAkMORxNv3cQ7TG/2zqvFZUyY5+mYRwFVz2pKyr1ZlXOwuO684guPBuQp+OSrao99e58enLrS3+X9NI4ZM5o4LVSTxJUxg1+rvxWHpddre+u813DlbzXm4yAD4aOJ2FOHAHLIZchhE/ZxoKbIOGeX
Y06cuvnp5Oq/pXlI3RXG3MXhpLbqh3QoLTrr4aHqD6OTyw7TdlF1J8wK9vwLgz/MzbxT9S7cHbTJ8iadn+K+K5vZ1Daov3BcTp76RGrHYuf8r448qVIZS0m4fxfO6SDAS8lZZckTLl5qOyEqIcdcSlJOo6r/AKJlS19RYXuCEAuLhTX9po5VA9pYgDvOLl9U4JNw9FbpLMGQtZ2bjSK+BZreRmp2BFLHsAJxaD+6DMqKX202tM/NYasLfLsVYro8hxDUme/DnmfKaisKIceMaJGW4vaCEpTqT3GtyerE8S9HOsjKskk8YUVzajajQc6AEnuxzT/LxbXEnqVFJEjvDDazmRgMlBTSCx5VYhRXiTlhk/tGR5DPCudTHGlojv8AK01MdxQ0S8uPieKh4t6/iDanEgnw17a6g6CPRyJv8t3LMDpa8ah7aRx1wxfzLTRnrexiBBddsTUOys09K+36cRiEHtpsDW/ul01gW0Phj3GcnrWy6NUPIXPzRDja9O4DiFkajuNdR1Vu1xeZ6jNEDQncJ/d4pMX51BN5Homs1AdOz2hoefghqMSx/fPkIl577epUd1bkN/Dc3XHbeOsqKf1yl9WLJ/zLaOm1Y7LQQfHXov6qIUvLRWADiN604HNcxhY/l7mE+2bk6n8vz4qV4jwvUH2dvMYnpzTxC77hv2ieLMbiwjNyjjv228Jcl4UG4n1kv9WwXjGjkWMKCyFturmXWJuWMFoIO4uSU6JXoEKKbjsC3nRcDxCs4s4ZVyqaiNSQO8io9pwJ2frhtu9TLq1uTS0bc7i3Yk0ABmZVJPYraSe4cuOOb72aqKfd97VFJUEqT7keDVJUUlQSRyfi5CilPdQB8h3PVTbOofdrVDwNzGPrdcdDdQymDp++mHFLOZvqjY46K/38Y1g17Z+HnpYJZe5zillaTvZWTgObEltzTv28joR5jqw+uo4o9riEfxeePb8D4p70rvri8324MtdHyhI/8ZHgpftI2UqD7EuLQwXNxvuSlNBAJOv/AFCyPd4Dv36bugbdJumoC37Ulf8AxjYq31kvpbbri6VCckhp/wCJTE77TMrqGxFUW5LXqsNJUpQUARsT4HTUHqw7XbYGUVoaDFJbjv10spB1CpP345GvZBIgY/8AuN4b/dl1CxSPGz7mKqs7Ky0eaiTpmG8h1DcBpDSlJlWdjZSUQ4jYVtelvNp10Vr1zz0c3ynqBCJBVxcToBUDxskqKK8B4iAfsx2n6nR/xL0YuTAx8trK0kLBSxEay28rsF4mkasaZd9BXFq/7r3IlJZ+12djUFScaqlZri7eNY1JlN/XXk1meZk+4sCkhVpdqgx3HHSAW4zQ2IAHjcXqlDHZ9HyG6kVr6aaIA8K0bUUQfsqASeZpU45n/l9mm3D1Nh+SidNrtrWdiBU01JoEkrcC7sQBXIV0rzwLP2iKm7k8DZ39PCeECw5hmIRIS2dJj8PEcSHplQ+ZTERb+v8AlLivHVJAgejQWPp25lkyreNSvdHHWn3VwX/meJm64sIIs2G2JqA4is89K/fT39mILe2uC6n90lmA6nV1nnfm5h1Hf8cePyGHB279i2eq02GUL6mGUcPnrk/X5uL96otjL6HJbH4jtViPq+X/AFYl1+9XwdNrv+jHuAjQPTg2bNhxJkM7RxG6dXiZluHoUkAsOvPw5NwlSztc9OM2nVSUpCCvqtbxSXdvuUdNZUxv7iWT73z45D3B/QaWez2+72OavlB1mj/tAJJ3/hjIHDMnInO772K8uMe4P2OYFyJIl/WZExx/a4fmrzidkj+7sLgScetZElI3Nh24RBasBsO30paeyfwJZdl3Ztz2KGdzWQRaG/rINJ+ulffio+sOm06f6yubONQtu1yJE/qSHWo/s1K+1efE88v7G2R/2t7yr+zKPUSrgvOIi2/NSJGT4FuA+0BGv8Oq59O7X5vfmirQ/LsfqZMXl69bgdt6IS5pq/5+Nae2ObBY5z9iOXZF+6N9bV8e
5HM4Oz3Parma/wApjY1aPYTCbfaey3NcZuMgTEVRQbG+ympmMNxVPB307FjRGi06n7no25k65jHkyfwiSRZWfSfLAHidS1NILMpFK18Qywl7X6sbfH6NSu11COqILd7VIjIomJJEcUix11lUjdWLAUrG2eWGx+9vORN5C4JIG1bWIZil1Pb5VKuqg6aeXh1t9XFpeWR4gxSf3lxj/LGa7Tuvb8xD/cfEmeRuUEYH+2JSTEPONTbP2z8V4LXhlWx9cnM8MxfFHlsLBRtMWDZvPKOoIQ0op1VoC67neDb/AExjnBId9tgjFO2SNE+wMT7B24rrYdr/AIz68T2pAMce+3czV4Ugnllz9rIF9pFcq0pg9u3tS575yxu7zHiWxqqqprbteM2EiblMzHJEmwjQYFo4w2mJGdMpliNaMqOqvlUvw6pzpvpLqDfrZ73aGVIVk0ElyhLABqZA1oGH146b639Rejukb2LbOpEkluHi81QsSygKWZK+IihJRvqw2ebuFOY/bDnuIS+Rn65/JpyIWX0NvBuHshjvu0Vk200iTNkNR3VyYL8Nrc2ddrSkd9DoI+97LvfSm4wTbiVN2aSIwYuKowpUkA1BAy7KYmdK9U9K+oeyXdvsiuNuUNBKhQRkCVDWigkAMGbPtByxbX+4ff1+T+0emyOsKVV+SZLx5fwFocS4hcO1q7OfFWlxICXEqjyRoodiDr1fnqvdJe9BxXkWcUs0Dqe5lZh9hGORP5e9ul2r1bn26cUuLe2uomHCjI6q2XLMHLCd+1kP/wDX7MdToP8ArHkHh4//ADlce9wfLr56I/8A4KXHb/EJP+DBj5/NQaeoVlXh/Bov/SbvAP8A3aHnnbjgxDrzjqGqrPw0HFFQbBmYkSElRJ0UAOlj11AFztpH7E/3xYff5TGJst8FSQJbX+7cYsm4dwh6T+3lhk5tmQ09Z+3q2TFLKkPsSn3sZtUstuIcKTHU6sga7gkE6+HVgdN3DP6eRW8fic7Yy04GpjYYpvra1SL1ouLyUlYxvqMTxFBMhJI9nZio39qNTSvcjksNTiUybHh/Jo0BjeEvS5LOWYLYOsRka7nnW4EF54pTqQ20tXgk9VH6JTxw9WzCQgF7CRRXmfNhag76KT7AcdH/AM0ttPcendq0Ksyx7vCzU/CvkXSVPYNTKvtIxPH37e7XOPbVbcbUHHKMPmW2RQMntMng5LWzbORCgxJFJFxyQw1Dt6tyK3OfVYpUXAv1Cx8um1WtnepnX259JT2cGz+Q00yyNIJFLEAFAhGl1oCfMGda6cuBxRPoZ6Q7F6i2u5XnUnziW1tJCkLQuqBmYStKCXjepUeUcqU1Z1qMC/3dc13l77A8Bu776eHk/Oa8IYtotQy9BhmO2XczmPMx35D7zddvoI6AC44pQfT4pKiBPX3UVzd+ltpdXlBe7iYahQQKZysQCSdPgA4n4hywf9IeirPbvX3cLHbizbXsguSpchmJyt1DEKAX/NZuAA0ngQBiC3Bnsg90HLfH1NyNxlKrKrE8pkWqa12TmE6gemuUlrMo5ry40WK4lSWp9e62lRUSdnVVdP8AQXVm8bUm67TJHHZSlqAyshOlihJAHaCAe7F+9a+sfpz0tv8AJsHUcM0u526pqIt0lVRIiyKAzMDXSykinPHrj6m5H9jnvk4YncnuwIeQYjyHgWT5DYsWSrOBOxHKpzEbJJjdrIaYU65IoLGcy48tP5chKyd23uIawv8ApXqmCHdCvzCSozMG1Aq58R1Glci1T21wdO8bL6memt7cdOCT5Oa2njjUpoZZYlOhSgJoAwQgA5qRSlcd/eLyzKjNkL3JUhPYHtqfPQfDTq4r1NDHLPHH+zXBkjBHZhwvVMma4PQZK1Ad+3YDv31+PUVZ0iHiOWGEWss7Dy1JOG1YQ34D4akIKFadtR4/HT7up8MiTJqXPA+5ilt5dEgo1MaJX6nbw0H8x5/yH+HWZTGvXry7MNillIXUVpB3awox/wDr
SevIKIPZj7dODcOf3j9+FpL6ktlQQBprp2J1Px7DTrxUE51xqEhVa0wLczyN+Iy4Gw+SAdAhKh37jv26N7dZo7DVTCrvO5vGpCkj7MVOc9cKwOTs0s8uvcmzRb0z6QMVAnwnKarRCrolcEVkSbFk/Sok/Seq6AdFuuLV59Ybn6X7X1FfNf3l3dpq00RSpRdKhTpDI1K01HvJwb6Y9e9/6I2ePZds27bpNGusrrIJZNcjONbJIurTq0r2KoHLA5q+IrOvMVuLyDmLEKG60tuv/U6uPCKGlpWWDHjQGkoZcCdqgnb2PRyx6Dl23y1tt43RbeNgRGGjVCAa6SqxgUPA0plhc3f1ft9686S+6a6fe9mRgZmjleUEggOHeViWUmoJrnQ0OCz+mrQPzJ8RI/8AhhWe33DqyBLXgDXFK+UaZsKYpu5o9pHPeQ+9f/qjSYQiw49/6h8WXxyVOU4VG/8AZPSV2G/q839HmZJHvdYRr3/y/pvVc9P5Eq3J15z33oTqy/8AUf8AzDa2mrafnoJNfmwjwR+XrOkyB8tLZaammQNRXtTpD1Z9Pdq9Ef8AJ9/uOjqH+F3kPleRct+ZKZ/LXzFhMXi1rnr0iviIoaXTRGYjY+aTIWPD5WUJB7/Ent10RIzHIgfpxxhGoXiTTETvfJxhl/MHAF5hHGlC/keUzL7GZsWvctaapSuLX2jcma79Xdz6yuT6TCSdqngpXgkE9uq/9Rtl3Tf+lZdt2qMS3rSxkLqVahWBObsq5DPj7MXB6JdT7D0j6gQb31BObfbEgmUvokkoXjIUaYld8yaVC0HOmAP7Zfb17gOMvZXzvx1L47VF5Wy67zhzDaZGUYe+7Ij5Ng2HYvFsWbeNkD2PQ1xZMSW4BIlNLBj67fmRvUOk+neptg9P9y2iaAx71PJMYoxJEaiSGKMNqDlBQhsmYHw8MxWx/UTrbobq31j2PqG2uxL0taQ2wnlMNwAphubiZk8tohK1VZBVUI8fHJqan7dftR5j9v8Afcn5Jy/hDGL2V1VY5R40y9dYxkLkmEmZZWF86XcaubhmI2h5iCEhxaVOEkgaI1OHpN0dvHTs97eb5D5EkiRpGNcbkirM+aM1MwlASCc+zGz+Yj1N6Z61tNs2zpS5N1BDJNLMTFNGFaiLEAJY4yxIMtSAQuWeeJk+7bjO85o9uvJXHdBWMz8ltayum43Abcr69Um7orutvIMZE+ykRIMT6xdeWCt51tAQ4dVAHp8652SXfumLvbbRS926KYxUCroyuBViFFaUqSBQ8sVL6UdUWvSPXm3b5uL+Vt8UjLK2lmpHJG8bNpQMx0h9VFBNQKDDc/aMwDnH278a+5zhXn/i2G3xRzRSqqLTHrLJMVv4OV1eW43a4lmtY4MWyG3fqX01LEL03lJbUorJSrc0Oqg6a6B3L+D3Oy9UW7QRGZJYmEkbHUVKv8DtTJU40rXux0J6i+rexHqfb+rfT2/W4v47aS3nVoZkXy9avGD5sceoFmk+EkqQO3FW/JH7ZXu44Y5FnW3t0kW2X46qXMGK5Zh+cVuGZ1X08hwGPAyFqVa4u+mzQgBDy65b8V4oDmjW70kKW4+nPVO0XjSbNWaCp0ujiNwOxgWU17dJYGlcuAs/ZPW/0/6i2tYeqdFteBR5kcsLSwlxxMZCSZcwHCsK08VNRPPsr/au5Py3mOl5R94CWarF6LII+SWeDW9/EyzMeSLSI8JcaLkc6vsLSDXY9KmNpVOVIlOzZTSVMBlAd9duTsvp1vMtyNw38aIlbVoLB3c8tRBYAV41JJ4UFa4FdY+ufTFptrbR0axmuHTQJVQxRQqRQ6AyqxelQtFCqfFqNNJvg/cz40zT3G+yzkTifhvH2ss5DyK44+l1NH+t47Q/VR6TOqC5syLXJ7Wlp2fpq6E65o5JQXNu1AUohJZOp9o3G+2qW1tELzErQVA4MCTViBwHbis/Tvq7YNm6nt9y3WUQ2qpJqfSz0rGygAIrMQSQMh7csQo/
aN4c5h9tvEvIuGcs48zh1zdcrTLmBWHKMWuhKhNY3j1K/LRKxi6uoIR9dWOt6LdSpQb3AFBSoh+ntm3Dadtkt9ziMcxmLAVVsiiitULAZgjPPFi9X9T7F1FvkG57HKk0HyqprZHjNRJI1B5qIT8QOQoe3LEZf3K/2zPe97nfdVkvLnCHCic3wC1xDBK+LkA5M4hx1Ls6nx6NBsmTW5byBRXLQiyWyjeuOlC9NUlQ79I+/wBjdXG5PLEtY6AcQOAzyJB+zFtdI7tYW2yRwzPplDNUBWPE1GagjP24tu/ef9pXuA96nB/G3GHtfwRPJmW4py/EzS6ozmeB4d9HjjWFZhSuWTczP8oxWrl+lY27DZaYecfAd3bNgUpOO8u00CoMzrr9hHHG7pyL5a5d5PhMdPtH6sGX9sb2V8scDexfF+Fee8CZ475xpL7ki4j1KcjxLJ/qajI8lk2NRFmXGE3uSY/LROZ1LeyUt2OtQS4EnVIYek92k2yBIpCflvMOscaZ/EOzv7R34S/Unoiz6uElxaIq76sNIn4eYKf4T9tSCFJzUkcq450eC/YX7qONffu7yxnnFa8d44j8h82WMi/XmfHdi7HhZJQcgV9C45RVWWz8hSuZOuIrZb+k9Rn1dXEoCVlJzpHpzfLTrWPe5oabd5s769cZ8MiSBTQOWzLDllXOgrivfUn1B6P3D0on6RtbsnqAW9pF5JhnUh4JoDKpZohGCgjetWoaUBJIrNb3x8B3PPXAORYBQ1SLHOaSyrMswNl2RDhCReVTrkeTBTNsJUGDH/VaGwmRkqeebZS46hazontbHXPT8nU/TcttaKGv0ZZIhUCrrkVqSANSFlzIFSCeGOefSLreDoPri33DcXZNmlR4bhgGakbiobSoZjokVHIVSxCkDjiOn7YXtt5n9vrvNTfMOE/2l/eSuNUY8TkOKX/136Cc+Fsf/mYvbr6T6T9bi/8AH9L1PU+TdtXtWvS7pbfenBfneYfI87ydHjjfVo87V/hu9Kal40rXKtDh+/mD9QukeuG2delrr5v5Y3Xm/lTRaPM+W0f40cddXlv8Naac6VFYQ8lftl+/HJ+ZOTc0wThuTJq73kjOb7HLqFylxLUyZNTc5JazK+a03K5CgWUIy6+WlRQ6hp1AWUrSk6jqrt56J6tO83V9b2xEclzK6MJYgdLOxB/xARUHgaHkcX30v6v+mMfS237TfX6meGxt4pENtcsA8cSKyn8gq1GHEEg0qCcI6P2z/wB0FDujfGWWoeR3GznjipK09vEKHKoOuh8uoP8AlLrkrUxy6T/4eP8A/K4Jj1U9Gw9Bc24cf+RXH/5tjqi9pfEOZcW+2vhnDOVK1dTn+NcfUMXK6+Xa194+zdKjqelpduaifaVlm76rhJeZkvIWe4Uerr6fe7h2W1tLioukiCvmGowyNSCQfrOOO+vf4de9XbjuVgVO2zXDSREKUBR6FSFIUgdxAp2Yf+Svf+yy7gwYockTKuyiMnclAW8/EeaaQlayltJUtYGpIA8z03i1ee3Kfj0mg76ZYqlr+K2u0kpSMSKSe4EEn6scfVJ+1t+4rWPKsMf4YsaqT6S2FTKvmHh+tkFhZQpbXrxuS2HVNLUhJKddCQPh1zQvp51zbvqS0ZHpxE8INPdL3Y/Q2b+YH0Svl8i53RJYwa6Xsr1hXto1qRXPjheX+3N+58k6Ocf5l/HnnjFQ+Hj/ANVCOth6I9QFFTDL/wDREX/5bGlfWf0IY0W7tf8A6Auf/wA1xZZ+6F7RfcR7hqf2uRuIuPEZbI46wPIaXMtcwwWiXV2thGwdEaOtWT5RTJsVOOU8kb4pfbHp91Dcnc7dddL77vS2Q22AytDEwfxxrpJCU+NlrXSeFcVJ6KeovRnSDby/UF4LaO7uY3hPlTvrUGYk/lRvpprXJtJzyGRpWZX/ALbv7mbEZiFW8c5RFiRmkMRYkbnfiuLHjsNpCW2WI6OU2m2Wm0jRKUgADwHSIOieuo10rBIF
HITxUH1S4utvWH0YncyPeQNIxqSbK5JJ5kk21ScSj4w9i/7h+N+3n3V4Hf8AGFzPyrl2l4pxfFo9nzJxNaaVVRnasmzV1qXK5Kkwa5DtVWsxnQtxpx9MnRAXtVtI2vTXWMO2XlrLDIZZ1jUAyxmoDamz8wgZAA8K1wtbp6jekt11HtG5295CtrZSTyOy21wlGaIJFUCAFjqJIoDSmdOdnf7T/tQ5W9r/AANyhR81YzEw3NOQuR02LlO3d4zkjisTo8bq4VFIkW2KW93WbnrWdZkR/XUttACiAXNOnXoHY73YrSRtwj8u9eaoFVbwhQBmpI46ufDFQeuHW2z9a7pbr0/P5+zw2tC+h0rIzsXAWRUagUJnShNacK4HP7oPtlyPmrhaLScYUjd/m2P5zS5HWVbdlS06pUMQ7antkGffWFXXNobiW3rFKnkqWplISCdAWjrzZtw6o6cW12+Pzb6OdJFGpVrkysKuVUCjVzOdMsJ/on1fs3QPWz3+9z+RtE9nJDI+iSSh1JIh0xq7k6kC5KaBjXLPEMuFeEudKb2VcpcG5fg5qs7FdyNV4fTqyXFJzVlXZHVJsIRNtBv5dLWLdyGwmNFMiQylICVqICiesum9i6jtPTW96b3C38vcWS4SFPMjbWJFqviVyq1kZgQxHAHngl1x1f0RuPrntXW+y3vnbGktnJdS+TOnltBJpfwPEsjgQpG1UVialRmKYYfsD4M5g4FsOUp3JOLHFm8hi4jDpXUZBjVwqZLrpN+uUlCsdurVccRkTm1EubArcNpOh0w9IukOoemJ75t+t/IWdItH5kT10mTV/hu1KahxpxyxI/mR9RujOvLTaIulLz5traS4Mo8meLSJBCE/xoo9VSrfDWlM6ZY8e/D2953zi7hWRcc0hyXK6CRb0t1Hdt6WsfkUc4MzoMtc7IbOtZdTXT47qA2lxSyZZIToFHrb6udFbp1KtnfbHCJbyLUjrqjTwNRgauyDwsCKAk+OtOONX8uPqf0/0Idy2nqi4+W2qfy5Y20SyASrVGXTEkhBdCp1EAfl0rUgY1cg4Z52zH2Q1HEk3CyOT6Gwo6pGPnJMU2y8ex263VU5m1VkC6Rv0KINNKbVKDilskhACkp6h7h0/wBVbj6XxdPXFt/68haNAnmRZxxv4Dr8wplHpFNdarkBkME9o6y9Ptm9erjrKzvv/sVuUllMvk3Hhmmi/MXy/JEprMWYER0owqTQnBz9gvDOfcRcP5HjPJOOnHcjsOS7a7g1/wCqUlwqRUy8Zw+tivh+hsrSIhT0+skIDanEujZqUhKkkkfTPY936b2Cax3mLybhrt5ANaP4THEoasbMOKsKVrlwzGA3rl1X051v1hbbr0zcfM2SbbHEW8uWOkiz3DldMqI3wyKagUzpWoIEbffT7JeVuQeYU53w5iDeTtW+KwXM8Qi+xaiFZkdKTWh7XI72oEpuXSJhtD0ErPrx3N3dY1RvUXoXed43wbtsUAlSaMCTxxoQ6+EHxutdSBeANNJJ44tj0Y9Wemem+lG6e6ruzbyW0zGE+XNIDHJ4yo8qN6aJC58RFQ4A4HE3lcW5zN9mzvFrFOXuQl+3irwQUBsapB/uiLgMKjVUC0cnopAGp7Po+v8AU/THbuDmw7uny7269PQi7II//WQ2xYtGpf8AEEIUrqrp+IEV1aedaZ4qex3ra19WH6mM3/qU77JceZof/Ba5aQPo0+Z8BB06dXLTXLA9/as9rfOXAebcpWvL2Df2nXZPjmOV9LIOTYfffWyodnYPy2vSxnILl+N6TL6DudS2lWugJII6rPorp/eNimuW3SHylkRQvjRqkE1+BmpSvPF1eq/WXTPVlnYpsNz57RSuzflypQMq0P5iJWtDwr34G/Ifsg9z15+4jC53quMhL4mh+4Hi3NZOVf3px7H9PFsdu8RkXNp+hScsZyVQhsVb59EQzIXs0Q2oqSCHu+nd5m6uXc44a2Xzcb6tafCpUk0LasqHKlTyGGPb
et+l7b02bYJrrTup26eLy/LlPjdZAq6hHozLDPVQVzIzxb17l+IY3OvBXJ3FC22VyctxmW1ROv6enDymuU3b4pNWora2txMhgRlr+ZOrYUCdCerE3nbhum2TWLUrJGdPcwzU+5gMUt0tvDdP7/a7uldEUo1U5o3hkHvQsOeeKyP2qPZJ7lOAPcJlubczca/2hilrw7kGLV1p/eOAZB9RkE3NePbaJA+hxbKryya9WupZbnqrZSyn0tpWFKQlVcdJ7Hu+z7jJcX8XlxGBlB1I2ZdDTwsx4A8qYvH1F6s6c6k2SGy2i4864W7VyPLlXwiOVSauijiwFK1z4ccTd/cT/bja93lFSZhg1rU4xzNhNbKrKuXbtLRTZljinX5zOLXU6K27LrXINk+69AlBt5tlcl9DjZS96jRHqXZF3pVmhIW9QUBPBhx0k8qHMHlU5Z5Buhuqm6YZ7a5DSbZKwJA4o3Auo4GoADDKoAocqGkeu/a9/dAu4bPFg47y1vA27HQV87mLDhxrE/5lEo3H6WjOX4RY9VCX9Woa5JWBo2XRt6RDsfUNPk2VxBXh5g0dtaaqd/CvdXFuL1X0br/iKPGbvT8QhbzeFKaigPd8VO+mOif9t/8AbVrvZ5hFw5klpBy3mbPkV398XlWmT+g0kCu9ZyBiuLKmsxpj1bEkS3HZEx1mO7PeKCppCGWkJcdl29NjgZmIa6kpqI4ADgo7h20FTxpQAVz1LvcnVF2gRSlhFXQp+Kp4u1K5mgAFSFHtJNzPH2DNUoS/t0CT8qT4FXiVd/j5dYbhuDTZDGzZ9oSI+aw9mIufubYJl3MPsw554h42pkZDyBm+KQKzGaE2lPS/qUxjJqKweZ/VMgsKqnibIcJxe6RIaQdugJUQCBngluraRIhVyMhlnn35YbbS5gsb6KWc6YgczQmgoeQqcQX/AG4+B+U+C/aZxjxLy1irmM55TW2dSbHG2LahyRxlu8zm/t6tabHFbW8p5Ik101pzRqQso37V7VApE3a4pbTbxHMNMilq5jKpJ4g/pxB3y4tbzc3uYWBtyFzzHBQDxp92LV8c4hkrZZlXMSWGiAr6GEGlvhQGobmOeoDFCgCToDoB36izXskjFLVS7DnyH68RHubWAKskiRA/tVqf6q0z9/1YrA/ek9pfM/up9unFfGntnwGRnWUYpzNDyy+x4ZHgWLxaujRg+Y071gLnLMixutnrRY2sZoNIecfAcKtu0KIXr4M4ozM9xXPsGR4Z0w0dPy/KzNc3AjS1eOitVi7mopqUioyqeFM8c61V+zr+68YTWLf9Hriixh8SI78N/nziD+24zEsuOS/qamn5SsFuR5Li1eolqK6VqWSUnUnoaUkAoeHtw1rf7eza0YFzz0mpp30xd9+1n+0BZ+0LNnufOe8gxPJ+XYtVOqMHx3FFS7Kg49YuYzsG6u3b20gVkizyuwqZDkAfTx0RokV+QlLsn1wpqPLIsSlnNFHPG8SG5osQND9ZwRP3tuCOZfd1xTwvw37dcJTn2XY7yI9m1vUDJ8MxYtVFdhmQVEmf9dmeQ45WvOIm2qUBlt5b6tdQghJIQ7jrzYts3VbTcJxHE6kKdLvVy6KoIRWpXxeJqKKZkYboOltxn257+CMswlij4qtAwc8WIHEqPfU5Y1PYF7fuUvbt+3jbcX82UCcJy1PId/Iuse/W8dvyzVTblqwbkqtMXtrynkfVVyvQS23IWpKpuigCSOqn9UtwsL/ZtyvreUtZNNDGxAZCFCxibJgrE6VMS5ZmauYOLZ6C2y/2vqXbdr3CHTexQzPQlXzLEx0KFlzJEhNeERHHFNvuT/Z95wazG45d9nMGPkWJyr9V1AwOFkNZh+a8fzZD4loaxubbWFPV2dHDlBaoSmZbE6M2ENBp0o9ZbV0n6tbFuu3mz38GMxoql2UypKrVWrABmqaeKqkGtSRUgBetfTTcNm3VZ9hIeOYeYEBEbRGtSqkkAgcVoQQMqGlS0uKf2ePfd7gO
RK619zMq048xllyMxkGa8h57W8g51MpI7jr7lditbXX+USn5hceUGjYPwobRdW4C6pJaXN3n1T6N6b29otgVJboqSkccZiiDEUBclUoMs9IZjQDKtQv7f0N1FvF4H3dmjhBAZ3cSOR2KAWz/AKxAFa58D1mcU8bYtxPx5hvGGCVQqsRwChrcYx+Itan3Wa+qjIjtOypDmrsywkKSXZD69XH31rcWSpRPXIe4b5d7xfTbhfHXfTSlnJyGomuQ5AcABkBkMsdVbJa2VrtkNiiqILdAiDiQoGWfM9p5mpOeHZe0v6n+nqace9Vt9bqUN6AvLiqZeW2EEpSQ42k66+B79KXUsb7naBLV2M6tqCj8RWlaVIGQqan24+w3kEFxLJGo0OKMTy1VAIOfPP245W/ez+2H+4Dyf7ruWvcLwvw7PsMImZHil9iPIEDlniTErFE+kxXHIn6lVxrvkaiymusqu+pnvTWI7b7bjSXEdlNrV176Tde9JdPekNk/UF4IY7KKVbgtHKwVjNKxWqxsJDpIqI9eZC8TTHOHX3TG/wC7dfXR2yDzGuHQxUeMEgRxrXNwVFeBbTwqOBwFq39qr9zXnm9x6t5vsrGoqkylri33LvNMbkZuqiSltuWUyprsbyXPZglvoCiGj9L67qQFuISd4yuPX/0e2OBZOmw1zcXKsyLbWjQ6jUnxtLHAFq2bEhmzqFY5Y1p6YeoG6MRvLiKCEgEzXAl0jIeFUeStBwHhGVKgZ4uK5J9i/KXC/wC3XyD7LvaHRTuSM75Wscars3snrbDcDlZbP/uvHb7L8ilScxySopIcadU4q3VsxBNdebgKaaCnVJW6aw9PPVuTfPV263bqSf5bbrmEwxRmrJH5RPkxJpUliS0jsxChnYmi6guHvqroeHb/AE8h27ZYvOvIZBK7igZ9Y/NkapAFAEUKCSFAGdCcPv8AZO9lHM3tJRc1XPOFHBeT8m5Mm3IoDc4xkiWMRxvEK5ijnv3OJXF9TPqm3NjYpSyiSotoSFHQuaBm6+6i2/qD1c2m32xpJPk7RzUo6KrEsznxqusMoRQR4a140wP6S2q42f033F75URrqdKUZWYqNIUHSTpIOs0OdKZCuJjfvV+zflP3m+0qqwjhLEP715VxDlnEs4xegTfYvjLk6ImuyHFcjiG5zC6oKJhlisyVUpSHJSFOKjICApWiTZfTG7w7ZunnXTaLOSNlJoTQ1BoQoJ4inDgcIm+2Et7YeXANU6OGAqBUZgnMgcDX2jDL/AGx/a97s+Jv23eVfazzpxQ9hXJ8Rnn7GuL8c/vjj7JWcjo+QMLl29GoXWL5fe49SuyOQchs4q25UqMhtKUPrIQ4VA3cXVluHVEF1YsZIWKFjpZaMtQRRgDwAz92IFtHc2uwS29yoVwG0ioNVahrkTzJyxU9+2/8Atje+b2Wcn808le5PhZri/Dr3273OFU147ydxDlqZeWT+WeIcmraoQMBz7KrVhT1XiE9/1nY6IyUxilTgWttKxXrTdJJ0RLb27gXHnwNQ1GQkrxpTivDsGCHpbbSL1dBLKrGHTIMqH8PMDOmdK9pGBr7R/wBuv3ZZp+4d/wC1P8s8aQca4ZreVOVOQrfK5ef8XXq4UhWOZXf8f1UXGsfzK2yJ942MqrQhSYmyMyhS1qCmikytu6k6Y3Hop9n2a6SSSCwVHokgAPlgsfEig1Gpsu/sOMbrZN/s+sU3HdLd0E14XXxIajzCF+FjShoufDLtGOn+snUlLyRxJiNnYS3YOY43kmFro269h2rlIyPGpElbFhKV+dGSqZWtJbUk7XHHFIOu5OnOnpMlps+7WrNPI0t/5sXw+Fx5dVqeWkqApHHMHiMP5snv+lN03URxlo71SHZjrAifxaV4HUJKPXMALTniiX9n79u/3yexj9xuNyplnCbkL212tbzBxbfchf8AVHh6xmIwWzjTLjBcgXiNTyHKy6WLTKcTow6wa/1WESFOLbSprQdp2e92DxLM0gFV
zFDkaZjh2+7sxQ9ztF5HK0Rj1AE0zGY5c8dbmR30OZYPuRIaFRyneha/lKjtOq0JSRt3FXh59RLrcozdeZajhTxZj34l222SC10T0FWPhyIAPLHOH7t/Zf7peR/3hvat7ssL4zn5jwNxbVcUMZnmjeX8axJ1MMZy/PLS7hNYpeZVX5Zc/p0C7jrSWK6Ql1Lu1JUULCZ9puiSaXumPmA8geHKlMsDrnaXgBis0rHTIM2VeebVP2HuxdFy3xc7eYbkeRcVVdo9nbGPXE2jxhK6uFVZFkLFdKfqqxf1tjV12OyrSybbZ9Rx9mEhS9zqkIClh6tNylii1SDzEpkRxHZnwb78V7f7Fa38pFm3k3AOaNUA9tK5juoKYoR/aj9lPNftAxTmA+7HioYdyvyPyRDnpau7HD8zlW+N0lGzIgWv9xYtc5PSSkyb/IrUFoSi+laVrWgJcQpUrYbT/lZHmAMjP2gmlOJpWmZPHAfq+9kF9DFbl1ijjpwZRUnMCoFcguYqO/G9xf7NvcVx5+7Xzd7hsG4pZf8AbJzlxdleK2eX1mWcc1bVPZ5HxzjN1JjS8NkZLEy94TOW8KaZ3sVykf8AOIkLX6aHViG1pPbbo8yr+Q6nOo5gcq14jsxNTcba82GO2d/+djYGlGNaEjjSnwnt5Uw3f2j/AGJe4n2q8Zc+U/uY4yTgE/McnxGZjsF3K8DzJu3q6+lu4dotxWD5PlESM209LbQpuSppSwv5QoA6GOkhLYiSO4WmthTMGtAewntwueoUkO6NBLZNq8tWByYUqRTiB2csQO4q9o/vE9tLvv14kwbjCVZ8X8tYlnFVwjdQuQuPIxdtV202hwyWmut8xgWtHMkYHk8lyU/KSx6UitaQS6fSUZO3bVvO3Lf29tFW0mjcREOnEmi5FgQdLZk0oVHHLEXeN96c3ltovb2cLuFrNGbhTHIfCAGfNUIYCRBQCtQ5OWeJ5/tv+12fwZ7X6LBuWsNpaHk2zyjMMqyKseexy/kRpku1XU1TEm4pJlxUzVTMWpYDqCzJdDaVJbVtcSpAZemNtk2vaUhu4wtwWYuPCaVNBmKg+EDn3YRevN6i33qCS52+VnshGixt4lBotWFGCkeNm4jM58DXEe/ZP7Vecfbp7o/dW/fYGKrgrku3trfAcph5LhkqNPTT5vay8Or143VXsrJat44zlkzu/CYaZVHWhWhW2Do6Y23cNp3m9aWMLt8zFkYMlMnJUaQdQ8LnkBl7MSeu992jqDpnalhmL7zbKFkQpICNUahzrKhD4414MSag8jizb3r+zHKvc1+3fyxhfA2LRs65zurzjiXQY29k2JYq4/BpuQcbtshcN3mt5jmPxhEoYch3a9LbU5s2thSylJh9c3u4zR/w2NSbd9Jy5kEH7xiZ6S7ds8d0d9upAl7AWUKTTSGUqSe2oOKO+MsP/e79t+H49xDhnFmHVFDhUR2uqoUu+9tOQSY7Ds2VOWl+2OfyzKV9TLWQouE6aDXt0OsI+v7ewjtrSFflUWi1MNacc6uDz54J7zL6L7rus+4bjdStfyP4youtOoADLTEVplyNMSaV7cfeF7hvfX7KeavcFxrXqwHh/jDAf+pF47kvGD1JE5LoqbKM4uHYuH0uWzrGS65yPOgwA7DguR9YzTgUYzQe6lTbNvN5vdneX8VIIoV1nUhAkAZjRQxPxkCoFMqjIVwHg6o6T2bo/ddp2S5Jvbm5k8ldEwYwlkjWrsgA/KDNRmBzIpqNMSb/AHfPZDmvuX4IwFv294HT5FyVi3JkS4FJEnYfikmThVnjd/X30hNtkdlj9aPSt01iwyqQFOoQpSUqKR1v6n2u53SySGyjDXEctfwrkQQRUkDjTngd6bdS7f05vEtzvE7R2M9uV4O9GDKykqgZuGoA059+JU1mO5HM44wix5Bp36LPZWHY1JzankS661eqssepYLmRV71pTyZtTYOw7hTzZfjPOx3Snc2pSCCbE2q6uDbx+cNM5RdQqDRq
ZioyND2ZHlild9trNL+cWba7ITP5bUIDJqOlqMAwqtDQgEc8RX9xfH2QZxwHzriONV5tbq/4zy3H8brjKh1/6rkl5TzIFLWpmWciFAjqXIc3lbzrbaNgKlAHXqV1BHPfbJcWVouq8mgdVWoHFTzJAFchmQM+7Hzo26s9r6qsd23J/L262u4ndqFqAOOSgsaAFqKCchlnjmhift3++7H3Xmq/i2xqHX/TRITXcrcXwy96e5TSXzD5BQHPT3nQKJ2kn7eqLh6E62jqYLZlrx0zQiv1SjHXd16v+lM9FvL6OTTw1Wty1O2lbc078S29u37THJasvqs29x0/G6vGauY3dSsHgWqcnv8AKpbD31KK+9mxm10cGnlvgKkrRKlvSG97Wxsr9VLX036Y37bgl31CYxao+oxhtbOQa0Y/CFJ+KhYkVGVa4rrrr1+2ePZpts6KWY38kZRZ2XykiUimqNT4y4GSVVFU0arU0mfPvW4+zfmH245pxvx3SJyLLbWzxFytqRZU9R67dVlNTYzSJ15YVla0GYUVa9FvJKtu1OqtAba692a/3rpiaw2uPzLxmjIXUq1CyKTmxVRQA8T7Mc7ekHU20dMdfWm779N5G2xJOGfQ70Lwuq+GNXc1YgZKaVqcs8eP2svbhybwJxnyfUc24unDrO8zSuu6SH+v4vkBnVrFJFhOSS7jF1dMRwJKVI2OrbcOmu3Tv0renexb1sG3z2u5w+VNJPqA1I1RoArVGYDMc88Pfrb1h0t1jvVnfbBdfMW0NoUY+XLHRvMZqUlRCciMwCMRf9/H7Z+SZxyNf80e3AVUudlk42+VccyJkOhkryR7c7ZZDjNrOfi1KlXcsfUSo8p2OpMtxx1Dqw56bYDrH02vr68fd9i0GaQ6niJCkscyyMaLVjmQxHiqQTWgdfTD1z2ra9si6a6t8xbaBdEVwFaQCMZLHIigvRB4VZQ3hAUqNNTC7HP27PfJzLk1TW8nMWGOVcFLcf8AurlHkCFkLFRXKUyl5ior4N9kNtIfMdgemy22zHWpCEuPNDRQU4eget93nRNzDpAmWuaUSaRlXSod2rQcAADQAkYsW69Y/Snpuzkl2Axy3UlT5VtbmEuwqQXdo40Aqc2JZgCSFbgegLgn230PAeAY5xniHrSoVIy4uTKnoQixvbea6qTaXUlxsqZW5PmOEoQlSkstBDSPkQnroTY9usen9oi2qzr5MYzY8WYmrM3eTy4AUAyAxxp1Vv26dY9Rz9RbnT5mciiD4Y0UaURO0KoArxY1Y5scVScJexL3UYz7+a7mDIeJ3qzjVHNed5U5kTuX4A8EY9czcmdrZ4p42VP3y/qG7Bk+kIpfRv8AmQnRWlKbd0pv9r1qd4mgA275yV9fmRHwsX0nSHLZ1GWmo5jHUm8+ofR24elo6YtbstvX8Nt4vL8mcfmIsQZdbRCPIqc9Wk0yJyxJ/wDdX9lXuS9yGXcN2XCHG4zWuxbF8nr72R/duCYwIMyxtKyTDZLeYZPj70r1mI6zuZS4lO3RRBIBmeoWzbpvlzbSbZF5qxo4bxItCSKfEy8acq4G+jXVXT3Sdjewb5ceRJNJGyDy5XqFVgc40elCRxoezFz3tk4ussB9tfAOF5dDjQr/ABrhXi7EMwqlPV9iKnKaHBKKnyOhlSq+TOq5aodnEeaS6w69HkoTubWtJBJ/b2uINst7K5UrPDbxoVqCPCgBzFQRUUNCfbhE3tbK93+93Wxk12dzezyI4BU0eVnFQQGUkEEVAPaAa4538X/af90vFfv7wTN8I4sYtfbtg/ukwHOqTMW8941iiHxbT8lUeUqccxyxzZrL3pGNY+0uO60IKpMl2Kostr3oCquTpXdLbfUmgirYJcqwbUgogcNwLavCMuFTTIY6Kf1C6fvukZLa7uabxLYOjIUkNZTGU+IJo8TZ/FQVzORxbH+8P7aub/dL7fOM8G4HwkZxkOPcwxcst6z+5cQxcQqROF5bUuTkysyyDHoD+k+z
YR6TTq3jv3bNqVEMXVW1Xu52scVinmSiSpFVGWlhWrEDiRhJ9P8AqLaNhv5rndpRDbtAVB0u3i1qaURWPAHlTLCh+3dxTnXty9qHHHGnMWPN4vnWO3GdyL3HP1egvXoLFxm97bVzybDGrS5qJrcmtmNOEMyHFtlW1QCgQG/oza7+z2KOzuUKXALmlQRm5IzUkZg9uKr9Veo9n3Pq6bcbCUS2TrGA2llzWNQcnCtkQeI9lRiZ2ZsRXaqH+rRJlFBciR5/6lY1siIpcJaEltNWzJabM+XJ10bQnUAnVWgHTjt4kU/llXfhpBBz/eI4Ac6+7FUbnLbXFRWiE11EEeHtWtNVeVMsc3fv2/bivM+5HyTmv23wq5lWUzBa5FxnOsY9XON4Wh+oZJR3lg/HqnpuQymzMnR5DkYJmuuuNuLS4G2656w9Mtwvrtt32Io91K2qSKoSrkkloyaLmeIYg1q2o1oOg/TL112naNsj6a6vEkdlbqEhuAGlpEAAscygF/AMgyBhpoukaamC0D2I+8zki+poXJ0S0oa2O20hvIeQ84h5GmqrHFtNuprK2DeXtq44phgemyltllwoSlTjadFBbtfTbr7fblI90Dx26ZeZPMJAoyqFVXdiaDgAFqACy8cPO4+uPo/0jYyTbAY5ruWreTa2zQmRs9Jkdo4kAqc2JZgCSqtmMdEPta48x/298f4px5hHru1uPJW9Llzg2qZdXM55Uq3uZwG5KZE+W4opQCUMshDSPkbSB0DtvTtj0/s8ez2lTFGpqx+JmJqzN3k1y4AUUZAY4y37rPdOsup5+pdzoLiZxRB8EaKAqIvcqgAnixqx8ROK5PbV7GvdFC/clic4ZLxkio4tn80cvZg/lRzPjuS2xQ5YxnKqef8AocPLJGQluU5cxwWhDLzQc/MQjarbQlr0rv8AtnVjbtPb029biZg2uM+FtYU6Q5bOo5Vzzpjsi69Q+jt39PYtgtbwNuxsrZDH5UwAdPK1jW0YSi6Wz1UNMiait5P7hftpe9zHs95K4lwevj22ZQIlTk/G0cSqqH9dmeMWcexbYTPt5cKsr3sjrkyq4PvvstMtyypa0pB6J9QWFxum3ywgVuWoyg0GYNQKmgFRUZ044EdLdQ2Wy7rBduwWyWquRU+FhQsQoJNDRqAE5cMVq/tgcM+7D2rcb+4XifmrjdGI49lBg5bhL7Wcce5GpWVTKeTjeUQXo+J5jkDsdVhWQqpbbjrTLaREcCnFFSEpx6G2PebFJ7XcYdFu/iQ6kNWpQjwseIC8QBlxwD9ZOruld4nstw2G787cIiY5QI5VpGG1qavGoNCXyBJzFBxxFL9sL2ee4v2/e42zz3l3jpOK4m5xlktC3ZjMMBv99xYXOLy4UP6HGspubEesxXPK9QshpOzRSgSkHT0R0n1Dsu8Ndblb+VAYGUHXG2ZZCBRHY8AeVMS/WX1H6K6t6Tj2zYLz5i9W9jkKeTOngVJQTWSJFyLLlWprkOOL6cktIlfDbtkOn6hohiU2dAC0v5mVKGviDqnX7urbhLSN5R+H9OOWnhVVEiV188UH/ud8Jcr+4nO+NbnibEE5LEx7Hb6Ddq/uHFaVUOZMsoT8dHo5BeVLsj1WmVHe0laRpoSD1X3qH0tv2+zWj7TB5yRRuGOuNaVK0Hjda8Dwrjon0N686T6RsNwi6huvlpZ5omQeVM9QqsGP5UbgUJGRIJ5Ya3uY4y5kzH2me37hrEcZRMyDGajAWc9rncnxCpTWycPwJulVXLl3F9Ag2KP1mSohURx9BVGCt20pKpfVGwdQ3/R227Ht0Be6ijh85Q8Y0mOELpJZwG8Z/CSPDWtKV09A9XdGbR6l731VvF2IrGea5NsximYuJrkuHCpGzL+WODhTR6UrWh+9leH2fC/BeO4ZlLcGty121yG7yGtj2dVYmNLsLV9qE2ZtVMmQ5Tn6NEilSm3FBKtUanb0ydE7JebD05DZ3yeXelnZxkaFmNM1qp8AXgT2csJP
qp1PtvV3Wlxue1y+dtflxRxNRl1KqDUdLhWA8wvSoFRnzwLf3COBOQPcFT8YT+L8dGS5HidhkkKxifrOP0pFLex6t5Ehb+Q2tTHe+mnUyUpQhalj11HbpqQA9SOmdz6it7SXaYvNuoXcMNSL4XCmtXZQaFRShrmcNvoj1vsfRN1uEPUM/wAvYXMcTK3lyyfmRs4pSNHIqshJJFPCM64bXInAXP8Anfsf404kGBrXynjVrSV0/GjlGIBDdLjMi+r6uW3dv5EmhdSujMNXpplFaVLKAn5etu67D1HufprabF8v/wCuIXQGPzIvgjLqp1l9HwaDTVUVIpljX0/1d0VsfrduHVovKdM3MUrLN5M+ckwid18sReYPzfMFSlCADXPBh9gXDfIvDHD+R4vydj/9sXdjyTcZBDgfq1Fc+tUSsWw6uYl/U0Fnaw2/UmVb6PTW4HU+nqUhKkks3pbsW69OdPzWO8ReTdNePIF1I9VMcSg1RmAzVhStcuFCMInr91V091r1hbbr03cfM2Me2xxM3lyx0kWe4crSVEY+GRTUDTnStQaC79xD278w842HFEjizEE5QxjFfmDN25/cOK0hhOWsjHFwElOR3lQuT66K906shwJ2/NpqNQfqv0pv/U09k+yW/nrCsofxxpQsY6f4jrWtDwrSmeGf+Xr1D6P6Etd1i6qvPlXuZLcx/lTyaggmDf4MclKFl+Kla5VocWB8AWuTYd7aeIeKsnp26u8xLBqqgvYDkiDYIYmRm3G5DCpMCRNr5SRu0JbdW2r4np+6Q2SfbOn7O03BdF5HbqrrUNQgZiqkqfaCRin/AFG6ns996y3Lcdnk8zbZrx3ik0supSaq2lwrr7GUEdmKYObf28eZsNz6bmPt1Qu5x1+ydtqGDU5HExrMcMckLcdXBjyJ82oalQYJc9OK/GlKlKaIS43qkuLo/qf0o37bt0e+6V/Ns2csiq4jlirUlQWKgqOClW1UyIyqerugv5g+k982GPafUCsG5rGEkd4mmt7jTQByEWQq7Uq6ugTVmrUOldPiv9u/3G8r57HyH3BKt8fx5E2I/kdpkGUxcpzfJYUZaPVrap6LZ3i478hlHpCVNeQmOlW9DbxT6Zi7J6WdT7zuQuOp2aC0DDWzyCSVwPwrpZqV4amI08QG4YI9V+vfQ3S+xtZ9CIt1uBRhEkULQ28THg76kjrQ+LRGp1EUZkrqEwff17YObeZ2uGcK9v8Axo/ZYNgVJbtOwXMmwfGoVZLeTTVNNVx28oySkkPtVdNT7W1JSptKHiN27UB69Uuluod7jsNu6ftlk2y0jbJZIkUE6FRaO6nwomVARQ4qT0D6/wCjOlZt43jrK/aHfdwmQ1eKeVmAMjyOWiidfHJJU1IJK1pTF/Ptz9gGWcE+xjiO1yW9qWLii4+xyVkuMRn4UxuqvZ8ZuZetRLaC/IgTnI13NeStbS1IeSN4PfrLpXqe2s5LPoswSLdwQKjPy8wDVIKcxqLANz488APUrojc90t9x9WPmIH2e7vmeOMV8zyDJ5cL14BigQtHSqg0PDFDH7m/so5/5x5Nwnk7ibjSbksGPhScNyew/XcUooLEynurGyqHkvZLe031i5kS+Wje16iUpjpCiDpqE9Seid533d4L3YoRM3y+mQeZEhXS5KmkjqTXUeFeGG/0C9WukOjenbzY+rL0WpN75kP5U8uoPGiuKwxSAaSimjUPiNK8ugT20cqZSnjXjhnP65ymzZGFYq1mVSudDtBAypqkhNZBGTY18yfCntNWqHQl5t5xLqdFa9+m99ku5NthkvE8u+8lPMWoOl9I1DUpINGrQgkEZ4qFt9sLbf7pNql83aPmpfIejLrh1t5baWCstUpVSoIOWLGOPrpq2LimWnJKVNBR9FClFOo8ewOnSNutq0AGogUPPFvdMbhHckiheq8hU/6MfMzhs2EjVllSC0nRW5J11TrqD28+studok8RyOJO9QR3UmqMEEDAxVWvJWdja+2h
7jz8+jXnKVzIwrfKurZVwzMclOLpqspZjNj6GMVFLSToA0nzIPw68FGkVJ4Y1zuRcPSlAx5Y1MjypyBHc2vpQEpIASlCe4H3eCfPqfaWYkYZE4C7lubRRkVpiFPI/IE2SuQgT39PmTolwpGnj5adWBtm3RxAHSMVTvG6STMQWNMRQt7Z+weWVvOr+Yj5nFEeOvmfPpthhCDhywnzTF2JrnhvtqUlZGp8de5J7HsPE9SKAAVxErnlhUbeSpJBA10/j4fd1gVpnyxuVsqcsL81H/PytvdLbiWgNB2DDbbOmv3o6jqToGJFE1Gnbjz9QBqNv2diPLy06+acZ0yyxsxHdziRpr4a/wAfI/wPXxgAKrjKPJqcsF7H3ov0aUKUhDif6Ton+OvQufVry4YMWwUx0y1Yy2jkV1SUpKFBHdRTppr2Hj1ihcdtcbZFjOeWXHCfFqhMVuQkJQk6716BP8CdD2+zr0kxQUPHGMdssmYFBgnVNS3Dr2m0KKnHUuPvLHcJB+RpI7+OhJ+HQqafU5JwVhtqAaeGE2yfMeE6GvkLQ3KcT3WsHyQfgCfLQda41DuK43yMUQheGB9/ejjKy08v0Hkn8iRu7H4JmK8VfYfLoiLRWFeI+nDAOW9ZPCOP044wzeWrSCr0vWLa0bE7wfl3KOiVt6khSEnuNO3Wl9thpWlRj7FeXDHSDmch7Tjex1xcJ+JZQXS9CCAp8CUmNNbd13OqUp5LkWa0pZJHqBK0+G7TTpAvJVdHiuRpkB40qpHLhmp7aVB7MdI7ft17Y+Td7TI0lg6r4dQDqQACpBBVgCDStCOFaYk7iPME1wiHXTZcVLf5ZC229qiBtP5kR9bSk9vHb3PVR70485tJBHtP3EVx0N06G+XR54yGI7Ap+tTT7MSTwXLrepvIFvtdTPhSW1yWlpUyp9lW36mI824kFr6hlRGvbyPSlPQkq/Ds7MPlvwVrcEjiMuPv/TiVbvIf9yv1djHq3q1UJuS2VKkJfKtXmXI4CkpSNqFaqB8O5HWy0OgnMlTkfp3jGTzKZVJAVgfDUip7vceXHhiB/vTxhWL5hSZ9VtehT8hwHH5SUoKW42T1yWkWbah2CVzmXW3tPNal/Dq9fTrcPndvfb5jWe2ag70Pwn3UI9lMccfzD9OjZeoouobRdNluSEsAMlnSgk9msEN7ScQkZkuWTnqPoAH9J0ABBOo7+Q6tREEYAGOZZJnZvHSuFcQFBIcSlKtNFgdtU6H+RA621HDGk6z48qYMmFZHHr44DjKhK02hWvygadjorUa6j4dBdwtGmOR8GGDbb5YF1MPGcP8AXkEVDRlqcQp8ndpqAe/x8iNehotHLaB8OCEl4unza1fHyyzFFlS1xG//AJGQ7UTAfNsj6mA6T4gek4Wwf+wesrfb/KunrwYax9xH6cadw3Q3O3xDP8smNvZ8SH6jT3YcOI8fnN5UaGyFNIkKCS5qeyToCde+muvXy93gbXG0r50xH2/pubqKdLK3y1mlezvwZ8o9vacPr47zEt2ZuSN6d6htVpr+HUA9L1j1r/EpSjKEpww1b56PS9PQpOkvnFuNOR4/ViOOU136Y262Eu+qexUddEAa6p/n0z29155BqKYSpNqe0OlwdX0ywGPQtZEwhKH9pVolRJOncaaAjQduimuNUqaY1GNhlQ1wQIFc/VMB9SluuugH51FWhHfQa6nXofLIkz6eAGJUccluurizYxvpsZiw8j1dU6gtpJ0IHgrQdh181wxDSaVxtFpczkPRqdmGfcZXPpkuRni4jXU/NqVJPkRqdAO3UmG2ino60OI8z3FtVCSMCCfau3qndXddu5b7rh0bZQPFaz28R4DowkYhHLAzUZScB7J1trQqPV70R9T6rvg5Nd8NXD4hsn8I6IQVBq/xfdiPMAw0x/DzPacCW8KmJsGu845C3+3ZUhz5leXfYnQdEozVGftwOkUlwvZmcIG5SJDrg8Q4r+Ov9Pl1rbMAYzjYhiR24dTLPqxk
vNkhWm5OnYg/D+Y6itlgki66EHPDvqZzcCKidLQBKVuaq3AkEoUkFL9g8jwLcfXa38VnXy6gTLrbQp8PP9A9/wB2DVq2hVd/iPDt9vs/Tjenyfpq2LAWk/VWqkWUt3UlQiJUr9OjrPjufWFPKB8wPj1GpqYuOC5D9P6sTlYghO3P9WFKhC21oCx2KhodNUkajT+XQ67IIy7MGLPNgMHjHm3ShCm0abSCD4A7Tr2176dule5IrQ4dLWugUw+0euiYpt1JDbwUhY/pLcgBQIPhqlSu3UbSumo44meY5oGHLGeNXvF70SghSVlK1aAjVJ01HwB61yuKVxJgRie/7MEulqiyErBUoJ0Pft/j8B0HuJAcMdpHppzzw9lMLUwlXorCRodw10/1dD6gNxwcXUUoAcsPSqtmokZG/VK17Els6gkDT8I89dOosig8OGCVu+keLjSmDhgtcmwe+p9JQKyCSpOgCdOyR8dOl/cJ9J0A4cNntDKwcjLB4kw49fXlxRCFpQVfDwGvh8elwyNI9Bww+JCkUVTxGIm5fUWuW3S0qmiqr21hK3VsSZElbZUdPo4zLSvUO0alROgHRe3ljiTRGNc3ZnQf1iOGEvddyEclYxrFaChUDLvLCp/Tzw6qiJS4vGisVbVTdzFsoDrXoWEu2lKQs6rkSXFtt1jTfb5dR8dD1GnZW8VwUkNclWtB9VPtwMWW7uZqt8zawqPjfywB/VWhLV7u7PDidjO2im1lpmlSB/8AK+sdf9R4kaKM2WpfqPkknsnRI16FzkOaKFROxcsu888GrO7MKFNb3Mxz1y0NCOGhfwjDkrK1cdlDAZQ22B8qWwAnT4k/1K1+PfoZKgGQyGD9q8051yksx7fp9WFaSzCYYKnnkIVp31IB8B218+hF3cw2yF5GAphs2+ykmIWMHPAnzCVFVXyUV0kl1KV7lsHVTawklOvhp83VSdX9TlrORLJ8wp4ezFqdN7CIpkkuFqCRkfpwxD3HbSfV8vQrW5WuS3VoqICNVH1Fu2kl6RKC0/8Aibmnu4+Hj1yzd3vlbql5uZYzJJAsefhYs+qQEduliR7PbjocbX830zJb7cFDMZJCOBogCrQ+0DD15RUqTxnlLiQpcSRk8Rv0w2dSlFhDCWUJ11SuXJXGJV4FMdz4Dp49Spw3S24tmlu+4oCOJoHjooIy8bmI17I3zxt2FfM9Q7bVQzCyYjPgTGcz/VUSZci69uNHia1gwFKpXwhlp+NDaS854NzVp9VonUjRO51KVHvtB6qjad+trG9Wwl8KSqIwxGQkorJU8izeAf1sNPUe3zXTG9j8ZRmNO2MeE/YNQ9mDC/HdcmNshKgGVHuo6HXt+LX5Sg6diO2nXzdLh57jTH+E51yIIOdfp7cDLKy1QmWEhkbjwqvcf188OpmnQ42lUZBc1H5uxJPzEaK00Hj5dDbhNSVtKsfxECufP9WJDX6WsZtFPi7fv/ow4qjCmLCfWPWTsivgVspyZKkt+mhwsojOB2M2HSkLclj8tsD+pQPgOpmw9NxbpucLbk8ttt8BaSRwQpKBDVBqyLP8Kj9pgeAwDv8AeprOzlSyRJbqVQiqakAlhRjTkvxN3DH7MEuTGK/F6FuNX49UIQkqQtJE2Y4hL7ywCn85sPOahSu6ikeQ6Aeoe/TbhZR9L9OiK32K00liDVZpDRmoD8SBjXURVyoPADEjp6BbaaXedzZ5d0nOX7iioHsNOQ4VPbhtw8eMeRVtyJTSw1qgGPDLg3AAJG/adp9UH+XSZbWvkXdjFPMpCqf8OIsMiAASBl4gx/owYuL4SwTtErDV+09Ps9mM82sgs2CJXryZCUSWpjYZaSwlKwGXgpKgAoLbXrr0auHsYb0TRTyuFZZFKpoow0sCDSoKn7sR4Zbh7UwlEB0FTU1JGYp7CMFfAbxN3lcNmXRoZsKxmW0qeopcD8ZliWI2wgBxpaTKUCkkhSFI/wAo6vrobraDqfeYra4swm5wRnVJUFWVVfTp
5qQZGBBqCpX9nCN1Bsx2zapJYJy1tKQQnAhiV1V5EUUUI4MG7cHt6Osja2lCA5/UED8tfglWp10110V9nV0gsfDRQrHs4Ht/R78V4GUGrVNO/l9OGN+sKYttFmSVkNo9V51IHf1Aw6lxsD/MXuw+8dGNrbyb9JpidCkk/UcqdteGIN0pltGhiAqaAeyoofq4+zEPPeVcC5xKfRiXHbkGVWzJsXVSihpx0MV8Fo66KlCIH33UHRQbKVHsodVn6u7otxYNaa1E5eN2WvBakIo7yNTEcaDVwIrZPpht5ttwW8KMY9Dqp7TSrt7K6VB4VqOIOIr+26DJQ9mdUt95ypcoW3noKWtAufHTMTBsWVE/K79C6/FXr+P1GwewHSr6Txk/xFQSbf5RQy04mrlHHfp1xkfvrhj9RnVTZSqAtx80aNX8JKBlI7NWiQdmljhHz7JZFHy3x9kEB2Ss4szgzao8dTSULQq1iybkyS5+KMmvaQsAEL3bfLUEPDcrtm+bbFZ6xPbRRNQU00+YYylvbGB3g0x82LbWl6Lnt5dIimnvCSa1qxKpp79VQTwoMWfGIqOmTEbIIanSthHmwXFKY7/ApVqOuoaGJGhX9s/VXLFJmQOyytzUfXzx9ZQtbS0rBKkDt9qCobgdPDaBr1PtZXKFW/CPr/0Yjz6VcFfhJ+3GuplQO5IUk/HXtr8e3j26no+deWNLsCtMa64MN+VGlSGwZcRSvQfQ660pO7spK0trQ26g9uxBPbt0bs7x7chojkPwnh7xX7ftwDvtuivEIkBDEfEuTinY3Eezhj05ETNZsYeS1uP2+PuNkoQtiY/JLadVbnQVLciy2E9w4xoT206b7LdoZpAx/Jn5HUaH38KdgOEi/wBmuYYTFcA3lpzBUBlzyp+Jj3gjDSq8Jh16E2GJ2yJte4pQ/SZi3m5LaN2oEOVIQkvhCdNUPbVp81dHvnTKlbgCnJl4HvI5e0Zc8KJ2Uwya9vYt2xPQSL7BXP30OFaxr2JsV2PJZUh4I0Wy+godQdP6kqGo1/ket0MjIwZTl24gXFuHUxyCjcweIxEPkjjxLT65TLeh7qCgOxT5pOnnp037duGtdLYr3d9pEZLqM8BRGNuKcBTq2EqHzk6FKknXVIJ11BHRsXOXbhXeyJNRhy/o0Qsl4bVuO6NSmyBuRII+R7X+lt4f+7dfUnNaZjGmS0UrUZngfb/TjSMbI6Jl1mBazITUkH8qK8pDZbI1OoB76jsepkckUhq6hqdowEurOSIEKzLXLwmlR354El8mW0+tx518vrVuLqlqK1dzqT3OoOnRu3KtlQUwrXUBjFFrkcZKS/lRFJmPvLMdBKW45VoZb6dCG09tQ0g93D/Adz16WFWGkccQRJIp8Rw+mM0uH3DIdWVrUQonRSU9holtAGgCANAB4AdQjbRgaQMhiQJJSNZbxYR7rLXpn5T7H4hoQlWqtT2OiSNVFWvYfE9bobdV8QOIU9y5bS4qMM7ILWtjITBUwCio1dlfKlSH7uQgEtgjsoQW9qP/AEdepMKSMfMr8eQ7lH6+OI9zLHGggplHm3e5HD+yMvdgEX8+tWHXEna8Vlwr+ZJCiSon4Ho7bo+Q5YW7qSM108eOEJFtPnMGHHmDatO1SioEhP2E9+3w6JRqqnUw4YFSO7rpU8sIf6Y5Ee3R5CJMkalaiBtSo6HX5T279Tll1ChyGIBhKmq5thbgS7ptTyCsuFcV0fKvw2lChtT/AA62Vhy7jjAx3Go58sajlvYtqQ86HzsUFEEdiT4+B+HX0rGaqKYz/Mjo5wqO5bAQzvcdUXCg721IOu7boE9x4DrSLZyaDhja91Eq1PHDi41zJLdo42+tmTHcO5EOb+YwkE/+GVHe0e/ik9RdxtvyRpyI5jjiRtN5WfxmqnkeGJuULtBetQ0NqZQ6AFGDNWkoUdB/7CTSCPEdkr/n1X121xbsxNdPaP0jFy7PZ216iKKauw/oOFi9gQYDRSlSosjbqGJKdAvt
r+Uo9lj4FJI60Wc7zNnmvdifvO1pYJqXJqcDhp0s1+LPIeT6sOUA3Mi7iWJDW75RqO7bzWu5C+xSeiN2ivH4cnXgeY+nPC3YTlZ/FnG3xLyI/QRyPLBtrMdafbZkRXlSYTwPovKAK21DxjyQOyJDfn5KHcdLU1yUJVxSTn+sd2LAs9tSZRLGxaE8O7uPf9+Fadi5SwSXO5Tu1H9OnfXt5jrTDeVfIYlXu0aISznATucQjG4g3qlMhiisYlnPckIK44TEkIebYWjsXVy1I2BA/Fr37dMlpfyJEYVBLOCAOeYpX3ccVxumxQ3chLsFiWhZqVAzBpTmTwpzrgg+5HleJ7gKTCquNirePRcelKmvSHHWnpRkvQktIhxvRQhLMMNHcQfMadQek9mbpmS4mM7TNOBkeAFa1PaeVcEOteoX65NlE9nBZx2EZRTHxk4LU5DSopULnQnEOLvC4UBh+Y9+ZEjJGrXdJlydCWoqNNTtPi4R4J6eba/eRgi/GfsHb+rFeX21R26Fzmi/aez9eIo5Ql1Vk8t753HllRIHyga7QhIH4W20gBI8gOnK20+VlyxW+4axcHUOOFrGZCIxbUvsEFJXtGpUn4aeZ7dQ7tSa044k2TaKVxJevsWqqsbcblBqbaNBzUqAVCg6fDXVK5Omnx29K88fmtRhVF+0/wBGHuyuTbRgqxEjCvsH9OGlJ5gtqeQ61Clr2o1QjQgtLPxUgnQggda/4VBKNTrniSeoLxDpjY0GGYrN3b6ziqllcOTNltRjJaUQ0sSHUtlTyD22JKtfh263eSkCFUzAGBzzPcv501QeZ5e/EreevbTK4M4ux/kKDmkK/VfOQnZVeVttFKJjKXkqhkKUVhkK7+XiOlPZOrE3vc5tuaBovJrmedDTPsw/9Senc/S2y2W9NdQXMd6AQqZFdS6hz8QAyJ5HFaubcgXT6ks7VmM8FRXS24FhRWfkWSCP+G4B49N8floTTCG1vI+Hlw37fMgzGqc5V5VtLDB+PEyHI9BXRkJRlnIT7K1NuCpS9qmrxxC2ylyetJK9CGhqNehd5vgSf5GyAe5Hxk/DH7acWPJeXPCv1Pv9r0pZiaZDLezZQwjIyMeB7kH4m5UyzxK3FuQvbxhzya/H+I8Dlz4ivzlXNYq9tpeg0C5NrYKdkOlRGpUPPpc3E7pOpb5iah7DQD2KMsNnp3u27bsY7ncLW1jjrUqU4jsLfEQMSQrUcb5a4hjMODONY7shmNKaD2L1it8Wc0JEVaJcE7FBbKtQCreP6gD26TZL/crbxW9xNpBIyZhmOP0+rHUllsm0XaiPcobd5yoP+GhyYVGY/XXtzxFP3Qe1ePx3jI5v42qJVXx45cwqnJqAuqkwqGZavfTwrOhdWtclFQ9MUll9lfysLWlSDt1HTd0v1c19c/wm/YG90Eo37QGZDfvUzB507cIXX3pzHte3/wCYNnUjbA4WROIQsaBk56SaAjgpNRiKlK+uQuKpLKlem4ySVFKQBuTodSex6saKflXFIXFrSopj5Nq5bMt6OpxplCJDzaRv10CHVJHfUeXRiGYMtcLlzbkGlcZv0mIhBTIsUlZ0JDYB26/AdydOpKyseC4HSQIPibPGJuFURyCA/IG4j5iQNddNPh49bC8oFeGMIooPM7cPijENxxIYgM6JAOrvfw08dPMdCLuZoxUk4sHYNqiuqaUGHVPU6yzq36LIIHZpA18PEE6nt1FtJ/MfOpOCm+bIbWKoAApywOpk2cXVhtyQ8Se6U66Ea9uwGg6aLdU0gmgxUO4F1kKgknD8c5IzurxuqxuRl1tHxzRTztB+oPSI7iCsK2GKVqbSO/ge3Uf+F7fLdPdrBGbvgH0gH6+OPfxfc4LJbFbiUWJzMeslK9unhiyvIveB7c5vtpRhjFC9/c0jF2KpulTTtJTEtdv0/wCrKnbSAPVbKz310PVTWnQvVkXWh3drgHbxKWqWNSv/AEenhli6b/1J6EuvSqLpAbWV6j8tQZQi
BFkD1NwJPi1MB8PfThiuTHcviQHGHZdbIdjnbtnV8z5e2n4gU6oVp8fPq07m0aQEKwDdhGKZtryOIgspKdoOLOvbLzjieNxpwc+qeTNbQhKbFlp4NL7gAOI77dT49U/1n0ze35TTQaTXwkjHQfpb1zt3T8kxkQyeagHiANKd/vwdLC+hX0uVMjSo+kpW9CGkDRsHXtoCO3SzFaPaxrGwOXbh+m3O33GV50Zaua0HAV5YYs/eypX/ADsZsgkaOtrb7aeOpGh6IxUYfCx9mAlwShPjUe0EYBkSa3W47XJ3AKECPr376+in/T7+i1tEZaZZYVtynWCV6mjVOI3ck5iG23mw75K1AV4DvoB36dtpsMwxGeKz3rcw1QDliGWR3Lkp1w7ySoq+3xJ/0+7p4tYAoGWWK/urjW2WGQ33WTr+JR1+Hhp/DTTqfw9mBwI449qQNdfP/H/Q6de9uPrBa4yRm1OyGGkalTjrTY076la0p/lqrrzGik8seANaA54cT6nVS5Z0IK5UkjUaagvOdwdPh1GAqgHKmJFWB76401MuBe7UgfAePfw0+4dZ+GleOPpNDXC1ASG06q7HsST4/HT+XWiTPhjfGcjhbaXJc09FKgPDcSQn+fbU9+tLBB8VMbgznhhTZmsRNEOLMh7X8I77f5a+P29amjLZjIYkJMEAXicTL4D49w/PaO3sMqtmq9cRCzHjF1KFKKAClGhI8fPpE6l3W+2yaOKyj16jmaYsrorY9r32Kebc7gQCJfCOBJwIL20ahZBeV1S8HoEV5cZh4/gUy0VIRp3I7gdvv6JxIZIUklFJCASO84EECGV4o21IrEA9oHA4bslEmw+VAPpuJ2OKAPcHwSB4j+HUiMrGaniMD7kvJUDhgY3WPradW04ghW4p0I1GnjqeiscwK1GAEsHjoa92ENFFEk+hAmNqcbL7IZdSCp5hankBKU+biFKIG3qPcTssbODTwmv1HBKytVaVFHHWv3j9OHXkFNY0ldbxkr9BtBd+md1KQUqUStP/AHkr7FJ7g+HVfyvFNYrPkzFR78uOOjtteaz3Z7DMQiQkDszzHdgR8eZZdUl839es+iJbKiSVFCmW3UqVuI07bR36rrc9tSZXdR4szi8rHc3WERg/EhH1gjLFwePvu5hUJyWF6KH223v1JbRUiP8AUsqW684+pW51z/lUDZpqO/Sld2KqA4GTKGB7j9KYdLO+kgtYWDEwyIO8ggAU7uZriVPCPI2H01LaV19XrspDwCo81MZDrSmHElAikOfOhKHCVbh3PUPToQKj6Cr1OXxDs93ChyxDvLV5r2SZ7ZbhJoQiVfT5TitWPc1QxZfECKdmMHOWIReQeFLuA4w3KexudGyuhc0Cyy3EcWmWy0s/MUrgTV7gfH00ny6aOj9wO37+k0ZpFMCjDkQeH1ED7cKPqvsK7/6dTW0g17nYhZwSMyY6B6dzKzE+wV4YrOnYKhMf/l2EtnYCAEgaEjUeGmg79Xzbbj4vEccGX23FfEBgUzYthWTCw4lYTu02n8O3t33eHfpijeKRNSnCu0cok0tUJXC9HsUFhCTo262PHQa6DyP+bqMyGpPI4II+kdjY05l28fkCtAnuVhR08yOw8Oso4ATj0tyVyXiOeHZgcwWc2ZUPL+SziFTAVrt+vrd0uKR8C4yHU/bqB1rv4vJjW4UZo2f9Vsj9WRxHsLoXE0lqzeGRDp/rp4h9Y1D6sTu4fsozCoTEBlUmwcKEsx4ranZC1eJQhpsKWfA69u3VZdSxuAzTHTEK1JyH18MXt6YR/OXEcVohe6Y5Koqx9wzp2n38MSZuxOlx1N2c2lhSUJ0+ks7utjOpUR2QptUhRacA8QraR59V9bTJG+qFZGQniqMR9dPux0vedLSz2+i+lto5wPhklQEH6zQ4hvzFSzsdbbmW9S5Ghzyr6GybU1MqpZUNwEaxiLfhOO6AnZv3gDw6sPp++ivCY4JKyLxXgw9qmhp30pig+vOjb3Z1Fxcw
FYJPgkWjRtzGl1qte4kHuw3ONONrDN6LIskq2vrI+LGvcsojLZdkCJOE0GUyAdXPplQ/nSAVbSVD8OnUzed8g226hs5zpafVpJ4VGnI9la8chX24EdLenF91HtF3u1qdRtCmqMCrMrB6svbp05rQ1BqOFDtvYS5IWh4OIUwB8qEglJ+1Pnrp4/d1sG5oqaRXVhdfpa4F1V6GMH3YcdTg0SMgvPFtSCkjuD2WdNE/E9CrjcpHbSta4b9v2CGBA8lCtPtwNc14h/XfXktpbSlGqlvjsltkanb4dzp4Dort29fL0Rq+zvwvb50wbgtNEBQVJPYMRHzXCnoDLkSvjqjw2VErJ1SuWsakrcUf6fgnp2tL5ZKO5qx+zFZXdi8DGOhCA/XgGfTJaddeeAUzDQt1xOv/AIg7NI8/xOeX2dGA1RlxOBmjRWtKDAktYqnrILWNVrWp0q+JV3On/d106II1EyxBkQF+GeEZunnOqWtthZb9QnwPcanv4eHbr40ij4jnjGOCQ1Kjw1w46qMQ76UjcxDaCXZrp7FttJGiEeBL0hfyIHiSdfLqJKxplx5YKW6CtTktMz9OZ4DDrh1/6vZ+vJSlmuZZMiWhP/DiVcJIIYR5aughA/zLUT1AlkEUdFzetB3k4NW8fmvVskp9QGPDseZLmvSnW0j6lwu+gNNrLegQwwgf0iOylKNPD5eorSoqUB4c+3/TidHEzHhmeXZgj43Tevs01IOmqVADTQ+Wvn26B3dzpww2NoaDjxwf6eiWzGbUncpAToQRooHzB+/pekmUsa8cN1vbsqA1w8FQ2lNR1KGq1NpBHbXVslJ1/wAOtJcgZcMSUjBNCKkNh11kJv1GJakDY6gtukjXRxv5Sdf+0jQ/w6hSuSCK54K2sfjHZgkVDdU040FkOBSgNgHbuR49Cpy9Dhks1iDDVwrgoJrYsiMpAbaQ3sO1QCUjQp1+H4h0JaRg+ZwyrGhWgHhwl1OMfqE1CkthaWV7WRp3Ud34tPAgDrVc3AiUitMSLSzM0gCirVyxJegrzTwUbW07kpOg9RpBW6lO7ZuWpKUE+WunSfd3kbSeI1NeAw+W5i2+IBvjpwGZ9p7vpTGGXdTbNZbcjWLTbWnpONuMtRS6PlKJLrqFF1tJ7/IlQV8esbcRtVqkdw5+/wDVhd3rfrhgIpGjW3bitDX6hmSf3jQdmE4QHnFL+qtJRZcACorLqm2tm0JKC7/7EONnzTqEnqYZJNHlg+H6vt54VPMtZJxLBCiOB8R8R7ahT4VPsGMUtqBAYZbiNtRWhuBKAEb9DuJUonco/aST1AdljqWIAwdjjluWUnU7n3/6MMu6zrHqBsGTJb9QeQWCTp3A7dxqelbceottsamWRdXZXFhbN0vuF8oEUZp20wlwuaq+U2W4wQtB7JWSAdfDv0iXnX0MjFLfMcMWlYdCyQIpmybDhxKovOUbV2HRyEpS22XpTj5UWY4J+UBQIBJ79j0uI991HM0ds/hAzJ4D2dpw0+TbbMirIlXPDv7ThOzPGbjAJcqss47MqQqNIktrQQ2wtIbKUOOlXzKPqLGnl0i9TWl1spe3uFWSRgSOQ7Mwe/DjsckW46XjJRVIrz+rEQYcKRPymwuZSoTQXarJdWv50iteZaI0JCfSZab+Uj7fj1RG/WUl1NBeSNGpkuGJ1E8I2H+yq0AI7+3F87JIkNk9ogcsIAKUy/MqfrJ4g93ZgyZnj0BzjapjTbNBekWyZZSwSEpZadDy5C9o/NEKE+tKArQCRNQPFIHT/wBY7dG/RsMd1KDNJdA+EVXwmpenMJGTSuQeZeajCl05uLL1pdXVvGTphKgnlqyAqeBZwDl+CIngcCmFDpy8w6wxMecfeW4y9v8AS0SpRQhtW3soJCANfgB8OqAu49pnn8tUlkWQ5EmiHM5U4jgufEUFOGLRikvo4yxZFCgAileX+n68SHw6/btmXYTtcyqdVtsr+ocKlLkRVqU2ont3U3sG74+PRaLfrW+V
7fyV/iNsqFmY6vMjJK6tXNlOkGuZBrywqbjtslo63CSMLaYkaRkFbjSnYc6dnvwbKoPJSgNpaQlXbRDae27wOvjoE+fRyyvZqhFCoh7Fws3Cx6jWpPtxt34rm6x1ifdQYjiJEN8NyJjLbhS27oQlgrDiu6vIHrPedi3XdNrZIjIxWaJ1IBp4XGWQ7DiNZ7nbWt4CxUAowNacxhoQ3MaZkNr/AFquY2P70sqleqUpSoICfUUVE7tNe/h/DpOj6E3mO684QSRSh6hMzpANKajXiR7ge7BKff7RoiPMVqrSuWZ48B2YVjb08NKTFyeCyhpwO7ESEBJdUsrX2KCTuB0P2dboOn+s9uWNLDz4ok8ZVeBdiSxoV5jIjsxGO67VcV8/QxYUqezgOfLCfOl4/IRoi9q1JaW6FJEllOiXSPTWlLim9QpPY/DTr7ddJ7w8ALRy+RGWqApYjVSjAUrQjieVM8b7beLPXQOgdgKVYDMcRx4/rwROL6VLFq9LDyZDqoclan0d06SHWEoO7UhSi2jQadgOnn0s2aTb93lllYmQQkEEEGpI4g8qD6ZYE9XbitxYpGgpHrUChyNK1pTlXB62AILfn9o8fu89euiIaMKMaYrc5moywOOUMx/sPE5N0lsrmuPtw4gSkObFvaByYEK+VZjtgHQ9t+3Xt0P6n6iTpvY33RgWmDKqgCuZ/FTsUVPZWlcGendnO97qtkTSLSWbOmQ/DX94mnbStMVtcqXtplUoWFs+GmUIc/TK6MlTrfrLLSZDkyQpSVPTn2ilTjy9SoDROiQB1zx1Fe327zm8v30ih8tFFasaBtZyqxGZY14UFBi9enrK32+M2tmup6guxyOkVICr+yDwUe01OPft9kpi5hPS4soU/UPtNJRuId9D0nZsUg6tvOCOtuY2PxARFgajXp79H3jXcblDkxgOnv0nVIlOBOkrMo4/lsBlXCp6nCm2wuo/LFwtSeQeqo3aBqBiY8PzFJ5Yy4/xxR8lcn2mLZMp1tp+kv2Wn659UeTCuayTFYYmtLGgdegR17wk6ocSnv26i7PscG9ddT7ZuVQVhmXwkgh1fUDXmVRxQcDTuxNvt1uNm6LjvLDOlxGfEKgxvrJHcGao7RXFh9Kt2VHhuOLLy5NJRvOOKGhcfagNwZqynyUZ0RzUeR166MtGaYKWNdUMZ9pChT/tKa4oq5VYywApplcUHYW1L/skYXEsbBvHYlehH3Dv28tdei0Q8sau/wC7A6SrnSTlpxjeZSkdhqFd09/I+I1+IPbqUfD7MagdRoQdf6sIr6NCdB5j7+tyS1oDjzLQZHLGulx1taVIUoaHyUQR46H7f49EoJe3h9+I0kasK8MezOCt7a3ZEYrVu9eGtLLoVqCVKSUlK92miviOj9puEluNKnwdhzH1HhgBfbLaXfjkT82vxDJq95FCfYcLapkSzabasUP2HpoKEvRlstyGkhPyuHcn1twUNSNSFD7Oj0F6rmqHS1cxSo+r9OFm82RnGiYB0H4h4WHZQcGwPspxB2ZCfkMtrmxG9SkK9JExtOgO5ccL3OJSPxFI7Hy6O2V8ok0nwkc+X18vfhH3nYLhIi6DzIs+HxD2r+kYhlmMb9OkuhhO1O8oX2IDa9SOw07fx8D07WkgkQHlTFV38RiY0w0aiUGpK3HTuacQW5CSdSttXif++2e46IMDSg44DBgCS3DnhRsLaGFqiS3EoWwkKjPpUNshpY+RQPhoQO/UqBWpUYF3ciV0Mcxw9hwKrRyLZSXXVSUIixUnevVO8kn5WG/8zz3gB5Dv5dGINSKBTM4WLmjE9gxnxjD52R2rK1Rm2YadqUI3ANMs69kpH+YjuT4lR6zuLuOCI1J1fbgfFYyTyBQOJxNKk4Nonaht5SmA8poK26/Mfie3Yd+ki46gnWfSAdNcWVZdEW89mJGZQ5GBJl/FMKoW9OZb9V6MQIrQSfzp7hKISQPBQZX+Yf8AuD49G7TdWnUKTQHj3Dn9fD34Tty6
eO3yNJ8Wk+HLix+Ee7j7hiG+b0b1VKEA7yhlbinXlAlUqa6d0hwk+PzHQfYOmq0mWVdX0A5Yr+/t3iPlniDn3k8cCC1rUoaW6+PSbT/T/UofA+f8Oi8D6iBzwDuYvDVss8DaTLJc+niILLYV+IfKtXw79tE9GY1y1NngHKfFpXgDhWqIT6ipaQ4lw9vxKOmvhu+wn7utjOKZ4xQEiorhYREt2JafTU8UrStGpSCAFtqBBJ8grTrBpIdNSRjaFlDVArhvSrGyi6tPNoKtCNVp79te407dyOt0YSTxKeeNMrSKKNhDdlWDhDjlfq1rqVIGo08u5Hh1LUJwDeLEB2l46PDh1Y+YS3mlOxloUVAEpG3vr5FJB0PUa41hTQ4kW+ksCRQ8cShxGzhRIzaPXW2pagW1LWsFKE+I0I8OlLcLRpmJAxZnTm9x2YVJDShyPZgoTsiflRG4ypLdjGUNBDmfmoHbxZeB9aOofFJ7dB4LLyZNQBVq8R9KYa9x3oXkOlnDKeAOf1c8bmNU9i9LS/VTgs9i5UWytCBqNRBsO7boHklzQ/b1IuZ4fL0zr7GX9Iwt2kFw0+u2YE1+Fv0N+vEiae/TSMGLKjmO86gGTCcIaD6dfxNqPyFxB/CtPn9nSvcWXzL60NQOB7PpzxY+372ljD5Uo0uRmp59/t7+3Gk5aZZfWserx6IufGkhakz0NEtxWR2WmahOpRLa8Ng7uHuO3WyOzsraEz3TBCOROZPd3d/LniNeb3uN9KLPbUaUvWjAE0HPUB+IcAOZ4c8R/wCVMqv8OuGcdyOsk1EHduaRITtcmuOHRU+SrslUhfknXRtPYdNmzbda3sHzdq6yOcqg1A/dHd9+K23/AHzcLK4O230EtuY89DijGv4z215dgywFK7leO7KerEuo0LKX23SoBttMRCdXVE/0+ikj7T0cOzaYg540p9eAEe8TNIorwr9WeeGdnHMUCcx6cYbIkZCkxklXzK3aepIcHj6j57n4DQdSbHZ2iNW+MnP9A92NG57sZhQZIvAdv+nEaJmXRbGWt31ACvUDVX4R3J+Pjr0ypAyKEphIuGZ5dbcMPPGfQdV+oPuD6KAEvOAaAyHvFiMn/MXV6a/9nqHdhgpUfEfs7TiVZIGbzH+BfpTD2jyLW5fLilDWYrVY3BIQj8KEJAPyoQnQAdA5mihFOzDDbpNO/wDWw9o3GS7FLZ2lJ7Fbm07E6jX5leCtegc+7hK0zw02uxagGfBg404uxZnIoSL1lL8NJ+d99ICd+4abQSACnyPS9uO73XkMYTSTuw2bTsVhJeRxXf8A2fn34InuKpKW+hQcZqrR+wjx20uRYUmY48xFDY0KGkFWiQE+HwB6EbReTxaricAOTxpQn24NdT7ZYiSK12tiUVcgWJCdy9nuxHDjP2ot53l1YbJC2cTrnU2V6tlSh6kCIoOvRG1q7ByapIaB8gony62bp1K1nbMyH885L7T+rAb+BSwWxuJEqg+04eHuMzZmdZGrrSivg1kZNTAisFDcCphRkNogwXEaaRokaOnaA2nepXc6nrTskvlW3nSGrsat2k8yO0k9uWKPPTUvUvUD7neIxMS0TMkKuRCkZ0C1OaipOZxEiosMUpbBE+Uh6zkNvIdU40pivYUAoKUn1JKnZLyCP8qU66+HRl7x5lKAKF9hY/ZQD6zizrDZZLNVCuyADKrJEv2lnPuUVxbJVc/cR5ZxhjFVjeMNxb2II7M1aWX0em42EBxRlKabSR5666dV5dWN5BeSSzSVhatOH3VqKY6N2nfdmvNntbO2RFvY9IcrrIqOJD0GqvH34UuULMW3BHLFC+2hVXd8e3zLrLjgWwmTFhLnV8plCiUfURJkdDiFJ76jqPtsgh3e2mH+Kk6kdtCaEe8ZYYN4jN507e23xQSWrg/s1AqpFeYIqMUBUObOrQyhUtSSpKFAoO0K3AHtp4dX5FQNTsxyNdxFlrTOmCs7OblSXZHqqX64jyEhRJ/4zDTqiTr/
AJ1Ho3bSeEDCjewBWNMZUPLUtRTodoB8O+gP+0dFUpTPnhenBrUfFj4uSpC9D5q+YDv2+zx7nqTo1LXliCspieufHD4x12QhSXAgtt6alx4+m2EjxJKtCeg19ba8uOLF6c3lbRvF8Pfh0zLmtGjTjipa/D02vkZ3aad1nUkDqLZ2MiGuDW+9RxXEOjlhHemh5IYbbajtKITtZG1RCu3df4lHTpghQoNXE4qW+mWVzpphq5G2UWLMZg7gW2W0biSApZA0J0Pbd1Ot2/LLHtOA1wPzhGvOg+s4+OhyqtLOgstijDcfqn1NKKkNyoalMuraWUpUpv10K0O0HTQ6dYqfPhS5irRgGFew5jL2Uxsmie0uZLKemuNyhpwDKaGnv4ZY3XKy5xmOixbs655Q/QnJ1Swuc5LhR8kr121K9NRIgM1ryJ0BAWfp33y0VpQ4ELO3rStxBdyGIq4+MBjShMbaWAoSwocvEBWhIqM8EZ9rubCEXBkjJ0xlkGosomTzIy1VCnUtD4WbTUBqHLBwwTMYbbrDTTya2WQgrbUvWG+rv4HXVlSlfw6DX1mXQlhqX7cTbG9WJhpqsn2HExMPzd9ZRFfCm3AlGx0LJbUNvYtrBAUD0i7htiirrwrwxZezb4+UMmTcjg3Q7R+awPzvVGmpS8EupI8xooEj/d0syQLE+Qp7MPlvdPcJmajvzxEq7y5DWPwFF3RRgRvMap/KSPDXw6atpsSwU0yphA6j3ILcSpXxajiHGa5IuY+6PU3DcoDue517eZ+PVg2NsFAGKtv7ou2WBO8ouqUo9/u6MIoHDAU5nHlpkhQ1A76/bqPu6zb4c8fQoB5VxsFrcfD7P5/aPHrA50rj5pBbvw8cVqUvzIaygf8AsXHOpHgEOJWdPs0T1FuZCFIHZiVBHnU4UpUNoMuLIG8arKtBrqSSTr49yetKsa0xvKrSh44RosVySo7EEjw3HUJH27vs62swUZ40ImtshlXCgpuFBGrq/XeGmjaPw66eYHYfx6wGp+GQxvoqDSeONFyfKkkIR+Ug9ktN6gkfaR93WWhV45nGkyu5ouFmBCTHHqugKc0108k/z8Tqeo00tRpHDEuCHPtbDoRdy6WulOxZj8YuIEZptlxSCtx7XcdqT3UlGvfoXIizSAMARhggJgiLLUE5DG3iseXYpcW8Topxsndu3OEkakqPc6daLgrHw443xSMaBjSuD3X0rLUMObQVbdCdO+unbToM0x105VwUEa6NVMDzJqoKWp5P/EUFaDTsFp12pP29E4JSRTlgPPFR+/AusI7tdGfkqSQ8lCnELB/AtI3I+G0pWkeHgR1JIWVgv4SaHvHP7MYIWhjJrR6VB7wP0HDizSZ6iJyS2lVVK+nlPOS3kstREzWkvuLQ+vupxsufhQFEkdVnbSGTZgP95G7Jl+4xUfYMdLSxLF1HFMlRHc2sE47fzY1Y/aTgQIp4TaWH6Rg5M2VrP160lurb3Aj0lsJ/OfcbV/6whJPl0FkeoOrI4sW0SQlaZUzy+lMTA4nus7sFxUxLKU/Irom39J+RsPNNq3ux0xUhDUtPopA2nuWwE69ulu6gXyiAAI1Puz5+ztw77ek7qUiJaNfEFrwyA8I+3254mXVRZFKlIICWpxjvMKTqlKULC1OsaHVSFsLO1ST4adK8sdK9mGRXYMob49NfuxJ3jiei0gzcemaOM2EKRBQHe6dJbaorjSv/AHm6l7Uf5SOtFvM9vcK6/hYEe41xMmtI9ws3gkAKyxtG3sdSpP24r+sUpEqxr4jpMirmy4M+vc/9jIjkWQ4ysbdAp1lJRprpqPP49dDWzVjSVh4HUEHkaj78fnFutu0F5NZsaywyujA8RpYj9GGTbYsqxbcWtA3KToCU66fcR8OjNveiOg5DC3PYuwLdvLDFYwqWpamClSiCdp0+bbr5EAakdEXv4gNROWIcG2zOSoBPZjStcMmQEAempaVkaqIOqfvHnr1vtb6KU8c8Rdx2+eBCNPHG
epr5NT6MqAy49OiufWJabbWtQRCSZT5CUguBpLDSys+AQCSQAepMs0MqlJWAjYUrw45D31Ip34DQWN+Zlks0ZpkOugBOSAsx55BQSx5LUnLExq+4RxlT0GL1096sybPKZvMMpyRqIZk+qobh4u41h1bskMLr0yYCTKnOJUlwlSEHVJ0FZXUDb/dTX8qB7G0lMMUZbSrOg/NmbI6qN4UBqOJ48ep9lubT042iz2KCVrffd1tReXVyI9bxQymttaR0ZSlU/MmYENq0jMEBV+VkkSLXstt3DkhCQpS3DWtNraU2EutL2OPuetucHcE6aeOvQwWMjyEtEFP9bj28AKYYJuqLaGBEgvGkXiSYQCpFCpoWOqp41PLnwwHarm3+y8saM6b/AHZht7Yx2cwxefXR0wDEQtpCbetiF52PHvKv0w+w80htRcaAUog9G5+mv4hYny18jcI1JikViWrn4WNKlG4EEkUPDAHa/UobVvQW5lN5stxIouYHjATSCAJI1BKiWOgZWUKSVFTnUSBl51yZQZFd4rY5o9KiVVvEnU1hW1FDTt2sF5pqyx+U4qjq4Lb5k1s1v1G9S2orUggjt0sxbXsl1ZxX8VsBLJGQ6szuVYHS4GtmpRgaHjkDxxYe49S9Z7XvNxs8940ltDOrRSJFCgdWAkhJMUag1RlDL8LElSCMsFmfiqLF+mniI3VX9rDMvIsdjJ1jVsha9Y8pKR/7AOWbJDy4h1LBV301A6CQbg0KSxFjJaRtSOQ8WHMfvaTkH54OdQbDbXT296Y1g3mdNU8C/CpPB6fgL/EY8yK59+tbYuuEj0lN7U6FTq/6Wzr4DyKvh1sgvllavPlhbvNn+XTQFFOZ5DDVhREzZKa5xoohE7EA9vUUe29Z89T1OkmMSGVT+Z92AKWizzCBx+T2dp7TgX8ucc10WvkOJjhRKFK/DrrqPEEDXorsu7yyOBXC11R0vawxFwvfirTkWlVQvKitMqSZjpnSU6EbWQSI6Dr4eatOrb2+cXCBmPDIfpxRG5WrW8pVRzzwDJAQqcyVeCdxKfPw17fYejQ+A0wBb48+Aw96RcGY2w00tHqlYbQyNu9TilaAAeepPf4DqDOGQkkZYKW2iRQEOfZhAyhUVM1VbCKHI8dzfKfRptlzgNq1pI7fTxR8jflqCevsWrRrb4jw7h/TjObSZPLSmkHM9p5+7C9BUmFTR4TqCJduGp8wHspuqjrP6cwrtqDKeSp4j4JGvj0Pm8cpZT4VyHtPE+7hgvbHREqH42oSO7kP04cdRVuTnQhpsrPbRR7JI1HfXzHQW7l8tTnhhs4fMbwjPBepMdeiraUppW4gaKQNfvGgB7dLFxcEk4bLK0C01ccGGuCkNJaW3sO0AL7DTt/2iP5Hoa8sYzZgPfhjhtLqUaYI3Y05KT+jGRVfYOErr48iwVHD777cOO7K9GKlv1XH3FISGWGkBHitaQT2Gp61SbnYxKBJIoYmgFcyeQHMn2A4IQ9N707eaYGjjNKl/CBjfizZrVemTaxnsappSfVi3GTBdHHkLY0C/oIkln9QsAUnaS02tA17qHUc3JeQrEodl4gMCwrwrTJf7VMSHgsbJNE07m4PALGdOXHNqav7JwTKfFLiXjsTJIE+pnompXLi1ldOE+bJqUOhj9UYeYQYileqfmjFSZDaAVKTp0G/ifm37WUkMiBR8Z4auOkjiO5hUE5YIy/I2EEd18wDAxpqYaRU8hmTl+IkDTzwY8PwHOLpLDSYyN8houxoDk5hFi5HQla3HzC3F1ppIQdd3zaeWhHUa53CxgcoXGsYMWckt3AZ7ZWlgAJqtDUDjp7adgzweMLw5cRj13lRm3COy2mzIcHcfh9UJZSrXx7ajpZ3m4bUULgDsr+nG/ZOpFvIzJZxEIebGhPu4j6Vw7nqKG0tx8IS5IUdy3nyp1Sj8QFflo+4DQdLKy2qc1r7cGZ5NxuRp8whRyU0HvPE/XhBl2FdF3IkPBawNumh1R2/p1Gi
dPs60zbtZW3iMi1HKoxhbbXJcHyyta/f7cAfKuTYFLYmMhW4KJCe+m0/ce5BHUG663sYbcuh1EYNbX0Pd3F3pP8AhnA8yPMsguYSEV7LwbcfU2Fp/LAC0jRQUvb8Oql6i603a+UpYhgpNK59mL26c6I22z0tclSwFcBa/wAKv5xTIs7VqIxuKi+68V6d+4KddSdD207dU7up3qR/Ou5gkfaTX7MXLtMe3Wy6LeKppwAxsU7mI4o2hUlybkMpOnZxZZhJcHcaIRpqCR3+PWNnuu1Wyfms9xKB7F+ocsFHsb+6Y6AsSV9pwceP/cPkmIWiJNRU1kevU3tXXIYShDiEpOinHR8xVtGnc9Gdu9Qr3bLjzIIoxbkZqP19uIV50nb3kNHkbz/2jnxxp57yrc8rzrHIJKRGUzCTXqr461bIrf1TDmqe43FYbPc/0j7ekzrDrO56nuhcqAulSpQHNVXxE+3IfXg/050/HsI8h21621BiOOVKDuFfrwH8Px+xkJiS5MKY4LCHLSwh2M+hxiZMekOID7BBe0lKe9RtWmjiNNmunSXZx3F7vllBLHJojtyAJEYDUxZmJyzDgVU/iAotaHFrPd21lYTvHIhcuCSrKfCoCgDlUHJh+E8aYNXLDS66jpKlLqWXIhhma22hltqMoR3ZsGmkKBJetpD5/UbAJ1QynY2ruO1heoitZbLb2StoEZXXw8JCtJHbntkZj8xcgZLRYycsq+6Af5/c7m+K1WTVoOdX8QR5gOUSr+RATm51OMjmJadn01VzqEr2AeoWUqBRu3valG7ttVp9/bqgHt0haOaJWIrqpXKoGdPbXPtpyOLjl8UUsRIrlnzpll7hg6YMYsJ24tZRQ1DjVbCXpCwUABxSGgQgalSlrJAA1JVpprr17p3b3ut3uTbgsDbBQKUrUoM+4GvspXCv1BcLDYRCRqUlLdvCpp92WPtvn0+yWpiuedqq38I9NXpzJKNdu999BBQFAfgQQkDsSrq59n6dht1Vpx5k/P8AZHsH6T9mKp3HdZGJVDpj+33n9AxqQkR50WS0F73FsqI0V8xUkheoPdQUSnx+PVgxbebiwlgYGpjNKZHLMUPLhhRkvVjuFcEU1c8+OX6cK8OoYS1oqK41qQtbi3ipxeuh/FrqlPU/benElj0pbSISQWLPVj3VrkO7Ea63Uq+syAimQAoBj0utju/8NQUQpQ7HX4gAnXx/19bZulpjUlWAqaZ95xpi3lK6QRXGpIq0sN+v6St3plKzv1SpO4g6pJ01IHQ1tiljpJFCWkoQSGyIqQQRX6Z4mLuSsdDMAtajLG5gNzOxadKkU85+u19NCozjxktvBJUsiQy4VtOpI0A1BI8iPHrf030+lo0zywtFwA1tqagzyY8uHOmM9z3MzBFRw5qTkKD6sTMwnPYGWMGO6hEO7Zb3vRgoluU2AAqTDUolZQCfmQdVI18VDv0fltDAdSVZO39eIsMxl40BxtchYNEzGhjsz5HoIjGa9tWrYhwuNtoLah/2gP4ePUfeemLbqHaUS8bTGjMxFaA1Qin24MbNvs2zX7PbrqLBRlnShrX3YryzvCJTFkaCNGlTx9UG4LkdvcqQX9Ex0tNp3LdWlQ2qA7aa/AdUTv8A01cwXAsbdHkjD0VgPi1AACnEkUoTkK92Ln2jfI2g/iDssb6asCfhK8STy7fqwl8fY3b4nm8qFcQpVd9K7stn0I9VVFJjvJEG+UhG4yYcN94okpR8xhSCoagadMPp5tt9tHUUtvcq0caj83KpiKtVJ6fiVGJWQLmYnrmMsBuut1sd12BZoEjmlP8AhipXzgy+OGoyV3UaoycvNQDImuFnLYOSYHlisloI7LN3FvnlxTs9eElmZXtOWSZRWpLcisU3IDKFBX5jZQU/OdOpvV0V70x1Mu7Wi0lWauQL+FhSQGnxKQVVD+JWSmeIXTV9Y9R7E9peSUsXgFSTooVNEoOKuCuphTwtrrliR3COeTc6xGlvrCJ+nT1T
Mwpp8NMd+EI0usyKROaZESTo/HBi2qSEL+YDqzOlt2k3SyiupEaOTXMjKylSCspYAqc18L8DhI6i22Lb7uS3hYSRaYnVgwYENGFJDDI5pxGRwa3pBSlOmp3bie/2hI6dtXhAp24VNI1GvHGv9QXEOJUDuG5SO/jp3KfhqR/iOt8ZZgV+rGtwFbUPYf14b8l4lWup7fE+f3+Pl1iAymuM6imeNL6g9t2vbTU/EDvofs6lQysp7saJQDj66j1iCjVIPcKJ07/Z0YiZ2oRiG7qooaVx9beEXuD83xB0OoHx6LwPo9uIMyrIKZacflWJk6ofQh1BGzRZ2q0UPm0dRosE/f0SguJNVK+Hs+n9OIU9tCFzoe/DPyzAMcyyCELUKiY20G2ZrDYXqhKSEpljXSUga+Kvm6Y9u3a4tG0rUp2HMe7swj790xs+6KzSgpckfGuR944HEXbrhPkpl9MTEIEPM1ynVMxjVSERpJVu26vty1oaZaRr87ilpQANSQOn603iyaIT3h8qOlanMfr93HFQbh0LuaSFdtK3ALUA+E++uXv4YWaH2jzLeQuu5G5eoKC7gem/JxTBkf3dkVWzJTuSxcSFFiNWa7uwLLiFHUpWQOtkvVYgjEu3WUkkDcJJfy0anNa1LcOVD3Y023poJJfJ3ncIYbkDV5MI8xwveTTT/qkd+GJIw/2gNXVni6OT+bZ2QYzNkQrWNW0+PTEMz4qimS+qpap3LWwAKSf+X9T5ew6KpfdZ+UtyLSw8lwCKu9aHgK6qA+3ECDpP05vpmtbO+3Oa7WtVjVHbLj+WITIwFDXTXD+xzi6gyRtmRwnzLQ5ZMecejw8QzCqs+O8qnPwwVvRIMe9aZemzEJSdyTHZSnsVKAOogzdRNbuYd/s5ICBUvGyyoAeBIWhA76nuzxifTiG8iFx0xuMc7VIEUyNBISOIBbi2XAotOZGDGXce48ZiUHLfLNdimYPRmH5WJ0dXNymZRNyUJWwzcTK0usMvqSoEoACVDuhS0kKIfXPujtPstmZrIGgkd1jD05qGzI7/AKwOGGGLbrDp62S16n3AQX7qCYoo3l0V4B3QMK+4DsJGeM1/Tya9NLbs21dluJXSHzQZNVg/SyJoUtEhidHWXFwJ8RpG0tqUSNpHZSVAZWtykpkhKNBex01xtxA5EHgVPb+sYHb5sbQJFexSx3O1yg+XKvAtnUMM6MACAO41oQQIt8v4XCbYcmxY6n3zuX8qddilfMT28B3/AI9NO03rudDmgxT3VG0RW/5kYqSK+w4gtlFfKbecakIUnU6AKQQPsABH+PTxauCoYYqu9R9elhnhrxsVj+qguBbi1nUADRI18R2GnU351zklBgabNK+I1PZgq1GPxYjKSY6QpW3xQDoNPDuNCR1DkuJGPE4nw2scf4c8KrlQ3JOqWdFIdQSdoSNu4DXy8B1h5hXnlTG94lYVAzBwG8np0sylsvNEbZBSlfjoCvw8fAjoraTFfEpypwwDvIwPCwzB44lXxjxtiuVYc/HMdBs22dUpWlBK9EklQJ7+PSvu27XlnfK4P5JxYvTew7buu0upH/NgfXgH2/Hb1BfOxfplJZDpS18DqrTUaDzHTPb3wu7YODU0zxXu4be+33zQsKANlgl12OhYaQllKlIQlCRqdddB5effqFLMV4nLEiOMNQAeLBFo8SL7iUOtgu69gnXROg8PtPQq5udIquS4YLCCSRwr8cFStx5EBCleg8FKH/a01Hw+09AJrgsaVFMO9lZoiVKnVjXdRYyHRABTNjK3LMex1KIqB3ceTJ7OxUtp76g6eXWxZkjHmfC/avP3cDjXLZSyt5Smqdjcu014gDD+4NsJ9fynUYvjs9aau8clplfqrLjrhdiV8uwk2DLqNEPoajRFpYbO0jUakdz0N6k8qfY5Lu7X86IDTpNOLBQvdUnM54ZvTiC6h61tbHb5P+TmLhw4rXTGzlxTLIKQleFQThL5eyPhTmN+HJu8J56vWYodEOxx
yvxlhhRZUtCvW9dMl5KkrT8ySf59bthtupNgjaG1uNsQk1ZXMhPupQYLdX3Pp/1fex3242HUMhCFEeFYQrAHOtdRqD20pjayPI+SuOabhPBuEr1zAsUVw/id7MgycTxu4sJNnPds1TpVst7HLeYq2lBlJkKb0QXQVaDVRMewstp3aXcNx6hj+ZvRfSICJJEAVQtAtJFGkV8Nc6ZZ5DBXft46n6ag2fY+ip/kdsbaYpCpgjlYuxkLM/5MrF2AGvSKaqnmTho3PJfuzZ9f6HmllsAKU36nGNC9okjVAO3jtzdoPHx6IQ7H0WxGuwP/ANEMP/v+AFx1l6qRof8A1svD/wCcQfusjgWNZTmHMXC3uJg+5ibecnV/H2WcKOY4MFxPE6PNK43mYT6u7cxtLeO0oEqdCbQhwS2yUsbwAnU6lZLKz2Hf9rbpUR2sl1Dda/NkkeI6IwyazrbIHMaTxpgVaX171j0pvrddtNfxWN1Y6BbwxRzDzZmRzEvlx0YgAEOoIWooMxgY4xiftDv8mw/jdrGPdxhkvMslq8RoL/JYuDpomckyCQivqnLLSI09Ljolup9RLWqw0FEDtqCN9uPW1paTbk82yzxwRNI6RmbWUQVbTnkaDKvPjgTtnTfpTuG42+ypadUWk93OsMckwtxGJZDpQt4cxXiAK0qe/DUocKcxzL8sxO1fYkScPyvIMVdksoWBOfx24mVD0phpRKm2JDsMrTr3CSOs7vcBdWUN7ECqTxJIB2B1DAHvFaYR5Nmba97utnZg8tpcyQlqUBMTshIHIGlaf04lRV0jYrWt7YaQgfIgJSkLGg0Cz4kp6Rrq6PmUHPFjbbYAw1l5cMMLLHpEAqdjKShtJ0UpGu5P/dIPiB49aUnBHi44kT2MjGsYoO7AsNlLflIlj13nW19ySFbm/wCsAnXUqB8Ph1pmu1RaEgYlWuzSu+sAnPE38Et6rGuLESHX0sWeVSnEMxkrbbfahN9my64pSUtsK2uqJ+O0a9+kHeNwLbgsQBIVajvbhQe8jjiRftaXRTaVnVZFl0yAHNKDUWbsUCvDMmijM4gHf2eCXlrlyZzK5s56TMRCfYeMliVZErQw+y6na2mHBSNNdCVveHYDprgNxBaRhGPmFRUUoF7u01+7AS2Tp/VPaGMwxo7UYSVeRjlqoMlCimWfiJpww3sM4vrps1U2+cmRa9tsP/Rq3oXKYcRPbU44tlovRHIj8TcUKHzJI+I6kLcTBTJOFEXb2nuwuXEe2rucG12Essm4TSqKEA6U1DWzZEii178aXGVxBYmTK7a48pxsrYflSHnllTSlIDiQsjQuDQ6AdRLszuuZA7QAB92LG6ek2i2uClGc1NCzFuBy54klZZJYW3GudNypC1NQcLyQBB7I1RVSAFFI0Tpp0ItwIr2JU+NpVz58Ri1Wd77bJ5GoII7dyF5fCeOKHmG7CEmIUqCvyWB2J13BtOo180nTq9opgWPbXHKk0BaMA9g+7B2oLqQ8xWpDbjq3IbbatgKzvjvPNK3Hy+Qp8fLovbSjiThUv7UipPDBWgwglAdnS24fn6YIceUD5bUnsfh0dhl1AaRXLCfdRBWPZjbROgR1kQo/quan/mZXzK1A8Uo7gd+p6BmHiOWAshVTlSp+n+jG2iY9I7vOqWD4J12oTp30CRoAOvvlg8seS8ZBUE4xvufMk6/h0PbsB9vl1ujiVeGNdxfPL4STh0460xZTqtExTsWHKs4kB2aUaMMl55pDqy8sekFNMr36E9h3PbrC6doYnMYBkCFgOZoMsuOZyxv2y3W7u4luCUt3mRC/IVIBz4VANc+XdhxZ5UR/Vr7aLSHHZX9w5FSKqnHbF0ORsddpzFsXRZvPSRKeesn475QpDBcinY22oLSIW2zsweCSXzk8qN9XhGcgaq+EAUAUMK1ajCpIocGeprC2t/Kuobf5SU3U8XlkuarCYtLnzCW1EuyPSiFozpVTUYZ1jR3tu7Y5fJbgNNWUmTkUyKxN
jfWR4FndvQUz/wBNVIdmtVqrV30EKWCr5kE/ItC1Tobi2t9NghYsgCAkGhKpXTqpTVpFeQyI4ggBbrb9xvzLvcixhZWaZlVhqCySldeipYIZDoBOeak5MCXPOkG8rVVbdZGjzVsY+JNs0qa5IntYvVO01NGUw7Icixg3CWPWLaAXVtoUNmiguNFCIJvPLs0dXovhovmNrY1Aqc+FTkCeOVJdxu3zliLIwotwREGkBcswgQxxihbStFI1UHiKqfDmC25sRVS3izyEONP2NDInWAWtZ3ymcryeqQtCVKIbH0VYyNqdASNfEnWRE3nvMpzVZQF9hjjb72OIF3CsENrKg8UtuXap5iaZPd4UUUHt4nB241zibAktsvOCVESpIUw+onRI0B9JZOqVadA9029JUNPC/aMFdo3KSCQZhkHI4nji1pEsozMitfA3IQVRHV/OPjsUexHVbX0EkLlJx7xi5Nou4riMPat4qZqeOK07rMFP0FV+ZoVVsXsDr39FP+/qx9psgIlIHIYqjqO8LX0wJz1t9+AhLmGXIKlE6anz76a+P8+mdIwg92E55DI1eWMYSAANSNe+gOv/AJu/W8CmNRPPG0y0pxaEo1JJAA0PcnQDy76adYnH1SOeHWMbmIZEhQSdBuLXmAfAkeZB6imdCdI+vG/ypKajxphzY0l5EtAab3JaalOqJ7JSWob69Sr/ALyeo84BFT2j78ZwsQaLjHYPxWkJbeWHHAkAstdxqBpoo9fUjc5rwx9eZAKN8XdhuO2LzifTb/Ia00CUdjp9p6z8vSatxxiJ6jLhjSQlTqwhIJJ8yf8AFRPft1kchXGIOpqDPPDrq6xGocV3KRqVED+SR1FlkoKYnQQgnL68b0mQyyoN7UqX3OweA07DcdOobVbPlgpEqqQO/GL6F+wmxIgClhpPrL+XwceOp7f9lJ6jFlRS3bicVYuE7MSNxLFRGgNrUjbqQoajw08z/LoFc3NW088T4IGY6jwph5OKc3BhofJppr5BQOiv4dRlUU1njibV/hHDGNjCpNwh13YpLKPzPWdPpshQ/p3qA17eQ16824JAQp4ns448u0y3QMgBCjmeFcCfLqevgqWwhlU9xIUpxchtSYbYQCpz044IckkJB03HQny6IW8zyAMfCPbn9fAYFzwpDVMyaGv9A+7AM5BZlXkxsSHnlpaqqw+io+khtYhtK2egkbG1J/CRpqNOkDbqjZ5yOdzLT2ajmO7njp/cYxb73tUTZSLtFoGrka+WDmORoeGCBx+iprMd/wCXTH3JRtmMKIS26T+JSgfBw6/iHielqZnaQg1pXFmWKIVU0Fafp/Tgs4Ldw6y9jWlXI3NNSoSXi2dXYbj7qmR6gHctoWEkK8D59R/L1HQ/Oo9oP0GGGxnFvcLIMlqB9eWLAbR6O9S0dg0ltLNnOWso7FTMptkIlxx5hIcAPwKSD0rTwlHeI/Ev3cj9WDlzIY56j4Dw/SPpywQcLmiLYxXE6JVqk7Rp4+XYd/EDoXKtGB5A4J7c5cFRwxFbk7CbWq5UvbBl1txy7lysio5cc6Is62RIdEuvc0O39WpJLbjKx/4qUfEd7w2Dc4Z9liQggIoRwfwsAKN/VYUI7CccOepPTNzt/W15IKFbiV54WGQkRmOpP68bAqRzAwrRaX9SiJdKUNSVIJ2j5Wn1eY+Dbuvl4E9SWuRE+niv3YVo9vaVNRp7O3H2LTRYw9SUkIfaJ3BSQlQHhoQe/fTrXLcyOdKZqcFbGwhRdUopIMO5eBVt3AjKbaXMnT1pZgRoiC649IcIS0yltIKlLUo+XUAbtNaSnUQsaZsTlQDia4Zoejrbe4kEYZ7mU0RVzJY8BT6U9mNqga4y4ucyrDcggyMmyjJsYvceyfIqMQ5ZwddrFVDboqFuUtqLLsWUOqM6TvAStIaSFDdpHvJN938QbjaMIbGCZJIo3qPO0mpd6ZhSfgWnCrGmVXTY7DovoIXfT+5K11vd3ZzQ3M8IVvlvMXR5
EIYhSwBJmevxAJQ5hXnZ42zZZbVXtdKkwBdYvigrH4qvSIqU0ddBWyt9p9CkKjza91C29CApPfTqDaXpi2+S0nVX8qeXUD+1rZq0I5qwoezBPqPaTPv9vvO3TTwSXNnbCNo8j5PlIpGoOCNLI1VoRUCpGG/ynx/NxKl+orb24ccfBWhiJIcKB6hU44sqZkJUhRUonQJOpJ1Pn1L2XdotxuaSwxADmR7uY/Thc6x6an6b29TYX1+8rGoUZKKkliSshINTX4czzxCpYuqeXaSafJMzQ/eSWYkxxUN6vi2b7QeaahyZrF7LXKkModcCGy2pXzEdtT0/Ut7hES4ht9MakjPUVBoagFBQHKpr34qe3k3Sxllm2y83MS3Mio5KFFlYVARnWd9TAFgqlScyMs8TslcsSOP3cjwVUOJe3NTRcY1VWJzDEyvxzJaPF0NX12gLQtL9mw4qO02kkhLzIUfwFJrBNhXdlh3QM0VtJLcM2kkNJG8lUTuU+IntBpzx0nuXXkfSjXOwMFn3KC3sUjDAMkU0cH5sve6nywBX4lBy0mrh4vzBSvUmWDy3ZDjjj02bKUVuuvOKKnFuLXqVuuLJPWnedvoAkQogFAB2DAHp3qJ7h2ubpyZmYl3Y5kniSeZwXMiu6y2ry9EA+UDVCe5UdPxkd+gFtbzQy6Xrhsvr60urfzIqVAzwHDIQ3LDpJQ2g6pOoGp+Hl4dH9JZNH4sJUkiCTzOQwq2v0+RxW4cgBxCkaqWRr+UgHeD9oA/x6+WyNasXXjXES/njvoxFIajt7sQE5349iqkypjaAFkkgBI2pbSNqEDw7BA6sfYNxfSIzwxTXVG1oszSKMj92K5strxWWxQNAB6nceXy/z6sa1k8yOuKrvI/Klw2ql9deFy+6ZstC0Q06kGPDWSl6aR4h6QNUNfAaq6zlXX4fwDj7ez9eNcDlBX8bcO4cz7Tyw4caqnsmuG6+JFlTNoXJmsw2XJD7VfDQHZBUlAOwugBA3EAlX2dQb64isrczzuqJwqxAFTw4/owV221nv7oQWyPI/EhRUgDt5D3kYKFXgtzYWblnkMqHRty3kelDWptb7LISlqJEO4pZR6LKQgjuAQT0q3fUFrFH5VkjSkfi+FSeZ7cWTtnQ1/KTPvM8dqhz0L+bIByBp4Vy5Z4mHgvGvF9LATMy7kvEaZKykpZlWSJs0aa7ghiOW2wR8CfHpB3ff9zkqEUKncMvrOLB2Xb+jLIhGWe5n4jWaV/sr+rD5mZR7eKtr6atm8iZ4tnUenh2HTpEZwjQbETWozjPzHwJc6rvcepYbYlr25RT2ax9wxaG2We5XChOn9lLJybyWp7dTaRgeXfOWAY2w89G9vuXtw2wSLHkTLKDGK7RHguQzIsFSUo18QGtfs6Sr31M6etD+dcKT3Gp+04a06U9RJloWsLIHk8kakf2Vq36cR/v/wBy6hxSLNpcTtMExd1TzC7LHeN2Tkd1bsxnklMGTk0ppdZGiFSiHAGitfgk66dL956qdP3Ca/lnkVQdLsSNNcjppSjEcMye7BfbvS7qMuL7etxadQwokSssJ5+N5aagTwCqScLcb3iuZjCjZfkMFGQtFDdYOOsi9NcWJNdUWa2PhypLf1tDOsX1JU6lat7KApZToodJsfqbMb17fa41tpQQwuK0lcGmVyPhZFHA0GrIUFDhri6At9wpt11AJduSMmWEgmKGFfE0sR+JOekatTOcsq4V8G5oOGXeRYneyHaa8kMxL104Y9Gv4tcw8tLTLWQ4mPRTIckSZAYVJgrRKSRpovw6Yt39VLnbZYbXqKKa0sbqohuFAcZUzZa61jqcpPEozPAE4XIfRTp3qONd39P2S/MJKNt905glkahJEMhqkjaAS6MOFMwDXG7x7+4h/wC01coybPk6rYz2geBF3/aTztzlUNixSuNGusaakttTJkaIhAEuK6htbRSU/i0PQiTrTctq3V7Tdibq0uVVklVAxAGYRWUjiviCkUfPS1cG
enPSe1aeLcunRdWMln5kcu33ZEIicrpLayCjKGNFYE0qCQRi1Ph73Ccf890LvIvF1xW3+N3Ut6U0xWuqM2nWs6uV1xWLAm1VhHUfzG3UBO8naSOnWe4td5txc7ZcieDSBl8SmnB1+JTyIP14pT+B7xsO/Xdp1DZPt24NOzCEiqUY5GJx4ZFPxalPPgMGBzIpDDCt8RltGh1UtaWyfLXRWp06TL/Z7qQkrMy+/DzYSi3UApCQP2lwxZWTxfUU4qvekAHUhlLKW9B5+o6jv2+zqv8Adel7qZi/zLkdgqB9dcOe1bxawMNdrbt3gU/RgX3ea42q1CXcd1cR4uKjwJCh4d9VNa9iOgI6O3JYjLFKoHezmv20w+bf1vsNvOI5rGJm7QRhItMwq5LKks1khpIdbIKq2GQkEKB0SkDQ6npdvNp6hiBVZa6T+2wHuxZG2dZdJMR5lmMxyocNGbHqrpoes164J/4TlclG3wGmiV6dCLnZN0uY6yuW7vMNPtw52nWHSSZR2ugnnpBwJc0xWtiIaVGYSlDqzqhMZXyEfYF6kAjpem2DcrQBkY+X2asHbbqzpqYMPLUOBkSh/QcMKOlyKotAJCSSU6IcT2CTppuUfI9D3Fzb+GTUeePv8R22fxQ+WP7LD9ON3E7VmM9dNkepujuPTGyCB6IU1FQtRPcDfMJ/9H7Ol7YHEnVDLOK2wiVWrkCZJVU/ZXBO78drAyEBjJRfaAW/7kYm7lMmAza4pXxVCvVXRIs+Vcenq7AgV7LMCocaZCU/qEsvaiEwddzrev4QrrrvqprSG6tLOMJCYkDtLpqYo0ASIgcZHJJEMeepxXgDilemWumsr2+cmXzZGRYtVBI7kySAn/dqBnM/JTTiRiP/AC7LhyrhuC4mRWOxoxMCrWpDj1X9dJElMnIJbgJl5DkSWVvSdunosjYO2mvPXqnfbBBOllvM5sHEZEKMa+SWYNWd6HXcXAVnlH4V8I5E3n6ZWt29m91HpuFZhrlAoJCo06YVFNNvBULHzZvEc+DaqWn22q3ckDct9pTiRqhaUaOJcR4jTR4gjzA6p2O4XQvyssVxbktR08SkcznmppxUgHmMWLcARu4YEEBTQ8c8qfZxwt2VutTkilZWoRon0IfA7erLUFvhK9p0UmO2oAfBROvgOrL6J28WtlPdsPG6xotRmAzlj9ej6gMVd1bdme6jgQ/loGJ7zQD7K/XXGk9ZQ6lUeZZQRZQGVKVIrfrHq/6tHprT6Ylxwp5gpWQrVI/p08D1cfSq2/8AEI3vIfOtxWqaiurI0zXMUJDZezFU9QM62zCJ9D/tUBpmORy4Ze/BdszjdXcUtPS49+kP2NFQZG5MVfWVmDHvq5MwQPp5qQlPo+qAXAdTt8Br1du8bTtVnHHHaWqxyOqtr8xzQMCCuliR2eLjlwxW9ne3EspMshZRlSgz4GtR92Faa5Hr4bMiRHQ/HaV+fF+odYLyG+ykeujVxrXTxA7dRektrt/OSsUZQGjDU1SRlx414Y271dukTEMQePLC9dN4/WP4tFr6X9PeyTFaTLRI/WZ0/wBKPcmTthejISEKLZjf8Ttrr4Dqxd12qxFi5WMBhzFcqd1c8LFteTfMJRj4lDca8cNXMHmIDKZL7DEiKw6FPx/XdjrltJG5yP6zXdkOtkp3gEp116qGa0tvn1eWGGS3U1ZdTKWpWq1XhUZV4jjh1E7i1Ol2WQjI0BA78+NDhMsHsWgwsRcpsY/SpWX0cfJG5X9yWtoIja7CXCVEEealLb28QiQv5dArw7dNV7tW1220JJZ2ojkmjVwRJI+itDTxE6ssq0HHIYF219cS3mmWUlVNM1UV4jlwz9uCFjsyTUSYNtDd2TYjqX47viElJ7pUnwUh1JKVg9lJOngekd1DExn4aUwfkkYnSpoBiZdvMi5Zg8Syb3IYnNtOrS2QHI7qx6T7aVjQ7o8htST5HTpP6qlurDZjNA5BhnQkdqsaEHuNcF9kl034bIkqeP14
Y2KYvVQGRNLzFpaqW8hExKUrRCWDsfiRidVNPnb+Yo/Nu8Ox6Y9gtrOW0juUIeaRdQPHTUZqOw9p7e7BjdL66kbyyDHbACo4FhyY9o7O7CHmOBxrmfDyWocTX5LW7U7ktpdYtWG0qbTFsWCQiQhxpxTSge6myBrqEkEDttncX0V/H+VfR5BqVBypRxzVgdLd3eBiAdwuEsJbCQGS0kHCtCvPUjfhdSNS8qg8icBPLnIMa+x71655tpYdpp8KWtT6Kae8gzYcZwOgJk16pjahDePdIUGz3CejH/KWu7WrSQARPWIhgG8pz4kXPihZfyn7wvEDC+VvrnZrpY5tU8ZEgZfD5qr4Hag+GQKR5ycDQuMiTjxx3JXClZ88pe9hHK0KSl/cVJddyPC6lU7QnQAqmJQVfBXQW/SO23e8ePJTfI5pzaWFdR95AJwy7OzXGw2SNky2UiAdginfT9S1A7sHV+RptSQSA2gj+Pzf7eja6SBny+/ELTxPOpxoqkqSoEf0kEfYQe336eHUpABmOONTglSOWM7cVElQkFHyL1ISe2ix2WPh4/4HrG5LU8AzP0ONCuUyfl92FNESIhIK20KP3a6D+XWuKBxmSScaJHZjhuXa0xWg7H02j8XbskDyA+PRuxMhfyxwxoaIMM8M9dglQ7dyR4DXUH7fHpjht34nEeRVAqaDGaOtawVKT2HiO/8AiR4nqdHGytTngRd3CIMjjWYTZZPcs43TuIYceQ6/PsXDpFqq2OAqXPlOahKGo6fAEjcohOo16O28SxRm4l/wx9pPADtPd7+3Cfc3cl5P8rb8TxPYOZPdiA/PHvXVl0C94p9pWSR4GNV78/Hcn5ch2dXByTObSugOSbGn4wemyWFy2YsZJUubGBCd7YjlTj7HrA73ept1It9oPmbi0gRTksSD8RjZ8nK8C4DZ00mumrltPT8MdG3PVHa0rpzDSGtBrKiqKczSqmgJOQYCNWDUWS49BrvT5Cv8VyXK240U3Eu7drIKV2KXHGZFj6LX6nfO2LDSwy5JSy2XlFad4B62T+qcdmzdMWV6u99RIQspVFW1ilQEMscrVaZ1GWlKRqQF4437l6ddKXdib6+CW9nWoEeos1eGsqR4ST4mYkUzpheuskscVFrjM6NGdyLEpkhizySE9Hbyu6gPyJDtLauS3UILqCz+TJ27QgpSsK+Y9CJPV2+23dm2zddLIr6VqaIGJzVhwD815NUjkMa9v9NdkFtHLt2uyEkYfz4aLOxHCMSUqkTA6fCKgANxOCTxr7maSLzNxJD5Lyqui1uS8i8Rx6ODPXDs62HNaxywjWskxkFU/jyXHzSbAS45YBhExbXqx1Otb3U2HB1btdxt8lrEq+fNA6K1RrBcLk1CeFClGpXUSKjCNL0tvdtuQu5nD2KzI2gjwqqBh4CwqxLFWJBJ8OYBwWc2uKag5n5SqeT+NKOfkLeU2dlEsIWXrq7y+j2k+S9jtp9BaqZbUxIqUpdSW1raQG9ifw6Bvg3rp6SwtraG4kglWJFIaMFQQvioVIJqQeVeJOE2+27cUvbie629J0MjMHV2UsK5E1BFaU592DHjjlFF9tt+8upzjDsbv+S6hOFsoUm5ulTITSF3WQ0aYipCnKaU00tlbmu1SkOdu4KsJPl7nqKEWk8UhW0fzGJ0rQnwIS34gcwOymeNFLaHpab563ngtpLpdCUDtUU1OoX8NAQT3HtxsVtZXWlamPXcp1ti56X5lVm9I/Vzlgj/AIaZDrQTv18+3X24W9tHLrHkODKQy/7JOF59k6b3WIrFdqHIzWRWRv8AaFMR+5G4vemodUmGy3JYKl/UQnUS4Tmmqk7XEarSlXkfh1MsOsEt28m6FK5VH30xXW/+lsso8zbpVZhUgNl7tQyxF2TXWlLKQ3Z1z0JClb40l5tQjSUpO3ViRp6a9SPDXXp+sb60v4tdnKr0+IA5r3FeIxSu67Tumy3Hk7pA8NfhLDwsO0NwI9hw94Ky6wlzVCyk
ahGoHcanw/h1tqASMa1OVcfnpKPoy6p9ph5KySgH/Loe+niFdZqviyHhONcrflVqAa4DGazW3ZMlQV3CgtB0PiRrr3+B8OilspCiuAN9IGJwQ+DeRnam1jQ3isNqWlAWo6JUDoNFA/0qHQzfrBJYC4pqpwwxdIb1JZXaxt8BOJq5NhsW8jtXbTCFhxv10rTofnUNdpUARpr36U9q3RrdzbsTllh/6p2Fb2EX8IBqK1GB9VUaw4GkNFUr1NqQkanurQ+XkP8AV0w3FwtNVaR0xXllZyM4hAJlriS2HcXSVNIlKb1K0pWd3kSAe2vcfb0lbjviVKDli3Ni6Qm0CZxmc8O20xCSwja2wkqH4Bt7Egdyo6aAAa6noXFfK5qxwzybPLAtFUfVgfTaJh9aobSkBLqwJz2gBlKSezSCNCIzR8B/UfHolHOw8ZB4ZDs9vfgTPaxsfLBGknxHt7v6o+3BM4sg1NdneMxkNRxOSu1SytASXNBQ2ZXofEflbtfvPQje2mk2yZyT5fh/vrh96BWzh6gtYlC+fWSnb/hP+jAn43y3G8dpbqNOhIUZDzrjC22JLzgSFuJ9J8qbeQlBBG30igaeOp1PRvcrO6upozG5XTxzGfDhShr7a4W9i3bbds84zwQyiRWAqHLLmePxLpI46Qp7ScPTLkwJtnxtMNchbDnE+LONtIedaSy06/aqQwgBTbhSgL0Gqgfj1C20usd4uog/PSdmeS58x9mC3VHlK+0u0Ssg2eCgqwAzfIUYNQcqtXtrgY59SYwK5yUiJsVsWkkvWykg7STuEW4iqBSdfDo9ttxd+YELZexP0ocV1vjbWIDIIEDU/an/AETqcRcpYEWB7e/d5JE+E0zY3HBLiG7NrIDXQWBns5LfqGvskXzjD29W1SJPqAgEqI16O7hNI3U2yijFlju+Gip/KFeI0A/2aYg9MrCPT7qmVtCwvNtxo3m6VHzLU+F/NI7KPq7TgFcMxKkc1cDprJeD2U5nmDjt6T+ivZw2+xGGTV/qLSMrvraI6tJI7IQl7fpoQncCX36eX+B7h5nmhDZTDxeVSug/9Gin68u7ALo8Wb9XbP8ALG2Mi7rak+X8zWnmr/08jrT2AHhnStZftYl9RzFzHOmVAeD3KPIbzLzTjbqi2vLbdTaygKCkFSdCR5HpHe9I2SziV6UtIRn/ANWuGrcNoZ+sN2uJI9Qbcrogih/38mCLYVrAg+kgyIykp1WVNL+QDwAOhT36WZrjx6j9+Gaz24PFzFO7AdyGIlbS0FcdxABToo7VKPbude3fqK87Ma1OrBiG1SNOAwI5cVcWdEitxXFrkvNttfTqCxvdWlCUbU/NoSrqM5ZgW5YkyTw2ED3DjwxoWy7gTh884Xa6vEZWKRAG58OPAjeBS63EjIC5aWyBqha5KzrofLpbsJ/ndz8s8NVT7ByxTllC67febrdil/I4NTxGolyB2EFs+5QMQ4p46FFpxQeEWEn66Y4wgFTMSP8AmueJSgFW3aCe2p6ey+ptJIAwp3F0bSHUn+MxoorSpPAdp7TTlg/I5Idl49kT0LHlR515AXAiT/UUhmBEntmOZSStOr8vaghASdPxHXt1Dvl+YmiUPSBCDp7aZ0PZgx0UrbVNfbw6GbdZ4WQSNl5Rfw60/a55cQATyxG+A2qkyOucSXGwHEMrCye7ZPpnT7+x16znmrGfrw17HauLtKkggj/TiTlhLdg8d8kSVsn6T+yMgWtTivSQpDla80jRav8AMtQ/j0u2xefdYIlNC0yD/a/Vjo2B4Ns6du7ydQ6JbSEr2+AjFaNfhVW1CZcmvLspTUdpJjxRoz6iG0gkueYH8eruW5UuSMiT+nHKwgcooIOSgZ8chhJftJkJqMzGjprmGJkmOtDKdHNriGXWyp09+5bJ7dF7WQVBwC3G3FDllTD0o5frbFKWXFKSNSSVE6ga6lR8T36ZrWWoHZhB3C3zOWHUWCFHTsPE69vEajuPHUjozE+XdhTuovFl
jMlK0eRHh4f7upyUPHjgU4OrLGtJmrQpuO0hx+Q+tLLTDaFOPPOuEIbaZbQCtxxajoEgEknt1IVRQuaBQKknhQdvsxrETzOEQEuTQAcSewAcSeWCNUVGfqpU1B42z8v/AK0uwDow6+Mcx1wW46QlYhb/AFfUQTpt02+fl0Jmu9tW4883VsE8vT/ipWuqvbhttdn359vFhHY3rTecXyhkIppp+zWuXZw54cNhivJL9VAv5WHchWMuRNuaZEaRiuSyH4UShr6R+E76zsZ1wRZCrlbbTYQhCDHVtKtSERotx2hJnto57VIwqtUSIAxdnBFAQKjRUmpJ1Dhz33fTnUlzaxX09vfzXBeSPS0UrFEiWIqakE6T5hUCgC6DQmtFRXlcgsVDkH/pHyF+pyKKLjEu0/Qr5UFymiZCxkTBaqBjiH2rMSIbLSnTLW0Wkq/K3q3p2o22NcCUXtsYRKZAupK6ihQ+LXQrQk00g1I8VBQ5tbb5HYG1O23guXt1gLlH0+WsqzCieUCH1Kq6tZGkHw1NQZcHwrKVYxHku4bnichn1dzcxIqsGuVwW4VMZaVxZ05xDb8aynIrX1MtpYWCfRGp9YloRf7xYpdlPPtvlldEJ85Q1Xpmq8Cq6lqaj8X7OZvaOh9yuNt89ba+O4SRSSKotnaMLFq8LvxWRwjlVCH/AHYz8yqM2+xW7kvIn2mJZHjwbCEyZtlj9rWQnFLPyJ+plxGWPVUo6AbtSo9EbbcLM/lW88Ercgrqx+oEnC7uPTW9W6/MXtndwRji8kMiLmchqZQKk8O3GCHVtV4EuM8SkOfMNdNTqOw+/re8hlycZ0wGEAgbUCcsSN46yfX0YqHiCSNhCiClQOug7gjpY3Sx1AuRhq2fcmicBTliGDFm5cY5Rvu1AeLtXCUfRZdbPzR0KOg2jQ9Me3oY4lFaUXADfGU7hNUA0kYfbjerMActGTLEOwhFQ1QjYpYP3ajv1Mkuwh01BwIS1Ei6jVRjQViDzchUf11IWg6FL7JQTr28Se2nn1uFzkDTiOWNBhUGhYccHvgb23ZdzLm0fGKSdVQQzGcny50l0LSxFZ0KilhKt61H4Dv0vdS9UWHTe1tuN6rsmoKABmSeGfLDR0b0fu/WvUEPT+ymL5uRWermiqiCrMaZmgpQDMk4eXL/ABU7wlldliOR2Ee3s4TDchtMH5WXmH0lTS3AdSgkDwPUbYd7i6j26PcrJWSB6/FxBHHG/qjp266Q3u42Hd2R9xtmAbQaqagMpB45g8OIxHlibKfcsPTCYrSYMwhtsbSPVCWE7j4n/jdNHloqqTUmowlPM5LBaAUOEoQVHVbiu57kkfH7z1mXAyXhjTGGJqTnjB+mLdWUoPfX8RHZI0HWLOtATiYiE8OGFuLTpZTorX4nXspf2n4J6iu/ZiZEgAx+kS1RjsbICdNpV8B8Ejvr1HYVzPHE6KoFF4YJfHWBQcmP1k6QtTadXVaDU6JAOh7+G49+gm43zW/hUZ4Z9q29J11yHLuwQqHA0Rrp8rBcC3CpvXTUNn/hjT7EjobPfaoQRibHYE3BQ/QYPzGNKTXIS2AhvTRbq1BtpA0IIKz4kDyTqegDXQMniJr9eDkdhQEDhTM4+1mNRlvoabQqWvcPzHdUxkgHupLX43e/+bt1jPeMBqJoO7jiXabWkjBMya8+H1YJcnHf+SbKw4XEJ1AHyoA00PptpAQkaeGnfoKt2fMNKUw2Hal+XFQdQGAHlWKMfXayU6IcWg+GhAWdpH2kAn+PRtL2Q2zrFnJoYD26TT7cKFxtMHzsfzPhhMqaj2KXAY+5an3Yg3kEp5+9yN5CVNpRLkNBhxCkqQGHC2EFBG7eNgGg769tPLr5t9j5GxxWxprMdSe85n7zixd63obh1pPex1+XEvlp3IlFUAd4Ay9lMEfi3h2Rdp9XP7abi8SSsP8A9uVakpvXYjgCmHLOSvVqnVIT8yWUgvpQQVaE6Csr/eYI52jtiCikjWeBI46R+Ifv
HI8sWda3t7fXS7TsKrNepTznIqkJOegkZNJzK8F4E1xYRxzxHwRBCIlfgEd2U9GS27YTMis12EktFLvrOyH3Us+pvSFBOmnkOg53Sdxr81uNeAoMWFZdO7pFQ3T+YxGeQA91OGCdlXH8hFXFs8IefsaupS9Ll0brpds4nqJG6TFUCEzWUtNaaaepr8evPdNMQLhQHP4hwPZ7PuwWurSdk81FJ0EVHPgBlhApMxYookvILvdGrqFsSpoIPrvutqbaiVkZr8b1laznG4sdpI3KedSNOhN0p1BF+In/AEn3c8E9raO3Vri4P5Kip7T2ADtPAYJWVYXNncWYlYWLRbyfHnXre1W33eYXkcszLBhDiR3RAnLTtI7AlX29NPTm56L6SMH8iVQoH9QZfWK/ZiqfVTpo3vTcG4hf/WFpKZGK8dMzeID+q2k+84ZkGvZlwXnNgbdbSFTmk6JBJ7fWNJHgh0/jA/Cr7D01tOyOFrVTwP6D+jtxRkNik0RelHGbD/uh3HmOR7sDXK7aNEaVHfVoEBSUSUn8xHY6Bfm4gfb4dG7GBpWDL9WFfc7pLZCnLtwCLTlvJcd9JNNauw5EF9EqvnR1/O080oKbeYX4bkkdwexHYgjpmj2Czu1PzMYZGFGB5g8Qfp7MKkPW+7bRMku3ztFJG4ZWU5gjMU/SOBzBFMHHjm+4u5sbyjOcitpeGZlieJ32SZ3iGOiDHGcPVEQzBk+IrsG5EWJJlpaWLCGUnR1aXEqSncpaju9rvnTbQbXaRrc7fPcJHBLJU+SHOny5dNCQKjQ1cwCCDwF6dNbl0T6iQ3XUm4SyWW+WdlNPeW0BRfmfKXX8xb6wygsARLHTJiGqM2cl4ryjx1a01djMVzK6WvjmRIocov3K+xnVEmSoPSK+VEp2IyZNDKLfqEJ1eae1I3BWiRt7sO9W8737iCWU0EkUYZQ4HBgXJpIOH7JHGhFSX2zr/ofcrOLp6OS+trZAxhuZijvCxNShEQGqFqVp8StwyPhz5ZEye9r1JprinyNr6NXp29TyJh8SpbeLJ2uSGbZ+vs2GkOfjQtAcSARrqNetdlPZ2s3/ADMTwtqzR7eUuRXgCoZT3EGmN+57Jf7lal9tukvKx5TRbharFqpxIk0SAA8QRqAyrXPEdaPI+IeHMtp7rlbkSPmmRN2aZFXieAyDlOL4ZbOhYay/K7Avxq23n07z3qCDD9V71E6lS+2jJeW2/dQWMlvsdo1tZ6KNJMPLklUf7qNaFlVwKa3oKHgMJ+yXHRvQe5w3fV26rf7mJwyW9oxuILaQ1/5md6hJJIya+XHVgwr4sqN6yduMQyOW9mNvHto1+Hcmqs5ivolU+a11m8qQi8qpY1S8Za3fzWtfUjuaoUOwJnW3y1/ZKu3oUaKkbQkUeJlFNDDlSmR4MMxhL6ltd02Xenm3iYTxXVZ47pTqjuUc6vNRhkdVfEvFDkeROm9zGqIsJZeDFck6spSQkqA7eo5/2z8D1vGwLIKtnLz/AKML/wDm+aAkIaQj6Z4fuO+4GEhHoKmIcUpOmhUOxPj5+fQ266Wc+ILgzY+oQiPluxrhTsOXojje4y2Ud923cO489APj1DTYZFPwk4It1jFMubgY9Ruca+JWLUqSguyVqZY2qGoYaA9Vz46Kc+X+HWw7BK8gFMgMRj1ZEkZIark5eztwBeS+Y41jHcQVtuLKFeY18Ox/h0wbbshiavLCzunUPzSlTzGIM27zV3aP2EneqGwQXENglyS+8SmLXx0AFTr8tzQbUgq26nTp2iBhjCLTUe3kBxYnkB34RJVWeQu1dA+0ngB3nEheKfajeZQDlfLFg7gOOvpRMRVkNM5JLrhoW1vJkFMbG670tAhb59YjulGvVc9S+pVhtlbLYwt1eZjXmYw3YtM5WryXwjmcMu3dLKSl3v8AI1vBI3giQVml/dRBmPbyw++eeV+JvbPxt6XHyMRwuptm1rn5Xksh02E6AyotpMOGyhzI8llTXUqWlKUM
srA7K06prcN+v7u4N91FNJLcLmsS0NPaP8OIDh+JhjovpT023/ebeOCxt49p2VmBK/FcSqOHmOM1LHMqvLLFC/I/7j3HESVKapcazfk6WFrCbDI708f4ul7cT6jNLTiTkjrKVeCX3E6jpU3j1B3e4bRbukEfYg1tT+scvqGOmenvSDZ7SIG9BmalCD4V+pcz7ycAR79zbmdl/wBLFofHfHsJ3RLJxvDY8yzBT+H1bvInrB991evdRbGp6QNw3+5ua/Mzzykn8Tmg/sjKmLQ2rojYNuAaxtLeN+1Y1r/rULfbhn5Z71/crljIdtebuRnocg//ACvrr92mioWewSmJRtV7ep10HbXpDvoLSUmTQCTzJP68Nlvsu3E0miViP2iSPvAH3YbtJD5K5LkKdya/umobLSZdlMyG8lPSIUBR1+usXJrzrdVEUPwLf1ddPZppatOk24ubJCxsIYXcVq5H5an+salj+6nvIxgLrZIpTBsVtDc3inNgv5SHvkIOo/upX2jFlHtB9uGScsZS9hnCGD2eTWEaskuX2SyEfSWs9hxohIgvTt7eHVMgkBMqWVWcoH5G2kEdDdpTdOpNx+T2g+fdhCTK9Aka8CY0PhFDQBqHlmTgZu0osrZdz3mZmbzFVCE1KrfsQxDJnpU04ACrHBI5fxrk7iLN3cGg8eZDP5wwCLDechw8bnWfHnGypiSmvnoEhP0ee5dIRoUSVLEJhYBVuUnrdF0t1DtN68U0MiyIwkavi1k5h2l+EqeKgnLgcGbfqnonb9gf5O7tjt91I6O07iOV2ApIhBo4pXOoyB8HHB19vHtWR7k7WnouWuQ7PgjOK6DX2M21lZHWuZXn8s2hsU211ORIYNYaaaQ5CZ/4TS1EbT2IPdObFvnUW8zXPUMsNzb6mOsMWMKMulYojXyyGP8AiKoNQa5Upit9p6n2nbtwC7QVurqJ38l0gZLe1iZQPKUkBmd1B1SHNhlWmBf7pRi8P3L0+OOzLrN6/i7HItZn3O8pysoomU5FWz1fp7seJVoQ1fSo9EDCkOBJXYvOBwpBQVEftvT+39JdWTQFp22ESKQDIzusaDUY4gT4QXNVXgi5DI0xY1hf383SXjhSW8eeTTCSWJEjZs0jeJRnVVJOnLuww+Ouc3uJ8tl5LhebzcFiSJUqWiFUNKrC2lyQXWl2DLRDU4+kvYtJQo9j4Hpt3p+nLq6kOwXt3YXQGqNx4XHOrgeFgDkRmD3YjCy3O8tVh32xtr2w1FdLkOQvAKH4qaZggjlxxY5xz+7UwHokHkbEGsvhkBt3J8WkCut9ELWFynamw1jTQWtp2oKVkg/HqJs/VfXLzeRfbUdxsi1BcQkRNp4AsJSEYniaHFfdQekfT0Sm52Tc1tLoZm3uKyCvHSrRguoHAalOJz4L71PbTzCWa3EeUaOBkEva2nGMtcTit428vRIY9OyUiI6rcdNUOkdWJ8gb1BLIrxMfwvpqPboZl+o4qqfp7qa2cxR24lpkHSSPSe8a2RvrUYPLXCPJVztuarH2p0SQPUYmR7aqlRn2te3pSY0mQy4kj4K8etUm1zKuiBRp9owJTpnq4S+abYjP/pIv+/wrt8UZ7BjuotMOtQDsO9ltmSj5Vd9Cy4pRIH2eXQG46cu5dX5Yqe9f14aLHb+pYgBLAy/24z9z40E40/XHZKrpcRYUe8qI+xtJ8iXGkpHj8eg0nS9zAKvE1O7P7q5YY47vdrZfzo3CgceP1kVw3LXEU2zoLiQNh+VKdNpPke2vfoHdbOkwoRwwXs9/nHAmmGtJ4gVIeC2twISo7dBt00/wJPSre9JNLJWMnIHDlt/UsqjSeGB/a8dowlxd1JiPuVc2K7WXspLanWoPqSGXYUh9ttKnExnFJU2tYG1BKSe3Vc3nTR6fne/kDGylXTI1KhGBBRjTPScwTwBocWZtu/Sb1bx2MLBb6KUSRitC1AQwBOVcwQOeYGeCfh2QtSbeRdzbyE/YluBFqX5zPrQqRtmJ
9K7clj52p1s2yNIu78pkrUsDVR6sfZes4Nyv3v8Ac7qNNzpGkbOtYoqLpa5pmHmplEp8EdWalScRd12CRdsj22xtZVsqu0yo1Hl1PrEAORSEnOT8ThQpNAMDzLcWU99ddQg3IqV30wOTJcpyVdSpTQjxhkVy87qo19rJeebbUkaM/IsgIUQKR9YOnRf2f8Z2qSGazhun1apNUzkhV+ZmJ4xytrQFQfL8L0CkgW70NvfyZTa79Hju2tkoFTTCg8TfLxAZB41CsQfizWpYZ7GPVTyoTyHI8sqiTm1NoAAIWhDgkoJUNENqaWNCCU90/HXqlulkNpuACxzLA8oRgaZNT8XZQkUPAg9+HPdrlJKMGSrRHPtFRT2mo4ccjhCgRlynXXwNX7GTInekyA46UrWsNpW52Qk7UKI7j5R12DYbcBZxxRL+ZISwVczpjURrU8MyshHdnihdwuy91I7nwrlU5CpOo0HHmBgjcV4TUZnzDiGGZjATMoLj9Y+trhLlRn3jFp5ctsGTDejvtFD7IOiVDXQjw6uPoLp4JM1zeolFjYhSSSTQcaEcDnSvI8sVb1RuXmslvCzUY0JGQpnw9tOPsxKf3JcR8e8a4fU5djVTMjXjdzjuONTZt5d2qm6htt1hiEhFnYS0JZZZbCUgDsPDq0roRz2skkiKZURdJNagBgKVJOVCcsJhjEEiNGSKtQ59xwCMLZrMv5NwLDb1Dk2nv7OdEsIqHnIxeaZqLGalIfjrakIIcjjulQJA6B9MSql5LpYsyGSgIFKhSR7afbibu0fmJEjjwPSvHn92JK+4rinAePON1ZhRVs5i3oXMVx+sky7y6skw6hdxHhNwGWLCfIZSw01LWEDb8u7t003e5TTWMwuNOhUrkKZ6lHLAptvt4WR4QQ9VXiTl2ZnESqp6ty7POPsXuULlVWR5BCrrKK2+4wH40hKy4368dbT6FK2+KFAjqutie1vd38tyxUBqr+E0VjnSjcQOeGG/jkhtFA4ErnzpUDEtOf8Ag/jLA+K7HKKGkmxrPF49PXUrz99ez0V8GTdxmnIzEedYyGEtf864QnboCrXqw2dbq3a3nVfKSI0oM1oMqZ8uzAN4UtmE8VfM1AZk51OI30M1EiBHV6gUNoKVH4HanT7ytR0+zqpbyMfNMYDWIHhz7PtOHGJXaISMPERn9/3YlhxnPS/x1kUN785ddLmPw2dQFlKGIsooQe409RTqifh0J3ja23TbbrbaUklgqpPJlORB50H3YmbexiuopeEYehPccCCjy6Tjt3Zy3UPuV8x1TkyEvRBDxeVrNjknYh5sJWAO29OgPl1XEfUw6dlhTSxtpFqycCoWgLjkCDUUPGhHHFqy7Mu52oiBAmTJW4g5ZKe0EU9lcGdV1EsIYtoMqM5GXHLw3KKFFxSSUNraRueQ64RqE6agg/Z1Z1luNtfxi/tZIzblK5mlSRkpXiGbkKZHjhIns5raU2c6OJg1MhXhxIPCgHOueGXnWC3PI9MxHx9h+LbaxFM3cyGtDLcQyGJD0OwS4ELloR6e5pQHqNOpBB6LbnaX27bcsO3MRerpKOykgLUM0cnAlcqqfiRgCDlgVYmz2/cHnugPIfUsig5MdJUSJTg1DQ8nUkHCPLooNK3GxJr57RV7HyDJX2mEoUlxoFsyHQnVptx9xLTbDW4n02z4AdYXMDXszW5H/MtOJJSBwIFAOzkFA7BngttsaWlqrQVNlHCY4qmpNcznxPElj2kYcD7bgUvYSop+VOoIJSkBOvfw8OmSLbH5UoMRDIpXPLGiG3idV6pT5kkg6n4Dw8uiMO0tUVzxHkdQO/DpqnGjHVGQguOK0KQhKnFB0DTsEhXZQ7Hra+2aWpoJHdgdISWD8P1fTPGdVdeyBpGqJywe3/C9MEeHdSynuet0e1ztwjNO/wDpx9rEfxDCXaYdl8qG4EUzqioaJQl1or076fLuGh+89E7Ta5kkDPpFD2jGDyxLkpzxFjkPmfh7huUa3krk
nFqC+SNW8SiWLOQ5jJVpqltjGqBU+xStXxeSygeah1PvJLWxGu+ligXtdgPqUVY+4Y0Lt267jX5OCQxftHwoP7RoPqriuXnH93Dj7C25tPx3RV0OchKm0ZByU9IlPBKVKSt6FgOKuvT3HQBqgS5Yb+YbhpqOlubrHa4ZhHt8b3UlfjesUVe2g/MYd4IricnRSBfO3aYsmVUh4+zzGyGXMDDH9ynvEzjjf9vHjRzLsufi82++Nm5yKZLpYLFHb4twBGebEaLUQoKtlQ1k9dYwhvIClxrCSEKLjSVpl3HUdrc7cv8AE7gEyMVKQ5aK01IoFStFoKnxHUxB4UB22xud1lGyW6pHDn4zqGVaM5NNVWBNOGQy41rAh+5HjihjcKRuI8Bz2xi4Tm9ZezMUxSwg3eS21tIxprH0NyaaVEkWmRxpOTwqq5YjMqSZTcR2AQFvoeYSN8ZJ4beLp4SziCRiI9TRsKRkKFYBiasVYjL8a/EQysvTkO5WXzU3VUlubm5iIDRrVFV5BqXxFdJ0BvEK56T8NQ04eF/af7uczZtuaOfebuK+GsNy/J47tzN51yuLRZBk7scy30Il4K06LHFb+nkkriVcp6vlw16pDYR4q+w9JbRuOzx2nTEtuwtmCgwShprOROUgJGnUylXDMXYkHSwJZS15vMWz3LrvNrdpduiiCIxrHDNG9avVqllI0+VojKtmdQoKyqs/Z9T1dLMyjkT3g8k2kRpciyjchT/aXyrHrsXVJUXvXVkkOXPorjApaVKLoefcaT8qkFG4lMzqTZr1bpL7d7TcRYyDTNMI45zHSg8ySO3lkUwUz8wyLLHT/DJIwL2jcW807ftscL5llhEhAYCpYBmRWR1yNSgQgNUilCb8D9veFyfbPnNthU/jb3jxJUaxch5nwZeUeZJkyWGwUxWahqdZSYFiw4rcIUd+YttzUFSPEWx0vabXYbGsMExuQoyfi5yqFrUnUBQaCdQ4Uriut7n3G63V59GhGYHQeC1pmVOnj+0BnxpiJWPe9zmFlrE+PuQuO+F+fG8Xtf7eqb3nHj5665FwwVjD8jJI7635tc5Yz8dr4/08eNL+lmWL7J9SwWVlxEqC5lkI0gSrqIJGTqRXUWQ5EgZVWhJ48zidJBbyisB8qTSDoY1VuFAj9/HS3DllTBFyz3MZz7wPZ9n3INZYuYjzP7Y0w+b8Zq8TopXHSpXAlxf5Jg2WUNhijWT5ZHacw614+t3C6ibLS5Er690qZVLdaTJS/wDKtpLiAVqmohq56dXEEn9l+Z4A86APebcbiQW1wNMqPQcqVply7Ry9+GX7ePf5e3MCDX5fct3ra22kLN5Cr7ELHgdylsNOnXTv82vVU3/qxa7ZcGO4UxEHijFfsBxuPQV1PGJYmEink6hh9uJ5L5e48u4KJKK+JDdebCjIxqxVXOIKxqda+SXYCyNfA9M3TPXNn1HIPl5gzE8Hz+3jhS6g6cfao/zoNIpxjqPsOWEauzrBbJ5FHZLr7msd3Icivw48S3DSydxl0UpYgXITrqXoDzT+n4Uk9Wslhu6Kdw2pzHdAVHiOkn92Rc09jgrivG3LYLpRtW8xpNaUp8I1gfvwv4X/AK0bK/YMNnN/b63YQ37vhi5afU4VrbxSymKEKa5t3rhUlrKCH6uySB8sGelJPglZ6Ytj9U/lLldr6ziaKX/pQPEBw1Oq+GVO2SM5fiXCZ1D6Bz7pavvHprKs6Cp+WL5NzKxO1Gjk7IZQOxWOIA3+VXdLaWFJcQp9ReVzy41hUWkd2JPhPJJTo+w6AoA6fKsaoUO4JHV62Ztby2S8tHSW0kFVdDqVh3EfdxHMY5X3MX+23cm37hFJBfwtpkjkUq6t2FTn7DwPInCC/ZSrN5r1Fa+qho/BPh4/bpoepYCICezA4l5KA8T92FaE7Ir5zD7SwgtrR2B76JIIIH39R20yoVPPG0lopA65EU4cMsWg8FZxEyPHm6Sc+lyQGx6W8jUbUgbf49Vrvu3y
Wdx81ECFrni+ejt4h3Ww+Qnar0oK+zBDYr4lZkbTykoS0pwbk6ahJ17/AO/rL5iS5sSBmwGI/wAhFYbwJCB5Zbh2YmBjD8EwE/TltYCAUpGnc6eAHSBeJKJfFWtcXXtk0Hy6+XQimGNl2UEKer2WkbiFIddSB+HzabV8B5nz6JWO31AlY5YX953oozW0S58z+gfpxH25msQd7qwpKgSrXXTTX4ADyPTbbW7S5DFZbjuS29SajPCTw7lyZvPmDViXVLEpzJQEqOuno4fkEjUeP/quvvUu3eT0zczkZjy/tlQfpwb9K99+c9QbC11E6vP+y2mP6MRhquQUPQn3W/wbygaK+Xz7/Dpnm2khwvOmECPqQNMSPZiSWd5giuZ4XUVFP1vCGDz06HRSg+uzAA+Ou3pW2awMvz/au4Sj6guLV663hbaPZTzfY7ZvrL4cvE2G0vM8rKK3KMlONwYUNMqG19SxGlyVEnc839QpH/Lsgar+I61b5uE+wQxT2cHnyM9DkSB7aczywsdK7LadbbhPYbnfLYW8cJkBJUFyMqDUQKDi3OnDEHMhuGIvAvvtqoshFjX4zlPA1PEtopC02Mf/AKhz2A+lSdU7NE6du3c9NExr1BsUz+B3humKn8JMKmmB2yMR6d9WQLV0S4sFVgDRwt04DDuYCo9uI5+2aobd5t4PsGkrKU8o8fu7C2dEgZVVK37tD3Tpr9nUzqS5X+D3qahnbSjj+42APp/Zt/m/apNJoNxtzwP/AEyHE85zi0cs8vobMhlZ5Lz/AOZBcQFbsrtVfKQdNNDrr0gmUDabWtP+zxf3Bi0NwgJ6p3LSCCdwuO3nM+ePVlbW8eM96VjKaASv8ThX8qR4fPqOgEsiOamlcNVpaSxRihNDiOebclzqqvkSHpjT7kdKikPMNkLWnw8gSOtCqdWfA42zysq8RlhvcIZ/k2dZvRMfp1fLaiy0208hJQY1bDUV+oVHtvdd2pQnxUo6DrVuUi2lpJMeS/acsKW/bnJ/C5YIQpuZfAinIsScwP7IJ92HbyDydVPZu+zcUQlMplPtreae+ZaHFkLJBBCiSdSD0u9L/nGS5AAJyr9OeF3cNxhjs44ZI6qzMTnxqcO/Esc47sIav/ZXYQ2bN11Tp7PMvsIOiS5p2MZpIOifNXj00XMrxprPIfbhBmtts3LeRb/mrIMkpwQH4n9p4A4lp7d+FOIOXsdyW5mXP6hHYnuQq+CHGIi6qBEDsKM8Y6ijRlTjLrgOnf1Neh1xczWEcflprd82OZz5j28PqxdnQHSmwdRW90sl20cNsQEUMFYAAjW2riK6jz44rq50x+pwPPbOixj6O+NbYLajyTJbcCE71bW+xUVlO0HT+HUu4lPl1AIqAadndj202Cx3vkqyyBZGUMPxAEgH3j3Y3M9jX197cOSZDzchuS/iZjhDTatEtmTHLqUBrVW0tAjX4ePQHbJyN/tWPAS19+Lv3W0p0VeKB+Y1uRw7s8R246wZc6qbakKIdbaGjZTt3pABAO4A6jTv1afzpBBHDHPp2tWHiw1OROPzCizHWm1DbIjSfDXRP5jCjpp219UdMm13mogHChvNhoGQyNcB+hdXFm+gskISdEE6+PgU99PwnpztpKJUYrW/t/EQeWDLXbJeiQCpWgT8TqdPDTt0cgky44Uby3zNAcLjlFKbbLm3Qa9kk9yPhp8NOicUysaHC7PAyEkcMFz2q5JiuF83tzcstK/GpdviGT4zguYXEFuxqsL5Cu2Y0XHMksY76VM/TR/zmCpejaS+CtSUblAL1pZXt7sGizRpUSaN5o1OlpYUJLopGdTkaccsqmgw7emV9t1l1Jqv5Fhkkt5I4ZWAZYp30hHavAU1LU5eIVIFSJMWuF+73Db+bj+X/uCcJ0GQwih6dTW3KLdPawky4qJkUyaqVjMeRE+ohyG3W0qQAWlpUNUkHpKhvui7+2W5s+mr+W2bIMtvqU0NDRg5BoQQTXiCOOLZew672u68m56t
2u1uhmVkuFjZaioqrRggkUIy4EEZYS2q/wBz6G3/AP8A6M8C6FKEJP8A1hjHUuL3qH/zvdiUt9b2PSZYf/YvuR5/9lPL+334ixP1lGHI6z2gCgH/AGtKnUa0Hg/dqfd241lVPuelOpB/cX4GISdyj/1fjgHb5f8Azu/Z1s8zpWMVXpfcgP8AzU/9/iMy9W3ctJesNoIHNrpQP7mGjb5L7oaPNMYw1j318eTkZRFuZJy2o5Kjv4ljRp2GXwxklsKVArXrVTuyKnYr1FpI7adTobHpG62+a+fp26UxFR5b258x9VRVF1HUF/FnkDgZLvPW237vBtlt1XY0nEh82O5Xyo/LANJHCDSXJonaQcSP4qtOXONqLNOV/cP7tONOcPb8mlyHDrzFsYyBOdu5PltlUBVNjEB1NHFbr7oLlNvktvF1DPdaUsqU6hO3q12fdrm32bpfZrrb+p/MSVJHTyRHGreOQ+M6lyK5ihPA6gAbP6Q3vetitrzfuuN+td06MaCSGaFHE/nSMngjU6BR/EDk1afEApLCt2Nkyp9W2ppCklW36tI1+Vfceon4Ajq7hbaZfF2ZY5GuL1ZBVB7cETD8karVod9QapUk9tSf5a/Z1Hu7TzEIpyxts71YW1VFa4HeEtT38Tx9x+wklaqeAoneB8xjNk6jb15IwI1FAcsQdwua38pJy8xvvwW63J/oIP0j7shTrQIbWleu4HuAfDwPUV7Mu+oUocbo9wRYwjipGBZbyZEqY9KUt38xZPdwkePj5dGYLZQgWmAlxeHzC55nC3gWcZVg2QM3+KXlhS2sdtaEzIb6kK2L7Ftwa7VoV8D1rv8AZrLcbY2l9Eklu3FWGRxlZb9fbXdpfbbNJb3qGqyRsVZa5ZEdoyOCVc3dvyJNVe5RYyLPIJ7YUqylOFTj7jfYsrJ7DTyHUCHb7faohbWSKlonBQKADtGJsm8XG7TNc7hK0t+5q0jmrMf3icycMh+uciiw3sFtQjMsrB7d3Jsc6g+BBDZ6mAKdJByrX6gcRTISTUUNMJqYbj6tE/h7+Hifu161yEL7cSYkJ443kojwR3ShTvwPzbVaa6/9pXUehbPlicNKgAccaE2cQhW5SU69zqRqf9o+7rWcsSI6vge2dyku+kHElRO0aHwHx/l1r0k+zE5AagKMSJ4svZFZVqS06whmWpLQkSFBtptpA1ccC1fiOhI0TqTr0v7lCsklWBqOz9WGrbXuIo/LQLRu3liRmPXlEwtUp2Q1MmLICVu/lx93lsZ3b3APIq7dLlxBM4CKKJ9v14YbYGMmaSjSd/D6sP1VuiYyhfql0kEpAUPTT8AhCfkSBr1AMBRqUpjfHckkk5k/Z7sOqgjrZSiQ6FfmeZHZI11+Hnp1AujrJUcsMm3/AJYEj88PSVcRm20pLqVEjtqodtPIA+ST0PW2kJqBlg5JuMSKMxXARzKVDfe9Uu6htZ8wd2pHl0dsoZAlKZ4Tt0u7dpgWNRXEceSJ2LYI25ya/UxJ1zDjvv1tfvSiLYSWwhtq7t2tFfnwZaXGYo0/Pd1WoEMdw91eXDX56ej1eXKyluVEKgsqnkGyL9gIH4snXZNgut22C46lE6QWtvCVWWmqTzNRRBp4FiBSNjw0s34RhV4Sy/CeUYFhdUFm65ew3grI6KxWj9Xr5UkBxMlxoHWRFknUoeTqlWhHYjTqrOqbKa13BkuF004U4aeWnu+7HRnpfY7Zt/TcEO2UZKeJ/wATPxbWTnrJzNfaMS0xWvlWE+BT14ZVOnyW4sVD7qWGFvL1KErecG1sKAIHxPbz6VBO8balJpi0tIbM4lL/AGRlOGV7V85PiSTGVHYnQ4PrLMeO6UsqdU4rRDrbbpAOg7Eg+XRe33UFvKcZU58K0xqjh0EnIipqO7AoyjAoF7meO8iSZ766rGXVSjhTbDSKOVnS1PIgZrauD5pjlZDWRDiqHpNyyp86r2abpzVKx0qwr7uz68aJ9uVZ47mp+WB4cg3af0Ykli1j
EuqZ2tmqS6Cw7FfK+5XFlAoUo6+KmHVbv5darKZopRIuTVBHtGI27WMd3bPaSCsEsZU+win2cR3jEYsmnNYxZTYZWmPOr3n47qf6FpQSjRSP6mn2/wCYPVs2MZvYVkArEwBH07jjjnfyNivZbOXwXMLsp76dvaGH2YjzmkhiwYVPhEmE+socbKt64MsgkxnFHuW1p7tKP4k/aD047arROI5P8QZjvHb+vFV748dwhuLevkE5jmjfsnuP4TzHeMAxFAt/ekx1PsuOHe2ryCtdFtkjVKu3l8OmtbnTmTQ0xXs9ozjIHCrTcdSGZ/1UNTiUlDqVusKUy8hmSytiRHcU3or05Ed1TaweykKKTqCesbjcImi0SAHhkeFQag+45inAiozxrtbe7gmrAWRqEEqSDpZSrA0pkykqw4EEg1BOCKYM2M00zDikIhrbdb2p0+dlQV5DRW7aU/ceheuJyTK2bCnuP0rggrXERVogToNfeMDLkOmcYmuhplQivNJmsdgPypSfVGpHmkqOvRPbJQ8NWP5gND7ssadzkMU7KlfLIDD2NmMR2m4Q/bOuTH0usQWjqpwg7lqGuiWtO5Kujy3Ij8C0L4CktKdbVCj7cFHjrOmIFY9xTyjWT7PiWwlKk09hDaMnIuMr50FLeU45rqt+C8s/+zGu/BJb1UgerqHF7eNplkuBveysqb2i0ZTklwg/3cnf+w/FTkfDmLQ6R62sFsD0h1iry9JSsWSRRqmsZT/v4eJKH/fQ8HFSo1VDjWyZsZK5UeveM2IxIfaiyiw5F+tjtOqbYliM6S9GMhoBfpq1Ujdoe46NQoqqryDS5AqK1oeYqMjQ5VHHFe39xGZpI7Z/MgVyFehXWoJAbSc11ChocxWh4YQoCL2A/oWXAvXXudO3w107gdSWWJ17sDFmeNsuOHIHchmuNx/TdK3ilpPj2Kzpr28Anx+4dRjFboCTwGJUdzOxAWuPU+NbqeLUYL9GOhMZlRJ02tdlufe45qetX5QFTxOeCCvMzc6DLCTFwvKMsuYOO0ldLur20c9KHAjjVStP+I++4rRuLCjIBU66shDaBqT1Hu9ysNttHvr2RYbOMVZj9w5ljyUZk4IWm33m4XSWVojS3choqrmT3nsAGZJyAxLyHifEXtgx1OVZpYVGRZjVwXZ6raa4wjH8efKNZT9I1L/JfltkemJzwUEgflJPbqhOoett26pkez27VbbNWhFaPIBwMhGYHMRjj+LFnbD0yEvl2vZY0vuoQPzJeNtbV4tXgzDgD2/COeKkPcB+5hZZjkQp8CmNMU0NUyysryWkuQK6orUGRa2kODIOs2apgBtuXM3H1HE+m3p0iXW4Wm0xlLc6rkr4n/FTsBGSL+6lO81x1V0F6Q2e3TjddyJut5YCssgqan8ManJFr7yM8Uqc9e6jMOZrS4tbd+Wp63kqWHZchciQ1AbAZgQklR+RLMNCQpKdqAvdoBr1XW475Pd1RfDH9QA/XjpjbOnILNR5YBYcfp7cQrlOl15xxThQ4lRV858R5DTvr/r06XWkBzOZwxJB4MhnjerVNWEqNAcfaZMlZbbU4UpUtQQVANIP5jizp22gnXofOdZpzJxuef5eBpVWoUVp28sTm4O4C5uzGuD+K8SXyIZLC159mDcXC8frq9xafWlQbfNHKuCqUI+5SHUNu/MBoNNehs+0XF+3l0c237NNKsf32y8I/ZqB24Rd/wCoNkt5VHUO4LFZ/wDQRks8h7GSPVKR+7QDtxNa14m4QwW3nwr33OYdNx0MQVU/HHGtZcZZn+PWDX0r1tk+YZG2leOZDl1i4h6P6y/VjxGVD6VCSAeg9/0bDcqg3G9ihtomJZIl8wFaUCkCioAc+NCfiqMbG6o3XcLCOHo3ZLp1DCj3WmztiKcPFWRk50VQzHjiwX2S+/Hk3hmbD4v9lftG5N5swClXOfy605DuE1VlKRaSFy3JrNm7FhiA27MJUHpq3VrbSEIG0dM3SJFpePbdHbTc
bgzoEmmBHmaVzUKF/LjWuZDe2mFrq+aXYrGHqD1S6s2jZvJYmC3SIrbVYaSrFyZ5mINAYwKHA79znPfu391HKl/nU/lDF+EMUNHIRR4dh0xy1RXu4wFR59Cu4YjoXe5S5MKk71asDQBIA16HdUdNbl6g3DDdnW12SNTEIfOcglM2DCIjUxJpmdJ4DG/bbj086ZRdxk29946jvZY2e4MARQswHluBKSI4UQA1A18zU4g/Ww2sJu1Znd3ubZblctKIk3JsitZS5DZmIUjVlhC2ojCYzykFJSnUjXrZD0htHSMaw2kkrRJGFWgMaJUEEKCSRTKjDjh8/j8N4F223S3igWpEcYHAGuZ+I1qcieOAxmXLnIFhkEiTInPNPsSSguPj6gIfi7o6XI6FpLLakgfKoJ3aeffpHvJJrZ2WGuqpqTUnjnmcP21WVktqiRqFiI1ADLjnU8/bhAh5amZJakXLDs2QC5uddf3lW/8A4iikq1Tqe5+0dWZ050lGqxbpvSB78KdCtwRW5MDxY9hyXsqKhL37qV2aTbtpbRZahrYcXK8weSjtGbDnTLDhbyVlbesF9ltnardvcBUUuKAJSQNUDRQGnn1YAU8AMsJBYVz44zLtYT2ipqW320JHpLP5qwQexacTo82tHjqFAdeoRjwNcSs4C90nP/DFkmdxXzDnuP16kITHp/7gl2VV+LQ61lkqVEWwCNC3sAI8T18x9xcxwd+9nztS20fHObONcZz6uWyHGclxqQMYyDa0nRQlQN7la884kKKSAjeR19AJ4Y+EgccW/wDHn7h3t65HqmJtom6xl58gGDkFbHmNb9oUppMtv1Glaa6a9ZFGHEZY+a1OQOCJI5E4YzqO4/jVZDnSVIJYm1k+HAeQ4UkoW7GSoOLTu8UlHfw6FX22211GzGMGehoQdJr7R+nEO4so5ayRgLN28j7R+nCNAaZQva+ghRQUgqQQSdO+qdO3SCUZWKSAhhkRw+meMreFl4HP6fbhXFJFeQfUab2rBStC0IcC0LGhStpSVJcbUPEEEdQrixhmU1A4UOQz9oPEYPWpljYca15cfcRwphhTeF8HkTFzo9SunmvLG+VUvLihal9lKXC+eO4BrroEjw6R73oTY5ZjcRxGCZjm0ZK17apmD7KYsDb+q97hiELyiaBeCyDV/tcR9eP3/Ri4gONOw5UG8grQy2hFqj9PKW3VuHVwxwppbaySO40GhJ7dCbn0+vY2UwmG5t6ADzRopWpzIyIPf2Z4Y4OsLWdSJFkguBUnQdQqKcK51GWM0jFq6MqMuzpX1KadhuuNQUkxXoi0LZlw3VN6oU7EcZSk7tAtsjoHu3S+xtpbeLDVLqQlolKgrSjKzJkxQqB4gAy0wQtN6vmVktZ1oQwBbiDWqsK50apOXA1xEN+2jMyUtVrbUKO0Ahptvat30m1JDQKB/wAMpTu1SdeylDz6cJ92gRki29VigSMKgGbBVHhFOX4ia8dTd2FyGzZw73TGSRiSSchU8fbyz7hhxcO5JTY5z/g2T5PbxKalhi7TLtbaUliO2uZSzo8dK3XVJQguyH0pSPElWg8en/oDcEbcWS4cBniYAuwzJ4AE0AJ5DvphH6otDGiSKPCrj4RwFKV/XiX3u/5Bwm/4+qsfqMmqpl29k9NZM1bUlInOwY63kPymo6tri2WlOAKUAQNerN3S5Sx22ZpWRXYKACRU+Na0Famg+rClGnzVxHHHnRqnuFDiI/HeQ0mNc1ce5DkNtDp6OjuJb1nZ2D6I0GE2/jltGaW/IdKW2kLekISNTpqoDpE6V3S1/jlwksioAzVLEKoLRNSrGgzyAzzOWGHeLGRLKCUKSAAcszQNnkOzExvd9yNg9zw7Px2syiqk3ttb43JrKxuQBNnMV2Q1Umc7FjqCXH24jCgpwpB2JI101HTjv91HZbLctI6LIyAKCRVj5iVoK1NBmacBngHap81dRxoCfHU0HDI4gJh17U0XKHF15f2cSpqaXJ4M20sb
B1MWHBitIdDj8mQ8pLTTSdR3J6qjozdrZN/DXMixpV9TOdKiqsBVjQUrTnzw4b9t8v8AD1MakkachmeIPAZ4sU90XJGB33DF7SVeV08q0vzTOU0NqWkP2bca5rJb64KCAZKWow3q2a6JIPn1dsxEFpLNIQEMTUzGZNOGefuwjEid1hTNi45dhxCamWY9bFBOpQlOu3soHTxH+Ygjx6qKSSs5kU519h44f4YSLcKeP2cMSr4OvREqrhUptuQwqeGFtOqS1oyuI0lwFStEr9RrVJ17nrP+Km2uFaajKEpQ5UFc+PGoyxuWx86IpGSG1VqM8+Xsph2jEOPJL8x2LvlyJDpdVUzS4/GQsI7tttpKEr1KFABKz4jpUPTXRd9fzXdHmuJGqYnZmjU5nSq5Cmqp0g8ThgO79RQQpCxEcaigkWisR2k5nhTMjlhVrJVnUzUM1tJV1kJZ/NkSGES0KBW40w60ywhSm1H0wpJUSdFd/Lpp2v5mzuBDY2sEFucyzAMM6gEAZjhUVPPAy7jguoi9zNLLMOABK50BIJPHsNOYxuWk7KLBClNWi5MUoUl1msIjhrsojc0NXig6diPj02qLpz/zEhYcCF8Kj2AcsDo49vgNDHpcGoLZk+/hgdpTte1U2QVKCnOxK1KHbc8pXzqUB/m1I6I2ccQoABgjIx8slTyy/o/ox7EttOqirufwhXhpr0w2/k5YgzRtyGYGFavyTHIGptaldirduQv6hKGm0eAHoL0SpWvfXXw6KCa2iXVKQtOZI/TiA1vO4ojacOJHLOHw0H9PrZynwdqYsSE1qVAHt6wSEgdvt60tv+zopBlDOOSgk/ZQYjtYzk0ZhTtzxHTl73txMJ9OlxjFZOU5tO9dqsw2pcas7119lHqOPzvzEV1JWR2iFvSpS0NNJ+J7dJe8ddvbAQ7XbmS5atAxz9pA4DvY044Nbb0z55L3Mnl260Jc5LTsHMtXKgzxSP7q/f3yRlRscdynmW1q1NFbL3E/t7s2ooPqIKXIGa8vrbLbG3UhxmoQpY0KQvz6V5Oo+or1WWe7WOWtSkAAoDy8zl7RU1yw02217fZKHtbYSV4Szg8uaxc/7WKT8q5Ht5Rns4/Bi4VXzH1qlxMeXOdvLQuEhb2Q5pZOycov5S1alanH20qP9GnQA1Zi7amlrmSSWPI1Y1JPPKmDckNzOK3DlgFy4aQeI0qKBRy4Yi1lD01CHmWo7iEPqdK1HVS1OObUqWogFSlKKgSTqST1JgaRWIiFRWn1g1xBv4VWINL8Yoa/1TUfq9mLoP3JuJ+ROWffvxL7ROIqR6znYjw5w9w/glTYTERIjDKqedlc2xlOuBTcCDXwLguynAlRTFhjsopAO/pvb91ksIVmidLlnmca+PjdiWoeACjL90VGEobxbrFcXLshjZlqEXiEACivOrEk14k9nCWXHj3tj/bkl23F/AK8W5w96VbXvVnLHuDuKxm5xvim1U2Y1nhfGNU6pxkWVW4VtyVbiUvDbNceUlUGPauy2dlYZStquaZ05VHCoPPiQDTtNQRhH3i/3DcB52mkBPhHL2nt4caeztxFzPqvLuSbGu5itrS65N913JnLWIe3H2+8j5Heyl2XGOWZAxVZLl+fY6EOogU1zjcPKqGDUsssohVq578lLW9lrbJ3vZrHeLYVEi3GtFWSOR4njIYOSHjZWoKgaa6SCagjEHa99v8AZ5DMPKeN9X5UkUckbVUxgMsiOlSanXp1KaFSGAOHRX/ty32UI9y+c8He873p8j81+1OJbxcq5fyOZUMYtlXN2OtuSbfEsHXbWjd/cwI0tpxL0uTYQ0RkoMh2b6KmFPJO5dPda9Iy/wCaNh323TZS0zXMEwnWURxLrklZy81vIDnRXtxUhquOOLH6e606f6gtB0nd7VcnddSRwzRPbvAzyvoSDyDEsgofibzqiq0WhoInN8ie8Hi+nx73p3/GsbAeVqXJpHFHJvOPAGXQKut5vs1szpWIZ7ltdiVh
KxbKn2WKW2bvn340mptZMKE8kKckuLdbrHe99SCLe+pNns7zpq6tY5rbcbSQQyyRydpUvHNFkrguixCqIFAAqv2e4+nO5XV90lt985632rc5LO/26VJEns7iNdXlhXRSrsAy+VreSiO/w1Cz84f9+Hth/cZt6HDeVr6h9sPvYr5kSs4t5iv8aEPAeaFRghDGD8rU05uBjk17J0JLAaW+2px10CA62tYhPZXO49JyrHfbLeyQiFRqF4oEiKq0I84SeXKMgxd2L5sNFDiLuHT19ZyfLojSiTgighyxPJKaic6eAHv7cE/2W1GW0n7g3KPt8zvE6uqxG+4a5T4GlU9Rc09s1RYi5SJzKLCfbjynZ8uFcPoesIcuQlx99Nkpa16vEdGxZX5i+ZvIA1i8Y0yxEPEykg0NCWBNTTUqsa0oScBJoruK3Uz1DBjXWCHVhVSKkVoKAFSciMuGOeut5KtuNZESFZS3mPUQtxhf5zCk+k6trY4zJQzJYc1TrtWhJHXJ/qL0o4uy9sdUbio5gZnKv3g5jFmdO3SSW9WyYcsG+s95s+JCbES9cDrCTvR9QQdRrprqodj0h9MWm87JuQeIuorUcafXzxN3yxsdxt2imVeHHBg4k96KMwyqsocllgxZEptpMtxwJKVgjVxKuxCkE9lDQjyPXc3QfqG0NslpuB/NIA/045Q659LDNObuyFFGdP1YvDw/l1/AaiFc112zlmNWDbEezrpryHLRMcbNzlfKWo/UusJOraX924jTd1ZG5bRsfV9r5UoVZhmjLlRu0U4d+n6sV/tW89R9B3guYHdoqgODzXsPJqd/14NmeY7xnzzj9bIydKVvy65UrDeTKBpL2QV8ZHyKbmsnR+4r694enOr5Gr8fTVCtNOk7Zt06l9Ob1ktnM2069MsMmS1PAHkjkfBKvhbnh06t2Po/1m21ZtxRbfqVYiYrmIAuwA404yKDlJE1WXkeGK5eRsByziC/r6jJo0eTGnRnJWN5RVrVIxrK6xC1BE2omkaeslB/OjLIejr7KGnfrpLp3qXaeq7A3m2OarlJG2UsTfsuvZ+ywyYcMcSdXdHb90Juo27eoxRhqimTOGdP2o2/vIfEh4jnhCjzUTNHEHQE6aeKknzB+7oxpZDoPHCw7+YNQGQ5YNPEWbu4/kkb81SGA6hAJXpr3+Yn4dR93slurMjiwGCfTO6ttu4q9TQtizaHMYyGDHtI6gfVQgK2DU+pp208ySeqzAeykMD8AeeOgXji3a3W7i+IjDtrs0GMMLhrkBc1fZ3RXaK2R2bSfN0jxPl1hJt4vWEij8v7/wCjGmLev4RGbdmJnPHuHZ7e/GpZ5bWyIq3QtPrf0krG7VXj56+J63xWUqOFI8OIVxu9tNEXB8fLAqySzr5UJapEnRatQkJUO/j/AD6OWMciS0RcsJe8PBLAZJGz9uGF7f2B/wC1P8auodWpsu5h6aVE6EKwLKEp0+JJP8+pvWJ/+w28BHipF/xo8TfRZB/8UNtZSdINxT/6FnxHHHmKxijkpnSEwG96/wDjEF9WhO7Y1+IHy79N9xrMw8tdTU5cPrxXENrUnzX0Lq9/HsxJvlGwrIbPAu1baSfb3x+uLKkJCnPpSu2DSkoV8oUoDUn7ekbYI5HG514fxWao76JzxcvqPJFFDsFCajp20oedPzPdiK+fWTATLnR7qdGlFp1HrR5TrSy2ofMglpafkWDpp4dMsKyABPLXRXFRz+Q/jLvq+nfgcYXbwGPaB73JDkxRQ1ce3H1FLClFJVyLYBKtdVElavEnoRvCSv1PtJCiui6/4a4s/pGa2T036jQvm0m3gZdk7HL3ccCz2n8iQJHPnBlPCsFLMrlTj6IWvTWQ4p3KqtBQj4Agnv5dCuo7eddvunZKL5L1NR+ycEeg5ID1Ftyo5J+dhyof+kXFr8OzpZ/KXK8N51gvs8lZ9GUlY0O5rK7ZpW1WncpKfj1W9yLgWUIAbSYkpn+6PvxcDrZyb5eBiuv5
yauX/hW5438qxyOiNJEZTTpLSndpHfbp3Gmmp/h49Bx5+ricHWSCNKVU05Yg3yJjLVy1LZSw1ub3IcG06DudCQBodR0QjW4XnhWvRHISABQ8sbGCMQuBuO6LknIAmM0vkSsk5BKZSkfSYOhLlQl5/eQfpGZc0uuE9vlSry6BbzdTys9mvOM5fvDP7sUZvW7ifrixjt1ZreymliqCKPI8JU0HMh2SNT+0TwONZ/inKOQskmyaZEKHUCweQzkttKESndQ46VMuw3u67BDrSgoKaBQQddet/Sof5CtDn9/9GI+7X1nJHDGJUTUxChjQ0JqtQcxkc68DXEp6/iK9oONpdVUX+O5FlUaNYR4MeIZde081MTtQY8ychLTxaUTooaBR7A9GbpjJKiyU8lSK0NeHKmGfZei7m4s5r7bJoZ9xZaIg1L3ZOwCt3EZVxW5k2ScicXZFPoothkOHWLcSJX2EFt1+GqQhuG2lS9Ar05DBfUoJcQSknz163Rzuo8JGk1PaM+ftwButsu9vuPkr6OWC8RBqU1ViOPL4gTzFQcRkVklpJyQvSrCc5MXMWt2Q5IeW66VqKitaio7j38+hV9MwGZNcWl0VbgzRj2e7FnHH2RyZvCmeQ35AkfT4naStHkhY0jQ1PkK8NU/Iel2wdjusAP8A0oH20x0VusSL0zc9gtn+xT9DgT4PyTUExGp8CPq8GwHWdElIKU7SQQOyt3+PVspaSEVFcsc4PuUQOlwM+zBTvqfEMthTm461xnXoLg0GpG5sofHb5gCPT/w6KWTzwMDSorgLugs7pTpNGpiHGR8VKrZjrkGc26jepaQoAa+Oh1BBGo6fbC8DoKihxVu52BDtpIpXGvQR7CBKQh9pKtugUW1aj4a9yCNdfPphhlQrkcJtzbOpOVe3BRddUtj50KSQAdVJI7ad9ftI6L2jLqywobjGVBBGBZkUZmel5KUjXU7dOx18j5efTFCCvDhhW84o2JU8k1KfctxHT820rRn8ucPY5X4XzfSsJ32l9jNeFtY3yQywhPrTVIipWie4AopCVfhbjp3Iu0P/AJR36Tp65OnZb+VpbRz8KSHOSAngM80HOo4ljS3+odXqD0pF1TYgN1BtkCQ3qD4njBIjuAOJNPjpwFeAQViXBrK6XCaU2NS44pR7j+hKUjXv/wBs9WNoYNnyGKImu3jyPbh0QsZgpZ3FACl/Z8ew01Pn1g6knGkbi/I4VqHjqyznJ6bA8NqnLjJMjmJr4UVlPZJc7vS5ToSUxYEJpKnX3VaIaaSpROg6hbhe2202Mm57g4jtIV1MT9gHaxOQAzJIAwe6c27c+pd4g2baozLfTuFUcs+LMfwqo8TNwABJwYvcbIxzE6zC/bNx7OatMP4hemWWXX0XQRsx5XtUrTkNoCDtejUW9yHGJJU2lbjWqktoUVXo+1u9wkn6w3RDHe3wAijPGK2XNF7i+Tt25HKpxZnqVuthsYh9PNkmWax21iZpVOU12RSQ+yMlkHYSy1OkHEYYcgw3Eenpt/C4jU6LT4EEHsfs6ePJrxxTpvaeIYdbMlLLjbzKz6LhGn/ZV2+Qj7D1iYSy0bGj5wg6xXClg09L+HY4dSw8qnr9WHUqae3GO3qkocCV69/h0FU0jU8VoMHb6Ou4SrWh81sjkeOFSxkSWVJUWHW0gaAlBSFE/aRpp8Ot8AVjQ0xEm1ggCoAwneop9ACtAlQOvn2+/wC09EkoMxxwOlVnNDjKxFQ2ytaT3UQBroBp4a+Hfv1vEgJocRJIGpqHLD9x+xP0j8RxKSuOPqWVDsdE9nQPgdh1/h1CuY1LBxwOX6sZwGRDQ8fp9uHMlTdzVzW5Cw3IVKgtMyiNAsJTMd9Nw+Wuwd/PoZKPIlUp8FGJH1DDBaL8xGRIfFUUP14G11fx6TfGJCX2hsUSR38dCk6n5T8es0gaY6h8JxNaTyvAB4h9uFXjLjjl3m6xVE4ywTIct0dLL1jDiCNQwXO+rc7IJ64tNEcCQTsW
+HFAHak9Cd837YOnIvM3e5ihyyUmrt7EFWPtAp2kYd+kPT7q/rWcDYrKaeIGhfTpiU9jSPSMHuLVOdAcWA4P+1FyzkjbUrkvkvHcIYdQlxdTjkGVlVoArTWO/Nku0tdDkIHiUJlo1GgJHfqnd39b9mgYptFrLcH9pyI19wGpiPbpOOm+nP5WNydFl6iv4YOBKQqZW9hZvLVSO4OOztxK7D/2mvbPQuMScmncg57KRtU8m6yRNXCW4O5DcfGIVI8hnXXQKdWoDxUfHpBvvWnqy5qtottbqf2U1H/5oWH2U7sW5tn8uXp3Y0N2t3dsOPmS6R9UIjIHvr34knUeyP2v0qGURuKKaSmOkIZFrNuLgISPIC0sJY7+fx6V5/UXrK4JL3sgr+yFX+6ow72npN6d2YAh2q2NP29Un99mw+0+2j2/Nx1R2+HePEJU2pv1Ri1T9ShKk7dyJRjGQhaR3CgoEHuD0N/zj1Tr1/xC7rX/AKRqfVWmC46C6JCaP4Tt1KU/7PFX69NftxSCJkGn5G5CxqGXE1ONZ7l9DWtLWp1TcCmyOxr4aFLXqp3040VIKiSSe/XTkSTXW0Wl3LQzS20Ttyqzxqx9mZx+fHUAtdq6t3Hb7YFbODcLiJBWtFjmZVFTmaAAYOhvWF16PTLSW0t9ldux2ndrp8fLoELUiTOta4JPugMAVaaAMBC6y1Tcp1Ae+XcpIUlRO0DXXQanufHpjgsQUBphQuN1fWaHKuB/YXbkpz0kuqUX1bN5+bQknVZ/7DSNVH4AHqd5MdvE0r0CIpJ9gFfr5DvwKWe4vJ1ghqZZHCrzqSaD3duIee5TIVgQcPQoiST+qXmiypTKSNlXWLOpBMKElAWPD1Ss+Kj0kbNW+vrjqCT4GPlxf1QfEw7matP3QMdO7zbjpzpnbugocrr/ALVd9utwPKjb95I6Fhydm78VZcj83Z7wDmmOcn8YWH0uTUMgJl18haxT5LTeolU3Hr5lHZ6BYNDalz8bDmjiO6Tqu9eJbyWil6eaOB5j+jtw/elPzEN48eZtWA1Dke8d45H3Y6O/av7icE9ynE+H8x4HMegt3GyPc0T60G2xHLap9tNxjlkhOpEisno+RY0S8yULT8qh1R+uQvppwOOhnhhSKrONIzGWeJwHlvLJf1ESfeOpiSg7FdZRXx22nIrv5bgSlaNVEJ8wSQe/j1IZWDVAoK4GrMmjI+OndT39mPrVxEFNeRW3i7GdqpO5aht/OjpL7LyQfwKQpGuvj306mwy1kVCSaVpXEy0kkksZTOqgaDlWuY4H24QcKzh9t1lvfo4kBCla6JeaUAhSVd+ywk6g9bZSYpARln9uI1sqzx55nCH7haS1kVrHIdXGL0WPGarssDJ3OxUoIZr7gtJ1Koy06MvL8EKSlR7K6tTobdoJCNsuGo7NWOvAnmte3mPqxzJ6/dGXSKvV21oWhEem5A4gLkktOwCit2UBPHEUKa7CFavo+rjyk+jJiKWAiRG3anv/AEvNn5m1/wBKvsJ6taS11DwnS6nI9h/UeY5jHIqbiIal/HE2RX9pf0EcVPbh5uxo0REd2GPWiyhrDd0AU4nUBSHE99j7CvlWnyI+BHWpBLISr5MPiHZ/QeIOIl3cwwaXjOqF/hPb3Edo/EMOCrmM1+gSkl1f/E/CQUkaKBHw79utEsTSCp+EY1xXsUXD4jh0rnV0hgiIztfUAPSI+ZS1HT8of1FR6jLBMreM+H6ccSpdztvK/LHj+nDDUzLFpf0UKTeV8uoZibmR9YwthcuI9q/FU0Fgb0hRKAfs6nWFzGZGS2dZGbPI1oRkQacO3Azc1uEt45r6KWDRkNalSyN4kKgjNeIBwGpjMJ47ClLcVs6NMjQIABGi1f8AaI6PKkiivF8BjcJIKrQJ9OOPJpal3QhCFjt3Vp5j4EeXWvzJRkcSA0bZqcfP7ZgoSXGUAAfiKRqEjv5aaePXz5h+BOPgjXkaHH1NDVyNGpLLeuvZwJA1+GpHdP8A
q6+GaVc0OM0SNjpk41ws1+JQ4yXpbaUuBALEdR0JS88khRB8y01qf49Q57xyRG2XM+wfrwXtrBQpkOYpQe3+gY2qzATfT2qisjIckuIU4446Q1FhRWRrJnz31DaxDjI7rUfHwGpPUHcN5t9rs23C+fRbp9ZJ4Ko5seAH6ME7HbJLy5W0tBWZhUk5BVHxMx5KBxPuGeHFyLmnH/tcwF2bXx1W+RZWBBhbWScnzWYpxLUeJBZGjsDHXZawhtlG31U/M4Tr1Td3d7x1zfNPdN5G1W2YWvghXtPJ5iOJPw8BTFnbZtIi07J0+W+ZuKLcXVPHpPCKIcmf9n8K+JzyxVT+57zBZcae0GBhuWJjzebvcDkdc7Lr4kX62RjVLDcTK/QacNpdfK4bRbZJbTudeKggdh0hbtusUczQ7dqW3UaUpxavFj2lsdnen3Qe2dN7TFZRxpHGo8yViRxH4pHalacySAOVMc9YwvlCZhs2noMFz22ynJI5vMvhMYvd/U4lxvRrLsdzIFyYkdFMjI7RJe0kKbU7Fa0CVdukncLjyl03RKSuRkahiOVF+LM92Lf2wWlwwntJI5bVfhZGVlLc6MpK+Ecc8jhl0HC3JGT1L16mh/QcaYbLsjKcwnw8Ux2M0k6qffsbZxr8oJ7/ACpJPl0v30tvZrqvJEhU8A58R9iCrN3eHBZt/wBts2Ecsi6+xas3uAx9RxjjEetk3NTHzznZqtJNrM4rpncb4vqCnUpRb8vZY1FrJm9Y2hNUXnD5DoXHuNo4JAlKClCV06ieSr8Z9pAxPtr579S6J8vABk0gqzV7FHD+1hXwnIMsqhDucduOGfbDUvPL1tKjG5vLvMbiGVhIT9fdR3wh6R/4WjKUqPfsOpoium8SKIV41IzPuzOIV9d2mho4IJ9wnIoQziOPPj+6AOPb2Z4k/E/6XWyomUcvXvuW57nqKX4TPLWfx8Vr7dsLCd9fgFS8pNDUK/pU62hWw/Knr0lreSKVnlUOeZ1MadyGiD2nA6wggs5Qbe2tbc1+GFFBr+9OQ0rewMBi0L2pch8QZrJvsBn8G8Scc8K0WCZHl+dW+EiSxnOJQqiueEC9ezqwRMsFlExSUrjaBEpxQA0OnVP+p1kthJt+4W91dSbq1yIYbUNW3lLcWeEUDMgzDE0HCmFT1J643rpHp35rYokl6mvJ0trSNkLvNPIdKhQTqIXi3cM8sTS/auzFTOb8+NzoykY3U4RhDrSrFSJFhcRFrmy8TlTFf8NNgjFx/wA0gDUOL+bz66A/lyK3W37k+2nRaJOwlAyHzGv8wKK10Ahwo7K0yxyz/O7uNpsEGw2/Uwhn3x0UGgDKknlostOwfMMNP9UEYgDx1ythlnyTlNXyFQy48e4znkfMMGrWJrVXDlVqZ1s1+ml1hO5lx1Uhl5KNAHdunUfaitlus0W8hre2uLmaSI8KhSSBlmKkin24u+fZr++6Tth09cpJewQWQnZlaQJWNNRoeNACP3eOB57mMu48o6d+PUwqePctwkoKY0s2Bky3HmHvqfUDqm0KipSpCR4FKj1G626l2KW2EW3WyRzCGjNqLEvUHXnUVABA9uH7ozZd4uilzdvK6tJUeEJRACNNKV8WRr24gPld81kBr7j6kIctY61SWm0NMxo9jFAY9FLDXzpC2dixqdANSekbpyyTd94iuJQTFHqcg8CUoAR3FiuR40OLH3F5dn2yS2XJmoitUlqPUmpPOgPDtGB1YyxHSHWI7mjTGx3RRLeoRtXJUVHd+YojXyB06uRRU0xXzGgx0Re1b2K/t98GeyHjb3s/uZZNlTtFzhaRK/CqGjd5MaocaiXrNzYYkzJj8QsKzK0vLqmoH57sh9xFfHQtuP6fqJ9R4tHAa+VEqmQKCSxNBXgBTnTjgfJIir5szEISQAMR1/dN9kOBezPIuJM/4JyC5yHgb3HUhv8ACoWROPSbzEX6hqltJtaidLjxJ8qovKXIIz0My2zNaU282+pRCFK0Txgo
X06XVirDkCOzuP6OONsbFXABqjCoOIE41NgWM6e8l817E+VGdYh+kU+hHZSPSDZbOqXErHdKfxHx6HYm4tf/AG//AGWWHuy5pRTZnJk13GFdQzshy+5aP011ZwIK4kWBT46ohQjTJEuahTslSVIZYbWlIK1DQlbRokTXEg1KMgO0ngP14gzO0kohQ0PE+wYudmez/wBofIXHvIrXtSsMops04giretmLqRmk+puJ8VM//kpyM2DocNgKl9CH65bbbTm1SkqRohReezu7Uxx7hEiJNkpU8DTnme0YGwXVpdCRrCRmkh+IEEdvaB2HhiIWP1OSSaeO7DtEh1tTO36D14LwCVJ0ZXMQUJSAfDv4kdL1zH5UhXBi3k8yMNiQWA5lzjRBxmJc2VzGitNn9NLLV42hDriUNrkTVaIZjqKgCsqOmo+3qm+ul3fbd0S+2wF7eeI6k/fQgE5+EAhlrXnU88WR0rZbJuti9vuKot1FIKOciUYEgZZkghvdQYkhjXuJua5thzPcZhwkBLRfdqXnUzi2zcmkuFfTvp9BFhSyNjr0ZOqlMrKkEjpQg6vktADu0RRRStBnTVobhlqQ0JXsNRhgl6Ghlb/1VKXzNA1CK6da9+lhUBuFRQ4ONDz7xZbRGW1S5dNPU+62qVaxFpjvKbdWpmVGlM70qQooSUjaPl7H8XRaDrfpm4jC+bomqRqbhWpoQfcKZcPbgdN0d1DasWCLJHTgpzHaCD+vj7MF9rIsfv4yhU5DU2LDu4tCJYMJQEqUVqaDS1od2hzXROnbU9Fxf2O4xMtvcROhJppYDia0oaHAgWd9ZODcwSxuONVP18xjRcU+0lTSNfT0/pQUpV/mJP8AV4a/x6EXQn0NFQ0pyGX188FojGxDNkx78V+ya2G/KkiXEZUsPPJJ2bF6hagdVI2nx6iz7ZaXIHnxRkkcQKH6xTGYuZoT+S7Ae3LDVucRoZ21qRNl0zq1titnBwLhx5yVhUf671DvYjFwAF1O4tfiKSAejfTfTu1fNALLPbzmmhlaqBq5a65hTlmK05gjC/ve5XjRZqki8wRnTup93PCzl9bfz7+mtuS7rIJU2Fj+O11TVyEMLsnFooKp25kqde2oiQv1BZ3LCXFPySsJHyLKbF3/AGxJTFNvFxMyLGirGAuovoXzGJ4AFuORq1QtACQpbdcOjMtsih6nxGuQqdIHblw7uPei5HhZyFxCYthGap58JyS/YyI7iREi1imkTRKhoeXpYxN6Gy2lzbIU60UkeqNFybo+SS6N5t0kUdjNGCzENQLHQEsmo/mKPCQGo9UII1ZG130Lb/KXSM8qMQBUcW7GoPCTmDSq5jOmPObScgzdFfZSrzILqXhlMGGIeROQ1qerQWE2VxWOQ3VpZkqDDTkmMsK2tI3pdWEL019TrfdRWlbV5XmtVqyvoGtajzJUKsfFkpeNq0UFlZtLY17U1tt04adVCSGgIqdJ/CpqBlmQGHPIgVGGzb4/Qutoq8psZrE9z0TYCBGjuQqV5wpcbg2QL4lzpDCSBM9H0yydyEBxSSCvWmw7Vt9wtrvEjreMVLiMLoiNQQkni1u1KCYpp05qocg4M3e63d1A0lmg+XAIUtUlsqalyoo/YBrXiaA4ImQxstmxcIXyDfWAg09G/BpIyAiaLOy/Xr1lTtQ9I2N/pjNVHiKXI+YegpgJCt6SLN30XMljG+4z+TGEIKxANrl1uPCW/AECEsa+ErQHVhQ2wKLkiBDI1ci5ppWg4gcyScss69mFqDFYUw2h+XM00A+RxDSRqO3ZI7a9VkLKGR/zppSCeRA+4YfBNKi/lxxg+wnEqeEa+lZx65e+kRIf/VggOSlLf27IrCisBatnYKGnbxPUxrDaoVDeWJJgMi9WyrxzyxlHcX0nh1lY68FoOXdnh/zHWH5XpR20oZb3Ep0+VKE6qWpCmgn01EfDzPQ3yEurrRbrSAcR99CKYKIHSLVITqP054wN5PHpEuOSrOFDYUk7WbKQ
wEA6IShDRCi6Aj00aDTvp36Y7Se3sEJmmjSPsdhl3DnQUHLGmSxe7oEjdnrxUH6zy5nPCGM9xCLKTsvAzMYAd+rjpU5GeSHHpEt1S0natthpIQhG3uFfZ1Nj6i2K3k0tNqcAk6cxzJzPIezGuba9wZTWKsTGlDxHAD6zmT2jH3K85WooNVhr1w4+w043Nhh5x1L8hmQ8Gi2wG21pbZjqWpZJSEFJ/qHWFz1CYX/5G3eQMAairGpBNKCg4Dj+vEK02/SPzZwgDU0nIUBA7zxNPbXswHZTmfXShshw6Jkkr19VK5QaIKk+pHa3LJ7jXU6dj0Mbeeo7k0iAhTjxANPYKnBoDboRRmaRvZl9ZxsR6GzShKJd3LkrV4oisNNd+/dQXvWO/W+2l3GuuSTWx7Vr9hPHEOaa3rVY1A7zXCXd4XCtYMytm5DfxGpjRakNQZKEPqQrtoX0bFxz27lBCuicU1xxaQBuwopHvGXH240fNHVURoy94IHuxVZ7mPavyHiNRe2vFFtaGjymsfx/MIrF3NVMuK9UlqxgKXZSlfqDEv8AUmv+bYcdSxLaUAgp2aFR3mzkhme7VNEVx4ZWRiUpkUIU5x6WB1AHSwbtGG7ZNxtbki3uHFYqPCrKKqwBEg1D41daaajUjLzDYqZ4G9sWTc+cw/8AS+HK/tGzaedjWjFglUR6DKiOFLkZTD2wtrCk69hoQdfDqJ0/09Lut3JEJPJ0qK8eIPAf0csGuoeoItl2+O6ePzqsaDKhDD6cezCtzT7ZBwNyTaYBlVsxZvRHllMmMptbbqVfKkFaNx7K06dJOn1228KSSFoG0stePeD9mBO0b+u9bck0cQSVSwb9FPZhrSeNMVVXSjFrGpK4lc6+h9bQ2F4NkkNkJLjjup1008R0w2FjapP4VBU5/o/XgVvqTPbCrENmD9Pdiyv3+1eT4v798P8AddxnmbOJ3LnBvGvOWAWb0NM2BNRAr2MSlVlqhxTbL9baVyFx5aN28x39uiStKg7nbTd3QnGdARwrXMgg9xU5jsJxTiXkVtYPBMK+Khz4ClajvB+3DZzTgrhb9xB+35U9q2QY57dPe7Oiyp3KfBs65NPgnM9kUqlW2Wcc2S0+hGvrF1C3nfywVuHWa2yVqsH8Zunkt4/MgDRQ9q5juBGdAOAKjSOFKY1bf1BGW+Xk0yAcmGY9nb3itew8sQ7wGHz9gF7Z8G8j4RaYf7o+GufaD3Ke3LF+Q4i6CDyvcU9FU4zleC43cPuxaeff2LOLUNtQqZkuxLd+qfiIWXn2UrkRQTrTUFkVZwy8g66NNKjIGuluPEHELfLT+J2gXa7g2l35Y8QoQrLJr8St8SsNUbDI6WFDh3cv+7ngfMLzI4GPcPfuHUnuayJ7NMlq/Y3WWL9XwCx7kLmeu9TmFtXMPQ8ut4P91umxZgKqXHX30fSuNIihDSdfV1i28dPzdMW6ILyW3mFRmzqQG8QLFahS2kjSCRUiowS6RnPSnUlp1Ze1+Stb2ByuRR2ZswtBq0MwCOXqw1eDUMiSPfDXc8Sv2s8L4to8WXO9xlzf8aZFzth/HMeJaRsQunpcp/IaScuiXJrqoY/fyFJstrvoQAfp1rCkLAJdaWk1h6PLeXAFpt9lZW8JSoH5aCklKZaQyoMhmy1GRFaN9Ir6zuf5vJOkNoJ3TrTfOobrcnkCkrHdTsqWCzOcgzRvcO2pgI45FViCrgfuOOE2PYzwL7bOa2Pa9N93/u4sau2q8q4fyWjvHWeL+S46nLWpu8mXktPVLxqLTzVb6taojyJcIMyYIktSI1jH5l2cdTRb+qCK3n2m6Fw5jmtnTyrdHiS2UKZFMj3ETPJ5xV4+KFI5I2D919T7hBbQ71se43qjdLe/tzHLbTQXMcshWVpnS6TzNKxSiLwwOpOpkcjTTBP/AG/+H+Yc+96eS+6f3G+1i34Nz6HxnyVn2T51xfzS2al6fIxtjGotKvAo/wBDMppYgWAbjMNLkR0Ijem4hxOv
Vg2/T7yWpsrWG6sIJJEAbb5Y7byfzA2ox6ljZBQl1KSl6msbknFTS79Pb3fzZuYbt1WrR3cRnEtMvEZA9Sf2iVKgVV1YLiBTP7fvuMu4K6+bJj8l4u7H1ZquTV1NfkSUKaClRVOWzaYcOxQflQtmS4VKGo6hb1t1xeXHnzzyT3DCjalCkAcNXBGJ7VA78Tre/wBuMIeJfLbu7e4ry9uIl8p/tj5/Rqdn44/e8d2iQVjEeRYVw1STF66FugzFtNhXTmj/AEp3spA89Ol+TYrgIfLSpA4EaW/snNT9YwQj3GB30uw0n8VQQP6wGY+rENsp4Z594jdbnXuIXbDUR3Vq6plIt6lRSQQpqdXrfZKyR+ALUvTxA6BQ7nDa3Py8shhuwcg/hJ9lcm9xOJ822vcW5kSLzIjxK+IfZmPeMSS429wHOdbh0DLb2vzP/p7FsHKn+7lVdhIxZmfEVskV9nYR0vN1UprTuJQZBHgSOrQ6f6wvLCUL5lSDwryxV3UXRe27kjRlAJD9eLj/AGc+62JyGpril3KE0VnkrwsuOskRKQY+PciMtEwUvKJ9J+iyppBiy2ydhO1Xjr1ccPUdlu1sNwlRZSi6J4yK+ZCeP9pD4lPHFE33SV/sVybK3doo3cvBIMvKnHw/2XHhb68WUcc8m4rzXV5FwpylircfK4cuwRkXH4WmLN/WawFE3MOK5jo3V+RQQn13oSDpIa7hKkkgAt6s926Cu4eselpy+zmlHzYKjcIrkfiib4Vc/AciRkcMvTs/TXqtt0/pz11bpHvz1CVolZ1yEsDH/Dl5svwvxpxxCjlnjy+4Su2GpE8X2HX6H5GEZvFb9OHkENle12BNa7ityepJCJkReikqG9GqT26R6I6v2vrawF3ajy72KgngJq0THgR+1G3FGHLI544p9SvTjfvTPen2jdAWtWZvIm00EgU5hv2ZU4On9oVBwyaK1W68xIbcWpe5K9Qok7goEaafeP4np7OkKQQAMVlGrag/PFpPAWdNor2KixfH6i60nYytQJhtqHyLXqSPXWD2HkOqx6ksCZDPGPB29v8ARi9+hN3Cwi0nP5hGXd/Thf5WTOq2HZ0NxxJQFOFSNdFJPzbtexJ6+bBLHMwilAx961spYlNzDUHjliHs7l60bcVHTKeW8HCkNp3EnQ6AbU9zr8en1dqgNGoKYpmTeLlTo1GtcbTOeypfpuXVmK6OgApaUsuS3NdDohlOu0H4nz68LCONvyVq32Ywbc5Zh/zD6VA9+Nw8szKC1rLvCpKqq9ppTU2uuFJQ/MakNa9w26lbJadQVJW2pKkrQspUCCR1tfaILy3e23BfMtpFIZeAIPeM8uIIORFRjLbeqL7Ytxi3HZnMO4QOGR8iQeHA1BBBIYEUIJBFMOm092vJqWn5srF+JH3nit196Rxpj5ffcc1Up91z00lTriiVE+JPQReitoWiRz34UZAC4koO4CvDFnJ6zdSznzJLPZ2c5sTZxkkniSa5knj3nGLJOfPbjy5TcY3PIWU8n4LmmHcc0eCW9Rh+H0lnjspykkTnhZ163bFlxhmb9aSlrRIaQAnTsSRtls/U2xyXcO2xWlxY3Fy0ytLI6uNYXwt4TWlOPPDLuvUvp51lb7fc9QXG42e62lils6QQRtETGzHUpJqAdWQy0ig5VLAvbX2aPUzNlN5h51bhWc+1qmlt8e48pz6qojVMqa36arclCUM3ccgnsoqIH4T1t8/rHzTF8pYalUN/ivwJIH4f3TliEdn9KBbLdHcd48qR2UH5eKtUCFhTVwo6+2p7MBW79y/t44D4l5QxXgG2zjkfMuTsh43nPnlrB8dbxKmi8f3ku83SKxuc+m3FgZKmvSWkhK9q9Rt0VqksN33O+gu94WGGO3SQARO5JMgA4kClKY1/xbp3ZdvuNu6We5uEuniLG4jRQBESRQKc61z9mBLiv7kXONRNbkUeJcF45YNJc+iyGh4bxODdVTy21NmVWzw079PJbQs7VbVA
eYI6EbjsFlPUStcNGeIMrlT7RXMYY9g653fb49Nolosg4N5Ka171YjI/WMSO4B5h+qkqsrmycnzbGQ/YTpj7pdkyps51cidKfcUoqcffkOKWpR7lRJPS1ulmSfD8IFO4U4fVwwzbTuS1MkpLTMSSTxJOZJPaa1PficH/AFLhTYxWJSHVNp3J/MTvW0R4A66nT4HpcNvoNKYaV3ONznwGGrVLqsrtfp4raVypThbCVBIKG1ElanG/8qACdfs61SN5alj8IzwI3/eItr2ibclANwBpjXtlfwoPrNfdhe5ErMIRj0lHIEZmRgrMZFf+lKjpkouERtNGHIu5O6GF/O4PBa1aeAPS9Dbm8uTcS5x6jl2+3uHDvxyf1Tvdp0pYC9vGf5oDUjKKyNKWrqUciXYyV7SB+HFUnOH7g2JYJeLZM2HSVEINVlHQw30/T1dZBbTFhxm20q2BxMdtO/QAbuw7Do9c7jHbQCNKKAOWEv0x6a629ROpW327ilMEkupVapCqTXMnIsfiY8CxNABgh8Fe/ebyev65qRZWePUMeJCtLJLTkiFQVs6QI8Fye80gtwIjktYSlx3RJUdCrv0nS30wkLISFY4/T3pTpKTbbCFdwYtPFGFA/ZUZhR3DE5cpxGk9xGEy4M5bDGRUTfrY1lfyb62Y6NIsOY/qFSquwUoJW0SrQHcnQjXqTabq6NpnI0VwV6s6O2/qrbTbMAu4RqTBKB4kb9knnG3Ag5DiM8UxZgJGD5NbR8r+mxyTQzn4lx+qSmYLEKXAc2SQX5K20ONFO1aFp13tKSodj0Rv21rVTqBoRioOkNuurW7NnNG63cTlXWhJDDj7uYPAjPBdvfcU7h3F3HMagMWXR+4eLlkGnyVDykJXjGHy4VdkUuBHcShbzVvKnGLGkfgUEOKTroOpfS+z/N7k1zOafL0YLzLGun2AUrTD76gb5cbV02lhCv5l7rQt+wi0107S1dIPLPDSpOSobUphJcQGHEllI102qaVt0JHcFI0/l1bttbMw4Y5jvrt0NTwpTEsuK89jT5jEdT6XR6hYc1XruDydg079wArqW9mQtQMC/wCLFjpNa4duawWpbXrIb7kaFTfynTTTcCnzHRTbl0+E4B7pcFjqAxGGxctK6xWGHXhsJ0C9VpUgHw+bpvtwhQasJty8jOdJNMO+nyWwdbSiQgPpOg08D37fb4+HRGIIpyNMALmN5a6swcOA/p0ooL8ItFR+bRGvYd1dx59GYHfTk2FO9t1RqMpGfLCzheV33FOVx8/49txXXENz6Z+I9q9WXVY6UmbTXEIrQmbWzEpAWgkKSoJWhSXEJUnDcdttN7sjt25pqhYVB4MjDgyHkw5H2g1BIO7ZN/vemr8bltUmiQeFlPwupzKOMqqaA9xAIIIBB4l8R8f+4ZS8m9vk2swnlB2M5YZPwBezGILNjL0clTp/GNo6GYcuK8NV/RK2BsanSOkJQpbg6i3Xo9hadVo9xsgbTHeopbSMgFuFFSpHDXnX981IsC46I2P1Kh+e6Elit+ptGqXb5WCF2qSzWzmiMCKeHIDMkRCgIV4/465B5QyOwxXHoMSrXjMeRNzK6yCUmppsNroTxjzpl/KeG6N9M6hSfSSlTqlJVonalakuO77/ALTstkl7csXExAiSMammYioCAcaihrwzGeYrU+x9D9Qb9uU9hbRFDa1Nw0nhWBVJDGSvAggjT8VQcqAkGG35XwDhXHrXAvb9ZSMkza/iKrc954kRjEdciLCTLx7jlnVTtbVqcHzzEqKl6BSFuqDTrSxbbHuvU96m69WIsO2RNqgsgaivKS4PBm7EIy4EDxKz9edWbB0Fs77J0I7S73Oum53BhpYDnHbAVKrXPWD2EFmCusaZFbI/TDOahyVtBKvzw2sg6jupa9NDr38enssmvRUV7MU4JJJjrJNcDRx8heuhGh8Pt18D1tIpnjJTqGnCgzZpQPTc1LLh/MA/pI8HE+Y08fu6+EVFRxxoKFG0sTTB
LweXcYtBxeDfFm4hR66AlSrKM1KK0oZbG71dEvbjp47j0mxpHPahoarJp5GmLGv3ktt3lWejRCUnxAHKv1/bgxZ5mODWVS0wICob5A2LqXEOBtOmmi4slKewPkDr1httnfRyltYZP3hT7RjHc77bpYgpTSx5qa09xwH4zUSTtRX28F5Su6WJ4XWyD8APWBaWrX4HTo8XkTORGA7R4hgCsUMn+E61PI5H9WFiXV2kSO0H4b7afxes2j1mVDsezrJcQR83mR1jHcRu1FYHu4H9GPs9tLGtCpp2gVH2Vwt4zDLclubIOjCFguIJHztHs4kj4KQSO/n18uZT5ZRfiIxqgtlZyz/BjayyxUylrG6CLIsrO0u2odVBrWXZlhZPyGGWq+PCiR0rfkyJSpqUoQlJUpSgACehyMFBu7pgkKRksWNFUA1YkmgAFM+VMMllYS3ssdlYRvJcyyBUVAWZmOShVFSSSaAca4s+9rv7ZMSVHqs/90Cf1m1WG5tZxfHlKFXWtqSHGRmE+I4F3M5BI3RGViGggpcVICtE8+9cetM5d9r6OPlw8GuCPE3/AFQPwjscjUeI08+4PS7+WjbrKGLefUFRcX+TLaV/Lj5jzmB/MYc0B8sZg+YDlcPSUNBidRFp8dp6ygpKyOGYdZUQY1fAhxmgSlqNEiNtMMtoGuiUpA659ubq6vp2uLuR5blzUsxLMSeZJqTjrS1tLWwt1tbKOOG1jWioihVUDkqqAAO4DFeVz7oPcveKzfkji7jDBbHhDj22s4E85DaWLGbZRBoFk29rTGM8iDWsFlKlNIdYfWE6HRatUC0YOj+krYW+07xd3K9RXSKV0KDFGX+FWqKtnxoR7hniqLnrPrO5a63fY7G0k6Ys5HVvMdhPKI/8R46eFRxoGDH2nISbzP3KYdhnBNVzpKgWs6syGopJmPY5CZQu9trbIWmjW0LLO4tpnF53a6dSltKFq7hPdS2/pG/3DqR+m0ZFmidw8hPgVU+JyeynDtqBhv3brTbNo6VXquVZHtpI0aONRWR2kpojA4aiTQ50FCc6YFHEnu+yTJ+UqPiLmfhHIeD8rzeosLrjpVjdxMkqstjVMczbOEmdEhQDX3EOAPWXHUhe1CVBakKKEuG996EtLPZpN82DcYtxsreQJPpQxtGWNFNCWqpOVajPgCKkL3TvqLe3+/RdOdT7VPtG5XUTSW2uRZUmVBqdQyhdMir4ihBoK1IOkNN59Wxl5fhsacV/7ign/Z1XSirAdpxaB4Y5i3bpBzTPrgkK/V8xymxQonur66+sJYVr8VhzXrtS2tyu22sH7FvGv1Iox+WfVd2snUm4XQz8y9nb/WldsbMnOFBostuupBBBSlRKRr4nQdbk29SakAnCxLuL00gkLTtwy5V0hzetT2qlnvqSDr31I10Hh1OSAjlliCbpTzwiP27bEOxnKUdkOumyytCinaywksthJ8A5Ns1oaR8UMv8Aw6XOpbhkhj2+I/m3EiqfYcz7ggJPeU7cWZ6XbQNw3abebgf+r9ttpLhzyLKCsS+15ioX91ZOzED8+vHZc+xubeSXXpKPWedcUdVhTe9lwKP9LrehB+Oo8j1AdYbWzEUQCxRAig5U5YsuGW5v93e8vHaW4uWD6jxIcVB+rL68VLc5ZMrKbeexHWBEjOrZTodd51AVpp2I0/l1R/Ve6teXRhTNVx1F0Ts8W1bcLmUUds8WT/tCX9rjF1yXjEaaG6CbIx65eirdDccXiIb6JchkuKS2HTVx2C74EpQD0t7ft0ty5AFaUP1/qGeIPXXVybNZpNVqSEqAK1yNBkO0mnux0dPZJHfwWyfiyoNpMitRSpxpSH268SZDDKkJfb3NmUtpYWUhR2juejV1aIkfljOnH6e7Cj0ru99d6rifUkbPVak+Kiiooe9q+7CaxdGBithJUd6n22qxv1Cdrjs9J9VWvl6Udsn+I6CRRhbnwjhme7LIYuyKZ12cNylOkeziT+jHrEWkoWgrWAoa
J2kgHUgaag6HXrG6auZ4Y2bcNLUHZg/G6mtYfk7NVVMZNcDHLQQMdlOBuNdy24jiv0WU4oEbLBpCgkfiUoBKe6h1M2udRcJrYpGHBLDioqPEP6vHHzqC1e42m4jhiWeYwvpib4ZDpPgPcwqtOfLOmK4paIER2qvqUyHMOyiO5OoHHfmkVkhhz0rnFbNQ7ItsdnBTKwdC42ErA0V10/sW4fxOyKS0+fhIEnYwI8Mi/uuM+41HEY/Ln1B6fHT25ie0DfwO7DPDXihU0kt37JIW8P7y6W54fVVa/wDKqbkJJgySFBQSS5DeSNrc5hOmuqR2cT/W39oHU+aA6w0f+KPqYfsn9B5HFfpd0DRTV+Wb60bk6/cw5jLsx6Mh2HJWicrRwBKw6hWrTzaxq080rwUy6jQg/b8evqxLMlYvh7DxFOI9oxDmlktZCk58faOBHIg9hGYxv1mSPV1pAukBtcanmsTFIdGqJCmHAsM6dwoL0+7r01gssD2x/wASRCuXKo4/T3Yj2+7vBeRXgAKwyq9CKglWDUPaDTPtGDjypzhO5zrYbbOMxK6LVwX2PTSre+uW0n1WXCoJGxo+moBPlu6U+n+k4OkpX/PeSSSQGp4AHI+08M+7Fo9e+rG4+qTQCaxgtobaAoAmbMRmGJyoMjReVcQzkWsQq2vRVRyNUq2LKtFA6EHX4EadWJ5D8Qa4qe3vo2WjAg0ph44LhsvNZxMKwMGqjPITNsX0FaUqOivpoyCUh+TsOpTqAgEFRGqQQm7bnFtkf5i6rhh4VB+09g+/lzo79MdO3fUUtYT5dipAeQio/qqMtTc6VAHEnhWZ1TiGHY1CQhiLClrQ2N0yxS3JkuL291fmpLLSj/7zSgdVtc7luN7IWkZlUnguQH1Zn31xfm19M7DtUQSGJJHAzeQBmJ7cxQf2QMILUy9yCycqMLxyffS21BPoVUIyEMp10CnnG0CPFbJOm9xSU/b1jKba0h8/cJlij7WalfZXMnuFThi27ab7dp/ltntXmcH8C1C9mo/Co7yQMFOFxFzMWmFWDeIY0hIJ9G/ySFHcU6sbljbAZsACB28ddB0Ak6k6fDHyjcTd6Rkin9orh+tvTHqwxgzfJ2/dJL/3iuPtw+2cE5Go65S4FLUZYw1H+tnJx64rZUm+nMarZgbX3IpbqoyhrsBK3VeI17dKd/fWe63QE0jQQg0QOrAIDxc5EFz9SjDLH0JvVjbCO2SCeucjxuCzEcgGC+FeS8zmezEUZcqEu4ynkbmzD7F+zw5uRKpMUsse3zW3Y7JeU9V18+PuUiOhQRHWkbNTvBOnUjfCtntqWe0uDZlasytUN7SDxPE19mDXR+zLHeGfc4THcI1ER1oVrxbPt/a58cV7VtBiWbe8Hif3p8pYcKXCOKzc12N8fZAj9fRkci0hqbiS8Zw5xp6bIyuocWl1ElpKYUcbg4jcUrRWNvvFpaXD3RrIFVk1LnoZhSoPDUvE0OQ5g4euqo4tz2tdq28ie4W5ileIfBMsTVMMrDJUfsNakCoIqDXX+75+5Ujk3mRvDOMcbuOH8doKdqtyOsy9mNGynkbJVupdhW9lxdiDmyzi1cPRFfIubBcNTawtDZSdSrdS3V7vt9FFscs/kxoVlnCeW0hJBCLNICwRB/0Y1Ek0NMfNh6Z3d5Z7q7CbXs87IY7SEqzLpBDyExgIGlJz0jIDPPFTFJZZTk0YZXlWJV2S3jC/XgZfzrOXlNPQJbO5g4txNAdg4ZCeA8FyfqCkgfKdOoll0TCi+ZcuQT8RUnW3tlcmQ+4rhxhG3bUQtlCDNzZsye8k/ophLyHNMkzpRg5JnV3mcSs/McZtZjNJx/Qoa/AuDidOzW4tARGA/L0iqd7DQk9+jkG32NkPLtI0UDiTmfaWNT9uC3nTTJ5kxzPADicAbIM7oMdlqViUdq4ukjb/AHXaMBEKOpIO9GPVjwOmz/17g3HxA060TTIjVhGp/wBo8vZ+vE+3t5ZFpL4Y+wfpP6MN
iqsc1yhdpaIVPsvomf1C2sCt0JaYK0oT6slYIdkOqOjbQO5Q8BoOhF3exQkG6kRXetNRoSe76UxI1wRsI4gWalaKNRAHFiBnpHNuA54mbU88ZHxRw7E4L/RIlFO5JkHkvOcg+oact8lx3Yazj7GFkD1msWgyA/OfRrskyAgkEJ6r+92yPduoJt2uXVorQJbwKOCO35k0mrhqZdKrThma4DJ0ZZb51tYdc3cpkh2y1kWCOhAik1VllJrTzWyjTmgrnXFxH7UMnLszwjnvLRGnZDk36q9Ww2oDamRHg0XGNnSUkdmItbYSzYPISlClaoU+rXUEjroH0I2jaNi2WaGzdFVru6lcFgWd5InI1GvHU5017ABjg7+fW13Tdt/2mCwtZJlkFiZXjQuEUbhE00hoDkqoNdMwCSRTFYqMU5umci1SnONsoYk1kuwW67NgGPKhvRHmFPwG3JLiENpW8haXSjd21GvS31eL27vg8MReKJnzArQDTl9hJpljraw3rpnaNglghv7dbhhEAqPUksGCkqtc9NNIamBn7hcMz+JfzbU4/bCtmNNy4xjsiRDhlxwrkVyXIwW0HILqVpPc/lgHXqp+oLG6RS7IwjYZCnDVyy78Wb6b7ztlxax28lzGZcyNTEOQMqnV28PbgMUcqbZ1L8ZclKH4zwnR2lNpS5uhhSZCPS2pUpLkYrAPmQPPpj9P0dby6L1yUD6iK/org11skaWtvoBzYk865ZYzKpr12h/VXa62fonX5FG1dCFIFQbhgMWS6xNkWvo1TWYkht5THqF1LbiVFO0g9WsqMBrodNePKuK2Zh8PPHQR7Ev3OuDZftrwn2Te9/2p3/ucxLjiwXYcTqx3Dsa5ImSGof6o9VwLjCcjsKloWWMxLaVEhWEN11SoLiWlsoKHHnSkJaQhomKT6aHKoIHDLtGIEmhRolXVHWo7sR8/ce97Fp74+UMDx6o41tOIuJeCa2fi/HXGlozHi5TDcskVbdlZ5TAjBMKjk/p9NCjRoDa3moDLJ2vLLy9NNydC+UKlqkkniSe7l9DjZEdbayKKBQDsGFP23+0+dYwGM4y+qntY+26owQmBJdbfcCx3lSUNFuNHPgNxSt49kgJOvUUW0rLrAOntpliQZ0B01zxdt7VsnPBGb19/j8Br6WTXv01lUy3Ax+qVMlbDy4sVppDioz7bzKHGlbSCUKHgo6krFUljNrNUK3PsI4HA66dopBcRZkcu0HFhdnyfhcPHcoouI+LmePbnPoriMnt7KFCpgUPqW1JklqItx2bJa/UnChS9iUF3f3BI6LTR3IKS39wZhF8C55d5r7v14HxS2wDx2MAieT4zQCv1ceeIkQsYwurlojuS3LyMGHEWMKqbIh1E90MmvkvNsK/I1StLJ9Rf/GUlXcJI6W7lXldpKZfdg3bssaCMHPDirba0rI1kzFbiV0dwupVEhNNvuNosGof6hFaUkhCI318cvsglRZK1JHbquOu5ZILGJo/iLMDlUkEDL7Afdh86OjWa7kVuACn3itPv+3GyzKjSHkvONh1SlOBSp2yZKW44EpcfUXUlKHl7Adw0J89eqUuW1P5hFWNeOZz4n2+zFqRRuqUBoBThkPZ7ML0OgoZT6HVVbJLYLqV6qQpKm07xoEnYACgAdvLoW+22Mzh5olJGdeeQ/wBGJiX93HHpSRqHKnt9uFuq46xaxmxlPz7StSXUFx5haXhHJOgcLerJWhBPca9069QIulNlmuEDyT266viViSO+hP192N0m/X8ETBY4pDQ0BFK91c/9OCfYYFd42hDmIcly1KUlP/KpemKaSe35hQ99RGAUkfhJHRLeOk97238zp3e3zA8PiIHfnqFCOWBdrvW37iSm67coANa+H6sqHEScoY5WoMisI77lTbIVIXKT6jDQU6xKUZCFNuMlCirVZQe3ZST1Ns5+uorVDIbe7cCjVABJHE5Z58ffiFc2/S5mdAJYQTVczSh7j2YaMvJsqu5kagk4PJQ/
Ic2OzI0jSDCYHd+wnvSAW4kCI0C466ohLbaST4dMmw77v8+5R7dc7XKrsc3VqRqvN3JyVFGbMSAoBJwvbxs21JZtc298hUDJWHiY8lUDNmPAAZk4ddvn1Hki4+WtTLiVApIcDC5bllFkuSoSaNn6Onnvthv1IUPJIaA+hSht+q9dGu4DWyty6o2TdLX+J28kxtYG8lyyNVSCdDsKVVZhmrEU1h1rUYSbXZNxs5/lpEjMzrrUBgQR+IL2lDxAzoQeBrh3V7iX6SVjX1Gy2yN6Lew4ilhkx2oaEmvq3mHUJdZfyFhxbyUEpKlR4h0IcBDVYlJtr+Q8yl1OA6pWhAoCilSAQZBVgDmaRn8QwGuUkS6+YKflRtQniKiuo1HEKcif63ZhHq7D9Es05TMZQmDiwedfjvrRGTZ2Ijuoj4/q8hQLlhqoPo2qKYqXFEHTQru2Ou13Eu63hVYLZHrU6dTlTSLPm2dV4hAxwSuY3vI0tYVJkkYUoK0Woq+XIcjwqRgV3cSaLgQIAk3KcnkOT6CclCnXriLOdW4l9S0JUj6thalIljX8h5DgXptPSBvFtctvqLZl547ttcLAEmVXNa1GWpTlL+wwYNShw1WDRfwxvOAjaFdLg/hIHD2Hiv7QIpxw4b3LoNvX0qq+2m5E3xbVMYjZJirXZIXXoeclIyOvaQhChTrtJD8L1AFIbZZiFagXkjo5ve/7febZTbpzcjbUEUvl+Pw0A89QBnEX1R6swoEWoguMQds2e7ivgLiMQ/NEsmrw51J8s1/HpIbTxJ10HhOE5rkmVJUhqqx6zmBadAuR+S14aA6JBXtPVYjq+czeTY2k0vYTkMWCOnIkj8y7uIkpyGZxKTj+dy5HxeI1BZqaBqyccsHfqWkvv7ZIQlpeshRUndGZQew07/Hobd7x15d3LLZpBa2ooPEAScszma8a/VXBK32/pW3hBuTLPJxyqB3cMO1WM5fasuG6zeYHJawhTUBKkspYSoFYQgBpKfUOg8dNAetkO0dS3cbNuO5OHc0pGCAAMzThxOVcbTumz2zBbSyXQg4tStSMq8eHHHtnjDG2yg2Ei1uFp1JTMllLJPYkqQncrx/7XRq06T24aTePPO4/beg+ofrxAm6jv3r5CxRKf2Vqft/Vhy10KioyhunqK+Ips/iUwJKSojuVKkF1Y1SdDp20PTXZ21nZELawoKd2o/W1cA7qW7uhquZHao7afdh/siwfr1TI0aU3X+m/Hdkx0LcitJfAYdRoOzSg38m4eDYHTZBDPNbGeJH8kAgkDIcj9Qy9mFma5giuRbyunnkggE0JpmP105nGCVMiuoY+sjIjtNPPPvvwtFPvIDTnoxkgDcw2rRDZ3ahISD5Hrb4CB5oAoScuJ7B3dnux8GsEmMk1FM+Hee/twiPI37Uw1NyEfOlQjEB91xiGxImuerpqWI7jyWU66FxxWg6+jSRSOh9nHgCc+6oHecfCx4yV9/DjQe88fZjRgYzaXFl9LEguulDqBJLw9FLAWkOb3pHdkgpIIAJJBHW2G3lml0qvPOvL34wmuo4otbNTLL/Rg51uCUMOBJiWMGHbqnR1xZzU2Ol2M9GeADsUMOApUyseJPcn4dM67fbRwlJFV9QoaioI5inZhbe9uXlDRErQ1FDnlzriqr3h+yObBsl818FldHlFVAcYfkxFvR50RhAPpxraTGCn36vbohix2rehK+V4KaJUFO72u52qcbltP+AozXmvs7Up719mHLb9+hvrf+Fb14lY+Fu33cNQPLKo4Z45w+Zo/L6c1s7TJol9ldvClxjf1SkrezOqYUdynXaxtTpnVj6UkM2EMvRnCQTt0PQS73m8mm86QmRgM+FRlyA5dmHvZRt1vCbeAIiA0HIHPLM8+2uNDAeYKWZXXKXT6K2YElxgSHQtxD6G39pUTr8zSmU7tPBQ6O9P79DLK3mZeHj9PbjV1BasYAF/a/RX7cXO4timD+8D9vvhvk3mfl+H7aoHClvl3B0j
k7KKGNex+TOMnEVcyNS4rAdnV8uysItlUxYEVUdTzrTtfNCG1kqSLp2u/SN0khTzY3UMBUKARVcyagAinvGOc98sWaSSFn8tgxBIBPGhyHGv68Rrn+xPBs7xy25S9kPuXpfcNM4wkOZJlGERKKwwDlKlTEi/8ndY5DnSGJk9bEuOFoKmYfdtXouuugNdNFtuiq6QXcXlKwCq1Q6k1JAJHAnCVe7POUa5spTJJq1FSCjAEUJFeNOODVjHuQ58wnhO0i++Nvhrm/CaJWKwqfjfmLBYHKXJbcm2fnMIkzbWA8HIijHjlQfnxrGcEoWsLACdda7ZFe3Rbb4pFBJJKHQlO3Oimhpw50wa226uo4UF0VkYD8RNefMUPDv44cD/AO4B7fXn6CTf8RR4nH1NDl2DbUX3Jc93VHEkJgLgQoCsUboVRGa2FXpkb65MJLG8x06oSNFybvpTe42SWGG4kuItXiVIyUqpDBAvJkYhszUGnM4Nybhaybc1oUNZSmZmpFQNUeEgD4tJUsxpQ8zlLyh9zOEse3akuuMbbiT254lkLLrNVSYjEpsAQ3Cfs2YdEy026tE6DKsQ64pCEiO6tS1LKElStQ+5wTW21C83NbgMoIBmJ0x1YAcQFUt3ivDsxW2zbrYS9e3fTuyRwrO0kfnGGFxJMwiZmrJQiRYwBmjaFJb8TmrabsIGNQ7x2tck2kHIJ19jtqZthGtBIsaNyulQ75qTBjR0Q55enqBbO9SRuSVqCtBXO7baNylguLWRvNgnahVg1aUDKdOVGrmpzBAPHFx2Eptma3nUAyoDQjTmQSCK51FOPMHBNix5XGHAVxYTULjZx7gJEepqYikrbl13HVYovTp7qSkLbReF9SdNQHGn2ld9qgDIQWlqajxAZ+0igH9lSa95GA7Fppc+LH7Af0nA9q2RFgssK0UPTGqFJStP2/KoKHSZOQ0hPfhgjXQmkcsKsdz0kLZbKkR3xtkRCEvwJOuuqZNfIS9DfSR4hSCD18iLg0Q0+7H1gG5Z8jz+vA5yj26cNchNyBNxxOIXEttSHLrCSinZklfiLXGXA9js5sk/MGmoy1eautO4bLtO9xGDcYY2rzIBz9/D3UxItd13LbJRJaSNQd5B+sfpBxBzPPYfytxDIusx4F5DjQESYrrlzXRjBqId9XoR+ZHyXBsgbnYPlcb0zopsoB266ujqvb70/wBw2xvM2G4KxA18uSskR9h/xI/7JIGGqLqnbd1Aj3mBWl/6RKRyjvqPC/8AaAJxT1lXGebcWcizMrsOPLavtJ9wL81vHaWsEbRMTIRLLuN4pbqdxv6dx1AWlFbLW0kn5RpoOiexdSbrslykO+obNK6RIwMtu3L/ABUzHHg6inbgRv3StrvVow2mRbogV8s0jnBHYreFz/VOLS/cByDFznCuH/c5xa7cVdjJhU1TnVojWFk2I8g0jDaKybfx2w29V3X5ZbW6R6T6kHuoHv0t6ebpBPaz7JuDRzWxUlFNGjkjf41U5hlNa05A45O9R9jurK/i3ayWSG9QhXNCrpInwORkQwpTVwNOOJp8V8o4f7qeF7gZnAYlP+kzE5qw2pbQxa1GRQmvTqedONmv/jebISUvTYyAlK1b0EFKhog7w26ekPVkG4bIzfwmZ/8Alnc1Qoxq9hcH9nj5LmtMq8MW9sNrsf8AMH0VcdKdWBB1ZBFkwAEkjoKLNGeclMyOJzXOuIiZBgd1wfkjdHfyot41Zs/q+B5LCSr9GyzG3VlEXIo5PytyGiNkiKTvjyApKu2nXWvTPVu2dZbOm77YSqnwyxk+OGQfFG445cjwYUIx+efWPQu89A9QSbFvCguKmKQDwTR1oJF7xwZeKsCDgr8aZDYQ7iNKS846846laglRW64VqGuunfXv9wHU6/WOe3KkZAYg7Q81rdrICa1xahXVUbNMSaXPUhLojgLZTtdkKSUDsUjXZ/Hv1Vj3T7fe/l/DXjwGOhIrKHetqAlFW058zwxAXljBXccn
yv0aE1CZcUtRk7A5LUCT/wCIoEN/cOrR2bdluoR5rVYDhyxz91R0423XLCFdMZJ9uI2Lqpjbi3VLW6tatCVHcon7yd3fplW5Rh3YQpLN1OfGuH3iuMyHn2X5n4N4Kk+J017bh8B1FuNwRQVXG+Da3Y1YYNuOUnHVdyBhT3JNfMsMMTkdU9ehl9H6citS9q8bavTXypVhXB0JU+0042pTAWnRWunS/uN1uc22TjamVb/ymC146qZaTqAB7CQaGnDFh9F23Ttt1FZnqaNpNs+YQudQ0BR/0iaGLpWmoBhVaihrTCl7uInFN7W4GIp4oyXPIlxmDltYe3xhnHcfTx9IcQrCqq6lOQLKE9kUIEb0+m44yhK0EjcHFgOk5N0t3nL/ADcW3lI9IuzrfzQPzWUAghDyzocj3C0PU5+l7iGzCHbbjelmnLttumNBbE/kJI2l1Mq5VFCVAYZAhiv83ue3SZ7Xr6jqDwy5jg47w1HFGJ0lS4z7gqHmJMiMMxucwtQykpr5jTG2TKWkNTG29o1AR6orbV6gHUSzTfOfMGeTznZv+VaDPy1jWvEE5DipOfOjNv130M3Qr29q+1/IfJw/Kwoo+fju8vOeY8aEDxuRRwKcNOqI/t6yP20Y5xk1W5BK4LxiS1nN9I52rvcFFYyTOcp4zcoVtU1fxJNjVdY3JmomnX6WOyXUPpUsg6oS8138O4PdmRjKU8seWYzpQPWpMgJOVPsxRb3MAsBDYIfNZjU5Fu7xUXwgezn7cU/TrzC237ZOKVmSsIVe2TlK/Y3MORDFEuylqrGVViaRE9uUisLKFqVNc/MSo6HcAMtwlEYzp34YtqtncjWfFQcPtwVOPcztcflx3EPyBBeWk7tyglCyddPgAf8ADpQvJkkblh1tbaSNfCMuz9OJ0U2flVW1KjWCytCd4Hqnt2BLZTr316ASwfmVy0nBAySadIB1jE4eFWJFZiozK7C4t7lbseppWHVbXIFRJcSh+xeSdNkiw3bWge6W9SfEdJ3Ud1HCBBCdVczTn3fXiud56gXfdzFhbOHjtJfJFD4fPfKRz2mND5a9jOW4rgM+8/J8xlyXcLwuypok+Uw7S1kKbaxq9yTGEcMylLMj8iL82/ctSge+o7nqH0xLcbi8sYAMMWZIYZZ0AA45ZE9mN8XoxJ1Rvq3m63ax2+pk0EZGMLmCKEKDQqOZJrUE45W/cVwhm+RZDPtL2JbW0iE6uGp/G7SvFY8qG640p9tEdbolLWrXV1WnqaA6adfN08t3KoyK4JyNfvNPsx2l0l0NtvSW2xW1qnlIEUB/JbSRTIllr9Zx49nWcc8cU8w1NRxZkttj8ia05AyuvzCrak4/KxR3tOiZPAHqUmVUb+0IEdSQ+HSkoKVDcFmdynhZRXkQcvqOYw4NaFAJdSPCeDK2oHtHaD2giox1LcL5U1Ex2iEeY8+028GrZalKQh+a6pSkyxHStTcaMlK9jDQJDDQSkE6a9Dp6vRuYNMErBUVGh5tn7e7AD95nsd42505pwPmfkXKMvRhJgVVHmWAY4pbaM4djSkikMi3cc+mx6OkKXEmPpbU+8y+wlPdvo1abibaHQRqI4Z/WPp2Yiy2qS0mjRBMrBJGAAJRj4GNMyVNVqeTDsxEn90/JIlDyb7YMfxqtrcbo8T4kto2O45SsCLU4/RMX1RV1VVXx06FLEaFB0Kj87q9y1EqUT0++n7yzW9zcyULtMv3HL2DFJesWkXdpZj4FhkP2qBiOlPldlJYQoJQCPTe3A666pSFqSfEbkjX4a9XFZcgBxxzNuenSa8sS44byKQxNhulxRIcaJ+YgKAWk6n/tJ6PeSCvdhInnGrLFi1E6bcLhOKSpPqLCFHv+I7ko76/LtV1H0eV4qZ41mUz0Unlhyp4Oeu3Q6ljdvAG7b8e/bt261Sb0lvlXBGy6emvDUDLCzXe2ywbcO2MrTxGiVD4nQfHXrUeqYlFa4LjoSeSoA+zChkPB8uqgmQIyxoyo/gUS
FAd/AfDoxtXUsc8gSvPCr1H0RLaQGUoaUxFa7pJMJwRSy4St91Y+UgHvt8erGtp1lTXXKmKM3Gze3lK0NScO7gyvlxfcVwzKaWtgtcl4g0laVkEAXERtWhHkpGoPx16g9UOknSe4q2YNnL/cOGD09NxH1vtbRMVkF9FQjkdQwbIL8pVH+6ClTzejXJOOsDakpUlpzmXO0rZSoHUIWGwCPAgdK7LH830aKH/sj0/+hIcWHJJdCw69csPFepryzJN5ccD2VrXuxFzGVVTKYf6gg+iXGy4Ts026p3ePfTTqyZ/NIPl8cc8Loab8ytMXT4Ix7YJ/Ay2ZztO3Yqq3fXLxZExMz0jsCNfm8eucd3fr+LrANAJDaeYKU+HTjrvpa29FZ/S113IxL1B5Laif8XzPw6f3eGKXM0xqkRkFsaRxDteJj3oFKiD6YWQnbp8p7ddD2ktw9uhnFJdIr7ccozQW6TMI2qtcD8VMRyUmMp70ColTq1q0DLKAVvOqJ7bW2wT9p7efW5pHjQmlf14xEEcrgVoeZ7B/Rgl0tqi3xujluNpCXKqEsN6FSEhUdBGgPcePShaKyxKB2YdN4kje/mPY7fecaMiugy1HY56To7ga6j/0Tr5Ho5byOtARlhamCEmmROPFXiN9aTUQ6quk2b6iFBuG0t1W1J8VISDoDp1Pknghj8yd1RO0mgxBRZHYRwqzyHkoJP2YfkaHa4zKUm0VLqpEYAKhla2tij5OMH8pah8CnqKVgu01Q6XQ8+P24lpcT2hpLqRxy4U9o/RjXvuQ/SjOKsYUCWylCggrZESYoAdtJMXZ858tUdeTawv+GzKfbUfUf142puZuJAjqjLXKuR+sYv19tft+4mwrEMU5yyHCq/FZ9Nh6burl5UwlF1i9dLp25Ntc3ciao/Q2i4e8LKti4zG5KtpUtI48656w37dtyuOmbS5ae3e50ERfBIQxCIgHxLWnaGahFQAcfpx6Qel3SvSWwWXV97Y/K798kJJHnarxa0DSs1TpQ0qOAZEqpoS4xn4+/cg9vnIueVGFV0XkSjg5PcvY/hmf5Thc+jwHMrhp9UZuBRXclfrF+U8na0JLEfcohJ0WUpMDdPSbqnatsk3CU2sksMeuWGOUPNElK6nQZUA46Se0VGeG3aPWbo3ed2i2uD5yOO4kMcFxLA8dvO4NNMch4knIagtTlxIBkJzZzVXcZQIdLVxf7j5CyjWFiWJRNz0qbIe1aTMltNauNVzCz8yjpvI2pPiUq/TvT0u8StcTHytqhzkkOQAGdAT+I/Zxw6dQdQw7PGsES+dus+UUQzLHtI5KOZ92I8457VeS38SkUdpys5itJmTj9lnWH0dQy7HLtk4p2fBrp65KUQ0OMLDTmxstK0OqVp7dNV31rs63wuIbIT3FvRYZXYg+EUBIpnnmKmvsOFS16I3drFrWa/aG1uSWniRFpVzVlRifCCMjQUPYRhoc9x2OTGcN9qfBNazNssDm4/aWeWuOrVRcds4yn0q/6qShDqZtsogpLRCiVap0K95an9MM2zNP1r1K5WK5V1WKnjn8zjQcl7D2Z8KVDdZQnf4YOgek0U3Ns8TtMSfLtRF8FTnqc0ppz55VqVdPHnt29wmVc4YHzJ7mMz4/sU8PVl9C42xnjiHaIiv2WRwf0myyHIpdvGiOCUquGgYaStv1dq0FsIKXIW69V9LWXTtzsHR9vdJ8+6GeScrUKh1KiBSctXM0NKg1rlJ2Xo3rO/6ptOp+vLqxkbbI5VtYrRZAuqZdDyytIAdRQU0gFa0I00Iaa2f5DHxHBcxymXtEXHcYvLqQVK2p9GtrZMtzVXkNrR79V5tdq99udvZR/HLMiD2swH6cWhut7Htu13O4zZRQQSSN7EUsfsGOS6tzArhKffdQX3ty1KPfcpw73FkfFSieu8TYaWCqDpH0GPyA3DevNlZ3NXJJPtOZPvxgayRciQGmydXVbd2vb4fy0PW42mlanlgL/EdbaV4nD19CO5GS
246G3FAD1lE7Gyr/AMRY/wAiddT9nURvDVvwgEn3YmoyyUUGjkgD2k8/rwzeSJzlDhzkP01MzLYNTH2V6JdjQUtFqjgPeGjzcRwyXx5SpLvVZQT/AMY6ie7rW3g1Ip5Fv94w7tQ0L+4i46yi2gdD+l8NjIpXdt0ZZ5QfiWILSCM88kJlYftyNisD3A54KnGn6thY/WA0RDd3ajYoFT0N9Ou5UdSiVJI7trOo7a9C+sL57CBmgPikGa9vYR2Gn1jDD6bWEe7SRw3qkLAfA/HSCalGHNK1I/ZPDInFe9J9JcIekTWH1uB/R6Mn8yS7Le/Aw0hPd5byuyCOx6pSGLz5GmfOp4c68h7a46H3O9NtbraRECi8SaKAOLE8KDj24u39gvEdfj1EqLkVY3Kt7uwkXtuyVrShp2XHabjV6XG1JUUQYZQwe+h2n49OO3bYIU0y/GQWb2ngPYAKY5p646vnudxj/hzkW6usKEgElVoS9O1mJburi852NS4/hcehra6KzEW0tuGww0lqOl4IYXNm7AAV/moaSCok6KHfoZf6Uqy0AH3Cp+/FldAvc3dnDBfAtLKZHAP4AzAAmmeYWnt9mIO8nc58mTPcHXe2finiKVltLh+L0V9yty/JyONSYlgWSZWy3aJxCxjPsql3GQR8bMNxEeLvUFPKCtpBHQi3WK3gW6uRVpSWFCOAyFRxH6cXHuMrvJ/DbMNqt41ByIXU2dA3AkClRy54m1iFRbogwqy4msKjIT/ykxhkMWDnqKUtAekq1WpoE+mQr5kpHj26G3N3HID5Y0+6ufZTGdlDdwHVctpanBTUe88z7O3Bsp5akMNNRgWpdbt2pQfznGWFbkvJWfmXIhqTqde5RofI9QoJRFJU/wCG31A/04NxtrXidQxDnJodZhHOd1gdx6cXirnGLHzyjXtAawjkAyF1ltYVx8GISrdkpltDRJiPoV/4Y6uzpO/uJNvF3aeLcLPwkf8ASwnPQe0gZoeTCnPHJnrJ0tYQbzJYX3g2HeKSq/K2vFJXzV7EZvDKBxjYH8OHFKxqTRyJFfPAZfgPuRXB4pLjR0J1Pba4O6T5pIPViw7il3Gs8BrG4DD2H6Z45E3HYrjarmSyvU0XMLlG/rL+g8R2g1GND041skVTqm2GmVFUSc4AERnFK1VBcUdB9JJUdUn/AMNz7D1NR3t/+YHiY8V5kfte0c+0YXprdLtTZyEIFqUc/hP7BP7Lcj+E9xwybmQ2w4Y3p/TojqUz9ORtWHUHapTiQe6tR0ZgUsuutWYVryp3YVJI2DlGGkKaUPGoxIDhiHVUzdPld039TiV9LkUFiUgEx7BKdUdz2bcBUlY18R0o9RSTXQmsbZtO4wqJFrzX9I4jD30nHDtd5Z7puMTSbBcyNBKV4q1KinYRVWFeIwOso4ej5BmWT0mJS0OzIjiLqJEcISqTRyZKESZUfuC79Al0OOJHzBGvbt0St9/+V2uC8vQQjDQ3dIBkD2aqUB7cD4+mLm96ludlsPEwfzIm/ahLCrU7UBBYDlU8sGitrqrFKiJVwEJZhQY4bSogBbjndTr7qtNC/IcJUs/5j8Oke4nnvrlriY1kY19nYB3AcMdH7Zt9ttW3xWFoNNvGtO8nmx7SxzPecOvB8OXmzEzLsusplBxlTy24Rdhodcuctt3HAyxjuMxWEOSJcmS+pLRLSFuKcUG20le4th923T+HMu32CLLvMi1oSAkS8TJITQAAZ5kCmbGlAbN6N6OG9RHet7d4enYmCjSD5lw9aCOICpNT4TpBYsdCeKpV+5xnuZ4NaQMBr8NXxRiEivbnQIlfIRHtbWO76aCuZkEFb3q26F6CSlt1RQs6KcdSUrUJ2radv3WJ91mufnr9XoxYVVT3I1KJ+zUZjgAagOnVnU+7dLNFsdhY/wAP2l49ShDpZhwP5iVpIPx0JIJzZgQT9o+Mqd2riZRLuZ8p/J7H6SMiUFybQ1jYUqe7KmLU6pMZ6QlSfVGgdR566dBO
pN8ujKdot0jVLdNT0yTWfhFMswKGnI43dH9PWlzatv8AfySzC7YpGJKtIE4MSxJ4tWjDiKHjhvZ3Dw7FcrkuVGbfpMMVUGMqtr0WaLnH51ahbz4qp9Y/ERFVq5qtzeR30V8Ooe23e7HZw01oWl1sQ7FQkitkCytUn2U9mNV/FbbFuctrsEi+VoUsWdtUbrxAKUOVeNc+eGbxXzBmHuEGcYpk3GWQ8lcKYvDcrZnJkJ9dTnVNMWpllf8AblhEdhzMmkw4h+oktRlJkhlJ+aQ4tuO6MuNqYI0V9JBHLMcoxqVG/dOfh7A40gHLhUh02HcNy3HbWO9L85aoKlkQ+Yg7gKlyBnpA1kcmJAJH4/8AbBhOP2Ew5ZdIzPFb1tbHHtvUNpqEw8QeAWiresUyJk5du2tZ+qdQ839Qr5ylOpbRGba7IRFYLdI7mPwtGy10U4aV4UPHVSp54KQ26wyK6Sk7XIA0TIwAckZs1APEOQ4U760EPIv7aPtNeymTyPW8XVt7lzdZKpV2d47+pNP0k1wrWy8w806mS5CecUtlxSvUb3ubSN3Xyw2y2WcS6B8wwIq2dO0U4AHlllivevLvqKW2aCxu7g28UlQUOluzVqU1Og504EVrXLFWPuh/Zww7OhbZNg2QWuE2byQ/FwdhapWLvrZb2pbqi0RKx5L+0FSh6yiSdAOtW7bSzBpUHh/Z4D+yeWIXRPW297JDFY9SyyugNFlBEztU1rMrULL7KEDHPDyj7I/cxh2bOYDfYbFZoWZy2aR2pskRcfs9SS0v15Cfq5Nju7OesC4NCQNOkncbO484w2euRKDTpQ1qQCQVPMGoqcjxx0mvUmw2e3ruU13AkLqD5jmi9+dKDuBp2YfeHezi548qshsOY2uMa5i1ZqbSChy/q8guEUtS+45a09dAQ+foLG2WEhEtBMhtSdhQQeoT9N788Qdk8pNJJLuoNBxOXYOWWF0ervp/c3AtbC4vL25V9JEFtO61Jp4m0qtO8VAHDEy/b7wfmOT0fuB9s1E5g+HORMOpObuQ2eVqusdzCjxNFcmRjzVO6lpt7E2Z0N1EpC3FeugKBKPn06i9K9LbR1jBf3VrdW15CYDE5CF6CFtTrG5ICAsRrehUkChNMV16meomzdC9VbX1ldW++LJJC+3xxCVbazZZpA7SOmck89QFSNaEjLFdvK/uEyJGZYDWYr/Z+YX19iWNtU+YWGDxLGxh08q1l0lBRwTcxIy1QoZaW40ENKbCVajUnpM3fatj28Sbje2aNF5Imq8rsCEQ+JlUqgICUDcTxOLv6R2her7K4liaTbo7KaSIwprBLMyMwbxGpcyVYtmCMdE37f13Sycg9zmCZQ8vJLTg9/Ccfi5C56da/ZuXmHtXd83YR6WPVxZSItqtxphK0qDSGwO579Wt/Kfva+oPpxb9Y3djbWtxcmRtEa+EIskix1rxJjRGJPMmmOBP56+rZPS632fadivLmCPcLiZW5O5jlSI0JJZVapGRAIocc7Gf+4PlDkzNcrubjkHKVQkZxl9VSsN2CmUQ6ers32osOMhhtBSyyGgdTqo99Sehe7dQbrvCzXjyg6buVE1UGiNDQKoAApzpmcdy9EenHRXSe1Wlntm220TS7fazTGjM0k0sSs0js7MSzVPOg5AYdXHTufZpHkQsPVlWQXNKXbSqYjfWT40tO1SreA82sLguAtD1gF/1J0Hj1XaXlybljAzTXAqxABYUHEFSePMcMWleWu1+UIr6O3EGkLQquoDIChUah9ffh/SuP+Scgqmo9/wZdJtYj78lvKYFfGhT3VLkalLzDbjZLTW5QDbaSFoV5dOfTck07NLcWYtpTGPGKjVU8ChyDczSuFLdtst9vOqzupJbRyKRMSRHRciCSSQRlnnhx8O1vKPC1zZVt1T5XecB5FIETl7ii6jWrmPZZidshhuwmQqZKXYkHNaqAwl2mumkfXVlhFZWhe1KkLe7OZkbQc4T8S8iP19h4jC1cqCuofGO
B+nLuxJTP+HeRvbLOb9u3t0OYXWSZZGiZvybzTRQp+Hqu8QyrbbcS8fx8qs0V0unxuJgz0G7u4TGglXdsY8n1P02OEzrgmzTyrckEmpbgSK5CvZzPaT3YjRDz21SU7hiaft09uthy9XwYHuHuU5zyFxDHfyuFlzabO3spPFlaIq8o44s8klB+Rl1pQLKZWOrnqKYJeksIUWFMtt77ULdxj5ipZTx56eYPbTl2ZjGqZjExWPIUxOb6jOoLTmSnLK3j2vgRVxsYw+vk/VU1bXRHFtRaNFe6pNfKrPp47qJbXoqdkFK1Oeopwk4PeyJPpU6VBoAOAHZThTt7ceWENFq4nC85e0eBVeL8nYjVwok7lFt+RSRbR2ay5hkKvcEDNozL8sLV66siAj162wvZDZdOqfWSrqXI0VvGZ4hR2pT93tp78h3YjoHkYIxyxo0nIOU55fN8e3NiZL2XuLq8clrVIixKbMXlPtVG1SFuzVVF7IX9JMHzthqQl7ZqynrRbXLXKtHMarSo7j/AE8D/RjbND5RBTjjXlZ/HNjKxGllyU4vjj7tQhqKlcGPeWEFXoT8hsmvkelSLCcwpxpLg1jRw22ACgkxtxmKDyI8oRy7TzJ7c/qGN9mmvxtxw/6nJHVwlAPJUn1Wk7U+OvfUKVruUr7+qn6+fTZQH/wh+7Fl9EIGvJq/9GPvw+qyfEk7PWbCD27jTt5eI8/v6qh44pMmGeLNJdVopwS61kBl11h3cnYhsDXU6rWFHTT4IQf59aWtaIxQ9g+3+jGgyksAw7/q/wBONtuY5GWDooaEHUHv4+f2Hv0LljdXNeIxNGmRcszgjUV79SlDbug1SQ4fBI8gsfaPh59E7K5y0OBpYU/pwOuINHiXiMxhKzrE3MhiibBQP1auSost6hImRVfOtgq8PUJ+ZvU6BRKe27UErN2tp2MnwHj2DsIwNuY1uIgV/wAQYjs6l1IkRnFyGFLPpS45U40FKaWfyZDOoCvTWPwqHY9M6XTxREQuwV6VAJow5VFaEe324APbKzjzVGpeBIGX6Qca0aIIr4lQXFQ5O0o9eKTGkFCtNyPXjlpzao6ajXQ9fIL2S3m82ElJqU1L4W9lVoae/GMtlHKmhxqj7DmPqOX2Y3q1igkWL9Q4iOq5Edi0CHAtL80SZDzSiy8skSJKHGNzncqAWknx6Ibdf7fc3MtmTGu5JGsoUkhpNbMPCT8TArVhWo1L24g31ndQQxT6ZDaSSNHqABVNKhvEPwqQaA8yCMM3K2PqUZSbJ+VaKhRqMUz82QuW9SiwfmtzxGMhS2wHWoaSkaAJPSzvu7XslruaTu8pjSDyHfxvB5rOsmgtXIhAVr8J7cFdusES728RARpK83mquSyeWislaZ5FiD2jAcpmPUqbdMTKZDMJM2LDmY+Leeybl+YgBwormViLKajtJ1WpxOmgI8uq62WS4bZbqaPcXWJJkja3M0gadnpUrGDpYKDViwpQHsw77hBFHuVvA1nVnjdxMI0KxBOAZz4gWOQANakduCTi2PQ4SEvQUJhPpQoJfjlTTwQtGxafUQpKwlaCQR4EHTpw2K2jtxrtwI5KEArkaEZiozpyPaMANzdpW0yVZK8DSmX0+vBw4+49dv5Qffb9KlhLBkOaemmU4gg/Rs6Abio/jUPwI8wSnqdNHHAhRaeZ2dnfjRFrkYM/wff3frxJ1qN6YDLaUIAKUIQkADRRASEpHZKR/q6gwWdTQccEZLgAVPCn0+nsxryJ7Ed5TYIKGwGkkdyoI8VDt/UvU9TS6xyaF+Bcvq5+854jhWZA7ZOc/r/oxpKsA8s6r0QB8oB7kfafPw63LIWfVXHioVRTjj22pCiO3fUbQPFSiflAHio69FIGH9o4hSs2deGCTWckrosaexlFY3KX6clkS0uBLTaZSlqdDjehLrrO8gKHYn7unOz34WlgLHy9UqqQCDQeImte0ivvIwp3ewPeXxvfMCwu6sQRVvDSlDwANPcMMhRRLSla
VbQtSdywSlbOvYl5HfVGp7q7gjXqCg83xLmp+se39eCT1jJU01fZ7jgr47x9WtBizsZqbV1QadY/T3DHhgNlK2wpaDvlaOoClA6AqHfw6O2u1xZTO2psuHD+nAO53CQ1iRSgGRrmf6MFJn0W0NtIYaSwkbUtstpbCEhSSVBA27lBKEpHntGnRdVyoAABgSzrWuZOMiGmn92xxIcKlqDK/AJJUEjXxHZI7+HfTrYIycycsatTDMY13GSgqGmnZSV7gHGlIV8qknUEFCgSFA9j3B63IoU0OYONUjVyPxVxW17qv288E5waTkmGb8KzutMiZTTKeUamVXTnQVuvY5btFP6c3JWQXq9/dAfPiEeBVt26YguZDPZny7mtacAT2js7xwPdhp2fqeW1UQ3f5kFOP4gOw9oH1jHPdzt7KuXuNrS4bzbgWbyGZjL0eZmXFMsYTmz0Y6tmZb4uttVLbT0NqUQ+wWmllW4A6joBBtUtrcKlxC1XIDOvIV+IgcQOJ+zDg282dza1huQlOCyDUufYeI9+JO+6/jGRYe1b9vWRhPA3LPJ3E2E8aZBSv4DLyljF4WP8oGZTM3b/ACrGZe/UJl5YOwpRaLCQ2pSZSkKSHjvs6TaEt7TzbCcyxoBUGWTxUCqGRHqBUh/ClKGuocMVutzbveym8lt1LSHxBeVScjQChBFKktQAHgMOX9t6x93Vn7s+P6pjgeg4a4cpqvKJmX19DWQoUBFEaG0ZjtXVoVzLexelWIjtslQUC+4lw9knohaXt/BtMsZtxLM6rpLyeWqtqBA+F25HPSfZgducWySXCPZ3GucPnpWtUoQ1RkOYPHiKc8Vk85XfL1RZczu3GA5DEwKxz+xvYCvonra6eoa+8s6/G0rciOvzkyFR7xIcjvMlSEI/oCTrt3bqjfPKEl/HIIBUAV+EA0oAtKV9mfHE3ZNn2eZh8rcLJcaa0JFakZihH4e7EYMh5GxnEMn4gxTHrmJBSvOKC4ziS2+hCGnH7qpXNgyXHyphEYRypDiVgoMVtvcPHqVZddblZlZIbidHjhLeFqaQtMgcyMic8ar/AKV27dZlsr+FZLKadY2jcVjdWJBDcOOXAg8aUxJvmCiw/lD9szifl5/HGci5iff41/TsoQ1JevJU3I8s/RLV1NdEUINjazYa0tsqVGWtLoSUd/G2t0vr7ffTmLd5W825eNHBcag2rUG1KcnyGVRly44/M70s646t6B/+6AdSel+07hdw+n8c06JZLIWijCR61WIyF3iXVmQjAHmDybnF/I/LXt290ES697/K3LVBxJi2Y2Ffx/xvG/REZ9yfYPxXrmqfzHGg1SyZ8jIZ0RhuwdkM/qdg4tC7B9qKhS18/wC09S28+/W9luu1W73yy0kZC9uYWdNau+g0Z2/LbSI6tUa2yIx+zvXHTmzdJtcbteSef0pJt8dzZZpruYSTEtJFAKxRukqMyZgISgoamW+R/ue+5u84kxz31ZSrgPIOHebuUpPD3H/EM2Fb43f8fORruRSUmKqyt9UhmJZCRUKS89IS024+468kNNqCEue922znpU7ym4GDa3k1HXGH0+VrJ/MUK5YjUXXTnyGVMcx7L1bsV96xXfpPZW8zb/b2ccnmFxTRII3UGJgvg0ypSZaKa5iuD7iv7mOAXUWZU8h8PZBx1lVVYu0+Q0ldlWPZFaUVxVuvNWTM+pcdiWje3aO2pQUBKkk7uguzdD3PUljFuXT+7bJdwziqK12sEhFcvDKBQ+/uOH/dBb7TvUnT+5O1ru0T6Ss0bRjNdQIfNSrDNW4HlgsUXvl9r923PcVn8/Hk1MZEy0dyjGLWmh1sV1aWmn5Fk6HIHpuOOBKSlaiSfDrGXoPqqzuRbz2y+afhpNEysOFVfUFYVoKg8cYzW7IA3mQlCQAVcEEngB3nD2r/AHScY5QwV8bXzeXp3Ng2bUCwjVTiXEBz1YUiW0wmc02dQpSRtSoaanr5bdJb1Luy7O0OrcNIbQjq4UHg
WZCwFOYrUc8QJm+XYiUFVA+LkT+yO0j6sHrjvK8tzCI6z9LWZDTyfUiyqezqmH6iaw62pEmLq6rcW1MqKXSNAUnx16dLr06k2kad0vURyoJRU1afaT28qZ4A3u9bba+Kd1jmJyBObczlyoMzXFdmR/tQe6CuzDObf25+7qJjnEeQ2cm9oPbrzTi0nl/FMSdnLMmXj1XNnqVaxsfYmKV9GY8lDsdpQSfwjpRuOlZonaO2u4ZbJ2+CWIgU51PA178brPrTY7lQschW4Ti6OCD2HSRl30Pswn4fwB75OFE3cTP/AG/8Xcp4nbstwcpZ4jymVZ1tzVFaWnHp2A5Ohu3r0NN/PuZfeWx4oI06hr6fMIA2zXj7buCtVJIfHGGrWpj4EdqgDGV71NDcORudnFu22lKMhIEhFOFW8Q9oY0OdMOVPJnty9rnK+PYAKZXDVdlUnDsryRP9tWuU5Z6zCpGmMRcuanLZhUzTswsyWZjIZQhRUokpBEbcbf1E3fZ5rbf7a13eOjRxtDKLcMtQDM0Mo8Lgio0mvIZYFbLt3p9Y9RWT7Zc3OzrLNG0glQuwbVQRpcRmqipo2pchngkzuPIvL0DmJx3OG38gwuQxyTg3ESHUvW8XGxJcbyyVQkARo9cqE40HGIylJUtPqqGp1LN6cbyvSV3b2UyGNJ6wTOTVi/GIyngzjgGPFchwwyfzM+mVp1TbXVttlm0O62sCXttpIeOfSv8AzAhcgECQDNBxdSxAJw1sNYcfZjJq4zdTFSltZUj8ya8hXcKdkq7gkeSfA9X/ADbhmdZqTj87bXY9QDIKCle/34sI4VuG4TLUF5a1he1C1OKKlHcO+pUfPpL3n806xi1emP8AlwIHqRh78rccxb2vXIaaCt6CtCgPFRTrprp5jrVsu8PbSaGNBiR1Z0xHfwmRFzpXFfWQ4Ma2cppbZT6ajoD2GoOmunxHVkQbt5kdQeWKDvunjBNoIpQ/QY3a2EIiULPhp+D4eQ+A1061veBssYJtZUYzTXGJgXDA36pO5R0Vp8NNB4DrJJio1jGD2eltIOdOOG1BxVEcNOLabDKW0ladBqsbR2JA8Br19kvPDQnljH5WXzDQc/bhv5ljFb+nPSGmUJC0rIVt8PHsdB4k9R03Aq1OJxvfbHaPW/0/VipPn6KzGnvyFaFLK1J3J0G4FWhT5ajol88ShBrnj7aWmmSg+IccR1qrSrV2U8EOI7JBPinxP2bT0Cvmdq1zGLA2yNVCsBR+eHdHz+shJTEXIbLfYBW4diNNVjXt2116W3RtVcOEMq6KKOeJC8T3GQ5TdYjU4pBdyO1yrJ4GOYZQB0stX9j67S7OwnyD2iY7j8ELdlyD8iSANfHqBezLbo/mnTGiF5G46V5AdrMcgMCN5mjFs5RzGSNJIpqBbwhVrlqYmgJ4Znli7Lmi0h41/wCyiJIrmTWOVSY0WqkpkxA3HZjL9diSklUlqQpjRCzpqlIPieqpvJJL9TcqHox/EKEUPCnKgxUbbNF01vqbJcG3DCaMgQyLKp8wBzIZFJ1s5+I9oy44i97ncHqs5kU2QyY7thXWzdPdKEVzY9Ih2TcdyYxGfTqW3g6442lXktJB6auiLVba6mdQAzE1y4V4e7F/7PucgjaydjrCa1NeOVTl28/bXClkXsQ4LsqMQsZxiRCmxqKVaRLE21gLK2mRd82bW2D7bnosTGYJLbYCNNyAfPpw3OCBoBNPEjoz0rQVUHIGntoT78OHp31df3sjCO/nOpWKLWinSc1KZ0IAJ78c7lXmlfQ835jXOMu10aNlljRwm5it8hFdUTXYcFt58pT6izsKySASVdVbvFolvMwiULGMqD7T9eLutL2a7UTTnVMwzNAPZwoPfi5XgnJUyGWYTMgKasmkN+mpRKfX7Fhxs69lFXbTpcRQWIODMbFAGBpp+hxZnjtNJzDEjRXdXKUh2M+2l0tK/JCGPmeCiO3pupQtBHf1AnTw6k/LTBa0yP0GNUO7WcV0
GLVQ1DjkVORz+7vpjnb/AHUZ0s+6jCscdSpcjEuFMciSgEntItMkyKUpwJP4Q+1EQsH4EdWn6ewMNpckZtct9gUfpxQfrJcKnUwi1giO1Sh5EMWII/rCh9+BTgK5cuHAUI41U36Kt3bQpOg1107nt1cthbkqCeWOYt3vUVmWvOv14lbgTcuKtsgtNFDiT+InTTwOo8tT00QWxK5jKmK6vb7Q5+n1YsH4+yBX1Fd6j6NzqYSlAaDUltCDrrrqdUH+PWu4sPy2PZiHb7sBIM8w2LgeGq6ns6th18NKJZQQVafi0H8NR1TfUDTwTELWlcdNdDfKXVqrNpqQMSihYrRBDSg2xqsA+A18PMnXpJkvLgkgk4t632+00gqFqRgd8u0lNBx+UtDbf/CVoEhPclPkR8D0f6aubh7tQSeOEzryytIttdsq6cUtcnTW28lRHYaAQhalkkgafOVEHTt5ddPbLGzWepjmRTHBfU0iruBSPhXDU45yCPQ8xcV5BbTI9dUUed43c209/sxEgQLONKlvuk/0ttoJ+J8ANepu9Wkl3sN7awKXmltpEVRxJZSAMQuk90i27qaxvJ5FjhiukdmbgoDVJPdh/wDM3OeE2R5SwjgnDnKfGOVswmZXyTnuReu/k2dWS8hmZHHYr4rqkposfrrGa59I2tPq+ks6ttLU4pwb030puMRstx6jnD3llAI4IUp5cK6BGdR/G5UDUQaVHEilC3V/X23zJe7d04mm2vpjJcTNXVMxcyeFfwqGJ06s6GmlSSTGVqK4hlLY3K2+GpJ0On2/A9PwiLGuKiNyuqpwqR3bRLX06Z0pthWgUwl5aW+/kAFdbRaJXUQK9tMff4iQukHM4VGFGMkBX5o2nVK+5+/d469a3iQHvxmk8smdcatm3CMQtONBEmwSla94ALcFKtWmwoDsZTid5/7CR8eoTxOzak+Ffv8A6PvwYgnRE0SH8xu3s/p44e+F4TYR8Rx9lwL+Sogp+YHvtjtjTv8AYOkqK7i0LTsGHbcdvma9lbOhkY/bjDY48/FWolJGp8tQR5a66duiUN0p51AwFnsJBxBwbPb9y27wxlf6+/URbxlTJbMeW0l4DUaagKB7ajoR1Ts8XUu2mwMrRGtdSmhwx9Eb9c9Fb9HvkUEVwyAjRIKqa+3DS5gziRyPmtnlpgtVzM5xbjUNlIS21qdQNqQBoei/T9nFsu2R7crmQotCx4nAPqe8uuoN6uN4mjWM3EhbQooq15AcsGz2B+3tHP8AziclyiCJXHXEa4N7aR32g5CvcsddW5jdI8hf5ciJHVHXNkp+ZJDLTbidj3SN6wdat0z038hYPp3a/DIpBzSIf4jjsJqEU97EGq4vz+Wb0uj6s6tPUG7Ratk2oq9GFVkuCaxIe1UoZHGfworDS+Lh/wBwLkTBsA9qnKlVl0+dGkcjYrb8Z4dTUSfWyHIcry6tlVVPU0kBBDkt31XS48lIITGbcJB00PNHpjte47l1nZT2SqUtZlnld8kSONgzM55dg/eIx3V6t7xtm09A7jDuLuHvbaS1hSPOSSaZGREjX8RqakfshjnwxBHEOL/ebzLxv7fvbhyP7f6PifBeMJnH9zfcxqzPHrKXaVOGxWvoYdVidc67a1OTWENJakOKU42JO4q9JCgOrMvd66B6f3fdOrNr3OW93K8WZEtfKdQrSnMtI3haNTmOB00A1EVxU23bD6n9SbHs3RO+7PBt207e9s8l58xG7Oluo0rHClXjlYZMSSA1fhBxMykein9wfJqOYyzJei8AVF9WuSGkuuRNl+ipddiOOJUWVKStQUUkH5zr49IFwr//AAthuIyQp3NkahpXwagD2/0YtG2lQ+rlxaOAWGzJItRw/OKEjs7/AG43uZefMhznPH/bb7eX02OeFsDkXOI2r1JxhTu/I+JM1olv+4XkEpbaSfUbUdAPU7t6Ng6btdt20dWdUDTt1fyIT8U7cqD9jv4HnlkZO/8AVF1uW6N0f0mwfdqf
8xMM0tUP7RGXmH8K8Qe/hIbi/i3EOCcHer6lKnVx4r9tk2Qyx6trez2WFPzbGe+AXFqVtVsQPlbT8qR8Vfed5v8AqXchLOaAsFjQfCik0CgfeeeGvZdk2/prbTBbg5AtJIc3kalWZjzPdyGQxWNI95PvQm8cWXu9ocG4gPtirbeUuPgFhKvG+V7fB4F6qkl5Cm3beVRQ7VTja1pbUgpQhJ/Kc0Sty316B6Bj3VOhrm5vv83vGKzKE+XWUprCafjK058/2hmBQ7epnqZNscnqTaWe2f5BjkYi3ZpPnXt1k8tpdYPlK9QSFIyA+Fsi0ovfFy3BR7JsryigkOt/9WcXx+gxxDiS1KlQuQRDblt7NT6breNy5Lqhr2CCPHpR9Ntilf1FgsrgV+Smd35gGGtP/mgUD24bvWjqiGy9IL/dLViPn7RIoq5Ei60qcu0RM7HuBxze1+MWQjstobeWSANu1RPYAfz67GkuogxNchj8rHtp2c6QTX78L7OLXUdSXDHcQU6L3EKHceHl1qN3btkCMffkrpc9JBGeH9jMmXHt6x1UZL05uzq4kFmQkLirnTJO0OSW1/K5GjRmnFLB7alOvS5v0iNZmzVtKTq4ZuYQDxU7CSRT34YtkS8iuVvraMy3VtJEyJkQz69QBrlTSjA/1hgVc6Xrc6W7KW6Sx6S7J06jQl0F1KSU/L8qjt+HbpI6fsDtllIJBR0Yr/q5fbxx1Z6i9RR9UbzbfKf9meFHA7PMUHT/AGfhI5UxQb7hs/Vb5mtMd4mPCkqRtCyAsa7VeB8/h1VPV25NeXxQHwDF0dBbSm2bSrEASMM8ErgCnq5UkzX2VOOIcDjOqQpW5W0gtkg7NO/zD4da9lswE88jPC113vcpPysbHSTmP19uL9PZ5QSLF02C2SEK/MToCPlLmjWvbxIbH8+js7CKKRueS/rxTVrA2573bQ0qq6pSOOXBfuH14tGZo3J11DiSYynokNuPDhRGtfWsNqETZoSD2L0x95thOnbRodV/ulxrGgfibP68/sx2D0RsItrj5g/EsCD2aVJoPaxwNqtxmxtbe1VEZYmz7SbZTYzTKGVfVvOkPuPJSkF6WjaEqUrVZAA10AHQOZy5J7fp9WG9n8ep+ZP1n6ccEGBKZWCy4oJJ09BSuw9QKG1tXmEO/hJ8j1GGThjTTXPH26BMfnJXwcadnOneOOHPAcfckxpbO9p5KkpJR+NMhsjYvTTxV4K8j3Hn15001HFCfsPL6foxutpPCGB+n0zxAr36cp00fPuMcZpkNG9w2qu7HKxXrS8iFLyN2OWKqSNfyXlRIweW0D+X6gB0PVldCtLt8bXb6ijkBK5ZLzPaKnI4pH1jkst4eDZgQ08KuXFfhMlKKew0FSO/EuOB8QsfclhGL5Z+tJo6yG1Kxq3upMVyS5mESp2opbWjbWUoekRmF/SSXlkJK2NQST04XnUltsMkiQR+b5tJUQGgiZv8RXPEKWGtQOTYoix9Or/rsRPdym1e1rbzyspYzpGB5DoMqyKh8uQn9kGtThJ5q4C5F47YVZU8Y5fgzPzSbenZdVZQB5m9qhukNNj/ANa3ubPTP0v1htG7yCC5b5fc24I5Glv6j8K9xzxW3qN6QdT9JW739inz/TygkyRKdcf/AF0XxAfvCq4irMnP3moC9bWK1rGJJBtYrSP/AGHUon5rCKhPyHxdbGn4h3sFY1t2p/uCc/3Sef8AVPPsPdijCfm4QTncquX76jl/XUcO0ZcRhZZz6dT4VNxdiS6uFOnsWbOpUlMeyjtpAWhBOqXdPlJ8wOtT7XFcXcd8ygTKrIe9SeHs540pu1xbrPtaSMbJvLkAPASAU1AdtMicEDiXIbi/z6jyNuQ8bahg2qXCNU/UxpkURkpWOwWn/mFpIPY7R0B6ksoLPaJLUAeRK6U7iDX9AOH308vp906nivHqbqCOTPtDLpH2MRg9/olnyBmdBhlUlcR/IbVEaY4kaivhDdJspwSSE7I0JpxY
B01UAnz6Qbi6i2vbpdymzWFKgftNwUe80x0h03s1zv8AvlvskdQs0mbfsoPE57KqoNK8chzw++T667z+xj1WCxrKt404leNTirlQpbKXbinPpWGSB1hwPSZf1aFBt8blBIKwdXF6h9ie02yIzbqyPvV+NcgbjofNY8xQCnFcs8uQxY/Ws+7blOtt05DMnTO0t5cTRVA82PJ5RQ1YqwIVhXgWrVjg849luKc91EnhPlN9qFn0CMJOMZC0W48uxLUbezZ1jhCUt28Vv/2Kjfheb3KCSjeltUu7C/6VuF6i2QFtrdqSRnMLn8LDmp/C3I5caVsLa952f1CsJOk+oqLu6KCjigLeGokjNMpF/GnAipoVLBcXF2AZlYcj5fg+XzhSf2RRU7dc5Uo3NzMWt3rWFQSK5UpC2d8l6lkOSFKSpTbzYR303dBd2mslsY90gBlFzKxbVTOQULhqZ0UMoA5g1rg503tO5x3dxtW5eXElsiJD5VaeQdQjYA1ox0ktxowp34Gtj7WZlhzFJwo2NnfUEeniW869tX0Jag01vKmtKizmY4a/UrqU/BeAUdiCgBRHYjpgk6js59hTcbhAkgkKLElQGdAM65kIARkP1YUp+hdwbqRtls5G/hRiWWW4cgyBXZx5agAAyEofGcgCDTIjBxyK/osdq2eFuIGolNTVTaoGQW9cltpLB7pk18NxkJC576yfqHh3bJKR8+uwNY2Usr/xzeQWkbONDw7iRyA/COfE97TuW5223QL0t00BHHGNMjj8PaoPEyN+Jq1Brnq4IeFwGq4yeNpj6RT3YU9j8hatTTZK2lTsdxhRWC21OUChxKT8yjp/WrqXfTNJTdox+fHk4/bj4EHtI4j/AEY0bKiNG2wXB/5SUExn9iQZ5e3mO32nBawp5udWPVMuN/7MGPWhS9xBfbkpKmHmka6p+RQ7fd0Ivw0conRvysiOynEYmW9tGYWs3jBmoVY865g0wxs2xluDHSl0NOhRWFSWlh1sua/MhTqe6Xm/h46+HRSyulukKsPCR8JHLtp39uK/3zpbyyGotT+NTXPsqM69oxAXnPizFspr5htobTzIiyW5r6G20y3oj7S2nQw+QlxmwjoVq08gpcTp49LO6wHb5xPHUc1I4+zDJ0nHdkfwaUC4sphQq4GkGtQTXI078cp3IkWB7aue7+t5Vof1XH+NY1xyHjtdOca+g5HhVzP6hgbMh8Fbi4thaus/WBSRv+mWCdFHoJ1rZXW8dG3FrtszxXF4FgcqrK8SyGkjqxGlqLUVUkrXMYtLYmgTdoQJlfy7kRBSGUNKMlVKgKwB7DlStMInsl5F5Bc4t/c357y2fKssnynjDJJOUXhTtFlZXFXOs2kqdWdIcFuYfShNJISiM0htI2oHU7092X/LvTG8/Lx6LK227QigZKgSig5ceFTzatcc1fzjbntu5eqHpn0ZEwVrjqu1iADcVSdfNYAV8wVDFj7DirvJq2zrE8GWszJIVDLyThijVQWrb7rz1OKW0uIbc0qjJW6mWuxfQGgnVSPxHsOqa6lmC2UEMtubiKHboNUeX5ofWSudAQVBrXjmMdtdIXNgllvlxaNJ58u+3RYaaaRGITUc6k0y5DPHSt+1TnNVnDnu4yRuPOgSHX+H4VvIvJCDNlza/jNVXazpbpIQoSp0BbynFHVRcJJ6tz+UywXp30yuNqnaHyrW/uH1r4Y1hneWZFzppSJWVAO6g5Y/IX/7qnLu249VdBvEkkt1exuI4YlZ3eQXEKAIFBZ2krkoHsxBLhb2WG6VPvuRYbTbTWf5re1WOVstTrEyFYXVgIarqUzoXA8wUupYaOm1Q3HpPttgm3G0khm1R2nz00qup8TqzMAF7ARnq7OGP1ch3E2UFnLOWS5OzWULwkUdXSCPV5h/AVaqlfiqDwxY1ieGN4emvg0MONRR4K2CzWVEVEUraGiVNqYhpS44FtkghaiSD3PTHZbRabbGIdvhWKLuGZ7SWNSSe84jNevL
8ZopzoMvt4n3k4J//S6MmWtS68oYlFqTCckrkvS/ppI9RLTUJtanlmOrcg6AnRHUqa3eKjNXSxyxHa4EqhQfGOOChjvC1HH+gyrLg89X43M9OqxGymWjU/Mp4gOXdDT/AE2qq+oobaShTS5b7qVqQHGmm1OKTpMsRCn58pFAfh5k8vYO0n3YHXWtjoUH28sKrGXYTn8xS+U8mh2We1CZU3F8luXJsOhyrG77171FLdOMsyYlbcYld2L7TQlFuP8AR7WQtAYZBJ0S+TSxpIvA8iDnTuIP2YgajbtXtwuw+UUce18eHxzeU1vcTbmLPyyTQsPyqJzHqx9Ah4Wy9JhwjZtW0RtlFzJS2hp0x2kMqVtcdX8EqWChFoz1qezLgM/t+zH3Q1wdQwnqpMZtp0q0icjVVdidgtuY9DyFVtZ8g0LASoroEVf0brNvPhkluNKZf+neBDjrjG5aUaTHbSSecJFEZNaH4h3U593L2Y2gyqnl0Nfswry8ygZgifjV6tzGMXiKhL4xn2fr2jGFO1kNmr+jsm4jD7/0GVQI7a7N+M045+oIRJ2KSHEnd8xFd1g+FctJ7KZZ07Rx78ajG0NHOG6uxq8CctraLmFVkPIE2JJq8ZGMzJcyLicezjqiWGRybZUWIym6TXOux69mOpxyOt5Uham1ttJV9WFbFS9QXOQpy7TXtpwGPhlFwQByw45MiuydDmcRbWDByOamO5mWNSUyGJE6/dcaYnZFjzqI64c6HePL+rksKcbejSFO7Uqa2K6g3rQ3CecpAcDNede0dteJHLPliXbB4m0kGh5/rw66x1f0SFJJC1SgdEk9lJa1J7aePn1UfqKP+RgI5Sn+7i0Ogf8AtlxX/oh/ewSqGbJQtCVfmdh2I8+2ncePbqovMkyA4fbizmWNhnkcHelccMVrQqSpxxSu/wAvZtAQk/DTc4eicUZZBWoqfp9+BsmTmnADDiCHVKCVpC92up/yj4n4AdapkoMxX78fY5QMq5YJeC0sK0s2WZLnoRtNTu7FR1A1V4ADTy8uiex7RBe3em5OlcDd1v5raAtH4nw+8ljxKqX9JAkJfS0gkEEEFodjqe+pSf8ADqfv9nb2Unk2pBA+7+jEDa7iW4j8y4FCT9uBpdYbU5IVPyG1RZ5A0sI20LVoNEiQ2RsfQPt0XoNAoDoDby3EBqviTs/V9PdglcQwyjPJ+0YHUrim/YcV9FLr5rf9BUtyK8oeRLS0ONJ/+KHost7EQPMUq3Pn+rAtrSUGikEfVjSTxvlm9Kvo4ZcRu9Nz62L6iCToS25u3t7gO+hGvWRvrTUH/GOB05jtoeI9xx75S5AIp4DyrkfaOB9+BLmmI5U1Y5RBMGN6SY+AGwd/UI6fRE13JnYu1IX+cX3Y5QdPwgEnpU3rd7Om5xNXy1jsC5ocvMefT7SSKdw44O7btUxk26QkCUvehB+0VjiDZ8tINc+JoBgc1nH9uhzexGr46l9lOl9GqtwBJUWkuL0OvfQdJG37jtUZLQglu5c/rNMNtxYX8i6JT4e85YkngXGbKkoesn37FaAlRiQWnUsDTTs++PzloI8UgNn7enjb9yklipbIR38T9mWFm8sIonrcMOPDgPtxJyuYbiQ0NsxxFbZRsbYS16DbaU+CUoKUpAGv+3o3BGzr5jVL864GzOoIjUjT3f0YTH5vofVPBepbT6aO/wAvrOhQSQfP00BR+GoHUhDo1PzAy9p+hONRXUVTkePsFPvywyXpw3OLUsaDtqdB5Dw+/qCripJxMZCwCqM8abdhqv5fHTXU+A8NQOtiyZZYxMNK6uGFhmwXFQHlkes6n8lGvdlo9i+of+sc0Ib+A+b4dFrVzEodv8Vhl3D9r2nl9fZgfKvnEov+GOPeez2Difqxtsy0LCz+EaA6q7eHkAfj0QhqeBoMRJA4NDxxsNXDUZYU0dVgglSToR9oP+zorb3SwmgqTiBLAXBLZDBDxLKZNfISpo+pDfUDIhlX/Luanu6yO/0s
kDzHyK8D01WK3XxhG8s8Qcge8V4H78LV61sfCZAWHMZn304jEgI0mPNjtSmFb2ne4J7LbV31QsD8C0HsR0xIFAoeGARY4yLT2Kh3AICdRopJHmk9vA9fWCfhx8qcse2JDiNUgpX/AElDg+YpO46BR7H5lk69+vIQOOPjVY1x+UG3DoNUPLWlOmh9LQjc4sp7DQkkDv8ADr4aHPmMfVyPdhDyLG4N9XuV1rBasoayCNyEOlvuk7m96FltSgQCACNDp1iVrThjMtllzwJ04SjGIlpV0NNTTMZtVh6zxe0gN2NBKkICUiQqtdAQ1IPpp3LRoVlKSrXQaFre8hRAkoIpwINCP9PYajuwHmgnViYqMrcQRUe3CdFhZPVw1Q8Lp8b49iOPNyJKcQpIkB6Ypk6t/VurQ6XW0nUbdo1BIOoJHUhr+yVauDIa8GOQ9wAHvpiP5F4Toi0xgUrpGZz5k1yw1Msw/Fs2STydxTi+XS2y2s5BBacx3IFut6bXHbCsSh517tqNqkDXy79ZrJZXKkk6YwODAOB2AV8X+1jFhcRONcdZD+JSVPtJGKsPdP8Atf8A7fmVVORcn3Ptz5vusjcRZS7k8cZp9OpovQigWuQU6LqpmXNRGLSA85DS7MaZBO9pOqwE3DbbT5W5eyKreSwPGHVWfyywoH8syqGCkA6a5jLKtQw7dvMy3lql8JJbKG4jkaMsE8wIc08wIzLqUkaqVBNc6UMa3/cBX+zr2P8AH2TcDe3rFuPrfE6vgrEKKDkd4nlS0qcgssxx7EplrSX1lNYhSbN21nuykLdeW0hSwl1opCmjZ213N3tXota31zci5u7Tb0jkVCBGZJAsMgYLnUanYA6SCQaAZY/PG86k2Ld//uju+9DbV0620rum5PdJdzVNzJaQB7uF4GdGJgm8uON2EsgZFKE6/EOYHnyu51589/8AyfyDnGVTsmyil5ZssothlVwz6lHR12W1tQjHaFppDVUpEMz2EtR4TbTBSolI1165sst5p1DC0ikSTbki+EfExIAJpwAA058AoAyAGP1V63tJJOk/IurmWaGPZWVFkLH5eIKzLEhbIJUmQafDVyT4i1T3jtFl/LX7OXIHH+PwJlyzgfumy2w/RGW0PIkzpmSxk1jYHZaZUp7MXmWkggLIP2dWXHDez+kk6qKi33V8xmK+ZqGfaQzU9hxwzf3207T/APdC4bcSAT7v0HZOgY0LKkUkUtORoYI2bmKjAgw67nc5cdcbZC3FdyDlqpq5/HfKcdlhc6/tbTCH112K5DaQ2mVvKsLLFGWESZOv/MPMKWo7telDpdQPP21Ify1JlU6fyyzAF4waUDg5lP2W1UpjtTrBbTqTbbfqLqLddq2bbdrtflrm8vphEJIkcm10x18y5mEZ8pVQEnQKkYNXt9plXmOZBnFhZx7msrLldZjmIszZVnRTTVuqh29zYxpaRCmPU1mj0UsttqbRtUo+HTzb2T7xtm3RKkp+fuXhgVgWgZ4z+YsTfACp4jLVxAxXWi3sL5TZyXEmzvGTHJKhiMw4h1hq2iNh4hrIahB54u19vWGTOT8jgvQ3HIGDQ6ilnvSEpS2CJEVC1UzAb0aC2XNQ5s0A8NB11HtOxbR6WdLfKRxxnqedmElAMiMtXs/ZGKY9V/VW26F2yMSuJd6nH5Kdg/6QjsHLFzuFqpMZqG4VcyiM1EVHDIQhOpTtDatNB3K9pBHn1V+4NNfXHmzkszg1zzxy3t/qRdX8U+47pM7XQnRhXsYUNPbQgj2YItZclLwkx5iIqEObnHkqO1hxw6iMkD/irQnxSNelXdtxs7L/AJZh5tyBQjkPae37cXv6d9P9S9RRJu9uws9pfxCRgSWqfhjTLUF4FiQlahSxBAOuFZBic8zXMpksGSyzrEkekppLuqSFfKnXa6R9o116U23W6r+XRB3Cv31x0NBsFnGqmYvJKB8ROmp9iaR9+IG+872YcE+7rGXI95JmcdZzUJfcxHk/HmWHbOvW
Tu/TsiiN7f17GpikhLzK/wA5tKtyFajTqQu9bvL4PMrlT4V/VjbPtO1uEM0Yby31KSTUGteNakZcDUHsxSPitf7jvbHlVVi+a8b8szMu4k2f9PPcTguFWnJ3C2S4clxyC/A5OyHHWZ7uM0VxXPJrnZFr9NHaaKFPL9RGvUW4sLq5ZpLhW1SACoFAdJqvw0AKnMNxHCtMOE3Xu73W32u1yzxyWtgaxVjj8xMqHxldbAjJgzMGAAINBiT+H8wcb5zOs7vAX1t4xNnyFVzL21CosltXp3MBj5vzYMC5D7TDndK2kggkdGG6m6hiAUzZAAZonLLPw5+3FWN0F0fczPcG1pJI7O1JJAKsSxoA4CipqAAAOQGJE4pl70aTGMBx55biwEJZSp1bh1/ClKQVKUB5AdaT1TvTZSSKy9hRf0AHGxPT3paPOGF0btEshP8AtMR9mJ04dmH67XCnuW3Ys1LQ2Ny2lsOqG0kLCXQklJOncdbLbf8A80NcKFPatae8Zn6j7sRr7o3TAUspC6gfC9K+5gAK+0D24jDzRCbrpbrzaAkqKwSANBp31P39Wtsd4s8KkEFaZd+Oc+rdqNncuJEKyA0IIoR7cRXcyNIdLSXhuBIUe3kdAPsA6ZB2kZYQdOZApjVj3zbD5deUA2Faq1VtKiT4a/6upGolaLgZKio2fPCg9m0ZTTbKF/iQkAajsNB8CdSeozK3vxvjKVpywzczyVKMdktKcUkrSpad3bQhJHbTwHbqOqsZM8TH8sxEIQT9uKXvcRla1vzIoWdCpeiiSfnJOg/9LoqreHRz5YGwIRMX5VxEKnfmykqQhbinSFBB7n5j/SR8OsDEz5czhoW5SABvw5f04emMcWZHmVr6E6VIrMcro6rfJrpKFAV1OwpIdQxqQFz5zhDEdA7qcX8B1olsZIwqBdU7tpRe0nt7ABmTyGJI3iMK8xYLFGpZifwgc+8ngo5nLF0PsNxjCKKPlXKucR58OvvKew4t46/Sh6j2E4vA9IXMuINdyn7l9wMyX0/mO6uknQgBP6tsL23EdltjI91G6yyauEjch/Z4gcOGKh371A6Fu+rV6L9QRfW/TE+3uZZbTOaCefKCR14sBGGLBc1JBwYuUMlp7KXFw7AbORYV8eV+qXOTPtJYeWGxsgVLLjqVLRCjAlTiewUrt5dAdt225kk/iG7RhJT4I4ga8eLEcCW4Duw5WFh0Xb7fF0X6a3b3/T0Tm83DcpohGWWMfk20bMNaRQCruVI1SaQKgUwZeOrOl5DwAVyZDVlI48smKaVIjNpYQ/XSFOWcBxgDcEtoktSWhr3Ciknx06ITxy7XvCSBVSOdCpFKDUvd25jPngzsDh9uuHsvPVFDSW5mfzJDG3AsaCgfMrGc0UivHBFzzj9WTx4OQ45m+WYNMPpNzZNG8061OZIUpqTPq5GsZ+Yyy4Wi4jYVgJCgenS3LJA7SKkklRkeAyFKd1RiD0NeXEmqzuXKqJjG+nwsgao1oQQamgBz4kduOXP3wcIS+GudL5mBJl2VPbpjZPTX0tgRpFqiUVCbIfbQS2zLExtSloBOm8dIW+bf80JJioVnJNBmARkw+4j347A2C/8AIjitS5fygq1ORKn4WI5VzB7xiQvtH5qrq2fiwv5qpCEz4zLcMKDkx1YWgDYjX5GgRqpa9EpHfXqrjCbe5CyZDVz7cWNMxexkeIeLTjsh44yTiqz4mrLSK7XmxnVzDilMrZcbbUloepHCtdVFDnif6iejLduWkYXYjG0dUA0U+zv78cwv7xHCaKzk7DfcrjqJMukzGJD43zBtIU63V3dL9fOxKY2PBiLaQnZUVY8PXaa81dWR6fbhFMsu15eZGxdacwaBveKA+wnFHer+0XEEdvv5JMbKIHr+Gmpoj7DVlPfpxXZx7aTC2qO3GcJStt9vdqnwICtAPgB36vGwj1LnwxynvMw8zVXuxLjGGrR1cdxtBQHEo7ISpWh018fDw6Y4AoGeK/3B2ZiRwP0y
xKLEYlyw5WynHnwhtBQQNQCWnSpI8/whzqS5jKEZYXwZVl1VNAcWUcWcrLoIERh6UvcEoCypfkkJ+J076dVtvmzC6kLIBQ4ujo/qWTbo1jck0piT1X7iYmmhlJ+T5QN+uumnhoe/SVJ0o7GoXFvQeokaChcVGGvyjzczYUn0/wBUFKW2SRv81Dz7+A6PdPdMmG41laAYUOsuuxeWZiVqkjFaGWXKbLIJ0ndqlhohJ17b1gIHn8Vnq7bGLybZU5k/djlbdrg3N48h4YYbz6JDh7BQ100+wHo5ADhWudVCAcbDLSAPlSE+PfQf4dEo1rxwFl40HLG0A2gd9D5d/Lt1MXSo454ilJWOVcazkgb0hGmuoGg7a/yPc69ZM4A7sbYbZmYE4dtDjztq56rqViKwgvSSNdVISQEtJPj6j7hCB9/2dBrq9WGoB8RNB+v3ccMNnt8ki1p4eJ/V78Kk/BLGctx9bS9zp3aBKtqABolCRppsbQAkfYOoQ3OFBpBFBiadrlYlypqfbliwiNx1BZoICGENJLcBhJT27bWk+Hl49UBa7zNRdVaY6s3PpW182RkpXUcR/wA1xhqC46ClGhJ/h4+B/h05bffNKoOeK83LaYoZCGpTADsUIivKRqlI18NdPP8A2dMEUjOK4XpbRIzyphvXM5LNY+rcCQhWiu3iBrr8PA9S4CxkxEe2QmmOjX9vbi1njH2wYG67HS1ecgR1cjX7ugDr0jKENSqpDw0BQ5Cx1EOOpJ8FNHz16499Vt8feus7rxVt7U+Qg7BHUNT2vqb34/Tj0Q6Xi6X9OLCEKFurtPmpTzLTAMte9YvLQ/1cR+h481y1+5/mNZzJIctIfBPGmM5t7dMOcbAx5pi/EKHledS4rq1JmZHW5Efp2HtpSgJbV8rkds9MT3R2T0ggm2EBJNxu5Ir2X8dUqY4geSMmZHtHBzgLHZrv3rfcw9RsZI9qsIp9uh/3YEtFmuGB+KVJPAp4AaTkyKcWveH2AdUti9cUMe8Xni7wz3l5RK4guFzrB/gqh4rzK9xuvk5DP47sLPMbO5tJrTFbGlEX8WjZZ9NlRTot1KiQpGnXTPQPTVtf9AQx79GFiG5PcxJIwQTqsSooqxHgLk1PdTgcci+p/WG47R6ozN0u5edtmjs55I0aVrVnuGkZ9KA/mCMLpU0zYE5imCrwJ7rva57a8CaxvFePeerWdYPqs8vzKZxvPm5Fl+SSPmnW15LXJEpTzjq1em2rRtpB0Tr3UV/qfovrHqrcTd3l1tqQqNMUQnASJBwVRSntPEn6sOPSHXvQvSG0rY7fabw8jHVLM9q5lmkPxPI1akk1oOAGQw/8s/cLlcp1ieOPbRxDyna8tZm/+g0cvOcIlUGJ40zLadE7J72dIdeYVCp46S56atErVoCdPlUJsPTKPaJ/4p1Ze2ibLANbiKUPI9KURQKGrHKoz/Qa3H1Ym3qD+E9EbffS9Q3B0RmeBo4YgQdUsjGo0oM6czQdx1ML/bRuYWDY/wAZ5t7i8+tuKy9Eucz4ppYsKnxq4uzLTbWUGvmtETK3GJdtq4YqGwSSVpUh0haZO4erlu+4y7tt21WybzQpFcuS0ipTSpYcGkC5aie41GRGbX6Gzw7RDsG6b5fS9Oag81mgRIXfVrZVYDUkLPn5Y9oIbMJHv0mQ8nyfjPg2gaaZocHhN5La10TYiHGlPRl02MwUstgJaXAq0ylbPJuQ2QNNOinpTbva2l51Ndkm5uG8tWPEgHVI1T+02kV7VOED+ZTdI7p9u6IsqC3hHnyKvAGhjhWg4aV8w07GU9mI203D8aLscMLd2/yE/N9g0P8APp7ut/ZqqGocc8bf0kC2pkyx+veN4wCt0f0yQR3R2Gg7K00179a7XepO2uJd90vHT4SrEYCXIGB/oeAX1vDSHJcC7prJkNaJkFmHHmfUtsudlJKkO6keB2jXrVe7yX3ywikNI3SVM+GolNJP1U9+GDpTpAS9L77NCoe8tzazoB8R
WMyiRVPEVRicuajFb/uduoDmFqexuxrYdvIq5hbVaz2ayseZiwn7FUT6+SpDLMqHEZcUtSvl10R+IjVk6gRrfaWlj/xytWJoBwpWp4H9WE/pbqBIN1kvd3R12OOZUiWJHmlGtwiqsaAu4Z2WlMxqJNFFcc2Uy9nZVkytzpUmRKUoqSsKCwtzcCFJ7HXXt1zSWN7eEnME/ppjuu5B2nb9EgKSKoyPsy9o9mLfPbNgjr2PxHEslK1emwVLT853BKUqOvmNdR082kSxW4HIccc39RXj3W4O9Sc6DPKpNP6cdHPtQwRmkxuK44GI7bhZMiTKcbjRo8dhv1XH5Ml9TbEdhpCCpSlqSkAePQzdptFvpFS1K0GZ7P04O9AbSl7vvmghY9SoSeAVRXM8BkKnBNwvm7j3nfkZ+r4TzBWTY7xPlrsDkvKq2teONWVrUx2pMHGsVyI7GLByLdOEz3Gtza0oQhJ+RWqTeQywkPcDSzCiqezmT2d2OuNolgEYjtmDMT4j3Dhn38T7hhO5Ofi4nn9yFOtwIlu3GyKGFOojtIatW/VlI3qUhIQ1MS4kA/5eossWh9S1KkA/T7sbbtRFKwNAnGpwg/3u2xVquo1TkmSxERZUxpOMUsm2cltw47z6mo6mwlClyPQ9No66LdUkA99etMlpMGIkARhyY0I9o41+3EG3vLfzQqmo1UrQ6Qe89mKxOV/3gJmPv3mP43wryfx082PomL3NaE12UyVKUphSq6qltmJWPrWR6bu51xKv6SSOp9lFtVvVr03E0mVFRQFJGZDVzIHdywF3QdSXFbXYPlLeAEgyu7O9OTRqo0gjsbEM+Dc8b9zvuLwfhinbyr9f5Eu59nl1hdS3pNtS0Vc0bTLZ1pNaUsfqKIqks7l6bXpCB2Pbpys+ptukj8qKJ1mVchTw1HDhwGKt3X096gtHFzf3MEhkkAZ1J1EtmTRvETQE9xx14YPEg4XSUeLYvDZq8fx+tiVVXBjJDTMeFDbSyy2hI/qKU7lKPdSiSTr0GkMkrF3qZGNScOdnbWtrbLbQj8pBQU4+08MyefHBlj5NJaRHecVot9CkrHyFDiQVNq9RCiUuIeRpqkgg99R1G8kklQMhmPp92CnlEqsig+IGo5EcMxwoRxB44hf7hfbVHyZMzP8AiOI3X5XH1n2+GRSlmvyH0SXnZWPoB0rcgbAKgwPypH9Oiu3Vk9KdeXdgV27enMlj8KyHNkHY1fiTlU5j2Y5x9UfQbbN6ik37o6EW+9CrvbqNKS0zLRDgkvOg8L9gOIXVVTHzSGqVDi+nZw5akXdcWlNuJfB9JyYmMR6jRDySmS3oC04CfAjq44tyjgjAZqwsPA1cu3ST/dPMZcRjiu92KeS/dkjIu1FJEoQag01heIzycfhOfA4lxwZxfBhX81xtMhwv0jqI6y0EBDynI7jrbw1/oWkhCh46n49V51dv00lmqHSNMoJz4ihoR+kYvv0o6Oii3hpfEQ9qQMqUaqkhvZwBxIDDqR7HLjkvMGENIscZ47u01Lrg7M21loxDcSVDRKypgp3fBR8uq93W8W9hs7B6+TNdpq71XM/fjqjovZX2ibcd3QAXEFi4SvJ34H61p7CcLsf2/YWxFrER/cG7ViDBYajtRptOz6CtvqOvH/mwVPvOLJWpWu49Qm6s3EvJq2kSa3JJIc17Bw4Dlhgm9OdmkENN5ni8mMKoWSNacy3ezHNiePswj5D7WuOslu4WT/8AtQTtZewnYkti1p5FDFkIsIRSqNZsrEhYYmIWkKJRogqGu3rba9a7vaW7Wf8ACtdq1QVYORpPFeGY9uffjC49N+n59wTdE3d49wXSdaNEpLLwfLg3bSg7sSFRxVlWSS6rJ6jnmyetqiIupTkNPRYw47OiKCVPwbVDLC62cj1khxKVtasuElvZuVqqnfLGzjeyn2tBBI2rQ7yZHkVr4lyyyOY41oMPP8B3C7mjvo91lM8SlA6Rw+IGlVcBSrZgEVHhOa0qa6r/
ABfd4pAu2rjn6fXzcwcd/Ub60rsdhW81fohhLUOUsMoiNRI2iWmoyW0M6lSUhalKP0bzb30kbW+2K0duPCis5Vc61IzqScyWqTwJoAMZrstzYxSpPucgluCdUjLGHJpTI0AUKMlCgBeIFSSWNRcL4NQsfTReZKx1PclTv6T6pWe5cLjc5Cisnz6n3G/bjcNrewkB/tU+rTgZbdLbRbJoW8Qj+xX6weONHKeOqGpr5GT1/L0SxtcbaVd1VY2uqQZs2s/5yLEBRKU4fqHWQk6AnQ9b7LdbmeUWctgyQSnQzeLINkTw5DPGNxsNnaIb2G91TwjWq1TMrmB250pglw5bNbyLZKR/7D3LNbcxUtDRCv1CA2467p2/4khKlfaehjRtNtSV+KPUhr+6xAH1Ymz0i3hmX4ZFVx7xT7xhqZJa+oTGecbahSHiyGddFrWpehKkaFTalalQcP4SPt6IQxaQHSplA4/Tj7MBIhJcMYZhW3FTppxPbX6HEf8AN366o9WM9UUmTRm95YlSEyUqkNK1AMlCXAn1AdRppodOlvq/a7nctrM0N1cW7g5qumgrkQKioGPDp+SeQ6Li4gDfs0y7CuWOeD9zb2wW/LNhieb1pjtwbYIxTJ4KY8RhLrVRYG3xsRpSW0zW24TPqxVxw4Wn2l6LSfEPXQVnPu2wjpq6uZH262Xzo0fSVElAJJMxq1NTxZ0PZiH1fubdMfJbgGkeCwZjpLNQtMAjuV4F3XPWc0zIxAf2v45Dl+z79z/HLN84/TyrTGMbcVNjPRU0uPxKKbBeddYCS5o2zq6AnXULBHY9A+oZdz270y6yuNrt3lv4dvYwxjLzSBVQCcvEwHHgTjkT+Yfdpm/mi9DJNvRZZv4ncyquoUeXUCiajkAMlr3duK6eYcFuoF5wBI4xT+sYy/TwMd4WrKmH+rZdlNrHl2MTIZjNY+haRVwpZS4C+hLOqxqdRqOc7Dpfet29Ntn3nq9RHul/tEEcsVSNTI0hkA05roDBSwOZoBjvr0u6+nt9037YN0ip1JabpcTX2nOCBblIfKTUf8R5AuSr2Fjli9P2xcIZHhPGbmPz0usZPnNfRSuYGqJ8M1FxZUIkoqq5U5O1KYsGPJKZimzslSNyUnYOn7onpfeNs2+4tLiR02i7mSRbWlKeWulDMRm2WYj+EZaqnG7qe26V3fqrbutr6xt5urtohmisbiRQxs0nYNI8KHwpOwAAkoWiWuihzxNbD+N5iVwq5hj6ht1SYrEeoZ9JpqW6CYUVyatKWtH9qm0aHQrAA79WVDsslNc3hWnPsHd3YFT7whY6SXmJ48ak9p5k9vbguo4/paGJEyewsYNfBafqFTYdafXtZtPZPuRZMlh5e51qwjSGXELaCSUKQrXQ7dZDWlrbkoRV6HM5Co+8Htx8ivLm4ppNEqPbQ/pHCmHRByPH6hyUcbr4zpoVokxLW1UtciUhmU27JlQ92k9aH4j7zLyQElbbSVpAUT0tbxMkkSqtKhq5dhFPp7cH7KCSNmZq6SKYFHI2YvXLsp6zlfVJmIcisrSEw69sesh+O99OzouQ7FeYQUOHQ/InsO+oSGrOFxMm8KFueJbYlC9t3tV9pHH/ALledeJmuab3lLII9SYiKiguWMfReR7uZXwqyryOVGqITEatpFJlPDdKdkOEE+nohtlt7e4ubg2Vo4jCRhiaZmtP14B3E9vaWwvLlDIXagHZx7fZhr+8Thjjjjaw4p5B4pp/7RxLmfHn7iRhTAbjRKec1FqbBl2FGaW43CZlxbcIdjskMNOM6tjRZ0hXAeSJ1loZonKkjg3fiTGEikRoso5FrTsxF+LXV8ZaXHUp1JBI0GpPbQbNe2v26noJrbhgroXjiTHtO4nxXnbmf+28tBkYri+OTcrsadl5xhV4qJLgwodc+4ytt9MIuTPVe2KBV6YR4LPRa1byLZrhaGSoA7iefu/VgdMgnuRC2SUJPfTliQ9bXe1f3iYVz5S8a8Np4oy7gZpT
ETKmamholyrRpF8IW5dDKd/UapxzHHEPtzE7g26FN7XPmSWura8spIVu5FljnNKUzU5cPr9mB9tcWl6svy0Zjkh59vHs9nPEBsQqTPq2JC29q/TTqojz01AGn4jp5eOvS3dL5cpUcMG7U64w2CEyz9JHYb7jc86r7TtShJ3eWn+HVX+oEqmO2g/FV291AB+n6sWT0JERJcTfhoi+01J/QPrw/sXalzpLbEKLIlyApKfTjMuPKB8AFbEnTU/Hqs0gBeoGWLCklVFJcgDtJpiYmMcdZPJiRS9ARCT9OnVU11DS9VkuL3N93BpuHR5LVggrkNI/Xhcn3O2V2o2o15YdbuASI6PTXe1LDn9SdVKJUPIlSknanqFJHpNAV1Y+R7gr0PluRhx1+H2kdhC4NlXPkJ7lpakbj/3jqBr0Qt1mVA0bLw5YhzXkTuRIrAd+GnZR7mrnl+cw8lvdp9Ru9aO5r2KS4nVI3D49BrlpRc63JOftH044L2z200OiMivZwON1iUptSFIUCy6kLRr3AB7FHx1Qo9+voQoapnGcx3f6MZqRKpDf4inP6d+F+PMQobXEAgnVKk+Xl307jqXHQijjI+/EZwwOXHH11aNSW1aa+Op0/wBfh1pkgQmuNiSuBQ8MA3Pfo2bDIpT0BM0PwcOVPUZq4qlCrVkq6tuMEhQSv1HHUqPfRK9fIdI2+WqRxbpIU1h4dvqdVPgmuNIHfUmvtwwbdcSNdbdGJCjLJeaQFBHjiiDk91ACB2imNLE4vGFkhuVKbzGs27XF1ilNTWV9h8jVgygEpV9o179Btj2vb5QHdJ0b9nJh7iMFN0vd8hrGrWz/AL2ake1TzwfIGX/Sw0xcchsY/VMpCWG2221zXwBoHZL69VFah4jU9WbZn5eERwARRAcBxPtPbhJnsvPl13jGac8anwj2DG+1m1iGl/XmLYR9NVtzI7ZUR4EhxsJWgnolHfPopJRk7CMR5NtgJ/K1I/apPH2cMIti3QX6hCqZJorfYXv0+Yv1IUl59AV6cd7XUKQjQaeI16j3TRuRFD4HpUg8Kn9Qp9uMo3urUGWcebBWmoZMAOZGA5YxLKrmuQbZlxh/XeAru263r2cYWPldR28uhSxyK+hxRq1wXSWKdBLCQVwoQGt4MhberDRCEIJ2iQ+ACGQfHYlPzOHxCe3iR0ZtIQ41uKxg8O09ns5t2D24gXcpU+WrUc/YO2n2Dv7hj5JmBhxbj6/WfWSogDtr/wBkeCUgdgPIdSCyoxZzqkP0+rGsU0AIKR/Tj34S3bZ55W0KCU+GgOnl5nz6m2EFzuFwLeEHU31AcyewD+gZ0xCvJ4bOFriU0UfWTyA7z/Tj01OcToCrXU+fhpoD2108OrQ27aLbb0FBruObkZ+7sH29pOK8v90ub5zqOmHko/T2n7OwYcdDkE1VnFqYDKpcyYtaI0YOstFRbZdfdHqPuNMgBltSjqoeHbv4n7a3a4bSKYCTzeV7cEKg5PerH0ONzax+EohMqOnIKJaVgH/iNFNiUpebHgddD59E22iQfs/6y/rxBG4qeeC0zyLDfjpfjyWn2HAVIdakMONnxBSHEOqbOw9joToR18G0yHho/wBZf14+ncF78b1XmrM4r2aONNuNsuuNutvBtx8OFpLmxxezeWlaE6A6HrCbapIk1GmQrxH68fYtwWRqDhh3TLduHFEhSjt2a7T3BHiddO+mnQ6OIySeWOOJ0koRNfLDQVniY0hcVSyw62drjC3UKKV6ApHphZWghOnbt4aEdFhs8pWo009o/XgedxUNpPHHpzNoj6t0gFsd9HEHQbTp4kaHXVRPh4adfP4Q5yOin9Zf149/EaZiuExvL2H5BaYUH0JHbY40HQAC49qlKtVemkEq7eA18NevkuwyBKinDtHt7fZjCLdFaUiorXtHIdmHi16UyOgltK0q2qPy6KJWnXXXTRXbw+PQW4ge3AhzrWp9v9AywVt5RMPNyz4ez+nGEwGUKS402ltxtYKV
D5XEkeBSfHXTqKrSLzOJJCPxAxVz75/YRUcxYLkcnjqJY1kayyvC+Qc/42xZqM2q9ueP8sqsyiZpx7Bd2RIOYh+p22NYj04t3HUrQJkBBVCvP4gljcWe3TPHBdaPOjrVJCjBlOngGqKahmRjUNn6avOodt6l3uxgud92gTLZXRFJ7ZZ0McsaSDMwuCSYnqgbxKASa8l+V8eZ1ivPWWSckxOHMk57zNjEuZlS3nmoVXx1+sy8ur7Clb2oUqLZWUF1iWlzV2NOjfTPJCkAlb2fpbed13OPetvjLR2N0JJo1FWUtSkrD/o/w1GYNTwrhr6q6u21rROn9xl1LJtMghZslMEJEckKkcZIg4ZlbPSympwQfahIqsW/bX93l3AkS5dVU891+TtrMNUeYs1mYce20+Mwyv8A4ylR4rgQodtT9nVk9Hfxeb0T3k36CO6/jI06WD+HUaNyFdJY6fdzxxB67x9IWP8A91B9OE2C4Eu0Sen8onZkKBWpdUTTxOYUVHMjFB9jyBy1xjFy3MuLcyv8FZz6xmVKItBKMO4t6KydlS0lLqUOuR4i1zUMuPNhDu7slae/Sba9Rbtsdlf7HtTabfco4XlkKgkeW8iqsbH/AAXf8ZQamVUUkBRjsbeOnOl+rIEj6qsLO+s7e4Vo4p01hSg1hgDxp3+FjkwIyxehxZbXOBcd4hioY+ohU8StjiWfQbto1tErGlvTYr62nSqXNdlOKklaVJfKjv1PVs9DdWTbJuEPR03lXPSEREvysy6kSbSA00TgiSKQ0AJVs6DLAy3sZ7zpmF7vV5DghVDaQAfwBRTIKAMqUFBjoA9rdTR4txNisBiHICZsH694uuNxH27Cyd+oW844z6ja2w44SR2SNdNAOrA6k6in33dX3CNiNbAUbxAAZAA5EgDnx7cfmF6z7tFvXX13HeJJLawVhi8egx0NBnnUA9tO/EnmrFLCCgyW1+q6hlv85Ck6pdSlbi3EfKjYpQJ+zXpU3zdJdvtwUKfNPUKVNaZZn3feRgv6FenMfXHU7x7j4+nrNVknFfjOo+XEWXIeYwJIGflo4BBIOH/UsuoMuVby119dXTxVgR2lSZtnaKbDpgVEXUJdc9AhbjytENoIKj1WvidiTUsT78fpJGkUEawxKqQooCqAAFAFAABkABkAMgMbk7O49ZPg0Ndi9zZW1odlXBjzV2VjYuaqSUsRKxqQlTqFD5kp12+fYdEbXbZZzUiicychT24g3F/HF4VzfsGeJK8fe2rMMxMa85NmTMUqHNryMRgTWpFzIbJ3Jbs7NkFmvQoAatslxwg/jbUCOprXVjt40WwEtx2n4R+lvu9uIy2l3eHVckxw/sj4j7ez3fZg7zeUfbbwe9H4+m5ZhGHylBv16Zclj6hsyEoSmVeLSHVxzJSQS9LUn1B3Kj3PXxNt37dV+cjjkdOR4D+yMvsxm+4bNtrfKSSRxvzFc/a3ZXtOIXe4P9r7g/mZp3kbgOxr+HM5sml2keViTDL3GeWuSgqWl22xeA4zChOWDjpWqdWGO4tbqnnkSlaDqMLtkY2+4IdSmhNKMD3jn9/fje1sjjz7RgKiopmp9n9GKuM6HMPs0sIuKcj4bNx12aQwnlhEdFpjmSPlO76PErxLbkOpjtIBPoSQxZLA3LaQkjX7NZRuvm2rBk+72jljFLqSNvLuBpb7Prw+sf59uMpwlWRxpa7eyxDI4EX68FKpEipuGVqLchaNPV9B1vsft6FtGyGhGCCyKwywMOXvdFj9lAQzK2NzFNliQFq2ONOjtvOpCu3hp049Gbq1ruSWMrf8vK1B3Py9zHL2kHtxWXqd05HuOyybtbqPnLZat+9GPir3p8Vf2Qw7KQ9j8mR5kxTkaUktKXuBDndWv9WoOmp6u8CoxyLcflsaccOk5UbYBpl4lxLanAkOD8KE91HU6k9+w8T1sj7OdaYGyCpMjEUUV+rG1W2kaI4l2dN9Z4JSPS3gBrXT5D3Op/1dSHCRihoWxAJn
umJjqluD7zjPlk1q+x6xchL2rjsLCtq9flCTtIAPh26+rBrXUcsQmvns7gRk1riuGBwPyT7kM3kYlxtjrl1KiPE3d5KcEHGcdZ395V3cugRo+wdw0CXVeQ6zZ7e2i1zGh7OJPsH6ce3DfLLZoRd7hKsULE6anNjzCrxY9vIcyMWhcLftwcDcY05suU5LvMOSjaxMeS5LocAqp7jfzwqGBHKLbIp7Wh0edU2hAG4jTqEbi6nkWOIeXUVoM3p2seCj7TyxSnU/rwUt5JenURNvhk8trmUa1MmZ8qFBQzS0zKr4UGbkYAnu144xzh7Gqil4opLX+18wuJ1jayLANTZtXdwWUt0+PPTGtBJpIMdbsmOVAKcf0Sr8AJZNqtCJnvb5g0gUJHpB8KnNz3M2S1/Zr24sH089Udk9RbFbO1Qw7xap5l3DKVAdvgikioSHiBOplrVJKDgAcb/t1yBVzwlCxWIthjIMBdt2LatcWlE1+nsJblnEvoscaLlsJcedbfKdS2sJ17HpQ6uiWy3Q3hBNtckaWpkGA0lSfw1yI7RXAPqn0/3LqHrX+LbeqG3khjDAmjDygQSOGqg40NaYbGbZE9S1sPHq15ScgyNSZs6QgkPRYbqtGR/mQpxtQSj7SVDw6g7fBrmfcZcreEFY68K/ib68h7zyxdtvssW3bFa9D7fR7q78ua/K5ltRrBaEjlT82YdmlT8WD77UsoRSciw8GVuXVZnSS8clrQCtCL9AFhTTHtCfCyZS0Vn+hROvc9LO9PLfhrta/k+IHjQA5knvxbu47LbbBskKSlfPV9UpyGoyDQwH7qCgA7QTzxYjDQV1susdStL1ZJWjTTRKotgFuMaeSlNS0OJOngEp6ZdvuhcWyOpB1RCvfpNfuOKOupDs/UUckeSM9G7CQcj7xpPuxUr+4dxcxlVPj9o5HK3o0aygMP7AVx5DSkymwFaa7HGUrBT59bba1juzPaS8QwZT2E5VH6cdNwbk8S2t/CfC0ek99M6H9HZigyJKtcGuX0MlUdTco+qpGqVuBK/6l/iCNe4SPlHw6rHqXY2gnavxduLl2Ldo7q2Ug0BHDF8PsJ9wV1k0AYZY30lxGxt6A06+o7X2kglsaq10dQn+J6RXNwKxEmnLDDFBZLJ5ugaifdif/uuosTzz215PheWy1RFZTa43CxSWhKXpEPLDaR5dbP8AQP5jsKqei/VTSjUohNuq7DXpq6Elmtd9inUeBFcv3qFNfrGQ5VphB9TNvh3XpyfbXZVkmZEjNQPzCw8vjxowFQMyK0xRHhGHyqq+cqbKGqLYQp8qnsoiu6os+HIdgzI5Og1LEphSdfPTXrrOwVWRZUNY2UEHtBAIPvGPzp3mR43eKUFZkYqwPEMpoR7iDiw/i3jVMxhDbjY3MK0KiO4SNNB8Rqg+PUuW48nCmAbgmubVxJGTiESsqmztSkMOkJ0AGocb10/iW9eoq3ju9BjZ8gvxHhhiyLKUy5tYcUNCANCdBoNNfsA+PUrQr8RiRGWhXw8cKcS0sGlIc9Zw6EancdCo+JA8NNeslt42yoMYS3kqZgmlMa97lMx5SGFvrV3SkjcfM/7AOidnaRpmBlgJfX7yeFiScDiwmKDclzX8yRI01176DVR/1DowlKgdgwtTKGqeZOEKNNDa/m1018/H4dEoZAD34DTQahTDgRYhxA9PT7f4nuB5+PRJXBFeGB5tVX4hjw5KKj3UB/p/u695hHDGQtl7KYWser1z5Teo1BUNSdew8tOod1d+WhzwUs9vWRwuJw4DgUcsRobyUJI9OTNA7fnrRqxGUdPCM0rU/wDvRR+HVabvvThjItewezmfefsGLX2Pp2N1WJ6cif0D3fecH1jAapbYb9FnUgDsE+Gmn3nTpPk3y5rWpw+xdI2JGYFcOmnK3aasKk67oMYKA11H5afm8yR0rx0VRiwryrzOeWo4Z2YYBJt4b0mOyFBSFHTT8Wo8U+WvRzb90SBxG5ywp7tsU1xGZolr
iCXIGMzqyQ8260tBQo9ynTXQkf6j1Y+33MUyhlIIxU+4wS27lXFCMRlzC0lQq+Q2vXTYtPmNOxI7fH7OmO2iRnBGAaTsZQrY7RMNq4lHiOL00AJTCqsfp6+IEEFIjRK+OwyEkaAgNoHX55bhM9xfzXEv+I8rsfaWJOP2OsbeO0sobWHKKOJVX2KoA+wYri925VxB7yPZl7hWgqPTZTe3XtyzyTu9Np6Nm8d2Zg7Uh3sltiJf/UyFFeqT6Y8D36tPok/xzoTfel2znijW9hHMGIgSkdpKaV9+Kg69rsHqH031cvhtp5X22c8Ki4GqAE9glDNnl7ML/ud9zed5RnrXtJ9pYYu+br2OlWf54kfUYxwVickITKvLuYhLsf8AuZxh0fSRPmWlS0KKFLU025G6R6R26z27/OvWtY+n4z+TDwku5BwRRkfLr8TcDnnQEiR1r1put7ug6A6A0ydUyr+fPxisIjxkkOY80g/lpmakEgkqGkv7bvbRgfttwNGKY4h67vrSQq4znObsCXkub5PJ3OT7u5mul15RcfcX6TO9SGUKIBUpS1rVOrOrty6s3L5y7IjtkGmGFMo4oxkqqBQcKVNM/ZQBx6M6L2rorav4fYAyXMja553zlnlPxSSMakkkmgrRR31JkL9LG/8Asdj/AOIt/wD1PStrftP14b9K9gx6Swwg7kMtIV8UtoSf5gA9eLMciSceoBwGGfyLnlBxlheQZxk0tESooIDst5SlJDkh7s3EhRkrUkOy50paGWkA6rcWlI7nqftO2XW87jFttmKzysAO4cyewAVJPIDAze94sdg2qfeNxcJZwRlmPs4AdrMaKo5sQOeKk8Ep7jkrKMh5IyX8y4yq1ftAkLU61HjObUQoDC16rUxWwUNst699rY66Fvbi22Tb4tos/wDs8MYXvJ5k97GpPeccPfK3vWG/3PUe4/49zKWA4hU4IgPYiAKPZXB/dxP6JjUMk6IJB2/5e4+zXt0r/wAQMr8eeG1dkFpHkvLAsvY4kveiUpQUnaSQD3/lr26NW1VXVU4Wdw0yNoyBGA/ytj7LeGBkBtaJN3FhyEo0BLM+nu2SFaDw3oH8dOh+6zv8xBMKgqHI9qtGcPfprbwrcXdvk0cixqw7VYSqR9uOWL9w2osmWIWOpeciBEmUIipKtKyU3IWrc29u1bjvLUQCSNqj49h0ydZ7y1xskSk1jK5j9Y5js/XgB6e9KzdN9Z3clupBSVtDD4gDXxIeRINGA92WKsss4l5A4ZpcT5YiqrpGKZYksRbKbBkyq6NdxPypFJeQVbHam0jPaKRodjjO1QJSoHqhYr9ra9/LYLIfFnmGX7uyvPnjra92Wy33poXRTzoEPluK6XhYjKvPQ2ZQ8AaoeAxMfjn3bcwY5iM6ywPBsbyeLXSmKBuVAo7WynnKBGb1MmjiqekwmlvFS2Wl91NhPfuenGHqPRDqnVXIWlE41PCvYvszxR8vphsd3unyqSTK4OvxuAugDMr+0a+4c8W4e1rhX3++83jm9oPdF/d/DfEGSmDc4nfNGPgmYOx2nEM2OLxMCbfE20xe7rklbcucUBmWlKglaXCkPVp1D0W3p5Jb7pY3KeowvPMguImHl+VSgDBvgVAWUrn5ofUaMgq1QbT0dsTSbNsiLJUgh2qSdQHmQTHINSisjJmrAg1VsXlcM8RYfwPguP8AF/HmOIxnGaOI0iKyUhUue4dS/Y2clKQuXYTHipbq1dytR6qO8lluZTNMayE4etjEVsixoKIPt/0YJHI/BuIc343DrMkYejXlbuFDkUHvOrC66lSWJMRxaItrWPOjatl3UoKtyDrqDsgbwgqxWUGoPYfpw/XgzfoJYXoBq0UqRXLjw7O3ngv8O8M2+J4xFobOTAnR6yGqIlt+QlxbrSUlv6ZJQ0wYzfpHRIA+UHTqHNC4mMjtrLHjTie0k5k95wvWyyiILJTV7aind2Yq09/Wd8Ne2GmspfLlZXV8HIa+wpsLXlOHy8jx
aTfuxH00rL15HhS26ePDnFt515bzJbbQVa6gdSJJRttobx0aRK00pmzEnOnYaVz4Uxs6c2a06s6qj6Ym3C32pnViLm6ZooIRpNGZwCGBagCgEk4pG9rvNmG8IcnQ/cPV0WG5byQqiyGDk2S00x96oyTHbJ1l2yi18+tdfjvTB9G0UFDSXklADm7Unpx6ei6M6pBXbbqSz31UqYJ1KFqfsVoGB/crha9WvTr1/wDR+KPct82q16j9NjIqx7vs9wl5ApcZNcqhZoHoa/nhBTvxfn7TP3SPaJ7pnYGP4xnsPCs/kupiLw3OXG6OUZnregWI0qWptlxSngQhKylS/s6DXTQRS+Wkis1T4eDGnEUPE93HGnadr3i6sG3FraT5KMKXkT8xEDcGcrUop/aI094xZnKcW2tCNim0MNgNBafmWlXzeuO+1SXtdwI1Gnh1piegIJ8THPu7vdg7FEHoqEGMZV/T7zjNElrRJZUlakKXsO5JIUhxJ3NOoUO4W2tII8+s5mqh7vu5g+3EiO3ahVKaq5YBvIHFMeJmMblLFoKIMi4moZzKHFbS3GdtXAQ1kEdpsBARcsEoloACVOgq/q6ceneoGFm2zXbaoQtY68QOaH+qc1PIZY5r9VfTiL+NRdZ7OqpcPJS4VR+I5eZQcn4SDgT7cSH44xePEDdlHjBpt+LIBB0IacC297aT4lIIOmvl0B32/aUmF2qQw947cMfQuxQWqrdRx6VZG48iCKgdwIyx7fgJeY5Up1pSVT8WblgAf8Vmsnes8kdgTqh/w6geYVkspxWizkezUtB92LPsYlktdytciz24PtC6q/fhqs8W41dRILDcNl2bNixnHCG07YjHp/mPPKI0Tu0+UfxPbqeN4urZmmkYiBGI/rHsGEHfLa2aeLbbCMS7xPGpp+GNKfG55furz4nLGy1xbT2E2FieJQor9ow2W5lqWd0KjgrWS7JWAU73F9wkahbyuw2oClJiy79Poa9vGZbYmoWvic0yH0yAzNTkSWx9HJrit7bTLuqAhpiPBChNSQObHMDgznsUEg+8CY9WYpAzvHahanIFTnEmK26twOredFJSLkuuKHy+o5JUoqA0AJ0AA7dK/U91Ney211OKSPbA07Breg+rFs9L2NvtkFxY2tfJjuSKk1JOhCxJ7SSa8uwDDY54wKr5JzHj/F7F/wCkedpM0m1MzUExbWIrH1RnA2pSUvIKVKStHiUE6EHQiT03uUu1WN1dxDUokiDDtU66+zuPbTGrqLa4N3u7aznND5cpU9jDy6Hv51HZgE0WBQK67exfJa5qDeRVpbWlafyZIJKW5sR5QAeiyUDck+RBBAUCAx3O4ySQC8tHLWxFe8dx7COzCjFtSW9ybO7ULcV9x7x2g/0HMYc/IPHmOxK5TrEZkKShPplKEkqLY2FKSOx3KPbqPtO43UkgDk5nEzdNvt0H5YFKUHuw6Lh5cTMYsZpWi6XHMfrnyP8A1sauS+rv490vaePUOIg2DOeEksjD2FqfowdliD7iq/8ARwov1Cv6cMJcsP27sCWvfHsd8dDi0pKm3ZAEiK8hX4h6bxAHfqaJPy10jxKPuyONF/YtFAbmAUkhOsd4HxA9tRgUZPGXJYktLYQ5IiPOFTSkq+YtubZbXykEEKAcT9hPUPdFLbdIqkgEcvsOeDls8bKkqgFSoI9hGIb+62q/TfbjyxmrWGnKX8SqU5BCxCPIeaesZjS22HFRZDaS+gNtq9RxKNq1JBTuAJ6m+n81zt17Gn+MXDqA2QbUKqGpy1fXip/WbZx1T0/LY29wbCasbeeo1NEFYaygORcxkhdXhBzINMVHe0Dj63z/ANvXuYb5VaffteW2oEbJI8eA0bZ+hb+rr6pAqoO1DdpX00huMylOqylhG4kjq1f8ubnddN7pb3QX568tmrq8KaqggdgUfdjgz1v3zYenf5gvSCfZ5H/y1su7+ShYmSXS7KGNTUu7t4yf2mNMqYmBwD+3RhWEWVBmVRAs
kwMUxdvEaPMM1dZcsqzHX7KTZWdVj8Nnd9C7PfURaSkhDpaCmUEfMeqSs9tSKysYN2o9zZQtGiIaqasWDmuQpWige3sx+isdbffN43jby1N1vFuG1LTy2SIRUoM3bKvi+HgM8WF1eP8ADbFUU2ONV828TIkQ5cOLvhor5EBYRDdhSY60xEQHPRSjatJQtB7aadFJJCrhIZfJYAaWyIavEOvM94NRjKFLuVC9ypn1E6kY0II4FHHL21BwKOWmrGjqkmidZXhsfcn0aeMBe1TLk79Rgx7dAHoRkVswgsTGStJ2p12kHUfPvE9uStyhWZuD8Y3ypVTyJHFTQ4P2W120+UTZLxQijrnWhHMA8GGIuHIE2SyZTyUSkmQpbyT9VNfeeeVJ+oK1flsGS6QpwI0G/U6d+gVzuUstVJywzW+3RwAMgz+lc8N+LkUmst25LaEF5hQWp99RdL7Z+RaVrX8o3NajsB4joJM5Jqa8cH4rcSRkV4/fhBzhB9UsJeDsOSkS4a9x2KiSR6jGivxLcaUktq08VI6+o2k1wLljIJRuOJPe33348j+3zEoeAT8Ho+UsDSuXMx+vubFyktMdmvvqenw49gmsumZFWqU8X22nI4cbU8sJdCNqEFhcQTANIWWUCmpTQ07DgYEmhqiANGTWhwOuZPcfnfuKzSJmGatQKeFTRF1uMYvUep+mUUJ5xDkkNuPn1ZM6atpHrvEI3htCUpSlISNU9xGI/IgFErWpzJPaTjZFDI0nmzU1U4DgPZgYP3bry9zayUkaE6nXt/UNRoCfPoZidyph2cY8t5zw3ndRyRgUhhu6rkOw5UKa2p+suaiVsM2os2G3GnXIcktIWChSVodQhaTqkdELa5RFMUorEwofp29mIU8LlxLEaSDE1uQvf1yNyph0rC6bAMd40jZRXqh5RcVlm/bWdjDkFbM2PBKqypRVJmI3IUpX1LobWQhSVaLEpZrWE+Ypd5Bw1GunGhhczDyyqqh404nAqwepn2wj1WP1suzl7UoDEZlStngNXVJ+RpI8SVH7+g11cooa4mYLGBUk8AMEreE5RIKschiV1DwPUVsGNdck3keEwwHJD1bEkobaGpSfTkWCu7igBoUtBXw6qLergbtd/MvlFQhRz0g5Zd/EjtOLG2uVtvtvk7NdUxoWan4iM6dwyAJpkK4ezXJGNY6wmp41xqIgJOz9QeYMdpw66BwA7pskn4qUEnoaEEY/LUDvP2YJC0muDrvpCR2DP+jA3yDk7M7CY81IvJDbZcWPSgEQmAlCilKEBr8wpT8Sr5vu6FXMkrSFmJpUgDu9mDNrY2kUa6YxWnPP78Nxixly3Qp+RIdUvxU7IeWrv/mKlknXoeULNngogRFqAAe4DBNo59nDQgxbCXG+Cm5Do00I8iog6faOpKJIooCQMQpvKc1kVSe8DB7xDInLltynug3KdcZWWnlJAExpCdXGpCOwLqU/MFDTUD49SI1qNMhqD9P9GAV7AsJE9v4aHh2d4w15bX6bZzqlKiptp/fDKydwS4kOtI3f9ttYT946xjDKxtz25e3s9/D24mLJ5kS3X4iPF+v3HPGSPY7SNdUnuFA+BI7aH4EEdbIZeYybsOMnz7D34elBFbu5CY+7zSFHXv8ANoO/2dHdus49xlCEZYGXlybSPWOJ5YaXNHFD0CtnXbDiENuIq1v71K3luHCvPTSynXZp60jVRI/D2619VdGqm13U0VAXFvXt/LZyPtOeMdh6jD7paRyaiUM9KcPEqAlufAUHf7MRjxzJURW0xwtLq0I2keHdCQO+nidR1WFhc/LN5dASMsWJew+adYrQ5/Xgz1ORMPRtXm9CUg69j9nYDw6a47pGUahTABrVw1VOeHPXyIMpK1F75Gmy88kkd229Dt0Pb8xRCfvPU22SKQ1/CMz7B9KYjXHnRgCmZyHtwiQKG2ym7RXVy0uS3nVPJcCilTatxIUkg6pUlR7EeGnWyy2u43S68mMgsc6403m4QWFr
5sgypSnb/RgifpkicuXheaFoXdYyqVEsEAF4xU6NiWDoCXmHCEuI8HQodtepNztU1ld/K3pFQKhuwc6+zhTnkMBVvU8sXtiD5bGhXv7B3Ht5UOA/bTZEKdIrHEfSivUY7bWuujfZQeBHZZkghwq8932DrW9zkEjGmNcgP1955n9WCKRhl80nUzZk9/8ARwHsw3XnnHVFe4nXxPfUj+PfrFUZyScZ105HGNG4n/WdP5a+fVo9L7cLSwFw4/Pmz9i/hHv+L3jsxX3UN81zemBT+TFlT978R93D3d+NpKTp5+H8vA+Y7ePTNhfwkTnLOumwbqpf9CxqpLM6G4U+olL0dYcSl1s9nGlkbVpP4kkjw6nWVwIZATwxCu4TItV+IYU0xK7PLNscbxcIor+Sgqs8EyZ2xhSvrkpK5EvGLBMoxrmDKcBUmMlCJDHhtKdNG2KZGh1P4l7aVoO+n30+rC9JETJSHSr/ALJNPqPOvZxGFDEq/P4LVrOuY+FY3iFXKXFvrG6/ujGm6+wAUlQiDIIUJFhKQUaOMsBTh1A8SNZAMETgeFiRWi55dvYPfTGjTPIpY6VVTQlqj3Z8fdiSuMwYUtmAzSNIRWtqblybNDa0KvZhQoNTEoW66Y0Jtp0hlsKJ0UVK+Y9hG5XYUFSfEeXYOzv7z+rBGytw2kRiiDif2j29wHIYKVnXql1pj6kktlI7nTw7dwT5adLkMvly6+/ByaLXFo7sBO9jMvOx2XF1tVkLWjCp9q4+zX3TLaENRg9JS6G6+c00naVLHpu9tVJPi52VwJUJpqQjgBUjtoOY7uI5YWLqIKwB0rOOZ4N2Z8j7cjhLTU5kqxkUyoFe1IiRzLkvvGwYq2oYSVCWq1daTWGMsJO1YeKVEEDXQ9S62nliYEEE0AA8RPZprWvuxG0XfmGEquoCpOdKduqlPtw5MYgxHEOxoaoVjZSXC3PuYHrGBGjFSd8GtLzhEpbxR+ZI2hOzVKNQSrqPd3YiHjqI1XJeBJ7T7OS17z2Y2wQBvBHpaVzRmFaAcwO0nt+rEhq4Ijwkx1todOwDeNPlIBCTt7EBCVeRPZOnSdMyzSFu3DTCrwxhQK4yKiNOIW42saIO9KD3WQpZS2nX8QWvx+89aGhByGJIkIyw3L6wr8YjKm28xEJCAHGxqVSFuIAcQiMlHzqcGqT/AC6wS0klbTECafV9eMJrqGAVlahPAcye4Yo59/3ENHybT5pmPHmDsx7a0hzZeR0UhptK7u2ZYfXFz/CvpAHanKob6/VsYiR9PatpK9of1UqdtV7uPTN++4bW35ssTRyofhdWVlGfJl1Eg8xUc8BN72La+rbOGx3V5IFinWWOWP44mqA2XBklQeXKnAih4qMc/kfgrIeEv2s/cbh99lNZlEnI+Za29asMdspKmpMFT1JaO0i5LobeiT3mahyPIZ7Fpayk+fRbpi0uF9F90hnicPJutQv7QjLtkRybTSvIGuOQvUzyLn/7pj0hmiw2XQh1ORUATLMFkFcm0+arAHiQBitjP/btIlZ9wpb4RFlXHFPLFXj97x5aS2A6avDY6Ydlb4vcPRwppNji8pl2M9r85OwnXcOpfUXRUNzaWV908hO33YimRRWgQLrcluTLpYODwIBHHF+dDep8u4RbztHVLrF1jsE0ljfJSmuUO0dvcRLxMd0rI0YHMkcsW6MwV0rkWtsscqLKGJC7S1juBaJL6pzLKIzImj82G7AhNoACPlKu58eku42e72/e5rh3YPlQKQQoIrQ9/b2YvXautdsuuk7Da4bG2Z4lkEsjhtcrFqLQ1GgIBRaVrmTi83iGuxyfxzQToEOxZhrrI0WIwh0rCJym0NxorjhSQn8xWp18Ug/HpssZ7uTREjAyuwAr388fnT6h7N0u26bnvl5DcR2cZlJ0sSPMFdK1INKnOh5DLD4sax0Wy6aGtsuQG0tvreWSwlsIS5MkvrHyoTGUogjxPZI1JHUTqORWv/KjNY0WntNTU++mLw/l
h2N9r6CnvbkAX13fMzAfhRYotCf2Szk97HB24s4/Y9wTNlx9jd3Mov7Mks2GT5m5IW7cBu9ddSmDV1O30PRsUQX2w646FNBkEhQOw6bdLe2t/mpwWYmgA5n28hi+5TNczGCJgqgVJ7u7E0DinHftWxmgq8AxNWS57mNu3jWNizmIevsiupLLk6U9YXEhCzXVUaPCXIkei2iO2EDa2CoazLCObfJXSeQQbbChdyBkFHd+JjwFcR7ySLaIlaCMzX0rhEBObMc8zyUAEnspge8ycle7jjWiYnPR+GJyswtK7C8WqacZQ3kzWU5KVxK0Q3JpkVNp+nuEyHPVTGQWWVk6adTbSz6au5yLb5krGNRL6QCBxrThXEW5ut/toQ0/y+tzpATVUE8KV4045gYTuJuQOEuJcLscM5Pxe+w/kByM7LzZjkGhNxkXJVxPd22NpW2sBu2jZkmxnSVJbajuuKbQdvppQnrPcIN13C5S4sZVa0BomhtKxgcARlSmMLGbbbC3aC6jZLk5vrWrSE8TUV1VP+jCRgVzyzgeXowbijFYWK4ByPMsclwGo5UNjGZx0RWWpF9T01fVJky62PPcfMqPCfU0hjR3aBqB1OvbfbLu2+c3KQzXNuoWVoaVauQqSMyOBOIdpNuNtcG1sIxFbTEtEs1fDTNgApqAeIUkUzwfJ9pdXVnE4g9x+CYHk2M8jxptfWzIcU22LWsuMx9W/QXdNfMPBt9xhlTjKyVIWpAAAUOwiTb9vlspNz6fllDQUMkclNWkmmoEZMteIIywTjvr6K7j2/eo4tM1QkiE6SwFdLBs1alSDUg0pxxAnnH9uvGuF8J5AzL293jGKYvuYyvJcJzG1kPY/S1lIH5drKoMglplz66DGhlbpjy1PtpCSEOsoCUAZDPDffkSqFnPAjgfaOX3ezE+WCS1PnxMTEOIPH3H9GOcv3fcSZ3gmRVtvOl1djR5hUru8avcYsm7XHrqAl70ZJiTGewfhPnY62rRafh4dDW12twHQ0kRgQR2g1B+vEwrHd2zRSisUiFWHaGFCPeDiGdHlOSR5BYQXG2GFbXX17whspPYJ7jcdPADrpzb63USycFIBqT2itMcAbxosp3hfxThiABz0kivcMsG2qza2ioQmI8HJK2XXHn3lFtuMy22VrPqL0CXNvh/Lo6baK3j11Jc9nEV7u3CjHPNuFz5LrpthWnLURz9g+3C5B5IQqG9IlyCqSltosLKgEqWpQLvrBXdRDWumg8et6WgoCBlzrjGW5kFYzXLhT9OJc+0PD7T3A/9QHZCplbx7ikYM5FkEZH5r8uQguNY9SOOD03bmY0kknuI7Wq1eQ6HbndR2QWGOhuZDkOQHNj3D7TlhJ6v3yHYLJ92ufGYYi4QHxMfwqOzVzPIVOLMsZ4rexXCmMZ46x6DQUcKImW/WVyfRVIkvpL6GZMp0IeuZzcb8yS+pS3HXnAn5Ujb0mDdrUXf57ksWopOZNMixpwqclByAFRiid+6S9TetbB9zQGe/a1E0+k6FQONcdlbhqApDDRpvLq0srKshoKYbWaPrjXUinaDjUKgbj10RhY2qSp2FFmTZTiddPqJsmQSpXiUJSPAdNOyaZLNbts5pmLMfYxAHsUDh2knFF+oztYdTy9PQho9t2pY7eFCKEFoYpZpWH7c0khLNxKqi1oMRX9w9aq04fz1TaEOTarHp95VB0JIFpTsqmxAN+iQXlt7Ndf6unrbAgk0yf4RU14cAK8TlX25Yleku8T7X6jbVcRFvLmuVhkAr4o5fAwPbxqO8Y59sQ91HP8AjNp+tte2e+Va18WM4+hx2zgWMGHcMerDejNwG5Eeb9YylW5p3cnQAlJ6SJ+pIdyra+Qr2rnIVqSRyqBQEciuP1fuukNt2i5Nne30cdwwBjZqMYzX4mUHw1yoWp3jlg44f7n5OY2tpd55xtkOA3S45MWwyKW3PE4RWtormvpw28y/3PpFbSEAnTseodzt6X0fysDvAo4o1NI7
ieI+rE3ZtyTpC4juI4LfeLhnIklg8EiFiKzFTVWAoAxqGFMhyxZh+31QS+Ss55GzwONPJwnja7GIWjEkPVLGb5Oya+lDYaVsXIrYSnXVJWCtKk7u3Sv1FEdstY7U0EEkq1pQ61BqST39mAHUm+2vUGu1leR92njlIV9UfklQSgC/skgAHPUamuLJuIcq/wComGOXE+JGpcsjxpbWR4uJseRZVsikmv10qxcioV67VZYSIapDBKflQ7tVodNdW3q22XC2jmsWoaW5FZM1+qtD7MV1f338ZjTcSDHPENDilB5sdNdCace7nQDjgEe5fEBkHH9+kNFT9Upu5jpACllLWokpT2Ou6OpQ6OxN8vuiFvhcaT+j7cdHdL3g3PpVZEzeKh7TQcfsxzRc3YUqFkEpxDKUpccXooDT1ASVAnyB8+suoLBLqHXTxgYsPpvcjEfLr4a4dHtVyy8wjkWn+jbkvK+taW2zHStxxWxXqL0SjXcEtJJP2A9VFf7O/m/lqSxPADnixU3mNI9crBUHEk8vbie/OPLOR+6Tlnh1jiW3sE0lPcxsSw+DGS9HkS8wvZbMDLbixhL0W00qr3sR1uII+k9U9vU6szaumIulelTu92Ua9utSyCucVBVY6czlqenMgcjjmLdvUS89RPVaDpzbIpk2HbGWaJ/w3FPinPYNZEUS8QasRmME3mHC49D7kOU62KlJZiZo44hTY/LcdkV1ZLkPNggkB+Y8tz4nd379Wl0fKbjpmyuG+JoB9jMBX3DHPvqoos+ut1tI6BRdk+9kRj9rE+/EzuMokWJGYcUlIMmM28s9gNQNFAnsOwPh9nUq+LZ05HCRtZLvnzx9zC21+uitK/LQELRoR4JXs7geZC+vltHUBjxwWdc9I4DA5jQ1uFK1glThA0PkkkeP269FAa8OGIspC5njh8mhYbhbnHyh1TZUAO4HbsT59+tkbnVkMsRLhBoJJzwFJpcTauNLUSW3FkHT+hAPc/f0fUARAgdmFaQt5hB+KuG5Nd3+mPL8xZH/AHlBI/wR1vQ0P0+nPEObM07cIzqyNSB3Hl8O47/Hy6kjKnLEYKCaHhjJGlqSrvqNO/2fb1JSYqKYwktwRUYcFey7PcSlCd27t2H+h63yThVrjXDaszZiuJT8W4Y4FpsHmPUbiempCVJ1Q9MXqY7R+KEEFa/+yn7ekzetzFDCDmfsHM/Tnh32LaiW85hVRT3tyH6TiYmNQVwGk7kkrX8y1nxU4rUrUT9qjr1XN/P5zEA5DFrbZaNAuY9p78EFmSUJ7nvpqBp308ugjCpwyxyUFTh/0VWwmrrgn5h9Ix20Gh/LHS95zAYfZbVTM1ebHDyTCY9AI0Ba26EHTt5eHwHUfzW11543i3j8sD8FMRr5c49hWyHH48dKlBKjqB4nxOh003dOOxbtJCdDnLFbdV7BFMxlhUVpyxWTy5x86zFmBtBG0OBIKT2I17H46dWvtW5B6V54pq72xoJdQGYP3Y6j/atyRE5Y9vXE+bx5AkSbDDaiJdfMFKj5FTx01GQxHD2JVFuYT6NSBuCddBr1xL1ptMmydU3u3OKKs7Fe9GOpD71IOP1T6B32LqXo3bt5ibU01qmvukUaJV9qyKw92ID/ALgeet+4i7ofZFwZUjNea38kxnOcjymJLWxj/A1ZjtjGsWslyW3jtPpjW8tlfotRE6v+jI1A9VyM29Y/pntx6Xgk6/6hf5fYRFJEkZFXuy4I0IppVQcy3Co7AxFX+rm4HrCSP0x6XT5jqd5op5JQSI7BY3DiWVwDRyPCqDxUavEoGZHCftq/cc9qtLd1vFUb2gZsrI7iTfZXkGWyeTjyDlttJeeeVMuL7ZHZluN+spKErWG0blKCd63Fqn9QdWel3WM8cu8HeoBFGEjSMQeTGoAFFTOnDOgqchWgAArpTon1h6BtJbbZf8uXfnStJLLKbv5mZ2JOqSTgxzoK5DMgVLEnSn5o/c9qbWui5X7QuJcqrXZ0ViwsMN5j
gUIjw3H0NyZjbeQrmuqEdkqXt2KUrTQAnt0uzbD6STws9nvd5DKFJCy2xeppkPABxOXHDjb9Q+s0E6Jf7Bt88JYBmhvAlATQtSQE5DOlK4s7QVFCSpO1RSkqT47VEAlOvnoeqkPHLhi4xwwjZHkdFiNJZZJktrBpKOniPTrK0sZDUWHDisIK3Xnn3lJQhKUj49z2632lpc31ylpaI0lzIwCqoqSTyAGI15eWu32r3t7IkVpEpZ3YhVVRmSScgBjn29z3urn+6DN4OO4y7NouJ8Uti/QR5qVRlZvataspvbdpQC4cdtClCBHcGqUrLjoDqghnqLovoqPpCwa6vAsm9TJRyM/KX9he01prYceAyFW4Y9WPVN+ur5dp2hmj6cgkqtcjO4y8xxyUZ+Wp7dTeIhVlRwTemBTRIMxlxr0kJSC6hQLe3Tskkd0keeuh6Weo7cyztIhrnyxO6LvlgtlimyPfxxIe7yJZjhEZpDiVp0Kh4D4HUdLlpZAvVzQjDhuu7ukYSJQQRxwEb4BBcfU2QVblbknT7SR/HpqtFqQoOQxWG8S6QZTUE86/bgJZDKFqyurKjudmQpTSVK3bn4TrigkJ+JYedA08yB1I3vby+1POo8UKlvcRRv0H3Yy9N+q4bDrKCznJ8i8Pk5n8ddSfWwK/2sUX/uCcWMXlbKklsH6WRJCilCT29Q9lbtQQQPh0p9Q+bNs0U0dTRBjpfZTap1JLbygBnOXtxWHwdl+TYXLfxH9YatsTdeBdxPLq6uyrHdw1QlbVddx5IjKCNQNihtHYaDpR2WzsNwoLpATXjwPfnid1juG+bJqk2qUxhhmKVUjvHMVHPFxHCfKho2Gl49UYRi7ktyOqU7h+GY3QPzFxmdkYyJcOEmQ48wglLalL3JGoB6dbfYdsjU+WpAA5n9WKNv8ArfquaYefJ2jwooOY4VPCpzy44tb4XyjIcpsKlVpYhth76Zxa5L7kh+fLUsBp6fJcUtf08cL1A18fs6j7hCkSVjQBQKH6c8E+mpryYm+m1tIlXAJqxYZ0HZly5mmJuzYf6rCRJCUPz4yiCpISj1mz2WlAQAnt2KekqaML4UyUfTnnjoTZ7w3IEkh/MYV7OPcMsfsev4qbu7xZpsLsaOspplwXUnSFKuS9Kr6p4dtJLlWhqS4nXVCHka9+tKo6BZJKqjV0947R3VyB554ZzKjv8qK6vLqT2VOX2Z+8YIacrUuC8lteywhpSZCdQlLjB0SXEkH8CVEa6+A8ep8caFgXIER59h7+zEG4sWjNVPhPDDNyNuBe10ynyOorMjqbNgotaDJKmuvaSVHcGhTJrLNiXBlJWlR0UUbgD2I6kPHkUIy7uBxCFqzAxycD3nPup+rHPn70f2NsEzeTc8uewXJh7Y+ZlKkW03iZmU85wNyLYqDj0hMSlnOyWcAyGyWSkOs6w3VK0WEg6dRYrWBJ0csyQhvFRQ/HKqhvhYV/CVrmME9jv7/pt2faLm6tdeTNDIVLLX4JF/w50H7EySDLHN1xT7NPcTyJ7nbbi/M+Ds9485zw26Zl5Y9URlUuDM+g8kypd/Zzi1T0kewQQ+HG3ixKQdzYCtNF7qrYurLdkk2xI9x2a7lBDElSoGRZWyZWQirA0dDlUg46Y9NOvv5bNsvzvPrHuTdGdT2Vk7295aBTZbsA3iglsSHrLKp8t4QrQTCrBUZcu2n2UYbz3xFiKMS5r5uxDkrF2oTTeMY5EaurrIcEcZ0SmshZ3LbBvseUz4MPeqphwflurQdA2RC+ubNH3SKOPd1yYxkaHUAAFl5SftEZNxoDjlz1M9R/SLqHrOW+9DNp3S06PkqXW70pGZiatJaR1aSK3fiIpc04LlliwaGmNISzNYeSppsqKW/6ypKTsBPdIO7TXXy61vrRPKYHUefL6UwIstxjmGphR+zG1FS5scjv7gw7uadT+IIDwJSsJ103IB3J8tR1KBSoZOPI94/XwOPm428N3bsjgFGBUn2in2cfbh28
fyvRfepZi0iXCL8ck6JbkoIQ4h5HbTVxohQP29Y7ouuIXMfwGh7x3e44rXpxmtL59suMpYiy/wBbgQfeM8fb2xrMdzKpdmfmMWyXam2SnbsZp7VP0Zfl69kpRKUg6HuQknwHWqC3uL3bpPLyMdGTvdPFQe6uGA9QbZsvUNtYTtqnuvCwH4I3OkM/ZWTSoHEgk8BhsUcPJIFzK40pWHXrVpS0yriQFFDdQ44VxrB98bv+XcYWClIOq1fKPA6Z30ttcQLu1wQLc8EH7VM1A7ajP6zj23dPXFjeNs1rqe5z1zPmxjJJUs3MUOQ5nLgDRz5hkDPH1HLwXj1f1WUSUFzI8j0S89DedQj1nXHRqlVkpk/lN/hZRtJGm0GFt1mdyuF3HdBpsgfy0/aH/e14n8WGDdr+Hp3b22zZfFfn434kEgVZjzenwg5AUypQFL4juVcV8I8p5YuM7eO4rJyfKnYrktTL9u9Coo1m4y5OcbkqbdmuNkKdUlwhStxCvA7+oLf+MdR2dkCIxMscdaVCguV4ZZDsy92PvS9wdp6Xur1gZWhaWQ1ObkIGNTnmx5muZrnhO90rVhOtuIbanmSKy1gu3FnXzIyyh+K+titIKVDUKBSspUk6pWkkEEEjr3SLRwwX0MwDwtpUg8CKtiR1cJWlspoGKTIzMpHEGi/Q9oyOHTWT4fMtIzUXRYx7lOgiqfrpyUhH1qAAj6yOgkOPQJKgPWZJKmlEEf0qMOVG2SczQVl2eRqMOw9hPaOR58DghC6b3biK4pHukYqD+kfunmOX1HDDx2RkFtlSsPyuG7X/ANpkWmRuyBrFRVQtXQ+0+ra27FnlsJQoHuCe2qVAGLgW0Fj8/aNrafwx046jlSnaOY/WMB7eOebcRZ3I0rB437NI4GvYxH39hws0LU/I7LIcmcjOpFvKkyYvqI02RkaohNHtoCiKhKf4dQ7tVto4rRT8CgH28z9eC1nOks0l234iSPZy+zCXHwi2npgSiytKkI9Jb69GEIUwd7TvqO6aaIPjp2063edDGzIzU5gcTnx4Y+y3kbx0ALqQQa5D391MKT2FUzpcXLnuSX96XpS48dQbWoHY4tUlW1CkLCtqlaaefXyWRnh0soCHLM5/VgKdwWL8uA0RRkFBIA7ATy+7DE5L46opeD5Pjf6vGhVtxiWS15YedjxpSJTlVJcTueWSChYbAUpJJ08O/UvZrxbW9huI0q8cyHmRTUB+nnhX6ksbjcduuLNpNKzW0oH7QfSdNa5EcMwcUs+0+NZ4PEuKXEK3FrZvI5Ko9NYM37MubDqHFNCU8fRU6/JmQ7tlxLK9EaMrIVoRr1dfX+4ie3+WheQRgVlAFAeYXuDDNhU5jHLPpZ0NZ7l1BZ9W9Vxwzbnt2pLRX0sEnJ0tcUNfFGABH2NnXLFkN/h4fo49HIs7mK9EjBJmJc+kivvvoSuSZlWgpTKbU+VHRSlDcpSh+I9c1X26XAkZ0AFeXMDkAe4fox3jt+0WYgCMSzjie0niSOdTU4jTk2NZRizbr7jTt5WKVq7LrCtMNpIBSkyoqR9Q0UoOmp+U6DoK19JK2piQ/wBuDC2CINIA0/ZhtQc0cdaVEkhEmE60uOuOoARlMrTsW0rXutCk/iSdQfh0VsdxWQfL3VM+Fc6+0HLGmbbQ3iXIjmMqew8cCvKeM25Tv6xhBDUhGrr2Pvu7I7o8VprJCjqlR/8AVLPc+B6zvNoGc1mRX9nt9h5ezG623CW1/Ivxqh4a6Zj+sOftGBdNiPy2lhcdcWZE3IdjymlMvMOo13srYUAvdoO2vS86EVBBDjiOfvwfgdQQVNYzwI4Ed2NCcy5Px9Z1KpdIpUhsqGriqt5SRLbToAE/SPAOgeSd3x61gNprzHLGUqRSPpYH29/05YbcNCX4UtlQ3uND66NqSVasApkIHx3RyVfaUjrHzQPiqMR226UZxlSD7j9v68YGnAlJBPcnw10/Fp8PLU9amu4F+JvsP6sfRtV8cwn2r+vC
lGcHZA769gNdNAP6ifLTrU25WaCrPQew/qxmNn3E8I/9pf14c1JV2VvNYr6qDJnzZLiUNRobDkiQ8o9vymW0qKQde61aJ60yb1tsS6pJNK94b9WM/wCB7nzjH+sn/fYnLx37ZZAgw7zkuxTjldEZcdeq2n2UWBaCw9rPmuH6ata2qIJJU6fIdC7rq7a4E1RF5TyoKCvtah+oHGEe0Xcknl0Fe41+6uHff+4nBOPoTmL8Q0sCY41+S7bhpSKltxPylxUhQ+sunQofEN6+Y6TNz3jcd3OmQeVajMIOfee09hOXYMOW2dORW/5lxXX9v9H34HcLN7vL5sexyi0kWkgshSWnDtitFx1e1EWGg+iygAADQFX29R441AUmuS1NePHPBfQsJZIhRdVMu4DBWhXEFpH5KUJcbaW66oHskNIUtTaT4n8OhP8ADrM6XGvgBmPd9MseAcYaf1QlPl07fEkad9TqNR4/HoFKupi3LBdCAKY3GpaWVgk6aEeHlr9nn1DddLVGJKuSufHD9qrrYlIJCtB9mnW1H5c8RnTVgiY5krce4qXlOJQlE5hDncD8t1YbWO57AoX1tBLEUGIlxD+S+rmpw4eQshra3IStyU2n14UdXZQ7qaU60og6+ICR4dSXsp3csFOBdleQi2ALCgY4aozmkeSh4S2txPpyAFo7O6dl+I+V0d/vB6kNtdyR5oXM5HLn/T99cZLuNqreSWFRmPZ2e77sOvG89gV8xuTGloUNU6hKx4AjTTv49upFg1zts4kZTo54xufl72LSGFcJ/NfMsq3pMhrYjrZZrqenlSCon1CmxaySK2log9gHmUqUfgn7etvUPUj3e2XsKcIo7Ynt/NkkUfaueNGzbPFb7zt/mA1mkuAtOH5caMa+45f0YiDi0lKUIe1KgtAUVDXuSNddTqe6u3VW29mAS4zqfvxaN0+o6BlTBfrrxCGSnd4aAd/h206Ko4QEY0pakgE8cOyPeoj1wBWPVnr9QgHTSLHUUp/g7JCj9zfRCKby7en4pD/sjh9Zr9WIs1p5kxNPCgp/aP6h9+NetzO3x60j2dE8oTgtLbTYKl+qpZ0S2AD3JUeplhuF3Y3IntP8XhTt/pwD3Db7e7hMNzlGM69mMkrkm3m5ZCup8wSH2JjSrF1pX5UoFRaksIVrqYcdpagnyWvVf+XSRuG53F/P5kxBccSOBI5D90cB2mp7MQbfbILe0McYIDDwg8uz3nn2DLtwQuUYbY/S7xnwd/5CQodkrG314biiPFSm96R9gHXygqG7cD9vlYq0R5Go/TgdRltrATv7n4+YGnbXonCgp242yuQaDC1HDSUglSdd32eAP29+rljQRRrGvwqoA9wpiqZHMkjSNxYk/Xnjy6trdqFpA1OoB8fDxPxPWeMMN60nLLkevgsOTLCxfZiQorCd7r8p9xLMdhpI7qccdWAPv6l2luZ5ABiJczeUvfhLmjjPB5b6Jk2uyjlWsmpS4u0q59phOJTo5ClNxIcP0xkNzXyE7VLeUIzbqeyFFJ1cLZPJTTWkVO+p+zIfb7MLc7RM9fiuAeYqo/Wfs9uPjvKT2WhFdzRldfyZjBluSUNScVkU99Quvp2OTMYt6mLDMJ0JA1bdQ6y6EhKgB362ItsgItQI3Ipzofb9K4waSZ8rxvMjrXhQj+qRT9WJE8QX9dTxIUOhun8kwSdKkw8WuJzBi29bKjo+ok4lfxtNrVhGjKD0ZaPkkMbikAJ06C7haFl1nJxx+nZ9DgnZXCoRoNYjlXv7D34kxPtEQ4H1ClDaGyoA9k6nXz8v/L0BiiLyaBxrgxLKEj14BuRTMefcDuT+rIlSo/1dVQsJcQ24h7T6WVdSGil5iK+AVIab/McRoSUpI6crG3kt1BjIGebHl2gDt9uQ78LF5JBI3/MVLEVCj7Cx7O4Z4b68oyl2IqtnW9ZMxYsNxTiLlC0iiahNFKmI0ZtlKJrBjlIU2sPeolYB1JHU8JYB/NQEXNa66nVXmSTk
a88qYhmW6K+W7K1tSmjT4acgKZ5cs8OrHbCmhlx/FHZBiwQ25a08zcZVYHloaMiLIV3n1iH1BveoBxBKd4+bXoTudvJLEWcjVyI5+7kedOHGnDBHb5YUf8gnyxxB4j2HmPt7cGuHaIeifUqWltpKCtxalBCG0aEqW4pRCUpA18ekl3ZJKHtw2oAyawRpp7MC/IuZ4EVbkShWzKVHJEi1fIbjM7NQTF36JfWnTso9vhr1sWcJTWfdgdLdNKxhtBVv2uQ9mAnacoMWbq/qpRsy6XGCucsIkBuTIaenLj+p+VvdZj+mhSgAhJ0HRqz3C3f8sUQfZ3frwLuNvmgHmuxkl7TxFeNOXdgS5fYUFshtNTLcrLp5xoKalFRjh99Tzr76AslCYVNGirc3NkhW5CBqrXphjs4Z1LPQpQ8Ps95Jp9ZwszbrLbSiMavMqKg8O/3KB9wxTf7ueF8u/Ssj4uo0V1JQ8wSGcotoj++xxvHLmPIl0zuSFmKHJNJGsn7QGahAKS4lCkDVQ1Mbbt9wlnNaWUg/hlw4V61ASQqVWQDuDHVQZjPlhc3y36XveorHrbdLEzdd7LY3AspI9ImntmIkksmJ+MFk1W6ufA7kA6WpisfgtnlXiXE6Dhx+qpbLEbHl+3osG5FMOXLucZrZc6OzmVDT0cuOqRX12QyoCpf1C0JXFYGg039mHYNr3/YdhbYZ1jm25pZFhlBIdFanmKqn8DkV1cAuXPFZ9WbJ0L1j6kL6jJPd2O/RbXay323alEFxPHq+TmuJEbxS2yuU8oZSSAMa6c7S8h9v1t+nvXC/oFMzFKtHLOKVy0vVai0y1JisNbnFxUFxIGugGvfw6Xd526Ao8zMFckkilST2D+nFodNbnLHPDZuGMAUIG5A8ant76YsI4Ixhugw+ogIyCvsnYZjOyGIziJbCJEZBLJfVGKo5UhK+6QrUHsfDpSsrxbaRXjQmSIkjV+kDsxUnqT0VBd7jd2r3Wmyu2VnROPhNR3V5HnTBrssLLVTa2UGUP1GTNb+qMhOxtyTJQp5qGlQKtjcVPzqB11dUNfwjoTuEnm3Wo9g/X+nF6ek+1x7T0l8tGxatzIxypQkLl7gB764Ln7dcOTBzvnuPKbcacRW8dEhafEql5uSpKx8riVag6j49SbwU26L+ufuw8WZrey/1R95w7/d/zZj+H8tcEWeMpk5flXFOcvTMzoq9p0U1HjvIFDPwqFIyjJlo/RMbkLvbeCplMp1C1Nbl6bdNxrYrCdrCdJwY7a5UAHmdJrUDmB9uBu8XsKXkLQ+O4gckjkNSlRqbgpNcq+3Az9zGT+4yy5d4YpER8fjXixfZTxtjmJ08nI5bGVCHHpo6spsrWYzRssRK64lSBMLCWoTUd1wh1Sm0ibt8G1QW0z25drb4Xd8q04gAcjXhxPDES9m3OS4iSYItzmyKnioTkKk0GWedKDBQtPadb306De80e4j63kG+hRIq2n6fFREhzGlNvphYi3aMCXXIiytqkuQkxXnlpC1gFWgiQ72kSlLC0Y2yHiK/WaA/aTiTLs7zEPe3P/MMOwfUtf0UOGRyr/134YyPj+JkWbrzDH4GS0aMAzO0omHWFPy3m6yZRZPOr3o82DKREkF9K1qeTPaaVotDiSnojts2230E3kx6HkUiVAc6ciOXHgaZH7YG4RbjZzRebJrRHBjcjnwIamfA9viHOuChyryLmcPOOI63lOnrqirwzKRyHf5Nh7s27h/ocSotKqPImV6YxsK2CLGzaDjigpJCSofhI6w23b7X5K6l2l3YzxGIK9Fz4kV4E0GWMtwvbgXltFuiKqwyeaWSrCgBUEilVFWzJy78Sq5Xj41yPwfnsAvxL3E81wK7rX3okhD8Ozo7+qehyfSkMKUlbMqFKUApJ8FajpRsIHi3WO3nUq4ehByPfhnvJkfbpJ4SGQxkgjMHKoOKCvcn7KZ+C8IYlTxHJWQ4HRZtkdxU2jrSpL+K0WQVrRdr56kJV/yyZ7Y2
uJAT2+YA9R79AlyyjhXGdi+u3Vjxpiq5v25MTpMiTGg/8ozNDNYoJPpuFGu5fopP57jmoUSfAddEWVyEtYanwiJaDvoMzjgXc7dpNyuSKtI1xIGc9gY+FR93dgmVftRYkVj0aUylIkS47j9i8hanIzJ1D7bLaflDY1J+JOg8Opq7mygsMzTId/ecRztqSOgA0kEZ9g5jLliPVz7TMqyHNqvBsGYkOzcivf02vekJV6UKEXdsi4sNOyIsCEC+4fAdh0WG9RW1mbm4NFVakdp5Ad5OF+8tPKmdo6+StSSRWijiTi+3hHjTj/ibiWwwXEm2K3A8JVR4u9cPJDcnI7+xltqyzMrl0fO9Y3UtKwkn/gxkIQNADqjXVxeO3nvV7+VXeg5AL4EXuX7TnjnTcd52jrXd903S/kS16OtLuzsBIxoKPMBPNIa/G1GXL4VFBzwT87ziqoBNiQZDcyYiGY9XHZbeTW+gtooissyggJS81FIcPb7SdT0qbRtlxdFZXBRdVWJI114k07K5YuD1R9VOnejUubCwmW4vUtzHbIodbYRstIkWWlA4jIegGdOJLYqL539+PCuCRWMZjKmcp810M1FDc02CFEquYqfp3FQX89yZav0SktqqWpLJSHFyHmFAFOqB0wXnWOxdHvI+5XKrYvmI1ozrKTmqgZ6XGdOTcOOFr0E/kf8A5hv51bGwPQ3T08e+2siwT7rdK0Frc7cqMYppnkAU3Fs9ItYNZISAw8AOIH3HvllchLyKqyL+1ajEauK+zd11LYuFhyYElw1dndyWiw+y0gaPpBb1PYK6eujeprrqOF9xeKPb9hUeGS4cB5m5AIT8FOP1Y6n6g/kX9Hv5X98tNmvNwu/Ub1fjZXmtdqglNlYsDR0muFpRlPetQDpbAx9vvIvP/N/JE+m4W4Jub/jx+DY0673C6huDW0z77ekK8dym4CK6Ulp0AOht9xRbJ0PWN/dS3F0bixmjSOFwNZZEiociI6AA0FaFR3YePW7dpOo/SRPTfcz0n0nZx3kN5HDGmm+mlhqVjlnQvdlXqQ4kkKk8QBiT879p7n66kjL8syfiehW+kvyI067tLOfBDrZQ3DmGliymJU0uFPzNkp8QTr0GfcI/N8iE+bOWzYVox5kas/ecc17V19tHRO1tfbskrWUSeIxlWjavDKurNqAAjMnFlHtX9t9N7SOLpuNxLP8AWMizfIBlWX2jUZ2HAXLhVz9bXwKiM6pTqYEKPLUS4vRbjijqBoOlbfpBc3AiqSsa048ycLEPXE/WG6x9QG1+UsSzLBGalhEBxdvxMzEGinSvwjOuI3+4H27Z5kHIlVzXwNmuR8a8k0kSah+zoUOSoluwtLbsqHbVLzhrZ9ZKQ0FSEuNq1CAfHuG7YruGfbBt1yE84EaGagH9RjxBH4Dy4HLBXctVoJL9YRcwOoM1vqoxIFEnhz+LgsqDNlAZfEMBv3V/uMYj7bM5quKMip1cyLODVTOa5DXxncGlyMtnRE/qb2M11klVXcY3JDmrakPeq0tCgoaEdS221ppo7a/Y214fECRVVA+HWeWrtFQOGCHob6m32+224raWLy7LBdGBJdSr5hUMJ9URPmQCJ6JGJQpmFXGVMVI8qe4bjTNa6suafFsmSqxkWDDkSW7WMPVioQYcZQ+8l5SJCJLLx9NxncNWlBWh01I7nDcwp5Jj1SkDT2EnLI/TLHRmw3tqAZZ5QkaVLDiwAFRl2ngO/EfKjHs35TsjaYozkTEWLNVXUMPGJE2AqysdwbeXHtIimFtV9XHUTMlOLAKiQBppqmXC2ke4x2Ebq25KNcrfhhX2c2Y5KPxZ8hXAfcOoNy3W/NnaqzM6kxwjMRRD/eTHkzcQSeynPHR9+2V7ZZuNzWs75DkxbnPQ61Dx0t7H/wBNen7os2e4+EpTJnswHFNNvbUqUpa1/wCUkL1dvT3MItYxSFEJbvp3d/PsGXbh69Puk4dqkk3GbS19cSCrAUGo8dPYozOWTMS3
IY/c3VAc9yXL8jaNgzWQ2x28o0KBH+HgA3p/Dq7ei/B0jt9ePy4J95Jxxl6tEzepe8acx8390aDBPx55caoZKSEqZXonTufTX28/Ia9T7khpPbhY262KKDTnjO5WOSX1PKO9DrLqQT31XtKgf93WtJKCnfgxJDpXUeBxqyG2oUcqO0EAHy7a9/56dEoAWwGumVQc8M61z5mOw6h1BU6gBpsJ771agJCUjupRPbQePReGzNaj4ePswFe6eY+WASeApzwsU3t/56y8uz67B5MVc2fUUrLFu+1WSG7fJTH/AEOofYf1XEtpzMpEhUV0IkxIChNkoZhESCMuur+m7L8uW4DBVZ6qCw0x11MCOKggqGFVZ/y0LSeHD5tnpB1zuq+eloY2d0TTIwRg0tPLQqc1dgQxRqOkf50ipFR8K/I3s19xvGt1cU93RY/afoNLX5BOsaC/RLrBRz1usC4TKnRa1TVVAsGXGJ8l5DTFYoJdmLYjvMPOwtr9Qul92gSe3klTzHKBXQhtYz00BbxEEFFBJk4IGZWVSvUHoJ11slxLbzxW0phiWQvHJVNDZa6uqEIrArIzALFk0pSN0doxWsW3x23n47k1RYUN/WvehYVNrFdhTorhCVo9Rl5KFFt1tYW2saocQoKSSkgl0tbq3u4FubWRZLZhUMpBB+r7ew5HFRbnst/tF7Jt+5wyQXsZoyOpVh2ZHkeIPAgggkEY+xmC84Aga6keHjp59vt163s9MxiD5IrSmWJHcbYa7Ofj6MFxx1baUp013KWQAB46d1dBNw3IRIaNRQDg5tu1meQKBViQKYn9i2IRoEaNEQgFuIDvWnTR2WoD6h3X+oBSQhP/AGU/b1WV/uTysXqat93L9Zxbu17PHCix08KffzP6B3YJ0eqaSjcEkAAd/j/5AOgL3Dk6ScNMVmtMuGPyoaAdQSD5dh3HX0OxFMYNAqtXPBAoFkVNadT3hxyk+RHppP8Aj0AIBFMPshIlY/vHDjDoWkgnTsfDXTt1rIoa4+aiVphu2sVD7K0KTqFBQUD3Hfx/xHUqCQq9RxxCuoY5EIPA4iFy3gzUqPJLbQO8KPZP2EafEnv0+bHuJBAJzGKp6i2rQxeMe3AR4l9zvL3tyxvNuJMLt6ShrORrWG3T5flAlu1/FlxaSI9ZcZUzGYizC/GNdo6ttTam2X2UvbFguocI9Q9F7J1Tdwb5fpJJNaIdUcdA1wqgssZJIodWQNakErUZEG/T/wBVt/6N2+46Us5IYre9lURzzFtFm7kLJLQBtS6cyCKKyh6EFg19HtD9uvGPAXGzf9i3DWd3+duN5ZnfLUmW1a3PJOQWKVynr2XbIfll2vW5JcVEZS64202sq3OOuOuucxdb9T7v1HupG4xm2trf8uK2AKrAi5BAtB4shqNASRSgAAHcvQXSWy9LbMBtcgurm6pLNdlg73MjZmQuCaqakoASADxJLM0sOkvDxj94dex7EYuevd3wn7eYLozLJ2bDKVslytwTHFNW2WWSykFofp7TqUVsZ3XtImLjx/Lfu0BcemeheoeqZB8hCVs6+KZ6rGvb4qeI9ygnuphD6z9Sekuhbcyb1cr87pqkEdHmfsogPhB/acqn72KM+aPcvyv7rr5CMkScR44gzBIoeP6+UtcQrbV+RYZHM2MrvbQDunchEeOf+E2FbnF9LdNdFbL0XbE2v5+6stHmYUPesY/AvbmS3M0oBw91/wCrXUPqHc/LyVtOn1fwW6niRwaVsvMbmBQIv4VrVjhxjBar6utblbExVvsJdCEjTTUeJ010KupF7eyqrFPjAywoWNhHM6eYfCTni46hr+MqnjSIpUuH67FagB9S2zIbUloafPrvVtPkeqEuJt6n3pl0toL8OXHsx1zZWfR9p0ertJH5qw/FUaq04141ry92I+RuQYUN2REfU29CcdWGJKNFtqRqdq0OdxoPh4jpzba2lAdKiQDhinx1EkGqG4o0J4MOFO39eGTlecV8Zl/1VBxp
aVFhwLRooHXQdvBQ8x0YsNumdgFHi54Td83y1RHDmqUyNeOI4TMzbcntTWVJb+nkNvo2n59WXUr+U+GpA0+HTku1CW2e1mzjkQo3ZRlIP31xVX8ba1v4tytcp4JkkT+tGwdfrIp7MQm5/rF5gcox2T803R19kpToiXElNB6HYxu3zIfYcQogfhUSOk3+A69qa1ZSVQMp/dKnSQezPgeYocdZwdfW24bpb7lbkLcPHHKBykjcBkkTtqDRl4qwI5YoByzj7IsO5LfjPtyEMKkladCtKXElfbQ9vHXqqodul2y/aFq6NWXsP0/Ri8tx3G33/aUmShkKfUcWU8GRimrb9bQqBZcQlR8S2WydT5/D+PTZCxyP0zxT9/tUZqaZ0r7wa4uW4Yt48f6RphKNVNxnGUo1IT8qUqQgD4K1H29ZToZoSh+IZfqwS2ZFtLlWjFIZBqA7K8R7jgo+4X3cq4ep8d434po2+Sfc7ysqVjfFfHsYKk1FDZoYQqdyBybOYKmqDDsQjPCW6w6pEmc4lDDY+ckJlxaa5vIfnmeVE5knlT7+/FuWjx2luNwclYVpkASWfgqoB8RY0r+yKk5YMvCmCXnD/HFJSZVf2+YZpaF3IM5ze8Up2fl2X26g/c2ilEqDET6hRbjMJ0bYjpQhACQB1G3a/m3a+N3IRpVEiRVAVUjjUKihRQCgArxJOZJJrhp20yiMfMGtww1E1rx5DuHAYNFcG5U2G2sLWuwkswi2he1wMznExH0A/Fxp4ga+B06iC4kiibSuogEhf2iBkPfwwcVhIPLIqn0+lcU9eyH9zXF+R8+uPbdz1ZtVub1+ZZzQ8UcgONorKbmTFcXu5MRizx1x9YYbyKHXllcquKyHUkOMEpOnTt1TtG17VvEtr07cm96f8qGSOYVJTzolkKOaCvlsWjbgaqaCmAkV7CXMExyEjKDShyNOHs4jnxGLOuXeQaHieoXPsb2m3TPSbppDzwW3JVJaU+y4YW9Dy1tsJUtaFFKUBJKyB0rjWZPKRSZRyHAd9ezswo9ddY2PR23GeSe3S7c6Y2fxKGPAhAayEDxacgKeIgYqvne7ylzLNLuupnU5XbSUJhybi1VHkSrF8IVDjJjxWkJajQoCSVNBQUWkpASrQ9RtxuJLYLH5h8xTkoJ0r2/TLFOdDWX+et4n3W6tzdxaSpursB5HryiT/DgSuaqgrwqxxMTjG7tmYkB958oktek4hWgUErRtIJC9UnQ+R7dR7fcy1Vm4EYvOHYbPbkWK1QeEUGXP2YlbjWWz23HnFuB9bp9aQyEAJeH/AIhCGwA0dT2IACT1jdb9AqeVbprYcych+k/WPbgpbbI7SefMQisM1pmf+9+/Blr5TdlHU/GCk+kltS09jvaV87SiO4SUdxp8Og53m/8AwsFHco/TXBZdrtFXSwZh3k/ophEy1GQ18qhynHiXFVk8VmRRFLCG01MtBQzcr17ba1CiHD/lSnrMb5uQjMLOGjY1IKr99Kj3HAqfpDYbi8G4NE63gFNQkkHKma6ipIHMqeXZgwYrWwb+NbxZkZh+XKbZf/Vnlb3Jq0temtbSHSQw2lXdsDy7+fUhN9unKK9FjTIBchTvHE9+eA9r6f7Pt8tzdQapbq5fU8k1Hk1UoNLZAAD4QFFO3CChV6465T11uKTP6aDJq8fvpCG1QsqoVg7aawWsFtVjXuEGO4r5kOAKG5K3kqYP+UKC6dBLtMjBpEBzik/bA46W/EOYyyIUiNbzXzO+2CQw7/BGVilceG4i5Bu1lPPiG8QqGdcNrE1QsejWdVkkd5m13OP27s8KElEp3cHJAdcG58vFeoVqQvXUE9EL5Xu2S5syDDSiBeBHZQcKfZhbsA+3vLZ7sreYxLuzcQx4sCfirXvrj6t5iZ7Z/cWqG6FMGmz1plehSEJTiqEJCh5FOmp06jPqXq3a/MB1aoq/+MOGa0EZ6O3FYj+X5c4HKg8umfsw5ue5ceHO4oekISvSHZBsLP5YX9NV
ncpP9egHYdQOn4nmjvUQkeNa048WwV6ilSJ7R3FfipXhwXjhjS6qZe2tTMxVyUnKGnW5UGVCASuKG9A5KlvKHoR4CUkpWHNULSduitdCWhaKzgdL4D5SlGB4GvIDiW9nDjgKWub24Rtvr80DUMOXaWPAL9/Chw7ps6XKkyqCrkpv7W3ktSM6yhKCWJ8iOA0xQVSUdkVNcBt2g7Cdddy1uqMWGFUVbqZTHBGKQx81B4u37zce32AAYIXdxTVaRuHnkIM0nbT8Cj9kcKe2tSWOClGZsaioUhC2460MLSgOBpDe4I1UEI2HaUNgkfHoWTDPOCwqteVftz9mPSyy29sVg/xKZVy5ch7K4acq2rUnRUt24edbS+y2pYU2FBG4IToUtNFBHh5gEEdTwsg4ARAGh7fb2nApLeS4ILFpNQqK8v1U+0YGGVZk/IZCYam47pQoNMMAKU46pGx9tbpSEJRJSPDTQEDrVdOtpGRmzV5/Zl3YJw2Opg8mQHEDvFG9x7KYi3yDcznqyycdkKS05As4shClF55TDkB9xrctRO1QRuQdPhp0EG53OsBDnUEUyzwYfbbXyiCoIKnvypjmt4L5wt8Dt7CuiNIkxGsns5cJ+QspmU82LbzmmDWq1CGoj0ZO11odiSFfi6tG+3d/KR3LMxQBhXjUDNu3uxQWw7bHPfXCLoikWY6aLloDGgHZ217cdDPGnKUXlfCqTJ2pIlyjDZiWyHVlTkWyZbAUFHXeEvJG5Ovj36qrciI5mDfCcx9O7HRe0Mr2wdfi4H2jLD2bUSdwUSnuDr+HaexSQeykn4Hsel6UAsSMMCvQaSPFgV5jxTU3/q2FF6NNcj5+yS3VTl9yfXjt6CK8o/8AiIGmviOvLIUyPD7frxsUBs6ZYBJj22P2ZrLmK/DmsklIeGrbiddEuRXB+W8yfEKST00bVuWtPIkOeI9xbqwLAVGFqxxeqzRIbkqbg3GwIh26An1fUSk+mxMHZMiOs9u/zJ8j5dTry0iuk1EUcc/19owNRZ7Ni8GcXNDw93YfsxHXKsdtMMs0sWTQacacKXmSkmPMjOAoeLS9PzWJDBI+IPYjXpZmge3bQ+R+wju7sEYbr5gArX9I9owPjRWjNsiPT1lpZNpW09BMOBLkmVAkD1GNdjSgpRaV6a/+2lQ6FXDJC3jZQO8gfpwbtZNY8XsPtwU8c9tHL+TSj9JiUyqrVLSpqfeKbq2fpl7VslIkKC1qS2oAgDUKB6XrvcLOIkB9bA5Bc8GIpAi+M4k3jHs9radtNhyLl0ZqOz870Km2x2VJT3KZNvP9NGp8Py0q06X7veQKiJaHtbM+4Dh78bFmZhpiBb7v1n2YcdrzfwXwvDk1PHNXBur5pPoLFMA+VOj5SbbJn0qKvm/E2yf/AEegTxX122uSuntbh7h+nE2O2lnznakfZ/R+vEVcw5gzrlLRV7YmPWJmuhigrVuR6poFtKmxIQCFz3UhB1U7qNfBI6mx2UcKClWkqak8vYOX34JwJFbt+WABQe368I0eG6EJVt0JA7BPgB4dvDuB5dYSR1FcSll7MP8Ar7JdS8wg6hz6SOHdup9AEFWg1/8AFUFd/wDKD8etDMVcV/wtI9/9GMIlEqMRx1t9Pbgh1t6XI8kNbjubaZ3E9h67iEq089S2lXXx5CynRkafecffL0OK+36sOeFI0Qnt4j4ny76/HqMy5Z4lBtRrXPGz9TvWTroEka9+xGnb+B06HuhJyzxuVwBQ4xvZKIbzMSKh+bPlPNx4cGG05JlypLighpiNHZC3XnnFkBKUpKiToB1NstquL2VY41JY8hxOIF7uUFnGWcgADPEo8L9s+e21Y3mHK+VV3EGJtJalqbsJEY35jBbakKlGS81W0u9JGnqrecSr5VtJPbqyNs6NiiGq+YlqVKrQkf1nPhX/AGu+mEDcOqZ56pbCinKp/QOJ+z34IeQWntLxxqHZTafLOWJKV+g1Zs/VSYjnq6uI2F2XTUj7LqmyUrQl
STr49+mBzsVgASIQ1OzzCae3wfVTC1B87OxjiMpPE56R+vCMjmz24jVpPtoW3AUAlT/6Ni4mFA7Aq2zNSpPj/wAU9/Pz6wXetkPhGkA/+Bip9+JLbbuOnWQTT99q/dj8i39m+XurhvwMu4ps1uIS1OUbCNEQpzUoJcjv3VAy0oDXc8lKdoOh7Hre8Wy3lVYQk+wxnPvHg9lcaEmvoBVTIo/1h7CDn7cCbmf2pcqs1V3m/DWV1XLuHT8aLLlPXKYTk8gVsa/XANe6w+5WWwTJsEKWG1MurUjahpRPSX1B0MZ4byTbG8dxBCojYCrGF3ddL1CmpevAVpQVJGGnZeqBDd2TXwJFvNIfMBNFEqKjVShJyXTxy1V5YhhgeXlfqVdoxKrbSAsw7CtsGHYU2FLjqKH4syJJQ2/GksuApWhaQpJGhGvVV/Iz2TGCdSrjIgggg9hBzxb8N9b3qiWFgyHMEGo+vB1gFyWtlmOdypDjaEq1+VJXokqV8EtjUqPwB6wFq0pCR8zT6ffg1FOscJduAH0GFGXaFT61Nk/TNhLEUd9BGjpDbBPwU4kblf8AaUevssLlyU/wxQD2Dh9Yz9+PqKPJ8X+JxPtOZ+r9GMzliuDG07pnzGtQPBUGC8nsv4plz21fL5oZOvisab0jeBKn/GYf6qn9LV9wz54XbtVlckf4Kt/rMOXsU+4t7MasZaCNDpoe3by7afd2HWcUefdgdIxriUF3I/V+K4k9WinGoNTMPxDkd1EV4nx8if59Eolqo1cjhcWke4MvAVP254CTE0jT4+R8P4gePRKMdnDEiUqThRFiew76ePj59+3wIGnVyYqfH4z1KOgPj2P8VDx/h/q69j2NjEZ8SDyZhsmxkiFDNi9F+vWEKRAmWNfMr6ycQ4pDf/JWUll3UkAbNdR0Z2sipH4iD7+734GXoOtWPw1+n1YY7k3Iay0sqy1veXf1CDMkRpf1eMoKi+y6pDqg6nK3kvBahuCwohYIIJB16aHqygqz6e4VH3jABCVYq9dXf/pxnduLBtBJveUTp8xCMcBOn+UD+5tD/PrSocmmqT/V/pxsYgDPB3wuV9bg0xyTZZPusMio/wBDbyCGK6R+o0qn58+wrml2U93RiMpLDy0hKfzkpJPgPl4VCZ8QuZPHiKD9Pux9tQxBzahYUB7uJGfuxJy5kpssMjTEIAKo5DqB+NDzYCHWjp33BwHT7NOlqyZRd1PCuDV2rfLUHGmBDkr77eUSXlTr2RDmojzK536IfQmC/HQphqMv64atRBqzpsTopsjQdOEYJgAXwkVrTPPt9/H34XZiVuCWLlTmMsqdgz5cPdjaivxlNSHH5UwKbQVNhSXGv6D+BCVuB5WvlqO+nWuj14yV9n9OMiw450wiVtp9H/cNzKsrqHT1dDZuWJlxQ1BfEiOqLDglwzleo/JmPN+kgIUVrSAAPEfbkqkH5uft+088fIHpI0hLCNQa9ncOPbgd3nJ1hd1v07rjlbSIALNYlwolzUgah6W6PxpOn4Tqkfb1WV9KBIdGQ7fpwwYWea6VUNVi/Z5n2n6DEc8mzN19am2nQiKzqURknbt01GuhI9Q/b4/d0JaYlqH68MVpapDGCfi+7AfyHkX9PirckyVNsq7ts6gyHwk+DWpBbb7fjOn2dSoZ2DUXP7sabpAQdXD7cRI5S9z7lXVy4kWeuM0pJSqA4svIkbSTtMrUyEqXp9xPl027ZuDwOHU+IH3fVwwj7rYrMCrDwniO338T7MRdq/d3YUFyixlK2/XpholQrqQ7YtyocMviJFamOLdfr4jTjiFtpQflU2hW38tPVrbF1BYXcfy98KKcgyjKp41HA+/tPbhC33pPcLdlltdS3AGvS1QwH4T2jtHsGWWJBDkrizljJuNbzKoUPABAt02S5lUlpqF9derfdtpq5EdtaA7ZqcS3HMgB5biVA/jAFsbdZudtlggf5ils4hrxANMu3wjjTKhHZjnnfEvrXqb+LQxaPmL6Bb6p
yJioAyVyCnNqZkvUDFimPcbWmY45IxitltvSmY6K+nt4am2mnoslDN1Io5W4ltC43qNp9UatF1Yb7adVjuFrbtCY5iVQ1JB45HTqHtocuNM8XjbX8y3iT26qzA5FSKZjVoI5Fa0rwqaYVOHYcvHJM3Hb2OA/CUQwplhcd0vl5SIyHoziR6zfqpKlbNflR9vSBuGx3O3SEr4oya14intxq6kvtu3MR7gMjShpkQ9aCq8SK5nuFRg7yrMogs1rbyXt8l+e5qRq5qgR2nO/Ze/YVDT49K1+CLjMUNBiz/TZgemsiGAuJBUcD8Ofv44PXsuQ8m65mXECRYP0eMPx/UBA3FzKGoW/T5thWyVdj4K6I3Oj5K38z4PNz9lBhnta/Nz6fi0Cn24w8MDieR7OOQ4/Jiml2fp51G9wIsFIGVjkA2E5F1+pKf8A+bTbqllo1pV39Mxy1229F90+ePUEQt/8CieVT4PLoOHKlK1wN235MbHIbj/F8fnfteZU1rzrXh7qYHns5teUo3uEdxbmCRfy8hTxAbiA9kzMbemqal4vX0X6LIZQjel+pWDYBWrn6mh8nrbvjINpZLfT5PnVOntzrw78a9oEh3MPc6vP8imfZ4aU+nGuIxe4/wDWE828lf8AUlM5Nii2sHMcXOLhZXjCnlmhXUKV+V9OiF6aClrwdSvX5yonZD5ot4Pkv+z6Bw7fxV761riNceX8zN87/jajSvZXw07qU/04mzk6slX7AKtXJKbE3im8a+SxDhujVHO4AoEuB/8A5r9RND6Gu/8AN3fi+bXqHa+X/mQ+RTToOqnDVpz+37cTrrzP8vf8xXVUUrxprFO/h78Pn2tLSi1z2Pyi7bL5ATjeLupGYFj1Txga5aKNTe4+mpaHg+myV5y9Svuevu/mXyYRYU+W8xq6P+lr3fZ78fdlp50xva/NaF+P/oqZfbXV343uNliFwhz67jQ2YHGyPOn+MynVUH9ERCZffVUD/h/ogyH6r0Aj5NAdO2nW/d6jeLHzqfxHyE83t1UNNX72mlcaNs0nar3ya/IedJ5XZo56f3deqlMuzBu45dqMz4+i1N3AjPt2NftsqmQlt5qQw4j0lOtocBBZcHh21Hn0p7j/ANqb24YtvFLZfZitrnn2fHjvJZOX8fxVuYpMUp2RRRmtzlYw4QqWiv3BZLCgTvRoVoB1T26sbp3qeHc7UWl1RbqIAV5GnA/T2Y54659NpdrvZd72KrWUrM8kQ4rqzYp3dq8RxGBVRU9a/BkVyIaYzy3HNrayC2iMgDYPVWAtTiToNNNT0yOzrSQmqkce/wBmKuWNHrCF0y1J45ADvPPtxu1mGUfG70vNLbZHt5zioFehLrLTi65aVfURmUqPrJdlLILm38LQ0V49ZNMb38kgGFRU1qaHt7MuXfilPULqGz28w28EzktNmI2A84AEeX+1pBNWIyoM8HFnIOP6Thargz6THXUZNfOThUzG5Li7WPGBZMhx5DqVKcaLhUpRUnUnt0uX13Km5GVZZAqx6dQIFCc8hTu7MWJ6Z+n3RO49Ap07uey7bNaXV15rwSLIxlAGkMzhg2pSxJJYVJyxD73WcOZvz9xOip4RzV/CLOkaktvVa1bXsqrjHV9Jh8PN1vJn4zWl3ukLQ5uOiS+lPboLe7jukdlPHtEsQ3CdTp1+EF+bmmWqnZpBPHF/dC/y2+gm4eoHT136tQ7lN0LtcqgxRsJ7iG2HC3SV/wAxrRWo2h1mmjUUjYjIcrs/jzm6Hn9t7eON+DsqlctxckbXkdc/Tyq/G8XlOK3GwuJ4QV3LspTnrBxhTgdT863Ep10pLa+hOot6uJbjqF5oWWcMTkzyEcAvICvA8Mftp65/z6/y5/yudFWPQno3Dte6THbAttawMYbKGJlyedwPNlkCisi+KWtQ7LXFt3tq/a64bw9+BmPuozGFypnIeZsWuOIFSscW4nZaBa3rLH8fDTGW2ge7qVNfcaTp2Cx10nsezLt1uoSF7i4T
nLICAe4vUEj91QMfzk+t389fXnqtez2jb7Z9MbJcO2uPa7JlkcE5FordRpWnO6klmYeIqtcW7ithN1cGmw7KcMepIbKWq3GahlGKtVrLKQEtx8XREhQ4jbSBoVIQEgDUq6ZUvpQ3/M208b9tA6+5lJy7vsxxjvG2DdW+d2vqfat2mlOaySyw3ZJ5G3nUMTX9nw9+HJh2HWdlDm3Cv0+TEittuKkyJAFYCk6rSp5pQS+0CAE7Tq4vsnXoZuu9w2pESFhK2WQz9o7/ALhxwwenvpdvfUcM27TxwNYW5DMZWKw1U+JXI4qDlkau9FWuGTf5Exk91c4ixGW7Z4ymqnRI7AEdLtPbNuMJmncpRVBTNiqQs+KVqTrpr0EFtNpS8JBWXUDXOhB++hxbttc28Tjb7xCYoCHg0jSoUgK6UqTpquoVzzNcOxmndraSTXNyI7E6NVrmyHdyDq5q24dqx/4hVtCUj+gaHx6atvt410zMDpBqPaB2c/14lhrm/vJg7qstrb+cwBFSgIKqD+2QRReI58cQA9wvF/CHNlW3jPMvFjVtbx35RrbWsrWlSG5D6VJL1XPAbcjtyFEKUyhbe1Xc69Mh3K/hjKwyxyWgGaS0YD2fiX3Gh7MWdsHT/TNvK+7TWkkW+XKgPc2qmKaYD4fOCflTla5MyGQcNVMVAcn+2Hh+qYkwjhUuxjQIAx+NAORTauFCqYMxMkRI0CLCc+ilTHGB9a6HnHX9VJ3gdD9yka/kS4M5jjjFVRR4QSKV458TQn24tvp/bYdrgktBAZJpWq7u3jb25ZdhAyHAYU+M/rGokClqq2txjG60NQYVFjzK48ZuM0eyH5LpVMmLWr5lqWrVajqe/grRwQ20zmBfzJG1MxzZj2sfZkOzgMO1tttuIi8aRwxtmwQU1EcNTHxPTvNByAxft7L69MRjF5r5Sww1cR5Ml1fyobjR9q3lOE6aBLaSTr0vb7CzpMEBL+VQDtJ4DDFtVzFG8GohYhISScgFUVr7hiI/JKm7vkrOsrj6Kh5LlVzc1atNPVrn5riYTwOn/ixmkK8fBXXRmzQNYbPa2D/4kMCK3c1Kt9RJHux+eHVF6u+dVbjvUBrBcXkjoe1K6UPvVQ3vw4ccYXNYUxp3WydBp+FSU669vDUHr7OaNU9uM7SMCPvAw6dyY8Noq0CkEI0I0I0+VXY/9k9fYY9T92MbqakRrxGAzk90ra4y2r8G8HTUdgdCTp0z2duBSuE2+umdtK8cHr2lYLYyf1nl6FRUV5kVPfY7imBzcxaXJwjCMgyq+g43XZvc1zTjbuR28O0n7oUNC2kQo0OZYSHGizDTITeu91jXRsRkkjtXjeSYRGksqRoXaJW4IpUeNqEuzJEoOp9N/wDoj0sSkvVywRTbjFNHFbNMKwW8krrGLh1Gcjh2/LQECNY5ZnK6YQ9lkGh9qVLyfx5hV7zIrl6lxHEc8ynLVW2fvZVHl8iW1tR1lPZWOI4a8xird5Ph2eSOTH01jcySt8rfccKlE03Ld9Y3G0XV9b2PyVxNNDHHphEZEKq7MqySAyaAVhCjWVULRQKDHUkFl0TbbvabdcX53C3t4J5ZC8/mg3Dsiq7RQkRCRg1yXYRB2LEuzVNVvk932c1uX8QX+MZFB4xYVmFnimY2tHfZXxMpOJZNid4Wky5LcnG48iKrOaqiW6h8uR3EN6OtuJ0HUbaP88SWV7b3cbXbeSskSusdx+Yki8BRyD5TSgEUYVyIOJm7r0HBfWF1aSpZL57RysjyWv5Ukb8SDGCPOWEkNVTQagRTEKPdZwnUXWG5PT4zb0/JlRxLRUWV8Tcx0cqml38fBsrnXrLPGuaO47GhY/kGKwL+lmwquTBaY/t1LtY2619LIlyWrD6K6hnt7+Ge7R7Wa9keO4tnDBDLGFJni1kukhRlaRWJ86kpB1qimmvVTomz3bYbi2tWjvF22FJbO8QoZFhlaT/lZvLCxyQh0dImQL8tqgDL5byyLW1x9Sv2647p
SValJJ0Pn9gHfTq6b24WFSMcY21q7yDFkPE2FGvhIsnGtrqh6EMEeDhSPWfA0/8AAQrQH/OofDqt963DzH8kHw8T+r34s/p3ajGguWGZyX28z7hl7cSUr4AabQ2kaaAanTw6UJZatqOLCtrfSukYWlbWwEgaDTTv4fbr/PqKKsa88EclFOQwluqCdSfAdvv6lLwwNmOfsw7MWeC6etSsjvDjgeWn5SR0EcUFR2YdyymZweFT9+Hi1F9VQDfcnw+AHh36jeYwzbG5bfzG0rj9Lp3i2pQ0XoNCB4kefw6zjmAOfHHyazfTXiBgR5fQqlR3Nqd3ylJSU+Pbx8Ox16O2FyI3BOWFHeLAzRkgVGK/+W8D+o+o/wCX3H59ClJV37/KQB1aOz3woKHFKb1YNG5BGdcCfi/mL3A8Ay3GOLOR8hxqsU8t1zHJSmLjF3XFr3OqFBcsTq2K9IP43mEMvK/z+HU3e+mOmepk1bvaxSzU/wAQVWQD+upDEDsJI7sTuk/UzrbopvJ2O+litK18pqSRcc6RuGVSeZUKx7cS9rf3OPeA1HRBdY4inOlAR+oS8NvhOWrw3n6PMYkH1FH4MAfZ0gy+jHQ5cyA3yrX4RKlPtjJ+3FuQ/wAz/qAI/KaHa3f9owy19uU4X6lpgech+8T3U51Hcr77l2djla8gmTAwiJCxQaqH4EWte1/cLaUpOm0TNDr31PfoxtXp30ZtjiS3sVmmByaUmT/Zbwf7OFzfvXT1D3iNoZdwNtARmtuqxfU4rKP9fEV4QgNy358h5yfOlOrfkzJTi5Mp95xW5bsiS+pbrrzp7qUpRJPj07MjhREoCRgUAGQAHYBlTFPTbg9zK08zl5nJJZjUknmSakk+2vbh5w8gdbUhMUIbCPM6eHn4dh4dRntwRVueMBeSKfDTLBrwq7csXE/VSFlprTQA6BSx3JGmn/DHf7+l/cLXQDoAqcMm2XpcirHSMPq2yq4UREiWUz6SONA0l1fpukj5gRqdRp26DR2kS+N1XWe7B9rt28Cs3ljlU0r7Me6rMpzDS4/puPx3ezkZZPZWvdTZJ1QseRHj1tNnEzBqgP2/TliBcXtwiMgBaM8R+r+jCPfP2splaUOvSqtw/OgamTDUrwK0Aa7Qf6h0ZsxboRUATfYcIW5m7NfEzQV9492GUaS5ZVvSHlsqOjbp12qB/wDpuiouYeGQbswvm3uNVanTjJbcfy8zq1ekl1GTY7HW7FS0r0nbnH0L9Z6K25roZ1M8srQlX/Ejr07en0p7vfjZ7wbmn/1PuCEm7Ef4VcjmkgAV+xgD+LF1+ndmeqtt/wApO+jf7TXNYtXT5sROuW2DcnjassQ5qzqKacQk5r4wwy3r4E24prlq1rpNiLi/pWmXZdbGTG31ipVG96RmNLm6NvrQsLaaUXAFBB6UOpdlvr2OTdNgFu7iJSkDkhZX1HWFlFfLOnNAQQWFCRXF09F9Yy7Per0/1iJkHmlTMFo8S0AXWhp5g1fEQQQDWhpiDWA82XVFDt/T4lzh5NHIcYQI6BPVYzW1siLDhNV6JEl/6tLm4uBO1tCST3I6Tm3eGzuDZ3kTRShQfHQZngBi7v8ALSbpbi/s5hJExIOkAlVHxM2dAM6AcScS546tPfdzJEXX1VRC9onHlrFcRD5DzRCG80EeQpG2XU1Fgly3leu0VhsMxNySAoqSCD1C3W93e/0QdP6IgzgSySAklKZ+UAMmB7a1GCfStl0PswuX6vS6vjHA3ykVsVCicNkLl2IrGRkQpFDU1OQxan7Lvb3xH7ZqWbDxO8yfkfMsgsX7zOeWeQLF2xynK72clsTJUT6p2YaOudW0kpYQtatqfmUSdBi+3zW8OglmY5szZsx41bsFeCjLtrj0M8u5z/MSMqBMookzjgU8lrQuxGTOaE8hTjY+LFE6A8tSwHENfUwwUpUdzI1+ZKwoaFKSCPA9tPLoFLbspNRwOGuJTbSKY21RNz9vI9mIbe9X3YR/ab7YeS+Y
GokOw5BlVErBOEKGFEflX2Yc15nCk1GD0eP0sQOS7eyr5b5sVNMtqKW4mqtAoHqKIyzUBpTP6sGTdrbxmR/hX7K88c2ntL9lmL+17CeLPcD+4rfmy5Uwavg5dx37c3r1Zb4+y5M+xtq7M+TraNMQGMjiG3Uhqljr9NCNqZRUpIa6uu769eP08suh9ngtbfbX2a1h3KeWFRK91bXNxcrLbSZMjBbgxPKa+YgK0pQ4586z9WNv22/PT/S8Lbp1LLMVjjiGvxOQq1pU6ixoAO0dmAP7y/3GORPcXlc17DLWdjmD1rcurZu0tFh+Y644j9U/QmXNwebkrZSkSVpCUNpAT46dcv8AUPXgtJm23psihGb0qSDzz4DsrmeQpni6/TP+Vm86puoetvXGrXEJJSyDflxNzVmU0duAYDIcGauWIOcUe6LkbgjMYmaYVZRZr7EtL9jV5MHbWDfNFQL8ae+6tcmOqQkEB1rQtk6hJ006W7G53fzhNLPI7NmQ2a/Vy+vHSG87H0DZ2Y23bdugtIY10q0A0OKcznR/ePeMde3sK923GvvRwaNd8ftSa7MaWRAreQ+OFrbkZDilg+ppK3WUIIM+mloUXIktILTiPEggjppM7yoKjSaZjFNzWcVvdOqOJAporUpl7OR7cXqwKvj/AI8rBFkCFXpmRlR3n55RKuJ5eZ2LSNQp0qO7Ta2lIB0+/rDHsDrE7Ix5i0pCxDUsx3EuAo/5T1CGisHulxI0Uk/Ht5nr2PYN9RXx5CpDDrbU6LNDsGSCpJa9HZudEhPf5ykggeI01Hl17HsaKLKhwueMXsLaLHlLZS5WSJTyWCuA/qI6ULcUN4bT8u8/h8Dp17HsKkuljWiUBzY42vTV5KgruvshbLiTqlSDorUEanv0SsNxlspAVPhrmMCdz2qDcI6MKTD4W5g+36dvEYQcgYsY8OPVZXRM5vVrStll911yFexWkrCUhi0j6POJCU+C/mJ8VadNtq0Mjm42+Q28poaDNDUc1P6PqrhKuLqaNBZb1Ct1CKjUfDIKGmRHH7D3nDVGJ405imSYjAyHkvEcZyuPYRbelfrcdtIzzdnF+imlEpdfKtEqcjjbr6/bTsNdT1JN3dpexXksVpPdwkFWDSKRpNRlULx7sboYtsbbpLSCa7t7CZWDIyowowo1CVZuHPVTCtaUmPZK9Uu39pyDyAqjStNZFkxqKir46lJbQoFyugQZ/puJaSFblOdh2+3XBcXdmHW2S1tfM+Igu7H3MSMq9gxuuTt19pNxJdXgj4KAqL9aqh+s4c7VXb2cc08aPFxCiWEJeragqM+e1tSnbPtHNsmQNE6FKQkEdlajqP5kELC5ctcXPJn+FT+6oyHv92NT3F1OptLZFtrUcQnxMO9v1CuHXEapsLrlLaTFi+g2Vgq0U6pSEkOBCR86luJAOnj1Dlaa/k8ZZgT7vfyyxJtLVLSPUaB/t76c88ATLc/mXE3cgufTp3IZQ6v0WW0jXe4tts/MpSfHXUadvvKwwR2kenKvPKpPvOJLQLO4d6k8uQHuwwlZC6Wg+24pwwVgNo7oT9OpZKVlI01CXu3/AHT1kDT4x8f3/wCj7cSo4QKBBmuGtkli66tt2OSzHlpMqOoHaWnEq/OaA8lMP/4EdLe7THwxjNhx93DBGGEVJelDiO/uJzukwnibL84ecbafao50P9KU4hEmTkzkdYgt1rSiDIasEOFw7dxb0Vr2PUKxtJLy7RE7ansA5n2YG7zudvs23Sy3HJSI+1yeCj94V+rHMfh1ciXZOWMdanHnXnXpinVJDKZRkrU84xtOmxJOo3aFRPVhSorxeVJkvAU492KOtJXivvmrfOpJYcq1zAxYrwFzJf8AFtmh5gOWNPLS2zcU76y0y+yD/wAZgkaNyWgNUKH+rpL3K2WTIfEOHM4uXZLx41DUqrcRy/04trwfkHF+Qqlq3xmxblpKAZMBxSUTq9f9TUmNru0SfBQ+Ujv0sTRvEdLCmHqCZJI9
SGp+7D6Qok+Ovb/TTrRxxKX9rGtaUlTkMYQLiG3LYGpaWflkxV9wXIsgD1GV6+Q+U+YPWId4n1oaMMZ5EUPA4EV3x9ZYzumw1uWtKk95SEf83CGp0TPZTr8o8PUTqn46dN+37klygV6LMB7j7P1YiyxEAgZ/qwZeHKOnyxTsm+bp5ciuZcaiJtokWW26yUg+msyG3S2sEApWnuNNDqOvu5bdbbjb+TNUZ1BBIKn9IPMYESySWlz5sNaUzHaOw/oOArnXuWyDCL++xOqxDFIcvG30AFpTqUqqX1lAkpEaNHSUMvKCgAdNilHtoeq7u+m4IXMckjl14ZcQeypOQ7cM1hcNOgkodJPbzH6e7A6s/cpypd14dZt6+mKXFR3hV1jYkqadG5haZEtb5aQghSSduupHUI7TZAZ6moaHPL7OJwYjRAaEf6f1YjLn2VZNfKWvIMnvLlJCiGpdg6qP38mobam4wSk+KlJPUaS1t4z+Sij7T7ya592CcDtSnDAoZeSop2gIA0CUIG1AHbuE+ZJPc9Q3UZ1zOCKk0wXcPYacirLnbZJYcPfx3Nup7eXieo6qaEHtH3Y+u5qOyhwaYseIhkfl7XloBQDp+UCPxqBHdxX9I8h3+HWmSMSCgzj4e3u9nbjFHdczhNmQVKnqDW5WiI6PDXsGkHvqNSdx7nqNdQ/mEDsA+zG61k0xZ8yfvOHzVVLrUJklJCnZCnFHTUFLDeg7fDV7/DqN5BRff9PvxvM4LGvZ9+HMjRHyHXUJGv2f4d+tboDlj6H5jhhFly7GbZV2O49BkW2QXUyPXVNZESFyZU2UsNsNNp1SE6qOqlKIQhIKlEAEiTtu0zX1wscK1Yn/AE58vuAxCv8AdUsoC8h5e/E6a+q4+9lmN197lMaHyF7jMphKdq6htaVx8fZfSUOCOtaHP0unir1bdmFH1M1YUltOwFLdoRQbf0/Z6z8RFCR8TnmFr8KDmeJ51yUV1NNebzdU5A5Dko7T2t2Dl9ZMZ8jyTPuYLf8AuHku/k2Z9RT0CjaUuNj9MhZGjFZVBZZb2o0SXV731gDetXSLu3Ul5dsY0OiEHJRkPb3nvNThu23YILcCSUapO08f9GJC8eQ6W7xmbiEltluXGYK691aU7iyFeq2tPbUmFJ0JA/8ADV8B0tvPPJVCT3YnzWkVrKJ41Gk8fp7MCzIX3a2U3QMV6pF5JnN1UetjtlyTJnPvJYYjx2x3cVIdUNmnjqOiW1WFxeTiNNRYkU9vD6HGO4Xtva25ckUph38g4NFxKwxrAJqmZeQwseYyLKltBK0Isbt10Jr0K8Vt1ESKgI18UuKXoNxHTpvsK7fYQWtufzI1Z2P7dTp9tE0+GvBSTxJwo7W/zl5JNMB5bkAD9k8f9quZ7acqYi9d5zyhwXyvPyjirMn6OHBgceru8SlIM7HMpRbI5GYDFlWuOJabDa4DbinmPSlaNpCXEjXpOsepL6wubuYyVt4ksqxsKq3myzoaZ+E0zJWjGiitBhhvtmtZRYWiRkz3LXtHBpo8mKKXMfi1fCAchViMznJtDPFn7guHTcrwyLB4x90eI1zT13ROOoS1fNsIS22iW6220bvH5boDTE8N/VQHNqHE7CEOuMkO19Y7d58VI75VGZzZDyVyB44ycg1Kr2DNWEQz7j0pf+XJVrRjwHBh2r+y4GZHA9vMRPxXILbHl3tNlMGTT5NRTJGNTquYkIlw7UbkWKVpBKVBiJuCVoJQv1kKSSkg9VjdWk+0Syx3KlZFbRTvPE/6udRkdQIyxb+27hDu8EZgYFSAxPcOA9urt/ZIOHpFuIqGf1J9KVtIcU1CjLBCJ01ASo7k6j/k4W5Knj4KO1vxUdIqzIB5xAK8AO0/qHE+4YMSBiPJUnURUn9kfrbgvvPLGum0Mhxbz7inHXVqW64vTc44pWqlk/aT4eA8u3WcciO2ps2Jz+n07MB7o6VCJ8I+7kMLseSyR2VofLTQDX4dFIo4WGWAUkhBJYYk9h6k
3PFFnCSd7sONbxSnUFQ9LWewNB3GqVjT7utq2qhjTMVr91cApn/5wPyJH6sBFpKgAQNSTrp5Adh/AadS/IYcsbncE5cMbIDuvZKjrrqdddOw8PPTt49Wtir8bKW1p0Onfw76a6/zOo69j2E+1hMy460v7QdpOh8SddNB9o163wSNG4K8caJ41dSG4YW6K5yDk2bVYJlFBmmQN1yG4cDPsOsU1ORYzVlwJSL6RaOsUNxTR1d901Tb7Sd2xxSladOFrIxhMuQNK58CfdzPcM+w8cLc4DyiEhmHIrxUd9ciPb9eCNmfB+P8YVa8qi3/ACNzD9FLkBePUmURXoMaIyApiyyNNRHjZAiD4lxENC0J26LeSk6nZBdyTkqyGKg51qT2Ly+v6sYzWiQDWGabPgCKAdrUzp7PrwMq/kywyfIIeV2GkqXAjfQVNc4pMOip6tKUpTW19TGU6GmEpSNStwurKQVqJHQDcZnU+WuS/TjgrZIJPzHzb7vZiTfFOfTcns7nHblUVtp+ILGqjRWvSbZ+nIZnMo3KUtZWhxtfc69j0HR2Vqjjgo6qyUPDChkkx2vCsfRCl5BWTJJdaqITzzdlGlLCv+Yp5bCXHWHhu1UgpWyv+pPfXpx2yRpaOx0uBTUeFO/PMfaOWFi9HlHygC6E/CONe1ew/Zjdm8VVkGifvUZNltm+xWszk4dEs6JWUPSVFanqktoWqL9QwhA3Fp110nUJbKtoVJXcJTP5JjCrqI1kNopyPv5VoO/GptvRYvNEkjGgOiq6q8x2Zc6Enu7Ye5ByVc5m63RWUB7GKChmOfp+EvKkplsTUKUlVhkjstKJdjbAHTR1KQ34IQnuVA98neKJiPiNKnt/UP8ATniRAoli0sKKp+Hv7TXifbgdZVcgN+i26XARoogndvOmgAHzJ0A7AdgOq/kkYseR+n14YLG3oPNPHADym+agIWqQ4HXglSkxlEbmtNClb6kkbz5hHidO+nUepOS5Dt5e79eCbHSc+OIWcociPavpW8ZDpSslxSw2ppKQdVKXqAhptPke2nh1tQBc1NB9ef6/9OMI45ruUQRCshBJJNAqjizHgFHafZis3k3OZd1YyYtM8qU4ylbsiasbDHYR+NxtC/y2mUnwWdVKOmgHRqyhkkp5o0js5e89/ZliJcXlrt9V2sLJc8DOwqfZDGclH75BY8QBiJWQZTOemfTxHpLjYbVHASouFwnX53CdfmUrp326KVNIjB1n9nCReyrqeWV6vxJY1LHnmak+/Eu8AzHPpMeHc47RGZJsb2uo4dKxFcnMKdqmINpLdbZ1SWktF1tKFBQSlSVfb1dm2T3yWogiBSQJxrlUjn3HMezFM3MO1/NMdycNDmWpkSCTpy7Qcx28MXncTc0waCljuXuTQqC4XEW3PZoJL09MhT20z2pEBtD0aDIcO5JUlQKFADcBqeokl/CCBuCq5VhStBw514kV93diE+xPext/C2kj1oQSvIMKU7K0947cWFNch4hydXwshq2GLBVTXRn7hmGoC7rlGCpEfclKg+a1UhxLzzjJV6IUEqSPHofb2EMoKRSBkkcha/Cc6kdzAVADU1UqDhb6ul3O221jdRM6wR6mC1DqQhAIPHQWIZmWumukgccO5GIxbyoYFRcw7LVBbZCwlmVGQyhCJDrbySHkpW6vRLakgnsQdSeqn6vtzbb20ZTyzoU09tf0Yuz0JuVuugI3Sbz1FzMurvGmopyo1cjnSlcGr2Uw5sPkHmtuVGmxWWq3BYkVqY3tKEQ5GVNLaQ6nVt4oJG4jw1Hx6g33/wBTov65+7FnWX/bpv6q/ecJvu+4RxzLeUeF67FJDuC5zy3mT1fkmS129yovKvA8fsMzgNZZi6nmqbK3otnTRiz9UhSw2go3BH4S+xX8yWE7zkvb26gqOY1GlAeQwO3izie9gWHwTzuQx5EIuoVHAkUFK4GfOOLe46k5J4jtsjnYbbZqs3WH8ZZtgjkzE3375ET9biIyWnvP
1GoSZtdVyWlQxLXHmtvONflEIUZu2XG1TWsyRBhB8Tq+dAeJHOmXHiOOI24Q7lHdQu5UzZqjLlU8RUHLPPLgc8Fa0910nHbCro+deB2abNqdqItyxm2mMLrX57pZZan4sqykGwsG5spaUoag/WSGVnYv8JUY0OxpKpexu2W1c8ACR7DQj7QMb5d4eJtF5a1uUFeK5961/RXDK5ta535Qy7CH7DE4dbjsy+of+meJTL9yBD/VGnROs7zLojECROmKZgMKbQQhKYTLiilpx1W4ENvh23b4ZvIkEjxqTI4GfZQcuPAAmpxBvpdwvZYfOiMaO48tCeYzJYDPgOdNI5VwvcpcWZ/L5G4XyLm3JKS4h5rlh4yuMT4/h2WOVrVBPqrO8iV023M1VxfVzlxSMKebe9NrQlKUAKJ60bbf23ydzHtaMrQRmUM9Gz4VpwBzyxu3Czna6t33FlZZZPKKpVRQgtQmtSKjPhicPIdTWUXEWWU9PXRYFVXYjYRINZCZbjRWIrEJTbUZhlpKW2m0oSEgAAAdKm3zST7tHNMxaRpKkk1JwxXsaRbbJFEAsaxEADgBTABxcSY1dDZaU9HVDRGTGcSpaHUstRwo6LRoSA45ofI6datx/wC1N7cZ7f8A9mX2YfMi8ubGtEGbIEtDTyXVqcZaW8Ak/LtWAnc2QdF9jr0wQ7XaQSi5iGl2XtNDUdnb2YrabqW9vLd49QYxyHUKAkAEgMp/Z/aHL2YinyzUYXiMuxyqqjpiWpjrcMVKEmAxNdb3uS2Y6gdksIBI1G1A+bTXpo2+a9nVLObxRV489PAAns+/HM3rX1VtWyQzLsXg3iSOshUVVKip0jk7D3AZ8cUo+4/3Bzqm/bjS7ITJSjFroX07hWw19UsKTGaKQEeqVOaun8ald1eA6dTFGloRGCM+HM95/RjjPpXpLfuoeoxuW5OZdIGlhmir8WlTwyB8XMkZ1phpcm+6hqTnmG8axZgEfDsYqos0Jd7GzntolywdDoFJU4Afu6rTfo2RioOfE4/SP0p2eG22xLuWh0jSg7hxPvOLJuCrpu4xqXZKyGpqFQIkd+Mm1ntQ27dS1bXoUH1Dq9NDeqgPAgaa9KXzDQtVjVe/F8W0K3CmoBP0ywWLCBSZdGVYwZMSovZcEUycsrGIjkswkueoKmXMQ2ZLlS88NFNBQKCdQOj+3buYnDtSWAcUPHLsPdyByxUvqz6Qbb6jbHLYtK9hv4idba+iQGeDV8SZjxQvwkXJ6V0MDkYu2vH19jeQSK+7XHqKuPGduZmSSVH9Ch0qHfz7FMhHZ9SSdrbCdXnHClATqerFtN1t5ofmIGLKeX4q/skcqfVTPH46dT+kvW/RXVb9G9SQC3ni8QuAC1vLBWgnhYCrhuAiH5nmEIRXFa3uM97M5/ljD/a/7baCfcT8hznDKDkXJZKFK/QcevZ4fen5tOiLT6MqdTw5D8TH4zqdrCS5LVtBHUC83S4uGEVqPMuSaeH4E50Hf+8c+ymOhOkfTbozofY5+qev7sWFmlpNPFA5Hzc6xFV1z6a6AZHRFgTwKzBXMjkgXXZxyDD9BFVjUVFXitY221S1sRGiCy2yllmdJDSQl6Y+ykaaDa0n5UjpIjWd5zJOS0xJzPZyA7vv44tvc+qLJ9qhsNlijtdihjUrEnAyFBrlk/blZq8cox4VAzxAnmW75SxWtXzJxZjismyTE2lxLvF5D4gNZvhiJ7FncY01OdTpFtkiKHITp0Sl8BKvlUerJ2VIJ7drOUDWw1Rn/wAIo4D+sMsVDLJDeNBLeTPCsE5ZiorqheqnUv8AvBGSHIFGABI7CUuMPcHxr7heOK7lLifI2shxy2W6zMjL/wCXv8Ru47hbtMSzOmURKoclp5qVMPx3kp1KNyCpCknrau5JOKRZKvhIPxBhkykcipyPfg7t/Tm67PvR27dIytyxDqRmkqPRkmjbg8bqQVYZUNDmMOKRKfaprWepR9GJFky0IUAUoebYWn1UapOx
1QO3Ud9Oly8lM90qcSzUPfnjtXo23NjsgZsgiVHcacR2e7FWXImKy5keQtSFFx9TzrqtO5eeWtxZ3aeJWonX7OmtEZ8l4cBjFZVTMnxV/pw2uOON3GfpiprXc7uHb5idwO7X+rt19isiZCzYK3W4CO1WNeJGLfOHXYmOcf37MdQelVFC+3NUB+VDn37S4FTBWsfKbCTvU6Gx8yGW1LOg06jR7a9xusKsKRPKD7Vj8TH+qMhXtIAwr791DFYdNXk0bBriO2dMuAeYGONa/tEktTiApJ4YjlY4x9WwpDTeorVJEcaa74egQUADXUIWkHq23mIap4tx9uORLezXRoHBAAPZjcoqZUBxDndABBd08CCdD8T2HbrQz6xTE3yjGMj4RhHy5f0L76EqOh+Yd9QNw3Dbp/m16L2C6lBOAG4llYg88AS7QHFTvn+YqWpPcdw4N/3+CumKJgNOXtwrmFy+rvxaB7JcQ4ZyHBvbhTZDZoybIrPnjM5mT8e5U5sxlqDD4/5RfqreDikn06fJALSprFfqzzUuQxKQmKl1oIDIo/1Evt+tty3We2TyrRNuiEc0f+ISZoAymQeJPCz/AJYKqVJehrqx236Kbd03c9K7HbXEnn3z7nO0tvLlGAIbrS6wmiSjUkX5xDsrgRhlChBb9itbAi+5PkyNFgw48KJwnwi3XsMR2Wo8dK8v5wS+iM222lttKkMMhQT5IT8B1RV5LK/Sto7sxka/uqkkkn8u1pX6z9uOmrOONOq7tEUBF2+0oAAAPzbytB7h9mM3uTjtf2Dji0MM+sjmr29htQbRvCHecuP2JIT8pOi4jriVfFCiD2PWPSrMdylBJp8hefZazEfaB78ZdVKP4bEQMxuFl/6ZAD9lfdiLvuiwHhGJyvUzcheiYC9lPB/NEa6u8PLdXkeTWrF1xfGoquwra5GmdFyHY2SG6ydHmsSELd1aO3VLj0duO/vtDpag3Ih3C1KpL4kRSs5dgzf4VCqVkVlIyzwk9bbV02N3WS+YWnn7ZeK7xeGSRtdsEUov/aPC0gETrIrAnwnlUNwRxel2PCKm/wAZQEqcQEhIABK1jUgBCRuPc6fE9dBb9vOgtTj3Y4U2npdp5gUFVr9CfdiwCpomIzLSY7ejEdtLEcDQfInuXCNeynV/Mfv+7qvJrpnYlj4yanFj2+2rAAAKIoovs7faeOF5TIZG3QA9tf8AcPu6jBtRxMMaouVa1xoPnx18P/IOtqduND/Zhvy3dSQPDXt9/mepaDPAmdxWuHnQISmrr9DoREY08P8A1afh9vQXOg9mHOU/ntQ/iP34fdY4sKGvfy1PwPbqJMoxPt3ce3Dk3hKSVnaNOw7eJ6jEEmi4JiTSM+FMNydFiuBW5KVIVrqFeGp+7t1Ljd1zBzwMuIo2GfA/fgC5/ite8y64hpAUQoqCUg6ny10Hx6adqv5UYKSaYr3qLabdlLoM8QIzzF0MSXXI7ehSVK0UP6/HT7h1Zlhe60AbFO7lt+hyV5YFsWnsWXFS5LK247IKg4W1IQVaHboojTToo88ZAVPiOAyWsg8bDwjCYa2VeS1ttJC07ty1FWoHcaefYJA62CZYVqcjjT5DzvQdueNaxx6VV/jQC2SAlaSCNe+nn5efXxblZeeePslo8fAZY918Zx300Np1U4vb4+J8NT9g6xeQLnjNISQABngs18g0cRtCFJC1/ID8Ce6ln7VHoVMBMxrwwVtz5AFOPDBUxxTNgw2FOjeQfDT8Xw76+PQC8DRkkDLDRtxEgCk54daaqOytDw+BC9fj/m6Fm4fNRlg01pEy1PGmeH3i9fCmzG0aoSvTQ6gEKPwWPBSFeevWqe6lSM1rTEIbVbzS0AFafT24NcbjKJZaKYjJQkp/Pi6aoURofWjHy/7vboW29vAPE2fI/oOJSdHx3b1iWg5r2964TJ3GUqplx5VePTlR3kuxX9oJQpGoUl1J/EhaFFC0nstCiPPr5/GILuF7e5Aa3dSrKeYP0qOw
0OJsXS19tN1Ff7azR3sMivHIOKsuYP6COBBIOROAfzBwzWWynbJqvLIkth1RSnRUeStO5+H6qdP+G5r6aj2WjafHpZ23drzp+7+TkcvZEnQTwZeVf3hwI7cdHm02P1I2VbqeNY99iUCVRk8cg4shGZjY5qcxTI5jFcfIlDm2BtyJ+L5PeVUOK4txxivdZhuI2jVzR2MwzIC1eZ3anpouLbbLwC8WCFpaVrSp+/7sBdpXctqn/hd3dTiCuXioCO/LP34h/Wc+2pvw5Jsp8izS8pLr1jNekPyDu+ZbjspbjhCQD218OtMHyzgwhVAPYKU5Ysm12BjpvFdm0jMGrVFM+334sm4k9wVQqsrZxnJEdtSGJDXqaqemnTs18xPo7gfu/h1on2ySRzGc2IqD3d+D8NilsFltv8Fjw7D+r/RifmLcxx51Y7YrlsNxm2FokSX3UtRIjRaUVqfcUQEIaaBUfPt9vS9uOzLAlZcjy7Se7BLcN62nZLFrreZ44LVVLEsc6IKmg4mndipD3l++LjHFs2hZfUJgZfm/HddZweN7exQ0/WcX/qqBHvcnxiHJCmK7N8nCEMLt1pM5EVCI0T0xvU4o7hfbXssYe6o90zeFBmS3Z2mnMDIc8Ultkvqp/MjvFzsXppBJtvp/aJru9wmPlRxwg5zTytRYVbhEldbmiorMQMc2/N3NeccvXc3LuU7qbKrJFkkV+MIlPqixIkiRtVZ24cVvmT5C3EA+pqlhKydN/cVj1Nv+4bu4srZirsDXu7AAMvZzPsOOn/Rj0U6J9NFk324QXhhYUmYEPKSwUy1bx6amudKA1oCMo5ZRmHqKXFY2NRmUpajsRwAwwwgFKG20J+VKQB/Hpa2zp0xjXKCXJqSeJPecXR1H6h/MEW9rRIEGlVXIADkAOWA3NvlrcKVLWQokEa6DsSEnuft6ao7SOJK81xW91vNxdcyK9+Dt7V/dfyz7PucMO544cupNdlGMS2k2NQZC0VeZ40t3faYpesbg1JizmSr0FrBMeQUrGgKwfuBWO+riD3VYV7gONME5vxu1enVvI+PQciYXZvJdsq6TJSDYU8pJVtjP1U9K2VISEgbNQOvY9iS9dylWQqV7L7i4gQccq463rKwkyEMNssxG1OyXXXVlLZ9BCde57bh8evY9jV9u3uHTeZflVuXHEm3v2JsugkSysS6Scwhipsq2K4kJYcYYAU4tBPZQCh2169j2JBe6T25H3Bt47YY3l0rFb+lpZEWNaRZUth1sPyDJjhZiBafVbRuQsKGm3boevY9iM/tv4195nH3JkPDMiyuut+MWFerJvrmV9eBBDpCBGfW020mQ6gEeltCwr+snt17HsW+SWK9RbW2YrxQltPqyNhQ0lACStthHzuFSu/fQd+iVnI5jMdW0g8F4mvInkMK29xpFcLOvliRgM5DUKAaEhBmxzHcOeG9ZqoW3CJAjyXVJK0eoEJQVM9nUpbT2A08AT49EbdLplqmpV507+Gf6sArieJ5dDkSuRUagAPDxoo5HlXA+Gctt2qI8CuiKhhwoWllsLUlHlvc1CUqOmmgHb49FDtkZhPnOxlpWpPPuHPHyC9vSweJUWDhpAqSO88vZ9uN27slPQHXqh9iPOOoDaw3vQFDcjRxxJQhSwfPx07dY28IDhZgTDTj38/qxvEzQmgoHrwyPHv8A0YjhbXNyJahYrfddS5seW6paytGu1SdVagaJV22jbp0Y8tFjKx0CU5cMToRG5EhzcdpzwypzKm3X2Fr1UFK9RWu4hrsttpJJ7lSSCo/w6i+YXIYii8v14nhQooOOEgSUML9RQHo7VNPp/wA7Cxo4NPDVI7j7R1g5B8I4DhjavhYU4YjtyP7gaHD81a4UxPFr/mHmu4rv13GuPMVSG4VQ240o1txyXmD+lJx9jF3oGw/McStahqhBPQC4USS+fJTyxlnwPaO007h78abm+WKX5GIlrxhWiipUHmTwUHtPuGIe2vtJ5y51y1rMvdLy
9R015GTLXj/DnFFS9YYLxw2tl9DsB24tnoLuTX6Iyg3LmemELKSGtEeO2Lc47JdMEYMjHxNWlewADgMArjp263aQPe3GiBBRIwNWknixJI1N3+4UxTPfUNjxLyJc8eSLEWEugu3qluc2yWo8+G2EvsWAaI+RchlY3J1ISoEa9Nt1ewttaXIAUzcKnMduKr2babuHqi42uVtQtTmRwavwnuqDUjEycIxKzuIkebOksRUyGE+ik7/WbSBqh4toSoJSoHwPcjpIuLsavyvf34uyws5ESj/Dg6YxiHJuJzkZDgUpFpJgBLzqcdlB6YGEp+b6ulc2SnmVDssBCk9R5JoJhomyPInBmOGeE6o88Tu4o5lkZZjwssxpJGNuxrJVFLuHGls067VtCFFiYHNHapxz1AElwBpSjolWvUeXZ7xbM7jGhaxDaS4zCkcj2e3HyLqbZP42vTMtwib80IlWFsmeMkjUlcmIoaqMxzFMSGYVqoKBBSsApIIUlQPcFKhqlSdD2I6DPT6sMa92HLBJHylOqVp0XqAUFJ7FKwQUqSoHQg+I62W8gVuOeNhSqnAW5IqlYg5Gtsdek10S3fdjy4sdSg2xLCC6kxtNPTZkICvlP4VeHbsGmxunkBjkNXXMezvxFkt4pBVx+vEWM6pHb0N3kdIfvqpDqtHFAi1rHkqE6qlKPd5x5oktE67XB9vWvc7M3UPmR5zqMu1hzX9WNlsRbHSuUJ49xHA+3txHRnITAlyap5xRZfSWGlvEpW606A9XSnwdFIKVbdyfJaVJ8j0klTr0jLlWmQ9nf24PJQgPz+n0GGjKnSrR303AouFRSseBQpCilSCBppsUkjTobNGV4cMEoypFa54VItC56aVbSDoD3+B8u3UFoycSRKOeHPVyXadmVu1K9I7jSFHwCH0J9Vwf5Bqdo/qP2dQHUtqQfCBmffwH6cbgakHifpnh4V2UOvFttTh37wCVE6kE/MfHU9+tRbkOHLljYDzODXWpLrr0p5pQSFBKSUnaQlCE6j4dx1vkiJcuRjRE4ESgcKfpJwTosqIhmO0pSSW4yCANOynVKc76eZQU9R3AAA7vvx4VJPZXGnLaQG3ZQIKA2o9yPtJ7eHfXrTHa+bIAvPH2S48lCxxJj224/R8R8e5j7r87hplzUNTKPjKqeKUvSHHHTAU/CDiVlEy8tP8AlkObdWYjbi+7bi+rL2nb4dttDLJk7LqY8wnIe1zl7KcicV9uV899caFzQNQDtb9Sj9OAFDbvs8yS25CziYqwyTIpRmSXV7vTjtdkxq+E2oq9CBBYCW2kA9kp7kkklM3u+e9nOfh4AcgBwA7h/Sc8NOz2aWsIalWPPDzdhpjo1SRs8vs0008O2nS20XPDHHKQKcTj7Fu5NItNnCdLMmGv1WXB3+ZIOqVA/ibWnUKB7EdYRw63CHjjG5kXy6H4aZ4ml7ZON2s7uYvPmV1JiuMR5EHEYMj/AIS3EKXGl5J6Z7qGwLZiKV39NS1aEFtQtTZrBNvs1cj/AJqRfqU8+4t/d9uKwvriW4unQtW2Rsvbz+r78RayjJnMv5r5LytC0uMJymbVQFJO5lUPH9KJotnwWzIEArPkr1CfPoF1TMVvNCnNAF+oAH6zX68HunoQ9uWbgxJ+s5fZTEMvcYh1OeXVixLkMQ4VBx1KbiNpHo2CpEPl2MGZLh+b/wBlTp3AJ8V7Se2nVY7taHRuFxGxWFYrEgU+Ks8woT+4Ty45Ye7G8Rbza7OWNHuHlvlLk5x0t4mqo4HzAKGvAaqZ4DtDlOTcV53W8jYHOXWZHjNmLCEtG8MTYj6VuvVU5pCkfVV1lEe9J5sn5kK1BCgkiTsu6TbTuXmxHLUajkVJBII7CD+kZgHEjdttg3Tb9L/Fp48wRzr2g/q4Ysg91FNQc08VYL7v8DQmrWwGabl2pjqQ/IrlNuN1cx2S2ylBk3WP3EZMErKR68dxpZKWmk9Wf1NtcO87at7GaGNSx5nR+IZcXjYafrzo
BhE6W3WbZtxaycVWQgDkNXFT3KwOru9pOIdw8iFwGJKEhmM2y3HhRUr9REWKgkttBX9bhUsrdWe7jilK8xpSsyMZqU0quQHIDs9vMnmanF1RTAW1Qas2ZPaTz+4Achlh0xZWvifh566/E9TIY6DAS7kqSRwwttTglSQD21Hn/q/h0TjUilOOAkhBFDiQXCuWtQbWRQTVoES+Sn0CtWiBYNIKA0dx0/5yMooA/wAyUjz6ma5BQ1y+meBN4mtQ4+Jfuxt2eLW1XezqxmHIkMNvF2G8G1BtyC6pS46y4oBHyJ+VXfspJHRm0aV0yUn3YjSTxlA1QMvt543WKltgAzZ8CM4NdzSFmY/8dvpRkubSP+1p1Y0bB41ccCAfrwgSLocp2EjGVRpmSR6U2esHsp9bcCPp/wBxr13yD8DtPWeMMaseNKyW/p8Zrna+hYtpjbMmwbYa3Q4LYU/OmKkzluDcxDZWsd0AqHRLbolklzzoCadtMQLxmC6QaV54T8ztbHJISsPgY7RV+DVz77caDH5JpYsm/AXtRaZU9EvNbaxfShKtHVKaZ7BCRpuLUrJEKihY/vAU7gK5ff24AOrzeClIh2c+804n6DAugYw7i1jGusZoqent4S98WdX8r1lfJbPYlIdRfNlTbmmikK1QsdlAgnr78wTk2nT/AF1/XjEWwQ6owQ49uCBkvq5dj8nkZdRjmM5ZT3MCtyyrxm/oretv4FohbcHJvo6efLNbapnslmWNiG31OJcToQoAdewxtCSM6Ctag0zpQkfZXP25Yn20j6wxADVoQOffT78fcTyKTQ5Bjlo2VgiwjxHijxUxYboDqSNQDqJAV/6PS7BGGmCnhXBedmEOocaYlxcrsIgk0tezDV9Qw0u4v2bNCJ0grZD8mDHkMvByFWRt2xaUgFxTZKjtOnTpAsUS6j+HgpIAy5kHn2dle3C3MZSxjQA1GbA5nuFOA5d+IgZ1Bi2k4NQaeifg1T4fhyf+otPWPuS0nX69aUXTbrawoflFR3JHf5T1m97IxodOn+uv68aVsY1zC+P34yybGdyHjtxW5TAxutyjEcasrjFMzhZzj19f3jdC05Pm4vkDUa2lWl4H6tDiozxQt6OtrVSilSuhW4xRzWzDKpBIFQfhBbKhyGWfLniYnmHwPkw4Hmc+B7e7niJVrlSgwtSVKbe2lP1Su7ykkf066hCTp/3vt6q+UAMea14cv9OG+CixAL7/AKcsRhzzJyEOgLCQ2laluqXo2BruUXFE6pOv8esQ6gU59nPGyK3luWPk0CrmxJoqDtY/o4nliuTmLOS8qUyxK+nhhX/Oz3vl9QjX5Uo7KWNfwNjurxV0QsoHlkDNxrQDs9naTzxGvr6GO3axsifljnI5yMtOFf2Yx+FeZzOdKV+5TmiZYfiVy3mIDilF10jSXOeDm1t2SpH4UafhRrogEff042UUaUU5kj4e8dvafuwqSySS/wCGKLzbnw5DkO/njxi7tO9Ma/W21S2EOb1/SLDUk/MlXpKeKVISgJHygpJJ6s7pm2iD630q1eB4dtO73YROoZZEgPkksStAeY7/AG17eWJqVfOlNiPGk7H6KLEivWCpBVGqmFszoSVsqZcclSnHFvvvy2iErUkpbIBG069WoNxsbO08agXB4EUoKdhPP7cVLadK7luu8CZ3YwocyxybOoqvYDwGeMPAvLuK5lX5VhVxaGFyFjIp8lxqY3JejmVSBBfyTHEIKUJkLRBbKkpKSGnddCQR1S+77na3nzkMjj52Nw0bDKoJoU7yRn7ji3Jtj37aN42q/wBtQnpq4V4buMUYghdUdwOagHwMOdRUc8S2xD3HZzgN5CuK6ZJZntkuqcYIac9WYsOyvWSAWZLYaCGdq0lKkp0KSNOh2071PZqVk8cLcjnUDt5g9hGY44kb1ssO6ExkUZQfEBmCeXeO0EEdoxdRwL7pqLkbGK+0zKMhqxlPpqqy1qGYlXdokIDf161MNJjQ7KNFO0JCglSX
CoAnwC71PcJdbkJ4xRWiXKpPM9pJ92CnplZy7dsU9hN8cV9KPhVKgqjA0UAHjStM6U5YlxT8q5PwreN5NWQ4ud4vkEFqNZhn/kbV9uvecdDjbp3Mt3deiSpD0ZxKfVASQQPm60QPb3VuLSclSDUEcj7Of2Yb5VltZjcQgMGGY9nYcSHtrzj33ZYvQyuO85OI8k4JewsxxZ+ZDZN9i97FZfhuNW1BKdaVZUdlDmuxZKW3PRebcOxzckESbNptllZpkE+3SroehyIPf+FhxFRjVdLFu0SiBzFexMHQkZqwyzHNSDQ8sMzlvjL3Zcl4tEr7Sbw5BscQta/M8ausdbyZV2cpxomXVOV7NmtuBXfXuhTDoeVJR6Dy0kHXUT7O86atZibf5kCQaWD6SoB7aZmmIV1a7/cxaZflqodSldVSRwpXIV4Zk40OJcA4a5WwWwyXlPLLrOM/YbcYzl/P7tqqyDjm/gq1sK6sq679Jj4eitlsqUy5HabUpA3b1JPWzcZ922+5S2sYlW0JqmhdSyA8DWhrWuMbGHbL+3a4u5Ge5GT62o0ZHEUy00PP7aYYOA4zz9lOXScz4bzqtznj3jqXMxvjiw5wM+UjJEy20N5DaVV5jkeNZzY9bIZ+kizpLUgPJLum7QKEq9n2+ytvktxQxXM6hpVipUUzAavAniRiLaRXt5cfN2LiW2gJWNpa+KuTFSKVA4BjWueJL02J8jO31Vyb7k8o49rI2DCVKxvGsNVYjG4FhKjmK7e2tzkKYsuwsEsurbYbTHaQ1u1BUo9CTf2aWcm17DDIZJ6CSR820g10gDILXMk4Jizunu49x3iWMJBUoi1ChiKamJNSaVAGQFa8cMjKfc3X8hS52H4HUvS8YcjSUXuV2bLsdmTBa0Q8xTQFenJUJJISl970xprtQoEKESG0h24ie4cG4GYUcAe88/dl3nEia7kvQYLdT5JyLHmO4fpP1Yfs9yVWU1XOiNIcivRo8dwIa9URnCC5uVt0Wgq3aHXtr0GnkMspftOCcKCGIKeAGPsO1ecKHC2EemQCUI2gK01IOviFA9/I9WQ1oqRiNszpGOR7PqiR7p7mEEKJDQgU4knOvaOIPHESfcdcV0CPNlWklcVpphX1LDKEshO8bw8684vQIKNNykgnb2AJ6JbFKouhaE0Ytkxz9wp+nnit+tem4d/3B9ykUEEDzI1omdeJZjThzH4cgDijTlzj6ns8vxzIoWYsZhUh+ZltvT0NgypVLHrm1rjSHYLy1l1xax6ZU6WlHw06dLncfIkFvLpjelAWHGnHxdnswwbR0Ba7f0+15t1XDEAhcwjMaAnjTL8bU7DTFc2DQ2s35ov81vL2U7Qru3pkZtoKhSpbqHzsrZbLpLkYxENhLhTuR4bSQeq56jvC7FUFHORPH6u3F4dK7O222cVs9NEYAPaTSvsGfOtCOFcWs4VyPXBcZsOMpajhAYaBGxpodglsEnw/mfPpFmWh0A8cWvZxgR6qD9WJfYfmJKk2WNTmWLL5TLq3XimoyBoAKMeW0CUQbAgflS2wFJVpvCh18RxAa56Pp9KYnNbrcppFBN9QbuP68SMtZWL82cb3GH30m6rqjIocurfl1cpuDlWI3aWSymfXPOofjxchpXlB1hbiFtOaJVoQQQbst0MT6o20yU48iOw9oOEXqbonauo4Fh3KCOQwya4yyisUg4Mp/Cwxy8cne0H3ge1Xke/4vdtMZuuFM2tszy/AvdHPXLit5JkOQYRYYPjeI5Y8XZErHeUqiTYl8R3l/TzVLW4wsg6Bnsbq9uNNnaU0PI7M1RUF0KgntArkB244w9ZvTjovo6O5646kFxd7klhbwxWqoXEkVpuEW43PlgDSHlSOj6+KJQVNMWH+0j3SXXuH9tfGeXLt/psro6yPxdyhWNIbbsqPlDj9DOLZTAs0EF1h2W/ATMSTpvS+SNdD161tUkt1c5yjwsOxlyI9uKG9R7W76R64vNmhkH8FnYXllKOE1jef
n28ikmhXQ+g5ZFaGmJxqsGq5IiOqTMYixBDcZcfVrKLjX/MPKX32pU8oqKtDu0A6OfMGzCQwrmpqeVPb34CbbKLi+Mt0BNbfDpLEA5ZMunMEHMEDM5HLFb3JXszzaq5Em88+zbPlcOcn2T8izzfAAzG/6X81TVMoZErNataSym8ZjN+m3IAR+WpWikLO4wN7t590UX+0zx2G+1GtnXVBcAZESAfBLTJZMlJpr4Y6f9LL+WBYNm6ls5d56NSgXyqHcrBAdT/I6iFmic5mBjqjJ1IHUaQ9qn3i5Rj8Z7jT3BcN22GZJvbhyMzxhDz2JXnotMLlyoEKxS3KjtrkObNqFup2aKBI6m7Lsu43kEN/uMRtNyIOqBiHUGpAKSpVHUijDOorTFxWnWu13G2tBZRssIJDJqHnwguQqzRnSwfSBqIBFe/Ga/zvjW1lyqmHItpMllSQkxqKwfamuOtNvMM1TrTSm5775fSlKUE6K1B0APTTaQTFA6LVjUUJApQ0JavwgUqT2Y2Xc1pYBpdwl8u3RQ2oKz6qgELEqirua0CjOvHngqYRx2cjxT059Tf8f3c2+r/0gTXIht5mMIaeXZOu1kcOqqpkuSppDJUoqS0Fq01062C5SwuzK/lXEAjPCugPUUqTTWAK8Mq05YH7stz1Ht0VntbXe3u8w1EhfOkgpnpAqIC5oKk6gtTQGmJSXUWjw3HK7juhSoM1qjaX0lxZdlWWQy46ELcnSCSp56DC2t6KJ2FW3xB6k7QlxdTPvV1/izeFBwCxA1FBy1Nn7KYrfrO62+zji6O2mi2NkS0pBJ1XDDOpJJby1NCSSdTNXMYY0COwh9LhQShWrZQR3Lbidq9R5kjv0xu7OKE58cV6kccTVAoP14QslEajQ62oEpA1Qof1JUNWyPIgj/HqTaxNOQw44h3kyQgocAfKrliW2p89yGVJ7nxLZP2+I6YrWBo6DvwsXMscznEmp3t+4xc/bnV7ljBsRyX/AHOls2htZv0aq/8A6pjjz9LNUXv04RhAPr7/AEvX+oGvqbPk6RF6q3hfVX/KWpf4R5Pw6RXV8v52rVTVXVlxpp5VzxfX/wAOumP/AIBjrvyn/wAyebXzNbU0/O/LaNFdGnR4q6dWv8VMsM/2sZO3kWJW/EkfHqPNriqyOi5Po+PrZVdGscqRjVpAt7xOAz7QphM5hIp4UivnRnXGjMq5SH47jbtepEqX1rZNbXqb28kkEDxNA8y6isfmKVTzguflhirowB0yKVYFZQV3ekG8JebVN0qsUVzeQzpdR276Q0wjZXk+XZvD52lWjkUka4nV0KtAwks3xSZ7f8n5mwabxxzLn2NQuQuLb8S4szlPOIWRQbDHLnHZ+KVb2OcjWc20q4M6syS3disKiMx9sVSmh3707ex9S2mxXCbrYW0sltdpQi3iKMrq4kYPCoViGSMMdRbxCuOlLCbpe+3+2l2fcLmOK7snqDczLIDG8bRKY7hi6hlklKqUC0UlcO7nbGeM6S64fxzNefM4m1d7yMxOu2Lrlx+jVFqMWxzIMnhTo6sck49NEpWV1VXGYU2564ekoLXz9Qenbrdri3vbqw223WaO1ohW21VaR0Qg6w4p5bOxqKUU1ywR6itdot57G13DcrgwyXYLh7opRY43kBHlmM181Y1BBrVhpzxHDkm3xGG9neZYBRu1mMZlVweMsUyXK37qx5F5BXQzrdzMZUV7LlyM0YxxdtYJhWsuzcBeYqUMRUqE0SGm/Zre9cW1jucge7gczyRxhVgh1hfKBEdIi+ldUaoMjIWc/l6Sg9UbhYWlvebltsei0uYhaxPIXa4uChfzivm1mEWpgkzykVWEJGD5odXl7SuIsZzCPn8a+TJC8fx+verUw5LkVTDtn+qaSl+mQVqQqu+RKtUEE7geovW+/Xtg9q9qV0yykNUVrp05f7XHEL0v6P2rdYr3+JIxMcaaKEimvXVsqVI0ilajjUHHjEJRsqZmWoAkoSfv7afyHUu5
/LmKd+FCS21Q+ao+nbjZmHVw/EE6/wAT1uj+HLC9NUE17cN2e8E6oHw1V/Edh/LqXEOeBdxIB4RhvL1cV28Nf9P59SxRR34FsNZ7q4JNalLNZA7AJERkj7B6aft8R0voxYDtph9uk0Ttl+I4UWbP01AJJPcjw0GngO5I6yMRbGpLgqaDjhWNgt1I0OnidSd2nWjQFOdcS/mGdajhjApZcB1Wo6/A6fd2HbrKoHDI4wILDM8cNm3r1SWnEnQgjzHiNOpdvNoIPPAu8tjKhU4ibyJii2pAkqZUtpDqVuoSOykJVqoH7enrab8MuiudMVVv+2SRkvpOXH9OGxzDnWBx+L4tNWVjbV442EKcQ0kL0A+ZSlePgB8NNT0Q2Ww3Jt2aed9VtyFcCN+3XaRssdrbRabzmaffiFWNZIiC66l8lCXPBWh7HXuCT9vTtcWxkAKZkYQbW8EVQ+N3JcrYkx0xmF+ooq1UryHx7/f1qgtGVqtljdcX6uoRO2uNKgsHGkqlK12p12BQ/wDcjr56jt16aIE6cfIpzTVzxuSMhfmPanXYlWiUjXT/AB08OsRAqCg44+Nds7Z9uH5iOUSI76GlqISSNCpWnnp/t6HXlorrqHHBSwvnRgDiSFbJNgwlwLB1SNw1/ENO58elO4j8o0plh4tJzOtSeWHNRKNdZMulS9ApKkK1J7a90qHn49QZ6yRacsEYYtEwap45fpGJ4ccToNrEZSpSUuhKe5UAR20PfXv1X27CWFyQKri3OnIra6Qaqa8EaxpYayS/sIXoEHwB7eJ0+3z6ExXUn4ag4aJ9stv97QdmGTbY2wlpW9ht1lYKXEKSlbbrZ76KQQUq/wBnlp1PSdLhTDOAyHke39B7xhens7nbZ1vbF2iuFOTqaH9II7jUd2IN85cJU0uruLeGXoBShb0yv+mXMr1wVJCXJ7fpBctlcVw/mp2rCUEK10CtDFpdT7cUWIebYOdJqwDox4CpoGVuRNCDkeIqSsd2PUsjbZu+mLe1XXE6qdMyqPFUCpWVeJAqGXxClDTnX9xPGMfCM1ivxLWngLyGdHgVyrGxZpq1T0ySlhDr1nNLMSFD3rCnHXFJS23qVEAHqLvxu7OQ3Nqr00lqClchXSo/Ex5KMychi3egd3sp4Pk72ddaNTMNUjh2cOVcR4xi39xfFnN+YcMZfjaat6lYMl7K2JrVzx9QU7iGbFjLKPLK1x+mzmNaUhU5CbrXHhKcWnVSEpWehm0+oNruFqtUmWQ5kSqYWQKdLBlYa66vCFpmeGDu9KdigN6kfn+OkcaHUZmIqqgg0UAGrOclUEnEi/dX7ucs404EpuPOLa66scz5FktuXmZTpLLbeOYjGccW/Bilxz0mp09SWnLKesJZ9ciKzuQ0rXX1jvz2FpHJdjy766B0j9lVpSNAMyST4mGeRA54onpnordvV3q+XcOobvzOmbORVaCBcyNVSSzUjSNQCIlYnUx82Xiqil+5ym3tZDcm9nqnPtvF+LE9d52M1KWCFz3nH9H7GycBIMl4BSR2bQ2jsaFubv8AOaaVjJftkWP4R+yoGSgfsj2sWOP0KieH+A23S2yWcO0dAWbB4bCCtJpgKfNXkpAkvLpuc0vhQeGCOJOI7yazbfjPNzPReblIcZdaWVALQtO1TZUO4KknsR4HuPDrXCiKRItS9a1517cDru5kasUg/IKlSvAFSKFadlPtzwEpE9bS1QJLiVupQpcORpoZ0VJ2pJ8vq4/ZLyPEEbh8pB6Y4LhZUrwkHEdneO48sVtd2MtpNoJLRHNG7R3944EYHli68t87Va/MNNCdf4/x6xkYFSBjbEpC1PHC/S10+ykxYkNlUmbLeaYhxmkqW4++6oJQ2gD4k66+AH3dR8bMdaX7bHFecscUYXhE3Ko1HheLQ/1G0ktPh6dOkXcxct2JVxir8lMqY4W21ObUp0KlEAHr2PYMXvJ5Hy/LIX/RTDbJivw/HpdbaXk+mmuOQbWFomZUogWyfy7v
HJQT6rs9GrMuYkt9kNAH2PYkB7XbPKcjr8TyOiRKYtsJZTStSWip2pu4DKkKlViZiTtkvR2wlxH4tp1TqSo9ex7HTlxNaU2UYbWyJeyHaJhMvuLiuAKWj0wlx1lwBSH2t4PyqCi34dex7CfeZh9E1Y1d0w3RUsuQ5X1eUsyYbyly3R6ba3mUKcVDnJWQfzQlKwdAe/XsexF2pyTKsBz1zDLRjJX2bx5T9VnktSnaq8fbAdXDYZZ3sQ/UhkgtrX6oUg9h26cel7eKW0vJ5QGWPyvCe/zBqH9UgfXnisevbnyt32m2FVkmNwAwJB8PkkpTgQ4JrXhpywZpMWzdjSnVynJH0chuxiOIKwC0QETI5ZGiylxoggnUa69EjIhIEdKsNJH3GvDLEmCzWA1I8INQcDixt3au3C4QWhtXpPpfWPneaWdRqjt8ydCk6/DuOslVnAE/EZU7PpxwXEMQUiICjZjDkenmZCS7EccQh9DsZa9+o9ZQ9ZlKk9wkjukd+3l1nGaMQQDSh93A4HNEooZO2n6RhvtW6Hoq4MwtvPxtyHEvJT6qGN3iiQAXA+14AHUAdZ6Q7VUeA/afZ2Y3GF4mDLwOEK2rYjkdiTBlBa3Elr03hsUpbPbaVk7C6tCu3+YDrWxqSjAceWdK4mJOQQjVpTjiNuQZhbzshGI8bqqLjJqXJaRvNpFj6j9Nh9Cpf11n9aWiESLt+C3sjxypKUqXqsgAArp3iJ+pI+nLWOSYrC0tzKuUVtGDRRI/AyyNkkQqxFSQBibdQXcmzyXFnJHHdFgkZYEks3FgvPSueeVaY28HwnE8Hr76gxGO63Lv8iusptrmbJVLvMrtrea/NeZuLd3SVMTBQ4Gq9pSvSjtNhDaUgnUdfyI10xhXTGDl9DiRt23xWVosKlmkA8Ttm7nmWPOvIcAMhhQk5hEj/qUmfRCbkVfQ26IFymYuK2xpEUwH72sLK02EqE2rRtwLbUTpvB016iIpOanOuJrFQpP4qGmOZnmv/wBmnujvGxuebXOaS4CFfJtaYbU6oHsew7/EnplvUmXZbQkUQKze2rccVJs12s3Wu7gfhliT/YFfvxNHFZBYZjo1bWnYlP5fgEpAA1T276DpT1ENXPjwxa6Kje3kRgxVs12K5HmwpDkeTHUl1h5l0svNuDwUy6gpKVD7D38D19JDLTEqPUBnlgioz126qbTG8zZcyTHLdHpWTUSUqqvEIStDiHWbFlJRJcZdSFBMhKtSNN2nbrdDc3dvG8MEjrbyCjID4W9q8PeKHEa423a7u8h3G6toJNxtmJilKjzIyRQ6XyOYPA1HYMP/AI/sM1xJkLwG/VyvhzKS69hN2pFfyLSMa6kVyHFFq2aZQNAWSrdp2T1FkVZDUgA92WCaTugoKEd+JU4BybimbodYq5v01tF+Wwx21QYN5WOpA9RuRCeCXFlC+2qdR1gIFBqK1xmbtz+z9PfjJy2z9ThM55JLi4cyDICUDV/aHkJWUJHzbNijqdOw8epcE7276o6VpTPhjE3DHjTEPhYteqAlaCUnskKCkoPiO/gpWvUwblcjiF+39ePgnIFMqYjzzTh4jst5jWp0ZTJTHt20An6US3NWJmie3oGarQnwSpwnoTfBZ5POjXSTxHf2jBOwvVUeTKaDkT91cDarmRfqY81RGyc0HV6+UprazMR4diXNrn/vzoJcrU1PZ9uDUWqlBxB/0YJsafAbbDziknX/AIbf+cj+pQ0GjST4/E9uhkhrVVPtPZ/TiYoPE8ezDXsLBt52ZtOpcjOL3nzKFJc8tAAAjtp2HUJwKELlliUh4HnXG1jzzH6nXl4/l/UNajXsQVj4/HqKq0kHeRjdK9ImPYMSrZt2ktPpJQWFLcCdCNv/ABCkbdPP49EZCPFwpU4GRVotPi0j7sN9F4t2e96R0aRILSO/9DP5KT8PBvoLJ4pDSvH+jBRFHlg86Yf6ZT90aXF69IVbZJb1lFBCQVFUu1lsQY401BUA6+Ne4HR7
YLdbi6RDwLDADfJzDbkjjT6fXiWnvMvoFFbcT8CUagzj+A4vAup8RCwUOTXWV1NOmQlASn6iJCiSFnXx+pCtPAlp3678mzouXmEn+yvhUff9mFnaLbzroauCfecycAevv48dtDe9vRCQANw8NAO3fz6rVi7sScWCqqooOWFB+/aeASHUAeQ3DzOo8/Dt1qYMeWWNiMteIywmT58b6F4LdQNzaj+IfD7+s7aNvOGWdRjXdSL5RqRSmJp+0/nURvbrynFsVqclcNxL2bBdeSsMy6SbDmWVU0h4kB1xiwbfjbE90oQ2PFQ6tyyHmW0EsnBRQ+xRUfZl7sVjcsFnlRe2o9+X354AnHNZX5hQN2FWtqJlHpB20rHVhLdm4sBbkyGskD1nFqOo8z49/Gud4dpb5i2ZLE/acN2zqbO0CtnFTI9ntxDj3BQkSs+ySJImyY7sXH+OlQYCV+mw/cSIvLNchqY2SPyylwp79kPBtR/D3iXG3RXVrfrqIXy7QqORYzOBXuz+uh5YIybg1rue1ERozM98Gc/EiLAjkp3mmfPRqHPA1pa5U+XXPJZVJU3R1E8xlpCfXnMR49bGiulW0IS7aMD1SrQIbSsnsk9Am23TMsyCpVQadpAoB/rDPurhnW8BhaMmil2Ff3SSSR/ZOXuxNj9vrKaXI7jm32t3j4tcV5AwyyyCK864QmwuvSRRZdIgpd3IQixiWEVUdIAUG4RdI1WdtidJ3kc9tJYP4lTxEn8YY6ZfcarQdgJ54ReqLSS3njv08LsaAfs6fEnvFDX2gcsQoxirvqW8yLCJUOZKuMSv7jG7BDEdxX/O0ljJrZPkUpSXoyiPmPbqu932uW33GS3RSzK5GQ7DTFjbTucU+3JM7BQyg5ntGDAxXPRgn9UsaunOgJZlTEPzQNddBAh+tK3EDsNOtMdm6H85kQ95qf8AVFTjTPdBz+UrOO2lB9ZoMLUZ7HGdCp22uHPH5Et1EM/YFu+tOUn/AN9A9T41tl5s/wDsj7c/swMczt+yn+0f1fbh4VeQqYUg1lfXVi0qCm322lTZqFpIUlxMqaVthxJGoUGk6EdupkcgH+GiKPZU/Wf1YiPFq/xGZvsH1D9eJPVl2nlDH26+dNKMvpmi4z6jpQxZN7UhSw0CGvTkFI9QbfynND+E9bBLIDVyWHf+rhgdIgt2LRgCI9nI/fgWPTnoLzkOU0uLJjOLbkR1p2LadTqCkg+Gmnl2PTnsl2ssHy5PjTh3r/R91MLu7WpSQXK/4b8fb/T99cYjYKWAdx7anx8fH7+/RzAfGBTjcgjeTp37+P3jv49j/DrNJGQ1GNbxh8jhaqqiLNeS0lJUVK2/KkE99fIHvoethuHPHGAgQYdkni5Dqd+wp+XVPyD/AG+A79fPPfH3yUw0pPGZhuFxtrUgEk7fDTwHgR19Nw5FDwx8ECA1oMJFhVSo0d1C46j6aSpB2nVC0/MkjwPyrAI61BiDUccbClRQ8MSV5DzWtZw3H4FYsfXZTQwLG2fR+ODXyY7a/Q3g6tvzlg7vNLY79ldbzdSkUJONQtowagYifaIgBG9Z1R/4JSlCHANT2PiCyD469leXWHnvjLyUwEstehgnVTDXclOo2xndDoEsDU7XPMoPfXz6DX24M7+WpJVeziDzPs5VxnBZrI/mmixrlqPD3cz7sR+yrJIcZtxtIcdOhSlo6oTr3B2hOriidfDsfv6DNrOYIHsz/owS1WUIyV5SO06V+zMj6sRhsavJeTLmxx6hcr211dLaZDYpnT2KyHWVNaypx6XNfeXqELcCWkJG5xxxYAA8Qf6d6fvd8vTa2gUOiF3ZzRVUcST3nIAZ59lThE9QvUjZuiNkj3XqBpfk5LmOCGC3jZ3kmkNFVEWpNBVmdzQKCa4qG5OvbLIJxVMkIiQELWlMfcpCwj1FAelHHzKKynxPcjvr1OtmigyFSa50y4HkeCj7e3E6SK4uXBNFyBAPePrJ9uVcAyWxHCgWnXEhCiEgoSga
aHuW1K9Qg6+J79GrS6XWKKFHdnj69qQCCSWpzyzxopcWlbaGlKRKUrRP0ra1rBGm1JQBoVHXyHbpzsJ5XISPOueWB97aWiqXm8IoCQRXj+17cG6Dh9rYUX6hKDleXK12ShxbakuOuthTbC24oUlxRfeAAP4QnUnsOnKeK7ks0LBizjJTkcuJ9g7TxOWEEX+1bfezCORB5YJLclHEV7+xRng9e2v268aS6bkK8zvLsrpMzgxsfewm1qIjK5sqwuPWk2tEPUfSwmCWKba4++sJLbpQAT0jp00kN6z3jSa3cNpWmmgHwitONeOGm56zv7ixtTsFrAdvELKTMTr8RqZW0gitQaKK0qO/B3/6ccmJbrpEyCm5iW0x+M9kNTEfjx6JptpUpU7Iqh0fUV0OFXpLi3UFbKvTICtSB0W3Dp+L5fz7U6jkNJGlhXh9MxgZtO9217c/Jf4VxmRqcMJD+LS4yJ56SAwHLEjeAc5bl5P9HBS+rF8T+hh4+lQJemvCRpEeebGg+ruJyVzHtO4SkA9j0h7ltV5FGWkjYSx1LChqBzPsH3VOGOzv4LDdBHKwS3udKAnKkorpX2uCR3sFHPFxvB2WckNlEGVTIu8anykvzRcxg7Wj0wS/KU+6UKiPMMa6utrCkoGh7DTpeBI4YcSoORwcbbBsEyq2XknD+ZQqDJoR3M15snKl9q5cBAVUWq1R1LQolZRtIUoHuB5krbcZYMjmnYcxgfc7fHL4lyfkRkR78SG4p575lxZ1OPcxYbZ3lZGUGEZhWxW2rKO0kBIdtIiPTh2SCT/xI+xzanXY4ok9TWtLPcPFbERT9n4T+r7R7MRRd3VkdNyDJD+0PiHtHP7/AG4kDO4t9vfLsxvPLDEcPySwCECTbuxGW5bqWUo2xrtA9FckMISB6UpKtg7aDr0e5dQbQPko5JUQ8AMx7VOf1rj7Jt+y7oRdyRRSN2kZ5cm7adhwm5VzNVYxAYx3i3HWcikRg3WxFVyGomLUqUJDLO5xgBUtmNoB6UVCkgJKVLR18XbJHJut1crqNSCau3trw9pz7sfX3GJP+W29Q7AUFMkX9fsH2YhLm1DynmlxLy3KsilZCVyk11PjxdFdR0MRKiLiRDrQtMUvSEKLCH3N7+3spZHWM24xwKYbJQkf2n2nn9KY+R2MtwwlvDqbs5D2DCc3XSePIjEaTFchnJLBpUZ3boU0MMFbikKALbhclLKdEnulPQZ5XkNWNTgskSoKKMSVv8hkV0lg1djYs170GrjVjMAJK7RLcRtaUpEgKZabKlkuvdwn8OpPbo109tbbnuKhgPlo6O5PCg4A9pY5UGZFewnCB6m9Tnpvpeb5RiN4ulaGClNQZhRpM6gCJSW1MCurQCDqAIgzrkyfU47ksjEn1ZXm9daUVMxTiC/YVL91eSE7qSMqK4y7OsocJKlvOpSGWfAnXq69s2WO6voIb9fltoeKSQvqCOEjHxkEGis1Aqk1bljh7cupY9j2W/3HbJzunWEN3bwCMoZoTPcmohGgqNaRgs7hSqZA0OWIz5hZZJk0PL4HIsuuek2rcgSK2sdZk1uJQm20x2aaPeOBAt7xiSQ66ltHpt92QtStevu52u27Ylvuu3qfll/KZmWjylsxKqfhAGXaR4qYI9M2e+XrXNtu0oa/lZpdAbwQKOEXm5a3UmrfhXJDnigT3HR8x4Hqc/nuNyYNpZpcg0VpVqW23Y1jgW4ucwpIIAWjQKJ89Qe4PUq8u4Luy/OCSI6+GuYPeO0fdh82Bb2HfIYLRpILq18UoUlTq4AHkQeI4gjFTWFe4F6kdc+plLCgtS1uOL/G+6sqeWSSNFqWok6+fVWbl+ZOVHLs7By92L32vVrSJ6l2ap7e0k+/E1OOOfFTmUSRLUrUDwdIJB79+/cfHpRmY+aWGQGLOhQBVQcQMTe4u5/kMSGE/UE7loGm8/MewCQPAk9RgWfLliUyac/xYsm4l5gjW0iS+FI9CWI7EpzcS3KmRgUqdcA0
7spX6e8fNqPHt19ceWAw4jG2NRcMY3yemR7+/Eo7aFifIuHX+D5vSwMwwLKq9VbkWOWm56PIjPAhDhU2pLrEmKshyLNYUl5h0BSVJUOidhuTRMCcx2/T7RgHum0W24RtbXSAg9oBp3ioIr34598o9ivJH7bXL+ccv+1ezzfnL2t51HRkvNvAtsF2/L2FQYTrr6uXOM5UZIZ5Pcwj1Fm2iNoat11ylKW04B6gdbCWW3gO6xDzNtZ9MgGZR+IPv4jtzHEY5O9evSzb/UfbYuj7Ca3s/UvbrVrja2YiNbm2UhJbZxyhB0rrWqwOUYhVJxLfHfcJi+b0FNl2K5JWZNjmQQ0WFZc1EtEuHKYX+MEpPqR5DKyUuMuJS60sFK0pUCOjE1xBKi3UTBopMwR+nmCOYOPzi6QHUe29Tz9MdRWtxab1ZSmOaGVSroQc8jkQeKutVYZg0OJb8Q3ca6acs5K9I0Zr1lK0BQ2lICvmV2Hrq07J/p8/LobPclvykGVfr/o78fop6a7MkFqt3Lx0g9w9veezkM+zHzkeRj+dpUzb0lZNrG0LbYiTobEhTm8jV2QpQIU6vQH/ALPbv0xbPNNYxGKN2CtxFfD7hwHtpnh63zbbbdLtLueKIzIKK+ka/e48R9hJA7MDqlj2P1Easx5+HR1VaktpcgVNXHeislWi0MzkRPrA44kaHRYJ8z1Nub2ytULmPVcvyqaHvIrSg9mJths247iVWS4cWUXYFovcDStT3H34PuPMQsdhSbwIU/NQjZWGWpUmTYWaxoJkxxwkmDXd3VJ10cWEo8+oNlDcbpcqLg/8sDVqZAD9lf3m4dwqcaOqt1sumNreHbVUbnICsdc2JPGR+elOI/aai4Zfoy5zzj0hTi1KWt591w7nHXXXFLWtxR7qcccJUo+ZOvVjI6UCoMuAAypQCg9gGWOV7pZIXLyEliSSTmSTmSTzYk1J7TjYW39OhbpB3BPypHkNPHz8vDqVGlSBgcbhm8WBRnFi5YQgUklcUll3uNfSV/wla66kpV2+7pi2+3CNQ88xgLfTSSKCOXH2YjffOSk18xI13NK9VPjrtIIV/DTo/FGglFeBwuys/wCHiMWgtQrC2/ZWWxBjypUgX78t1uIyZEhuFA9yy5lhIQ0FI3iJAjuOHVSRojuR49UY7xxev1XIA8oAVNASbGgHvNBjs+wgku/5VkiUMWLEnSKkKu6lmNOelQT7BiN/ux9nsfgrCeKfcZ7fMwyfJ+NLOBj8+TlJmt/reL3U9EeXjuUQrCpi1q4lVbOuobSooQuJM2JKj6yUoa+jevH6i3C96W6ngih3VGdRHTwSIKh4yGLVZczxIZammRJSfUX0mi6Q26w636DubiXaCsbNJq/MjdqNHMrIq0R6gcAUegrRgFHNZ7seUzU1dZkON4bnsaJlETNGGb+oYXDOTNpdFtcfpwZcgx3suh2EqNctMoaizES3n22mJ7rs1wjcdF7N5zzWss9uzQmLwMa6PwrWtT5ZVWiJqy6QpZowEA+w9VOo1gS23K3trvTOJvGoA8zPW+mhUGYMyzBQEcMzqqTM0pPDHuRyrJcrs7/AOHOP+I49ljiMVjR8UjNxpdbTyXmpt+3GtauroZCJeTzo7IlyWksyW40VhEVyM6lyQ8u/5Ss7SzS23G+ub1kl8wmQ1DMBRKqzOKRgnSpqpZmLhlooa5fU69ur17nZtttLBHh8oCPJlUkF6OixmsjBdbAKwRUEbIwZ2NXBnF2Y88ckQYt3Pfcr6WuiIuLGOwzFrMVxmIC1X4/j9ey2iuq2l7C3FjtoCUgLdUFbV7gfUe8WPTW1s9uoEkjHSpJLSSHi7n4m7WJNeAyqMMPSWw7r1xvSybk7G2iVQxACrFGMljiUDSlaUUAZZsakGs3fa1CoKnlX3SUOJ2km9xzG2cTo4trIDZ3TokXJf1WvDzKG2XlVs1SmiQkfh8/xGu+sJbmfaNnub1BHdStI5UdhKaTQ5jUM/pTFy9D2Vlt+
9bzabc7SWURhQMafEBJqWooDpYkcP1kC8apH9uRwrsPST/Lt/s6bNxY/MkjPPFQQIGt6PkKYVLcJYK3AQfMD/UD93Ui1LOApwo7ssUVWTjhiyFqcWe+pJOvx79FUAArhTdizUJzOPceOT3I7D+Op68z1x9SP6sEishqVUwd2h0iMaajz9Md+/S4kooKdmLGurR2dzTmcaLzZacPykH46du3U1G1DAVoihpjciPDUJ0Hf/Dy/hr1rlXnjOI0Ok8DhWSnXTTvqdPDw60VArliYBlXHtyOFoOn4h4fb4dvLuesBJQ92Phj1jvwPcox1FiwtG0EqBHcAnvqSB2+A6L2V2YmBwv7pt3zKEAZ4gPy/hriJD5baG1vcE/L4aa7hqANerN2S/VkFTmcUh1HtrRzMAPCDliHdhFdgyFtqSAATprqAOnaJlkSo44QJVaNjXCa2HH30NJSn5iNfgE+JJP2DrJiCuo8sYrqYgAc8FHj/AI/zLlzJJWEYIKtqZVY7aZTcTraRKjwKugp1xm5059NdCsrKUpDs1pCWo0d95aljROmpATddzstltVvtx1lHlWNQoBLM1aDMqo4HNiBlhv6a6c3Dqe8bbtuMatHC0js5IVUWgJ8KsxzYABVYmvClTj1W8VMWF21jdfzVx9JvnpH0iYBxbmtpsyivYGzNc4s9BOrgI3H5e3WmTfGjhN1LY3AtwK18y24ezz8HU6EjkuBaxbnaNdE00+Veih/rfLUxo5hi2X8T5zd8fZs3GiZJjjkQShClfVwn2psNixhSoj5Q0tbEqJKQsBaG3E67VoSoKSJVhe2W87fHuVgSbWUGlRQggkEEdoII5jsJGFjftk3DpndpNo3LT81FSpU1UhgGVgaA0IIOYBHAgEEYkrwrima59iuS5tBl1NNheGzItZdZDdKuXmE2MptL6IESDj1Re2ch5DDiHFq9FDSErTqvUgdKXUW5bftl7Dt8ivJuFwpZEXSDpBpUl2RQK1AzJPZiy+hejt46k2u43uOSG32i1cI8snmkFyA2lVhjlckKQT4QoqM65YOOG4HPzyxbpsLznFMhunEuLi1yKzkCrW+tltS1NtS7jDIUBClbNAXXW0/FQ6Wb/do9uiNxuNvNFbjixaFqV7QspP1AnFhbX0G+9TfKbPuFnPd0NF8u7jqRyDSWyqPewHaaYfPH+X2NJKfhWAdjTK+U9DlR1EbmJMV1TEhlZSVI3NutkHTt26ibhZQ3KB46GN1qD2g5j7MDLG6utouWguQUnjcqw7GUkMOziM+WJSIz+LNrELLiC6lII1UCQrpW/hLxzaaeHDc/UiTW1a+IYbSeTmGFqiWGhaJ0/wDhafHejv3Hx6IfwJmXXFkcLv8AnJVcwXQqn3d4wo32H5NkOKSczrY7TuPQWHJqpRKfzYzKSl9ssns4y62pSHB4FJI8+tNve7dBefwa7NbmUaCn9bv5HmOwgHliVf7J1Bd7YOsNpGnb7Q+aswPwsh405j8LciCVPHHMh+6NgjuX/wBsQsZjRlQsllJjULctxDNdBLzjjVgLWQsKRFh0Raddluq7NRmlOeA6WuuJpdotXgvSxkg4kcXB/wANlHMuCAB+0dOOkPSu4sOrtti6nswipcKRIg/3UyZTIe4EFh2oQeeKLM8yLkbAOMpLmF2mfyuBKWTZY/gmdWTsyHFyjLJ1jEasp+HUdrMeXXYzYSo76Gfpmg2lKW0OfOSkVY2/xG8t7TcZU/jxAYxhtTRxISQCaZspYHM0rXSKZ4tG46cv9x2+5v7C2c7OgEXmU8JlmolCeWpQQFXPPxGvAd5ZnOVT0OKyETKxVkumfTSyZDzqYUerr1Q4Na8t7RTr8BCw73AC1rUvQkAjd1ldtve+nfUbUvywijzJESKFoqV4VodTD4ia1zOJXpjtVr0d0n/lAJ5ci35nmJUK0szFvMMlOyqlEY5KKU8IoILa4bjOhwub1qIUNVHsCRu+Pc6+PSNDE8z86A4tm7uIoI6scyMs
DbKrwra1Ssgg9wPH+X3dMMEAUCvDCDfX7SuVQ5duBtJnqktFlxalI3B1Cwra408kaJeZWO7bqR217gjsQR1I8tdQYZOOz6Z4GebJQxt4kPI559o7D7MeamFY2UxuK039fJdWlLBZQpDrm46EvIGqEK+JSdD9nW0cCCM8ajQCnDPF+37W/wC3PI5syGPlWYMym6eM+jdPbH00ZiCkk2KmZLqCGWkpBQl4aErSdPl7HHHzHRjxvx5L50xafg3sXd4n499uuIZBa4hlvNl9HcyKw5IyiAh2gzOgxOmgl6+/5evckQ1XkkENpkLMQAncPY9iNnL/ALPOUfZ5VUtZGRScj8SQMgVI4lzSYzLlRcYRZyFTrn20cxokBmXHxLMFF9zFb1QS3HsNzC1BbnXsewXsJ5O494vx2uyTimFNs4GcKKoOMWza26zibKGHEKs8RlNMFMl2/gFa1KdeKG0s7S2leoPXsexZ5wHzRsixZLchr072Mp+VWMvqU3V3QGs2JGUoksNTQC6wRok+GngOvY9hx8g8t5WxPrbPE2Itw+ZjUDJsbkrjoVllRruJRAnNqgv3UVOum3YpaRp49ex7B4uJ4yvCI1dVWwrb1EyhsaeQgModkR0yWZMmrfjyFLaCtqFM/IQsIT8vTz0jbzKkt0yarN1ZGyOVFqGy7C3HhXjikPVXdtvXcrDbZHZNygljniYEABmcoVauRDKpqONKEYcM+2l0NrFMht12vmNpUyFDa4Gn/wAqRHeB2pLjDyju00Ph1uEcciEfDIDmRmO49v34eFn1DKjIwqO3t+sYRpyai6jvMq9NuXBW4nv8r3o7vnGuoUfSXodD5Hr6XlizIDxn35/oxLh0SIChKvx+g54RWIk2rhWEePrJD7Ikx9CNzD0YhaFhP4d6kEj7utiyRTMrV00NCDzr39n0rj0sLManj3d3dgYzZrjU1xakqQ67qVpWCkEkdySdO3x8upLsyDQo4/T2YkQxK0dK1pgHXeZZjySm+xHiSY1QVtPKif3Ry3cRPqqisb3qi2lbhNWpxteRXzDfYvnbDYUe61KGnVYXfUt51TukvTPRsmm2hAW83Fc4YATnDA+ayXRAOS10c6YJraraw/N3YHmajojYZtT8RXI6c+dAe3HzHaLGOPaFWMYi1KDD8lc+9vrSR9bkeW3LxJk3F/ZbUKkvOr/4bSAhllHypT5lsWO02na02rbFMe3QjIE1Z3PGSRiSXkY5szEnOnDENELTtdSktM3Fjy7gMgBTsphUYW68ghIVr4pUg6EEEFJSpOhCwfMdA1UyHP4icTHkWNdYPD6fTsx9vWr62qJkB2PAbXYNIjS7UQwm1lRQoERXX0r2uJWQN6ggLWB3J6N2WzSTHWfh+meFLdup7ezUqT4+GX054qu5M9vsb+6brLi62/OYlGMystIS+2W3CCxqhKVKST83zaqHVn3u0wXmxQWyUMkUNBw4Znj7Tjmrp3qO523rrcLqcFUu7kympNM6IOPPw5AYF1fK/TZBivEocZVtHj229jqfu/n1Sl/D8vOYDxBx1zs0wvLZbtTWNhl39p/Rh9OZG3HglxtXcI/CkEpP4gDtPcd+tEKeY4XBSZ9CVxO2s9vnDFPl2F8H5nzHllR7lc9xqBf1dZW49Gl8dUdhc179nUY1dyVsKsZU6SzHUkOIkNNqJBKUlSEqYBYr5LSBaxLWprnlkSBTgD2kVpywHa5HnLEz0mbMCmWfAE9p9mIzVqbs5cnC4pNfmsbKzhurEpTaE5Gi5/Qy3GlNlLjCDYjbuHYp76AduoBsaXHlnhXEr5smKo+LE9bDifhvKssveHEcnZZfe5HjnH5d7Jv41IzQRpkykhNTbGkpb2K229MlV7bwBTOW+2vQgHUL2EjYqYVkZKQtQA1zzyBIpSh9uVcQxcjzmhR6zrmRTLLMgHtGAvjuTZ3yjYYjxHX28a9lZjf1tfS8kVzK2DXsRguwmtZjQhaJFdNjVUR5xCk/kylI2JUCrXod
HZDzCX/wwKk+zExrksoVfiOWFa7474Zyam5WseEOTspyXMOEWlT+QKnJaONW0l7WQprlfc3WIPxWGFtR4UiO4ra6XS5oANAtKzPn2/RGPMQLqGWdeVaHIchyrwxEhu1dmETlihzqKc6VH9OBDXyINvWSIdsyJdXZRXYNjFWPlkQ5aPSeSknul1AVuQrxSpIPY9Lci6Wpg3G2pA2IcZRi1rgNzcY1YrckMQnP1jHJ5BSm8qygb3W1f5voXErfA/C43oPEdCdwhYx64+Nc+7vww7Vd+PyJTlTL9X6u6uENq8luDctwkkAaAnRI0ITtGvypTp2+HQMxlfCOA+mffhgDA588LNdNW88Qpe4LZkI0+BUytOmh89eo7IPfTEnVl35YcNK9umRPn0JfYAH2lYPxJ8OooXVIOyoxlcMRA/8AVOCLS3EhyUhDshamEOuyFJ3naA0Vvq1HmNqeo4LF+J01Jx7SqoCOIUD7KYd0CxA2FKtyiNT28z3OvwOp60afqxuVjT2YPHtq/wDml9z/AArROncyrJpFntV+EqoKO2vWz380vVgI+7ps6YFJy54hGP8Asn+jCt1GaxheWofeMOj3N3zl17rOXnnS4uPW21NUNNoWEuIj12OU7DzbbhStKCqV6ihqDoVdx1v6iekqRtXQqL9RUE/ecRtiQlXkX4yT99Mb+CYZwjnMhuuvubrzjS4fWlttjJsahoqXFk+DN+1aGtSndpp9QYyla9k9bbDbdluYx5byeZT4W0qfccwfrr3Yxvr7dIHIdVEfaKn7MiPqp34l/Xft3QrKKxOr+dp9hEkNoeYlQqCukRn2XUhbbjTjdo4haFoOoIJBHh0RO07ZGdLxyhu8gf8Ac4Gjc9wYVSRKew/rxuSf2+8WpDDsMt5us2aSPMhqsW5cGqp2JjBktJXCE5+ar6VU3X0krGqgpY0BOnWyDbNuDVhhkZhyqCPfRRl78aZr6+YUklUD2f04ZMnmXmCp908X271nHtW1w47aQ8RTg7eJMuV1hgkqM03LytyzTHPrMpjOuSVHd9MAktLQXApRO6TkxA0BK6uzLgOVAcqUpy44EGRhJo7WpTtz49vfX34EGa1VzwpyPkePz6O8x7HWcinHDbeVHkNwJlSqR6teK+1AVGkrYjLSlSfULqQNFpBJHSVvOys8hu7dawHPLMCtTT3fXhr2rdRGPlpzRxlnz78QP9z9w/mfLGVT4a5Ui5bxbit+JCrVrSxIkTInINbNmTI7YCFJaZllwFRShtayrdroCg3sE3kbgqlvMSK10qDka3BBJ9gNRwoc8OFrdiK82lFWI28st8GZhVgEtwwCNy1MAG4kqKUpUid3O+Je2TBfbHi+R4TlLFnmF3BxaCJlDkUOZf5sHmlOZCxMZU86xWx0LkyXX3G20lHdHcL06c9/tdltNiNxAV1nQEdWBaWrDVXMjNSzEgCmfswidKblvl31DFbTPI71lNxC6UjttIOnTkCNDhI0DM2sENyriJ3syziJVe8jhdNXRRa2Nb21/Ry5DkyXYWUiPYYtetsNmSr6OM0n60sLUAyrUJ0B769CekLhDeCNEVQ8bgnMn4C3HIcVHLDp1bGxtdckjMylTSgC11AcMzwPbje90L8/GvdvzrjDVjNRXN5VCtEQWpLrUQHIscpMie1YYU224pb1qokrCjqesesfMa+Iq2gqmVaDNFJyHHjzx86UeNbMCi66tnzyYjieGGRBngbEtgJB0+VICe/xOmnl0qJH2ZDDNLKDnmcPavkqc0001OgHn4ntoe2g6mRxdmB7uTh+1y1IA8d3jr/LUdSkQ1piOzilcPGttpdfJjzIUhyNLjLS6xIbVo4hSfP4KSruFJOoUDoepIjNNP4TiO7Agg8MG1qfUcpx0tSnI9HncZkIYeUoNwbxDafkbOv9X2ElbevmnwyiaWzlEsVdIPH6c/sOB8kYVTG41W54jswNLGHZ001yvtYrkOWwdFNu9twBIS42sfI40sdwpPYjpxst
2guFAlIWT7D+r3/XheuduePxweOI/WPdz931Y10yikElRHnp4/4/w+/osCCKjMYGkEGh44fWBWvpWjJcT29QHUjx1J17DQ9fcfMS5jPtyWkKKUKC0g90jwHw8wevY9jy9EiO6hbafEgH7z4Hvr/uPXsew3JOKQ7BXpJSlIX+JRQPkT4KKtB46Htr17HsM7IOOmEMlyMslDaEoS0pStQhCQhPiVKSltPgAPDt4daZ7m3tU8y5dUTtYgff92MlR3OlASe7Ea8qx16Gt0tOLcUolPoj4/MPlT301H9A7Hy08Ol246ht5ax2pOj9rt9g407zn3c8SWsplFHHiPLs9p/QMRYzimto3qvvunasKP0itXHUj+n5AdUdvAfiHUJbuAkBjRu76f0Y+tbT0zBoBx4AdwxDvPLG3ZS/sZdix2kqCpzx3TEI1OgSkEJSCPAk7gOpSyxLnUA9nbiK8cp8IHi7+Hu7PYcV/clZZ+nmd6T0htUne27MW6+lUpvclz6eQ0laQ7GUtAO1zUagHadB1tSeVwUQlVbIqDSo7CRxHOmYxmljEume4CvMh1KWUMVahFVqCVIBI1ChAJA44grlN8qVNfeT6bC3FH85ClOrVr/Slxf/AA0ka/hAA6KwkKAGJYDkf1c/ecaFhDPVRpYj2++uB29L3LKVaoI1WdSAVg6hJKiT27eZKei8DsCNOdfp9OeN0kB8shhy9/1/QYlnxHQJZjJsItRBsJlrAZbel2MduQqO4taXYyISlrSILiF6J9TXRSVEK1HhdvRlu6WM+5MiG3hhLsWFclI+E8jX/RiqfUGEXG6Jt/myrJ5iFRGxXUHUZMODKQefAioIxLN/h65iULr89lb9haIVInT1NqK0tONgNwGDuUhiBFT+FKNApWqj0y2c81wqtcGs8hDMOSjiqDuA499cUL1S8S372tiP+VhGjUPxsPib68geJAzxL/gTgS/znBczMHGFZPMd/s+WmprXYkG3ZYUl2pspjD0pTbTcWG+1vf3jahL6VE6HXofPbW9jdqL+QLbNLItTy/EvDPMHIcyCOOHuy3ifc9kthtSsb1bOIlR+LSTHJlX8LLmewgnB3yGnpsCx1jDsCzXFLe7lqGP5m9PsHHIKIzakvHHqh2S4zAtY8T0wbKXCUobkBCFbE6mdcJDJCtxOrx2iLVKjxH94jitfwKwBoanPAizabz5LUFZLt28en4V7FU/iz+NgaAigNBXCdx3m/BFZdLwjAaSltsxmSoM+typvayuNmLQbj3H9txrFLMGyp5Nc16aUTilX5YU0CST1DR7aV1ldwXoVKnMMnIOwzU/1ajkcT9+llFsh6hFyNkRSr3EQ1yQV4TiNv8VYzQmvjCV0nIYmJf8ALs2fgthWxlKg3NOpLLkaWy01aRadW9omRFShpuCufICllaEDc2RovQ9V71B0YCzXuyafL4tFUeE89B4Fe4kEcicgGrbfUa46TS32/rtxPtVwK2e7QAy213FlpZygylH4wq6lPxopBYx0bzSzpW62M1OfQ+pJtZwLhOsiaB9O0rUn/gxUJGh8D1X0ttcQNpmR1I7QRi2bHfdk3OMSbfd20yEA+CRG49oBqD3EA4M1RzfllhEar7C7sZ1cwnamK9MkOfTADT/lN7q0JGnbbptP2ePWCGRDVa1xOeS2YUZ0+sYOmNZvZ43Kjs19g8tlUdpd5HaUpTS5Eva+Y9nAUShaY6SE9wpHmD0Si3W7jUKCcuGID2Vk5JDKK8aEfbn9+J/cX5VxxPpY1nFtI+M3ClGFJQ00t6nmTFo19ZmKSttLqUk79PlGuo6iT3E87VetcSYY7SBaIUA9owdo0atyJLceOzWT0ApZVZU8iPLSW0JI2SoLh9dtK9TuCD28uoxDDiDiWskbZIyn2EYZHJfG8u1ix4TtFGtKSJ6HoGI4uJMrExwpf1DSAFJI7d0gIK9dCT1N2+yF9cCF5EiQ/iav1ADifqHfha6r6kfpvbHvbezuL64H+7hAyyqW
dmICoBxIDMTQBTXDNbwFzL8fq2HTcRYddZfT+hIaTW2P0RbBTElHe4pMBD6AW0oOhH4vHqz7G8tOlJGtbURPKYa66+YuqvxDgNVMjUZfhGWOS9zbqD1a28btuIu7e3+eKGMp8vKkJUUjIJYrFrAKtVmbMsanA15zyiq4uxORX130cGUiLIaisRI0WNZ2KkRVJdWJbLbb0OPuOjshSi4ofKjx6O9N2Mm9Xh3C5LOhNSSzFRnkNJJUnsUCg4nCV1n1JbdF2MWw7alujAlY4liRXYqvikZlUOVqaNI5LMaKgOObXnr3QXOJ3jGNMqtMjsMik11E3jVWUF6C5OnAwYLMZLiQyw7IdCyoLRNJTvSrdr07dR2h3OweRH8mGAl1lGWlkGbLxDkcKMDGa0OWNvpl89tu6RN1WvzG736LF8nGoISKQhhHKuXl6yQz0bzVAqvjrh95Xdt+5jGJXCUpvG3M+wCc3iVfkDzxGKXObv14s7HBau+cSmwtbSqiJ9O3kIZejQX3W/V0VvV1Xdi6WthHaXyNJZSoGMjL5YWRidJIWoj1Aj4cgeWnDNud7LtfWVxeWzOZEdikMbGTyrdCI9EkjgalaTUyJIfMpwNccy3us4L5P4byC9gZNgttibsS7mNSoL6Q4qCn1SYykzWSph6I4ggNvBIQ7p2OvSvvtjNtq+cSdEtdPBveCMiDxqPqx0F0FvO39SSNPAwa4hADgArpbgQQRUEHiDmMu3Al4q5el0stmusJPpBJShJku+nqE+OqyotjQD4jw6RJC4FGGZPEfTLFvwJU1U/ZTFguF81x5MmBErJ4aU+ptEqzCtUMNHaHBAB+V6StOoDh+Rvx7nrVHKSaJw5n9XfiU8QVSZOPIc/f2D6DFxXBubxHqqvYhvJDKGWkJCXdyhoAdxWTqpS1d1HXUknre7AgAcKYiIGVq1zxY9xzm7rTbDL7qtqDo24kgqbCgNex1StpYPzIPYjrJYRQsnHs7f6cbpGEuTkBu39GBn73uab/AIR4swfm7DFS1XGBco46qfVxdDAt8dv0PVNo20+HBIr5ENTyJLawD6Sm9FBbZUkvPp3ulpHvEu07o6jar2ExurGlXrWOnY4b4GHPHPX8w/Q99vfTe39VbBrg616d3BLyzuohV4UI0XKMv++tZ4iUurdvC6GuRAOK2fdp7Wb7MeOqz9wn9udmNQXmRVUrkDmv2ww4fp4JylIbUf70yfC6evLIxnk+vejuOy40PZGuGSHUtl8/maLiE7dus9hJIfNjk0k1oHXirU5MV4kc6g4nbt0nsPXmy7d1HcWMAvVtw8ckaKJoywBkhZyNU8CuD5SS1KR00EDLDo9ovvLc5U4yiQrjjTL+LZUqBvatrND03D7BSNglSI968hiW19QtJSlL7KFJI26dj0ejsrmeYXSrVDTI5UAp28fbzwp7Rc7HtEX8Fe4i89GOornqYk5lVroPILwAFBniV9dnMTIZEaDX2QkOyFpaH07T7pKlaj5AhCj9vRvxW8ZkdMwO0AfXg1AkO4SKkEoWNyBUhsq9wFcTCwrjWFBroku5W+4+6PWEA/kNgFI9JUwD89xxwnXZ8oSkDXx6jWtrJdk3d0Pi+EA8u2vZ7Mad/wCrrbZ2Gx9PHXLGCJZXXwh+QRebDmWpTIAE4cNrE3FMdtCNEnYhKEgJQkeCUpA0CAPLpqtAiIAo0oBwxTG6NcXdw01w5kuHObNmT3dwHICgHIYSTWojtkqSAlAO7UeKvHuft6NWxZzlxOEbeY0RfHyww7WY0XVNgg7joQO4Hl8dR00QWraNRxX0t+qSla0Hdga2UKOp9aVK1bfCmnO3iFdgo9/6V6HoojMi1HEYl20cdwdDHwtxwMLXGC4uRHWjRS0uMEEaa6g7T9x7EdTluNSh15Z4gXdiLeWhzHDFsf7bPNmHL4/X7Vc8jQY9jAk5G/i0e1aYcqswor+wnXVvUFEgFp60hy58hS46wfWiKCkBQQ6E8++rnTm4Juv+c9tL
GJggkKk6onRQqtlnpIUeIcG40qK9p/y99abTedOL0BuIRbyEymJWA0zRyO0jrnkXVneqnilCK0alldRwPxhR8Z3fDtfjbP8A00vEXzL2IS5EmbUwoeSOyJVrW1aJTrr1fWLmy3nmWG1huKpwpYDbaW0IqCfqTd7nd499llP8Wj0HzAAGJQAKzUoC1AASRVqeKpJJ6Et+m9mtdnk2CGFf4NIHBhNSgWQksqgnwrUkhQaLWi0AAHOXyb7Z7XgvnC74wmOPWVGgtXWG2zyQpyzxazffTAMohAQZ8BcdcWQUhIW6yVgJStI66h2jq2LqHYY91QBbj4ZF/ZkUCtO41qO40zIxxr1Z0E3THUMm2ZtaHxxMeJjYmlf3lIKt20rShGD/AI7x1IjM10CuhF+ytJESBXxwkBT82Y6iPHR+HQBbzgGp7Dx6C3O6KxeSRqRICWPYBmfsxu2rYDLLHbwoWmkYKo7WJAH2/Vi6bhnhip4r48TirDijcW8dyRk95FPpTJlrMYDb7sd5SSplqEghuMNPkQgEgqKiefd/3+fed0+cb/AjNI0PAKDlUcyeLdvsx150103bdPbOu2w/4rCsjjIs5GZ7gOCjkAOdSWZmVjxx7auPFYnglHAqrO7bls0dHBCnptjZSW/Sk3l1KfW9OnekVJVIlSFuOunRJUpSh0R26Hdurd1F7uMjPDGQXc5BVGYRAKAV5KoAGZoBiBvG4bP0Vs5hs0RJXBEcY4u54sxOZpkXdiSe0kjEOcdp3KuoYjlJRtaTroD4Ad9SfP8Ah1ZUkiyzF+dcc43csqRaaUGEi2accUSAdo10/n/V956I25VB34Sr53lJA4VwjR61bq9SNT2/gPv6kPKqjESC3MjZjCmqKGUhO3TTuAQO5+J8OtAk1HE9rcIlBh/Vwb/SoA/+RGT/ACbH+vpYiYkA+zFpXqRhmApXUcYX4iXgVD+P+n8OpiTaMjgGbQSEuuEB1sNrIHkdO3j/ADHU1HqKnhgbPAqtQca4W4p3t/iOo001Hj/5R1Gc6W7sSIoSyEHlhSQD8PLyB8R4/wAz1panLGxY6ZAZ41pMbVK3SjsE9x8SfPrON86A8c8a5bbwliDwxHbkbEEWMd9fo6kpUo9vv8NPLv027RuBhcCuWK16j2UTIzqM+OK5+SMSchSH1JaWCFKP4e3jr/AEdWjtl8JEGeKT3TbniYihr7MCCMz9A248+khxXZI0IOngB95PRhn1nSpywEWJogS/HBa9tNlZwsl5vtaqTaw7CHwVeyI0unm2ddZR3UZ/xuUvRJlLFnW7bjXjpGZcfWAUoG4jpf6sSGS3sYZgjRncFBDBSp/KnyIYhfrIHM5Ysn02M8dzuU9uZBKm2OQULBhSe3zBQM3P8IJPACtMN3/q/wC4d6EYf/VL3LyVBGiqthqyr7QpI19NWRMWsuwbVon/AIv0hV57R19/g3TYfzPldrAr8R0sv+oVA92r34JLv/WUim3F3vbfuBWVvZ5gYt79HfTCv7n1TxzfYOTPrTKdwHiZ2QqxkqmTy85xhia3DOlrbaVKllZPqOFKd69ToNdOvvR7RHYB5enQLi4A0igoJ5OA5DsHLAr1Rs7mHqlvPDCX5S0JDGrZ20RzOVT2mmZzwVOG8ozDGfatnc/EbfLqe3e5/wAZjIlYTLvIt96Rwie88lpumjvNzI+jO51qY5HiLQCC76vpIWC6gtLC86zt4r5IJIRtshpKEKf4qgfERQ55FQzA8tNSLB6Cvd42z0uvLjapLqKc7zEAbcyCX/s5JpoBBFB4lkKIR+LVoBP3CnJnKt/nnHUe4zrmW7irzTEW5sWaJlFALRvIAeTbKjT7VE2GUa+sy6G0uI1BUPELnUOz7LbbZdPBbWEb/Ly0Io5+A001VaHsIqQaYdejupuqr/qCwgvbzeZ4DewBgwMSU8xa+YQ0mpafErUBFcxxw8m4g/WMlcSjc7+u3R017qH6lJOpOniNeoQlpDCCfD5af3RiDvFj5t9d
MBn8xL/fbCzTyJTLikubyhWg+VXgPiB9nWcrIygjjhch2+RXIINMer6smTm97aHEOI1U2QSNw+I799fh1LsruKPJiCDha3nabjzNSghhnXPCJY868q45g03jaJNZYoZLLrCnFNbpaYzp1dYbdP4ULPj1Jj6b2O63RN5kTVeqQRnlXkSO3Gk9Z9V2XTMvSkNwY9lkrqSgrQmrKG4hScyMV98tYXQZdh+SHMsZsczrqirkyq2kgzVVjEy4uXY9c9W39i2RMiYrcVjLrdh9KUynWErabUgulxKH68Wvl9P2+7WbKm4RS6RUVDIqs4NObRuQy1yqRXIYvH+Tvc7i96wv+i7ksdiu4Vncg5xyI6p4eQ85PA/cg7cc6XvvmW2SUMrH3n2FSYCG96a6IzXVdPHrkJZqabHKqKlESloMcS0hMSMylKG20EnValKVwx0kLlOoZN0vNRllJALGpOdSSTxLZ+8jgMfr91ns1oPTyTbtpRUgh8uQBOQWo1e0EhiePE4A9hjMvmX260fOmIlu/ssESziXPONsMCNdY/fR0KQiZGh71uLrpkZAkQ7LQNPK3tEbgsC5YEMKi1JrbOS0LngDxMTns/ZPZ3jHPN3e2m6SPvkkbfOwoiblCmbyRcEvrcf9JHkZE5moOT1EEcpXMr3o62pv6lWz2fqaq0bSUsy4m4pIUkj8qUw4Ch1B7oWD1riWPSWUFWDUZTxVuYP6O0ZjETeIbu0nWKSRZrOWISQTJXy54W+CRD2cmXijgq2YwzLBcl1kFayddD9o18+58dD1LJAFeWAGnPCdVVNhaS24sZDji3FhICUlRB10/wAdevKCR34wkZUFTi3/ANhfsVyLm7NYFf8ATuRqaKuPJyq+cRsYr4WocVXsPkafXSWwd+mpbb+0jott9gbhtcuUI7ef9A+3FedYdUvtkAs7EGTeZyVjRRUj96g7s+7Hch7ceEsR4ewzFcdxisjQ66ucgmJCajpMi2fjLaQxIsWgn5oiVI/IYX8p13KBJ06i3tusEx8o1gJ8J7R/R91MN20fxE7Vbtu6hdyMK+YAa+KnE9hPEjkSRyxzrez79xHFPZL+4d73Pb5ns5bXt1u/clm0bElREsCJg89q1cbfegRYyEBcVDqyZCRuUtIKwSUqSIeCOOtOunYXyfhibCK7Q59x5ndEUHRTFvjmU49ZtBRZdCC40/HdQQpJGjjLqQtJS4lJHsexTH7pvbBce160lc34HX3vIHt+uFR6bmnBo+6VkdDTIWRUZ3WhA+bK8ObJAmISBZRG/TdAX4ex7GjgGTQcFqY1vIzOLcY1mtdGncdXOOLEpF/jTklDsO7sn06sVk6vKAhcFKlTA8kgoCQCfY9iUOX5ibvEqeKhqIi+tJyJ8a+atm2Yt7ChQwqPa1qkDdXoBSpDjqFq0kpKFDbqDKsrK53G6SytFL3EhoAPtJ7gKknkATgXvW87f0/tc+87rIIrC3QszGnuUVIqzGiqvFmIAzONDjLky4yjJmeMsjky5UW6hqax3L0lyCt+TWpMh2NcJYUoU2R0rqQ8w6ghiYyNUaElPXTsHTSdPbNDc7S+uS2QeYCAdRbJmH7SSEkMpzStOArj899k9VrD1Q6hv7DqmHybi5nYLGSRpjBPlJUcHjQDRMtA5BI0saYsT4p5Fr85pZHGmVz03GX4aXobl8htTUmwfjHR1Tjb6UOuLTHKV7v/ABBqoEnt1X/VG0rZ3a7zYp5dhdAHy/wr20pkM+XLhjof0z3K8Tbn6T3OdrncNvJ8qYkl5IiaqGZqF9KkAMcyBxNMOe7xOTDks2le4zOQzsdcbivhxx/cn0loWBo4kPp8e3Yga9LOTDQKqx7cqfoOLWtLoLlJRlBzpn7KjjnhsNWk3H7NKrN1pNYSHDJmuJjNsw3wUKdkPOlLbaIqlbVlRGmmvXyREaMlhmBmR3Z17KfVgqj51RvCc8/18uzPAdiZXD5wekwsOQY3HMefY1FxyTIjSo8bI5kFbkWZS8fC
Uwym9QVBSHbBtRit9wlSlDTpU3Kxv+pYHsI53g6XddMroaTXCnIxxcDHGcw0tNTZhKccZbZve3z1ubIrLNE5XVxRHU0Irwdh2CqjmThwzuPa7Ha6lo8NbarqF2nlMprUlbjqpRKkOOz5bilyJUxSmworcKiddBoO3RWxtNr2farfYdmgjstotUCxQxikajtpxLMc2Y1ZjmTiQktxc3EtxM5lld6sxOZy+mXIYCjtNbtyW48iM6hZUAFaFTZJ0Cgladyf9R6jXSTSOFA8JPLMY3NPFFGWbly4YM1FjkWugpflpBXt12q07dvHv4dG7DbVUBnzbCFvG+udQiNEFcDPkrLK+lr5T0yRKgxG29vqVwZ+rClqDaFNB51lISkq1Oh3EDQDXpz261Z3SGFULswB1VoBzNADU05cO3FQ9UbjNBttzfkkGKJmQVALP+FakigJyrx7MVrY5mRsJl9UWT71ghi4miA+dSp5UOc4n13kuq1bU7XPKJTqfmT8e/Vrbzs6WkkctvQQyRLq5ZFRwA/epXuxzf011Pdbrt4a6GjcIbhhT4qHVqKs3E6QWCnhzwKOVcaVWSTc16m/RdJUtBBZc3eJCU90PEg9gDr93XJ++P5u83RHATuB7FYqPsAx+kfSUXkdL7erfGbSJm/rOgdvtY4FlM9kOVfV1uNY1kOSPQ2VPz2KCmsrqVGYTu1fmM1seSqMwNp1WoJQNDqe3Wm2t5WOpATgrcTJTQcTgw33/ZDTt4rkN3wxxLmPMXHtI1iuNcs5PVzFZbCpoSHobUWzajuRnpFixEdda9dD0dQ9Vz5PzHN5oPrUxSawG4gGgJ58q58xWh7MDGJUiVAhkUZEjMDC17gq3EJtZxR7p+KGZ2Gxua5mTW1rijsgPjEeRMQu0M3s2ilpG1ytn3KXXmklKQhSQtKUJcDTWZqrVObKRn2jkT35UP14wIVlDJUBq5dhHGndiSWXe4Gw/wDadce5zq8EwKj5p5ft8l4rzXkqqoxHvpFDSwlMPz4Si6fpra1jRGkOuFS0p2ApT8jYbzqRlqJgUAqtcgST76CmQ5Y+E1XzAqidiVZgMyBT7TzwKuGJ2McDcUT/AHWz605bniMzkcZcS0Et11igqcgdoXrGbkmQhh1h2e3Ggh4NMDtqnsUrcS6xgq6lMZyVhViONK0oOyudT2Y9URjzeLg0Ucge0+zDLyz3UXWVUGSYtx9w3xvx5c8rWDEjkyVg9VLayLkF5T65TsWI7+YmGJ015x11gpeL6nXCXApxxS8XZ3GlNbMBQVNaezIZ0yqa4yUqKkqilszQUr7cMKrs3oT5qrestqKwhobXMp7yvl1FxHKwQ2l6vntMyUoc0JS5t9NQGoOnQK5tpY2q4OC1vMjDSvHDjyiirc9x+K3NUhqyo3f/AGX2ASFOQ475UlkOp/E7CUtamX0/1JcB8UjqCV1jQeeWJ0blHDjiDiD+Q01jiF7NoLhhUaTDc/5dR1LM2C6QuHMiuaBL8d9hSTvHnqDoQR0AvbeS2naKQUYfVTkR3YbbaYTIHU5cxzB5g4yVMtJmRwTpq6kEJJ0IVqO/fx79C2yOCQ+Dtyw6aSTpMi+A2OAnw7FAJ8fiNOo6L+YPbj7cv/y7DtFPrw6aiaUsSn9dD9Mhka/55LjbX8D6alfy6hkEKaZin34kt8QXjn91cPGvmL2p0UD9vj8dP4HrTpPPGOqhwfva5fpovdZwdZyDtaVlUiq3KUAPUv6K3omEknT8T9kAPiemvpqnzGntR/tU4Wuov8MN3j7xgke5uvcx/wB2/MMJ3s1YWlLcRysbfVYtMappDi0jtqkTPURr57es+pkYusg4FF+xQD9oOIuxSaapWhDH76/pwyzV189BTIZQoKHiQD9+vStFcTRGqk4ZJIYpRRgMO3Aso5J4pmiXxvnN3j0f1S69TpkmZQSSrXeX6OcJFb6iwSC4ltLo17KB0IYbbqG5iQRv4k7GFR/R7RQ4DT7HbyHWvhbtGX+nGrn9
jnnKkl255Hy+3yeSypT0SLMklqqr1keNdTxgzWwSAACptsLUANxUe/Uk77eXDhV8MdeAyH1fp49+NbbFbRRF2zftJqcTOrOUOW4vsPv8tj5fZNZrU5XBxiqytbMVy8j467kdRVpZTNfjLW4/9HKW2mSvc+AQoL3gKDpG4CrPOK/kliDwJ1UrTgcufPjxwoyISWihNKSaQeYyrT68GD26e5Ot5Xoq/jLn2splZNYsog1djcRIIo8+bQnsPopKfRj3PyAraCAy8r5mtCfTRHWRZqzWfgfOqV4jmQOY7R+jhtKPFSO5GoftU+88AezFOP7k3DUzhvnG7ehIeqeOcyxzDrbCo9dvjRbaVGv5dRlWNW0ncXbJvG4lg29Hi6pjtR5rZUlagSEbqzazNYXl9H4ImjtQAopqf5lVfV20UggZKNXDLFg9G7tDb7rtVi4SRxLftJrGpljjtUlj8v8AZ1srqxzJCgVFcRsrsUogiO83dMs+s2hSAERkKSh1IV6evqg9gQPLw1PVUCeYAgRZjLgf1Y6pu/TfpfzGmG8hIZDrUHy8lOaj46mgIA7MSX9leDKs/eTwj9HZtTo1TZ3t5NKEhTjTFfiF0604dilo2qnhlBJ8N+vVg9El3vNToRpRyf8AVYfewGKS9T+kLbZdra8s9whuYPMQUFAxqw4UJGQz48K4UvdSmffe73nO/XCkMRXsnr4LLqmlbHWqTGaOhQ8lSQpOx39MKk6+I6JdVQg7h5b0D6EyPGmhaHCB07b3kW1ruHlObFmcCQCq1VyCKitCCKEHPDRqapbhSUpUr46HX4eH3dL0ViSeBpibPuyDMEVwTa2ndShPyEE+P+wa9vPqeliBxwPO6oT4cOuNGW1puB7fEHz08z26kC1VeAyx9W6D51wqpVpp28P/ACd/sJ6y8oDHvNPPPLGyh4pUlSSpK0LCkLQoocQtJBSpCkkKStJ7gjuD14RilOWMhKD9WCtW5/AtoTVJn8JdpDaT6cO+jAJuq3wAKlJH/Mtp07+ZHik+PWk2zK2u3ND2HgcR2op1Kc/sxrTMFlSWnbDE7CJlVUNVD6BxKLSM3prtl1qyH0rT5lIUOpMVyyeF9Ub+3I+8Y0Ppk+IA4T8XeEGzbamIcjuIdAW28lTTg0Oh1S4AQQfHrXuE935JaKSQHtDH9eJlnBasaPHGTTmo/ViYNROachsel3AbT8wV9nh/HpBm3Td4pKGeen/WP+vGM1pAHJCIB/VGFYOqdUEIBK1dto/hqrv4D469YrvG6MwQT3FT/wCEf9eIjQwICzKoA45DCmhSYyNhIW4rupWvYnvtBOuuweXnr49TP4lfKuk3E5J4/mN+vEBgsj1CqF5ZffhEuEvFh0h1KSUK1O/VSP8AskAk6keHw6Hzku3mTuS3ean9eJEDBW0qMQ15Fn2cWW4K4ekNxDspzTeoH8QR/kH/AHe58j0WtZ4TFoU0y41zwYggBAdxXEZcru/pI7zrrBkv7VFa3OyN/mSskrP3jQkeKuvKWD6QxA+36d2JrQocnXL7MV9cu8iRGFSQ618yN24aBDSfEHa2AR318e5Pmei9ukrcDlTA+ZIE5Z8uzFbHIWVVFtIeCykFRVuQsDatKlHRJB7any6ORrJGBWtMDJCjkhQDnwOVfZgFJw+pyF5aYsltt1au8dailaST32kaAbdPxeP39bpNxlhI8yopwPb9Ow4l2O1pOTozHMcx7f14clRwCqVMjttzXbFK3GyuPFaMh1oKP4lLGiUhP3kEeI8uie07rBcXSwuaMT9efZ+jj2Yl3m1S2UTXIQMAOY4D6c8WFcfcME5TxbjMavTDrp06rXdhKyVuVuN16JlpKWAlIit2sjZq33SkL269dTbff2Fr0BDtKOr7hcXTGQgU8CMWROPiGYqTmdNaY5t6yku26nm33SFt47NFQDgG8sRk0/CeYHKuLKORK6kiUYUzXxUlts7UhsbUoQNGwtHhtCRof6T1FsfMSYvWtTimtx8g24BX
xZ17c+eALwTlXB8jIeSqLmjJ7fFJGRQoFfiGYrXZmhx+RA9aR9BNTXAxIEeW44la/WQll5ICAoHoxdXUUNz/AM8KKwRlpXXzViOR0kAiufHCzuWx+oG7bJYXXpcI5LuyuLqO7hlKLE6uY5bcliRIPMVpYmKkhSEYgg0xF7lW+hxWbOkwfKv7wltrmVk6/gMyq6FDqQ4UqYxqNJbYkyG7dsBUiepCStH5bY2alSXvV/ezu9M40PAcacmYcvYKgHicdF9J7JJaW8UW423kbpJGpJqCocirRK3JkORrQMM1qMAPCJT1fOfaamTYdpKeiMR34kdx2QwyXUhCUKQAtpSnlgkpUFBKQNe/UfapgE1ljrJrl2d+DPUG2ThBE6fkgGobKpIp7wP04sl4v9w1PyVlEbj7LLn9EyGlj/2vjfILjKZUeTXRmkKl1uWRkKCnq6WiMrV5KiponcQojTrG5vIjcHSzRT0ydfi7gynwuO5swOBGKPutu6k9PCx2u1h3n00vZNd7sdySIDkdVxZSDx2N2BUiWEhZCAJUcZ485Pkj9fmsjG8sjoxjK7EuWFTVPSWpFXk9SCUMXOA3baW4WTVPoJSpbTekuOTo42NNel26vL+0mWDcFUofglTOJ+4njHJ2o/H8JIwK3ToXpvrDZLjrz0fuZ9x6bhNbyymXTuu0tzS8txnLAOCXcIZCtC4U4KvHjjrtnCU9o5BjepZTHQSWxGr0/UOJX5p3uJQ2QrQ/P1KiuEfwDKQ5UP09+KbG2S/MJIV1Wq+MsuYoni4jtNB78SiwPFstzWUbGogzVyJT6nwsIcZbC5KtyEeudqEJ0UlPzHadPDpgj26kWuZlEdOfZ7OOIVncXt3eFrJJXui1fCDkWOQLcB2UOWJjUXFl+j6eG/Ir4aKSIfVmosocYPWEg7p8uRF9UIcQ2tWxKhoQB4da4YVh/N1Owc/DpY5cgDTLLlh6msri6YWcqwxi3TOTzY0BY5yOyEgEA0Wooacjg7Lyaq4kxRNvTPtWDrKXY67yQ4tSPqmUhcuU2yVoQ1DjM9klYO9Z8uvkkMm5zrY3IYIafljmDwBNCSxOZpSgwwwXNv0ltbb7tBjluUDKLk1NHHxuqVACKuQ1BizcOQwMInutzyz+muWrRpjE5Idjx577UGJaS7RDrYTEr0TlNMSIxYJJfQSkk6aHTo7B0btBkexeyY36MODMYypGZ8NWqD+GnvwD3P1a6gh8loeoJGR4KyaUh89JzQiFFcaFUJmZCwGfiFcsTWwLkr9fx2Zb3cSMI8Br1RJQ0GjIlEbkMNJKUJkLT5uABJVrp279IG+dNrZX8drYO2uU001rpHMk50Hdmacc8W50J6qtuuyXl91NDC9vZColCBDJL+FAtAJGHFpKBS1SopQ4pM9/fM8eJY2sKhtm5mU2iRFYjIIc/QYhVp6DbY1DllKdUAkDXTsT1enSNg8O2R28yFbJB4m/6Q/oXtPuHPHJPVNzDcdVz9SmUSbnLQxRvSlqoNdbg1GsHNEzC/G1TQYqi429v+WZXnkiupIqZvKrkNVhkeTTdj1VwjQ2STrPmTXwqK/yXdR3FfSIcK/05tXqqTv2jqbu08O4xlUoNqjND2SsPhRR+wD8VOPDtxN23eZekLZ+qt3mME8qN5Go0mIf47pyc1ZlqIQc8zIxA0g2Q8Ue3bjzhqpi1GKV7dxcR4ciG5ldw0mTNjxpjxl2MPGxMC/0WHPmlT8yZ2n2Dx9R5xKQhsKcsKEF7vxA8QfhHdT7BjnPrT1R6l623H+H9NvNZ7UzAKkLMkk7DINIRQv2+PLiaDjiPXuTw/i73Uqy3jaoRHyDlrj7EFz7UMxA5XZlh4eTCk1y7Aflm/x6UpCW1q/43l3HddkhtbiFunrx6WslWt2PxRP+yf3W7DQ0x2T6Seoe97Na2d9v6j+LxKsV4yVKSIB4JWH/AEoWgZhUMK55DHJp7hvbFY8ZZNMZSwv6QOOP1xlMKKHGg4pBB+Xc3JiqGxxC
wdih1U+4bfd2FzJazr+ejUI+4jtBGYNc8foHtl/Z7pYR7hYMDBIoORqM/wBGA3j1nc464y4JC2y2rXaFbkfJqNNDqnUaeGnQxkKZMKH6jicGVslOLKfbv7ok47JroNtMT6ay2hRC9NuunchR2ageQI61nWPZ9OeNZAqaigxe3wfzBU5RHhGFNZkCShsISFj1CpRASnbroVd/LqbbTBcmH6sR5kqn24rB/dO97+JZTbQfbLieTzIlFxtkKsu5hyquWl5ibdV1U+3V8e0LAcCLmYiRLSZYUQ2h7ajcNFEFrTZ1urhLtiFtFYMSPiYg5Ig5sx9wFSTiK16JS20AqLtomLM3wQxUOuaU8gBUIg8UjkAA4sM/Y95bynk72d25t4c6Ozx5yg7i9RKmSjYrfrv7fhukuyFIQViOhqOyrxCdg1V36kbrJHJu7SICmo/CSSQeefPMmndywA2qGW32dI661jGkNpC1ArpJUZKdOnUAaA1ANMED3Ge3t7HuY/7pqvqV4lyChVxDhrdddg0N+0pCbuqro+v08KHIWoSWm0JSElbmg7dNu03ck0YSZ2ZkFMzXIcPq4YQd02axsZnksII4lmcsdCBRrNSxOkAEt8XbxOJEcQ8c1WF1zGTWEdD1hIX6FQw8kKSHEpBkTHEHxZjgj71EDo/Z2j7vd+Qa/KRANIe39lPeePdhO6v6kh6Q2cTQkfxe5JjgU/hy/MmI40QcO1iBzxIiBaPWCyQ6tQ1/EfFe4/Os6DTVR6a3tAooAP1dmKJtNyaU6mZiSakniScySe0nP24drVc0hPrOkKWpJJ1PZKQP4fx6wSMk0HDE2a+jRak50+rAuzbJK+uZdbQ6hK0ggAEEq8AT2P8ALpt2nb5HYMwNMVj1HvMbAqhxHaTk0d1bjhc0UpZHY9wNfs+3pyFq6jSoypivo54pCXlOdcIrt9FWofONSO518O/2+Y698tIeIOWDMF9HBwNBTGzYWUR9EWWlQKkqbZf+O9sfIo/95A/mOtUVtIhKH3Yl3l/DcKrg8MjhvWssRpbFpUTJNXaV78W0q7KC8uNNgTozyJEaXEksqS6xIjPICkKSQQR1rks/NjMUyh4WBVlIqCpyIIORBxs27f2225Se2kaO4jYMjKSCrA1BBGYII44sKwz91jkKppaTHci4fqc0v4MRqHYZQzmr2PfrDrOqEz3KVrEbVqHIebSC6lD5QXNykpQkhCaa3H0U22W5kubW+eC2Y1EflB9NeQbzFqOyorTiTxPVGx/zPXRtIbS/21Li9VQryifyw5GWry/JcKSOIDUrUgAUAz5VzFJ9zeaYrltngcbC5eP1smqU0xervvrY8iS1KbLj7tPTln6ZxKwkBK93qHw0767Lp4dI2M1lFcGdJHDZppoaU4Bm45fViXu3XcPXt3BdNai1aFWX/E8zVUgjPQlKEHt48sH+mis4/ZUOSx4LNg5jktixbhOuFlqTIZCvQaU8lDpR+aQoEJOm3w6X59V1FJZsxVZVK1AzAPE0+zBzbLuDaLuHctAk8k6tNaVNCBnQ0zNeHLBlc92WYpWW08ZVgHgHTlMo6anTds/Qe+nw179AB0FZEV+bf/xY/wC/w3P6wyqafw9f/H//AMrAUZbuMuyafmOXyfr7ier5EnURq+KhSjHr69pRUI8OOlfYakqUSpRK1EljKwbfZpY2I0wL9bHmzdpP9Aywi3W5Xe+3z7nuDapWyA/Cijgq9gH25k5k4ck5TLYLaQnsND4aeHYdYW+pjU4HX7RU0jDQltJdV2A/16/7u3RWN9IwqTwamrTGxFr0to3FPfTy8h8Pjr1rkmLGmJMFnoTUOOEyc0NxAHcdtB5jy7DrdEe3hjCYoMjxwsVziVVUAg+MSP8Ay2JP+PQWJCOOHa9lQyuBx1HCk0NEeXgf93XnzfGVqBp1e3CPMjaubkjso9/9NPAdTInqlOeB08YabIeHG0wn020+RB/1f7O3WliWeuJJjRIxwBOFyIhDydw0+37D
21H+PUeVymWN1rAkmZ4Y2ZUcJQGyAd/dQB8u3b+HWuKRidXZjddW4UaMN2ypG5EZaVIBCgdNRqQCP9vU2G8KuM8C7naVlgYkccRF5I4zROeeWlgFKdyido8PhqPj0+bTvBjUKTipN66YMkxIXLEDOTcYVUyXENI2toVpoB/WexH/AKPVg7Zeidak54rTfdmNsSFGXsw1+L8/RxRfXt1MxaDmdHkmLT8QyTHplnYUi5tPOsaq2UquuK3fIrLKNY0sdxt0tvNjaQptQPb29bed4to4I5mguIZhIjhQ1GAZfErZMpDMCKg94wwdB7kvTl3LczWyXdncWzQyxszJVGdHqrrUqwaNSDRhxBU1wQ5lR7e6njbGOVH/AG8c8SMZyLLMkx2HCtOTlRcJS9QwsclxnUZJH48bkvxLmTcSo7GxaXFLq5ASpSkLCAiXPUs26S7QNz28XUUKOSsFZaOXBGgzUBUKpNQRR1yoRWwn2zom22SDqFtk3Y2U9xLGqtdEQ1jWMg+YLcEq5d1WhrWJwCSDRi5RcZXz5yJZXeM4HPm20uqrI8LEMKrbvJHKbG8Wpq6hrWxtTYWklqFXQGUuyHfxuq1+XclILbetn01tq291cKsCuxMkrKmp5GLt2KKkmgHAdueFLqa23PrneXvNts3e5aJAsMCySlIokWNf2nOlVAZjxJ5VAwtcdcq0GIYRlnGHIGAv5lhuQZHByOXDh5FPw3JaHI6iM5WpkRZqIVlGe2xyUORpMUjenxSete8bPcXu4Q7ztlyIL6KIoCUEsboxDUIqpGeYZWxJ6Q3202jZ7npffbI3W1zTiUqJWhljlQaKhtLA5ZFGXjzriQjmScXcGZHx7cMcIcsMZDaU1Hm+Ju8m5vKhVFfLEj6mK4zBg4hTSbtFTJZaWpAkNoXuSF6A6FZW33jqK1ubd9wszapI8UggiBZhShBJkYIWBOekkcsPF3cdM9EX9luCbPuP8QeOO4hN1cMqIwNVoqwoX0EAkagDUV7yti2VfqMZ+zfcbMye69NkBHytlyS6qQ6G0kq2oStw6DU6DoZd2XlOIUB8tAAO3LLEWLdmu42upKeZIzOacKsSTTuqfq7cKjeXxYsoFxxCUFXbUjsrUajxHYnrS1jK0eQNcRIt0gSekhAFcO5zkCpXGCVPtJUlPyknuPiPietEG3Th60NK417zutl5eRFRgRZXkVZarW22tr1SD+HT5j/mTp38PLpv260liWprSuKb37dYixVOPdgUuSGmolxWS9ohXiIlXJeUPla+qdeZgSST+FCLN1hpR8g8SfDoL6hbGm9bNEGGryJGendp8Q/1Ax/sjDz/AC9dbr0p1/JMzaUuoEjJ4U/MyPud0r3Enlii/wB4/tztYMy2tIEJyTEdL6nQElf4td6FADule7QnwI65Q6g6AntZvm7Nfyq1FOQ5fVj9i+iPVTb9zsm2zcXGto9JDHjUUI9+KYOO84y32rcwS76NVOZFhGUJ/RORMDlvrjVGe4rIVtk09opJBD7TR/LWCFpeQhxJOikqjWrMkJhulJtz8YHxIw/Go7K592K23WGSw3b5nb2Czox8tj8EkZ+KOTtBGRHvxvcuUnGrt3a23Eli/ccNXclF3Ei2UMNZTx/OnIS7JrbOpVosR6h5RjTVt6tSmUesyT8p6zmtmdvmoCHnCgMBwlUcD3OPw99VPKk3a96sI7UdL75+T09LK0tvMw1vt9w/xDLNrORv8dBUqKToKhww0reFbrOp0uHhFVZ2aazHZ2VXLldEdtq6tp61gS35jS4YesH2FM/OkJaWUpOqiACRutba4vUD2qlozz4DvBrShHMHOuAm+NH09ePYbofLvUIBVfGGDZq6MtQ8bghlceEqQa4f3BeL19xk1NivH2EZ/wA1cl3r6I1Bg3GeKy7i9sJBIT+KQlmFXRmydXZMhaGY6PmWdAei1ntZlkCyamr+FASxPZXIAdpwmbr1Fa2yFtSqRwL+FRXn2k9gAx2newz27ZPx
XxxQxeTKjGsOySziR3/+luLy27w41YMNIdu5GU5m0pKMlm1slzWQ8whEX6ktsNlwJKujM8iRwC1jADHI04d4B5gc6ZV7cB9n2u0n3obkgaVwoLSP+HmFA/CWPAfFpFWpkMTi5/8AcPivtl4N5L5zyadHrcc4pxCwvmkuuIQ/a3rTP0mJUDKlkFyZfZG7HYS0j5ilSz4JOmloIpkEMg8GXu7x3/6MO9xeaKvj+bU9Z3vJd/mvJeS2zbEu2yu6y++sJzu2RZ5Fd2cizltLdWoFEltUjY02nd2SSQR3Iq62W6hVpofzLcHjwI7KjjX2V7eGI8G628hCS+CQjnwPv/XTFynsD/eR5q9r+R01ROif397fYsKLBzXj1/0IE2vktketkWAvuPqRDuxH1WuEpLUaWlPyhKzqQ5VlyYEHBJWVhVSCO7Hbv7afcZwp7uuLqvkzhnKKPkDAMvgOwLGC6GXnILrzW2xxjL6R/wDOr5rBJakMPJCgRqk9gevnHhj6SBmeGKv/AHOe1X/2lh3Ib7G4lpd+zLOb1VrmNDVtO2WU+1vNLJ0//NziaGh9S5gEuesfXBsbG2ydwChqTG07FuG83a2VqFWV60LkIpoK0BPEngAK1OFzqDqnaem9uk3K+Z3hjpqEamRhU0qQvwgcSSQAMJ/DPEnK+b0N9j0SpYtsTxSJNuavM4rrDNNkFdPiCwiSsQugtSLabOjD6hUUEtDulakrI0vbpqPpXoWKN5nMu53QCSMV/MjaviXRn5aA5GpJfI5gADkL1AXr/wBYY5vlwlptW3uZYIgxaGSikrIZMluGZCRkAIwWChWrqEcPLORX4VrgvHhmvZ6ZVVOpX6hbCnpyPrm4qFImpWt16okx1updWkgMFCkuaKT1Yc8thYu1zdBfldLHxVoeYovAEZccz7Mcyx7TvadXbbedPq6y3FykF0saglCw06i9AXhcVaMiiUqSdQxYBxljPKdDzX/1Ayh+vwnFI1Tj8Oe5aSkPT760q45ZmIo4DDrtlKRIeBUqW+2iOrvosjqutx3fbtw2g7fChllZnZQBQJrNfESAAB+yM+7HVk00vS+/pemaPXDEkbEMSW8sUAyqT7aUpxxN295PqLBMS8x5qNJq3H1xJYDroVX2qk7y1JbRtKGJyPnZ8ljUa69ukFrYWpMNwxM2VOFCvKneOB7MOdpvW9dSzpfWEnlbY2TKgoVkHEEkatLDNDlXMccL06gwPmnFYWKZ3HTKiuT6+xYjwJRbjWP0chuYzR3iAd0uEuQ0lSmFH5ykAk9CL+yjuM5A7W1asmpkDEZAkoQSo5ioU864sNdqbcbVNr3KWUhXVxofTrANfKlIz0k5kAgnhXDWy561w23tKmXDrbCo+pjTMcgRQhlNamrdQttusZbShDSPSJbdYSPlHl1kCrRRkDSqLTSAFAHKlMgBg3ewsjeVEohK+EaQNOkcguQGBzc5tNm561NpWgKiwfhPM16gR9C49sbkstK1O1IWSdCD8Bp1CZ5XOiUBlA488SbORba2oTpbVXtrXBao2q98vG0iBpaX3kH1EjYCHFJGiiCnT4eHRG2tGjAljOZHDn/TgVebvHcM1ufwkiuMeVV0REJbsGUlOiSUJPzJ3afKEp1BI16KwS0akylfZhcvLGKRdcZzOIMcl0c5j6jIM2U3FxOoV+pTXlKU4y+mKS60wGgA8tSlJACQk7lHTph2+ae6mFntKmS+l8CBeVciT2DvOQHPFcdWWMdrYNeb6UTZoSJJC3A6MwoHEnsAGZxW5hNjHn5vLltoXGi3V/NlRYqvlWzFnyni006nX5FCO4nVPiD26vTqS1kg2GM1DS20KqzDnRQCR7xxxyD0JdW249VzWrEw29/csyA/hq5Kgjt0kCnHlg457i5t8bnwXkB2Uw04lgBPyNqRuSEqHikhQ/D5+Z64ov2V9zncfCZ3P1scfq3s8LW2yWlu5q8drEp9qxqD92CbwU3zfc+1DE8F9l/JeA8dc7Yzy/kt
v7gGMmssfp8jusfelyF4jfIfva20E3EqmmEZqUhDZDpYW2ncUONOF7bySi+YNUensJoamtQK5kUoae/GiYTUpCQsmrOpAqOXHlxqMaXua5n/AG+JfNuYuZHgXIXIOZNtU8TPM64gyipo+Pciy6NURGbyVRxXLNKXnEy0FuS82ClyQhZKlq3LVmh05OVrXKoJIHIEhhUgYwl8ssSA3fQ0BPMgUxg9x2T8WWftH9ottw5R5Hi3H0zJOa0VNPl9kzaXseRGyUs2y5k6M/JZkCTaIecZ0UdrS0p7EaD62sl9VDUrSnZQ95+/GPgEaaahfFxPOuPuQS25Hsb4MeQvVLvMnIqQfiRHlA/HzB6+0JSnOi/e+PMRoB/eP6MF3DZnD9f7JqJXM+L5dltI77ibVNPCw63j1FhHuf7MkFEuVJlPstuQxBS+gt9yVrQdO3bNBIJBSgXy86iv4jTgR34wYxeR4wTWTKhpyw6OA8m9vCLPPavgWiyHinnO648vavi3MuYL6quccj5DJS2liFVuOqmxK68kgD03HGV7m0rTotO5pfjRmWtDDXxBQQSPrNQDSoHHH1CoDeXVZyvhLEEA/Vx78MP3Lv5ZC4b4axXnnK8cyv3OU2X5DPmW2Mzq2xuMd44lRX24tTk1rVMNRZMyXZfTOtpWhSHENH8akKWY18YQjeWNMRpQEUzzqQDwBy7KkY32wm8AmIacVqRQ5cgSOeABj16wwtpM9zVtxpUaY+yNWZEZ8FtwyI4JcivJSdyVJ3N7hrqPDpb/ABZduDi8B243c3wyqy+mag5AyXV1jq4cO9gFKpcIPIMmBLjSBqlceQ2SVsrO1SSkdjoeiF/DbXyeVPlIOB4EfrGDMLSRN5sJoxHuPaDiJUnjbM6e2kxY0M3bUB1h36qr3OBcR9SvpZK2NPUZLwbIKT4LSRqeq8vUaxuWt5lfw08QUlSDwNQCM+/niyNt6avt222PcLCexkaVW/JNxEk6lcmDRSMrGmRBAIIIIOPkSFcV89/6yssI/oCwJ9SK8kJ9Jt9QBJSPDT+XUOOaFpDpdaivPuxo3HpvqGxgDXdlcojFPF5ZKkEjMMtR9uFmvl+nXoCtUKflJ13Ag7IrB1HcD+uSP5daytEoOJOBrApJ4gVbPiCPsOHnVS06bSsadvMfx17/AMuo9O0E48zLkKjDoNhc4nZYlnMBh9mZQ3lXk9E+4hbbMuTjtpFmoVHdKdHWm5TCULKdQNdD0Z2aZre6SUg6dWXYaHP+nAbd40uLd1UgsBQ91cxiyD361EK9s+FPcvi//M4pyViFZRWM5pAS23MEZy+x52UpBUn6ifXTpTZ17p+kCdT2Aa96tfNtKrmYzQd6t4lP3/ZhX2y58m5GrJWH2jIjEaKuSJDbSm9VhSUkBAKjroNewB6r3yx5lDlnh5iLSqPJVm9gJw/IddKcQlaIshQOnf0lDTU9u5HRRLeFU1kqPfiQtjuszaYbeZh/VOFeRjmQSK196JUS3WiHG/VSkemlTaQpz1XNdrSEJUCVK0AHUm3j1uGjFUrSoGVcQrqCeF3s7nRHeLGHKMyq+ljQEKTU1IoKYk1XMzIf7el+t36eZIZzKG62S2uRHKIvIlU20oD5UviO0yClQ1bUUg/MnxfHqsK8CRbHll8X6OWK5ii/PkjQHUbk1ANTX8QyrzrXsz54i25XJu6hh6St4ztjTjcolaJLTjQ3tracSEln0VDVARtCDptA06rtr2e2vDKCdVePvy/ow/JZwXFoImUUpw+/34QuduS4vPvGU3gblksnlLj9uuy/gvkGYoCRktl+jzY9rhFy4oJ1t7mjL7UZ7eGpUhTCngl1lK3WfzF33bbi3fSkyiNgTkGbWKUHDUaaacCWBpUYC28jdP7nbSJrZbj5iMhQCVVIwxJNCQoBDE0GSFSfFTFb8HiPlaPGDqcHvZUZohTUtEMelJig6sSWlFRCkyGNFjzAV36ryTabtZmjEbHMgdhzyww/5o2N4gz3ESuR
mCcweYOXEGo92LMv2uuObDB7fnb3Mcj1UrHKDjfDJuK1D9uwIqXZbqRkGTTY6n1NpSqtrq6G0HB2UJa0gjRQNgdObbJbxfnKUeQhc8qKKM59mQz9vfhP3jdrW8rNauskMYJJU1FeQy58cvZgWwoPKGTcn5VYOwHsgssnuX7ZsxoanIEuJPeWtqQ28o/ksrO4L3qISsK79utPUezT7hvMmtCzVAUrw0UGmndT7cMPTPX+9dLbWlpalGsiKtFIoZSWzbMUNWJzrX2YLtJj9DPUtuZEdo7FtxbLymx6kRD7Sy24laB2QEuJIOm3oZDtd9Bkv5qA8Dxxp3Dqzovf2b5yN9r3Nvxp4oSe0ryB/s4dbmFS4jYdZS3OjDv68Mh0BPxW2PnQR93U1Y4vhcFJOxhT7cAZts3WFfmbFo7yx5SQtqy7So8Q78sN+Yy3HSQo7doPZSdCD4eBHWEsCgagMYWd6zsAa6uzhhnyLFKFqTv7AkaDt59z9uvQ1wBhlj1FRU41l2RaUtDnqNrbOi23EqQpJ11KShWigRr59ajkaHjjfSoqCKHHpu2C/mKlaadhr/r18+s1xrcVHfhTjXb0RxEmJJkRJCdNr8Z5cd5O3v8A8RpSVHt5E6dbaKRRqYjkZnPD9pOSb9+U0xYyIN4yFAJRewI05eg7fJLQlmaD/wCmeoN3FEsTOgp7DifaRamCk59oxLzCrCBYxEb6ZiNqkKWqFYSWmmh23EIkIf0T37Dceq+unV7jRTPOp4Zdv05433p8hakkjsoMP3dGZKxFSQST87igtYRroAVABJA8+wGvWKvAnhjz7ScAXaWU6nyHZ9OOEmXMaQFHdqR3IHiFdvm76bR9/Ua4mhAombdv6sZKjHLgMM6ztTsUneDqk6eaO+h7nspzXyJ8Ogc8zcUbPE6KGpqch9uIg8t5M9A9dYYCwkEBXclOn2D+n7vDohtkkjGjt/ThmgiQxgcxiEWX8kRjHfEprYRuG49xpoTr28f93RdxIjUHDEqKFWFHOK7OZMrx2ep9tt5tLit6iSQNNdQdAdDqTpqOju3y3S0DCq4HXdtCwJqNX0+vFf8AlEOunS3EpkBOmpRtIOu4qG8DX529fLyH29NUVw+mtMvp9uAIs1Z8/iw1a6kkImtKYkqQsHVt1Cir5Rp2KCQHG9fEeI/x69IRKhyBU8vpz+w4Y9sg0MC5IIyB+nEe3MYs89p/HNzdy2bCwjIRCZCXA+U/lydPONr33EePl8eo+0LbRXpZfFKOXNf63Kn24Y96jZ7ALJ4Yjzpx9n6+GJl5FfVeI5dEcYrVMPRK1UGKtKe4VLc9aW7qAA36gCUaj5Tt8urq6Yiv7uJ7yV9SICFHIV5DtoPfnjmX1Tn22zEG1QqFlmOpu2g5n2nL3YLdWInINK4wvew6pvspXy91DT+Gv2dj1o3HrK52S4oalRirJOmLa/t6kgVHHDcwn26mNbZS6YokwZrEBUsLaDiUKDj7aFpbWFt6lPj27pHQDqL1Jg3W3g8w0miLAZ0yNMjix/Rnp47JfbjECDbTxxMQQCKqWHsrT9WB1yX7PETVOTKNtdTYoSVRZUEKbjrB7gLbbBDaF+e0bD5o8+hm2epcduQlwdcQPAnxL20J/umoOLi3Hpeyv0IiASQ9nwnsyHDuI4d3HETp/GmZ4NMkJuqsN2DECYiJIZbLRmLWn0UvoeSlaVlDS1EAE6HuCdOrW2Teund+h12d0La6ORyqrHvWoKN2gZHlhF3K83jYAtru9hHu23K1V1MY50QdkgDCRezUNQ7SM8DnDKOsoncjyWYXoX0tamojvNSGluqsbySWVlSZKo6FKbjR3B+LX5/t6i3fSu8TXPmw3cbxmpBAPLtBy+rELc/UboS9tDa3WxXUUgpqHmIwoeSkCtfaMSr4qei8pv0fClxjw5ew+fOE+HQXqjXXmAuxkhT+WYllkKWmXjX6Uz87ikvtIWAE6K1CTsi27fNuo10sdxZlgG4HicgyE+Iezhxy
445f6y2/o2yv5OvfTrc7zpbrWyiZ0leqxSoPiieRAVcOKjy5Vo1SB2G3bhXgXj3hnFZljaZNZZ3aT5KYFZkOXx4jzzFIV/UqqIFZDQ3Hv57DKEJMuQkLUlIU6ATqWq32aKe9VooBHEBqKaiQDwBLHNVrWgzPIE4593f1KG/2km8otraX10Y4nmt42SOeQeKd4Lfh5jimoKEjT4mCVpg73nuPxSsm0+LQINfLiRvo6ZqomVsL17PKJiQqWlEaEoIWYDC06Lb+ZClBIII6z/hsTSOjyaZixbUjEBY1+ECuYrzBFMqkYtbpnc5bra4mEKybZBCqFJ4kLSXL5ux0Eg6QQFKnUKgAg4LmBKwe0CF10SLTyZ4lMu1NnOf9RT6QoPsAOqVJhrQtO5SJHy7R2X1jdm/giOtmlhSh1qBUDkTyYcqrn3Yzsununp7zXtcSW1xKXUwyudDH8QRs2jIOZSXKgyfERPeZnsOmwiOlhr1a2jdsKp2mnPqjuZDZIeQ5AiemAFmHIfUHXAjelTaEgnvp049GbOlzNJd3PE6Wqv4QRmAf2stPIipxUfqRv93ZXe3bFYCSO4WSSNTIKikbAtcOlPhFSUrVS+ivw0xHziBu6zDLePUqcbm1sdlLuTIsH0Sm6qTFjGdDfQpRQ0plncULZbSlDSUpB8D0akkeytLqRgwmeugioLajQjjXhTTXCXtNpYT7tZ2FkbdrrzXafXUhUozFiy/i1UZxwL8aVxMbmT3FQePOP36Wls2nLKXHW7XyluBSyg6tyLNplshKwpRIZGm3UDT49BNo6aO4bj87cKRboaMBlnxCVP8Atc8MO89WtsWxLsFg1ZyS8byEFjU/mXLj4fFwiWlFABpipnB+M+ROd89m2zbiqiPCKbG3y+7SH6jAKhSyWZjhe0Tb5zYNFS4cIqUiMpYcfIACemfd9wgihNtGa2wOk6PikbmkYH4RwZxyFFwrxXtv07D8zeRpJvrQ+ekUxAjgh5Xl8zHwqxzhhbxSuQWFKDE366Jx7wrgsyqp3o2J4PVuSrvI8gyOxbXYXlkQXJ+R5ddyChdjZyVJK9pIQ0NEpAA6CTvXQHAM5A0qooFHYo5DtY8cc7btvnVnqfvjojSPZiTMvUrxpqelcyfhjWp4ceVNfut/dGx+Vi2T0/B8iY1TxV/prOYIbK5+Z2C1raVDpEBQVT0Go1cmujctHdpJ7HoFfs8fiyeQdnwr7P2m7+A5DHS3p56NraTwpfKUnkGqRmNH0050+Bf2YlqxP+IwGWIk/tC+5e0ge73lFjObRdlbcs8Qy2oTTi3AWEYvfsX7lTTsSlKCoCYaFL8A4+tJWslXgg3duby6LOx8/iDwao7OeWLh9XJ5OgOitv3jp+IptdnuISfSAdQmQoJJMiGJfwkNVQvhyGeJ4+8nizAOS76XGsfosflX7i5dHbKSGKq1dG0LlwXHSlMe2iuOBudDUoKdbV6qAQCQ0WezTdUbS09yoO82R0s1APMT8IbvI+FuFRQ0rgp6TeuNtYTQWZcHpzcQfLpWkEw/xIqGukfjC1yHDIY58PdRwHdcTvrU5HUzIYKvVVHUS2pAG5p5paQPUjvt6KQvwIPVfbzt8SJWlD2HKhHL9eOzNl3R7lgymsZFQQa1Bz44gOxmNixMaQ0l1ySlYDSYxV6zih2G1CAQpf8AAff0jTK0RoCQv2YdE/MHCpxKDF/fFyhxDidpVYnNSzkc+vfhQbGY4FvY367Smlzo0cktPWzaFflBR2Mq0WrwAO61Bb82VSUrQU/Eewc6dpxGvdUVYYaG506jXhGv7bngO4cSeWIyccVWQ8kXLDS2bK5s7O/jV0OA4uRNtc5zbIppNfSx3HNzswyprypU+Vrq22FuLIOg6PXe4tYwRW6Cu6T5LQZQxD45D7vCg/ExA7cVZvdzdy38PSfTup9zvW13M5z8uJaF5pDw8Pwwx/ikKjHeV7EuIIHtY4G494pq3I6rSrZfuswsIyUGLcZnkJbk5I4p
kgper2nNsRpJ1HosDTx6HrIJ5yjFimQqTnlwNe0ca9uLDNoLGwSGAeBFoAc6gCmfbXn9eJ/5bjeOZziLMZkIM79SizayqJDkyLZxUOPTE1gUC7JhSK1Lx0Gqmx8qtex6ZNtmkt5l8wE0BGrkwOQr2EGnt5YVrq3S6t5449OYDAHijKamleKlNXuyxDS55OqX7F9iKpbESuU5Vwozm1tbDEJ1bLvrIKdUPvPoUpwHuDoD4dX7tOyfIWKRHOZhrcjgWYV+oDIcuJ544Q6u6rfqTqW4vzVbKNjDAhrVY4yRUg8GkYF25/CDww7MSz2AspVvBHl3BC1eHh4aDom+3s+WAp3RYBUUoMPLIOQERa9xSHAkrQT2V4Ag9v49SbHZ9coqMq4Bbp1CVQ0OZB/0YhjmOayrKY8Q6ot7zr31Piew/wBO3Vi2O3xwxgU8WKzvtyluZTn4cDV+/eSSAo9/Eef8/Lqf5K+7ESOZxnzwnm+k7tUrIOp76/z6+eSo5Y3meTTxOF2su3nSY7jitH07AVHsl4d2l/Dsrt9x6+tbrkw4jGk3si+Ak0P38sba7CQ4hCFE7/zY6h8FaHb93frzWyUOWRxEF5I5FDRhh7cfUUqbZNLWgqAJc1I79hr3J79LW8mOKMgYc+n2keQHniw3iyldhMtOqb+dehHbXVSj2AH269VJvUqO2kcBi/8AplZY0VueJJqkOBCI4I2Nd3Dp+N0jRX8EAbf59KYRAdWeeH+SaVlERPhHHvP9HDGBx5IBOidfjoNfH/Z1sUVyzpiHJ4Rq548i0+mT+Ia+Xx/2agdfDbhzwyxvjvmjSgOE1y2U6s993n3Ovn3PbsOt624Axpa6q1WxssPep8x0AH+JHXitDp5480iMKjlhYRLSGyCRoRoRp3/3nrQ0R1ZcsbUuAq0bhTCHLfQpSlagAak/4ef3dTYoyAAcB7u5Wp04Tsbnl+orgVHd9HHI1Pcj00+X2adQmiCqDTKmGI3ZeZs89R+/DqRJ0T2/x8j9nUVogxzwRiuwqUGNhspdOhP2j7+vhUrwx9SVXbPGN8hHynsR2PXyNc9QxvuZU0gDswpVDg9QJJ+XXVX3a9aLlcq88TNsdGahOFeSoOO/KdRroNPh/v6jp4VzxMnZXlpXnjZWwFM7fs7f6tOtYJDV54klKwUwxr+lbfivJKAXFAjVQHjof4adv8Oitrc6WFDhdu7JXrlU4r45qwpQckLS0SnVRGifAkHXTsB36s3p6/DKKnPFOdX7cY3JpiGwk2mJypj1ezUOPPx1xim7x3H8ljBtS0OqKIOSVdrCZd3tD8xLaXAnVO7apQLlMkV2qrKXoDXwO6H60ZSR3Vp9WFPab642ovJAIiStPzIopRTjksqOoOXEAGlRWhIwUL/3bZpY8Ztcff2biUezVS4ti1tkr0WNZ0EzGcLtp1xj8Sv4rsoEjjihtW5k1XrzY0EOOoSAlLZCSkHb9E2Ee6HdBPMYtckix1KuHkUK5a4Uid1oMlZ6A8zzd7n1b3G52MbItrbLdmOKJ5SA8bRQuzxhbNlNtG9W8TqlTQUC5UNnt7Z9yVI/Nsrrhiof4+zTHm5H1T39j+32zmzcdcRl2HyKrI6JWD5HIr7G/rIrDrY9WLIjSSV/lkkrvUrdMXAEUN843GCSlB5t4oD/AJcgZH81AQjMQcmDDLPFjdFr1rApnvdtiO0XUINaQbe5aP8AOhKyR+RIVaRUUjNSrZ5VwDuTL73KYlkkzOeTeNcYxRWW5PPuoloeLOLrvGnrWbNetzAiZTFoLpmyfQCopLs56W6hClrWtQWvpk2m36YvbRdu2q6mm8mFVK/MTq4UALUxl1Kj2KFBIAAFBhE6hv8ArjZ9xfet+sba3S4uGcP8paSR62YvpEyxvqP9aQuQCxJNThyck+6bK+TjUsxsQxfHY8a5vMotTaxmOSF2GTZIivRaSapXIUC9/tKlCa5H09bX+lHZ1PdXy7dez9F2m063eeaVmjSNdJMGlErp
DeSU8xvF4nepOBfV/q5fb35cNtbW0CLLJK/mKt3qlkC6inzKSCFPCNMcYAFeJ5JVFnUpHrOSFtJekuuyHkx2I8OOHH1lxwR4cRpiHEZ3HRDTSENtp0SlISAOp8+1rUBa6QABUknLtJqSe8kk8zXCZH1M7q7uVDsxJ0hVWpJJ0qoCqM8lUAAZAAUGMFvk77xUtt0gHVXZXloTr8Ne3WyKxUChGA1xvDM+oNxOB7Pz2fHX6an16a+O5XYDw8/h1MSwjAqBgbcbtJJ4WJxrs5y4p1txb6ioHxCjqCOwP2nqXHAFFKYWtwYymta4J1Nk9Vdxnq+1OrE2O9Ek7CAsx5CC0440rXch9rs4jzDiEnyHUa8tTLC0a0qeFeTDNT7K8e0VGIe3Ttt9/HfCpKNUgZVUijr7SpIHYaHlgVZAj9Rat8Zy5DU6zrkOB+W4lJat6xaSYN3H0AQpuXGKS4B+Fev2dIN1tFuF1JGBZtUaT/u2HxIfYeHaKY7T6S6+nvbGNBMTu8ar46086I/4cqjtK5OOKuDXHOf7vcfxljOLeFXRmDFTIdBa2p26glWqf8vXM3XMdvZ7m/yXhYHljuL0/t7vetgjk3PxlgOOfsoeNcRLxhasFtm7mFjWO5vTy0hmyxnKFz4geaKhqIdzWOJkxXPlIHqtvt/9nTpSh3FlYM6kPTPTwPtGDO49JmmmE6k7DxHsP68T+4DveCK/IaPPKL2s5hT5rSKkO1rsXmuZFxlkSkLbm18iLCqm5k+glIcWlcN/c0UKI000APbfeJA8k8AIM1NY/CSODFeGrtbicBt0sOobnb7baboobKzDLE708xI2NfJ8w5mFDUxoQRHUhSFootV9t3I1jHiy6fi/j7BeEcSv7QxbKh4XxlNTd5rMdWp1dZkebOtP5TbRH5CikR0PIjjUqKQAOmaOa6EelgVVs2AoCwPDLie01NOVDiqtx23aPN+ajkjnuEfQrULKjAeI1OQb9kgV51AxdRhaxx9iiGb+XEOdZBFaeyAJfaTXYhjUFKnq7GYr61BuO0wkl+a6pQCnNVKPbXrW0bM+t/i4AU4Ds/XgjZXENrAEgyt1zHHVI5+J27exe7HIz+81+5NH9y+aVHtw4ZtXLHgTi++dtMvyOvd2wuW+R4IchB6A8CUu4phiVuM1q1JUiXKcdkAFPpkYtP5Ryzpx+n0zxNUNOup+J5YoVvMklz5KYbIbjtNrQ0mNF3FhhaU7UtITuJmTto0ceVqdfE9aJL9pDXl9Pr9pxt+VUKTT6forg98LUDEa3Fpe20TH0fSKn0z1k60Y79hXOJktTXkS0KgzhGU0SpMgpQpBUEgjv0Y2ehmMczBFYVUnu9uX14B7lbm4gMag1H2dn07cWpe3P3J5r7Ns3w/mX2qZW8vKrx6UvlviC5mA4PzdXxEIsp0sswwippsvTFWtUFcNKVhlIICmwQGYwWTQiBhUNxGVa8dSngDTOnAjCE/zlrKJrVyLqPI1zDL+y45qfrHI1x2/+zz3lcNe/LguFy1xuppxuVHexnlLjHI2Y713ht65HMe6xfKqd3eiVXSUrUlDhSpmSyQpJ+AK5tZrCZVYny2zjcZVp9zjmMMG2bhZ75BLHEoS7QUlgahIB5gH4o25H3HEJee/Zz7msRYn8Q+0DO00ntu5kuHZGQYxc3/6e/wNKO6Vc1lRdvPKuGuK8pDpIagJdmNvD0Nu07i97L1DsbXC7p1BCJN2tl8LAV84cFJX4TKnItlTPFOdadJdQ2NpJsXSd00Wx7hJ4odR1QcSwUirrBJ+ILn+HBh4Q9tWA+33FsZhSnpPKOdYRFtxG5Aejqoo+Pt3YSq2gQgtbkRii9RH5S53rzSn5w02o69aN432+367e4H5VtJT8uuotp4E9rU+LSAuVKnC7050WljZxbMwD3UAbS6LSRVY1Ku3wrEDUxoxaRakhVxH7nL3h8ScYXEmqkzo+T5jaVUh6TV429NyGw/QkSEtSbO0tvrGKyipmHB6Zmyn
2m9fkCiTt6Bzy/IQ+bcNSJWFFC56jwAHNu734tnYuhtt16I7SL5maIqXeRmZ1GbEKKkDkWJpTInETOH/ANzrjXEs0yNOR4jl0PAHJ0aruo0+xg2zq4Eh7VD7Mdr6eUtMBavUYfZStTSh23JJ1Xtw3IX7FCClxHQ1yJHLOnCvMYsCx6Rg2BYp7FP+TmBUqxArlXKtK0PwnljoR4Hg4bzBjdPyJxVntJlfGtk21Ys38SUgz4fpaSFVVhXpPqsW7OzYrsAD37eHQ2bc5LdPLkUmUigpmpHCoPZ3ccGoLEXDCSNx5C8ScmFORHb9mCBMra/PjbJXDElSZD81UQOradDzCVoQ7GWhaSETG0ALGuiXQCdU9bIZBE8YuFD2/Ag8CD9tRy7u/GzcoJt42ie1sZfl9zKlopMiVdcxUEEFWpRhx7MDbC8SqRYJcsklh9qYlMZLzJVLdCX9Uoc2ates1t+Y6nXTXokm1xW6vIZG8nMgHiAe/FUXvUW7TNBaW9ukt87KrlT4QwPianGmRPKmE3Osgrcfburayso8WpgKlyHJMotwosZhtSyp18qV6aEjTxJ1PRCxgknMdvBGTO1AAKsSewc6+zHtwuIbRZr6/lRLOOrMzEKigcWJOVB2nFWvIHvims3jzHHcNFzCYdKE2d16rNS4lKiC9CiNlEt9On4VqU22fEbh1dGzel0lxbiTf5PJUj/CShk/tNwX2Cp7aY5m6o/mLjtLs2vRsAulU08+aqxHvjQUZh2MSqniK4RoHuYr8/eaRmk1+JPSfyxOUkUrKjoD9E42lEeMkf53kIV/2j0bHRS7KhG1IojPGnxt/WJzPsBp3YUR6rp1TIG36Z1uhwD/AOEp/cpRV9rAHvOFyXxzhmSFNpUelXWCimS1bVS22lLc1DiHlJSFR5AUsA7lJK1AdlDrD+JX0CG1vVMkBGko2eRyI7vuGN8mwbTeypuFkfJvlYOs0ZCnUDUNl4W9pBJHMYIMzD7eTUtWkdtNiy40Y9mqOj84T4yQh9ZifMrbLa0WAhSiVE9ceboixbncRx/As8gHsDkDH6UbDLJcbFZTTGsr2kLMe0mNST9eK2PcRxWyuW5bstegNyvUZU1tU4oan8waBSFAnz7A/HqMszKKDBBoEbjgpftq+0bj73Z8mclYPypJyurqMRw6vvKh3ErOFWuuS5Vx9C8l9yXW2KXGwyAQAlJ17knyJRyeVbm5YamDAZnLOp/RiC0Qkm8itBQnL3YvQk/tL8DzsNxbj2fyTznOwrCZVzNxTHZGV44YNJKyCY5PuXoZbxBuRusJbqlub1qGp7aDTrP+NPSnlJT34+Haozl5j0936sOdn9sLhdvD6fAF8g8xycOx+0m3VNQycix9cOvtLL1BOmsKRizcj1pIdUFblkaHsOw6zG+yAU8mP/a/X34xOzxkUMklPd+rC6P24uIv7Uh4OvOeVncRgXjmSxKFy9o1QWb12K5Bcs0gY4l0yFRHVIOqtu0+HX079IRTyYuHf+vGP8Gipp8yTTWvLj9WKv8A3JcM4bxjy5d8Z48qfKqqmqppjD9u6xImrcs4n1D5cdZjx21aEaD5B26xuZT8vHcoNJcVoOFakfox6GJRO1uTULTM+zEV7XG4tN6q2EbdFaa9gkga6b9R8fh0IkneT4jgmkCJ8OB+/k7cN0p9TapCu+hKdfDUapOmih1pON65Ee3BJxfksustLalNh9CUU85DwS4y40+orppMltX5brbUwegsnXQKQOowvyyeVL4k4d47CDhiMCai0fhbjlwPaKezPLvwvNcq1NBZ0F2wu3pXpF9WVtrBq0pegzIcySGpkRcpe4RGlHUtB9H5TwG09SrTcFdxCj+E8ajOnf207fswvdRbDZ7rChuoo2njcaXNKqCRqoe8cueF7ktqlzTMJWRYddZFWJmY89DsWbBcJLMewj7vppHoAFp5t6NJ0WpvRRUgeOvQy7sFuZmeRImDKRmvPt4V+og4N7Lvm59NW0dtsl7f
QRpMH0pM+mlQSNLEpyyBWmGdQY3OkWcnHskMz0otY3Mr8og1sOTCekSJRC40iDMZZcc9SNsUCh1CklJBBBB6EN03C1AvgpzVm4+w/rw2v6sdYagXktrtCfEtzbxOch4SHVQa8a1GHUjj2VBcamNzMRuq1l9hUyOqusKu0cil1IdbbaZTIbDqkEjVJUE+PgOozdNXIP5cuXex/UcSD6mmeFludp2vzypoywmgPbk68ONKYIuUPU2fzW2auItLNLQCorsLRUSVTqiWx8wg14eVGiIpllxDzsx5YcKQpxQJIHRWbZ5JpI5FlMaJHQIvAHuNK0ORJOfdgBsvVdttm1mzu9vtLq8eV3aVwxY6j2AgCg4KAAK04DEpfaRk1NzHxjnXsr5TdhVttGanW/FlkC3KZbLD7lgtmnddV6c2Rjd0gymghZEiGtxsaNsr6are2k+QHnnWFXS1DU6TwOYGanh7hwGFK53dJN4N3axRQNUMqhfy9QFCNJJ+IcR21PHPES5b2ccXZdeca5zHRTZJjMpcKQ2iMluPOYB3Q7SueW2kya2xj6OsuADclWhAUCAs33Scit58JRomzBqf1YbLL1J3JFNtqELrlRIoxgv4qxY3TzK7S6VV1xR6/qSHHWlyWwrRDbYabV9I1Lc/KRJe2MJWr8Wo060Q7TNH4X0afefvGMLrq7cLkFjcXJc94Uf7JH1YeOY3cKhopkSsZmVzFqy6yqqRbot4jitrjTsiXNQhDk1yPuTsGqUdx8n9XU5IWt2BjQqteANR7Tw+3nywnzfN7pcBtxdJPLzV2FHNMwoJLcTx7BmMHzHVKH7a82Y846pSrm0fUsrWp0Ji8ruR2iXN24lLUZPcHsOmcSEOG5i3P6cDdAS3Ih8J86uWWZbPh78Ryxy9H6XFHqBaQhJHqgOE/lg6FS0q1B6Tpdy0yFGUEV5gdvfh2to5TArV8VM8Q19xr9I/ll7YTnIjVvU40tWPpUEhResMWcFwWWwUodcRUMuaanRtJUrQ66jZbvaSWF9cS1EyCMoOXiYV7BwHuzIwA3S/3e13ra7GzYfw+4a48/LxUjVfLo1CVBcjURTVRVJzoXNjlvlnJuU4txVx6h61yzJ3WIcSI26+piDGDaFTbazcZ9QQ6uqj7nX3DrsQnQAqKUn22xyXVwWNdAJJJ5DvP0rw4kYkbpY2lvGQqJrPZxJNe7nxxND3k8scWe3rivFPZTTrk5VcW9Gm15LXVyvpZoDxM6M7cyGA6qNOyO8R9SI6l6ohMhCh6Lre5gvL6C3jPmKWLppVexOGontY/p5UwItrBWQQIdMStViBxbjQfTsxF/hbI2oUaBWT5tu5VTIkalmTWJzn1LEJP50RLD5W45E3rWUPoSBqNSCdB1AsJmkYLI7NERp48hw51HHMdmfLA3d3e3B0gqDxINTnxAPZ93vxLa7xavo4TMqoZjwmoyNj0Ndx+sW08ep8891uPHSzBZhpO1e9Q3dj469MHkJGAFAA9tcV3uIYMWViB3sKn2AcKc64Q405wJK2HnGHANSppZTqB/mA+VXw7jrRPAkq6ZAGHeMZbRu17tlwJLGaSKSvFGI+scD7xhrZFdEtL+tjQ5xAIUtaPQf79uz7Omp+8HpdvbJY/wDCYp76j6ji3tm6vmvCF3e3trsc2ZfLk/10pU+0YHU2hZlQnp76p9ClTLj8ZyQ0idWuegXESIciRHV69fYNqbBS06gFaTqPEagjbz6GZyuXcad4PGh7O3DNLvvTfzcUNtFcorZOoZJGQmmlkVtBkjOYYg1U8qVp5yy2rMpbqn6+WJNvCjswbXIZCVw4t+WGktpW0x6Z3uRFN7S+VauJIChqnUy2g+cSL5cAy00sxIFSPb2dvPniHs//AC1vKt8wgXWWjiNWZUJNASoNDSnhOQrkThstQkNJ/wCctYzaQO6GErdXrr5FXpt+B+PUldpKZzSoD2LVj+rGMu5pxjRj3nIfrwnP28JEl5hmT6bbACWnpq0t/UkJ
JWtPplQQhSgAjXXXXuR1mNqkkYiE0UcC2VcvpTEeXd4IUVnqxPELmV+vPLP6sPTAcpra20clTXYh00ZiNvpjvADcFPPKBLiE7toCD37E/HrZbWTW8jPMgZCKCoB9p5jA2+3vzYkitHKuDViCR7F5e09+JfYJyBHu1OKE5lxlTikMpCGo6G46Vn0m0oa0QSlPcq8Srqsepw73TSPkgqFoAAByFB9vbhltinysYWpk0jVUk+I8TU9/uAwWV3DYZKW1+osgHekgJI8v+0r+HSRJMgUopOZ9g/p+zG0REtVj9P0YbM2c4r5wvv8ADXt/AdCpzKeByxKRVGVMNOdPIB76jQ9tSNNfiD4joXLLOoqc8b1VeWAFycuudr3lzNAopXoSNRoAexI7kjrK0v5o5aAHBexR5DpXhis3kawx5b0uF9QykgL0KlBJHlu1J1Pfy6cYrqYqJCDg4lvQU54rs5YxaDPdkKhS9HCFFIaWVFW7tqrQ/h8gf4Hpl27dSlNa5c8QLvbHkNVrr5Uqfp7frxEmXxrkL7zmxSo35hKXZC/Q3/EtgncFbfs2q8D8emNd726JQQdZ5gZ/Xyp78sbrHo/er0giPy4/238P1A519gz59uHZh+Gza6xjJmKVZuJdT6YUNkXenQhIb/4heQfJWn8R1Cu91mvF0Wg8pT72oe/hT2Yfdt6WstqUS3refcrnwog76cT7/qxc7wJIXi+IGfaxNsdMb6h4pQEEMoSPlAACG3HTohJT8pKh26f+jelZJoVaIf8AMytQc+PPvAzJ50xVHqD1daWLyTXLabK2jLvTsX8IHaxIUd5xlv7esyKWu0nI2OuL3oS4PnjtakoZHbX5Uad/M9Xja7NNtluLK3roAzP7R5n34403jqUdRbg+73ZAlc5LyRR8KL3AfWcGriu8qC0mIh1tWmgG1QBSfLQdikn4eHVZdYbNNrMrqaYObbuEVxBpiYEdmJJVV7Z43IXMq1sOtvtpbkxJTfqxpTSdVIS4kELQ4kklKknUanx6p3cNqguk0PUNXIg0p9mGXZt7vdjuDPaFaMAGVhVWA5HmCORGHQnk6gmoDF1SPVatdfqYafrYiF+KtW0gSWm1H4JIHSdd9PXasXtpCxpz492fDFobd6h2MwCX8Twt+0njUe0fFT3Y35eD4fyDWp3Rq61hr3lmUltiU204ttSFIKFhXpOFKyFIUAfuOhAuBt+srnRE0iE8aFgSPaD9tcPVpulheRpdQmCeNGqtQrrXsZT9oNMu/AmzXFotZgrnHOVYLwchNwuyh01/k1XBo4rEGJC+irpVvVwa2VbSJjDpJTJhqSrencopOquu6/TbrUXmzRx75BG1xHCEyUeIdrADjTiRSvHjjgD1Y/loe/60/wAzdHdR7ztdrLfm6miSaSQK2R8mFnlosLNWiOsgUNShAGGl7PuDOOvbvxNb5ZZ5ZWZxl+fWVlWJt6OTOfgIxipkLLNXX2M6Ih6JGdklRdCQ4skAFXYEEZL62n3WO2aQfJRUYGoILnOhFakqKZE8OOK99dEurhk2V0ZZkSklvGSrSvoB815AulVpWqrVtRrRcCHlXnrIsuynAcep3ImMUxZsbZxLi3IdbUUkewdRHkTFOrK3GoVHCceWtRKnCO3cjpg/i0MMsrs35S0FTxZqVNAOZPADhijto6B3fdo9ss7S1PzkgZo4IwQsUWvSmosa6UjBLuxz4nEHMf8AevAi+5CszDFYb2SY/ht4yzh6shkOQYk8w5anX7+bEj+o+4u0mAyPnUglIR+Ea9IV1uU8s8kSCjT+E550PIdmWWO5dq6Lsti2i2iu7j8+28arGgZBIMwzsxXWS2dAADl4sXjcae4ui5Yp8jznK0QcCuOS3VrryGR9JNrFOiDNyISYLgeq13K2/SjuOttJWgKWFqSdS0bFFLNHFbW+plts9JNTUZqADxVeJGfsGKu672ndtqe5343RkmvQF1RxhQob/EdkWoEjiiBgaUqSc8OXknC+PuTM
WRxvkqrK5gw4D79HlMZ4vz8XnPpacLUCcwX0RIrDTKVqRMbUw921LfZXThZX91ZzNdxFUdyA6jhIBzINKseAKnUBwrwxS7bntF/Kuy7+kk8kMTNFOFOuEnMxUGpliA8X4kLZsF+LATr8RuuCaiumxLuxyRmdavtWGbNULkisro5hLbrqm3qYX1yYTNmhe3VRWy8oE+odAAbNzHvTtBKqRLoosYehahqWDtQ1BGZyIGVBiDBt1j0vtY33psm/b5tdc3l6vKVgQBJGNZJbVRFzQnOpNBhHwf2/ZZzTOn39ncrcxhUgJsczcZFeiehlwbcV45gSm0JUYyTskWC2vp4o/Albmg617p1Na2cS2US+Wi/gBqx/ecjkTwz1NwqBngVH0ffWrT7lH5V71E3iDTERxQ1+FNNKNIn/AEenTF8TqWouM3uS9yXEPszwOxjZNbVuN41TQHqqoxmjcMu9ySUGw8/V0iVelNs5E5wgSZkpLri1kq3IHgJg03ai5QHUPEzvSigcKjhXsRacq1xTb7F1p1N1PebH5weCZjDNJHXVMWp5ixykL+QB4ZJ51kOR8kKccw/uP90fNfukZazLNlpxji5dotvC+Aai7FXOuq9QW1Btcjsti25tg04pLqo7iNmwHYkn5+ht3uCICFJ8R4nNm9vYO4ZY6U6L6Q6f6MkTZ9tiRr6GMF5gh8qI5VVB8RJGXmmrE9gyxVJyHk97j9zIxybE9ExNgK2C5o64NrhmNqdQ3udaJA27QlQHZJ16DPPPOPyzUHt7O446J2HbLOW1+br8WfI07iPZzHPDl4p5hvsB5X4/5nh2cSLkvHFrFuWUwfyGMoqUH0b+tmA7G2FTKkrQqINqHFjcnv2OMdlGrLJLkynge3uPPA/q3p216k6X3DpBlZrG/gaOp4xScYnHPwvQ6jXLjjrlw/JOOPcnx/SzIIh5NgueVEXIsfktMNTH4qZDCkl+O05oV2NaVORpbB2qcCFo0CtpBm0nn2a4G42RqhWjilQyc8uens4nH51dIXW79JdT3PRXUSGJ1uQki/AUuIzQSK34fMXNX+EgioIxBj32+yPPjSN5DxZnTcymfpYUN3jTM1xpMJtEWOpuPZ8a5nYMpDkaSgArrJrrMho9k+ppr0PuF2LdnePUiyuSTHIaBq51ikFAR+63iGO+uht/666WWOC/tpLrplh+TuFmDI0WdfI3KyaskLgkgXEGuB6ZsvDHO3n/ABDyLxdFr7vK6Kwh39oi2ZRjFRSrdeQxFeSxWSpNxE9dqvsrN1SlOMlSw1FbLignckGs+oNgsrM6h5kb1P5Y8QI/dNagn6u/HSfSvXe775O23WMCSppAW6cCOIMalmkVtJZEGVQasxChTmcRnrsDym2nCXkCRTtuyBvcsiGZskKc1DMCvOr/ANMdNEpWEqP4ld9AA1pNBYN51yAZwKKimoQdnt/aPuGHe+tJ72L5LbWIhZtUs7V1TP8AtAc0XhGnAfEeWLf/AGD41h3F2eVOcXTSLG9rm3ouJImIQYOKNWBCZ02tjLCki1lpO1UlX5iU9gR26hXV5Hd3L3DUMjU4chyHbl2cAc8Edn6TTaLZ5ooysktDI7f4kpHDV2KuelRkM+eOqTiLM42Q1sJbDgWFIbCQ2QokrAPZOuvf7Oo1u+iTLjjbfw1irTMDEhfa9kLvNnu1y7GKa1jJw/2+8a5LElLTIbKsm5ayuJGjyoNezu3SmMHxyQfqlIBDL0sBWhHT8zna9mhu5UJmuZ05fDEprU9hdh4e2mKbudO9b7c7XbSBbe2tJQxBoZLiRdIQDmIoyzOeFWUHEF/c7jFxx3z/AJfRstuM1ORKj5lUBQIbS1dFxFpGaPbb9NcxniQD2Dg66Q6PcbpskUvF4/yz/Z+E+9SPqxw36i2f8C6mnVQBFOBMo72JEg90gJ/tYz4WuVHaaddK09gdO6hqex7fi/19OAsjwoPuxU99virUBqH68OrILKU/HKPUJSUnslROg+J809Er
O2RGGoZ4UbvdZLgkKar7cBSzJBVoPMk+f3n7fHowBn3Y0ROGyPHDUebWo69/u6+1zpiYDz5YwhpX9X2f6fDr4B9ePMzHLljfjq9JQPcaaafEEHUEdbRSgpiNMhc1HHBOx2pXbvtOtoKkyNq1ADUCQyoJdGnfTcnv/HqFc3KwoanMfccZ2lnJLICoqD94xN/iTjpW1b7rGmjexOqe+q1BJPhp1WHUW6imhTnXFx9IbE7t5jDKmJhUdI3Vx0LQj5kAIZ7eLm38fh3Dae/39VhdzmZyO3ji+9stEtYQwAy4e3+jCuRtToPL4+fhr1E54n8qYTpbmxtaidAB/LuP9/W+MVYAccRJ2AFTww0JExxxwhJ176fYB4fZ2HU9IwMBJro6qJjbiJUtQJ11Pj5AJ8esmoox8SZ3zPDC166Wxt10A0Gmo11+zqN5ZY154lic6eNBjwuZ8p7/AMNfj37j7D1sWM41STNTjnhuzrE90A6Dz8ft7ePl1NiiAzPHAa5mJyGMtSlyLAr/AB0ERjuPgEDQ6dv49DE0vGPZhruNSXDf1j9+Hiw96jYUD30AUPgRpr1EdArZ4mQylly44UmXtp1/2+Q0H2dutTKDiWkzA4zvj1kbgdVAakdu4BIPf7OsVopI5Y2vI0i58cY4zqmApzv37Dvp/p36+vGHyx9t7vyKmueFqC8HCkKPfsRr5/z8eok0dPZXE+2vA7UJ4YcDSu3fwT/p4/w6iFKHLBlLjUOOEaxVvC1J+4fz8f8AHrdCM9OIk0hNWGI58nY8ifDfWEakpOp01+Pfz6btmufKkA5YrrqqHz4i444rP5GpHIkqSgJ27FK+bwGn8u56tnbnEqBhnijL65a2Zo8+JxGrIXyzGWNdpAWPt1Hmf4Hpkhi8WfA4W1vay6q51xPj3c83MK5ptqW4489uWYivpsfYiWHI+D5LJy5MVqsbQhlV3iJpGXa+MUem0zKkS3gEKIKUrS2isujOmWTZUmhudzhLSOSsMqCOpY/hk1Gp4kqFGfaKnoz1G9RYpd9kt5LPZp0EaAPcQOZqBaU1xaBpHABy5yOYBAA3m8o2GXe1bk6jj1nFGO0VHyjxZJg0HE2HWuKVceTaRMxFhKs3bmK3Y206WmtYAdcfkFKGQAoA6dGIdlis+r7W5ZruS4e1nBeeRZGIUx0ChTRQNRyAGZwn3vVs+4en1/aItlFaJeWxCW0TRrU+bUsXFWY6VzJOQ44jpAlkNNqP+X46DwH8enVoxwGKSe6d3z4YcTE1ZSnYrQ9iR8P9/UVowDQ4+i4Y+3G+JrriNqla6+Pj9o/1dazGBwxuWQnjhBnwVSdVBJUdP9ngeshlljx8WeGlJgy2TqgLA17eXb4fDrYoFMa2cEUbhj1DtJ8RYAWtKkkaEagjw0IH3dbGiFKnhiG+k1I7cSDw/DU8p8Wc7ZHNsJ0K+4a4+h5hjT1cw3KlTWXrGU1kNfIjqW25NgQ6dhcstIO9IbWUg9k9Vz1zuv8AAZbEQqh+fneGTUaL4UBRqgEBtRCBiKGoB4VxcXpJtM2+2u7zO01NmtY7uERAGTU0pEqqCRVfLVpCgPFTTjTHNZ7gcIyyNYUWW5rVSqvG+SmJtxx7n8Vl6Zgmc10SS4xNcq7BIU7CnV8pPoS4bifqIj/yOISdOuWOqLA3u6yI4MFxq+GT4c+HiWoHaCCyn9ocMfpH6Y+pUNt0tbXc0RvNpdSBPApEilCVYPC1DVWBVgNL9iHjhO409vuZ5EQhrErmwiuIbkxZMSvkyGHWnfmbdZUEfMhemo+/pZTaLzzTH5bUBpUUKmnYwyI764uT/PHSktmL2G8hYOoIU1VxUVo0bDUp7QQMWMYB7d143jIczm7xjiyvdSr62xyqfHTbIrwkqdbrscirXZyJL4G3VaWkAf1d+nTZNkbUslzQRjMDiSfYOzvxRvX/AF7JcwSWm0tV3GkyGqqq9xOZY9wxKBn3LcNe1f2/z+UqSsyy8wmjn2VTjnI0PB5GRv5H
mSUtfWUlImoQ81SMhpZcckvuelHSkBbmqtnTNeiC3WrkAZVAzb7OHsH14qvYrbcL6kaCQqlSmrwISTQnMgmvacwBwzxSH7lf3OOZPdPi2R4vRzpnGXGFm1Jbk1lRZFzNOQYjZKZSLy8huAwqdDxCHoMb8uSCULeVoUdLM135zlIFKRDiT8R9vYPZixbbbpLZg12/mT05ZKv9Xt9p+rFTkmFdWFvVYlR01ja5LeyoddSUFLBfm2k6XOKW4EKsr4rbj8qc+pQCEISdv2DqAxlndbeEF5XNNIFSTyAGDQEcMTXExCQoKlmNAAMySTkBjo/9jH/tvHaZ7V12e+9XL7DjqJaNMyofDnHsqHLzwwnUpcS1l2TympFPikxafxR4rc59BJDgQoaBgtunBANW4NWSnwKeH9ZuFe0LX24qPqD1Zt0lNl0ygmYGhncHy6/uLkX9pIHZXHSVxZ+2H+3dxVjdVjkP2xcWXFXVkfT2HLIsuSLUuKAD0px3J58iI5IdI1UGojaAfBIHU75dAgjgjqq8BSv2nnhVTqq+eQXO9bh5KMeJYRj3Koqftw5uW/26/YZy1j4apeJ8Z4YvqxxqPjvIXEFa1iX6ZZoSoQXV4xMcXiuSJKlkLjBhmS40SlMhvUEYJNf2RDFgQRXS1G+4VFO33YYbfqfYdxR5LASzpGQpkRWCk8qa6aiewVPM0xHv2GfttNeybnXkDmPI/cHWzYmaw0UVfx1xtj9xi2IT5HzpbyjOm8nly5S561qSpFfF3MMuE6SlJ+XqVd7v81a/KCNdLEE1zII/ZpwJ7RjVb7Lf3G6p1BYrPbzW8bAPkBRhwlU5EDiErUnjiTnP/urwzg1+3jZbZOyraGrVMaQ4kvzvU1UwmFWR/neS/oNidEN+ZPn1nY2M96y6NKR048ad5PAe3M92D1ntW1wIbq6kmvb+TNqnTVuYNKEj90aVA54ps579+/LPLURmnpLJjCcRLs1b8cll576JxJaQr6SM4ldlLejnX6ZKUsNKG9Th7EHkO3baXGby0AJ5nuUnJKc240yC1xomgv8AcBHFtsccMKOSNICxjvcjxS+ytCcy1MVPci5o3TVVpV0EmdPrLNTkq2kyJjkhd1Mjv/UpcyOUlREhtmSd7MFH/LML+Ylbmq+gu5Wt1ft5z0SEDw0/CD+yONT+J28R7hlhs2K8sNrBUt519Uay1KEj/uR+FF8I51OI02D+Q3omzWmH3ZMiDHLjqFOKQ+9IfDje1hI2sqjoGw6ePj1ETaXRZDCvhZF9pONW5dVW7yW6SuHkjmetTkBnli5P9prnPnr238oQoMudYnhvkd2PR5xj0lK3o0B6eURYeWUrDmqIlrCccAdKdA60Tr3Tr0Vt+htw3KzeVloUUstedM6e/CHf+q2zbTuCwK6sJHCmlPDqNAfYCc8dftNc19AA9HfQoRkl5MkK1LiCnehZOgKg8lQOh18elc2vg8rTTl9PZh2tdwpKZ5HBp4qDhkPp7cNHIOV6KgqrW/sY9XUfTxpEmXbLdTGhxGSFlclbJ7B7ae2mpKvDqdBtt1eTR2kRklJYBUGZJ7AcLlxum17VBPu08cMDKGZ5mNAinMnPhlyHE4or9zHuKt+ZrJdHSLlV3HdfJKo8ZYLUrJ5LayU2dm32IiBXdhg9h+JffQddJdGdJW/TkXzd0A+9uuZ4iEH8Kn9r9pvcMcN+qPqNeddTna9uLxdJRNkpqHuWHCSQfsD8CHL8TZ5YioiM6oaq1Q35JHdSyB8exWrv59h0/eaAcs3+79X3nFQ/JMRTgn3/AK/uGMn07hA3JKGgflSO6lH/AA3K+JPYde84Ammb8+76dmMTZORUikfZ2/Tt5Yd2M5ZlOKuocqLSRFjglX6eVetDe7/MVxndzaSvzcSEr+B6h3NvZ3gKTKGfm3Mewj7uHbgttt9u+0sGs5GWKvwcVPtB7e0UPZib3BPuMRKu04rlkL6RV83sivxip6ALKMCtgqaXq/EVJa3IA1c3eZHb
rgbfkWPfb2NfhW7mA9gkYY/X/pCV7jpPa55AA77dbMQOFTChNMHfkLCsE5MoZbchqLJU6yoNyo7gbktq2kpCX2SHEqb1HyK1A8COhOGLEK8ATzr7RMjyvN/b9eUypWRVjVNbM5BTRLGUqDElqmsMxjIW1EW4HjqVfldu2h6K2UqP/wAvKoaMkZHtHsIwNu0eP86I0cDji+XiTGfd/wAi8Zce8gT/AHOY9USc1wzG8plUn/Q6mf8A0qTeVESyfrhK/ulhT4huyC3vLaSrbroPDqbLPs1vM0MlqWKmlQ5H68R44dzmiWVLkKGFfgB/SMEo8U+63Tt7pqEH4/8AQ+mP+H91dY/PbF/85N/4w/qxl8pu3/z0v/ix+vAd9wX/ALVlwtxRfci1/uLxS9n0btLHRV3HF1FQ1tg9cXtdTtoctTdyRWn/AJ75CtK0qc2hRSCVDdBLs13J5UdqUbSTUuSMvqxpnTdLWIyvcBxUCgQDiadpxTRyHyTyZmmdycs5ZjqiZrMhQY0wt1rVW1IiwmfRiOxWmiY77JQflcbUpKwQQdOhd7OrERINMaZAYn2sTf4zmsjc8A3kfPWocB3s6F7FEjROoOmgOm4E/d0NxPxFLHplzyjkcrHcevaCuu24jkyDAyKxFSq5DSvz4tXIcQqO/NZR8/pFSVqQDt106+HIEnhTHwsEGs8Bn9WCbQ8TcuUdoyrKV1NNQz0LhTJse0btXHIj3YvxYlb68lUiI6lLqAoJAW33I6C27JcvpQg5do9/A4kRdTbddxxmyWaR5MxVCoUcyxalByI41NAMLHI2DWkFUVmPmD9pBlOtS7VqDBcq5osISk/UuQ0TS22Yz8tKZDfzhQDiDpp0XjiSIUQZnj34xlmkmarn2DkPZhVg8g5NQVqn7bF8lU27Cjtw7RVZKdiWkb1o8T602DLK4T0hbo2uIQTooE+A62Y1Y3ZHPBEqQhiA8hbLhjp9QqjFJYSmOR8xQoArQe+nfr2PY+s8y5TId1jzWYOpGjjSkuPAeA+Z06Ajy8fDr2PYe+N59ZtWDNvIkmdNS+HHnX3Uq+sSpAadizUKC2X4zzPyFJGm0DsR17HsECVlWS3mVU+YYmpePZJi7safTWVfMcMirXAWHWJEmfILcNqLH0KfSS0zFS0ooKCFK3T7O6MD0PwnIjEK5t/NFRk4zGLPKTKeJPf5i9fjuRWlFgfumwurcYp71uMHK6/LTfqPyaqPMUwMjx555JcciLKnoLpUtsqbPqOk1IgUtGPMsW4jiUJ5ju+hzzxBI+ZIV/y7xeB5N9Ps+vELcxx/mf295O7T8kVl5STJLy2ImV17suXj2TNNqCkOQbltv6SYgp0UIz4Q8yCNzSO3UaWwWVfNtSGTu5e3sONyXbwt5VyCp7eR9hwlyryffQ5DwNhOeeRr6n07qypRRoNNrYSNQNOw76fZ1Cjt5VmAZTSuJMk8bxEgjhixiiqpSP2y5sL6Z76oRr6T9P6Cw/uPJcyWB6J+fcUnX4+fRG5ULelFFB8ucv7JP9OIcLVsAzmv5nH+1iA8S2NPUsfWIlRdjZP5jLqBoGx3O5I6r6a1eWZtIrnh0t7uNIBRhiF+V8K8/wDuy55ynEuEcDfuUxMewiGxn9w1LrMLxBOQw36rK5Npka21QkPMUDRWmKymTOdB/KYXr2m2myvNBeiUhHKQaC2QzkrJTtIRTyJBIyzGIF9vE0d1ZR2oJtmafzqKDULHqhq1CUHnaSaFdVBnQEG0V2Hi/wC2xxtZ4jw3Q23uM95ue1TcS7yeLTvzKnE0ONhbC7NxlTkbGsWrHSl2NVJf+vtHUoceWGwl1kncXW37PBRyFBzCk+Juwt2KOQ4nlXMiIgnv5qsRqHEjgPZ2k9vL7MVWVXBfuYyLI7jOc4wvLb/L8ps13d/c2bsFyxsLKcCZL7o+pSW0p3BDbaEpbabSlCEpSlICXcb3bXUpkeWrGvbyOXKlOQHCmWDwtfJhCRgZYknhnGnLNEGzNwLL
YoCGt+yqfeSQ0oo7llLiFEJVqNeje3X1mXB8xaV5mnHPCJvlpduraEatOWfDEmoN1mSaxNTZHJYUFKdi4b8WbEZUnxUh9IabU8hSu5CypJ8dOnKG6hkQBZFPvBxV95DuUTFHDiPP8NCfaQKnPtOPBt24bSipRTtHcKC0+HgNFAefWxiG4Y026kMABn92BJlOYMtOhwOsqLTiXgh4JcZWW1BYS60VBLjayAFJ1Go7dLu4SkVGLJ2O2qoOefu+rA4y3luRkoZZeaahobWhctMeW44me603sZU+46DKUy0B+Wh1x5SOwCyANF6WeR/Dlp++nCvP3Vw5bds8VpK1zqLMwotR8APGnKp5kAV7M8NGkyTJ8luqzDcKrbPJ7+zeUxV4/SRVzpbzhJWtTTDIX6LLWhW44vRttGqlqABPUuwsJ7iTwah38B+oDG67ube3qwUGU+8n9ZxYDRezCPiNBHzr3dc0UfFFK4A4nFai0rP1lxZb9QQnruYl6I9ZHwMaDGlqX/4buuh6arexhiHhrKw4n4UHtY8fs7jgFPLJKKzHy1PIZn6v9OMK+b/26OPnBCw/h3N+XpjGjabmwrps+FOeQPmedRmtxAUhDhH/AIUEI/yp29SDcRxChkjUfurq+1v0YjrbRPmI2bvY0+z+jGyPdz7RJSxDtvZh+nRnmwXXarHsMj2iGlEhO0lymWVrSnUKS+E6H8XXlubaQDRNqJGf5a0Bzy4/dUZ+3G6OwkkJKWo0g8QRn38vqOeXswRcVs/YjyAppnEMmyPgjIpJIjw8gmyYdYZLoBZTJctJlzjSBvO302ZbK1nsDqQehm6dMWG5J+ekbORWqNocf2TVfcBXEsT3UNFYyJlwI1D6xn9uFvPeNOWuJowvFGJn+BbQ8nK8bS48uFEUN6ZFrVtqkPxoxa+YvNLfYQO6lo7dVhuHpdfXFwU2qWNu5zoYe0GoP9kkmmYGCtpuYFEn58CMwf1e/DOqszrLqMX0z4jI8An1A44vx+b00nc2PhuAJ+GnVMbuLnarl7aQeYUYjUtdJoeIJAy/04dLHbZL5Q0RpH2n6Z48TJ8BQJFi33Hmjbr4/FWnS+271OlkIPtwQk2QxCrvWnYMB3P4kO0rJDfrlZ2LAG5IHdJBHbXses49xiSQOaA4wtj8vJpWueKrOTsSjMWUtTi07itfp71HTvr8qvmBJH29Pm37gstvqUA0HDDFBqZxWpOIU5xQ3kJbrtbIAbSo/wDACUKSfPTt/r7KHUjz7eVtMnH7MN236owGUAZZmmf+nEe7abkTLqxIClOJ1IJ1UkhB7kEakt/H+pH3dHbCKAkBaaSfp9OBwyQTFqF65YIHEtnIuMkg18qKpSlPtJKlJ+VaQoHYvz7D8Kh3Hl8Omzb9oFxcp5fMjh+j9I588K3Uu6iztncnIKcWoX93Do6Chx1rVsymUS5o0+YR2TsYbc0GikPPhStfMAddddDdLeTZm6K5xppX+sRVqewUH14/Nv1q9Qg91HssT/8AaJDLJ/1aEhB7GbUx7aLjXi19bfxwltYQ5tA+RQGmo79h4jv9/Ra8SezkIYVFcIG33FruUIVTRqYeeAYSuBaIUH1JAcBSArsRqO/Y6eHSL1TN81bFQvLD107ALV6E58jiYCK58Q29CVbWwNPHUD79f5dc/wB9ZTJIx5Vw8eB3qMjhsTIT3qH5CCPEAaa+Xn/t6CssqtRxVcTI4gfg+LG3js+0xuemxqpDsKUlaVqSNyosooPyomR9Q2+keW7unyI6mw/LohM6hlPDLMHtGJdtLdWUwms3KSjjTgR2MOB9/ux45/YTm+KUdgoqr8h/QpEstb1lElUyS+XnmXDoXgrU9vxp8+vu2b7Ps92piLNbMPqqef0zxbuz3Vh1Dbra3aol7kdGWdOak8R3cRiHXGuc5/Hwb/pRVNTZFvjl1ZOUEWMSmTIhWzjkzSM4ClS3WZSVDTcTqBp8Opou99fqO33DZWYwysAUyPiXMVU5EEZU7MJf
X/Q2zDczvO9xqdrmhAL5jypUyPiWjLqFGU5+KvsxFv39XOd0drRYi6J7OTzOK8Jh5I682pmdS1UyCqW/TPyQll6ReXMjRct4khiMlDCT+Im37a+u7pzeTJpkLE6V4K3A6RXLPvrx5YQf4FtOwIxtXaWaSFFaaShcxLmkeoAVXOrc2yriJft/4fbRX2fKGfJlxOOcQWo262T6U/Lr51JVVce42tenr3FyrvKcRqIMLe6vQlGpWG2l81ZlANyVJA/ZHDUe4cu05DnhB3jd4ppW8xtO2xyKrEfjbiIkHNm58lWpNMsSU4d9xOS2HK6J12tCo2SzE1jtFGUk1FdTx2m4VbQ1sZ5JYRXVsFpDCApOikhSiNVHpv2e3kCFYqiQA58682r9uBO470s8fnzqpi1CqgVXSMgtDUU5YnBx3y1lVPmU+xwuzVDYmWsxl2hclyJzcZhSy05WpffeWiVUPIQRo4kKY12pV6eiep7yTRE225eJRQ+ZwIPIkDKv38aVxXnVvRO1b+g3jpgm13fSwaMZeYGFWAbjwGYOR4GoxY1hPLePWfr17EivqZ6WlwbBmOn1qO29dpP1UZyGUrjSK9RWUK0b9LUHVPn1nLPMkQNyDNCaHkHFOB7z7we/FDjb972Xcy22XHytzESDLHqQOf2XCgoRyJKheNRzwQMr5GzKTgF1RcX12Lozmvq1Q8Riy57GO4jJW2kts1yZ7TTkbHtilbkr9IsFOum1XWEVpYT3Aumd5ISdTIc5K8q1zI5H8Q78TLneLq828bSzpt+8eaFZ1T8l4zUsYypKpIT4uGhqmhU455nf2x/e3z7zLf8AIXuul4xAZTKS7VZFBzbH86wmBBdecP6bjtXjlpYPtv1ySFAShFQ4/op53XRIMT7lZXFuLS1OinxKVZSOxQpAJ7aiteOCG8W970Nt0Z2OxnuZJ08MqlHV2OZlmuNXlIOQUuKfCBU5mLJv2Wvb7DwyZ+mcs8+UmbUNnAVGu6v+1baq3TAuZNlyBMcsVfXSHnNykKfKXGwGtyNu3qHa7Tb3l3Gkj6baRGNa6XBU8kYDUpPHMEdhGKw3T1t6x6c2O/3O5sIrvf7S8hikgeEGzlilTVQ3NvIfIniUDy49EivnVlkGnFU3ul/aG5xx3FEz+DsgoOdDDXMnXlddl3F+ULEPPF5X6JAlypFFYLYa/DGjzVPuL7IbOoHRDculNwhgMm1MswA+A+F/7PI+yte7DX6Y/wA5nQu534s+v7O42J5CqrNH/wAzZoBl+YVUTRgnizRaQOLY50c5ezXEcsvsJv66xxu7opK4l5T3cGVVW9PIQoo2WFdJbalQ56duumnzA69UzufUN7FK9k4eOWMkMrChB7wcfo70/YbDu+12+97XNBebbcIHilhdZIpARWqOpII7c8ueLSv2qPfnZ8EZtB4qz2TOmcQWt0iwrbFkuybDjzJJLqC/bRWxuMrH7ZYBmx0jc2oeogH5gS3S+9zOrW1yT5LZjnpPb20PP7MUP6+ehlh1hNF1dsaInVduoWRclW6iXgpPATR5+Wx+IeFjwOO4nGpmC8rYaiXTs0GYY3kVapyZWNrhza91UlrebCrWpMhoRJLit60JGrSydNPDrTu9nKWJgNDqrQHKp4le0HswX9Id+bboBY7ujLcQKFJdSJFC5AOCOK8NXZzxza/uG+yq0wqzs52PKuoEBRdnMxKy3tIsNUV/Vwek2xKTH9SOFemvROvyg+fS1fyO0NNTDiAe8cQRyP1VGOttgh2ncPzpIF8wgFgKcDwYEVBH3HFHUbFmsdtHUuJdU864UuuT3HXpRWD31lPqWtXceZHSDczzxMdWYr9MuOLbsbHa4otVtGPNHNszg+YHc2FFPirbddDRWkoB7pPcEDb3Tt+7Tr5FN5hBFP0403sY0kEfq4YuU4F9y2YMVdfgPGlPIy7l3LUpocJqYqVvw4FpNH07d9cqG70KyjQTIe1OmjehI16fNs2F1Vb7c/yrM+IBsncdw46T
w1GlTkK4ojfuqIrm8l2bpyl1uSko7pnFEx4KzjwtIOJRSSozagxZT7IMczD2vfuX8c8J5Pbt2sOFh07Ep93HddcbzK/5ixVOYX+bTnHAhT9hd5iQyCQQ21BbQk6a9Wju1tDuPptc7nEtJ1dZKH8KwvpCDuEZr7WOOd7K/l2X1js9jkcPaeU8IYcJGuELNKe0tKAK9iDtOLB/3IOPYL0fAOTYcOO0abILbDLNQQEvqiXKzLhqknUnViygoSg6aaPefTh6PbgQ822SsSZIVlXsqooae1WNfZipP5i9sLRQb3bqgSG6khanxeM18Y7nUUNKENiu+rmtttpbQdh001PdP8wB5fZ1fYViO0D68cYX4qxIJB+n04YzzFqeHiT49wdR8B8w7db4wBgKSUpXjhrzK8q3KVtUPHuO/wAfHXXXXrdUHhUHEuG5J40oMNOVFQnd4pI7HUaj49iO/wDPrMVGWCEctRUcMJC0KAOg3DwBT8wP3keHX1mX34loGOfLHqHGclSUN6kakDTz8QPu8+tLyBBqxuSJnbT24mdwpiSZchlhTeoWUOJJHi4gd0n4hxBIP8Oknf8AcfKjZ68sPHTm0iaZY6cfp9uLH8QxtuDDCENhKnHGkjt/lTu+z8PVP7lemV9RPAHHQew7SLeGgHEgYfEhptKdEabW07EfwOqleR1Wrv8Ad0FqTmeJw2lVGS/ABQfpOEJ9B1O0f+f/AB62KMaCaZDjht2R1BST8o8dPAn79PLTqZCtM+eBV1JWvZhvIa3r7A6eXbt/HqYSFHfgUq6my4YXY7PpI8NCSNfj9x89NOozNqOJaoqLyrj0ttJ1J18fH/ynrIEjLGsnCTNX6KSB2JB0769v9mnUqLxYiTOVXDUeUVq076eP+nl1LGWBLsWOCFAYDtVAHgoRWNPh/wAJPbt5EdLsbaQOymLDuow0rHnU49xlKZd2EaAnaQfL4d/s62OAwqOOI0RKNnww4W0btNPv1/l8dO3UNmoc8TwKjLCi02QAkeegH8/8detTEccblUgY9y4mwI0GqdNT9h/h4dfYZKnvxpuIytNOPMbchaUjQdwB/DXr7MARXHy3Zg2nnhzlweiEDxPdXhqPj/Poewo1eeDyOdPtwkvHcf8AsjUH7etsYoO/GLknLlhk5FW/URnUlOqFJV3OvYadxp3/APN0Us5tDjtwv7nbl4iD8JGK5OcqpuE4+pG0FJX20APnr/r6trp2cyIK8Mc89V2wimamK9Mrbcll1Dfcbl+B7/DQaHqw7dhTPsxXaOyMSO2mJVZze8RcyZP/ANQH+X63ji5ta6rZusdzDg+yy51mdBhNQ3XIuR168y+qjhplDTSENQmktNIJaDqnVqTbC23jZrf5BLN7mFGYq8dysdQTXNG8uhzqc2NSc6UAtO+venOoLkbjLuEdnOyqGSWzaWhApk6+bUUyAAUUAyqScJGS3mB4/wAWX3G2I53N5HnZbleL5FZWcTjprjvGKRrF410023XRHJTU6xnWC7j81xcFncltOqhsAXIs7Xcbjd490vbdbWOGGRFUzGaR/MKHM0ooGnIBjx78om7brsll05NsW2XjX01xPFIzLb/LxIIg4ooJDMW1CpKDIcRShC7CChCEAa6D7Pu7eHl0eYgmoxXWnKozwuRUkd9PED+f2dYE554+qOQ44VW0gABXY+J+HfyPWBIOMxXCihxpKdCR9w0J8Pt+zrClcZEsBQY1Xkx3ddwSrXy07+f39ZUoMuOMTVsznhEfgR1K1CB8Tpp8dP4dbA1BjU8dcG/i26sMRwDlybSSExZUpinqZhUw1JRIpb6jzCit4bsd4Ft1qVAsXU9x8qtFDukdUZ66yeXsttKagqJiCOIdTEymvaCB7q466/lBs4rvqncrVwGV47cMp4FGMyuD3EEjHO9M5cx6qce9nvLz9u1QcS5zyTlOJ361tBmPR8ux6WS56KV+oW6fFMup2pjjm1RDU4rICNQOcLndLjqzYJriw0vfMIpf
LqQWEBYSIGzIkINRTNgpFeeOxdh6WX0x68jtN0BTp5buaOOZgCifNIrIxB8LIAFLBqgMxJxFnMeQOZOLLGRjsfNclqkx5H06v0y9s/0qZDeQHIdjXH6pcZyusIykutFI26HTsQel3Zb+WSXR5shjpUVY/Ua5gjgcXR1ts1hbxi+trW2QPQMURdNaV1LTLS48S9x9uI6ZNnmV20iW9c5Jd2y5ClrUudaSn0kqR82qC6GyCe/h36cFu7jTpLtp7KnFSS2VoWD+VHq/qjL2ZZe7GbiLnbm/i6/MrhvkjM8Gn2G2NYMY/byP0S3jBWhhX+KzhOxfJoTmuio8+FJaWCQU6a9EbfcZVUK1GWvAio9nd7sD7jb45G1KGWQc1ND9n6cSl93sXFGOF+D/AHQV2C4bhHImXZHlXGnOdZxvQM4/g19n2OV1Xk+J8iVWFxCajGrfJMdu0s2MKEGK42UZb7DLW8oEe4lVx50SlE1MCv7JX9B451ypie1jJbukMziQmFHVhzDjh7VNQcWF/tT+3H+ybmDzfyRgD9XzLmCH3sSTktS/H/6eYu8sMyLCIqxjtKi3tyfmlOpSlTevpthKddWXp75SCEX6MrzSrUMpDeAjJUI7fxEceHDFQ+osu47s/wDBbfXHtMbBXGa+bJ+yT+wvPlXHVPg11VxK9pMSelL21IkXkpDciQ85oPU/TYjqiyw0Fdg66FKPknqVeXT6yHFE/YrT/WIz9wxU5jsYm+R2uVRcK1Hn0htJHFYFbwVHOVgw/ZB443855X4n4lxiyzjMLaGpipgzrewl3s5a22q+tZVJmTZJBQwhhCU6JQlKlLWQlI1IHUeCTcdwlFtbtoU9goqgcT20A95yHPEy12bpTa2N/d273lyKGsjGR5GY0VAMgC7ZDw0AqeCnFS/G37oeG8rJyv3A2M1mHhNZaWGN8QY7HkKiXrctgrYvH8kxhwpVTzWkBBryhLjb0RwOahwqAE2819c7ve2MelNog8pVLKwlZyupnMnwNE6kFVXNGqrVIri8j0xtK7Ds1xbLr6muhNNP5bD5e3i1eXHbrEoDLKjBg0jikq0aPSMRR5v/AHFM+ztbsPE5D1LDdcWhluI4XLFTg7hbj3zqaWkDcUp1WB4gDuDCSWdkC7nURxY5AfrH1D34NQbW8Ki2u6jV8MYGp2/qpwH9ZsPm0Wr3ne2WXyRCki19wPt7rEozir9T15mYcfJOkbJWY6FET5NIrRuRs3ICdFKKx26xXfDODDYGiE8sqHu7K8RXP2YXf4HJ05uxG9Bl2m48QWtTQmg1tzCnJlFFGXHFPdrfzModlorFy0z4vrCbAQ86XrGEhR+pUGwQVzYJ1XsAGreu0fL0xbUriiT0ZWIoT+Fv0BuB51p24j9TzpCTd2ClY1WjgVo6cQRTmnHvHswc8D4fyvkaUzLlMN11bZU8SqtJiGd0KyjRmQW5UljZtS/LLaAtaO+7v8eniw6fuZ1poJjrwPEezFGdQ9fWNu1TKEmC0qOB9o78s+WJnYf7aMUx6tr/AKuG08+gJ9XehKhqhRI0OncDy6ebXpuxtkQuoLAYofcfULeNwvJgjkJr7e0fThic3BHCKsst4Nh9L+i4DQWMVy2uvQJNhKjOofZx+iaCQuztJS2wFIb1S0jVSyB1p3zdbbabVoY9PzboQq9gOWpuwDl2nIY90vtV9v25o7K8kIcE04uQa6V+rxNwUccWg53zBTYhUptMmnN1cZpkR6ulY9NyxlemkJZZbYY0VNnLSkBWg9Nv/Npqeqcsdnu96uvlNqjLmtWc5KO1mJyUd3E9mOptw6h2/pDav4j1RcqjN8Ma5sxAyjiQZueRPDmSBniszl7mPJ+V5SmJi1VmMRni5X42y8VNrUk/lTLp1BAnTfNLY/KaPhqe/V29N9K2nT8etPzL9h4pSPrEYPwr2nie7HJ/XfqJufWlx5EgMGyI1Y7cHInk8xHxv2L8C95zwCl1qCvctIW7/k7aD4bwPD/ujv8A
HpnDGlFNF7f1frxX+lPxirdn6/1Y9/QITpvSlS9PlQB+HXw3aeH2JHXgxIop8P3/AE7cfdEYozir/d7f1Y8Krhu1KQp3sAgjVKe/bcB/8D14O1PCaJ9PpXGYVF4j8zs/X+rHoQNhOuinT3KuxSk/y0UofDwHWFSeGSYy0Kprxf7B/T9gxsxEu1k2JZR1lD8CWzNZUn8QdjuB0aEd/mCSCfgeuE+oP/q9ff8Ank3/ABGx+tPR3/4I7V/7ttv+CmJXWd9KZDVxUTHof6lEZnNLYcIQUymi6pCkbtriGn9yQlQI7eHQjDHgAzvcJOq5VhTZPC+ra3KCbCCEoe2n+p+G4r0lnv3KFI0Hgk9S7I0uF9uIt4KwN7MdWXtnlsz/AG78Hzo5KmJvFOBy2FEFJUzJxuuebJSoBSSULHYgEdZbia30h/ex6yFLSMfuDBw6hYlYhF+4qnf7ReTU/GXg/wD+/mNn+I6JbV/2k/8AVt92IG5f9lP9ZfvGKHaXPZdTj7FJkdfBzHFG0koo71TilQNQNzlDbtKFnRO6f+pX6JV+JtQ7dQ585D7cSYP8MezEbOVcMxjkMOo4lzqJCyBepTxxyFYQ6S1fdOv/AC+NZW4pnH746jRtt9USQrsNij46cbsVncgY9mWC3rlNneNX2JXLTu4R7qvk1y1/PuTIiSFoQy62T8yHWVFJ8Unr4eGPDjj1U8iZrAR6UPN8qjMhISGkX1i40U9wAgPvulP8NB1EtbS2tyzwxojNxIAFfqxIuGVnFAAadgHHBN/vfJcmxJUqVlV/LsMecaTN9a4nqLkJSS21IKUvJBBjapWf/kZvzPUzEfBEwDmLkrHmG2aLkrOKuAy1AV9Exklm5XBDU91+SEVcx+RAShaGFBWjQ111Pn17HsEit9yXKcjabPIKS/Czuc/uTBcFt3XN53qC5D2PNSl9yfFev29ex7D/AK7m+3khJmYfxJLJI+Zzj2FGWdex1FdPhp3anw0+7r2PYNNHyOuE0zLv8L4urG30JfiVkXDHjd2LZ/C6iM/eKbr4S9NPqH9qSP8Ahhwjb17HsPRnmq2fbXEiYxx1VVrjiXU1sTD4rzJUkbW3JTsuS4ue8gDUKcG1KjqhCevY9j87nmQyH2J1cvH6exhvIkwp9PitDAmQ5LSgtmRDltQ1SI7zSxqlaVhSSNQR1NtryW3PhOWIdxaRzDxce3EzeLf3BOQ6CqRiPOuJVnNWIKCY6rJyPAh5S1FCkgCVHcjikvS0hI2BaIjqlfMt5Z6IJJbSv5sbGG47V4H2j9WXdiIwuIl8uVRLD38fr+h78FN/IP2+eXk/Uschck8I2spSlSqxu5yahisqc8AGprd/h8ZtCj2EZaUjTuNNOpgkvga/lTDtrpP6MRTHYNl+ZEeziP0jG23wn7YUVBpm/wBw3Nxh6m17sWHM2MCIpl14yFtqqQkRSpx1RUU/S91HXTXrf89d8PlV10pXUOHtxp+StaU+afy68K5fVwxgauv29uJEmRIzvkHmu1ikLi1S7XJL+NIKPEKZgt0WISW1Ed0yllCtew016BTCKF/zFgib/XPbwzH14KwxIy/lmSRfqH6MVze9T93HmqsGXcKe2PDce4CoK+lwoVuTJZh2OdWbWZJuJNhHoq5ltijxiTApoqXAppFi+hwl1DjYT0s7l1BBBb3rRNW4t1t9DNTjJMEei8MkrTiQfF7DlptF2+6bZblP+SumuvNCqfCIrdpI6uMhqk01rTVkoJJoahKjOc1uIMSdk2e51kU2TGivypd5mWSWcmTIcabW69Idm2bqnnnHCSpSipRPcnXpYlma4cyPRnPEnMnvzw1xW0EC0jUD2UGHzV5UtpSXEyZhIbSQVTJZVube1KitUjcT38esFIDcuPZ2jHnTUprxp24kLh/Lt1TrbRByS/h6OObTFvbdju4gKRoGZqR2V4dToZlXNgvAcQORzwvX23s9SlePafdiUGL+57PYjTYb5ByvYnsG5F7Mlt+Omm2c
5ICtPh0Yiktj8caH3AfdhOubC9DUV3H9on78PeX7rs39IhzI2pnZWv6hTY5PKtPJSpNStR/mes5ZbTT4Uo3czD9OMLbb7zXqZtQ71U/owLrn3M5LLUoPxMHmgk6iZguOrJB7j5o7EY6nx8egtwsbGoaSv9Y/04b7K2eMA6VH9kYTcEzPkLm/kDHuK+POPeOLzLMqmfTxkDEnokWBEQC7OuLaVCtEiFU1kZKnXnNpISnakKWpKFbtu2s3MwOqTQMya5Adpy+ntNMSb268iPT+M8KcfdizfknmDiT9u3HjxjxJT4tnPulyOpiOZtlf6YlqrxgSWkPods2GZTsuNFWs+pApW5IWWyl+Q58yFPuLPb2FuA1fK5LXNu9zyHYP9JXQssr+HOXm3Idw+n6sVrXvNHIXIV4/lvIVvWZRkMoKJsbajiWDkZsqU79LXt2S5kevhtrOqWGUIaT5J6XrvcpZzkzBRyBoB7AKYJW+3Ihq3icnMnPGuvlPIWEaIulwWwCdK6HVVGmg8Q5WwIro7J7fNrp0JoJGJIqe8k/ecFPKWMUFMJNdcZzmjeQXNBTZVlELHY8SRk15Gan2UOmiSZAiw3be0f8AVZhtLfWdgWsAJSpWmxCyCUVpdeU0sYKxqKmnL6vpTPEKS4gEgWv5lcqV5+zGtIXDbhrkXmTMqIBKqqh+lupIOh1bk2K9tPFI89HXXE/5Ceog3ncI38uAyEdrZD3Agk/V78F4dullXzXYIKdxP1frOJv+3fMfftwZUw80wXg7P+QuBVNol2GB2pafl2FK9+euyxKBYSGsjizXWFFxkwa5UORu1U05qlYabA7pKAb3MHhUqrDvUV1DuByPYDngNept+oiOSs3AkL4a99BQ9la1Hbh9c/4JjvLPEUn3hezCRKeq4P1z3LPELUdcO3x2fXfmZItrHkqU9SZPjyyV2NaE+nIY/wCYjE6p+pBdQ9I2O6q0ojUXRqchQP25cnHMfi9ubFth6hm25xbzmtvyPHT2Z81PI8vZwqua94d4y2C44txpKATuO7tp37knXueqgu/T63mZiooeWHiTfoWUE0NcEbFvcqM1ZejJO170Vg7VdwdvbUajueqj6s6XutplqurQDXBXa0tr0q4A9nbiuP3Ecx5RWX0mK0+8E+oso1d1TtCjtCyfiB1ZfRWzw3W3LI3xUzGJO7XCWDhFAA7f0fT7sRrZ9wNu7ozYNJdUQUFSzopYHYpOh7OI0/j9/RO86Z0OXjNB3fo7vuwb2rd4XQB6avpxw2LHliLKXq4w4lJVruSNVNKOuh107aDXvp3HY9S7DbJ4xqY50p7f1/T24Zob6Er4KUxLX2nvU+UZWw4pKFKaUkpJQB3Kh3SO2mpPzI/p8urO6LsJ5t1jWh0agT/Sf0/XinfU3dFtdqmZTRiKDPmcsWF3lfW3NxKfSoFtKhEjdwQmPFSGW9PIpUE6n7eu4dttJrHbIoKUbTU+05nH5IdV71bb91NdXwaqeZ5aZ/gj8K/XSvtwu0OHpbWkxnik69juOh+7T7R0K3Ni4/MWvuwc6fZEYGNiCe/BXqKW6iSmFIJWncPm8dfv8Oky8s7WaJlIzpiybPcLmKVRWoyxKzGIMl+vSiU1uUUJIOhB8vH7uqa3/b0jlJiApXFlWt4JYQWPjpjYn0KDu3I+4kDXt4A/5hr9uvSFdWuk6gMsGILlTkeOG1+kJbe2qT8uvj2PbUeJPYjToWyLw5YJCYFac+WHdlvG0vkDjqsbomkSrCoZk1cqOw82zMjFLyn4MxhSlo0SW3NCQe3n26m2NvbrcpNJSgyIOYI7D2jBGJp5YVe1r56ZeE0ZSMwwPLFe8zI8c9v+TIGY29RO5ED5FZLhLRKpMOStW1E3I321Bibeq7BplsqZYOi1q3AAOb9PNMnz+xK2gZmMmhJGZMfMgchkeyuLJ2brO3uLQbJ1gyVYU80Cqn9kSngrfvCor2YNvJWO8G+57EajN8nxvN8l5twbGFQU4zgN1j9S
1zZjsBS5cBhm1tUONM38FkqStTKFyX2dQ22pYGprYd1kln+VcxRyu2fmKcm4ZAU8R/ZNATzxW/qFsFvtlqbxUubmxQEr5BWrJxzJrkObKCaZgYpH5s50n3E56vTS1+B4/hLK8fxXjepYkxabBm3lL/VIrrE8GfLyWa4dbGwmay5LuupQgBAtW329bSByCXdtOpjxY8eWQA5KOGOQbvfbjetzh8Kx2qGQxRp8CLwyrmzHi8jZsewZYC+DZOqBZMzahqNKemV8iCXJKXS6yue639Q/CW262uJMQEhDT43bUqJ0OvRCxk8pT7ain3HDGSBGEIBBWhB+8d45Ysj4b/T5q4kCtlvRLYybB2+cspDYhVsdhtCo0JqySQ3YKeQC4pYSkhWjehV36wuZncGWSjJlSn4j205Acvrww2FjFqCrVblgeJyRfb2t+oYl3TX0X0DIeRpJ+nQxCfJU02obgS5JLQDxWEj5Vp0UknU6gadKu47i9vVAx0fd7OVO7D3tnpxZb+nnyxL8wVoSRkw/e7+wjOvGuCTTZ3dVzcmVWWf1jEZgvzqSetCLlptoa/UVMkBLN0w2e+9gpktj8bSR0trvjxziSbJVOTrUD2E8Vr2NVTyOFjqf0REUTRbYCk0gP5Zpw/aXhWnJkKsOeBTmvL+awM4xbkPjnJZFZZ3cefj+bY2uO2MfyeLXRTYQ7W/ZQ80z9WzEQuMt/Z2Qd63EEDWy9k6s2y5smi3rQbaJtWpv8SKv4ozxYVzKA5itMc9bt6bdUbfO22bcrvczuNIBokppQpIBRADx8x8gePGuJGYlyTjnItNAyPj+/dYTLgNvrixpUe2qwUhTcpLDiVFEqG3KS4EqIcGg17Hptt5p5WMFzGk0dahl4sp8SuCOTLmCCOw8xigtw25Jg0Qt3hv0aSN46VKNGSHV0IIqpqaEECoZSKg4aGQzosdKjZKbjhaktixhqU5AKj23ymTq/CCleKwFtDz29NMFxKw1IC0YGa08Y7xTJh2jIjvxzZv3Qm3Wl2Z7A6YjSqE04GpoxqB9vsGKZP3L/anx57lcXmOzoMGh5joYKnsJ5FistCZOaabUqNRZJJZAVkGOTANiC6pbsRR3NKA1SV/rHo7b+sdtNzEFTfYkJjkH46Cuh+3urmvLHQ/8sfqB1V6X9QfL7a0s/RNxKPmbJidGfGaBTUQzrxJSiygUcHI45U8Gj23F2aSWcirFR7Oit5lZfUy3Nk6OqvUUy5EYEJEhtBIU2pB3LSoKA65s2ueaxuHhuAUmRirA8iOOP12v47febGG62/8ANsJ41kR+AZWFR7GHAjkQa8MdTv7d3L2UcXTsaayLkhrFGriJHtoPHxhPXVrKpbVpMivVe1jjseoo5kphe9LhfalLbUCtpQ0PTLdx+Soe4mVZWAbyh4mAOY1cApIzArUcTiubZ13W7ptNg0kMLFVvZG8pGZTQiKgaSZVIKklfLahAPHFo/unu4WbYevIIsZ5FclKlyq5xhNpXqadRo480WUuT6tTifFKPUZ+4dQId522dGsd2g82I/DIPDIOzMZNT94e/DZb9Mb7ttym7dMbg1lffjhK+baSdo8l6NFXmYnUc9Ncc6XJnGfD97kcluNkGRUsx2QVfp7WNvW7ZcUdxTFeqnZa1JWrskOJbV8Rr0obrttiKmKZzEeHgqacuY+7Fx7H1H1LIFS5tLZ5aCpSUqK8zpIY07qjGePwanBMKlZ1eSlY1iEaVHrK2ZlkAQ7nKrqwcCIFPjeKtvTLudKcRq46pwRmWmUKcWpKR1FTdts2kV2+3pOMzPKfMcd6ggKvdRGbsbEO96b6g6pmMPUN881sxNLO2rbwtzpIY3M0gA+LVNGhpmhGWOqn9tngv27cF43jua4BArc6n8g4vEYvOSbVuPPnOxbSKj9UrsbZKFIxuqalLW2UI3S1oA9Rzvt633lzebjELxZXkcENmfiP73aRyrw5UwKhtrLbVfZ44I7aIVUBVChR+yKABVPMKBXi1
cJXueqo2IfuQe3fKMeBAkxuKPp3AoLdW3T5dYV7IW8NFKH0czYCrTVI79dAdHkbl6c7jFPzimrX96IHh7R9eOPfUsPs/q7s00GTiW3I7TpnI49lGy7sSz/cUyGR/0zXCYjE1z3MsWqmTCralL8Fq5tY6U+ZDogJQCO3bqT6RoBukYdvzk27UB7Qin6i1cB/5hY2h6blKrWObeghYcAQZJF+sIBiqCunJUhvYsaj8QVoPu0OoT10lExI8WOHb2AEkfT6fVhzNSkqAOuh8+/8At8D1JpqHbhdlQxkjnXH15wOII7K7HuPlJP3Dt1iqlTXhjSrUOeGjNilRUQvxPdB7K8fL+nx+3rcWNMxglbmuEVbBRqNCkj+oD5j466HUE9amNe/BeLId2FnH2mzMb3pSrVXbUaK7+eo7a9vMdRLmoQkf0YJ22nzPHnixbg2rZ/5NbZ0c3NlO7TUq10QNR20J+OnVVdTXDUKEZHFwdGwRFxITmDX6vZiyK6wK4xOlhWswRi1PDSSGV+oYy5DKV7V+GpUnsNPDXqnId2t7+5eCIktHxrlWh5Y6Ru9h3DZtvt727jCRXQqlCDSqhqNTgSMwMMB46k9/H+Q/h0R1GlMB2XmcIkx0NoIHZSv8B5n+XUiIVPsxCuGCig4nDUlKDmqe+g/x/n5dT0Gkd+BUg1KeOPUaOAN6hp/lHh/Ht1i7chxxikYA7sbh/wBP9n8OsVx5hjA6sIQpSvAfzPW4eI0GIzsFFThpTJHqLJGmuug0Pw7fd26IRJQUwIuJCca7DCnVabQB4nrNiFzxhAhkPDLD7pFhVVXD/wCQ2O/2emk/4dLQqBiw5TqlYfvHG68yFHeANfBWn+v+GvWasRlyxHkQ/EMKcE99ivH+n/T7etMo5rliVbmvhbDgjNarB/pSNT9p8uoTP24nohY05DGwU+pqCNQew+7wHXzMcMeKamoeONYMekVOK0G0ap79tPL+fWQeuWdMYeSUNchjAJKkqJ1/FqP4f+TrxjB7QBjYsuk9uNgKTpqrTbp5+J61kGtF44lEqB+7hg5lksOrgSCp1CdEK07gEEA9GNuspJZBlU4W983WC2t2BYcMVb82Ze3ZypAS78gKu2o79z46aePVy7DYmKMAjPHN3Um5JcTM1cQ9dbL76ldikqJGo8/8e/TgaKlBkcJK5ip4422oDBIK0aaeYA7n/DrXqce/G3ICh44UkMIbSNBoNNdNB/p4da2ctx442KgpWuMrLS1K10Oh7+H/AJ+sCwGMyppXlhfZaCUfMdAO5P26dh1rLZ1GM0jqKnHoKSo6btT4fx89OsCxHAZY3CIU78eCT+HcNddB/wCX7uvoY8cYNHTgceQFa9+sg2WNRU1pzx7CSfE9+vc8e0s2CRx8FzazkbHB8ztth6bOIjw9SVjk1SnEpHmr6S3Wr/uoPVP+t+3tfdGidBnDPQ/1ZFp/eVfrx07/AClbzHtPqobOY0W7syB3vE4an+qzH3Y5vffXga5Oa1/LNLEkOZPhDdnEkw47qYwuYr8ZyNEj2LikqBr0LUEujQkhKNdACRwt0xe32wbi9mW0/nB0J5EHxAf1hX6zj9ZPUrpa36s6Wj3i3XzLi2iYOBnqiYfFTtQ59tAOQwyeLGMK5444jYK9ORU5P9NIVgn63IbjsOSwSuVhbM+StP6bPalBaWYMhWxKgDHXtKm02R1DsXzCDrPpkEyA6ri3A4j8ZUD8Y+Kn4u+uOfekeuLna3HQPVoL7I/gt7mpLW7HNFkBza3Y/BIM4mqrjQwYQtyjBr3GLW1obmumtz6uW8w5HcYeD6VsuKacacb2bkOoKdCOvWFzDf26XNq2uF1BBH3Hv7cFtytZNvumt7kgMvPkRxDA8CDxB54VMYhDFa1yzdj+naySWYqHEaORwrto2gjclYB7nx6MRDy1Lt8eBM0hlbRGcj2Yl3wrmjdhgFhhmYJr7lGKcv8AGvNePQ7uM1YQ37HEWrgMxnI76Vh5hMx6M6+0fy5K
YSW3dyCUnRexw3sDLMzAkqQASpfV+W6VHJl0k9i6qUJwc2mJVjCOpZUSVWY/7taCVJPc2tR2mmLW+Ovc/kdTY1N5yFnFhmGY51Pn5fJj5BLdtBWVxabYl2TrLqyxCQ6ksxocZsNxw4lxQQS2OnvbYLa0hEFtHHHIiaUCiiqo5hRkAOAAGKM63+bubfTCZHMgzoaNQn4Q1a6mGbNyGWJnQvc9Kcq0P49TZNZICNR9BXOyWhr30DzWiQNfu6GXt7a2hJvJ7dP6zge/PFS7X0Lve5XPlbXZ3rGuWmPX9oxTh+4L7uMv5Bt08VOSZsSoVGr5OV0omPVVl+nsPOvN1MxL7Kww3YSmEl9wghpOwBKldYQ7xC1uxswJIHUeJCCXANaIRlQniewYtzb/AE5udjEEO66491D63MqlVjdl0oGVqEmNK6VqPExzzxDfj7kuRJx1OLXYocXL1zJmYzBxKoTGqoESRHbZdpZE2TJZtbiyf2eoudLkFxbiiNPTCW+lrcd6v3kkuAlY0FDGDSi8nUUNc/iyJPGtcX10p0ztW37bFZWs6puVxKWM7pqLN+wTWisRnHUUGaqBSuCWvJIlDGc+rcDTW3YUFwOOvBJ1Sl9wNteqnf3DaENMA9w3r8xVjud7uk4Wpp2cAPv5cySe/D0di2zZIGuaars8XbNmPt/QMsOv2++7rMfb/wAx49yViz6lwojy4F/TOKKoOQY3YJMa1qbJg/lPxZkNxSSlQPkfEDp/2S1W3UA56uP6/dikPUC6h3ezeGQ6Z0qY+49/ceBxKD3P8cYvDuqz3I8CEvcb50tF65VQ16u4jcvqD8+pdS3qUJjPrUEEgap08erKsoshLz59/f3Y5vh6ouGdtlvmKzpklTThyPaew88Gv27c+Y9cVkajnNtwrNTqGy0ygJivpcAC1toQkqiSFO9yEgtq1P4erEsOttj2uAQ7rcxRlRzNX7vCKsR7q4qXqP0q6p6gvDd9P2c8wdqnSKRV5+JqIprmRWncMXB4DwjXPY/W5nyepVTiDwMqsp/WciWl6lhv6papMkAqq6X0Tq6dpfdR2bA/EBO8+rHTkaadpuDJOQfEY5Aq+wMgq3Z+H24kdOfy7eoE10W3qzS3tWZeM8DGg7RHKxp3cT3YX8u9xNZWxWqrjanhhqvjGDW2TsP6HH6WIe3pUFMkh1wOaaqdcKVvq+ZxTh6DdPt071JcCbdd1hBdq+TqKSuexmkCivcuogcAMOfVS9XenVkbHpXp27kKpR714xJEgHNUgaQhRxAcotfi1HETbu8uMjsHrS6spdpZOk+rNmOblpbP/hsIGjMOMnyQ2EjTx16vGzs7Tb7cWtnGkdsOCrzPaTxY95Jxyjue77nvd61/u08txfNxdzmB+yoyWNR+yoA7c8IC9Uja1qVHxc18B5+mP6QPM+PUk9rfD2frxDUDhHx7f1frx5aVodjZ3uHtv07f+h4fxUf4dfGBPifJez9f6sfFyyXNvpw/Xj2NUna3opZ13ODwA8TsJ7feo9Y8RVvh7P1/qxnTkubU4/q/Xj4VbezZ7n8Tmp8PNKNfAHzPievtK5t9X68e+H4ePb+jHnQ6jTtp8O38evVB448OGXDHxw+J7j4+Hcn+Hbz64S6g/wDq9e/+dzf8RsfrZ0d/+CO1f+7bb/gpgnVFsuVg8VBUVPUkqTWKBPzCO7rKhnU6kDRTg6EYY8QM5it342TfIoj6ggBKe+5Wuh8dPm7fZ1ItTSYe3Ee5/wAI+zHbL7Ql+p7Vfbg4SFFfB/F69wOoVuwynVqD5g69bL//ALZJ7f0DHrT/ALMn9UYkV1DxIxCH9xZfp+0Tk1fwl4Nr9xz3Ggf9fRLav+0n/q2+7EDcf+y/2l+8Y5t8oyKNEokNhxIV6IOvmDt79+5Oo+PUKf8AxD7cSYf8MezFeXJtxJky3VI0KApRGvgnx8gdfE/w61Y3YT8Z9ynJuL1Yxqxn1uc4WnVH9k8jVUXMsdSjuFJhMWqXJdOsjsFQ
n2FpPcHrFxVSK0yxklNYrmKjCp/d/tazUhWS8ZZlxNbu6+tacUZG1fY6lxQ/4xw/Mt0thAPcts2Og8uoG1JcR2KJdSmaYVq5AUkVNKgZVAoD20rifuywJfutspSHw0UmpHhFc6CorWmXDDrxHjLh9VsiRifuZxZypsG3K6zpOSMQyXDLByBLAQU/Vw497TrkRXtriSXwklHjpr0RwOw4f/aYeS6aFdQa3KuJsmXDeFcw7T8oYmhb7boeMdwMWdhAkNuPwpiHEpUkEhXbXr2PY90ntv56feCI2HwpTTa0CRPYy3E5FbDbUraH5k1u4UxGY1UPmWR9nXsewX6viDlXHVJaqcP+ttUnRzJrOyx9qvjrAVu/tytnTgHNunabLRr5tsj5XOvY9hyQ+KeSvWVJtV49GfeWp6RIs8+xVMh1xWgU6+49credcUPEqJOg+HXsew+YOA28bYZuYccwyNNfVzqkfUD3GmyDIkKPb4a9ex7D5r8WrmyBK5JwZGmmohPXVmoeJ7CFUupV2PkdOvY9h8Q6fCmkpTJzt2YdASmrxeyd3eeiV2S65ABA7Hr6CRwx8IBwo/pXGoBPo5baqH+dNPTtKPgQQl20cGv2DXras8q8CcamgjbiBjKljCmFJ+gwqFqS2Au1s59gvUAjcURhWs/w0062fOT/ALRxh8pD2YzfWBsAQ63H4AAVoqLSQCpPzaDR2a3Md7aeJVr0jbzcs1/IGZzmuWo0+EHlh32axiNjG+haeLiK/ib6cMVme5fKKOPyRyUufQUczIolXhqceu5brjdp9ZKxd1yU9GabW20HalhlTqdiTub1SrYnUlVuYo2tN1Z2Ovy7AKOzVdeIDvKjM5nDRFFOd32JIPMFmZtxMwX4WEVgTFropyEklQKqC2niQBgQwZXDNnCjrt4fJOH2z7SF2CsefxzJ6D65Y3S3oNZYu49YQYjrxUpDHrvFpJ2hagNesIpt0ijGgQSpQUqWRqUyqaMK9uePs9hbGVgHaMhjxXUBnwBBrQcssO2rwri2elv9F51hQTo8kR80wTKqZ1BUgK2uyKWPkMDUE+IdI62fxO8jJ+Ys5BmD4GRx94P2YittTn/Cmhb2kqf9oDDsicVLKvVquZeFLIaR3QF5m/TLV6eqCotXVdXrR+IePj19G+QKaSw3SnMZxMfuxpbZb0jwhGHc6n9OHfE40ymP2bzrh9xCSQFo5VxUaj5h2Dk5Cgfv63r1HZ6a/nD/AOSf9WIj9O3rGpiH1j9eNs4BlSjo7m/EbaQrsVcpYqsAf9r0p61HT7Osv8wWbfCJz/8AJv8AqxgnT94pBMX1kfrxhm4TIgxnH5vJfELO1KiNucRZq0kDUgJhMyFK1I8vj1nDusU0gVY7g5/9G36Rj7LtlzBGWZVFP3l/X2Yt69uNLj3sE9n157mMstcYc5u5/bYpOK581ixk1kCksors3FW4zLUFdtJr5MaO5ezvSbbEplEdklJQhfVk2+ix28TFHJ0hitPESfgQ9mXibh2HMDCPIk13dGNKa6kCpFBTia8O4cftxWhBrsXt7O0yrM+XbHJ8myKwlXN7bRsRup0+zs57y5MyW/Jt11TanHnlk+QGoAAGg6UL3dr66lJ8huP4nUfYCfuphjtNmSCOrSR17qn7hh0fW8YwEEpZzi/UkAj6h6kxuKrt4kMG9kpT/I6HqD5u4vlSFCe9nP3Afbgitjbji7Edy0+81+zBb9vvE+be5/PUYRxNguPY5Q1q2Hs15Du4s/KIuKVjyjoou2TzFbOvJaEKESE2wlTqwVKLbKHHEH9q2uacefeTP5I46QFHsHEk+/25YDbncwW/5MCVkPDUa+80pjpP424k4f4Qwik4dgijDWSx5te/FyJ6rVc8hT/05169flw3EMN2770Ftxx1hhkMMRklKG22UBKWdPMVQbZSsMfCmdO8nmTzJ+7LC6RGCQ5GtvdX2DuxT5zP7Ck8Je6PhLNcbrlXHtiyzlrFoeU0riFyxxzaWVo2
inq7VDgc+owa6vlx4rL7mqY6nhGf7KbW59g2+1uLn5xABMoJK94FQR2itMuXsxMbc7iKzNo3wnINzoTmD30rni/5CG0NoQ2lKW0oCUJSAEhAGgAA7bdOtBJJqfixHAFKDhjmh5f919D7Kf3VOQZNJJZb4Z5Kbwav58xuEpJqYN/dU8X6jNG4TOrLN1RNymJ0pSUlySzIlJUC46laZr3I8yOGQ+JkFT+y34T9VK9oPbj6sBa3aVRXSxp3jmPrrTvGK8/3YvbtF9rHuBTfYWwG+G+dYEvOsA+hCTV09p6zCsuxmEtoCOIcGXYMTIqEaNtw57TSAQ0T0M3GzX/HUUYkhh+8OI9/HurQcMbbW5cHymNVpke7l+r7+OISe2q9kTstMZbpLDitOxOuh1Omuummh/h36ov1OiT5DWq+PFr9FF5Ca8BgZe7tl6Fk0olLoZdcJ7E6Ak9jrrrtP+B696cyiXbRp/xAKYJ9VRsCGy00xBVc1W/Y4olQ/CSSA4PJBI7kpB7H4n7uny4jLnVwFfq+nPA7apCMuY7ezH2PJW88kKPzE6HXsXUgd0qHgFJ07jTv1qhgJ+EGn3f0YdYJAI6ilaffz9mLQvZdXCGzZ3LiNC0yv0jr39Yja1orX8QcWCPPt/Dq8fSjavm9+iiIqK1OXIZmvdQY5c/mL6jGz9H3l4rUdYyF/rsNC09havuxOFAdSdyXFoWTuJGo+bXXXQdu5PfrscxqciAVx+TqTOniBIOF6vym2q1ghz1UJ07K8QB9v29QrjaLW6XIUJwXs+pLuwfjlg14py3FRsbsEkEEdzp20On+nfpK3TpKbNoOGLR2D1AgcCO5GfbiW/H3IFRcJbYadQVK0ASSNfu0Gmmnl1TfUuwXNsS7qaDF47FvdpuUK+SwLUwY5UNt5sLSAQoa6/eO/h46dVlc2yMSDxwzxTFWoTwwzrCsSjcQO2m7t/gRp4EadLl3tympHHBiC5Jp2YaEuZJhJdQxIfaQ4j03UtPOtb0H+h301pK0aeR1HSvdpPFkpIpg3bOGOoHFPPvgqxFmuWNfH9MLSsuBCQCD4qI+XRaDr31/j1a3pxfy3AEFyxNCPp3Yx6gJtLDzos20moxCjiDn7MMCtYUSotVSq8Tov/sinvv/AESXlPoCXq2QhQlVExKu6FNHaFf09XRddJbTvRC3SFbkkASpk/8Aa5OPbn34oK69Tequk1klsHE22qGLW8tTGaAk6G+KI9hFV/dxJ3mmTw17w0XcGymQeJfcrB+njw7q4SxApuSdGtsOqzGU2G4C787QiNaDa6/qESE6/P1FSz3rYvMtLyl1tAaiTpUlAMgJV4qP3yCBzOIMN70t1akG/wCyK1j1FJHWWykool1Zs9s+SO446BQuPwhsV2yaOy4/uLmoySsm4ZOx9yqpnMfkLflypsllC27K6aeLCWvTfIDi/nDZ1SGwrv0btIDI3izBqa8KDkO8dhzB440319DFF5kGTCigUJNR8XEeFhzBAIOJJYVyDIqHI30Rkrjq2rjRnkBMwxdgWZUpltbiUuKSCdApQQganTonc2miAawBIw5cAP1nAvaepgb00YvEj0JPEsOI7wOeJj4XzBEu22WUvJU6dqEoSsdydACR3I7/AB6rLe9umDmlc8dP9Hdb2pgUFl00wWJmVNGK3ASW5UZB3P7vmBkeIU2pJ1SWvIgg9Id9FLAfLThz7zi2LPdrPdU+YuVBJ+H90dvvxB73y8nYtinFmOU7c6wj5ZeZR+pRY9fPkRLMV1ZGUh5yJNiuA1iJiny0Vy2pENfdDraknUQLa/NpO8F7Gs+3TRlXjNRVTxzBFCOKkc8sL289GQ7/AGTz7ZL8ruCMdLFdauSKaWU1JQj4lFCOIauWAt+1/wAw2r2U5xwgm5M2DiNdOy/GHJK2om3H5UoOTa1DYdJcXFcdJUhoFpP4kHQjq2bHrC3srWzigL/JKnlrq+Ly+KBu9DkKZUrTHKfVvo+t3d3NzJDF8zKFMwBy
82Pwl1qBUMtCeDVABxc1KnzJrG3RpbLiSXEKe7OJIO5Lm9agEkHQ6adWJtXWVrUNrOrtpijNz9FraWahjjpXMV4/bw7cRC5KSqflrPGK4kv6+XUqyLF1pcRJTY13r+lPhV8kKSoOQJOjchlYOxK0rSVDXrbuXUiGF9xtTpiRwJKZUJ4SAcgTk1O2uCvR3o3c9OdQi1gWOTb71S1uSa+XItC8LEcQB4oz2VU8sVpe8T9vJdlm3t55Tx6LIFZy3mdfhuZQmgTLrsngyEWFRKRICEpQidAYdaW8UjuACCe3VCbxe2971CLtQI/MlHmDiDp8VR7aZjl7Md9bNsV5sPp/c2Ifznjti0LBdBUzHyzGVqfEGaqkGh7AcWSZj7TaNF5DtaplUOzgtw2RMZ+V1a4bDTAWtSdNSj09B8EgDpWfeLi5vZLuXPzJGNOwEmg+qgw8r0lt+27NbbXZrR7W3SMn9oqoDMe8tU+/E1uLMEuLTE3MeuliayqMqIpbgKtyCjYCUkEgj+R6lNIr0deOAXy8tsxjevHEK+RvZ/XY1f22VXwYraGi9a1es30BDLUJtXqKSVr7KJPZI8STp1IMiPFRuIH2Yk2rTC5QQAliaADmezFQ3uCzi25P5WTDhzIzmF4MkU+FVNY99VUxW5DaFWlup1IQiXf2Sz6ch5IKWkILLaineVJO72srXBUlWjBy0mo7czzI4c6Z4szaL2zhtgbcSrckeMyKFbUCQQi1JEeWRNC/EgClbe/20efbPimzh8d5fJeHHOTS2k170ha1s4pkMlSUNPNFRJYqbNwhLqfwtrIV4a9HOmY5aGDinEcjTu+meEfriOOSm4RZTD4qc+8+3FmvJUZ3lL30Y1Kp3VPUnBWG4Yq9sou19oXMOTJv0VzbzZW0p6ZOs4bABPykkkduul+mwm09ASCenm30jhFOXhICV9gAZscOdaq/Uvq/CYKm22iCFpWGfjVjIErw1MzRr9fZiQnvlsHZPttxiHZOOP2SOVKWfLmOOeo7ImP11sHQlRGvoxmVlCUn7T59avTBCvWU88VBCbB1A5ABkp7yc8e9fZxJ6ZWdvN4rpd3R3Y8SzpJX3KMgO854qMiy1tKAbVu/wOnhoQfH+B66MilC01ZY4kuYBIDTP78PGtlOEpKlfKe5SdCO57ggjTz6JqwIrhXurcqaHjXDuQ42UjQAag90nVP/ALj+LsfgevlGrXiMDCig0oRhPlspUCrUEaHw7kaeRT2IPWRagzyON0Na0FD7MNeU4hBKUqJAOumgI/kfs61vTBiF2AFOB5Y+VtghmShR+QBQ+ZPcdvPQ+H8CeoVzVlpx+n0+lcEoGowPAdoxOvhPLo7Ko7ZeBPy6kKIIA00JHZXj1W3UVo8gJpi1elL+OCVeBB44shh51Z5JVQYc62kTIkNCBHYdWClvakJSVEAFZSkdideqkm26K0meSONVdzmQOOOhbPdpb62iilmkkhiFI1ZiQg4ZD2Zc8shjE+oAFWnxPbt28vDt4da1BOXAYkuQFJIrhpTVKWpRBJJ8B4EeXbXt4dEYqAZ4C3Bq/HGk1DWvUqQrT5SddND38iO/fra0wHA4wWFmrlTGyWyntp4dv4fZ1rDA4+tEwxhUkg+B+3rMMDiOyMDQ8MN+ydUr5EfhH8ifDx6nwKKVPHAy6qDp5YRWorrqxokkk6D4eP8ALXqWWCjPA8QmQ9+HZFgJYb+YDcR8xPx+HUOSQu3dgnDD5S5fFj5jyt1RA7//ABs0P5ISP9fQg/CD3YZpcrhhwzw5EHw/3f6dj15aYzOPaFbFAjtoe3+7rzLWuPqmh78OmJIbMfcojevQH4j7eh0iFWI5YLQyKYq/iONpCkKV207d9fgPD/HrVmeONyla4zOxTPcahtKaZecCilT6lNpO1lTqUgpStalupTo2lKVKWogJBJHXhIIfG1SoPL20+znU5Z14Y2ND804gQqsrVpqNBwJ5VNTTwgAliQAKkYZyAQ+tDzqIy2ESFKEo
qaAXHbccUxptKg+4tGxKSBqsgEjx6IsQVBUFlNOHeePsHE92A8aUlKyMqMob4qjNQTT2mlADzyNMatlPcXVz5TDrCG4DWrinXFje4Ys2alpIabc2/wDKVr7inF7GW0NErWntrlDEqTrGwJZz2d6rU1PayigqxJyBzxhc3EklpJLEVCRjOtczpdwBQH8KOSxoqhSWYZVry5e5QlKaWorebjPl9Ed0oWll9TBCXQ06fkcLRWNwSTtJGviOrU2TaY0PIyClRzFeFR38sUH1Jv003ibUImrpJ4GnGh4GmVeyueIHXtzItpa1F1SklZI1P2nudT0/QqIUoBnisJ5Gncu3DGnFjpO0Ekj/AMvYDT7esyxOeMlSNc24YczFcgJCynU6bu/gP5dutDS8q42Rw/iOMb7aQR8oAT4DTQa6+PbrWDU4kgAChzx9aU2BrqAQfDXr7Q1xqeQAkcseZD+idEK+1Wg/08OsgpPHHxHFajCel8pPZXb7/h36+6K8cZGWhxmQ8pagRr8D/wCTrLQRjUZdXDCo2rUDx1/0/wBfWJUg0OM1YY3m21KGu06+fx/006wI5csZ6hhw41au43f1V4hpTzcGQfrI6RqZVZKZdh2kUfEyIEhwJH+cJPl0M3naYt72i42icgLPEVBP4X4o39lwCe6uD3S3Udz0p1HZdSWgJns7hZNI/EnCRP7SFgO8jEHfeZ7f3q29m3tOymwxu9ZNjCkNo9SLKr7FHqoPb5FJWy4AQfDuD1wr1R0fNbzSSFCt1E5VxzV1yPfxFa/Vj9v/AEn9S9r33aYHilSSzuIldDkQ8bitPqJBHtGKJc5wGz4hv5N2xEfnYBcvAX0FDRefpXwQGrJsIGpMcp0VqNVtgdwpIHXzpjfpdrmpQlsvMX9tf2gDwcf7XA8jhJ9W/Thbd23bac9qkJKMP9yzGpjfn5Z/ATw5c8dK/wCy3x97S+X6vLcr59dxTMMwSwyK1zMpEZ4TqlhsCPYtSZR3S5CYaAguKK1rCQVEq3E2Fd2VrHZpvfTMaiKcky6FFA1eOingb9oADPOmOfbLfWvNyfpTrOWkdsq/KiRyhIPxoXBUvQ5qCSQDlXFRv7smD8MUfu6yeD7fv04YYy0lxUaoUl2pizHVbnERHGiWnBu17gkAdulzcLhZDG5K/M6PHSgz9nInnh/6esJ7dZo6P8ispERapJWmdCcyK5A8x24hfwvSuP8AJuFwJy0Iiz7ysjSt+im1MNTWZrjLratUutvIiqSUnUEKI8D0t7lc/LWT3YFXhBYdxIKg+4kYsPYrZbzc4tukJWK5IjbvXUGI94BA9uJ08actVuKZxzHbvUzDk5dmzR1EmREhSGk1FXb2jsOHBYkMy24LVcl75S0G3CtRO4A6DX1TPezw2iwSPCvkeNlY1dmAOVCMqEA8QacMMnpn05tE15uV1uccU0QuCIVdAfKSNilKmtWJBY9laY95t7nswtELYYsJLLOh0aS8+4AlQ7JQXVrUj7k6DTy6RI9piaXVJqdv3s8XHcb7tuzQFdvjjV1GVABT3CmKxeWba3yPKn8tfS85cIZ/SZEpS1rWqO2467DLzJOsph5klsjUFK0bh1bG2wNHHHHBQRaQdNBQ5AMB2EHMfVjlbqfc3vrqe9vCWnEpEhJJOlmqrHuHCvIjLnhtUFlHdhvzorpdERbLd0xJSv1qMvK2szGNiA27EdcGm9BBaXoF6AjWfuG1PJai+g4Ctacu0H9I/XgXsO+3FlfNbzDXaZUY0IIPCo5MOR+o4/ZPkt0/LaEuRIkxHdEV87Q+i+lKdNAoaIDoA0Px8R0v7fbQqxEShXr4h9M6Yet+3m6FuJhqaEjwvxB7uGTDnXPsxt1N6hLaGJCkhQO7QkAA6dte+pB6crOSOEU5U44pDeLee5laYajU8M86/pxP32o8iZ3YWLHEsGnsssoM1mM10XHIsZ+xkOTpSwy0a+K0244V6q8Up7Due3Q/cOp7sg223sY4eBYfEfYeKjvG
feBliTtfpzsz3Ue8b1Cs18tCqN8C9hdeDt3NVR2E0I6M/Yv+17gD9vJ5dyfLscuYmNZBIo7LDsTtq3ITRZRAUFS6nJLGA/JhRrqsd+R2ON3069QslYKelQksdTGrHFlqqqoVQAoGQHAYveyDAcVzLj254xVEi1tfPr3IlNISkqcq7kx1tV9uZC9zzrgdIS6sklTZIPYAdfMfcc9+VUFviN5c4rkMcwbbGrOVT2MXaRskwnPTK20K7rRISEuJUdflUO/Xsew0lyNwOnyNpI11/wBumm9X2eA6sTo71G3npeZIJma52aoDRMalR2xMfgI/Z+A8wCdQpf1L9E+mOvraS7tkSx6moStxGoAkbsuEWgkB5v8A4i5EMQCjfC56qFJR8jemqlHtqB4FZ1+PgkefXWO1brY71YRbpt7iS2lWqkcR2qRyZTkwPAjH539QbFu3TG8T7BvURh3C3fS4PA81dTwZHBDIwyYEHGJJOhS3olAAK1KGm4fFZHgn4JHj9vRHgfF8XL6fpwHqD4VyXmT9Psx8VKRptBCR/USQFr079/gkeQ/n36+6GrU8cY+YpFBwx+DoGnhp8PL/AAPXzSTxx7WPdjMFjQeB+Hh4f+brEjGYeuS8fvxhkOpSNU/br2I1Pj/h1wjv/wD9Xb3/AM7m/wCI2P1s6O//AAR2r/3bbf8ABTG9ilv+fc0p3f8As0rlyIyEDXWZXEyAEgagqcY3J7dCcMeIEe5O4VWT17FkOHX1XU6/KgjQstkAAHT8Z8/D49b7f/FGNFxnGcdzPsne+o9nftYkA6+v7e+H3tfj6uBUK9f/AHbrZff9qf2j7hj1r/2dPZiT3UTG/Ff37odmKb2RcxWaiEiI7gSiSdAN/IuKteP/AL86I7YaXBP/AINsQtwFben7y/fjkStM/cuWUpQ7uTtSnQKJATpptA1/EfHqFMayHEmL/DHswJb2M9NJURrvKtCrXRWvkfD5uteNmA1bVag6tKEbSNTtAA8x8yfAanwPw06xb4T7MZx5yKO8YQm6p8uJG3uk6a6Egfd5knTqDtbB7JGHOv8AeOCu/J5e6yL3L/dGHpU45LAUS0tQCCooCVHUbSduunifsHRDAfF4Ga+wfjer/b7x73QXHKEyyzePh+N5ZY1FeupZosibQ6/XVmEsyJHqTI2TxIMtuK5J0eJcjoHo9jr7HsVFybqbPWzBQxFramK6FQ6SsQpqvYUAf+ZkKcKn7SwKT80mQpbg/oDaTsHsew8qNj5gVNpI08doPbz07Ejx69j2CfWN7NqvTQO+uobSO/nqSnudB17HsEOrKnNiQlKSPmI0+09hp4adex7BVpGlkblEqBCQAPEAePn/ALuvY9giVyQgAqWNT30IB08gNRp3PXsew6mHmUAEnXwGqTr3BBOn2afy69j2Mcm4bZUEo1JC1efhoNB49+5P8OvY9hLfuyEH5ik7B4+HzK76/wCzpP3ONTuEjHiSv9xcO+0uRtsQ5Ub++2KuvctcUCc35cfsCwb0UWEoxdTpH1IeeokKvUxE67twp46i4oAhLY7kbtFBJ4rdrTdGlp8xo24J/wDRZ109qg4Mrcbqu8dPxWZcbWZt4NwAtVIXbh5Jc0yAlI0io1NTI0yDEaZFnMMLSsAqabJGoGhU2knsfHUnoHFO8VFY5Dh9eWGye2hnJK0qT78OCIUoCNFd0vJB7jUJUkg/Huduh6ki6ByPGn3YFyWDD4e3Ci1IKUpB0UPSdb0A8VNqKwB20Omo6kC4WtQTxB/RiM1pIMqZUxn+uWpSdNNiktqT8oURqnao+Hckg9ZrKvM1AJxqNu/IY2mpagoK007nx299dfEeJ62Gde3ljA2rEVPCuCfxJhj/AC/zLxJxM0XUo5A5BxXF5rrRIXFqrS2iM3M1JTordDqi872Ov5fR3YwLm8RG+EsK+wZn6hgHvdLe0dxyX7eX24tB/dz5RbvPczh3BdIluJh3AeAUtezURkpZhxskymLGspXpxmtG0tRc
Yj1TbI0/LClhOgUdWDfr4xhYh8R8Z9rcPqFKe04A7BY+bWY8AaD2D9Zr9WIBwLMNNpBUkAfEj7O3c6gd+kz5lmNRww4C0Ay5YV13jam1JSpJOz49u4Omn2EDTXy6wWWUvVq0/pxINtEqZ8cW1ezT9y/hf2+e3jLMKz3EWaPLsDZk2uMx8Vr9i+WpNnIS0yJsshxETJo8hbaJj8lfpGGhLreobUyiwduure5tUjc6DEKEDPV3j9486+3hkK/3Xb7iG6aVAWRzxP4e493Z9XZXx7VuDvcP+4Vztj3vT9xF3kOB8XYfeMXHEOMUM6yopNg3XTUyYMHFHG3I82rxJLrKfr7QbZVyoFDZDR3tGxOYE1EacvCncRxb29+Z7hTAV40J8tc2r4m/QPpl7cdEllV11zWzae2hRbOrsoj8Cwr57DUuHNhSmlMSYsuM+lbMiPIZWUrQsFKkkggg9DVZkbUpowxsIBFDwxzy++P3M+7v2x8z33CXF3L0lWC2eKU2T4pNybHaHIMzxiuu3LSA5SRMnsYb0i7i10uqdEeVZNy7D0yA8+84kurCdRdURbPIilI/MeIPUjh4mXtpxXmDxwx9PdNneI2kLNoRytK8aAH28+3HPvyXR5hdzr6+v7OyyDIL2VNs7q6tH3rCxsrKY4p+XOnTJCnHpMmQ6sqUpSu5Pj0tW3VdvdziSVxqJrnhjvembi2tykSHywOWLTeZ37D3YfsW0eb2wXP5O9nGaV9FKnlAcnv02NWkPFXI6pKvzy0eMMvgTZO4n1pEEKOqgk9WKlxDe2ZmjaqslfYyZGv9k1PtxW8ttNa3IgdaSBqAdobh9vD2Yoc9vmVzaXJ4ciQ8fTcWgIWdU7knwB/lp59Ut6gRJd2TpF8QGLu9PbR0akwNCDgoe7tSbKsbtWlFRWyDu0Oh1SAoEpHcjTpT9O5Wt3MLcAfp9Dhj6utVFsHy44rNZkqW4ppatxGgQfHv3IIP/ZB7dW9PFVqrxpnhH256DLl9M8O2jjmRJSANyyUgefbXTX4hXwPn/j1N2+1BYU4E4PterDbs4NFp9Pdi3b20wv0rG6+MUhK7AOS1kp26sx9UNdh2BUsnX7uumvRbaj83dX7DKNQg9rZn7AKY4B/m56iK7NYbMpOq6maVv6keSj3sx+rEtUkD4DrokIccFeaBxxrvLSdd2nhp2A62rHniHNOK8c8Jvq+mvUEp/kAP9nWbRBhnmMa4rt4m1A0xJT284lyVnmQelhNLJsINe6j9VupTqK/H6lJ0VpOtpSm4rbm3v6SCp0jwSekDrS32uKzK3bKszg6VGbt7FGfvNB34tnoLqS8tbrzmYLZofEzEBR7zxPcKnFu0Dhi0TQRHneQMWTZlsgNLj2iK1x1HyqS3YGMVKQF/Lv8ASA/h365lvNoZrhhHG1OzKv38ffjqvbN423dLNbuOZVR+DMraTTI0NOFe7Ef+Rf7j44W2nMqkwIUpRTBumVtzKGx084VxEU9CcWexKFKS4n+pI6Xb3Zp2B8ipYfh4Ee40Pv8AtwfV/livn0EbcHBqjexhl7uPdgF2WeUUlJU3Jb07kFDiFpJ8e5Cj4dVnvUN7bVEimnsP6cNe3KkgBiIJNMRJ9w+O0WdYhbPNSYpmRorzraVrSncUoJ1BJ7K+zoJ0/wBWzbPu6AhgjMAcMk22G+smgcZ0NMUUcTSMeR7msAoczdEbE4WcQLDKHUKSCulp5P10uOd3bfMDCWgr4r8+u09g3NruyimtzWVwKdxPA/Tsxyl6g7THaW101wp8tUbUF4kcx7SMsWfe132uTOeeeORucM1ojH4qrs8l3tJQPoUlnIpqLJxzCcZYUUbXYMZEdqTL07KQhKf6j0VvrqXb52s1Y/MkEMewfiNO+tB7cUR1V1FFt3REV1tqobyVVjt1OdJGBK+0pT68Tr9wHs5oOcaiXcXL0DFs0a+pl1NguAn9KkzmhtZZyaNGZS+xVBSQ0lTB9RsD/huJGgjEW1pMk8CsYov9
2pAqKcErxH4ih8NfhKk0xUHpj6t9X7Zt8u0+olyZ9tuiVS7lVnnt6PQtcEU1LqPlxyrWdeDrKgqtDvI2E8icD5FY0ubVLlVZTo8mOzPYfZsKe7rpCtqpOKXUZa4cuvkNtgJUhQdCRtISdR0WN5E7apGDg0YGhHsBB4EcCORx0pFYfM2yS7LQ2xWlQQwKnmrDIhuOocfaKYG7PLRw9pNhHeLc51Okdsq2qYbHYuPNgjy8PP7+hm5G3EJdgDIQaDs78FNgbdRfCJGYW0ZGs9pHLBKge8ZFJhdjYuxvqbr0nYtIFjdCesXEFKJcpWmn0kRR3q1/EoBPn1Um7xxByEP5nGnZ3nHUXS243UkSvOGEHCv7X7o/SeWKwc0y/Js9srvKr/In3ER3FNWuW2r70uK5LdC3o9JTsOLWPrHe+xhra00gEkhI16A2+2xMwnvmK2teY8T/ANH1DtpiyJ+r4rby7C3Bfc5cgkYroTgW7B2VOdchU4sW/Zz4ffncuZhzZZNPSqmjxudiseZNbUW7W2yNSNzKIyylAag16Csp8O4PmOpsjxX4/wCXOaniOwcAP6MZSbKUZpL6JRDoyTiVJzOoji3M9laY6ZYOA45bMg/poZLg0D1VJWyrU/54M4yWlEfBLrQPRC0N3Aoo/uYf90tPuOKy3bZ9sacsqUP7p/7lq/YwxBz32YbG4hh8BczY7kK6/IsI5QcYbjOQlRn7LG7iu9C+qZrI9aLKhvtq1IS4vRYBIHj0ybZM0qz21wp8uWOhOqoB5d/uoMD2KWaxy27pqhbzACpBOgVIHEVIyqDiaOS4xj8jA8dTcV0NX6XkNBnWOMKcZkGDLfH1tW/o0tRjyUx5P4FaLCe5HcdVlezgTyGFq5FCae40/WMX9tsa3dnb/MgqqMkoFcjp8SV7RXOh54F1u9D3LcWtJJKlfMQNT49jpp3P3HoRQgdwwyCZGNCa4VsI5SoMcnqbnTI8eK02XpT77iG2I7LY1cedcWUoQhCfEk9b4LjT4W4Y13e0NeQ+ZCPEMJPuQkwOfOPk12KuSv0qI8ZSW220tN24Q0SzOkuJcD6lIc/4LKkhKUHeSVaAbhMSwoTor9YwEW1kstatQSaSBxqDwOXLL6uzFLtn7X5FdcKmiEpDReO/RrTQ7tSka6dh8dOictjHcxhkAqcLcW8XFpdmJz4AcsSGwTHKjF2GEOMLsLMusRa6mio+efO3pDMUuhJ9NIcKfUKQtSB4gag9WF0T0Hd3Ugu7keVYDxFyOXEkfo7cVp6kerNhtFq1paUuNzc6EQHJ5DwQEZkji2kEKOJGL8vZ1w1LxfFZM+03u5RlFgq6zGd8xMu0fSDHqUqWVLcgUkZwICVEgr+4dMfVW7xXUyW1vQWMKBIh2KOLe1vu9uEDobpyaxtZby7z3W5maW4bk0rZhB+7EDQA88+Qwt++nE3JHH2E0jSFaKyaTdPJQNQluuqpMZClJ112+tMHRv0wkC7pc3R5QBP9ZgfuGEX19g1dP2VgK1e7aUjuSNgKj2sMU8zqh2I+top/4aiNQNe2p8RoCnq/4ZCwDc8cWXREchQnLGzCk+mAhZ3aHtu11HnoFfiGn36dFI2y7MB7kA8KEd+F5uzQEhIJBHkrun7wrw8fj1uMhUVNCMCHtvMPhy9uEyZaL10+cH+kjxPw2n4HrQ1wDwIpiXDt8goSDhEW+66ohaSQT4nsfDx3DufHz160tIvxA/T2YJx2xTJhl9OePQbSDuCiCO4Cu38Arw/np1paRjkRliQIgKfT+jBKwrLH6iWzotSNpHiSARqO4Pw6F3lqs6EHPBWxu3tmHKmLBuMOVUvNstuvJUSEghSvu7g669VvvOyaSWUZYtrpvqggiKU1xKmHfxZ0RLiXE/MPDUHufEa69JElm8cnDFrwbnDcQ6lOMailatw7g+BBB7H+fj19FQKHGTaHNRhVQENsjQjXQE+fn8PDqOfE3ixMoFSgxrL2q/pH8O3+/rMBhwxgaHjhMlq9
NOiddyuwOngNO/8AHqTCuo58MQ5yqjLjhELBdPfTQd9PP+X8eianSO/AKbxsSvEYV4sZCBu26fAkePxJ16jySE5YlQQgDU3078eJLwb89fgB/Hv93XkFeOPS/u4TMTUF0Ncvx1ZHnr4dv9nQuM6o1I7MMV2qpdOD24cxc+XT4dwR1kAK40s+WExM4rc9P/xNQNNfHXwPj1IMYC1PDEUTMX0g54VGpygsoSr/AIY0I8v/AC6daHiFKnEpLghqDDxiuMRIctuxYkx7JxhqRBdc3hgIUEPtoMdKA4ozWVatvblNgaDbov1GxzK8kqtCVMIJDU4+2vDwniOPflQnI2hhgkiu1dLsqGQmtKUDAaePjHwvUrwy8Wtda9yW0cbr3ZJMVcCBDZgOoZTGkpjR47UdpxEtKG5LyFlkqSVLUlJUrboD1stLC3UuqeIO7FqmoqTU5ZgceypyrjVuW8XrLC8p0NFEgjIGltKqFBDABiDSoqSBnpphEx2lh5bHs5krKqapTDWuMtiZMfRYpkyfpm4k9bDcKWpyAlyUt1e38x1MV1AKCUr63311Jt7JFHbySahWoA0kCtVrUeLIAcgXU55jEfZtui32Ka4mvLeEISpDMfM1NpCuQEaqAsWNM2EbqNJIbECeZOULXD7jJMZg5QxKLBk01jMxq0lKqrJlTampUUPJEb6yNo6tpxK0FBIWO6e5srYdot7+2hvZYCtaOokUalPEGmdDwIINeHA4o7qrfb7aNwudrguw4XVG7QSN5cgpRlr4dS5lSCKVqMxniL9zm8zkZnjrC84sXcQwyDMsUU2QRKBhbS4lpLhwJElSHZVMzZR4lzElPSZTktXpOPyNBqkNFnisI9qe73DblE+4Mq60LmtVBIHBypKFQqhRUKnbqwpSbpLv0W3bLvUhtNnjZxFKsQIKuyqWNWjDhZFdncudJaSnALgI3tdEh2d7Jx/9SsMSjZDZVVLeyoykomxW3311n1UhtpEVFhKrG0vKaToQCSEgDo7bTSSQxpdaVvjErOgPAkDVQcdIbIH7cLV9axQ3Mz2HmPtSXDpHIRTUATo1EDSGZAGp76Y+VTZedSo9kp0P+Px7+Pj1uJovfiEzAceAw+mobrjeup2gdvLy8eo5AB78RGvyT4T4cIFi3s3o1UfHt566+enW5EBNcfRd6sq4RmkOdjoRofEq0/8AL1IKAZYweXOlTjMthS/6u/8AHTT79f8AZ198PuxiJD3kYwIhkHQhRI+749fToxnrY5Z1wqRoxGgSkk/Z5f8Am61swP8AVxtj1nlhdjwldiUHv/Lv5daGcDhiWqMfqwpJaKNewIPbw/1d9dOtdQceoad+NlKT2GhOv+of7usaY+5jjnh1Q5VLfUTuC5mhtdE96oqbN5sLOPyZClLcbeIClmokuq3K01+nWSofITtRer+kE3hW3GxQNuOmkif9MoFARXLzFGQ/bGXxAVvj0Z9X5+hbtdk3eRl6deTVHJmflXY515+Q5zan+G3i+EmlcXuQ9qFnjztk9GrW59PObWS2lsSIsiK8klKkKTuS60tB1StJ+0Ht1y51D0jJGxurEEFSeVGBHEEdo7Dj9VehfU3bd4slsNzMclvKgGZDJIjUzrmGB5EHLIjPFTk/irkjh2zm3WDXGTjC1MrNhj1dKnCyx4N92lUHouBUaOknsgbY2g0IQrRXQLY+pd22O40pr880UozUjdRlQg8CBkKeyhHBX9RvSHZ9xgbc9pWNtuGqQ+HVJExzrlnIpOer4hxqDxS24P65/wA+tEiY7M1kTGpKHG5q1rR6rsxMRwCRCsG0ArmQVBJ26vsAo3pRL3S0sr+Nt96fNbep86KtXhatCeeqInIMCQpyJpTELprc9z2i7i6H9QYjZ7r5SG2lkXSJYmAMYao8LFSCK0JGdMiBmj4xKrZ8K1rm0pm18uNZQiPmQp+G83JYUFg6KbWpABPmk9LquJkaGXON1Kn2EUPvw7Xu3SbdcrJHVLiF1dT3g1B7
CD3cjh35XEsaDKP1WtgtzMdzyPFtnUuJkerDQ+Uqfdh+ksNtTYMgKbfSpCwvaoEAjd0ZsoF3rZVtrkhdws2MeqgqafDXnpcUoeRwq7pvDdJdWjdLRnOy36eY0dTpo9BIQM/zIm4jmAcOxOBSZLXqoZU6laQpK0pOikqGqNvYg7k9Drba5iaFSCDz49+Nm89ZWi5RyAj66g4EmdcS2aEO2LMVzs0UrSlJ1cCSFp2kjRLrS07kHwB1B0BJ6bLKMxxeW2RB8J7+z2HFX7hvImu/mI11xsNLr2r+scsR8apq+sySqyIRHIaIzqomV4/CWW2rqA+n0pnotSEeklp1CtZERwhSj87au3RJb8wPWhZW+OOoGoAcRWo1DlXJh4SRkcS9ogtTOiTykbYxp5wUuYq8pYwQxjr8RXxIPGoahU7TOFWOQJyJ7EaC/wAoxWuL7iQmEo/pcBR/JQ688UxWnWN6UpcW5uA0OoHQu+6fv5v/AFntUUr2oIJYKaKW4KxGQ+vB22626b2S9m6e3q7t/KbUoDOp1qPxx1+IjkwFe2meD77Uf28uZ/cxIm3GMMfQ45RPgZBf3yRHxilbSpTj6JOWOPMUjzsGKkqdCXB8w2pKj1hulvdWNrH8wQJJq+EclFATXsJNB7DgTsm7bZvO4XEe3K7W1tp8bZVL6tI08agLU1pSqnmMTWz845hVVYe3r2NZhltQuPj91D5s95FfiGVelmVtHjIQ/wAW8UciY7i+Q13HuHtOB5mdax3WHrJ38oTm2ddy9htwTP2i/dDyl+3Rz/D4H9yCZ0L27+4t6vr6G9dkszcVxbLLApNFdV9vGMmE9Hvi+GpznruKTIUlaj8xPXsex2opT9K40WnRIaVo/HkNrStqVFeSVxpCHE7gpCm/h4nXr2PYqg/cHxaLVcw0eTR0BtWb4ZDm2SEAAu2lPIVWuyDtAAVIa0KleJI69j2ICrYWrVSxtbT4ADt2Om1APn8SevY9hYqqSyupkWsqa+dYz5bjbMGsrYr06dLeWQA1Gix0OPyHlfYn+Q6uP0g6qG07rJsl7Iq7bdKWUsQFSVFrWpyAdAQe0hMcyfzK9Afx7p2LqzbImfetvdUcIpZ5beRgumgFWaORlZf2VaUnE7ON/YjklnFZveYL1HH9MlCZX9r1noW+byGOxKpyEKcrcfCgdCX1Ovo8PR6trdfUSzgkNvscfzVwTTzGqsIP7vBn91FP7WOWtk9L9xvIvnN7lFtbgajClHn09rgVWP8AtVYfs4mfgfBXtno2jVwOLccvE6Bp20yx6fkdpI/pW8668/GjsrXrrtZYZA8h0g7l1N1TO3mSXk0bfsxhY1HcAASfexxbvS/p/wBML4GsIJY8s5dchPea6R9SjuwNOZvYBguXwZ1/wZK/s7KW0uPjCLSW9JxO8WNVehTz5SnLDH5kg9m0vLfj7iAS2O/RfYvUrcbKRbfqEefZGg81QBKneyjwuBzoAaduM+qvQXad1t3vejH+V3UAn5d2JhlPZG5q0THkGLLXIlRnio+2pLXH7Syor2ul1F1TTH662q57SmJkCdGVsfjvtK7pUg9wRqlaSFAlJB6umG4guYUubd1kgkUMrLmGB4EH6fXjlq42+6sbqSxvo3hvIXKOjAhkZcipHIj7RQjIjDYsXA2hZGmvloe5+Gg+PXC+/wCe/Xp/8rm/4jY/WLo8U6S2sf8A1utv+CmG9j9+xS5vi78lQDbl1EiTCSNGYlgow30du/qlD+p/yj7ehGGPEMfeZWu47kM+reV3hTJkEK1BS4y26oMOpOo3BxkA+Pj1ug/xBjTP8Bx3R+xYlXsr9pSj4q9t/CxP3njvHj1svf8AtT+77hj5bf4C+zEquouN+Kyv3h5C4v7eHPj7ZKVoPG+hH/b5VwlB/gQrqdt5pMx/8G2Il5nD/aH3443cDQqay0pwlW5KU6knRJBHxOnfqJJ8Zxvi+AYL0qgC4i3EgghBCQfgBu8vA9u3WGNmAtY06jIdC2yC
nUa6kBW4k9gNPmJ/39Yv8J9hxsi/xV/rD78LmP4m1JPzIG7XVXykggeOmuuqtD4/7eh2zqV29FPGrf3jg91VpO+TFPhon9xcHeiwqIqIlRSnVKRtIGmn3kjTXTx6J4XcI2Sv2bNUnHV3Vy5QxZjs6NjzlvZOUMaa5rvnRaVySqrYlrHcuIaC9e+up169j2BbVRWTNVvSlQKj5aaDd27Dx1+z4dex7BSporCV9tDokH46Ht957Dy169j2HYuS3HWkaaEbQQdCNdNNTrr17HsOeosU+uhKSNNv8++up8fh17HsF6pkqDYUrsPDx8SR4qHhuJ69j2FZVuULQNxBJ8BqdAO58vLXr2PYWo1stSQkHXUjzHYfEn4aHv269j2NeTYEAHXvtcVqe+nbQaE9u2nl17HsIEy3IbUArQJQkglXj3Hl/HpV3ID5xzzqP7ow5bWa7fGP6395sVj+4C1x9/L+YTaLbN+usxiFipWrV0L/ALXhSbsMJ8dRVMq3K8h21G7RS3cNbm33QyZ3AXbxH/8ARZ10/sj6Vwxxpuo3TYEsi38MMm8G5A+EgbcBBqPICUigrm1MjSoj+qxfqX0sKWpOiWiCV9inYPAkD49QYo1lQNxODs7NbylQchh0V+WpDRKnO49NRGp+VSTsJBPj+LrXJagsAPplj6l5Tichh0xspaUsblIKUSeyFEahDh0J7aa6hP8Aj1itq4XL9n7Rjz3SE54UU5CypCNVhJQpbZIIIG1QUAfj+P8Aw63i2evHI5/XjQbmMcs8KDV6x22u7hr4dj4eHl27nrNbZye76fXjSbpAM6A4nf8AtdS4dz+4l7aoElKJDRt8+lhpaRtD9dxNn1hEcAP9bEqKhaT5FIPTd03bGK619iP/AHGwodUTh7YoBxK/3lwge9/LnrX3y+56dNfWqQ1yhZ1IU4okiPj8OBQw0gq1VtbhVqEpHhoNB26l7/CWu27AF+xQMQthuPLtlUcc/tJ/XgCJyNtI1U8o9htAPY/+7d9OgYtwBmKHDGtwWamMMjMWITKnHHSEpT8w3abQRrrrr37n/DrfDZ62CgEnGM1yI1JJyxcl+3H+2jac8vUfPvuKo5dVxIlxi1wfj+xbcjWHJIQpL0S7yBhaW3omDr0SthhQDlqnRR2xCPqXCzsksEDyis5GSnl3sPuH15ZFJ3Lcnu3MUWUY4n9A/Xy5Z8OoBtuoxyoS20iBS0dLACUIQliDXVldBY0CUpHpx4sSLHb/AOyhCE+QHW+ryvnVnY+84EgKoyyUYq84Z/ckx73Ce9uVwNxcivsOHqHAMyeezp06u5jnNPbY6GJWOO+oltGK19aZzbbpCjYOOB1G1lDantSTwSXL2UXilSMsSOFQyig7QATU8zwyFSXuNmurTa03S48KySBQvYCCanvNBly551Aktzr7LPbv7i81gcgcnVNpYZPXY9ExeNMq8wu6Nr9Ggz7Kyix3YlXYxorq25du+r1FIKyF6E6AARb7Z9v3TT/EbcSlRQElxQVJp4WHMk+/Eex3a/22vyM3lgmpyU50A/EDyAxzWe+jDePeBPctmPE+EVjkbEKmhxibDZsLGXbvl62qGpsta5896RIcBfWQAVaJA0HVJddbLb7VuoTakMMZiRtIZjQmtT4iTn7aYvjoK4u982c3G4N5sgkda0UZClPhAGXswcvY/Nq7/wDb1/c+xVbDRpK/jrM8kEbTWO3ZzOKL9P1aUEBAdbONxlA+OrafgOnjou7un6TvTLm8azFffAT96jCJ1jt0MHVtjGookskQP/jgPubHObXoj1k2L9L6fp+olTaiACleuo07hQGo+7pDnu5buJvOqW54urbdqhsowYcEvlmzF3x5qC2p+PHKSANTqEaAj4qHw+HQ/p1GtN2rTwMfp9OWInUlsJrE14jFaDSj9U8hwaFtxeqUnz3HRRGmg0HV0BEajduKkt2MTlSeGDPxvVvW9pChx2itx99phJT33KcWkJGoAOgGv3dM+12oX8xu
AzxF3S8ItGWP4mNPaTli4rCq5NBZV1IE7TUYxVoeHgfqZyDPc3dux0fA/h11X6QWBg6de5YeOe4dvcPCPux+bX82O7pP1rb7VG1VstviU/15CZW/vgewYMYfJH2/6ff1b2gDHJRmYjjnjVdcUde/b7P9Z7dbVVaU54iO5Jzw+eJ+P5fLHI+K4DFkGEi8mOLsrAEa19JXtKmW8tvd8vrpio2Na9vVcTr216H71uKbJtM25uNXlr4Vz8TsaKPr49wOJe2Wb7lfR2aGmqpJ4UVRqOZ4ZCg7zixjKs6quOWGMNw9tulxOgSIkCohpCFKEb5Fyp7qUhyVMmuAuurUpRcUrVRJ8K7s7OTcIDfXvjvpc2Y9+dF7AvAdnLFabhu28XnVLQLIU2qGQIsYyACGniNPxcaAkHiathwcZcw1NvaRbHOMlhUmL1khh+XFsLJiHJtUJ1caroLL7iS6qStKQ4rslLZPfUgdVf1Un8JLOpIkaoBArTvNBy5d+O+PTq4iaxtUukeS1UKzhfxAZ6e4E5E9mCha8zVlzYSqmri45Y4zlcz6FrELJ5WQUMxpXyqhPQrGKyXUFpW5JBMhgfM2rsD0tWm47beaLeZxJMMtXwPU8wR/oPMYN3vVe67Xdz3lhCkW2M+cWrXHT9lhJmAeWpQQfhbFenuG9pl5bN2OU+2S9OM5MkuSpHC+Y3SlY9cOHVSm+P8AOLBSV1MhauzcC4IZ8EomE6J6J3uyWO4xFLhFnSnxqAJV/roMnH7yUPamC/TfXO23s2mylNjflv8AAlYmFycqRSnNCTwWQkcg/LFCXMnuj5f4xt8i4x5OxXJ8DzKEFxbHHcvrJVXaxwSUh1luUhCZcJ7xbfZLjLqSChRT36QZPSrabi7W+tipQGoI4e/9WLeh603K1jazuois4yIORFez9fPFfWN5DaZfyRUKbkrQ/Y3EVtb6VELbZXJbU7qdRqjTx17/AG9WxtVj/DokSIZJQ/Viseqrhb6CZrjNWQj6xTHef7eMz40Y4I4TlYxVwpVTjTDbU/0Egi7yeEPpZ02WU6eotD41IPglPUW4uJbvc7ozMfmJEyP7Ipwxyh1fPtmyx7OphD2u23aExmp82VifE2fwrXVThRezFVn7lfvzPtzwuXj+MvO3PMPI8iyYwbF4LSpDlW3LfcT/AHDaR4yFvpYaWvRhOhK1AJQNSR1PBWRtbAHQACTwLACi9gC8W5k4rT0x6G3Hr/dX3TcpGi6LgvJZVGSh6yMXlOVWJNVhqdKirilAcVne1z22e9KHhNu7zRxNyXyXxJyjHlZWxhLlHNyO8wfN7aUJTWV18FyVFtuOxdtul6VDYcdLzKQ6uJuGvWG3Xj3F7Wayu5bBJlV5GpAXQjxy2/mCs/lGikhQr8FY0xcW6eqfTgum2T08ms4ZLK4W2eWSosoxpJKalB880FCFKuWJdSaEYjX7gvbHyXxxMtMlk41OnU9M+xLlvuPP2EGqZ9YuGmy1hmFEuKOY00Bt+qjIakJ7ocWNdGPeOibu8sLnctluhMIVDBGAVip4grXUrLzBAHPhnhv6B9bukNy3i26e3y2Sy3KQnSvmVW5ANBcWUlDHeWzEGrQyNJGarKitUYg5nT+a2lO1k0KknRsYsbJyqsckiRWzjdIkbS6062wooiq2L0ZBSgK8AVdVNJ01vFvZtud9bv8AIq1K1DampXxFSaD2nPhjqD/N+wzbkmx2V5C3UMyDy4fgIQ5AIpAqebUzUZ8cNuXRvcg3WNYXiTH6VilYliJBRZrH0y5jykGzyad/656W6C4GlakBKRqE/L1XO8bnIw8upLkeI8AexQPwxr3ZseOLg6U6Yj2SZrmR1ut+kbNxmkI7a8GccEUfDxOZx0he1eTgvEfHOM4LiktpyNVsiRPnOrBl3NzK2rsrWYskrcekPdk667WwAOoO035gcRhsuffiz7vbA1iQV4j3+/vPE4snwfPFWSo0WtC5Up8pS1HihTzzivH5ENhSyPuHh1Yl
veo6VemKQ3vYHadilQa4gD7xY3M/uS504z4npOPeQGMFxy0aivZK/hmTM0DaUyWZ2X5FLuH6xusbixocb6ZgF0lZJ08ejkN1b2O3yXDlQWFSKitQKKtOPeTwxXN/sV1uO5QWcGszFwikBtKoTWWRm+H4RpUE1OJDcg8w1ELIG8HrJLZRSJYcsUIXr6L/ANM3FroboSdqXo9eylTifFKlDqs50ZzU11HPHQNvKIYAseUYFF9gywP8jziK3VS58ue1BgRGC/JlyHNjLKAO2qtCVLUrQJSNVLUQACT1sSxMgyGeBsu+iBqu3hB+ntriFd4jkTkzKq5lirtouFszWpLVB6hrrHIg2sONTL6QtKxXRyBuZjFKvRHzupK9EJU76SM3Qgi1NGDnp/GewHkvacXP0rPTbXubgiKYp4dQr5deDMtRVuxaj68W7e2CtrVY7HjZBMgthO4CKJSVtNtqKtIrb6ypcj00nbvOql6anuemHbbO+3JP+Vgcr7DT6zhI6wu7DZZ2mvriNAQMzQV79IIzPZyOWP3P+GRocR5OL031K5uobnPIKIrK1A7S1HbAkSVKPgCW0fEnw6vX066bsbe4FxvSJPMuao+cakfiZfx04gMadqnHInq76i3cdqbfpWNjLKdPmkHnl4VHiJPdp9vLAU9vPANnc5Ym4uVuuPwn0h+cptIarWiolcaHtSGEzVp7BLYCWgdfxanqx+rOoYo7YwW9NBHKg1dgAFKIOQAFfZim/T7pLdNz3P8AiO7u8m41oWY1EKn8K8g57FyT+tmL/uJmK2trIEKO2lpiIw2w0kq3HagABSlHVS1q01JOpPVC3kskkrSP8TGuOtdvs44LdYYxSNFoP1nAE90WZUdzNkVKH2XhSRjASUqSoCU4A7NSD30KFbEn7Unqzeg9vubeEXBBAmOr3DJf0nHNvrNvVncXL2hZT8tHoHcxzf8AQD7DiprIoEeRPfcZSNCpeigNPMkd/Hq+bMMIgG4Y4a3q9QXRaM515YH71cA4dye2p+ZICVaa/dorqbqKjw41W8yz08wcezHwV5UNG9FHw00+b7iP92vUWW6YcagYbtv2qOamkVOMzNC+5qUtn7RtKh4nxGmnbqC96o4nDla9LvMlVGPsikUyCVtlJHfVI1B7eaT3H8Nfu6yivNRoDXGq96akhWpTL2YbcxtTYIQndp2O3voND4jsU9EY5A5FcsKd5t7RDIcOz9WE6M9IQoFtW3QjUeKTp4ag9u/U7RGwqRgBI7J4RwwVcSzGTWPNauqbI01KVfL8ddiv9/Qq9s0kB54K7ddSowrke7+nE0MH5ULkVptclKvw+C/s07g/MOkXcdmUsWApiyNq36eNQjNVcSMocqbnoaKHvHQ9j/iRrodft6U7mwaImoxYO37us4HizwUmZAdaB18Ujw0B+P3dBTFpNKZ4bEuFdK15YygjtqdB/j/h19CgY+lzTLCDYu6KISTr4nx+GgHU2AUGBV0TUk8MYILZccJUdQNNf9Ph262SvRaDGiCNnbuwsOaj5R4dvL/D7OomrmcT2QrlhDmE6KIJ10/2+X2adblP1Y0MDTCLgT5dxSpWR3LHfXXx18Ph59DbUVt0P7owd3eibjKnINh6aKWPgPt7n7PAgdbcge/EMAsMsIYbU3KU/rps7Hv4/H7epNarTtxHowlLjiMP3Cqhu4nbn4r0qKQ8l1DDvoLSt1pxLCg8W3UpU09ooApIO3QgjUdDNxnMEXgYK+VKivAiuWXEZe/BzZbVby6AkRpIs60OmhINDWhpQ0IyNaUocFN7BQ5YRWHl2yqKM+sqKAXpkKo9cuSHWm0tOsIdCVE6BAQp1XfTdr0EG6aIGdRGLph7Az0yBzBp76gezDaenWa8SOQzttiuSaZukWqrEChUGhrwoWPKuHfbYhyQiC3lPG0LFrW2q61qlTHt2YLzEyNXS3nIMrG585aK1lp2DJDUoLcZWDH01ClrbQIg3LZzKbHemnjgkcvVCwILABhK
q+IkMKrQMPF2AMWq72DqoQLvPSkdlNewwiKkoRgyxsSjQO/gAKNpkqUP5dKgsyLGo4dX++Hj6PkWL1VNxxyBiWewaDMZtdC20t/jU56M5Y2UYIShqZa18CQp5tLi1LW5GU0pYQ80UNx3Kb003VrK8kkvNpntS8QY+OORQQqnmFZhQ0AADBgKqa14mwWfrv06m7bdbwbZ1JaX6xXLRrSKaFyC7ilA0iKSwDEklChbS6aR/kOU57kvLeU+072mcQ8TU9DxuxEg57kXI1FSWEvJIte5XVVnZ3rFw0qRc1v10sNKUlifKmo/MKktuBAmW1nt1pscPXHXF/fSXV2SYUgd1EZYMyqhQ0RqCvFFQ5ULCuIt7f7zf9V3PpV6U7RtMFhtyqt1NdxRO0ypojd5RIKyJqYKTpleQeKoVtIj/wC5L2zKxXjStwyWl+bmHHdC7HxOqxmA665eXdtlGSZ9ntxHqERLG7i8eQ6yzlwq8bGlPGDDkF0hFo1DZukur/nd3fcI6LYXctZGkPwIsccMKlqqhnLKrvxprkTTnCZEf1E9OTtXTkWzygvu23waYEhUkySPNNdXUgj0tILVUeSOLJdXlwyFjpuVhg5hozHkvE4PEVJX0NjLrJlzkFfMvshhVD0GoRBrp1hCpkXlrW47Xx4DdHJnzJZSZIZdkKLrbSpHq2Pfix2m9bfLh5UjdURgiFgWLMFLaFaRi2tURfhqEyLBNNKbS279SbWnSllHA8sbySq0sqxlY9Ks6x+Y6RIFEbyyPTWFaQllUyamRUVi217V6fK4Uq2rStJKTt0StsqQpOvgQSCPPo4XWlR2YR7iNqFew07ftH0OCnWvtVzkOYhqK65DfZkoZmRWJsR5yO6l1LcqHJQ7Glx1qRotpxKm3E6pUCCR1DlUzI0bFgrAioJUiopUEUIPYRmDwwPjDW9wk6KhZHDAModSVNQGVgVZTTNWBBGRBBxJX3oxMewzL+GmsOwnBsdjT+IMB5EsYtbilUI1pkl89aqnqs25LEgTqxaYDaUw3N0cJ1+QlRJRfTyS73Hb9xbcLm5mddwngUtK1VjQLp0kEaW8R8Y8VeeQxe/rLbbdsm87GuyWG32sMmyWd46x28emSeYya9YIOpDoUCM1QCuVSTjfr7fCo/uoqqaz4m41l8fqxOiftcNjYhUx4r6XOKo2aWL8OWW1zY1m9dvuKQ8XTo3taILadvWiW33J+iJLmC+vF3Xz3CzGVyR/zJhUEVoV0AArTjU8c8FYLrZI/VWG0uto2x+nzaRGS3FvGFNbFblyrULK5kJIbVkKIaqKYeK+FMPwfOablXj2hx3k3228qYpn9xj/APdNHGvXsMyCgwLLclj4ZduP+tMqrWluaoBCvUCpbDC2XC4pta1QB1HuG6bbJse7SzWXWFjPAknlOUE0bzxRmZKUDK6Nnl4SwYUBABf/ACPs2wb7D1X05b226emm62l3JF58YlNtLFazzC2lJq0ckckeRrWRVZGLFWJYnCDGP3nt091OYXOBcfWmU4C7xxYYlbzMNpHHqlzkLLLeuvGWmURURXIkaPHT9G0tCm4nZKEhACQT6kN1bdX7HYW91dpZXQuFlQSuA/kRKyVzrUk+Mg1bma54CdEJt176cdV7ze2G3S7rt5s3t5Gt4qxm7uJElAGkKVAA8tSCI+CgAAYdXFuMYtyh7YuVaaBhOGRuWOMGanJY2Qxsbrk39/x0hxDV4lcn01qFpSBlbz0xCUPONqaQSVKWswt8vr3ZOtLG4lubhtivS0ZjMjeWk/4Mv2XqAENQDqPAAYMdJbPtnVnphullDY2S9WbZ5cyzLCnmy2laSAn9uKhZpAAzAqvEsS1c8RW41i3GOIQ8RwhrKaOgp8ky++OLVLlpY2Vo6q4x+htFusLalxq/Fn4P1yXEb5Mt55EjcW9Opm1yTXl9e38lxcmylleOJPNfSqr4ZHUVyLSB9FDRVClKVxE36xg23adr2mGzsRuUEEc1xL5EZd3c+ZFE
5IIZUhMfmgiruziSunEpuXuPKrG8rjz8f4x4QVgULhuBmuTUUhOFwcmkSX6GXItZFNARLkZzGkh9xtUR1EVcNtaAVnYF9JnT+7T3liYbu93P+KtuLQxuPOaMAOAodqCEilQwLByDkK0xYXWnTtrte7LcbbtexfwBNlW5nib5ZJixiYyNGgZrpWqQY2EZjBHiOkNgO8DYixk/t65unVHHGJ5jyBhtxxxGwh+biNXdXJGVZMuNdxXUyWVC0SqA0sNB4LUynXYRokBj6pvzY9W7ZFcXk9vtNxHcGYCVkT8qOqHI+HxEVpSvPnhI6A2Zd09Ot9ntNttL3qKymshalreOWT/mJysqnUPzPADp1VK56eVEDmzEeFMe5j4Oor6oh4Q47U4Gr3H45jjlgKbGrmdKhSL6NCAkTXIL/wCkPlUluE462wgoLZU7uKpPTe49SXfT253VrI1yBJP/AA+SQLrkRQQhbIBhrHhLgFjWtFpSB1vs3Q+3dZ7FYbhClgxhtP4zDDr8qGV2VpVTNip8s1dYmZVFNNXrV68+4pk3H+J5A7bcb8aZxwdc5VCmcVcgcbwsaYrMex1yVZIax2ZZUsP9RfVPgSGApyz+rS5NY2pku71HpX2m22bqu5jhNzdW3VCQEXMVxrZ5JKLV9LnTQMDlHpIRqlBQDFobl1F1n6VW017bWdhfen0t0psZ7PylihhLPSLXEusMylfFN5gaVSBK2o4E/uf4J44yLjv255HxlgfF+IZLk2Kozu5bjYRi0K5yuxgS2o7Lc+IqO7HmVB1cEqAwPo5Bc/MbUgNhNZ3nSDXF5uEF0rutrc+X5igsimlRVqVFRwLcsuNcdO9L+sVsLDarjz/Kk3OyEyQSSBJCtdLaF1UYqcjoJpkRQHAS93XA/DHEfKXFcvEPax7dm6nJOGeP8u5DxWVxvX4vCzfIrpVsu9ZZ5HxlELL8GtWnI6DFeYfTGacQ2o7NqytFaLeLN/mbNXa3icoWSrOtD46LUa1ZciK9tVYZYd/4b05vkbm80C9kLPoYrGJKrWMl9JKOr+LVSpp8SnPETvd1+3FTYf7uKfiX2+RZSce5LpcGzDDsayaZ6yqOHnkuRBkVX6+pTiQ7j8+C+8sSVKDsDYS8uQla3vfKLOhvIkotfHpGQ51C8geYHCuVBkMYOorqxA2TcZGlhp+SzGsqctDN+NAQQGqT2mubOb3x+1biBHHXt15d4TxSfinFeRUFlxlMTNpnau8puUOKbSfj8jJ8qrVx477NhyrQw0WchooLqn478glanlFRnYrKU37mAAy6RrTk6cv7SnIHvplliufUPfIrbZ4zdSFbUyMYpQKmKXn7UkGbKKnIkA51NdJiOEYp7BOJuQ3eIuFHeQ3+aLfjqwy6fxzjl45dYjSYvKsILUwvw1sPTZLyU+tN2CWst/M4XCtSmaLZotw3yWytneJRAJNLEjxlqEVplxqK1HfSmKd3Hrm46e6CtN93SOG5eTcnt/NjRW/KEZdHK1IfMUbTRuHhqCMML3Ze2/iK09rnDHug49w2u49tc1vchwPkDCaGTZzMTkZFTKtVQ8nxWNdTrOxpoljGp3FSYnruR2VvNobCChanRN1bTWV9Ptl3m8YBBNK0IGRpkeORHHjzw67JfWm/7Jt/U+y08q5d43VCxQshI1KGJZa6TqU/CcqZGtBeb8Nzru5RLoKyTFsWn0hidCZR3dU6EssvNOsvRZiXnNE+k4haVk6bTr0oXl8YpPKQl88lzJ91M8X1sPSgurcXF4UtxpFWLBfrHPBawnMMzxPPqPgHMcA47yjP5tpBXKwJniCcxkEGMtDbzmQ5owq7xzBK2HFhOB5T0vXXttYUspSXDYn6mvwNllkuI4AdQh1ZKSKmRwckWn4jnyAJwmb9snQCXC7ztS2t9dSLo+aVBoIU08uNxXWQ1clFMiScdNnt+4MjXuD0VVys5XZfTRWAIHH8CujUnFlVEV3TDTh1VFqaq7bKVfmiXHLD5P5jS/Ej
ep4xBufyqyNKIo1XUeFfiOnhRanLt4njiR01BZw2LyWcaIJZ3ZtIpqYUUk5mpooFanIADIDFo2GRkY1RwqnHW2serYZajwavH2WqKshRWWglmPErqhEKFFjtoACUNoShI7AdLuGLEcfd37E/b/71uNLDjjlrHpFQt6W3dUOeYMzXUuZYzfsFTsa4YdEUwrhAfVvfYkp1f07PNr0WPY9gce0k8y8LOse0v3I28TLbfEoe3gnm6IHI9fzPgFcwj0mLGNLUZNbnuORNrVlDUVaKAUha0ncfY9iOvv1zGtyLnmlwqC9+o2mLYtV06KmChyZPkW928Z/0jUWM24+6+UFICEgqJPYdex7HnjL2SZnkrkez5SsEcbUzgRITSOJalZtMjkBSU/pIKmqFtaf65Z9RI7+kevY9ifOLV3BvAlSuJi0WqqbEMluZaJe/WctsRpooSbQI9dllZ8Wmiyx8U9b7VtFzG/Y6n7RiJf2qX1lNZyfBLGyn+0CPsxCrmr9ybibji2tKmZJQlmpQGnHQHpsizt16uIqKqurQuTOlxmPnfVr6beu0qB79WdcXtnt9v8zcOATly+z/AEYR9s6Dt5br5QE6EStFUjVnSg7xzJr7OeIOVv71PAsTLEM5HiucYuw5LQJGSQKZM+EWlHapyzpotlNmtADuVsoW4B+JJ6X7jqUzLpUN5fImnD7/AKcMMs3p3YwRCWzXTMPiq4Or20UAH6HF+Ht+5TwXm3iODy5x9m1DmmN2kxiNWT6CxZlMp0UEPsWLQUJNdZJUrQx3kNutkHckdYJuRkmWGgKMOPGvZ/TgDcbItlE02YdCMvp9mIH/ALlOF1cK9475PgR2491l0axxrKPTCQLWTQxxKrLh0J03y0RNWHF9ysbde46vL0q3KV7W62eQ1t4Cskf7oc0ZB3V8QHLPHKP8xewWsV/tvVVuoS6vVeCan+8aIakkI/a0+Bjzyripi7lyYiVEt6PqBUkE6eklQ/GR4B069gfDx+HXP/WNm1h1Tf27cPmpGH9V2Lr/ALLDHWnptuUe69BbReRmp+QiRv68SiKQe50bAByO0nMufUNdlx32pYIUe5jvJkDz13ao8delvDthi++aNPuS3bMpClTaqpumHP8A1ke0q4stwEjsVNyFL0Pw62w/GMaphVDjt3/b6toV37F/aDPgSWpTKvbhw5GdcaWFhubBwKihWEZwpJCX4c6O404nXVLiCD3HW29BFy1edD9YGMbc1hXEwOouN+Krv3pLCLD/AG6ecYr0puPKt7Di+srG1EepLmr5VwySWGEEguLREiuuqA7httSvAdTrAHW55CM/oGIt5/hAcywxyB8X1kv6aNvfQNAkKSfxHyIGpAI7DqJJ8ZxvjFEGJLGBugFtbyN+0eJQnsAe408R93WGM8CqfTn1ndXmtQo/5fv/AA66A6/d1i5ohPdjfbIZLhEHNx9+FnG69IUVLlIGuvht266nQfEpIHn1otF024Xsr95wS39g+6SMOBCf3FwXofoQYSSqSFgo08R56nRJA7dvPqTgNgF53ZNIeWpDyVdyD83dPj2+B69j2BpR2CDNJW8kAnTVRVqogHt4+WvXsewYqa0hoRqXUbiOw77jrpqPM9ex7ChJsGFLCkuJOmmu49tNCfidNo69j2F+iltfUoUpaQnd3P3HXXQ6DT7evY9g418+MpoIQ8k6hJHn3I76dzoNevY9hZhRDMXuSdxUNqQO+h11P3akf4dex7D4hVPpIHy6K27tR3OoSdPAd+vY9hEtK9bba1bST6a/AaKGu74+I069j2BRczFx/UbVuB2pT5gDXb8elfcx/wA2x9n90YctnBNkh9v944rc5onY67dc6vzQyrIExMHhYwXSPqfqXKKNMtzCQSSAuqhlL6wOzeidw3bVAGntY7TdIJgPmZEsPLyHK6rJ7PBhgaz3iXeunLqydhtUMu8G5UMQGD7bpg1D8QExqATkaNQlQVD+Wj6iriT2kkKLDfrHxIWlKQpJ
V5FKwR469CrNijaPwmlMMl6gZdf4hX78DmHbu6uJJV8qdQnUjslaCde3iAny6LGOpB78BC2dedMONu+UAtW4hSm2HB27nQIB7n/v9eWEZEcan9P6sYM4GR5jCmnJRq6krHZTTumumgdHdXlp/wAQdZpGPCeJoR9WNZIodJPHG8zlSe3z6qG0abgFd/sHiQepAiAPdjUa8K5YnX+2LyfBw/8AcO9rF1YutIjTM+nYokrVoj6zPsRyTBa1AIVpvcsMkaCf+0QOmPYgPP0niVYfWpA+s4W+oFra6hyIP1Ef04fn7pNO/wAWfuH+4qok7mouR31Hm9U6tKkplQ8uxaktZLzSVD5m27l2WwSNRvZV93UreIyziQD4kUj6gD9oOIeyZx6exj95/RiCszPI0VG5x8aBKlEJV3J8RofAfZoPPoRHCXNFGdcMRdYkLNl7eGOin9rf9qa0zw477lvdljr0LFR9LdcY8MXkZTT98kbJEDLOQ66QkKZpzol2HUOpCpXZyWkM6MOs9tbR2CAkA3J4D9nvP73dy555YUNw3JrpzHCaQ827e4d3afqxL79xv90+64duZfte9kuMT+X/AHONxFKyE4Xis/P4HE9ZE9L6ltzH6OHYm6yxDbiEiIptUavC0uS9SUR3Z0UOeqYF5iK6c8u9qfYPee8WFLLWoSEZV4VPYP1/V3TO9nHuBuPfD7eryk9wvt25J4izBykewrlrAOUeNc1xDFMri3dfJgT7HCrHLKiuYyHGryMHQ7HbddmVq1FmRqCxIkY3EXyzrLHkDyPEdx7RjBH1EpWpU8R9+Ob39wf2Lcift/5g5yFxlMyWx9vOT2C4dDlddOnoueOJtk7+VhuXWENxuT+nvKIbrrFagiUAGXz9QEqfUt422XO/29mQ/i0k1U+3jpPb7jnQm1+leo7e7iG1bqqM44agCGA5gEU1DmPeMqgVnTOes17LRyJnOpGvy5dfafb3/UdCelF7ndx4RPNT+u3664siHbenpKMbeD/UX9VMMu05kmy3H7G2urG2sFMpacn20+TYS1NtDa00ZEx154obT2AJ0A7DoDeWVzeS67gs79pJJy7+OGqxfb7ODy7ZUSIcgAB38P8ATi6H2ZZSjjr9mD9xrni6KY8XkRWXcZ0jj+rbcxu0xOi49rpMdxW1KyvKM8fjpA/8Zkjz06sPp6wNl05Okgp5gl49hjEa/wC0SPdikurblL7ryyihIIhMTGnaJGkP+yoOObTHuQvriwneStsggKUk669vHXXQa/Dx6Rb3bWtwzafCfp9OWLfsNyjmVYgfEPpng2PZA9Ox+TFXuWlxojTdoQCPAADQgH7+he2wVuw541+n07cZ7yR8m6GlCMRDcppr19JZYZcCFvq00HYaq0IJAUPlB106tuyTzI1J4imKPuT5MrcMziyb2i8QGZkddPmslxTSkfSpUkkKkOaIDhJHg2Fdvt6boUIjEcfwmmo14Acvfha3G6FQn+8Hwj945V91frxNGHIbmcgZ3JaOsdq9l10fTTaI9YUwWgkjQbQlnrsroKz+T6atIaUfyVJ9reL9OPyn9dN0XdfULdboGqfNsgPdH4B9gw/myT30I7f7dfHp0YCuKUJPHH1adf8Ab/p9nWagd+NJNOOCtwPnjfGHLGKZfJ2ivYcnVFotSQoNV15FMF6QQde0d0oWT/SkE+XQHq3a5d76eubC3r8zQOlObxnUB7xUd/DBjpzc4tq3y3vZgDbgsj1FRokUoxzyyrXD99znKkZvMZNFiLTbE5iOy5c3qNVJWZKPqIrFfHdLzLchEdxPqvp+VR02JSdT0qdI21xcbRHcXgKqagJzqDQknIgVrRePacGt/wCnNltOoXngVJQFFMvCxPiDMtSCyggeGitxKg5YqX91eU38bDfqW3pEsgrLzjrjjzigrUnVxZUo99fE9ulL1S25jtJa3Gn2Y6R9B91T+KfL3TcMgK8B2AcAO7hiEvtx93/LOIcmYJicXLLO+xiVyDiZ
OI5HZyZDta+q9hMCwxW7kuOSoEppp5Q+kkKehvoJQPSJB65b2m3uY9+tUddSvcxKTnlWRc6Aj6yD7MdN9Y9K7BvOyX14UEG4xWkzh0CjXpjZtLgijCorUaSDnXljqDw33dVF1BhLsw06HR6SkzWfSdbkoG2Qyl0guw5SVfiQrUa/YQerRvFvtslZrZiNOY4sPeOIr2qR344+uen2tiEuo9cLcCMg3bpb8J7QKMOeN3l7DOCfdrg7OH8vY1R59S17MhFIJc6LW57hnraqVMwjM2WmLau9P8RiqW7CWR+cyrx6kbf1TYtPTcEaJ5AAZVFQSOGqnE8vGAexjhgsOoeorC3Szd/mdsg+GFyPOjQf9GWCl0pxUMacSMUm51+0Tl/G2Yt5dw7zTx/Z8bzbGLAx57mSdL46ymnurBZEKgv7RusnYU/vcGyPZJnR40gj5ktK+Xp5hY+WJVQSRkGjR0YMOymR1dqkV7MFz1LtnUEbJZawqirGSiheRrVsgp4nkM8hi5T2i8I+7Hg32tMcaZvxu1K5Zk8nXkTBpMLIKXLMFbpMqlBddmtxkeNWFlARitW06p91z1A4vaGwnVXSveXFrNuL3Nu35Aj8dQVYFeK6SAQ3KhpnioOu+ib3ddyWJoZV2u5aEpOpDRuaeWyRspoS1atIDpSPUSa4MdX7V+LOK81lZxEpGs65stniLzmHKo5tsnkuR20RPpsJasg/GwPG3pgcXHYr0NPhoAuvL1OjLtEo+RX5gIQvi4DInxZ5ZlRlU1zxzh6ybxucm4/5T6Veaz2BJGjjt7d3jEyIBCpelGcTOGfM6QvILXBRmBKplbBQ4p6PTh9x18rK/q7V7V60nlZOpUXNG0KJ12p8dD0z2Hmvqupq+ZKR7QgyVf0nvxS97DbJeWHTm3kPtm1K9WX4ZbuTx3U4p+8BFGeSKQKA4jHzRgmI5zTzoWU1rrqnoMmB+p10yVU30aDKKluRGraC41Jcgr11VFf9aIs91Nk9+iMu3peLrBZJaZOjFWA7NQ4juNR3YkdGdV9U9G3qybOYZLFbjzjbXUS3FqZKkiQRPnDLThPbtDN++ccynuo9r/H/ABTePtVvN1nhFJmdyI1anNK8QuPHbkf+wmO5NlNSt2qo7WWTrDk2MBiI74eshfVC9ebRv+0xP8teSyWMreKNiU8XIHTRD+6TpqeGeWP11/lr9Z+jfU5/I6p2SLbup7GLUskIWZTGcnlh8wGYKG+NVLMgzaq+LEW7TAM14oloi5LjtjVyH2WpUWzCk2FbcQ3gFx7Gsu4zkiHZQJSPmadadW0pJ+U9UNJJI0pSeqzcaMCD7acx3jjj9Dtqsdk+VW523RNA3B1IZajlUZAjmpoR2Ymt7IMO5b9xfLlZx1h82XX0lfHN5muTraekR8VxmI603IkekClMqynvOojQo24eq+vcooZbedbkWls8kw8o5g8s8bN6v7KwszLc6QtMqmlT7ewc+7vpjpNzr3G+372H8f1eNgTE5NeQ3GKqno4beT8qZn6f5cmUtxaojUaO46lQMmY/BrGV7m2dqtG+n6ysZXQNNIkYH7R7O2n3Z/Viidz32G5nKWcM05Y0qgoO/TqIr7ftOArgv7u3EOOvsQhx1msrJ75QjW1hUy6K8YwmsW4A1Fk7pVfEnXK92+QzFfeS0RoVqV8pJna7u8poNYeRNRqPbTs7K0rgdHdWdiazALcHIqCDoHYSMtXaBWmJPZJwVwj7r8M/6pYLMrYWXWDb0iHmFVGNfLn2yUoffqs8qUtMvPyDuSlx2Q0JkZBQ42tbOjbuhLKa3n8uZTx4cz3jt+gxvut0ia2JjcBae4e3s7+zFQr7+Rf9QH8Gu+NLXIsrpL1eP0uKMWj8RFZkcaQqJ67EWJWWLmR2rrw0RJUlLSWjuigAh9VsWXptFd7at7f3KRWLoHIAr4KV8TalAHaornk2eWOZt09bRt3UD7dtu3S3e5RzGNNUmgCTMeBPLkZjXg5AqM0AFGxK7lb2
gclYnwy/yJkdviFVIrXauxyDBYbFtayISJ86NWRUjK7q6tYdnZQ3p7frMMxI8UneW3HSlBXp2bZek5N2WwsrVnJqEkIWhoCx8AUEA0NCST2gZ0Obx1r6g2eyNvV7dw2+kqXhTW7LqYKB5sjsrMCwqFRF46SaCrG4Gs5FBaQ35L61sIcbQp15a1tt/N2aQBuSCfJCAPsHVgT7Hb20JjVVSoyAGZ9368V1L1Xfb6RcyyNIynxSOxoK8qnn2Kor2DF5/EPGFDy3Dgt5DHls1a4rspzapEeXMShCAhphQC3IjS1L1UofOUjQFJOoqre90m2l2FsRrBpTiB7e32cPbi1ulthg3uNWuw3kFa6jk57Qta6VPb8XLKuE7PcDpuNMmdoMej/T1cZuM/EY0T6iESGwsodWlKfWUlwEbj8xGmup1PWFpdTbpa/M3JrIag+7s7MMBsbLY7v5GzXTAKED29p5+3G7Xci0uP2+MYvNu26y3y2U5ArW97SX2EiK+79Sv1Qptj1XGksMKWlSVSXUDarwPy02O8vo5r2OMvZ241Mc6HMCgpx41anBQTXG/fOrdp2Vrfb7idY9zu2KxrUV4HxGvAVGlSRQuQKHEUPc3i8HCMgp4mP3c2YbyFNsLSvsZiZk2E+iS2BNcfSlt30LZTzhSlYJ3srIVtISm6egrmbdLSR7qNVETKqsoorCnADhVaCtMqEZVzPDXrutvsG4wx2Fw7vcRu8iO2plIYeImgNJKtQHmrUNKARNkn0m1b/mUrudfMny+zqy1UDhjmJZHnm8xs88NV/Vaj8oAJ18P93X0gH24N29YyDjPDjEuJBHn5D7Neh9wmRph+2O/RZVDEccTH4M4EVynRX1v+usVSKt8V0JpyCqYZViYqZJEhxMhhUWIhDrY3JS6olRIT8uiqu6p3/+DXUcHlly41E1pRa0yFDU5Hs9ueXVvpzsltv9hLd+aqiM6ACK1agOeYoMx28TllmBLSnjul5nVKXWlraWhRTqFtqUhYCvwkgjx7dG7eaaOhIOkjGi+tdsuY20FfNHL6f0YE93SJYWooToU9x4g+X3HTppsblnFD8OKd6m2+3gqYx4sNRUVKDtUgKOo7p+VYJ0PiBor+I/j0YEhC5Ej7v6PrxWb2SyTcKivHHtqueWoKaBGh1APyq1+zyPf4HrQ91nR8MFlsQZdSj6/pTD1o5k+C4hOq0lKknTUgj7fj306++Sky144F7n5m3n9mmJTYLnDkYMiS8n+kAqJ3j/ANLUE/x16Abhs6uCVGMdp6pdZAla/T6ccSwoMzjy47exxJ1CR3Pby8O+vSJe7a0TmoyxcW074lxGKtnTD+i2TT6Qd2hI8PHy/wDL0EliZDTDdb3CyiuNKwcBdGnhqP8ADQ/49ZRE0x6ZVLVPCuNiuc0c799R/wCUfxA61zZioxut1IOeN2Q4Qe+o89Qfh5daUFcbnpmcJElQKD38h4eZ16kKMxiK5qDhB41OuG0/ify3Ug/9x1afj/2ehNia2qnDLvsZG6ygdo+7D/3pSO/gBr1JoScDq6Vpyphv2ElDO7t3cV4dwe/j/LXqVGhalOQxAuJljFDxJwXeNMpfx6mnyUz69qveWG5sdcqImbu9QNpIZVpMSnft7pISdRrqNegG72SXdwkZRzMMwaGnCvH4e3Df0tvM+12U04liWzfJgWXXxA4fHStOBpwrUHBPj8o19ratV1UxcT7IV0t2MzjUMWlpvbDRWtusHp/qDXpkhbJejpWkkKc26pUFl2SaC3M1wY0g1gEytpWmdPF+E14GjEEZCuYb7Xq+0vL5bSyWeS98piot18yTKlSI8tYpxXWgINC1KqZH8avMycVfTLYmR3BLlIs4l3Q1+MzWiuNH3InUMKVLjQG1RinRJUApvRWm0jVH3pWS/BjKkaRpKSNKDmfhdgC2f25ccXJ0mUfZisyur628xZYUgYVUZPCjMqDTTKua50pTEGsW5CxngGPkUDhjiyrFHY5DFmZXKfvM
hnLZsZdjCo2WmZ7Ssoitwq1c0IZjypNe6fnUlju4sWPe7Te9UvFL1Dev8ysREQCRiqhS5JX8s1alSyq44AtwGKR2rqLavT+O4t+jNriFhJcK07GWZiHZ1iADDz1CpqoqO8TcSEzYgoe27HsJmcuct8nQuKY+E5tkxeh3eTN3FjMReOv3Cp9qzDrJM+TCrjbsxay4kei22XBMZUr5t4SF6vudxi2Kx2iS+Nzt8NCkehRoATSpLBQW0kyRLUmmhgMqYavTez2W46t3bqSDalst5uarJMJHYSkya3CozFU8wCG4bSASJEJzqAyPdVWPTZ2bPRKvkOfTOY9Nj5IrC8IoW66QwzSx3ptfb8phdvdVFY8mIgToLlXOhvpGx+OvVKgS6JkVI7ZZHtEuPNUx+dM+oEuQGS28KM2fgcSI44q4zGAfqrC8s968UW4yWXy7CY21tEEIEYLJJffmSRodI82MwSxsPC8ZyIoGx6lVElj6v9UZr32VxbJFS8mPMlw1aLVE3uhTCmnXm0bg4laBprtJAHXUtxJrjJTQZQarqFQD25Z5CvCh78cAWcYhk/OMogYUcIdLMp/DU5UJArUEc6GmHTcQKtVop3GK6wrqj6SqQ1EtZLU2ciY1VQmrZb0tlphqQmRaofcQpLTI9NadG2/wJj27zrDpvHR7jU1SoKrQsdNAakUTSDmcwczxO/cIrSW7L7ZHJFZlEosjBm1BFElWAAOqTWQQqihHhX4RoOwppTtCT4HwB+3wHUgSIDXAl7JjyzxJvJ+XuM+UcU44j8u8dZfYZrxfiddg8C7wvMqqhrswxekWtdJX5NHtcZvZdc/DDi0rkQ1KcdLrq+xUgNJNn09vOy314+w3dum3Xs7TMk0LO0Uj/G0ZWRAwNAQr5CijkSbf3HrLpbqjadsi6v228k3zarNLVJba5SJLiCKvlJOJIJWQrUgvGSW1McqqFYWI8n1cXmO25Zz3Fpt2mcL76XG8XuYuOwoKLeml47Er25NjT5A5+l0dLLDUZG31D6DZW4obgonf7JO/T0exbXOsWnRWSRDIW0uJCxCvH4ncVY8MzQDKgTaerLSLrKXq3qC0ecP5umGCRYVXzI2hVAzxzHRFG2lBTV4V1Mc6rvD/ALgLzh+u5RwqphTbnjLknGsnpv7ZubFh6bS21rSzKqnyiLNZgMQxbQg8hEv047KJjKQCErbZUiNv/Slt1BNY7lcMse82c0b+YikB1VwzxkFidLUJWrEqTzBYGd0f6i3nR1ruuyWiSTdMbnazx+TI4LRySRtHHOrBQvmLUCTSiiRRQgFUK7nEvKGI4TxByxxfbYlklvM5eTjDdve1+T1laxVt4day7aiFdWyMasVrUuVNUJPqPq9RHZHpnv1r33Y7/ct/sN6guIY49v8AMKI0bMW85Qr6mEi8gNNBkeNeGN3SPVmz7J0fu/S13Z3M0u8eR5kqzogQW0jSRaEMDk+Jjrqx1DJdJzw4fbln7/D+d1+XKhpuKxVfaUmQUa1paZvKO3iLjyq99a23UFsSA08ApJSXGk6gjXof1jtY37bWsdXlzaldH5o6moYcOVRkeBOGr0u33/Ku9JubL5luY3jljyAkjdSChqDz0tw4qK41r+zbyLIJ2Q5GX5BtreRZ26K1xiHIdE2UuRLbgOPx5bEQgOFLW5paGwANpA0612cTWlstraUGiMKuqpAoKAtQgnvzBPbgzu1xBf3T3t/VvNlLvoIVjqNWCkhgOwVBAyypgqcj8z8TZ/neL5re8c5yUY5QUOOP0EXkCnZrb2rx8OtsxrN04QqYGLKK6WZiGlp9VokJ2Ek9D9l6e37a9sn261vLYGaV5A5gcsjSUqV/OpVSKoSDQ8a419V9adI77v8Abbxf7dfEW0EUJiW7jVJUiqArn5YtR1OmQAiq1pprXA5xHl3Esd4j5V41n4lkL87lOyobOfdUuR1dRCo38VvF3tC1TVLmNzFtx0SCEPpU+SpsaNlogHo1f9Pbhd79Y7zF
PEIrFHVUeNmLiVNDl28wZ0zWi8eOrCfs/Wezbb0hu3TNzaXDT7tJE7yRzJGsRt5TLEI4/Jai1yYF8xkujI43Mu5/xzPbXjTN8645TecjYZHhUGaWgumGaPlbFI8V+A83kFc5TvyKzJHK+QtsTmXXdFLKtmiGW2tdh0ne7XBebbtl55W0XBMkK6CXtZSQwKNrAaPUK6CBwpXNi2/ePUTa+oLva9933bRP1HZKsVzJ5oEV/AAUImQxkpMUJXzVZuNdNAioyZXKmM45xXyHxDx3V5ijHeUL/Gba9czK7rrE0sDFrA2sKsx6HV1sWL+pTpqGfrLJzYp9mK039OnTcJ6bFe3m+Wm/7u9ubyyikVPKRl1tIuks5ZidIFdEYrpLMdR4YFP1fte3dKbj0h05FeDbd0uIZJTcSo/lpA+tUiVEVdbNp8yY01Kir5Y443eWOVMC5KxThvFYeG5fSM8S08fGEzHstqLB+5oVSGZM90IbxaCmBduOtEsugrZb3aFpWmvWvYtj3TZr7cL6S4t5Wv5DJQROoR6EAf4rakpxBoTTiMberurunuptq2baorK7gXaIRCGNxG5ki1BmP+AuiWo8LCqiuanBazb3HYDnttidvM4rySxZxPDqLDWMavM3opuP28fHhITBn3DqeP1XqpEhMjbKRFlxQ8lI2ltW4qU4fTeVIZoZ7mDzJrh5fNSF1kXXSqKPmPL0jMrqRqVPEUpZcnr8Ybm2udutL0QW1nHB5El1E8UhiBCySMbMza2BHmeXIgbSOBqS7Mf9wGGZTnd1yVyVhd8nPLCon0lNd4tcMx4uJR7Clex8ycfZehSJtdJgVT624ymlJU2VqdJU9o4FzcPSW6gAG0XELRggnzF0u1DUBiDpYV41qO4DDjtn8zuzXURXqexuoZqEAwlZI0LChZOEiGnCgB7654IdrlPC/J/DGQ8RciW2V5JFubmBkVbcX1nOcsseu6ppLMGZCkWFTakqcjlbEgkn1I7q0japW7ocvRXVG37nHuFvBDVBQhNFHB41AcCvZQDOla41XnrF6V7309c7BuF/d6J3DoZxLqhccCrNE5pyIJbwkgca4BlzjnGTHt/xzgZWLZXZU2M5xJz85Lj+cUcFU+8nVj9XYRxGsMCt2YtO+w/q2FtuPtrQklavmST6dL3027vujyxRSSRaBG8T1UA1Bqsq1Yc6EA9mKkvvUfpyHo6PpOC2ur2zt7wztcwXUNHdlKsjRyWkmmMggrqVmBAOo5jECvd5zxyxR4NimCYVUY5hfFfH9ZNg4ti/6bGyp0SLN9yZb3+QW1zGSLrIbeY4pbrqI0WOgHRphsFe4fufScdikt3OxuLmUguzAAZZKAuelVHAEk9rHDL0p6mXe8G02vbIY9v2mzVlgiRmLjW2p3eTw65HYksQiKOCoorjnh5Q5W5Bu8hh2FtlFk7KpLSNb1TcMR6eurLStkolwJ0SspGIEJEiPJZStK1IUoaePSA6xW8h8pEQHjRQDThxArjqLYru9uFS5upJZnUgjWzMK8eBJH1DFuHEPO1N7gOULLO8oqYiMkyStx68fyCXHjJuGLqrrY7N5Qono1cOPXrscvtNEj89JUpO5fTDbQvaWwlic/NygGT/AKvgidvh4nt92Bm53MNjA1ntSiKzjZlhUZDW5JkPdUk6eynHPHQXwRc1NzTwHm1NFlbDSkJClNoaTtB2LB2arSPxeIHl0g9SI67oxcUJVT7cqV99MPXQqeX0+kRbXIsj6j3k6qe7UMS5pbmjejhdS29cthxSFriLCIaXGiUqLM1erTgSRodPAjoDhxwpyri09MKbarK1oAhKnXnZjoB8AQ2hLWoB+B69j2Ik+6n3S8Se2rjj/qVy2/jd67jdi4/hdeiujSr2RlRirQ2zQiQtSY9m8z8i3kbA02SVkDTr2PYB2Ee7JhrjTHuW+UK3CsbzvPoBy6LS1kKvalYpSWu40VZKvWYzeQ3F7+m7VSpCnm0ErCEJ2DU+x7ER+Tf3
B2FuSq3GUfUFwubkxkBltS1E6qcDSiogk91KX9469j2IBcp+6TkKwiSFqu/0cye7bEYOOOMpX2EmQEncptsEbUjaFK076d+tkKs8qonxlgB7ScsabieO2ge5mOmKNCzHsCgkn3AYrcz22sZf6hIZMp6XPLpn3U9RetJ/qErWlTydBGiFR1SwztbT56nv0xvtE7t8xckyTfWB9f6fdhTj9SbCH/lbM0jPE1zb2kcB3DLvxD27hzFylJWXFOFQ0OnYHXUbQOoU1gMy4IP0+hwWi6k+ZXWma/d7PpXFvP7QPKvJfDXPtJVY/YT52C57YM13IGIuPrVRSI3prP8Ac3oKIjQrqkSnemUkJUWtUrJGg6Zdm2Q3UD6BRkUkHv5D38MJfUvVC2c8ccp1RSOAwqB4ebV5aeJ5UxdP7vuY0cu5rBYpCp7EcJjS63H3lApTbWEl3SyvfTV+GKQ0G44I+dOqj201vvojp6XYdrY3WV9cEM4/ZUDwp7c6t2cMcderPW9v1j1BFFtratlsFZITykdj+ZLT9nLTH2ip4UxXzkcZ5QcKgpRJUpR1O5WviT9p6S/VTpOTcEXqDbULXMKaZUAzaMfC4HMpUg8ytP2M7O9AfUKHZ5G6Q3qQJY3Emu3kY5JK1A0bE8FkoCpyAkqOMmQIvq95RdQRrvSpJB3dwQU9xp5g9c847Hwge5krmcScXXquzy8VFY+snUqforKVEIWPE6RX0Aj4Dr6poa4+MKimJ1ftB/vCYl7asWb9s3uVNtG4oiW0+bxtyRWwJl0cDFzOesLTGMlq4Dcm1kYuuzkOyYkqK1IehuPracbMf01xyBEd2gBIWZRSp4Edh7KcjiGC1sxqKxHP2Y6UJv7k3sPhY0cqPuq4Xm14jiT9BU5rU3OTFBSpYbGIVT0zKfqSEn8r6P1de23XrULC4JoQoHbqWn342G7gArU+yhrjmN/dG/cbf97d3Q8ccU11vR8CYHbuXrEy6YVXXPImVNsPwY1/MrlKLlXQ1UOQ8mBFdIkOF9b0hCFhtpncxjtoTEh1O3xH9A7vvxqGu4kDsKRjgP04rOx2+n1QSkK0CdoA+3XT5Tp46nocTU1xNGQwRP71tXmdiXgCQPEqPl4alSQNfPXr5j7hrzsgtCNzj5Son5gNO4791d9QCPMd+vjKWWmCthEY2+YbiOH68ea7KrFpwf8AMkJ1G7uBp3+Gu3sOviroXT2Yj7g/mXbOedPuGCC/lFm6wUIfJ3JSAU6Hx/pA/p6yxCwLbdy0krdPqOOLUT8pGoT8Tp4bj8evY9htNwbWO8n/AIm3UHQA9tSk+IABJA69j2HpX/qyShQL4AIJV83cdwdQNT269j2HbWmfIcCSVnyOmp1HgPl76Ea/4dex7BQo6ye6R+Ld8dT+E+OvfXUDr2PYMNBXStUpUpWp0G3Ug6DUk/ADr2PYOuPQy2yhW0nQAkk+Px0+J69j2H5GfYSkhae4HfU6+YSBqPM/z69j2NW3bZcjOKSdAEAeWumgOvfz0169j2IyZqfTXICVAKKkoHltQfFXftp2/l0t7mALhmNaCn3DDhsxJtUQcan+8cVT8wT6ROYcyKmqg/qzjmNxsdL5QZu44lCkW6a5JQ44ndWxFF5SQgekNCsbghxdjWBrHczKAbkrYhK8f+11an9kZ93POhZrsXy7306bQTfIKd2aYrURj/1cVjMuYGchHlg6vGahci6aLiUPxGo73difGaUgnsluX9Ont4dhIR/iPt6gyQ+VJVRkMx38MFoLhZojXMnI+36fbgMWcBUGY62rXQ+qgdyABtWR4Eak+XRePxRhsq5YESUWQpw44STKKUo7k6x3EkkkaqQpZ01On+QaadbdBFe5h9tMfciKcqY8iaR4LPzx/EHcB6ZJA11/95afb1905GvJvv8A9OMaAmg5j7v9GPTU0ApJV8e+umvnr200Gnl1uC88fNJB76YcNFll3hmRYzm+Ny1Qcjw7IKbKqGahSguHc49ZRbeplJ2l
KgqNOhtrGhBGnj0RsJfIlVxkwII9uBW4xJPC6NwoR9mOib96XGKv3Ie3j2p/uc8UQ/rMcyfBKLA+VEQymW5jqLSS/PxwWrzSUIaXjGayrainuL7/AFr8ZoadM95As8NV4LmP6jZj/VNQe84UNrnNrcGGXKp/2hkfrFD7BXBp/Zi/ajxjKMewf3qe4teP5pGuGY+S8K8bRZ0C+x2sZbeJhZpmzkZ2TAn5Ay+zrEqlKWisWjfKBlgMxNVvALJFfjcMAR+6DwP9b7v63DLctwa7kMKZQqaHlUjiPZ9/s4lv92b95SHxgvJPa17R8hjTeUEJlUfJnLVQ61JruM+yo07F8Okt7487P0d25UpBUzSnVtO6cFCHlPMlkuuTO5PI/h7z2t3cueeWPm3bc182psrcf7X9H3+zFEH7ZXuq4+9o3u7rOdeZX8on40MTzeruZdNBVkWQzrfJWmFsyH2pMtl2S5JmtFbzqnN5UdSSSeo1neK7OLhiC6nOlc6g5+2mJO42b6AsCghW4dwqMdIsn/24v9iDGoZxnn+WAQPyMCx9A766/wDsTmkf8OnfqVS24+b/ALJwKMF0P939oxB336/v1e2r3Ae1/lTg3iTijlmZlfJ1A5izdpyLSYlS4zj0Oa60ZV4ldZluRWUq5rkN74TaYyECSErU6gIAV8d7eONwrFyyEUpQZimZqeHLLjiRZQzi6jkaiaHDdpyzpw58Dnwxyyv5DLDQSXFIKUhOm4j8Oo1JHckdK0tvGSTTM4tG23KQIAGJOGqqwyTJrmkw/Fq+be5PlN1WY3jtHXtqk2Nxd3k1isqayAwnVb06wnyW2m0jupxYHn163sBLIFVfETQYk3G8yW0DSM1EC1J5DmcdFv7yVzVexL9tX2Yftn0VpEe5ByiPD5M5l/T3AtL7NDJl396++j5XPoMn5fyBb9cte4+hSLQfwg9NFxEsVmLeP/DyUdhC+Jj73NR7Diuttu5breJd1lJ15kdoLDSB7kFPfjl+wuVPXZspRv2KWjVW7anuQdvnrofh0s7lZCW2IAzw+7Du5jvQJGOitcWA4XTmwrWkvK3ko2nVICUp03AnXuR5d/PpR23bjHc0YZ1y7MOO/wC8BrfTHzWmF6Fxwy/dNKbjAguJ12tAa6q7kkA6gg9/j1ZVrbsgC8Mhio57nWTLIQIwe3iezFqXtnwxijbdtnGglurgv2DiiPlCYMZ2eseQCQmN0z2kesx26DN3A7zqIFftwobhdBJmunP5USmQ17EUuf7uGFxhWSpyZ094FTs+ZLmuE66qXIkuukn4/i67Y2kLb2qRLkFQD6gBj8kOrZpL3cZrqQ1eWV3PtZif04NCKN//ACf6+ivmrTvrhN8qStQMfl0jw/oP3aHX/UesxMpFMa2gevDGFVO6AQUE66pIKddQR3BBHgR/h1mJhlTLGpoiDQjGS2rHb+JFZsFbLOujoh19s5u1dhNdmK61XoVONxh8rEnuptHyL1SAoDpbYRO01oPC7EsnaTxZOQJ5rwJzGfFgsd0DRLabkaaBpSXPwgcEk5lRyfMrwaozEeuRcDbu66Zj17EUytbSkgOoBA9RHyOpPdDjTnihaSUqHgdD0H3Swt93smgehU5ezuPYe0csO3T28XnTu6x3cVRmDxyI5EEcQeRFQcVV3ntUv8U5fwDJqtlxdbB5BxGweeb3AtRWMirn3nHSNdiEtoJJPYAdc7X/AEjJs3UVrcU/5UXcRPcBIpx2ftPWsfVPSF9bxn/nmsJ1p2kxOPvxZC09k2LLXKZbMqLK2KmR3E+rHkhPZOqf86UeChooeOvTHvu32t9UodMw+EjIj9Y9v1YQdjnvLKPyLqITWTjxJIKq3f2gjkRQjlhx1eZX0lbcjFLKRGmtr3OUcx9RVqO6vo3CQHU/BH4x4BKuqp3O2ktJaXikCuUqD+8Bw+0ezB2+6JtN5tWuOnnrLSptnNHB/wDBOaB+4GjchqxIWn5Gn8g8fZvxfnzEhNNlmL2tVatv
tuuRoy0xlPR7kx0HX16WWwiSlbW15Bb+XRXRzp68ubK4RgdcJIoQaVrwryzPZ7cUNvm2XW13hlTzIrmB9ZShBOn4gVPOlRnXvwR/24OZOVofH2W4jmH6tnODYDkTWK4lybEkyX6DKZLUYylVlZaufSypNqiCQ76CwlWg+bavsbd3AQ3NvDK5SO9YDWGA1KrHSrnIjSWGkkVByNMVtvW7taXsc1kCdounZ1hYAqlwi1kSmoeIofMGYoaiuLM4ecUty0ZkOdNClEoWp+SFS0qIKFNvRZiFvNKGpSU6j7yOo4sLhD5cqxsvs8OXeKfTswuXe47bfrWVGkc18ZHjzHeDpyyoCMsq0wh5LlMmJFVMgVdRbiCyV/pkGDCrbaW22klwRHUJQzIlhsEhpwqQ8ew0URqTisXePTBI0czHMszMv6x7eWELc9pkeVBBaWi2qUCtHaxxyCnMlApeo+IOXB40xGfLOUMXy6j/AFSheTJiym3CShlDUmO42pTMmPJZQdWJER1JQ40oghQIIHU6KC4sBqkakwzoWqD2UPAg9orgKnT8257o9rBG3lB9LFV0MDlUMuWk9xy4EUBxVdmGAo5JvrPFsgxONkWN5rZM1E6RbRypitq2X3XUy4URwbGrByWpKUOLOrbQJA1OvSP6hNZ30UbuQYmiZJI+TFhSpI/Z+JSM688dh+knRl7HCY7Ay218ANF0vhkhIB+AjmxoJORSo454iVW+0/lYQr1ubnMLjbCsFs3sUyLJ8ydlScZfnx3n5NfQ0mOw6y3scqyL9EQl1EOtiKcaY0W84y2tKzSF5EkxjgkUT+VGEUuis2la0qSMvaPvx2j6f7RddG7J8tbzvDPK5knaOSRUkmf43A1VOoiueY9mLs/21eIaji/hnLbbE8wRnac4zSSuxyZjC7HAQ7DxuDHr4dQKedPsJcxivnvznkSFqb3/AFWz0kFs7o4tDE2mJEiqBkqgD34dHv8A5pfOune4YE0MjM1O4aiae7FVHuF4A9yXK/Ieb8+SXqHlDj7I7uSIGWcQ5FHy/HMexuMsN0uOWjUIR8io26KqUy1ITNhxmS+XFubnFrJb9q2e081UuiwueyTL/Vr4fZTPCTu3U24CEmxRWtCaFoc/9Yir8OINF7sYePeMKvGI7MqXFQ/YpQksRGmwqO2odip3TanXXy7I+3tp1aO1dLXd9Tyl0WvN2H90cW+meKU6j9Sts6fLG4k87c+UMZBoeXmNwX2V9xxZl7E8tyjHeb4GOPOvnHM9gTKq0q2gtyMiZWwJlrS2qkpA3Pwnoy2AobWm2ZTmiR49H+o+kdtsunnuLcVvISrazxIJCstOAGdaDMkCpOK16T9TN83rrOO0vXpt90HXylzVSFLox5swK6dRyAY0UYuMp+D8HgclXvLkehbdzbIK+vrJFvJS0tuFHgREwFLrtzSBElT4bTTUh3cpxxplKAUp3hVby7veNtke0u9LKNiwUcSSa59oBqQOFTXM0pcdv05YDfJuoYotW5zIqlzSihRpquXhLCgY1JIAAoKgtfkXA4XOeYw+HHVvycUxRFfmnI8ivfQ3Bly1OOsY1gsiwYlplwpcne5Yyh6OoYYaCHEOLBTu2/df4BZNvKUW8lJjhDDxAUq8oUihHBFNeJORAxt3Tpl+qN0TpyYM+3wBZ7gqw8smpEcDOGDBuMjinwhaNU4daYftcZtIXCwocRjT6+c1Hi1tHQqrILeQR1apZcvYzSHpd2HBsJdeKVuD0lbl/J0La46qnjbd9c3lMubM1SV7dJNAvsGQzyGeG9bPou1dNhdLYyIwCxomlFccqgZt7TmcjU4k7RGsxzNMUi09YxWRrCkyNMhLCfRihFX+kGMSjUNoc0lqBV4qA7+HS9MGuLKUzOWkV0pzOeqvfyGGKJhbblCIECQNFJqPADTop3Dic8D3mHmzgbBcrYgZa5VTMquo8YuvSqt+2i1sRSvpIsia8GZDUNoqbOgRorRJWoBOiiwdP9I9TbvY
mezV1sIyctQVmPEgAkV48+2gzywidX+qvQnS26rYXsqPvEqgk6GdI1J0qzsAQMwaAZ0zOWeIc8y8FY3K5Q48yv1pT+M51ltdj2SVxnyHEokyI0idX/pMouuPxKuzjwnW1NtrShhWhaI9QBFk9NdQXEGyXdgiqt3a27SRkKBkCA2sUALKWBqc2HxcM6I9Qul7e/6p27eJ5JJNu3G9SGdS5OZUsnltUlY3CsCqkBDQrTVlr557f8btOZsVxLE0P0FPZYxIyLKQ3LkznI8CrsUwHJEJdm/LdEqet9mOncpTbaz6mwgKSSOz9X3tt03PuF/SW5ScRxZBQWZdQDaQBRaFuAJGVeBCL1f6WbVuvXtlsm0Brfb5rRp7jxM5VI30Fk8wsdTkqgqSqnxaTQgvu5le2Lj+9Y43tcMr3phREjz7GRjDl41XOTGmXYv6tfykPTkPPtPocKmStLQWCooHgJgHXG72p3mG5cR5lVEmgsASDpjFFoCCM6VplXDRcW/pB0tfL0rc7dC01FDyNAZQhYAr5kzVcEghqpUKDnp5NOV7csIxznbCgKwWGD5VGyWQ3j00vSolfd0tb9YmI+46SuTWPNvesy24tZ3srSsqRoDPj6x3O96Wuqvo3SAxgyCgLI7UqKcGFKMQBkQRQ1oKuPS3Ydq9Q9vAi8zp68WdhA1WVJYo9QUk5tGQdSBic1ZWqtASPmuOe2jjFF01dYtQKub+G5KaoIsJU6z9JbCo7IqGlhxjGWXnmFFDqVRkeruIUdoAFbXcda7+0RtZ5floWoZCdK1rU6zxlIBzHiNKZZ4N9TL6T9CpcDcra3+fuYywhVS8lKaR5YzFuCQaNWMaqkE0oCbwS/xxMxAf9Nq1dVEjqiM3UWQy8mcm3+iZUv6+Y6kIsZPp6bnmyUK8to+UA+rLXeoNx/8AXTh5GqUII06NR+ED4RXkc/bxw5emHUXSu4bEG6URooYyqyqwbWJNAJDscnanFlJB5U4ABHj/AIk5h5HrYuDV66/HsZgT52dv18SVUR7V96VGZpKiK0/6brMlbzUlUh9LSNzCdqV79qksT3W+9O7O8m6kNdzuqwBiHKgAl3JGRFNIUVOZqRStQlhddLdbdSR2/TWpNttY2a6ZVaNXJZRFGoYAhiQ5dgo8IoDqoQ8bmu9sVrkJ4jl49jzVwp1NWlcSnfhratFaBuuRlEZtqSLMrIToXylTv5SlFzVHQu2HWkFp/H45JTbU15sDVP2vLNRp92Q8QFM8Me4X/ppebh/lK4jg+dLCPJCtJOSecADrrl8VC3hJLeHGlknAvF0Xl7iZmJhNNGr5lVm67Gsbjf8AsrsXaKDTuVDs+CoqjSnYzlg6pSnEkvHb6hUEgdSbLqHepOntwke4kaVHhCtXxKJGcMAeIBCjhwzpSuBW59K9LwdZ7PBFaQrDLFdFkp4HMKRlCy8GILsTUeLLUTQYjjy9xZitXzkKWtrI1VQ2MvGXJNdXtpjRY36gIrdgmG00AiO24klwJRolClnaANAGzYNyvJ+mvnJnL3KLJRmzJ0101rx7M8zTPC91BY7XbdZHa7aNYrJ2hqq5KNYUMFAyAPGgyFcqDB+5u9v+JXeUcVY7ilHW4q7dTriHaS6mE1GT+h08GNPkvvNNNhp+cwyhaWnF/M464lK1EaaL/SfVt9Z2N/eX0jzrEiFQxr43YqBXiFJoSBwAJA41K+qHp7tm87jtO17ZFHavcSSLIyKB+VGiuxIGRcAEKTmWIDEilHLkEf2zcSO1mE5DR0TMyfFYUp6VTvXFixHdWWG59rchl6XC9d1ClapcSUgFQSlGnWi0m646gV91s5pTEjHIOEUkZ6VSoDUFOIPYSTgVuNr6R9Fzw9Pbla2wuJEFWaIyyBSaB5JaFkqQTkQRxAC0w6MuwfBMaqOP4tNSsRWZWf4fRuy2H31Sp9bd2BZmNSpxdU9LTIbVqlalFSNBsKR26gbfvO731zeSXMpZltJnAIGlWRaqQtKCh7Bnzrhh
3rpTpnarHbIbC2REbcraIspJZ45XowZ61ao4Emoy0kDLBstnOJMGWxc38OnrVSWVw4kRbC5CJXo/M4qLUNIeQ6+kOpC3vT+UFO5Q17q0B6g3QG2tHkfSak1pSva+WXGgr20GWLAvI+iOnWXcNyit4i66FXTUNTM6YhUE5iracsqkVxFm7yqnn3U96mQuPVOy33IMd1afVZirWS0hSQtzboD2TuVtHbU6al4t7K5jtkW5IacKNRHAnn9OeKfv91sLi+kksAUtGkYop4hScuZ+qppwqeOFWonpdcB3eOmmhHl1omj0ilMS7OcOeIOHK+Q4AQRr5jrRH4TTliZIOfLCNK3bSQfAHt9nj1uXsxDetCRyxo8ePMMYbUglG3SRoe3bV9Z/1HoHt4Y2y8focOu/PGu5SE8MvuwtyrqM2vZuSdBqfh20PgAeiqQORXCxPfRqdOWB9bZClbrzoUAhncEeXdP4j30Go6KQWhAC8zhavNyGstXIcMMaTkdOqonvPLtP7iMsNVaYkhpEFEQeip1+zQ/FWpaikqSylhzVStxXsCU+oUWyuRcIq6Pk9PiqM650C0PvOoeytTQHPuO3PYSPN5v8SL0j0sAmnIkyAqc+IUIak1LaQBqYMPI5MS8rrOdFZu4cKW09KqrAyTCnxwr82LJTDlQpJacQe+x5tWv9Q6KzWYltXgiYxSMpAZaVU8iKgj6wfYcL9puHy24RXc0aTxI4LRvq0uOanSytQjsYHvGLCuOfcfheGqp8Zjy6KY1lEqop1UuJw76BQ4w1ePpYXYWFvlwftZdlAMtJfYS/JhsNtOBt5JcShFRbx0buW4eZeusqtArvrlKM8hQV0qkVFCtTwtRXYkalyJPT/THqlsOx+RtUT27pdvHH5VusyQwCU01vJcVkZ01DWgeSJVVgjgsFXQy3BX1SnK308gn2n6hY0tcxFtlTnIzuQl+vTIbrhx/ALLLbatkKU7PWzBecjuF5W1tR2bfuUYQTVhWHQrsSumojo1C3ntU83UIC4DjSKkY1bzsk5lNqBcyXfmPEgWQsVM2pKhPk0oAMopGlKxMYmLminBLRd0PttqMiz9+Pdzolm5RwITd1Nx9x+ylXlxb2SpTMnGxIY2MwbN6c4yWESfSA0aSlaAA5trrrK4h2lDEsiB2bQJAFCIi0Ikoc2UIDqK1/ESDhmG4WHpdZXXUkq3EkEpiRBK0JLtLJI+oNBqWgR2lKlBJp/AAVAhF7geUMPzWoefq1YpldrkRcYReOVeU1eb42xHkx5L7VvG+uh4aH5bb646XYjVh9Qj1HXJCnSFCyuktjv9uuAs/n28EOejVE0MhIIGg0M1AQGo5TSdKqgXLFD+o3WOy75ZGS0NneXVySPNKTx3UAUhiJF1LbVapQNGs2sanaQuQcRfwLHbmbk9fHxl5yHcJRMfYsm2ZDqatlmDIMqykux48hcCHDjbluytv/ACqR6uqduoed1u7WKxZ70Brc0BUkDUSwooBI1EnIL+L4c64qbp+z3C73aOPaiUvRqIcAkRgIdTsVDFFValpKfljx1FK4Ss+m2VtlVxOv72HktsZCI0y8gr9SHYGCw1Cbeiu/TQ/VZ9GOkJUG0hQGo7HXrdtUUFvYxxWkTQwUqEbiuok0Iq2dTwqcaN/ubu93ae43G4S6u9VGlTNX0AKCp0rUUAodIqMMpsQvXaTJccZjKdQmQ6w03IfajlYDzjEZx+K2+6hskpQp1sKIAKk66gkdZQlAC9MgTQE8qmhoK86GnYcANUQkUSllj1CpA1EDmQpKgkDMAsoPCo44kJyPwZxhx7bYHVX/ADFdRV8iYPjufVFovi9o0lXR5O7MZrFZBIjcgy7WK8hUFwviLCm+mkAjdr2T9o6n3rd4Lqe12+NhZ3UkDL8ydbPGAW8sGAKR4hp1Ole7FodRdB9LdN3W3Wm471Orbnt8N3HIbAeUkU5YJ5xW8aRSNJ1+XHLpGY1YyYv7S7efzx/0AzTLGsWyOxq3brFr
+no05XjWSVLVZOuU2TEpy8x2VGhSYNe4GlBl1RfQttaW9u44X3XlvF0v/mrbbcz2iOElR38qSNiwTSRokBIZhXMZEEE1pjbtXpBe3HX/AP8ADvfLwWm5SxGWCWOLz4JoxG0gcMZYWClUbSQrHUCrBaVwCL3GePWcP/unE88vLec1k8KgexvJsIg4nPeiy6y0sXrupk1+b5dGs4da5AZYloHprjrmsFegdb3s9te7s24/I39rFHGYWcSRzGVQQyqEYNDEVLamKnMMEanwmiJe7T08mzfxXaNwnnnF0sRhntVt3KsjuZYyl1cK6oUVZBkUMsdcmWppwvivFMc4TPPnJLVjbQLrJXMQ46wmtm/pSMhtIzctdjdX9shl+VGx6t+gfbDUUNyH32tpdaSpKiubjvl9edSf5V2YpHLHD5txMy6vLU00pGlQDI2pTVqqFNdLEEYftj6S2nbeh/8AP/Uokmt57n5eztkbyxK6hi8sslCywpoYaUo7OtNaAgnUTmHFVzhGWsrwSuwfOK9ipnYhPx28zGbU3W6+rIVvTWNZktzkqmZqaaW9LafS+w0RGWnaFltKs22ze4NygYXT3O2uWWVZEhDp4GKOrRpHUawEK6WPiBrSpHoeoOk59lu0exjsN7jWNrdoZbho5fzUWSN0mkno3ls0iuGVfARSpUEw2VbxRT+3/jrlmXx5IsbbJMyt8TuoSczvYURTFM1KcVOr1JEhUaXMSyg6LS622oq0QRoAvQHfLjqu82GO7CQQ26SofJQmrkeFuFQKnhQkUz54db6bpaz9PNu6tlsGkubq8kt5FFzIq0jDHUhFdLNQcQwBrkRlgZ+6DjbGOKc/qqTE59k9U5DhePZiiru3WH7vG3b0zd9BaOxmo6FSYyIqXU7m0LDTyArcRvWe6H3e83zapLm+RBPDcyQ6kBCSBKeNQScjWnEioNKcAjerHTu29KdRQ2O0yStaXNjDc6JSDLAZdX5UhUDxKFDCoB0sta/EXJx/wNR5fwHyDlqn3V8mwKw5/h9Klx5KpHG+LWj1Jllp9MlXpSG51gZTaCpC3Eu1iUoKQ6vWLu3VFzt/VVpYAD+Cu/kSvQZXMqh4lry0rpJzApISa6RiX070DY7z6e7lvBY/5rji+ct4qnxWVvIYriTSMiGfzFFQSGgAWgc1Hft+o8AyXkimxfkbHJNzj984+w9Lg28+qn0wi18+aqZFRC3NzQtTKQttxJJSPlKT4mOrJ92s9mkvtnmEd3EAQGRWV6soodXDjkQfaMK/pza9O7p1RBtPU1s0223BILJI8bxaUdtS6cm4CqsOHAjBIyz2/wBFxhythv18EcjcL8jz6xWF5LEspdcmwormdCQUqsK30/SyKljydFtqR6boKXPTTuKEBtv6pud82K58pvk+pLNW86MqG0uin8LVrG5GRrUZippUs+9dA2PSXV9h8yn8T6F3SSM2s6uya4pWUfGlKTRK2akUaobSK6VE2X4XVW3K1piGAY4qsjKy1/EseqVWMu0kzH0XL1RAekTJpLhlT17CsJCUIJ+VOnTBt99PBsKbju02t/lxLI+kKANAdgAuVFzpz7ThN3rbLW86vl2Xpy2MUZvTbQx62dmbzTGhZnz1OaVoAByGDXzvwlieAnj3IOO5yrfBsyxZDTNz6rzzcvKsZe/R8qeQXluKaRNmNokhvUIQp1xCEhCAAv8ASXUd9uvzlnu6iPdLaepSgBEUg1xcONBVa8SACTU4bfU3ozaOmxtm59NSef09fWdBLUkNcW58q4IqTQMwDgZAFmVQFXD249wfjN7grN8/usOes8lwu9oKhpxGSW8GDaN30xhoSJsVhZDLsRt1YAZKEr0TqAdSYW7bnvidV2uz2tyI7K6hkc/lozL5YJoCeIJA41pniT09sfSNx6Z7n1TuVg027bdcwxCk8iJIJmUanVeBUE5KQDQcM6uDHOO8I5H4+zLKsJiWmMZVx7Caur3Gptom7pbfH9kh2TYU8x2HEsoE
uE1EdU608qQkhKAlXz6pwu973XY95tdv3Vo59uvWKRyqvlukmQCuoJRgxIAICnM1GWY216H6Z616R3Pf+mY7iy3/AGeITTW7yiaGa3oxaSFyiyRsgRiyuZBkoBBaqqnBXHmOZ5eSouUzVwKd1j+366Qhx1r18xv4VgnG4yVtKbUpTQr5EkJ1KFrYQhYKVkHV1hvV7tVipsUElyD5jqQDSGNl8058K6lWvEBiQajA/wBIehNm6n6gkG+TPbWJQ28TqxUte3CSfKpUEVp5ckuk1VjGqOpVyDXN7qeLp0uiyqhl1r71rSybGBNZgNhVhFlwHXo0hBgnYJ6W3mj/AMDR0jwbV0YuoW3PaBPt7B4pYw6BjxDAEaW5Zcm+vGrpm8tdh6pbaup4za3lvcNFJJEPCroxRvMi7Kg1aPsroxzhZjxPdTLsohITYQpFmxEdfRDd9SK09PbhvLkRy39RFUwpZBDqU6KToeuet3nezuZEu42jmUE6WyrQVoK0rXurj9DulNjXcbCKfaLiG7tH0+OJg1ASBUgGooOIYA4mFinFdTxVbYsqnsLz6W4k2tXarl/TyTXXVOlmbEaY9MRl/SW9U+hxrdqoO6o76dSei9/ut/ilknWPz4ypAFRVGyHbmpBBxE9TOm7DprcIIIGmFnMrEMdL0kUVcH4aVXxA59lMWRcb84zbzMq3gzHJll9JQxK6fyZZ1LhROmzrBR/SuNKZaQVpfnAE2T7R9UICmmykbl9FeuNgeOxi3dM2U6HA5KeBPsY0PewGK49KOvEvep73piUEWzqJIHOWt1qHH9pAGSnEROTyxYdW+42LXsNQWnmYTFcj6JMGOpLbMERVFn6VpCVFKEsFG3Tx1Hc69zV2OicC3lH3oMUtJITHmSQpa1sBmK4pL8jRHzNxnPw7t3Ze0ggHxB69j2KG/e3+4w1ntJlfB8rDsQr28OzjA8treYbqIzd5TiNlSxzMssawWhdjTJE2xsZRbQ42ELYdKQJSgnQj2PYF+Te4DIM9UzbTbh6XXWcWLNTb2LyWpUht2Oz6msJgfT1sn1gfUZabcDSjsC0pA69j2MNBlkyz0YpIy0oUfnsJCQlRVroVMMHcEq+ClFavt069j2CTUY67IfD9igvpX/xlSU+t6+4aLQoEErSoEg/Z59N3R2zSbruXmkH5eAaif3j8Ir28W/s9+KT9c+vLfo7pT5NGX+K37eWi8/LWhlcjspSP2yZcDRE5I49g11WufVodfr3U/wDAUguTatRB/LebQFOSIQ/8N5IJSnssDTU2pc7WqREMAHH2/qOOOtl6vu579WhcmAnME+Je7vXsPLgRiKNTwpmmdXKmKCqK4iXU/U2j+rFfDbWeypU1SShBUNdrSA48v+lB6SZNsmubkW9sheQngBw7zyUd5P6sdO7J1LDZbb87uMqxWoHxNzPYoFS7diqCe2nHFzXtY9vkfirGTLCTKtrFgNzbFbSmHJLavmVEitEqciVeo+YFRdf0+cgfL1cvSnT8O1wrJOQ9zxy+FP6uVS37x4chjn/1L6+uN+naz28NFtxqDX/EkHPUQaIh/YBNfxE8MSXscblPJUoNhRUe52kd9NB207bR208h4dOxnQeHuxUUPmV9+BPk2Iyg2olHc+CQk6fafDv/AB6jtKh9mDds8gYZUwD7XDJTq1g7gPsSfE9x28fHqr+o/TvZd3ka6sybS9bMlQDGx7SlRQntUr2kE46C6L9aOpOnrdLDcgu4bcuSh2Kyoo4BZaNUDkHVjwAYAUwIPcpjFh/7T1ToRuCqPLLmFqdRtj3ERqU0nt3Sn1Y38+qR3/py66f3VNqndJJJIDKrLUKVDBSDUVDAkZZinPHUPRHWu39cbRcbrYxyQi1cJIj6S1SuoFSpIYEDidJ7sVcYhTPy31IdKw6hwEKHzbtyvPX5T3+7rRHsl83wlPrP6sSpurdojycSf6o/77EpsUxKGyx9Q6kNugakK7tuaHuU6kJCj59SDsm4gZtHT2n/
AL3A/wDztsRYgJOWH7q/9/hZuHK5gIRHeS04kd0lSUkaHw01B1J/w6gybXdLUsyGnYT+rBS36n22enlrKPao/wC+wxZV7CYUUqeShw66HXy+A8P5f49QZYHiFWofZg7b3UVz8FR7focKdbkMb6dW+UgK36JVqAnw77Uknx08+oYmBOYwVS3A8XE4SrPI4ipGz6oabU+Y77tBoACAAOpaeIUGJPnUFWxtV9hDe0CXiToANFahJ10A/qSdSfPr5KpV6HA2ZxJIXHA4NdXMgoiMKdeI+QdiQCSBp56nXt49+teNeE2RfVYdUFOI7q0HdOgO4d+3h4dex7H5F5VKUna4kfN8pUoBAA0+XcfiB3PXsewrLySoaKwp1O4o0Sd6QCrT5fDvoSf8evY9jarMpqoqgv6hBXqNTu3ak6EjuewBP8dOvY9gg1vIFY24haXmxtHfVY1PbufgAPP4duvY9gn49nNXJ0/OQXFaakK10T4Aaa6knTr2PYPNJk8V6In0nRqANCdNdND9uh069j2FuPbbt6gvtuB+7z0PcjwT269j2Ps65SiNJ1XoOyfPTTuNNQQNRp17HsRp5CvEMsyXCoBbqwhI0BIR82pGnmsjTpa3NhJdaBwHH20GHDaF8uyVz8bVp7KnFSXJttXO5vyg3JiRnbSbJr49LMcZW7IhGNh9S9ZiI4lpxEdUqHohwqLe5sFIUdfTWGiEfye6u6guPkqNQ+H/AJoE07KgUPDLh3slwl026dPGKWRbZf4mZEBAWSu3sqaxUFtDNVQA1GzK5alfUdCZ1PEbUdoegxShXgpDvoNrbcSdQdyVDUdZ3CVUMBUjGmynpJoY5NlXs7D7jga5DvkKJebDc2O4luQgdvU0ISl9GvZTbye/bw6zi0mMheBGX3/ZjbchvMOr4xx/X78CWTIKFMp3kbH1pO7uQPy/Ent/Ue/h1IKk6v6tfvx8iNQB354TBNUj0dy/+G6Wzrp2SoJGgVqNNAVdeKFq94rjbqCDPtx8YnulwpKtoSSFFX2Ep11I7E6/x63rEcjxyxFmnVV5kn78OZmQHWylahtUk99PmPiPh4aeGnYdTUWlO3AyWcGoxfD+zj7weMnKzP8A9tj3WOQ7HgH3Ios6zAJN4+hqvxvOMlY+kscYXOeUk1LGYSA1Iq5Da2zCv2kqb/Omeo2xWM+pBEaGRa07weK+/ivfXmRhS3KErIbhK0591ODe7n3U5DAu9wdv7+v2ck8vezXGeRLuN7eebZsu34n5QajPqkf268/svHMEvWlts4TnE2ukNRMgjNAuMvBMyGGi+1Lc2TSyW8dYQCn4SeK9oHf7f6woTjCFIbtw0uUo+Icm9vd7+44p9rXmWUlYdK1rUpanFL1UtSlFRWtR1JWtRJJJJJPS5O7M1Txw1WreUulOGN5UqOVaFW4HuAo9hqBqTpoo9+sEqBlj0niY1phOky2EnbtSdxIH+XUjX4g7h1uQEnniPLRRyw3ZcyOSSnaSDpqkHyOupJOvY/Z1LC+GhxAUnVUjLDYtrNDbClDUanT5RoBp8SfHv1r+XZmoB9mDNvepGoLdnbjo8/aL9j2F+13BLX92b34obwXj/juikX3t/wAMyOIf1i1mWLCosDkVVBISiRLt7kykw8Sg7S/KkSfrkoQBBeUXs7PyeOUxHH9lTxJ7yMgO/tIwE3ndjeUtbfOLn3kcAO4cWPD7cUee8X3Ccg+/b3Tcj+4/OYr1ejKbBqtwrGXXlSWcJ4+oi7FxPFY6x+SXosNRkTHGwhEmxkyXwhPq7R8uaOw0/CBQDsHf38z3nEa2rbx6T8RzPv8A0cvqwxMbwdEVbJbjq9dJHbYPAHuAdNNO/wDAdQ3jUKdeeWJsF3IkylPi7sTW4voZMgNx0Mr+TbvXpogAlI0T8T9nfv0OtrJRNVRlXjg/f7pS2/NNWp78THx/jkBLLrcbUrKSpZSNx18tCog6k9tOmNU0U7MJRuXuH1Pw5DhiaeMY8ui4uzKYhrY4rG7G
K2fBQcntorEDtodf+bOnTD05GLjqCzhPwCZCf7PiP3YVOtbv5LpLdr0fEtlKF9rgRj+/hscX4QY8FpOzwaRr27n5QPt8D8eutbS8GimPzC3q0JkoeODU3hwKQQgnt/oD28e3Us3owCWyGMTmHAeKFD4/+bTrIXo5UxrazFedcJ68ST4+mrXUjw/36Dw62fOHtxoe0I8RxpOYpp4oGnw0+P2fHTrYLqvM40NasxyzGEubh0KwiGvt4P1sEhRaCVejOgrX4v1c3YpUZYJ1U2oKYcP4k6/MNE5Ex82FtFx+1xB7nXmO/wCIcjywS2+7ewXyZUE1iT/hk0Kk/iiah0NzpmjfiWuYA+WcUS6BKpBbTeYy84EJsPp/zYjjn4Y1vHUXFwZR00SSVMuaatrPQDc4ob9Db3aBbqnDiGHah5ju+IcwMXF0du8m2ut9tUxksagNXJ4yfwypnpPY1SjfhY8MJldikyPWSUIj/qsFLa1ojuaLfjpO4qLLp1WoJHglRI7AAgdVDu9hc2F4GtnZoSc1b9B/XjrHY77bN92dvnIkS6C1DoOPtA/R38cMyqwSvubAyK4GPIbdKVt6FDqFpUAoOIOhHj28vP4dEv4Qt5b6pFqvfyxVm5b0NtuGRGCkHIg9mJKViBh1WqXZQay0umYL0ergz0rWqcZLYaCJxYcZkprg3rvUVpO3slQ1612PRcbyf8vqWLUCacAQa1HKp9mK36x9RowBNuaxSzpGyrrHicMKUJUhtI4kk5ciDjR9vmN3FRmGNQa4WFdhWRZs3YX2LUTkmLjSLuwZkx/1OmrJS5PputstpbCy66go1PY6nqyNysGh6eu7i7RBcLAFRmpqYAg58CKcTSmfLHHFvuUG8epO0bNYTO0Et3rkjRS3lijAqrGuTVy1FjTixxaS1xQ7EU88lCWXdxSkuPFkekCralZeaajuuK17r39VbFvM4QKrFlp7c/cSfsx1JF6fpbXbGZDGdVFDGlFr3gKT3g+zGlJwGwQnVbL4R2IdSgOICgdQUPNb2+x+3XqVF1FKpoD4vbT7Dhzsuh7ZYx5kdV7aAj6xUYrl9wtCeBOXsCy1nHXjxly/eOQuRZ8L6l9VVk7JbjJnwKhO2viqlxliRIUAlcpSTqdw6b9lul37b7mxLBLpBqQk0ozcGr2MfCw4Zg4r3rTp256b6023qSKRptukt1hNqI0FY4WrKQwAd3VDrjrXRQrWhAxMml4CqJtlXmqajvRJjbNiLZSf+VTXrHrrnlCkBYbRH0VtUQdx2+PVQ7nLMUf5uutGI089QNNI9p59meOtum7W0iMQ2wKYZEV1fOhQiuojKmWZqag5csQ790GF4tneby8Yx+6YpWMHtbWE9R5EJMWssRcS2raZkNTbRa2ZC/W56VtRJMd9KXfThR/TU6ElDeiw2plhWe6UFnFSVoaHsIrWg7RlxrTDVf7kJH8m1kCxoaePIEHmDSlTXgeVKVxK/wBhSsbcwrNONsdku2f9k5auW24ttbSPocghR3Flp59iMp1li7gTEAKSH0oLfqobUsJ627psN1b+VdtG0cEqZEigJHYPYQew50JpgftnVe0SNPt0Vyk95Aw1IhBKhu08B4gajiMqgVAxG2j4kyH2cc831vAiWD9DYqu36BiC+/CqbultHXH6gWLYkNMXa6F14IfjyPyxIb3pQkFtZtLa9j2rqLYV0MGvhpD6vwEfEAADTUM1bPI+0Y5u6w626o6V6laCH/ldtJLRmNdXmoeGssV1aT4XQFQDw/Cxk3jNH7ROY9ZucYpUcWZvKUF2TtYqVj9BZSSoF6bGf2SqOKqQtRUtDyW3dyj+Y7pv61Xdt1/sPg253u7BeHwyOByFDRzTuBHcOGJ+07t6K9Vr5m/Rw2G9OPFqEsMRb8ThhWHM1NGavaWNTidXDvEHDWC1hkcYxMZU3I3R5eSVjkO7sbD0lAuRnrtT0ghtCtCWWi22FAHZr36r/e946lvZtG8CeozEbVQDvCAD6z9e
Lj6W6d9PdutvP6Zex0MaGaIpIzU4qZCzZDmooO7DZ5c5qp8Xsp+GY642/kEdhlFjbTJkdTFUt9lLwahRSsNPTkMupJW4djSjt2KUDty2/Yd3vYFvZI3W1J8IVGq3LM0Jp9/bjDdes+jdovX2sXcL7gg8ZkmjASoBpp1AVAI48O/kw/bPneKRcoyXG5d02q0yhqPYQm1yELdnzq1ywenNJ9LVT0yQ1OW+dxKlhpaiSepm/dN738ol8YHEEZozFSKBqAEluVQB2CoxF6Z9R+iX3FtoS/t2vZgWSNHVixWpYAJUFiKtnmQCeRx9d4FyePzKjJqprHGMOGSjLXbWdInSsjW7+pG1NUzVpjtxWXTL/LD6pCkhr8zTf+X1NgutvOyGG8luG3Hy/LEaBViApQMXJJIpnpCg1yrTPAa+uN9/zOsu0W1lHsesSNPKzvOTq1FFiChFJOWppGAHipq8OCxmNjaPcz8RNPS5Co68d5TW5HStTcVS2mcNDalMNlLKlN+oQkqBI3HTx6w2y0tYunb940USCa2FeLUJlrmc86CtOzAzqLddyvOt9miuJpDA1tfkoDRCVFvQlBRSRU0JBpU044jlz77ZMu5M5HZy3HLeoZr7WJWxLhu2flMvVbkBpEQyIjTEZ9Mth2I2lQQFIV6oUD2VuDr0r1pt+zbObC8jkM0bMU0gEMGNaEkihBrnmKU7KYqj1E9LN66o6mG8bXNCttMiLIJCwMZQBdSgKdQKgGlQdVeRqCtyVZVVFZ8C8bx5qX7Vec4xKZYcKVyhRYpXTGXJ8gIJLKpMv0W0EgBw+rtJ2K0BbLDPdQ7rvLrSD5WUE8tcrAhR20FSezKvEYbuqbu02666d6XjkDXZ3CBgDTV5VujAuacNTaQOR8dPhONvIcoqca9xuHM20pqIjLeNbPHK515SW2/1dvJo9lCjqdWpKUGahlxpA/rfU2gd1AdYWdlPedHXDQKWNverIwH7HllSad1QT2KCeWMty3S02v1PskvHVBebU8CE5DzPPDqKnhqoVHa2kcxhxZhJ58Zv3EYJF4snYy+lj6V3JTk0a3guek2mULD9PlmNKY+o3KbUygL2HapGo3Kh7evSrWoO6NfLeitRH5ZRs8tOoVBpkQTSuYPYV3p/UaPcivT6bRJtTU0mfz1kQ0GrXobSwrUgqK0yIqKluOP8gRuV+JImbW+ITW5kXOJLMLFauyrCxYxKFKHX31W9tayJsBEaUUIUj0NHVfMk6p2zVXan2HcJNsjuFKtCCZWVqqX4DQqgNUVIOrIZHjURMepE6x2WHfprJ1dLtglujpR1hoSTJJIWTS1ARooxzBypEP3VWyIXM1qHFAa0tApHf+kwQCNNdQN6T1ZHQC16cj0/9JJX/Wxzz632zP15MWPhMENP9T9eJK+y22TNwvLVhWu3KGkePb/5VRD/ALek71MSm5Ww/wDAH++cWf8Ay+23lbBf994P+GuGT7UMwq4me5pjst9piVkyUPVi3FpCZMqmmWLjkJpXgt92LPW6keBSyrz0BL+oW2zzbTbXsQJSDJqcg4XxHuBUD3jC96G7xbWPUm4bTcsFmvADGSaamhZ6oO1irlh3Ie7BDPtouV8vqy79Vrv7Ydyk5at0uv8A6slxdn+rOVYjeh6ZWZerYe9Xb6R3n5/k6Dr1rZr03/DzG/8AEBB5NMtHw6A9a8KZ6aVrlwzw5Sele7ydcfxkTxfwQ3nzJNW83/E8wx6aUrq8IbVTT4vi8ODrmc2DA5V4dclLQy3Lbz6rZecUhCBNm11GYbBUpQ+eU4wW0Ad1OKSkdyOlfa4pJuntzEYJZTbuQP2VaTUfcDU9wJw7dS3UVr11089wwWOQX0YJIA1vHDoGfNiNK9rEDicMHkHge7y7lWvzCHPr2qZ5VMuzD7ryJsM1IaacTGYQwtuQZDDCfTO9OjiiFbUgEz9r6rs9v6ffbpUc3S69NANLa6kVNcqE55HLhU5Yx3ToTdN16yTe7eWJdvfyjJUk
Ovl0BAABDalUaTUZkg0GZIXIOQU9JyVxY1NlxmSv+548krUAYbdxCiQ6x94+DTcuxj+kFEhOgUSdEnoHs9rcXOy35iUmnlkd+gksB2kKa/UOeHvqe9srPqHavPdR/jK1T8PmKqoT2BnGmpy4nlgDc6+2vIuRs8RktFY1TMWyjV8Wz/UnZDT1euE2mKX47TMd5MtlcVCVBO5CvUBB0B3Bt6W61stm2n5G6SQyIWK6QCG1GtCSRQ1yrQilOYpinvUL0n3PqfqUbvt0sKwSoiyayQUKDTUAKdQKgGlQdVRkDXBP5OqP0ih4pq2HXHWqvlLi2C245p6jjUKzajIW5poN60t6k/E9L+y3BuLu/ncUMljcsQOGak4eup7MWW27PZxkssO7WCAniQrhQT30GeAb7uZ8uFe4MpKlBs1lx23aDcmXC3aDXxKVJ1+7pl6AjR7W6HPWn3HCB6zySRbht5By8qX+8tf0YjbW5O+FoJcOnY/i8fLz+7pynskIxVNvuEqmpOWC7juWlK0bleQ17+P+/U/x6A3dgaZYa9v3ejAk4LlfkbUlKQVJOvjqf9vx6CSWjIanLDXDuKyCmVOeHElxl9OoUnuPj3HUUhlNKZ4IqVcVFKHACxnM/SxCkCSoByGh0aHx9Qb/AOWp63bbt5+XSvGmI3UG96r+Uaq0bHkZO7I3r1X3JSPm07f1ff0aSzCimFKTcywJrx78JFjahSQynfqoarP2eOh79yo+OvU+3t6eI8MAb3cC3gFce8ZmSlWjNXEtJtQLlxqA/Kr2Q5PcQ9vbRBjqQ9EdV9c44Gy2X2WHVKT6yghO5P29jRYDPIiyeWCwDGiin4jkR4RnXSWGekVND92e6uHvFs4Jng88hCyCrkGoCLQofGTp060RiR5hCioQsvjw8fv7WqiyG50aHOlNRJSJEOSpyGHnPpVyFwXX4zcxcfaXWgolpwlCgFAgSdvkku7RLh10uygsKMKGgrTUASK8DTMZioxD3mCHbdyns4nEkKSsEYMrVSp0lihKhitNS18JqpoQQD5kWPuKqsfbzGVBl1ljcxZcWowBCbjNbufbMMMVtZT1ziGIrUd5mJ6CJTrji0KWtaozzhbR0pWd4ouJTtyssyRkF5/BCiqSWZ2zNQTqKgAGgGtVqcWjum2ObK2TfHje0lnVljsx5l1K8gASOJCAoBC6BIxYglmMUjaVxPLj22m5ZGatsqwdNNAqTg1dilhTXsu8yFM2SEOXFXkNjXv2NnMvsbjek6qQ43DMmU8sekT6jnVWbvBHt7m3sbrzZZPOaVXQJHpHwNGrBVCSGoCgvpVR4uAx0Z0zd3G9RC83jbxBbw/KJbvFM0s2ps5Y5nQvI00C6WLssReR2GiupsRj9x1/ZWpgVXIGJTMZcxXMbJymu8XsmsipKxSrtiFRjkuratX/APlcpjOMutWiTDlRXZDqPo3Sva66dH20Nvrn2m4WZZ7ddaSqY3bwEv8ALMVGcZqDF41YKp8xaVFW+p17dXxjs+o7N7RrO9cxS27iaJKyhYvnoxIfDOpVhcAxyIzuBA+qjRK56/uOBV4vIuThkkTZtuuFKxefInqUl136uQ36khtlxUFC5KW0BbbSkFnaEn5lKfOlms5Jpxb/ADC6VSolULwFAcq55EmhNa8RwFQ9e2+5xWto978k+t5CrQOz5E6mzIB0AsAKqtNNADmSF6+TX11K5Y5JGyOC7ZNPycQvqZbIhmXWxrNt+K86VH81NqqH6iE6PNt9yWgtK1MMxlmufKs2hZUIEqPWtGK0I/s66VyJ/aoQFO1s7e1szc7klzG8tWt5Y6adSLICCTz8zy6geIDOqhgSHbK+sJUh+Q/IckSJDjjz7z7q3X3nnVlbrzzzilLcddcUSpRJKlHUnphgt4lUIgAQCgAFAAOAAHIYS766naRnkYtISSSSSSTmSTzJ5nie3CI3MnTZcaEy7HQ9MfZitLlzIsCKlx5xLaFSp856NBhx0qWCt55xtptOqlqS
kEiU6xxRmVgdKgnIFjl2KAWJ7AASTkATgUsss8ywRldbMACzKi1OQ1O5CKKnNmIUCpJABxO33O43Xcg23AjlJynw2KjFuAOOMBzS3Tytg9iMbyHHHrld2lynp7uwyO6ajt2SC2a6HMLxBCNequ6LvZdpg3UXNjuPnz7rcTwp8tMvmRyBNHjdFjQnSa+YyU546A9UttteoLvp57Ddtl+TtOnbK0uZPn7V/JmhMnmgxxyvNKAHFPJjk1ZhcF7jznHAs996WBZ/FynH8Z4n4d42HHELKM+yTH8Nl38KJjGWV8K3Zq7+zg2EpyzvLxejTKHXWI4bXISypewAN26a3Ta/Tm62p4JZt93C8+YMcEckwjYyRMULIrKNKIMyQGaoUsBXDf03190/1B64bf1BFd21r0fsu2fJpPdzQ2zSqsFwiyBJnV21yymiqGZE0tIELUEReRF5vf8AHCByHmXCyDx9f2kjDqjju64isbzLLHOn8VgXIkVHFNwIUSkpavERJE+TDTJLrqWCVoUkxn7aBtltvJ/hFvuJF3Eola4S6VIlgEpSj3SVLu0unQr6aAtkQddQdSPvt70zXqK92Qfw64kNtHZybe8tw90bdZKx2EmhYokt9fmvHr1MIyWUjyy1h2YYpy57Xarga0yalwvkXjXLbDKcDk5RPYpcay6pt3rWVZ0T2RTlN1dLbpl3j6m/q3GWXC2yA4Ap0tg9w26+2Drd+qYIZLnaLy3WKcRKXkiZQoVxGvidKItdAYirZZLVt2Xftp6w9KovT+6uobHqbbLx57QzsIobiOQyM8RmekcUmqVivmMqnSgDULlXbRyV4T7X+X6POJvFCM8XJxKr4xjNO8V5NmX6Q/cw4+YKhSaE29mphVO6vZJkOFaAHFMrSoBXUK5jG5dbbfc7Yt//AAqkrXJ/5qOHWEJiqH0LXWBVVFDkGBGWCVlO2x+lW9WW+vs46hL26WCg2E9z5ZlUXJVovMenlk0dzUeIowIBw+8cy+w4u9tnEK6DJeLbrK8O5Kts0ybCZGY8c31jIxicqS6xG+idsbGSJktOxKkQNbKMXQQlK0LCBt5t0W+dZ7gLuG+isLiySGOYQ3EaiVaVOrSooM6F/wAtqcSCKndt3u76T9L9lewutpn3iy3SS5ntTc2UzmBqkLoLu2phSoi/PTUDQFTpHPNfF2I8lcjxORuMuQcQdxzlSW1b28PMM5oqW8wC+mtl+5g5DBvrWPZIqmFNuLjOMpdb7fTtbtGC8Z6a3u/2bZ22fe7S4F5YKUQwwSOk6A0Ro2jUrqOQYGh/G1PHpVOvumdl6n6nTqXpTc7E7Xu7iSRbm7hils5WFZVmSaQOI1oSjKGH+7Sv5es94fyFU4L7gMcxuqx7ieRhFNXQeOGs8OUtuvzOM48VMOVYz7kZ0vCg7OLbli/DciiS48so9IyFglY3HZrjdOkZr2ebcBukjtcGDyqAXJNQqp5Hn0GUauG0gCurQMPGy9X2PT/qba7TaW2zP0/BElkLz5kEtYhdLSPL82bSr0adojH5hY6dHmsKjHEeHsfxH3Cg12ccfLwCvsb+xqcjVyBiZhJoZcGyj1UZ8/rJlqtGlym2HWfT9UqSXQPS/M6O7h1Fd7h0fWa1vBu7pGrR/Ly6vMDKXI8FNJoWBrTgvxZYTtm6P2zZfVClruO1npmOSZ47g31to8lkkWNT+bq8wFlRkpqqC9PL8WHfwNl9Ti7jnBnMblbbceyL2DaUdvFua23iYLlseQ1Og29PkFZKn17dTKkK/wCYU04pllxay4AhcpKh/Vm2XF8o6q6bEke8LEyuhRkM8RBVkeNgrawPhBFSAKZiMgp6bdTWO0zN6ddemGbpd7lJIZVljlWzuQwdJYp42dBGxPjKsVQli3haYFG4/wAYq63N+TeVVWuCy5VFMzGRhNFd5hRV72Q31rYvw2pbcJd7WT1V9ZU2L8tp5LjKnn0NJjrUvXbJ3e9uJtssdgEd2scqwieRIXYRxqoJBby2
XUzqqEEEKpYuAOMDpmwtbXf9460M+2vLbPdG0hmuoUM80jlAwQzRvojid5VcMpdwgiJaukj00qg5S4CyTC7RrjTj+xxTIod/xxXDJ4tG1YTXUSTfwBGyrKbGyiNyI76trq1sw3X30kHVpxQD3Ed5sHV0G5QG+vIbiFo7lvKLlVFPLasUSqaEcAC4VTyZQWSwu9s629L7zp+9/g+13lldpPYR/MrCJHIbz003NzJIoZWNGYpEzup4o5Gjg2OQk+3rkzFn8owSBkOTZHjVlTU8/OsQiSpkaimxjMWtT10lmESEuemJCmivZqPlUgmRut7L/nKxv0t7t7KCCVXdYJiAZFNOCVblXSDSueYIA/pvaoD6Ubzskt7tkW63t3byQxSXtqrOsLrrOcwCH4tOsrq01GTKTo4bbU3E2BZ/Touai9zrkmraxJmBSz402oxukkCSxZTbfIkKFK9LmNyylpEZ95DOwLcWASBu3OG56i3ezuTFJDtNjIZizqVeSQUKqkfxhVIqSygmpCg8xuw323dCdK7tty3Fvc9Ub1ALQJC6vFbwNqWR5bgHyS7hyFWN3CUDOwBICnOt2OKsY4/qapjjTM7ATXMxs7JOTounKXM1ywzChIGL5bXlhulqa+KfXfSuMX3XfSc0366I4ZOob+8uLhr62h0CFV8rRrhpVm/Nhaut2fwqQ2kLqHDGy4uIuhNl2jb9vj2XcrwSm8kl+Z84w3heiIPlrqPSIYo4j5jho9bSaGpqq3fdRR43kOQMZ/j95h9gjLqepeyqjpMsx22n0eU/RIZnsKhVtlJflRHEsIKpLAdaLwcKlDegrk9CXF5b2DbJfRXKfLyOIneKRFki1EqdTKACKnwtQ6aUGRpj6x7dtN/vsfWWz3O3y/PQQtcww3NvK8N1oAkXRHIxZSACZEDKX1EkalLVPZ7xFHctHZjcZKlLUopeKQpaTrv7L2lWm4A6a6ajw6bLi3ilylVXiPEMAwocjkQeWEva90uIFHkyPHOODISpB4g1Ug5EVwHuTMaj18LHXWI5Ta3GRUaIi220FEadCfLMuWpC/wDiLRGJ2hPzEhPw6o/prpS62Pri422LLbwGdSecbnUlOQKnLPIHHZvV3X9j1b6L2/U051b3GVhZa/DMg8uXVz0utDlQkHjhj4HaP4EqU/Q/pVfksLknJrBP1DQvmreDLr1Vryr6cDF9cCLIW0llLhLO7ULSsdPVva2+6pNaOkr7U0AVmY6ZC+vVUZdorUgAjLSRio9ze+6fjh3QvBb9RfMq0ccSExxokQFSa8wxGjUSDmHByw/zk9TGZMic/FhhLZH6bj6JUKAgDVRG+dMs5IUfj6mmnVSdTdI3mySNcW6vJtROTcSg7JKAU7moFPccsX50H6l7V1VClheyRQ9RBaNHXSspHFoak1rxMdS65/Eo1YjhyFylQsqksQ8dRN9ZtyOqQ5KsHpbTK1aqRHeMrbHCldyEI0J8Qek7FoYgRnFDTcj2tPRZXTVGNorYi4GJcmUlXN/UAw6+5LZZ5Hr0SJKbpCpS/TenQ223UJAKmV6buvY9jUoOEr7H7aZSXsd6PaVzyRJhFwOxJG5CHGJNVpuK2ZEdaXEE/MpJGoCtR17HsS44/wACDbbRSyQNR8ykabSBtPylIJI7a9HNi6e3PqC5EFih8sHxyH4EHee3sUZnkKVISet+v+negtsbcN7lHnlT5UCkGWZhyReS1+J2oi8zUgGUtJxtKsW40eBFekvuEJCW2lLW4tfgEhIJOp8gOujtn2Ow6e21bSEgIubOaVZubH28AOQoM6Y/MjrvrPqH1F6ql3i/BMr+CKJalYowTpjThwqSxoNTFmNK0Bahe2JlQRLzH1ShGikUUde10kjUfXyUf+w6D5tt6uHwJT1hcRSbh4Y/y7b9qnib+qDw/rH3A4nbCltsh86ek+5fsA+BD++w+Ij9hD7WGCZjnD9eZMSPHrWIVXFUPp4MVlLEcaeOxtAHdX9a1ErV5knrbZ2E
NqPJt1CitSeJJ7WPFj2V+wYabne7q8X5i9cuwFAOAA/ZRRki+zM8yTniTMHFWI7DbIbShLaUpAACQNBoANPBIHh0fWcRoETgMK8ivPKXfiT9B7Me5tLHZa7oQQQfMk+Gv2aff1888saZ1xsW3VMx/pwLrynYeK9EDvr37E/dp36+a24nBG2RTgdSMbj7jqwnx1Kigdh8dNPs60tI1eOWCqIpoOeBFz1icefwhm8dLAUYMiquG0pBOnpOKjOqASNRql3qnvUiAtu21X37XnQn+0usD61x0n/L9faJN22smgeGOQD2Eof72eKOYqmqe8ksJZKAH19/L5VFSdANe+h6wtbYPCpUZ0xI3i9kt7yWI0ycjBDs+QG4lYplB0+XZ2GviO510HfXz6H7hItmCTTErZrR9zoRUNXjiK+acj2qXFrZfceSlaiFhX5qR/lPYdtPDXv0pz7hDIdKHSezt9nLFnWOwXcChpQSlPiGf14DUrlqyfJ+oddGzUbjvSoaaj5gfm7DodKRIKcxhqtojb0B58+WPsTlS3bCUNynFo7HQqVord4Dt21IHUL5MSGuC38QMYGHbW53NsnUl1xwqOg8SR8p07eZ+49uiVnt0mqtKpgVe70iJQGh+n1YN2Mz5rwQsOuI8NDqTpp8APLX+XUrdduZIFuYxkuTeyuR+vI+0YFbRvsM961jKw1vmneRxH1Zj2HBAnZRPiRNokKKgB5nUgDTx10KgR/Dpew14Fc3NrFT6/zFj5vmIUAANfEeHl17HsbcTMJykkh9e5Pfcdw8/s+Xy79ex7G0MpsXyFqdUSjd8xWo6keCvHQaH7OvY9j63lk0rAMgg+GhUfHyOgPYanr2PYc8PLZyfk+oUewKtHCe+g/q8dD17HsErGc3mx3UEyFBO4EnUqOh07a6gg7tf59ex7EscK5JSlpkOSQQU6FIcGmoAIGh+Kv9XXsewd4OfwSwkl7upY1G/v2T4a/Dr2PY0LTN2lx3H1vBDCVnaC5p6yu5CRqTuGo79YO4jQu3ADGyKNppBGnEn6H3YjhmGUqsn3lF4aF4pR31AQN+hGh0A0/w6WZGLMWI8TVJ9pw5RKAFRfhQAD3YrczG5pFck20Ga84bTIMtyCvqABuaUYeM0lS6HVa6tgvIUkHwKgR1ptb7bLfYt7tLot/ELj5byaKSKwyGV9R4LRSKV4nBa627erjqfpq7slQ7Tbi8+YJYBgLiFYYyinNjqBBpwGDfjYS7R0zhUpW6rg9vHQiK1r300TtUOvunUg9mB4Zo5GHMN+nGrlOOCzi/WwdRYRgOwIAksp7qaUBodw07H4jTqKiiF6NXyj9hwUL/ADMQI/xlH1jEcreIltcsbHG3G5I3tr7KbV+YD8pAJTqkeHRBIwdJPNf1Y0QuxY9xw0ZqEpMlOu0pdQ4BrqQCV+APgDvHWyJaFTzIIx8uJhUqaDvxgQtsOFWhO8hzTUqHzpCxr3011X8epUUdIxX2e2mWA9xcE1AbLCkm0YZTofnPmST4Dx/CrUd9P5dbjTmOGIlS2daYTXrVt47kkJdSsFC0lSVIWCNqkKCtUlJGoV2I6+RuyNqHuxskjRlo/DHRp7Of3aeBvcBw7F9jn7rtMxmvHslqJT4L7grZuXLs6J6K2YlI7nVrC3X9HfU6XC1FyqEr1i2rSwAQZEt09b3Szij0Eh41+Fvb2N2N9dMyVy5s5IH1xV0DMU4j2do7vqw0fdF+w7zvhcFXK/sey+g92nB92wLzHIdRfY61yNFo30LfjORJTcqNhvIMNEfQpl10mNKkk6NwCe5wmsIpGyOl/wBlsvqPAj6vfjZDuksY8Qqo5jP6xxB+vFGPJOM8q8QWzlBy3xjyDxfdNOuNOVme4df4lNUtH4vSYvK+At9temqVNhSVJ0IJB16inbpU+JSOyv0picN0jkNQa+/78DYZe24ACvXU6JGvfd2AAAHXwWzDGz5yI8OOJg8Aex33m+6GxhxOF/bvyTkddOW2lOXWdDIx
PA2W1kb5D+b5SKbGnPSb1WWm5Lj6kj5G1EpBmR2TjxyAKvacvv8A0YgTX8anSh1N3Yvr4s/bP9mP7YdBUe5D9zrlbEOTuToATbce+3fFEG7opd7D0djtQ8YnIh3XKFlHlJbSXpsavx6CtestK0huQidFEqisQyH4yMh/VHM+3h2DjgfJcTSnSxOfIHP3nkMVW+/z9wTmL9xPPokrJWV4Dwfh015XGPDtbLU9X1nZyMjJcoeaQyxf5jKhr9MvFsR4LClMxUpC33X4086geXHXTWpJ4k9p/QOX11J2VqEo8nxU9w7h9M+eWImUGJ+ollthj00dtSEaFwgnQJIAJ7jtpr/PqCHatBUnE9oqipoE7Tg/4txy4Sz6kdSEKGpRt1cc3DTTQp1A08u2nWQt5H8UpovZiBJeQw1SDNu3ErMBw5uI9HQlnahJ0ACTqAfjoBrppqepaIFyAoMQpZXkFXJJxNjEMXS4iOn0wSEjtpr4AadtOx7eHw6kqNZzxG16BUnEisioRX8YyYxRsNjNqoncaKKC65KWD37j/lxr009GQF9/SQcER2/QPvxXnqteiDoO5ir4p5YY/aNRc/3RjQwWnS0gp29gn/Aa6d/MHroOzlPl544S3uBC9RglohISfDTy0A8R8PPqd5tcLflDlxxlVAbVqduuvn4/7Pj19Ejdoxg1ugFW54S3qxvX8I8P9NPt63LMaZ8cQ5LcE40HqtAB7dj4eHf7PDuOtqy9mNDW9OGEh2sAJJSP/R/pHlp1uEtcaWi7RjSTCWytWgQtLiFNPNuNpeYksL/4seSw4lxqRHcHihYIPj2IB6+TLFcRmKUVTj3g8iDkQR2jPEqwurrbbpbyydo51yqKZjmrA+FlPNWBB9uENzi1D/qzcRbS0p5JTJx5x3RtCnAQVVEl5WnpKP8A8buq1T4JWoduk/dbNgaTfmRg5PlqHcw5/wBYe8Y6K6I6ytbiAo1Le5K+JczE2XFDmUr+w1R+yx4YjPPw+bj+VrnB39PmtyXEu1rqClT5ZOhQ8SPSSQrwGpV8QAemmxsla3WMxnzKDOnb2duKv6p3it01zDOj27uSulgeHHOtB3jj2gYLVfjbVm1Fubfc+07LgOzW1bUuTYjjxaWl8BRUWEbdobSkjb49vE7bWi2v5SgCWjAdisBUd1TxrX7cc9dW7vNuM7Imp4RQyPWnhJpQDiwFMyBQU4HEpuH8EkWGRN51aMNxY1UXouLVDbfoxoLR1ZM4MBS90hTA2pWr5tD22jt0p9U3kPy/8MgJbVnIx4k8dI5AA8QMvac8EfSPptrXcT1BInlW8bMIEzqS2TTMSSWYjJSTlyAGWJsxbhcOOlLhVtIAIJ1GnhqQex06qmfbkdjoArjsTbt+niRdZLJSmeYpj6i2qXV7y02y4ojV2MsxnO/bX8kpSr7iD0PksZ0yBJXsOY+3D3t24bdc0OgJIeaHSfsyxEr3j1tHkeOcb00lxmYuXl0x4RpnzlUFiuSJO1mO2VupLiu6tiiFEHt49M/RouIrm5QIdDxKppzOqoz5EceI54WPUu0tJLKwv2es1pcPMjmg8seXpauWavXSwpnzwvcS5tFxH2z01rM/MmQpF7jFaHl75Fqios3WqaG2VfMuK1v1WdSSEdzoOvm+bC25daSWEFfKqrk/sBlGtie3kPb24+9J9YxbB6Uw79e0+YAkjReHmMHPlxqP2amp45DjTEHbKqYs582wnq+vsbGY7NlkoDylyZCy6tYKkq0bQs6JP9ISOrZhihtIkhhULGigLQDgBTPvPPtxzPdXt7uU8l5eSM88rs7EknMmpoCeA4AdmCtwrMtsHy+Lk1O+xCkMoEGTAkLIj21Q64lyVXTA2dEturAUlaQotvNoc0JTp0L3yxg3Kxe2uFJVswRxVhwYd+Zy5ioyrg50tvV5se7R31jIodah1OSyIaakPcaAg08LANnTFp7KOPOXseYi3FZW20d1KXXKm0SyqZBkKTtUuM82sPNOAagP
R1glJ8RqR1TbLuuwXZeB3jYGmpa0Yd4/Qwx00s3TvWe3rHeRxTRnMxvTUh/dINQeWpDw58sNOJ7VuFo0oS/7YefS2r1BFlXVu7DGmp0UhUxKnEJ/yrUoHz16Iv1t1G6aPOArlUIgP3Ze4YAx+lXREUvnG2dqGulpZCv1asx3EnDpyzKaPAcf/Q8Nh1LE5thcasiQmY7FPTbkq0kvMRwlpwtKVuDKAS4v8ZSNT1r2varreLz5rcmkMNasSSXfuBPCv7R4DhXETq3rDbOkdoNhsawi7ClY1UKIoqj4mAoDTiEGbH4qCpxASZg67GwmzJjpspc2U/MmzntHX5MqQ6p1998kEl111alKVr3J6uiC8W3hWOMaI1UKq8AABQAdwAApjiTc5pbq+knmfzppHLO5zYsxJLN3kmpOHfifB/8AcMK7s6+WqtuaCVRPwJAWmJFjtSXpzs61m2CHEvQGaaNXl/1G0rc7dgSACL3TqNbGWKCZNdrMsgYUqSQFCoq0oxctpoaD3Ycej+lLjfrS7vLKYw7hZvAUNQqqrmQySvJWqLCkevUoZuwVph6ya7lS2l3OIZRyq/Ep6zEXMobmVEthDt5DMVuTWNxbtqBEnKZlLfbU6p5WvpHulWvYFr2C3ij3Gw28NcSXPlUcGkZrR6oWZaihACjjwOLPt5etL+4n2He96K2UO3/Mh4mH5yFQ0emUIrkPqUsXPw1qDyGWQ4fz85exMzk8iU6p2GRJFai//WYyoVDbOSWK2xxCXGj1JjvZE9JlsNSEKYeafK2yXlpSCkna33SYtW22KzkEVywby9B1OlCyyglqiOgJU6lK0PhBOcS+2v1He/TfZ9zgM9ijR+d5qlIZNQR7ZlWPSZyzIrjQyvVSXYCo/RcV909RMsUUnKVJa21zOflT6mPfwbWYh5u3ax++mNQbWmcjVkTHLMoZmGP6SGdPygsJVt1yXvQ08aG6sZY4I1AVjGyihQyRqWVwWMi1Kaqk86E5yIds9WrSeVLDd7eW9nkLPGsySNUSCGZgkkZWNYJKJLo0hfwBgDQZL4a5PlZjQZrknINI6u0vcZNDmjGWKfm5NImzwhmPhkiZEbLlnXMMFSWpSIjDRKEajXZ0aHUWypt8u22dpIPLik8yExUEYC5mYAnwsTxUsxzPfhZPRXVMu92++7luUDGa4g8m6FxVpyz0AtSyiroBXS4jRfCtc6Y3OQeKuRM9ej219lBs8wpRPOaS8jvKGurOPcWaal2uKG2rKSE45WuXVQw7OK2fUQVEpKN53K1bTvm0bWpgtYNG3yafJEaOzTy1Cy6WcjVoYhKGhpnWmQmdR9JdRdQut7uN35u9Qa/mWmliRLOABpLfzEjUlDLGGlqtQTUadWZ2scsvc/Ft8jwBjk6DWqxKXS0bsy/tayVGm2eREt4vUU1vOqLG2lTL5pClRQdgShB9RTRAHWu8g6Je3h3VrJnFwruAisCFj/xHdA6qAhybjmcgcbdsu/VWO9uenY91SI2bxxFppEZWebKCOKRo3kZpQCY+AAB1FSKY+SuMOVH80YyaBn9tkuV1uMMS5b9dfsKy3F8psMTl29NR2rFk6zHjUN/NafjxPRccS9HPztt+oAr7HvWyJtxspbSOCweYgBoz5UsSyhHdSoJLxgqz1Ao3AmmW1+kuqn3td3h3Ga63iK1DMUlHzEE727SRxOHIURSsHSPSWDJ8SrqAI+zPg3lWRZXWU5/l2LLKP05drk9zkj7rTthKmSKdNK0E1y5651c7XqQWkMCOhlsKaWpoAgvtfUmxLDFY7Tb3H4tMSRgUUKH1nxadLBgaltRY0YBsLu+9C9UPcz7v1HeWhpo8yeWckF2YxiMeAuXQoQVC6AoBVimeHgeMeVeK4/IBwvkevTQwWchVNarbmRVW2RU+JenHyCxj1CmnksSMelT/AKSQQ+l5EhSksqcRqvqNHvOwb69n/FLN/mmMeksgZI3lqY1L1FRIF1r4SCtCwByxPl6W6s6Uj3Id
PblENvRZtYjlMck0dv4ZnEdCAYWfy3OsMHJCFlq2E/FOFsttMfZuqu+qmMm9bErKJTwrNSpuN0l9XW1+xkmWSGUBykbFVXNSoqY/1MlaV7fTS5tQZ1/1Tt1vdm1mhkNjplVnZcpHRljMcQOTnUxVtWlRTiRU4F7R6cbrd7cNwtriFd21W8ixo/jgilSSYTXBUViHloskYTW5BppD0BKtbZ8/ZL/ffH1nyNPXd4mzi30zEGVXogWkG+mxVuWc7KY7MW2ZqotA+qc4pxSnS2kocQFgo6XZk6RsTa7vBZp8tcGWpYNqVowfCsRqhcyDQAMqmoNKHDjA/qNuy7h01c7nL89aLBpCsmiRJWWrvcALII1hJlJJLUGllBquEDKFci30y1OeZ1HunMP44j55jc+rWmwrLamnXFTWxHoTzDNU5HVOEsOrkuMl8hpG8eBTN2+XZrOKMbTaGJbm8MEit4WR1R2IYEtXTSgUHTmad6/vu39S7rPM3Ue4i5ex2sXcDJR45I3kjRSpAjIL6tRdlLnSuocxISDP5Vp4FPWf9Vr6dJySl+uw6ti49V2lpZIhY3S3NomwnW1hGnQfpZNu3GbS25KlOIBf9MIBHSVcLsFzNLP/AA+JEhl0zMZGVV1SOi6VRSrVCFjUKoPg1VocWttj9Zbdb29o283Mkl1BqtoxDHJI+mCKWQO0jq6aWkCCjSSMPzNAWowl3mH47U5HhErkzkKVIiZo48u1kz9Iknc3TszWZKbNcqzQiqFi+iE64+mO40rQhIbPqI9a7ldz2dymy2irJbAaQuYzcgjTRfFpBcBSwPbXIkdw2vb7O+sW6n3F2hvifMZ/C2UYYNr1OAmsiJi2gqcwAviEgm+NEyoTbVRzBnP9pqTouKze184fRKAKo0S/RHEiLE9I7U6KXsR2B8+lU755ctbjbrX+IdpRl8XaY60JrmeFThy/ykJoQtnve4/wcjNRKjeH9lZdOpVpkONBgK+5Hk7HRTU2A4hZNSZNZPgWEudVzFPN1KahpTdbCZsG3FqVYJeUlxSkuFbPogKO5XZk6N2S7NzJu24IVR1ZQrChfWfExX9mmVCKGppkMInqh1XtgsoOndklDyxSI7PG1RH5YIRQ4Jq9aEkGq6RU1OURMiyW/wAvmMz8mt5tzMYjpiMvznvULMdKlLDTSAEttpK1lR0A3KOp1PfqwbSxtNvjMNlGscZapCilT34pzcN23LeZhcbrPJNMq6QWNaKOQHAZ5ntOZzwktMbdNmh8CNT56eZ0Hl1uLduIIpyrhyV8h1g67jp2+0dvHT49RnjVsueJ8DulM8P+tyFbOnzHTt31H8j38Ohs9oGFKYNWu4shp+HD8rct02grIHbxOviNP9NOg81hz+n09uGS23WtDXAlqmgjF6JsHsiuiBP2gtJIHUu0i0RADIU/RgJuly0l5KTzkP34Vt7cGKlaiNdOwOmpA8/t3q6krGZXpywNlmEUeXP6fbhuv3LaSpStFKJ11Pc/79B/h0QS3NABwwIknqannhGk3iNCQdD3176fb4Dw1PUhbfliIbjxZY/SM4qGsVs6V2ohrs3zHEOwRCjKkpeNgiZKspFs849YNrYhRUQWoUcNRVtyHXnNXUJ36Rt1wb5LlZG8gVquo0ppoFCABTViXLtVgVVR4SaE03ayXaJbB4Izctp0voGquvU0hkNXBCqIliTTGQ7u1XA1NZOc5djs6sdVKnPGQgWUatmypcqFYx7WG/AU4qMzJSvdYwJDjO9tTchKVnYpKtFCU222F3G6hVFDpLAAFSpDUqR+FgDQ1WozBGWNEe8brtk8Ts0jahrVGZmVxIpQ1UEfGhK1BDgHwkGhw/4fuTnUrTcmuq51HfiFc49Ni47cTsZxyNQXn92G7gU1JWLah1C58jI2XNCw67FlVzbzLqN5aQIk6PiuWKTOstrqSQGRFkkLp5Whnds20iMjiAyyFWU01Fog9SprFBLaxSW9/wCXLCywyNBCsMvzBlSKJCFj
1mZTmrGOSFXR1qVDTyTmKXdxZNfjtGmBUGsaoDc2SZFnkkeE9kNzkseqZv5EmTMYrmUShEYYkPy3PpYLavULidySFl0+ls6y3cuqfWX0LRYyRGkZYoAAWNNbMqqNTkaaGmA+6daS3sTW23W4js/KEPmPV5ghmkmWMSkswQavLVHeQ6IlOosKhLrpV79dRXt629eR35TN3GYyF+TYQLpqBPXDeTLQ5I9WTGefr3I7o3JUUpUnXTTr10LYRy2trSNgpQlAFZCwqKZUBAYMOVaZYK7FBeSXEF9uGqdWYSASkusgViprU1IJQo2daAjHnM1RpD0w0seRVUspyHLNR9S6uKiwZgJYkyUMla20pVKcfUwk71MMu+mFq0JOnbmkUKLgh51BGqmektUCvs01OWoitMHN+tbchzZKY7Rip0VOnUFoSBWnEtprUqraanM4DEwhsrCk66a+fge/+PTXATQYqDcovGacjhuPv6q0CRqNQD/v+PU5QOfDAB4zXljXCnFeA0OnkP8AZ59ZHLOmMBFXiMKEeK89oCFHX4A/y61NIBwpjeltU0PDDlgY9JcKVBBHhp2Ph/tHUZ7hRxxLS1Iz4DD7rsWkEDVCj3GvY9xr8dOoMt2AePLBKC1B9uHpCxbTTePu1+/4AePQ+S97MFIbI14YdDFAy2kHaNddfIff2Gv+zqGbpiSK54mC1RVzOeFBmtaQtPZPl20/w8dfAdfDKxHPGowIDxw4mILemgA76aa9x/DTt1pMhpU8MZrEAMsKiILYSSSP9X+rv1r83OlMffKOnI42WorH9Q/w3H+ZB6weRwcuGM44A3xHGwGmkHUJ1/gP9w61anfG0QBTXGTcj/L/AA1H8+w6wIYcTjcIw3M0x6ICh201Hl4kefw6+ZjGwRJTPH1LW7t27DxA8/LrxkPfj3lJ2DGq6gkKGvcf6fA9blI5cMaXhAwkmO4SflP8f9ffTqTqXtxGMfLGpIp25gKXmgRofHQnX7+/WLFSMZx61NRwwGuUOKF5FjxNYkt3NDORkFI4kJ3CZDTucaG9Kk6vNI7agjsdQdeoMkccd5FuIAJQaG7TGxz/ANU5+84btk3eZtvuumpnItbyjpXgtwg8J7vMXwHvA7cQ7yvDTYx1XUCqNVMkl6xWphtxmLZzB+VaoQ2oeizNS8gqUlshBVqNAej52mBT51oQUoAyimQ4qQBnTtHZmMC4+qLy4t/4bvOpbxHJjkbV4+TxsTXPLJu2oPLAYep3nkErcWsq76LKjoe/buNAdPHr4bFWBRlBQ/T340G60sHViHBqDU1BHMez68M20wxuTuUWdSR37eB7d9SDr4dKO5emXTG5uZTC0Ep4mE6P9kgoP9XPFnbD63dc7HGsAulu7dRktwvmED+uCsp970HLDeZ48gSXW41jGdEFb7a3HI6Cp1k+ohTmwAarbfSnasDvp3AJGhX29F9mrVLm6p2Ex1+vy/0fVxw6RfzJ9RfDPZWIy4gTUHeR5pJHbQ17K85a4nxOvNW7OyVWPWsp+UuUmxqB9c/F9NhqPFjl1tC1xkJYaSNi0NrJHcA9Tbb0x6K27S10001ySPDO+ge2iBAwr2kjtrhT3b109Wt4kNvta2draaCfNtYjJqNckYytK0eXEgI1c1pwxJrjb2lWstMS65D9Tj/FgVrbnyIyBkl4wkfks0mPyFImz35B7F59pllA7lwgdHZLrb9tT+HbEI5pBwjjAESdrO48KjuBJJ5VxXlzabj1DM28dWme3tgxElxOxM01OEcEbeOZ+QJCoozLUyxJ3FsDxnEmlt0MV5O9S0omWBZetVMf0pddYQllhak/iDQA17akdziY3kIe4IaQdnwg9wP3nPCdNpQvFt6GG0Ynj/iMvIO4px5haLyzxtWFI08SspAQFak+aie+wEeZ8/gOtoqcuePkECrQj4B2c+4YT2IrUPX0wkK8Bp2CB30Cfh/pr1LSMKM/9OJ1Sxqf6B3DuxuiSpI7kfYft/n19KL24yJo
KUyxqyVh7tr4juPIa/D7+vukAE8sZq4NO/7sIEmnS8CpPfXvqfAD4HrW70yPHE6IACoOGxOoR4JTp5n5fH7fu60UBzPDE5JCBTlTDDy7E03OHZpTqQVifjNkAnTspcdH1Dfbw8UdIXqHbF9otrocbe/ib3N4D7s8W56Lbr8n1mYScrizlX3qNY+7FCueYGust5EoI2jfuO5O3w1QoDQdtPh16ztNNuT2Vww9W34G9vEo8LGuI35V6jK3WSCNu7zJ08x5d+/SD1OrKDTiMWV0CVfTXnTEd7oKLqtpV4qCtyST5dj4nUfDqrp5TXHTO1WsTRgMBSmGjMo4Exrc8zsd0Gq0jaruT4o1+PU63uDp0tmO/wDXiLuWx6iXt/Ce76U+rDMcpJsF5S4zYktAkjT8WngNyCABoPh0btHRiABmeR/XhH3C1u7YESAgd2Hpjr7yVtpfYcbc18FAgDTzGo+B+PTdYQigxXe73enUAcSXxJ970wod0JHZKvBQI8NPDy/l02Q2sLxFJQCrChBHGvL34q+9vrmOdZLcssykFSpNQRwIPdjayKfIAOoUR3OqfAaaDQkAkj7+q33vpw2cplsDrtz+EnxL3CvxD7eWfHF39JdYSbpbLb7shS+AA1geF+8gfCx5/h5gjhgOzrYIcUDoSCe2pJ8SorB7EEHw6VSrKaMCDiwOVeWPcK82g7itKfDXUeI+OvbxA8evmPYcrdyEthSdEnt5EaA/iPw0J8O/XsexjfvWGx6gWkLI+YFQ3anudNPAdx17HsfIeVMBX/GBUSQRqD4a+Z1HYfz69j2HtX5U2QkIXuI7g+WgI+J1P+rr2PYJ1BltlubEYOqKtAVIKu41Gmn9Sif9PHr2PYP9Pkb8Rhh+3krSdC4mNv8AzHPwlO8DuhJ26aeJ60SXEMY8TCo5DjiVDZ3Mx8CGnach9Zx4s88lWAQlK1NtlexDaSQlCBptA+35uhNzdNLkB4OQ/ScHrSxS2FSayHif0Du+/DcVbbwVOL0CVFR3E9htWSdewHcd+oa0rU9mJ9DTSPirl92M3Gvvm9tfGHA3M/DGdcUWeTZ3y7UzZ1LntfjWL2jWMz7qtsU0VgzbWdhHuK6XGspiXn1xmyQ0lKgVL7A303vfS+29I7tsu6RXTbzuMk/lssaPGoMSxRVZnDCjqSdKmgzFTlilvU3049Yuq/W/o7r/AKQvtoi6D6d+WNzBPc3MNzKUvXnuPLjit5IHU25RUEki62qraVoSJaCT9JUxorpCXIhfjA7gAppDzn0q069i25FKFJPmlQPS3t0guLGKT8WgA+0ZEfXi+t5g+W3W5gWugStT2HMH6qYW2rNBVrqkgdh38Va9+58hr3+zrc8YcUOI0DsjVU54a2X4lAyFEmXAWhixLaVr00AfI9NWqvtIUetEMpt6I+cdaDu4/qwQZBLWWLKSmY5HEZ8goLOslOIksunWNqpSUqJGxOvfsddfRPh0WiKSAMpBAb7/APTgRdiUVy9x44YM2ybjIbK/VA2bNSk7tULIAOoGmiSnX7OpiA1K9h+8frrgM8gFGav6MsNeXkzCTtQVAkDsNNw08yB8f5nrIxkmhxiLhQKgCvbhLVkiE9iVk+RHbtqftGvfw6+iIUzxi12Sef6MeHMhZfGjiFaDz7HXv5p8R38e/W4KFzGI7SlhnwxKj2ye/wA923s/sjL9uXN2ZYFVvSlTJ2HmTGyDArR9wn13p2DZHHtcaclvt/IqUiM3LCfwOpIBEyO7ZF0GhTsOY/o92eIEluHOoCjd2LtMB/8Abn73Jxalqm509s/BHMbBjpYlv1MnIMAlWrSB8zlixYJ5CqlyHduq/SjNM7vwtpGgElLmIfCGU/usQPtriM1vNWhoR3j9VMEeH/7crYfXKXYYZ+2XxHi+QhXqR7lvletecZe2nR1TNdwNSTFkKOp0koOnn59bhcCnieWn9b/+HGS2rNwCA/1f6cR35l/9uHvf7y3Al0WAHjL2+0stDjH1PHWM
uW+WiO8kpcbVkOZy72Cy5sOiHoldDfbJ3JWFAFOprm3U1RQW7W8R+3L7MbhZSAeJsu7IfT34qguMiz3lLK52b8h5LlHIua3TqXrXJ8xubTJr2esbtvr2tu/KlONshW1CCvY2kbUhI0HWmS5mnrTPG+KFI/hyXt5YNmH8fWE8trkMojtKCdoOpJ1I7lOny6J+0DrWsBYgymgxLN3HCtUGp+/9WJZYfx/HiJbPptqWNNVrSFFW7uB37J008uiEFvGBQZAj34DXd5PMPEcuzB1rcVQ16ZbQN34joEkAfDUJ8dfh4dbJlGnw4hRSNWhrgvY3QOtrZUUa6FOp2+APY/Dtp/PqHQjE5SDxxMTjLHnJT7QLf4igBQST2AGvbTTQgdTrddQ4Yh3kgjjJODnyjU/S0WOV/h60+TKUAB+GLGaaRqCPDc6rqwuhrUfNTzU+GNR/rEk/cMUd6x7iRsllZA/4ly7+5EUD7WOGzidbsSQfAJ+H8NPs6tm3bT78cq7kvmEjlh1OQlIUf5+Hcf7epyy5VPHAB4dBxgCCg+OvxH+77e/WwuGGWWIjjOpzpjE6wNNRp37gd+/29+w7+XWxXrkcaSooO3Gg8wF/YdPDy/h5DraDT2Y1PHhOdjEeX29/9nketquDiM6U48cJq4wJ10P2j/T4a9bQxxpKZduF2j1jvjvokkA/+fx6g3yeZH34Yen7trW5FTQY08zwejyUuS3okUWriG/SsFhxDyVtnVGr7CvUTqDoSpDydP6Ne/WzatzvrJBDE1YlOSns7jy7h9oxC6t2TadzujczeZDM/F4uDHtePIMf3lKt2hsCebj2VVrrbs2Ew3FjJShmXWth+OltsEJUA0hLTK1DxKkBRPfon/E43BiYsuo1KsaVPtzr7K0wtS9IxuguLTybkxqAHUVdQO2OilSeZ0Z9pwVcS5DsKRhptToeZQACFHuAPHX+fQq7soLkkkaTjTaz7jtQpGdSDlgmtc5UrzRblvtxlDUHesbTofj4+I6XbrY5FNYvEMWX031OJ4/LuVKN34RbTmTEK2K/YzLuO00wncG2XkrkyHFahmNFYSorfkPKGiUgH/DqCu03U8wt4lrIT7gOZJ5Af6MPrdQbftFg+53j0toxyzZmpkiD8Tns5cTQCuIa5tyZyFyvmtXVUX/spflsLrqVKtUR6PH1u+pa5DYynW0voEdvVa3AEpcc2tpB16sOx23bOmduedl864JBJPF34Kiry7hxpmcURd9SdUeqnUEduZGsdjiBOkZJFCM2klc5O5HEigByQdpjtnW3a7HsWrX5AxjEK5NXSsrJQ7McJLlhdzU99Z1tKUpZ80oUE/HqBtthJAZb260ncbl9UhHAfsxr+6goO81OGbqXfYdxFvte1hl2Cwj8q3U8W/bmcftynPPMLQca4RG4bTZJS2B4kdhqCTrruHfXt0XAIGFRtfaa+3G+00lXzDVCx+FQ1A18D4fhP2jrHNcjmMYHPLn2/T78LtfZXUFafo7KfHKdDo3JeSkf9pJC+33jrS9tbzj8xFI9gxtjv762P5MsiEdjEfpwQ6/IMyloS25k1460oDVpywkqCU+H4S5oRp1FO37ajVEEQI56Rn9mPl11HvzqUF5cmPs8xj+nD/q4T74SuZIkSVkD5nXXFn7juUdwJ60OY0yiVVHcBhPuprm6c/NSO/tJP38Rh1NQ/RCdg0A/qHYHyI8OozTBsjiPFaMPF+HC1Bt7elbeVU2L9aXX4cp36dSUodfr1urhqcBSoKS0Xl6pOqVhZCgQeoNxa2t2R8zGJKBgK8QGoGp7aDPiKVwwbRvO77KWG2zvbhnjc6TkzRlilcs9OpsvhYEggg4HuUZTksuysLWRavKn2dU7ST5DbcZkyal1pLLsFxtlhtkMrZbA7JB7ePUu026wihS2SMeTHIJFFSaODUMCSTWpwal6k327vZdwuLhjeTwGF2AVdURAUoQoC0IAGQrlxwwr/Oc5uULYn5NZyGlR4ERxl1xs
sut1c9uzguLAaAcltWDKHVPK1edWkb1q0HUu12na7U6ooIwdTEEVqCylWHcNJIAGQBNAMELrqnqS/j8u7vJWUoi0JFCI3EiE5ZsHAYuauxA1McNh7L86TIMlvJrRmSHJziXmXUMPJXZ37GUz1NuNNoU39VfRkSV7SAVjT8JKTMXbNqKaDBGUoooRUeGMxLx7IyVFeXfQ4hnqLqFJDKLuZZauaggGskwuHoQARqmUSGnPuJGPM7k3lB6RJlrzCcZEhuubXsi1bbbf6S+5JrnYzDcFLEOTFdeVo60lDpSdhUUgJHyLYdjVFjFsugFubVOoAMCS1SCBwJIrnSueN0vWfV8srTm+k1sEB8MdPy2LIVUIAjKSaMoDUyJK5YZFnmnJcysRUSMuuHqxuHcV/wBMtxCkvwr5l6NZxpbhb9ach2NIW236yl/ToUUtbAdOiMW0bNHMbhLeMTFkaoHAoQVIHBaEAnTTUQC1cC7jqbqia1FlLeTNaiORKEjxLKCrhjSrAqxUaidAJCaRhDhZ3yfS5Fb5VW5XZs3161GZuJ624sr9RRCSy3CVLiy4z0Jx6GiOkMuFv1Gu+0jcrWW+ybJdWkdjPbobSIkooqNNa1oQQ1DU1FaHmOGItv1Z1RY7nNu9reSruFwFEjnS2sJTTqVlKErQaTpqvIiprhsuUOVJjMNifm1uGYEWbDitRfpa/ZHnwXKyQhxUCPFU+RBeW20pwrUwFqLRQSSd0PT2wQszx2sZZ2UmtWzVgwpqJp4gCQKaqDVUY9c9a9X3SJFLfTCONGVdOlPC6FDXQFJ8JKqTUpU6KEnDeynlPkTKozkK9yextI7wrQ8h9MVAeNO5JdrluliO0p16M5MdUHFErUVkqJ7dSrLY9msHEllbpGw1UIqaawNVKk0BoMhkKZYg7r1Z1Nu8Rg3O7lmjIjqDTPyixStACSpZjUmpqak41Ty1yWmsySncyya7W5fIsZWQxHY8B8WD9ulCbNwOPRFvQ/rg2kuiOppKikEjUA9eGwbGZobkQKJrcKIyCw0hPhyBodNTTUD9WMl6x6o+UurL5uQ2l4ztMGCnWZPjNSpK66DUEK1oDxxng8ycq1tVR0sHM7WJWY3IiSaRpAhh6CqCiS1EZExcZU1+EwxMcaTHdccY9FXp7NgCRhN07sU88t1LbRmaZSH40bVQk6a6QSQDqABrnWueNtv1z1XaWkFhb30wt7ZlaIDTVNIIUatOoqAzKEYlNJ00pQYSU8qcgRrm4yCPlVmxd38qqmXNhGMeM5OkUshmVVrdSww20lEN6OgpQlKUEDaQUkg5vsm0NbxWjQIbaIMEU1OkOCGpU/iBNSSTzGeeNcPVPUi3k+5rdype3DxtI60BYxkNGTQAeEgUAAFMjUVGNpfM3JLuVNZq7lctOSs1wp0WDMaujtuVSQvStkV7EJqslwvzCS06ytBIBIJSnSP/AJe2VbA7csC/Jl9emrHxftBixYN3gg/biceseppN2XezdP8AxRYxHrCoKx/sFAoRlz+FlI4HkKbjHN/JzTV9HRmVwW8nkvTLr1Hm3nJMuTG+hkSo7rrS3ayQ9AAjqVFUyox0hv8A4YCRFfp3ZmaJ/lotUIASgpQA6gDTJgG8QDV8Xi41OJUXWnVEaXEfzs2m6ctLUglmZdLEEiqEp4CU0nQAnwgDGvacoZhkURVfe386yhrsWLUx5JaUgWEarYpmZKNGkqbUmsjtskJISoIBIKhr1lBs232knm20SpIEK1FfhLFyP9Yk59vZljXd9T7zuUJtr24klhMgkoaU1qgjDDLLwKFyyNBUVzxrRL11KC2HVhCtCpAWrafAfMgEA9bmtwTqpniCt49NIJ0k8MOSJaLcT27Dsddfif8AaOtDW9c8bluhpywvR5aCBuPf/A+H+h61PEQMb0uA+VaHCuzNaBABT/gRp28R4jqO0TYkxyAHC4xLQrzGvh2I017jv8O/Ucx0wQjlGitRhaYeToND3076eP8ApqetDocS
Y3oajhhQbfW3opKj2PiD/rHhr1GdFPEYIwyEZrhawajnZJilRMXYUdKluujLVHvbiuqZLigyg7W40+TGeJUR4AE9DXuIrUKmmR6jIojMB7SoIxtNtdXt1M8ZiQKxr5kkcZ5/CHZS3urgf5PPuY856KqFJcajq2IfjNOSIrwHZK2X2UKacSe+hBIPRy1jtzGG1Cp5E5++uFy6muCxBRgoNOBp7ez2csMCTYWSj3hzQPtjPpHn4/lgdE0jjHAr9YwNkmkBzVvqOEp6TPUPmYkpB+LToA8NfFPW5UjHNfrxFaSYj4SB7DhLfL6wSvekAH8SSnUefiAOt6hO6uI7PPWlGp7MeqOROrr6jsqhpM23gW9bMq4JYcmGXYxZrL8KN9IwQ9K9eUhKPTQdyydB3PWFzHDLaSwznTA0bBjUCikEE1OQoKmp4ccb9vuL233GC4tE13UcyMi6S2p1YFRpGbVag0jM8BjaRcXCcHFGmmuXYRz2PbIvWy+3Fat007sP9HjlquANvKZUHiXpDy0JZT6LTW59T0c29udz+ZMkYk+VKaMqldYOs1b4QcslAJJ1M1FCklvL3+ACwEMxh+fEglFQok8sr5Yon+Iw8R1OxAUaFWshdVybJcln3Wdyn8cn1jV1KrY+QQZ8Wa/JqLOJIZdiyp0hUWAmPkEtyDIDi1MMpX9RJS20gEBGqzs7OG2tY0mV2jVijKQA6kEEAVaqCq0GokaUJY89247nuVxuF/LLbPEs7oJVdWLRyKwKs50pSZir1JVQdcoVFBoqbVSpaQnYw+fD8LDh0/iE9z1jdxRn4mX3kYN7Lc3CAGNHJ7lY/cDhTs5c9xnRUaSgeRWy6Nex8ygadRbeGAPUOn1j9eDO431+0NDFKMv2G/VgYzgtS17tASrvu7EeY8dD0fhoBlmO7Fd3juzEyChrzwmJjtEje8wAfAlxvUfHxV1uLEcAcQQqk5lfrwqxoMMkEyoo+wyGR8PivrW0j8wa+w42JGhbMrX2jD+oqyrUtJcnV48BoZcYdtR8XOoE8stMlb6jifDFCDUMur2jBkp6mj2pJsa3UAdjMi6/Z/4nfoRLNPyR/qON2ka8yMO5EWtbT8j8Veh/oeaUPv8AlUQeoLvKa5MD7MFLZYxxOPhEdJ+QoI+zQ/4jx61gvTxVrgmvk8eWManBodqCfuSfHt4adfVB/Ec8ekcUooP1Y1EuO7xohQ7/AOU/D4afDqRlTA1qk9hwqMvPeHpuAeR2K0I1Hcdvj1gQMZITwGFJDjx01QsDTzQR28B5daiF99cb1JwqRwSB3A+/Qff/AB60OT30xIQCmeNlTev9SB276qSPv8+sKj3Y2n3Vx49PT+tH2fMD/q16+1HOtcYZ8se0p+X8X8vD7f8Ab1iaVx9Vz2HGdsaD8Wv26eHb/X1iaY2VJOMbmm7uAfidR4dZAA86YxJOMC1DXsB/7iP9o79utgFOBxpzPHhj5uc8gr+CP9Xbr5QHjxx88Ne/GBTju4bUua69iEHx18u3x6yAyOrhT7MfMy1B8VRw41rl768O/AStK1VZMm3+IWWLWEKXJkInYvkrsR+h/VklaZUmlkOF6LU25WrV5txtxClgEhJ1PU2Ly5EFvMsnl0GcbFJQv9dfEV7M+B54IJe3NtcfMTxq1wDSkseuMv2mMggScNRoamlQDmYk3NFjs+dMcjW1dW2Cnn3plbOn1AhB7VSnhCtIEtyrUNwOjf5Z17BI8OmSWSzjt1ktfNIAA0lSWp215+334BJFcz3TR3LxrIxJ1VVVrxpQZL3CgHLDPNHUqXtXa1YB/qVYQ0p0P3vjaP5daDcMBUI1P6p/VibFZKSFMkVe90A+uv34e1Dx3js4tuTs0wqsY8T6+TU6niCR+Fhqa44D9itvUJ7y5NRDDJ7SCB9uf2YKNY7barW7uVLU+GIea3+sPyx73xIXA8D4wrrNhdHlUS6yFCQpH6NkzEeUR9kelsBOcb18lLKfs6g3T3UkR+eIW37CoC/W
w/QMaIr6NJ9OxWri75OSzye5Eoo9h1jEiGi+0+EvofVISB3kqedk7R+EKU+VvKH3k9QUW28n/linlfu0pX3ZVxBv23Frmu7+f84R/vQwancGAoPZQYWG5D3my5/8TVrp/wC49YFRyOIYCE+KmMMp58pH5bgT30/LVpr56du3XxVFTnniQgiWnAjuwgOuOgk7XNfMBs6/w0HW0KPxcfbiUpHvwnOOvlWuxwfYW1aD+Gnj1tULSg4dxxiTxqMZmfUUfmKk9/FQ0+/xGvh1i9AD2Y+pnkae+mFLQhGgO7QePbv92vl1E4t2YmVZR4cxhJlJKz8yko/7xA8dPDXTy6+hV7RjIysfiU/VjUYjR1uONPyI6Y70WazIcWtpKGmXIziXHXVkgIbQDqpR7Adz0tdZxRydM3YdgtFUrUgVYMCAK8ycgOJOQw3enl1cwdb7e8EcjEzFSFUk6WUhiQATQDMngBmcU0c3UtEy9MSzaVLxQ9LQlbU+GvelLitikem7oUnTsfPrGyCtZ1agJQE17ac8O/WDP/GFah11Pbw/ViuDNYUQyHNsmKCSoHbJa8AdCey9AP8AX0g9SW8RDanT6x+vFodB3E6MmlHPD8JwArmtgqWoqmwQO+usthPbU+IKwQnx+wdVPPbQGQjWnH9ofrx1JtF5ciJaRyVpyVv1YQXK6ElsFM2CoFI0CZTBHl8HDp3+wdbo7ZBwdP8AWH68F5LuQipjkrTmrD9GNWJXRVPALmwkpJHdcqMEgA+J1WOxP3DovZQpqFGXTXtGFzdLqVYjrjY5GtVP6sF3HaDGHFNiVb0aNT4v2Vcnuf8A4Y+FaD+XTlZDSoKmuXLFNb6bO4JFwqJXtIX76YkJQ4hhP0YLOSY6legPyXdYsbvEDaJKiT/LopcXV2sQ0xufYrV+wYW9v6f2O4uNbXsEQrkHljp9rYbuS4hjao7pRlFICEnQG1gDxGp0H1O4/wC7pK3a8vyDoikp/VP6sdCdCdNdKjQ1xf7eWBGXnRV/vYi9c4xXosvlyCmUjQ6BFlFPgToAfWIGp6reeW/YsXVga8wcdKWe1dJxyRqlxatHQVpIlPvpjEMaiEHZeUYUB8m61g666d9NXxqf9vWNs95qGT09+JW67P0gYGaK5sw9OHmx/wDfY2l484wyn0rmnddKNVbbOC4BqfAFLpPb+XThY3M4FJEcjvU/pxSPUu1bVHHW3uLZjnwkT9BwgPY6/IUUPXVCwn+pTlnBQPh3U5IQNfvHUi4uZCprGQveDiv1tYxL4ZE48iP14V6vj+jW4lcrMsaQdwJC8iqEnafx/ltyirQHpfmlWporV7NJ+84NW9vGANboR3sv68GCjxfCIKNy8iq5ykaaIh2ENYUnbp+P11BQ8/DqFJLM1RGgX+tmfswWiSxSmtgc+Wf2nL6hgoQZMKO2lumisr0T8rkcfUvnt47kbz3A/p6gywll1XD/AG0H1YIxzAErbIa9tKn6+XuAx8dkTVrHrMyUkNkfmNuIWdQ4QfnAOup60hY89JFPd3Y1yNKWq4Ne8HGVh14hnVt0fOfFCu51TrroPgOvrKudOztxiSxpX6ZYS8qlymMbsjGjy3JUhlUJv6aO+8uOJaPRdmuhlBWhmDFUt0nz2aD5iAY1yxSNig1NTID6fWeXPEu1TzJlDkKvafZ9vs58O7FMeZzLa3yi5so8S5qo8xxCqmsKbCOqDRNwmY+PtJacaRoDTsx1pVqSrdu1Oup3RRWqQqjPGzrkeGZBzave1T9mMXe8kZ2RJVj5Zn4aCmQz+GmLRONLy7ucHxuXklZeVt+ugro1pEuq2dBkok08GFWMv7Zcdlz6azq2Y0qOojR5Dqikq2k9R7FY0aSKAoYBKzKVzB1Ekioy1K+pSK1FMSL4zSOks+oXBRQ4aoYFaDgc6EAMO0HLufcUqWNCFJO7+rUDXQaDU6fNp/HqYVqTUjERSwAwvsF9KwSnUfTncE/N3Dfy9tSRqUj/AA+PUaRUpx/Fl9eJ
kLSVAUNWnYcbMlqmktMIu1Q2kqDg3SnmGFpQVK1OrimzptUdfDTTrVGrpqMBqa8sxXL7eH2Y2yOj6VuF9/A4DuR4Zx1LZVtyjF42jiwBIu6plJCkJK9FKkpBKdg7n4jokl3cqaSRORTkD+rA6e0snXOVOOeogH78AW740wcrcXHznD0r8UpZymkPiP8AKJ+p0PgOpiXDMc0ce1T+rECWytBksyf6ykfYf0YH83j6lTr6Wc4yrzSDd1WnmCNyJRHj1IDkjMHEF7Oh8EsRH9YfpOEFzCG0FQayzGHBqe/65Xa/b4yNO32dZ+HvpiM1u44slP6w/XjPHws7gBlWNAdtCm6rSO23Tv8AU66kH/Dr35fLGxQwFCF+sfrw44mCRFFPr5jjae40/wDZ3WePbXX/AJnt1sU5ZV+3HpFOnML9Y/Xh+0nH+NOONh/N8cTrtHe8q0gEHv8A/HSUhJ+3rLVT4tRy7DjQY5yPBoA/rDB5xnA+OGAj1ctxiQ78um7Iqnx089Jate46+Fo+w1x7y5a1dhX24kFjFNiTCm0Q7GhcUANimLGA8ddR3SpLyyetyOx+EEe7Gtk0/GanEgMdrq7cgNzIWg8SJUfUjQf+9VfKD1sGricYExiteODbTQ4+iAJEUjbodJDQBSPA6792hJ+7rcNfOtcRpChPYMFehiMDTWRGPYD/AI7J0GviNVjrYNXHEY6Q2XHBnoIkTVH58XXt/wCO1rr5AfP/AC60uTxGNyBcTO4ejRCqPq7GCte2r7WpHY+G7XsPPotZmoz4UwI3WoGVcPPmlwps8fYaQTHaqnlpdSnc0487JWXUNuaFta20JTuAJKde/Vp9FRx/KzvUeYZBlXOgGWXHPOmOcPV+Wdr2xhZWEC27ENQ6SzOagHgSABUDMVFcMClffSNGm3FAkalDSiAe/wAB0/oP2cURcnOhH0+nDDuBWtP5g2qAB+cbSfHt30PW1a054Ezae7Ca+VJUdO/ceCdf9Xl1KUVpXLAuQ0r241Q64D3bWQfLYdR8dDpp4db9CEUBA9+I+o9mPDqyf6T5ajaNf8R49fVVQMYsRy44T3VK1Pb7u3+vt1tGIzlvdhOWpYWDpr49gnX/AFd+twApjSaY2WHFJUCErJ1/pQST8OwHWDCuXLGyNmVwVBJ7s8KEiS/6Y/JkeX/guak/D8Gmn+HWmNF1VqPrGJN9NIyAOrD2g49xZ0xIIRFlHsdSmM6e3bXXRG3bp8e3Wx0Sh1ldPeR+nAmrPIojBE/4dNQ1f3SM/qw2LkYxPS6JCWIkzRQU9DdaiyEnTxcaH5Sz8dU6/b1rWJ1Gq3che7xL9X6iMTjdTo3lbrGk3/WDRIP7YoT/AGw+IyZvjUZfqGLl0NCir8E5lvaGtfmBWxIKyv4HQDr64mb4mAPswd2q626Jqw20jDLISA/co92AzFx3IEzmNc2qIkL6lX0qKyqi2M1T58PpZFwHWWZO34NrPmB4dT4JbgRGK3RRQeJgCzEdppQAdnIYB9QbX01ebim47/JdeZqJhikuPIhXtVFVgXy+I1LNnU0wdcOiM1T9hFhvTptytmM7eS7GW7Y3zkYk/SqsFrHqRoO7X0mkobYH9Kevn4lLmr1OmuWeVdIyFeFSM+04yEkbWGjbVRdpUDKLOOlfDqcE6hX4atStaCuCWguKGqtwUfHcCDr3/pIHj1JUGmBRJDZcf042B31GgHx1IHf7Ses6HtxgWoOGFCM0FaAuNpT4klaB/AanTx6+5DM5nEZ2bVwp24dENmMgJCnG1p8tFpJ0+wgkaa/w/wBmpmJ4DPEZ2ocxl9OGHnU7fUTt27dU6bdNfHw0A16izV051xpcZDPjgn1qwdvykeH9J2k6+eg0B6HS1wPZYznUVw5myQCQP+8NNR/HqIwzoceiJUErmOfMY0p6yGlbUqI0OoCT4/YdP/L1siGeeMXYFqqKZ4F9w6vU6tq2knxQoD7TroNO3RCJQTkc8T7dmyqMieeGHMXoSQCR37bO
4Ovn2/1dS0FcudcEBppXKmEJ557UBTJKPLUaH7Ckka/7OpKInI54+HhnwxqlKFalRQNT/UUJI+wjX/Edut6A4welCeePfoRNurjrP2pS42Vf4HTrcKjhiMR9X24RJqYytwbLKe3iVI/h36kxtIBmCfdiG8cZGbL9YwzZ0OISorlRu/kXmh/rV1LWSSnwt9RxoaJOTLT2jDbfhRCT/wA1FSP/AIc1/j83h1uEkn7LfViI8cY4FSfaMJLkSIjXSTHV317PNn/6Y9Zq7VyB1ezGoxp+0tPaMJUphpKSfWSdO4CVJOv2a7utwkk5qfqOPixx1yZfsw33wde34e/hp/r7dusSzmmRxvCoM6jGuB9v8ynX/WOteMqc/wBWPQ0H+Yk+Omnb4+fXsycuOMh78bDIcUryCf8AtKA00/l49ZUX8RGPmo1yBwuxUbdNFJPxAUD27fb4dYNp5YyVm5jDijuvpRohtSk9tdEE+GnmAR1pYCueNmpqUIwoNSJo10ZfOv8A7yc7fyHWt1j5kY2o8g4A/Ucb7MqfqNI8j7PyV/w01SetBSLtFfbiXFJLX4W+rDnhSZug1ZeB7f8AhL8O2uvy9u/j1CkSOvEfXgrC7UqQfqw4o0meCPyZB/8AfTn+Hy9RpEjpxGJsTyg5A09mHLGdklA3ocGumu5Ch/PVPj0Ol01ywatakeLH/9k=) **Módulo 3 - Ciencia de Datos y Machine Learning.** **3.2 Aprendizaje Supervisado.****Autor**: *Por* Prof. Alberto Fernández HilarioProfesor Titular de Universidad de Granada. Instituto Andaluz Interuniversitario en Data Science and Computational Intelligence (DasCI) Breves Instrucciones Recordatorio: Introducción a NoteBookEl cuaderno de *Jupyter* (Python) es un enfoque que combina bloques de texto (como éste) junto con bloques o celdas de código. La gran ventaja de este tipo de celdas, es su interactividad, ya que pueden ser ejecutadas para comprobar los resultados directamente sobre las mismas. **Muy importante**: el orden de las instrucciones (bloques de código) es fundamental, por lo que cada celda de este cuaderno debe ser ejecutada secuencialmente. En caso de omitir alguna, puede que el programa lance un error (se mostrará un bloque salida con un mensaje en inglés de color rojo), así que se deberá comenzar desde el principio en caso de duda. Para hacer este paso más sencillo, se puede acceder al menú “Entorno de Ejecución” y pulsar sobre “Ejecutar anteriores”. ¡Ánimo!Haga clic en el botón "play" en la parte izquierda de cada celda de código. 
Las líneas que comienzan con un hashtag () son comentarios y no afectan a la ejecución del programa.También puede pinchar sobre cada celda y hacer "*ctrl+enter*" (*cmd+enter* en Mac).Cuando se ejecute el primero de los bloques, aparecerá el siguiente mensaje: "*Advertencia: Este cuaderno no lo ha creado Google.**El creador de este cuaderno es \@go.ugr.es. Puede que solicite acceso a tus datos almacenados en Google o que lea datos y credenciales de otras sesiones. Revisa el código fuente antes de ejecutar este cuaderno. Si tienes alguna pregunta, ponte en contacto con el creador de este cuaderno enviando un correo electrónico a \@go.ugr.es.”*No se preocupe, deberá confiar en el contenido del cuaderno (Notebook) y pulsar en "*Ejecutar de todos modos*". Todo el código se ejecuta en un servidor de cálculo externo y no afectará en absoluto a su equipo informático. No se pedirá ningún tipo de información o credencial, y por tanto podrá seguir con el curso de forma segura. Cada vez que ejecute un bloque, verá la salida justo debajo del mismo. La información suele ser siempre la relativa a la última instrucción, junto con todos los `print()` (orden para imprimir) que haya en el código. **ÍNDICE** En este *notebook*: 1. Se retomará el concepto de Aprendizaje Supervisado y sus fundamentos.2. Se realizará una discusión sobre cuáles son las características principales que influyen en dicho aprendizaje.3. Se explicará la importancia de la validación de los modelos de aprendizaje.4. Se presentarán distintas alternativas de validación. Contenidos:1. [¿Qué es el aprendizaje supervisado?](sec:aprendizaje) 2. [Características de los datos que influyen en el aprendizaje](sec:caracteristicas) 3. [Caso de estudio complementario: Aprendizaje de cáncer de mama mediante imágenes](sec:dataset) 4. [Necesidad de validar los modelos de Machine Learning](sec:val) 5. [Bibliografía](sec:biblio) **1. 
¿QUÉ ES EL APRENDIZAJE SUPERVISADO?**En módulos anteriores del curso, se han destacado algunos ejemplos de interés sobre el uso de Machine Learning en el campo de la bioinformática. En concreto, dentro de la Cápsula 2 del Módulo 1 (*La Bioinformática. Aplicaciones en Bio-Ciencias y Bio-Salud*), se enumeraron el desarrollo y descubrimiento de fármacos, la microbiología, minería de textos biomédicos, medicina de precisión o personalizada, entre otros. Siendo más específicos, un uso directo del Machine Learning en estos campos sería como herramienta de diagnóstico, para determinar la categoría a la que pertenece un paciente (sano o enfermo). Otra opción válida sería ajustar el nivel de dosis concreto de un medicamento, en este caso calculando un valor numérico. En los dos ejemplos anteriores, el objetivo es realizar una predicción del valor de una variable de salida, como el tipo de paciente, o la dosis del medicamento. Cuando el ser humano realiza esta tarea, la realiza en base a su conocimiento basado en la experiencia con otros casos similares (pacientes y medicamentos). En el caso de Machine Learning, esta experiencia se recopila a partir de instancias en un conjunto de datos, como ya se indicó en la primera cápsula de este Módulo. Tanto el diagnóstico de pacientes, como la estimación de la dosis de un medicamento, se encuentran dentro de lo que se denomina como “*Aprendizaje supervisado*”. De acuerdo al tipo de variable de salida del problema, las dos tareas fundamentales dentro del aprendizaje supervisado son la **clasificación** y la **regresión**. Se denomina clasificación cuando el objetivo es determinar la categoría de una instancia dentro de un grupo de valores fijos (el diagnóstico de pacientes). 
Por su parte, la regresión busca, a grandes rasgos, crear una función matemática de interpolación para una variable de tipo real (la estimación de dosis del medicamento).En ambos casos, se utilizarán las variables de entrada que definen el problema o caso de estudio. En concreto, los algoritmos de Machine Learning suelen buscar correlaciones altas entre las variables de entrada y las de salida para así construir un modelo de buena calidad. **2. CARACTERÍSTICAS DE LOS DATOS QUE INFLUYEN EN EL APRENDIZAJE**Dentro de las tareas de aprendizaje supervisado, es decir, clasificación y regresión, el algoritmo de aprendizaje busca ajustarse lo mejor posible a los datos durante el entrenamiento. Para ello, se utilizan las variables de entrada que describen a cada instancia del problema. En este sentido, un tema importante a discutir es si resulta posible determinar la cantidad de datos que es óptima para realizar un correcto aprendizaje, así como la relación entre el número de instancias y el número de variables que las representan. Otra cuestión muy relevante es comprender los detalles asociados a las variables de entrada en sí mismas. Por último, se debe poner énfasis en la necesidad de datos de calidad y el sesgo en los mismos. **2.1 Discusión respecto al número de instancias**Con respecto al volumen de información necesario para la tarea de aprendizaje supervisado, no existe una respuesta general que sea válida para todos los casos de estudio o problemas. Desde el punto de vista estadístico, la muestra (el conjunto de datos) será más representativa cuanto mayor sea su número. De este modo, a mayor volumen de información disponible, más casos diferentes se es capaz de cubrir, y mejor se adaptará el modelo al caso de estudio real.Sí que es posible extraer conocimiento a partir de unos pocos datos, 50 ó 100 por dar un número aproximado. 
Sin embargo, existe una alta probabilidad que el modelo generado sea demasiado específico, y por tanto no sea útil para su posterior aplicación. El número correcto dependerá de la dificultad del problema a resolver, si bien una regla genérica es que el número de instancias necesario para un correcto aprendizaje debe ser al menos 10 veces el número de parámetros utilizados para configurar el algoritmo. Finalmente, esta la cuestión sobre si existe una ratio específica entre el número de instancias y variables que sea apropiada para realizar un aprendizaje correcto. Desafortunadamente, tampoco hay una respuesta exacta, si bien se puede reutilizar la regla genérica anterior para indicar que, en este caso, el número de instancias debe ser al menos 10 veces el número de variables de entrada utilizadas. **2.2 Discusión sobre las variables de entrada**Tal como se indicó anteriormente, los algoritmos de aprendizaje automático suelen buscar correlaciones altas entre las variables de entrada y las de salida. La mayor parte de algoritmos de Machine Learning tienen cierta preferencia sobre las variables de entrada de tipo numérico (valores reales como el nivel de expresión genética, o el ph de un producto), si bien se pueden utilizar también variables de tipo nominal (categorías como color o género). Lo único importante en este caso es tener cuidado con incluir variables que puedan desvirtuar el conocimiento que se desea extraer. Por ejemplo, podría existir una relación fuerte entre la edad de una persona y el tiempo de supervivencia a una enfermedad concreta, pero nuestro objetivo es que esa asociación la encontremos directamente en los valores genéticos. Este tipo de variables se conocen como "**confounding**" y causan asociaciones de tipo espúreas que deben ser identificadas y evitadas a toda costa, especialmente cuando se trabaja en el área de la bioinformática. Otro tema relevante con respecto a las variables se define como "la maldición de dimensionalidad". 
El término dimensionalidad se refiere la cardinalidad (número de elementos) del conjunto de variables. En los problemas de tipo bioinformático, el número de variables utilizadas para el estudio suele ser de miles o incluso decenas de miles. Este hecho dificulta muchísimo la tarea de aprendizaje, ya que se disminuye la capacidad del algoritmo de encontrar una correlación óptima entre las entradas y la salida. Por este motivo, resulta imprescindible dedicar un esfuerzo considerable en la fase de preparación de los datos, seleccionando exclusivamente aquellas variables que sean más importantes para el estudio. En efecto, se suele comprobar que un modelo construido sobre un subconjunto de variables predictivas de calidad siempre es mejor que aquél generado utilizando todas las variables originales. **2.3 Discusión respecto al sesgo en los datos**Debe recordarse que en todo caso es muy importante establecer la calidad de los datos, **evitando sesgos** en los mismos. Justamente, un problema relativo al sesgo de datos se refiere a las "*counfounding variables*", ya que pueden vincular la salida del modelo a variables de entrada que no aportan información realista, o incluso con ciertos problemas éticos, como puedan ser el género, raza, o actividad laboral en un caso de estudio de detección de cáncer. Otro caso muy claro es el *desequilibrio* en la distribución de la variable de salida, donde se encuentran casos (clases) o rangos de valores que aparecen con una probabilidad a priori más alta que otros. Puesto que el aprendizaje busca crear un modelo que represente de forma mayoritaria al conjunto de datos, éste tendrá una preferencia por las instancias más comunes, posiblemente ignorando los casos excepcionales. El ejemplo clásico es justamente el de diagnóstico médico, donde si tomamos como referencia el alto porcentaje de casos "sanos", el modelo podría extraer la conclusión errónea que todos los pacientes están sanos (al 99% de acierto!). 
Existen métodos específicos de preprocesamiento y/o aprendizaje que están diseñados para reducir el efecto negativo del desequilibro de datos, conocido en la literatura como "*Learning from imbalanced data*". Sin embargo, al ser un tema de trabajo muy especializado, no será objeto de estudio del presente curso. **3. CASO DE ESTUDIO COMPLEMENTARIO: APRENDIZAJE DE CÁNCER DE MAMA MEDIANTE IMÁGENES**A lo largo de este curso, se trabajará sobre un problema de aprendizaje relativo al **melanoma cutáneo**. Sin embargo, en este apartado en concreto, por cuestión de simplicidad, vamos a realizar algunas pruebas iniciales utilizando como base un conjunto de datos relativamente sencillo, conocido como *breast cancer.* Las variables de entrada de este conjunto se calcularon a partir de una imagen digitalizada de un aspirado de aguja fina (FNA) de una masa mamaria. Describen las características de los núcleos celulares presentes en la imagen en un espacio tridimensional.Es un conjunto de datos pequeño y muy conocido en la literatura, así como disponible en muchos de las herramientas de Machine Learning, como *Scikit-Learn*. Para utilizar este conjunto de datos, se procede a mostrar el código que permite guardar los datos en las variables de Python de una manera sencilla. Observe bien la estructura del bloque de código, que será descrita con detalle justo a continuación.
###Code
import pandas as pd
# Scikit-learn ships its own repository of sample datasets
from sklearn.datasets import load_breast_cancer

# Fetch the breast-cancer dataset bundle (features, target, metadata)
data = load_breast_cancer()

# Keep the inputs (features) and the output (class label) in X and y,
# wrapping each one in a pandas DataFrame for convenient handling
X = pd.DataFrame(data=data.data, columns=data.feature_names)
y = pd.DataFrame(data=data.target, columns=["label"])

# Peek at the first five samples
X.head()
###Output
_____no_output_____
###Markdown
A partir del código anterior, se puede extraer la siguiente información:- `X` e `y` son dos estructuras de datos tipo `dataframe` (marco de datos) que son almacenadas en tipo `Pandas` (*biblioteca muy usada en Python*). Un `dataframe` se entiende como un vector n-dimensional, resultando en general mucho más cómodo trabajar con este tipo de estructura de datos que con `numpy`. - `X` contiene la matriz de datos de entrada e `y` es un vector unidimensional con la etiqueta de cada muestra en `X`. Por simplicidad, en este caso se ha optado por resolver un problema de clasificación (variable de salida de tipo categórica). - En el aprendizaje de tipo supervisado, `X` e `y` se usarán para construir el modelo de Machine Learning y probarlo. **4. NECESIDAD DE VALIDAR LOS MODELOS DE MACHINE LEARNING**Tal como se indicó anteriormente, el procedimiento de entrenamiento en aprendizaje supervisado busca crear un modelo que se ajuste perfectamente a los datos que se dan como entrada. Para controlar si se realiza bien dicho ajuste, el algoritmo de construcción del modelo utiliza lo que se conoce como "*métrica de rendimiento*". Ésta determinará el error que se comete al realizar una predicción sobre las instancias del conjunto de entrenamiento con el modelo que se está generando.Una vez concluido el aprendizaje, resulta de vital importancia realizar un proceso de validación del modelo. Esto significa determinar la calidad que mostrará el modelo cuando se ponga en funcionamiento sobre nuevos datos reales. Por este motivo, se utiliza el llamado “conjunto de test”, que deberá contener instancias nuevas que no se hayan utilizado durante la fase de entrenamiento. De acuerdo a lo anterior, en aprendizaje supervisado existen de dos fases muy bien diferenciadas que son el **entrenamiento** (*train*) y la **validación** (*test*). 
Es necesario insistir que se deben utilizar dos conjuntos de datos totalmente independientes puesto que, de utilizar algún ejemplo de test durante el entrenamiento, se estará sobre-estimando la calidad del modelo generado.En la siguiente imagen se puede observar cómo se divide el conjunto de datos de entrada en dos grupos distintos, donde cada ejemplo está marcado con un color diferente si está dentro de "entrenamiento" (azul) o de "test" (naranja). **4.1 Casos de estudio en validación**A partir de los resultados de predicción obtenidos en entrenamiento y test, se calcula una métrica de rendimiento en cada conjunto. En este momento, se pueden dar varios escenarios posibles:- Se obtiene una *alta calidad* de las métricas de rendimiento tanto en entrenamiento como test: el modelo es totalmente efectivo y se ha concluido el proceso. - Se obtiene una *alta calidad* para entrenamiento, pero *muy baja* en test: se ha caido en el sobre-aprendizaje (*over-fitting*), es decir, se ha construido un modelo tan específico para los datos de entrenamiento, que luego es incapaz de generalizar bien con datos diferentes (test). - Se obtiene una *baja calidad* tanto en entrenamiento como en test (*under-fitting*): el modelo no es bueno y se debe actuar mediante preprocesamiento, un ajuste adecuado de parámetros, o sencillamente alimentando al sistema con un mayor número de datos. **4.2 Tipos de técnicas de validación**Para dividir nuestro conjunto de datos original en los conjuntos de entrenamiento y test, existen tres metodologías diferentes: - La más sencilla es la de retención o "**hold-out**", donde los datos originales se dividen en dos conjuntos disjuntos complementarios. Por ejemplo, un 60% de instancias iniciales para entrenamiento y el 40% restante para test.- La más utilizada por su validez estadística es la validación cruzada de *k* particiones o "**k-fold cross validation**", donde se divide el conjunto original en *k* partes disjuntas. 
Por ejemplo, se separan las instancias en 5 grupos sin reemplazamiento. - Existe la posibilidad de hacer una experimentación exhaustiva con "dejar uno fuera" o "**leave one out validation (LOOV)**". En este caso se utiliza todo el conjunto para entrenar, salvo un ejemplo para test. En lo sucesivo, se explicarán con más detalle las principales de estas técnicas, poniendo especial atención a la más recomendada de todas: *la validación cruzada de k particiones*. La razón fundamental es que se emplean todas las instancias del conjunto de datos en las diferentes particiones de test, promoviendo un mayor rigor estadístico, y por tanto validez de los resultados obtenidos. **4.3 Validación Hold-out**Tal como se introdujo anteriormente, “**hold-out**” crea dos conjuntos simples, es decir, un fichero de entrenamiento y otro de test, mediante una partición en un porcentaje determinado de los datos. El porcentaje seleccionado para cada subconjunto queda a disposición del usuario, si bien valores típicos suelen estar por encima del 60, 75 u 80% para entrenamiento, y el resto para test. La ventaja del método “**hold-out**” es que es muy sencillo y eficiente de realizar. Sin embargo, este método no es recomendable puesto que la calidad final del modelo dependerá en gran medida de cómo se hayan dividido los datos. En otras palabras, al separar las instancias de manera totalmente aleatoria podría darse el caso que todas las instancias seleccionadas para test sean las más difíciles de identificar. Se utilizará por tanto **hold-out** cuando se realicen pruebas iniciales para conocer el comportamiento base de los diferentes modelos (algoritmos de Machine Learning) aplicados al problema bajo estudio. A continuación, se va a conocer el código para crear las particiones de datos, donde todas las instrucciones están debidamente comentadas para entender su función concreta. 
El parámetro principal utilizado en este caso será el `ratio` para definir la división entre el conjunto de *test* y *entrenamiento* respectivamente. Se recuerdan inicialmente algunos detalles sobre el código fuente mostrado:- Como apoyo en todo el curso se utilizará la biblioteca [Scikit-learn](https://scikit-learn.org/) por lo que se importará toda la funcionalidad y módulos del mismo en las primeras líneas del código- El particionamiento de datos se realiza por defecto de manera aleatoria. Por este motivo, se debe utilizar lo que se conoce como "semilla". Éste es un valor fijo que permite generar siempre los mismos resultados cada vez que se ejecute el código con dicha semilla indicada en el parámetro ```random_state```.- Siempre se utilizará como última instrucción un ```print()``` para mostrar un resultado que permita interpretar todo el proceso.
###Code
# Load the helper that implements the "hold-out" split
from sklearn.model_selection import train_test_split

# Parameters of the split
rd = 42      # the sampling is random, so a fixed seed makes it reproducible
ratio = 0.2  # the key parameter: fraction of the data reserved for the test set

# Perform the division. Important: inputs and outputs (X and y) are split together
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=ratio, random_state=rd
)

# Show which indices of the original set ended up in train and in test
print("%s %s" % (X_train.index, X_test.index))
###Output
Int64Index([ 68, 181, 63, 248, 60, 15, 290, 137, 155, 517,
...
330, 214, 466, 121, 20, 71, 106, 270, 435, 102],
dtype='int64', length=455) Int64Index([204, 70, 131, 431, 540, 567, 369, 29, 81, 477,
...
549, 530, 163, 503, 148, 486, 75, 249, 238, 265],
dtype='int64', length=114)
###Markdown
Los siguientes fragmentos de código muestran el tamaño de los conjuntos que se acaban de crear
###Code
# First the shapes of the training split (X and y)
X_train.shape, y_train.shape
# Then the shapes of the test split (X and y);
# in a notebook only this last tuple expression is actually displayed
X_test.shape, y_test.shape
###Output
_____no_output_____
###Markdown
En los resultados anteriores se puede observar que, al realizar una división en 80%-20% (se aplicó un ```ratio = 0.2``` para test) se tienen 455 instancias en entrenamiento ($569 \cdot 0.8$) y 114 para test ($569 \cdot 0.2$). Es extremadamente importante que ambos conjuntos sean disjuntos, es decir, que ningún ejemplo de test se haya visto durante el entrenamiento, para poder realizar correctamente la validación. Este hecho se comprueba en el siguiente bloque de código, donde se "pregunta" si algún dato (instancia) del conjunto de entrenamiento (`X_train`) está contenido en el de test (`X_test`). El resultado deberá ser siempre ```False``` para todas las filas.
###Code
X_train.isin(X_test) # Checks whether any element of the test set reappears in the train set
# The first column shown is the original index, which is out of order (shuffled)
# NOTE(review): DataFrame.isin(DataFrame) aligns on index labels, and train/test indices
# are disjoint by construction, so this is expected to be all False — confirm that a
# stricter check (e.g. comparing index sets) is not what was intended here.
###Output
_____no_output_____
###Markdown
**4.4 Validación cruzada de k particiones**Esta aproximación es la más utilizada y recomendada cuando se realiza aprendizaje supervisado. El motivo principal es su rigor estadístico, así como el uso de la totalidad del conjunto de datos para entrenamiento y test, utilizando para ello un procedimiento iterativo, es decir, que se repitirá un número determinado de veces. En concreto, cuando se utiliza una validación cruzada de k particiones se dividen los datos en k subconjuntos disjuntos, llamadas particiones. El objetivo, como se ha indicado anteriormente, es el de proceder a validar los modelos con diferentes combinaciones de dichas particiones. En concreto, las instancias pertenecientes a cada una de las *k* particiones se almacenerán en un conjunto diferente de test; mientras que la unión de las instancias que se encuentren en las *k-1* particiones restantes, se utilizarán para construir cada uno de los conjuntos de entrenamiento. Como ejemplo, si se realiza una validación cruzada con 5 particiones (k=5), los datos se repartirían como se muestra en el esquema de la siguiente figura. Como se puede observar, para cada partición tomamos 1/5 de prueba, y los 4/5 restantes de entrenamiento.![Ejemplo de validación cruzada de 5 particiones](https://i.imgur.com/NAgX78H.png) A continuación, se indica cómo se puede realizar esta división de datos mediante Python, de nuevo aprovechando la funcionalidad de la biblioteca **Scikit-Learn**. La principal diferencia del siguiente código con respecto al utilizado para *hold-out* reside en lo siguiente: en este caso, en lugar de generar directamente dos nuevas variables `train` y `test`, se calculan a priori los índices (posiciones) de las instancias que pertenecen a cada partición de test. Posteriormente, se utilizará una estructura repetitiva (bucle `for`) para generar las particiones realizando una búsqueda por índice (utilizando una función denominada `iloc`) sobre el conjunto original de entrada (`X`) y de salida (```y```). 
La salida del ejemplo de código fuente mostrará, en cada línea, dos listas de índices (entre corchetes [ ]) representando las instancias seleccionadas para entrenamiento y test, respectivamente.
###Code
from sklearn.model_selection import KFold  # helper implementing k-fold cross validation

# Parameters
rd = 42          # seed so that the shuffled partitioning is reproducible
particiones = 5  # number of folds (k)

# First difference with hold-out: the fold indices are generated up front
kf = KFold(n_splits=particiones, shuffle=True, random_state=rd)

# Each subdivision is then produced iteratively:
# the split generator must be consumed inside a for loop, one fold at a time
for train, test in kf.split(X, y):
    print("%s %s" % (train, test))
    # Build the train/test sets for this fold (iloc selects rows by position)
    X_train = X.iloc[train]
    X_test = X.iloc[test]
    y_train = y.iloc[train]
    y_test = y.iloc[test]
###Output
[ 0 1 3 4 5 7 8 12 13 14 15 16 17 18 19 20 21 22
23 24 25 26 27 28 31 32 33 34 35 36 37 38 40 41 42 43
44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62
63 64 65 66 67 68 69 71 74 80 85 87 88 89 91 92 93 94
95 96 97 98 99 100 102 103 105 106 107 108 111 112 113 114 115 116
117 119 120 121 122 123 124 125 126 127 128 129 130 133 134 135 136 137
138 139 141 142 143 146 147 149 150 151 152 154 155 156 157 159 160 161
162 164 166 168 169 170 171 172 173 174 175 176 178 179 180 181 183 184
185 186 187 189 190 191 192 193 194 195 197 198 199 200 201 202 205 206
207 209 210 212 213 214 215 216 217 218 219 220 221 223 224 225 226 227
229 230 231 232 233 234 236 237 239 240 241 242 243 244 245 246 247 248
251 252 253 254 256 258 259 260 261 262 263 266 267 268 269 270 272 273
276 277 278 279 280 282 283 285 286 287 288 289 290 291 292 293 294 295
296 297 298 299 300 301 302 303 304 306 307 308 309 310 311 312 313 314
315 316 317 318 319 321 323 324 325 326 327 328 330 335 336 337 338 339
340 341 342 343 344 345 346 347 348 349 350 351 352 354 355 356 357 358
359 360 361 363 364 365 366 367 368 370 371 372 373 374 375 376 377 378
379 381 383 385 386 387 388 389 390 391 392 396 397 398 399 400 401 402
403 404 405 406 407 409 410 411 412 413 414 415 416 417 418 419 420 421
423 426 427 428 429 430 432 433 434 435 436 437 438 439 440 442 443 444
445 446 447 448 449 450 451 452 453 454 455 456 458 459 460 461 463 465
466 467 469 470 471 472 473 474 475 476 478 479 480 481 483 484 485 487
488 489 490 491 492 493 494 495 496 497 498 499 501 502 504 505 506 507
508 509 510 512 513 514 515 516 517 518 519 521 522 523 524 525 529 533
534 536 537 539 541 542 543 544 545 546 547 548 550 552 553 554 558 559
560 562 563 566 568] [ 2 6 9 10 11 29 30 39 55 70 72 73 75 76 77 78 79 81
82 83 84 86 90 101 104 109 110 118 131 132 140 144 145 148 153 158
163 165 167 177 182 188 196 203 204 208 211 222 228 235 238 249 250 255
257 264 265 271 274 275 281 284 305 320 322 329 331 332 333 334 353 362
369 380 382 384 393 394 395 408 422 424 425 431 441 457 462 464 468 477
482 486 500 503 511 520 526 527 528 530 531 532 535 538 540 549 551 555
556 557 561 564 565 567]
[ 1 2 3 4 5 6 8 9 10 11 12 13 14 16 20 21 23 26
27 28 29 30 32 34 35 36 37 38 39 40 41 43 44 45 47 48
50 51 52 53 55 58 59 61 62 64 65 67 70 71 72 73 74 75
76 77 78 79 80 81 82 83 84 85 86 87 90 91 92 94 95 96
97 98 99 100 101 102 103 104 105 106 107 109 110 111 112 115 116 118
119 120 121 122 123 125 127 128 129 130 131 132 133 134 135 136 138 139
140 142 143 144 145 146 147 148 150 151 152 153 156 157 158 159 160 161
162 163 164 165 166 167 168 169 170 171 175 177 178 179 180 182 183 186
187 188 189 190 191 193 194 196 197 198 200 201 202 203 204 205 206 207
208 211 212 213 214 215 216 217 219 220 221 222 223 224 225 226 228 229
230 232 233 235 236 237 238 239 240 241 242 243 246 249 250 251 252 253
254 255 256 257 258 259 260 262 263 264 265 266 267 269 270 271 273 274
275 276 278 279 281 282 283 284 285 286 288 291 292 293 294 295 296 297
299 300 302 303 305 306 307 308 309 312 313 314 315 316 317 318 320 321
322 323 324 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340
342 343 344 345 347 348 349 350 351 352 353 354 356 357 358 359 360 361
362 363 365 366 367 368 369 370 371 372 373 375 376 377 378 379 380 382
383 384 385 386 387 388 389 391 392 393 394 395 397 400 401 403 405 406
408 409 412 413 415 416 417 418 419 420 422 423 424 425 427 429 430 431
432 433 435 436 437 438 439 440 441 443 444 445 447 448 450 451 452 454
455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 471 472 473
474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
492 493 496 499 500 502 503 504 505 506 508 509 510 511 513 514 515 516
518 519 520 521 522 524 525 526 527 528 529 530 531 532 533 534 535 536
537 538 540 543 544 546 548 549 551 552 554 555 556 557 558 559 560 561
563 564 565 566 567] [ 0 7 15 17 18 19 22 24 25 31 33 42 46 49 54 56 57 60
63 66 68 69 88 89 93 108 113 114 117 124 126 137 141 149 154 155
172 173 174 176 181 184 185 192 195 199 209 210 218 227 231 234 244 245
247 248 261 268 272 277 280 287 289 290 298 301 304 310 311 319 325 341
346 355 364 374 381 390 396 398 399 402 404 407 410 411 414 421 426 428
434 442 446 449 453 470 494 495 497 498 501 507 512 517 523 539 541 542
545 547 550 553 562 568]
[ 0 1 2 4 6 7 8 9 10 11 12 13 14 15 17 18 19 20
21 22 24 25 27 28 29 30 31 32 33 34 35 39 40 41 42 43
44 46 47 49 51 52 53 54 55 56 57 58 60 61 62 63 64 65
66 68 69 70 71 72 73 75 76 77 78 79 80 81 82 83 84 85
86 87 88 89 90 91 93 95 98 99 100 101 102 104 105 106 107 108
109 110 112 113 114 115 117 118 120 121 124 125 126 127 128 129 130 131
132 133 134 135 136 137 138 140 141 142 144 145 148 149 151 153 154 155
156 158 159 160 161 162 163 164 165 166 167 169 170 171 172 173 174 176
177 178 179 181 182 184 185 186 187 188 189 190 191 192 195 196 197 199
200 201 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
219 221 222 223 224 226 227 228 230 231 232 233 234 235 236 238 240 241
242 243 244 245 247 248 249 250 251 252 254 255 256 257 258 259 260 261
264 265 267 268 269 270 271 272 273 274 275 276 277 279 280 281 282 283
284 285 287 288 289 290 292 294 295 298 300 301 303 304 305 306 308 309
310 311 313 314 315 317 319 320 322 325 326 327 329 330 331 332 333 334
337 339 341 342 343 344 345 346 347 349 350 351 353 354 355 356 358 359
361 362 364 366 369 371 372 373 374 376 378 379 380 381 382 383 384 385
387 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405
406 407 408 409 410 411 412 413 414 416 417 418 419 421 422 424 425 426
427 428 429 430 431 434 435 437 438 441 442 443 445 446 447 448 449 451
452 453 454 455 456 457 458 459 460 461 462 464 465 466 467 468 470 472
474 475 476 477 478 479 482 483 484 486 488 491 492 493 494 495 497 498
500 501 503 504 505 506 507 508 509 510 511 512 514 515 516 517 518 520
522 523 524 525 526 527 528 529 530 531 532 534 535 537 538 539 540 541
542 545 547 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563
564 565 566 567 568] [ 3 5 16 23 26 36 37 38 45 48 50 59 67 74 92 94 96 97
103 111 116 119 122 123 139 143 146 147 150 152 157 168 175 180 183 193
194 198 202 220 225 229 237 239 246 253 262 263 266 278 286 291 293 296
297 299 302 307 312 316 318 321 323 324 328 335 336 338 340 348 352 357
360 363 365 367 368 370 375 377 386 388 415 420 423 432 433 436 439 440
444 450 463 469 471 473 480 481 485 487 489 490 496 499 502 513 519 521
533 536 543 544 546 548]
[ 0 1 2 3 5 6 7 8 9 10 11 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 29 30 31 33 34 36 37 38 39 40
42 43 45 46 48 49 50 52 54 55 56 57 58 59 60 62 63 64
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
84 86 87 88 89 90 91 92 93 94 96 97 99 101 102 103 104 105
106 108 109 110 111 113 114 116 117 118 119 121 122 123 124 126 128 130
131 132 135 137 138 139 140 141 143 144 145 146 147 148 149 150 152 153
154 155 156 157 158 160 161 162 163 165 166 167 168 172 173 174 175 176
177 180 181 182 183 184 185 187 188 189 190 191 192 193 194 195 196 198
199 201 202 203 204 205 207 208 209 210 211 212 214 216 217 218 220 222
225 227 228 229 230 231 234 235 236 237 238 239 241 243 244 245 246 247
248 249 250 251 252 253 255 257 259 260 261 262 263 264 265 266 268 269
270 271 272 273 274 275 276 277 278 279 280 281 284 286 287 288 289 290
291 293 295 296 297 298 299 300 301 302 303 304 305 307 308 309 310 311
312 313 315 316 318 319 320 321 322 323 324 325 326 328 329 330 331 332
333 334 335 336 337 338 339 340 341 343 344 345 346 348 350 352 353 355
357 360 362 363 364 365 366 367 368 369 370 372 374 375 377 378 379 380
381 382 384 385 386 387 388 389 390 391 393 394 395 396 398 399 401 402
404 407 408 409 410 411 413 414 415 418 420 421 422 423 424 425 426 427
428 431 432 433 434 435 436 439 440 441 442 444 445 446 449 450 453 454
455 457 458 459 461 462 463 464 466 468 469 470 471 473 474 475 476 477
478 480 481 482 483 484 485 486 487 489 490 491 493 494 495 496 497 498
499 500 501 502 503 504 507 508 510 511 512 513 516 517 519 520 521 522
523 526 527 528 529 530 531 532 533 535 536 537 538 539 540 541 542 543
544 545 546 547 548 549 550 551 552 553 554 555 556 557 560 561 562 563
564 565 566 567 568] [ 4 12 28 32 35 41 44 47 51 53 61 65 85 95 98 100 107 112
115 120 125 127 129 133 134 136 142 151 159 164 169 170 171 178 179 186
197 200 206 213 215 219 221 223 224 226 232 233 240 242 254 256 258 267
282 283 285 292 294 306 314 317 327 342 347 349 351 354 356 358 359 361
371 373 376 383 392 397 400 403 405 406 412 416 417 419 429 430 437 438
443 447 448 451 452 456 460 465 467 472 479 488 492 505 506 509 514 515
518 524 525 534 558 559]
[ 0 2 3 4 5 6 7 9 10 11 12 15 16 17 18 19 22 23
24 25 26 28 29 30 31 32 33 35 36 37 38 39 41 42 44 45
46 47 48 49 50 51 53 54 55 56 57 59 60 61 63 65 66 67
68 69 70 72 73 74 75 76 77 78 79 81 82 83 84 85 86 88
89 90 92 93 94 95 96 97 98 100 101 103 104 107 108 109 110 111
112 113 114 115 116 117 118 119 120 122 123 124 125 126 127 129 131 132
133 134 136 137 139 140 141 142 143 144 145 146 147 148 149 150 151 152
153 154 155 157 158 159 163 164 165 167 168 169 170 171 172 173 174 175
176 177 178 179 180 181 182 183 184 185 186 188 192 193 194 195 196 197
198 199 200 202 203 204 206 208 209 210 211 213 215 218 219 220 221 222
223 224 225 226 227 228 229 231 232 233 234 235 237 238 239 240 242 244
245 246 247 248 249 250 253 254 255 256 257 258 261 262 263 264 265 266
267 268 271 272 274 275 277 278 280 281 282 283 284 285 286 287 289 290
291 292 293 294 296 297 298 299 301 302 304 305 306 307 310 311 312 314
316 317 318 319 320 321 322 323 324 325 327 328 329 331 332 333 334 335
336 338 340 341 342 346 347 348 349 351 352 353 354 355 356 357 358 359
360 361 362 363 364 365 367 368 369 370 371 373 374 375 376 377 380 381
382 383 384 386 388 390 392 393 394 395 396 397 398 399 400 402 403 404
405 406 407 408 410 411 412 414 415 416 417 419 420 421 422 423 424 425
426 428 429 430 431 432 433 434 436 437 438 439 440 441 442 443 444 446
447 448 449 450 451 452 453 456 457 460 462 463 464 465 467 468 469 470
471 472 473 477 479 480 481 482 485 486 487 488 489 490 492 494 495 496
497 498 499 500 501 502 503 505 506 507 509 511 512 513 514 515 517 518
519 520 521 523 524 525 526 527 528 530 531 532 533 534 535 536 538 539
540 541 542 543 544 545 546 547 548 549 550 551 553 555 556 557 558 559
561 562 564 565 567 568] [ 1 8 13 14 20 21 27 34 40 43 52 58 62 64 71 80 87 91
99 102 105 106 121 128 130 135 138 156 160 161 162 166 187 189 190 191
201 205 207 212 214 216 217 230 236 241 243 251 252 259 260 269 270 273
276 279 288 295 300 303 308 309 313 315 326 330 337 339 343 344 345 350
366 372 378 379 385 387 389 391 401 409 413 418 427 435 445 454 455 458
459 461 466 474 475 476 478 483 484 491 493 504 508 510 516 522 529 537
552 554 560 563 566]
###Markdown
Tal como se indicó, la salida que debería mostrarse arriba son los índices de las diferentes instancias en entrenamiento y test. Es importante observar que para test no coinciden los números en ninguno de los 5 casos. ![División 5 fold cross validation](https://i.picasion.com/pic90/b4ecf165491da3986586d93ec996f8dd.gif) Además de lo anterior, es muy común repetir el proceso de particionamiento (división) en k-conjuntos un número determinado de veces para añadir mayor rigor estadístico a los resultados obtenidos. Esto significa que, como en el caso de *hold-out*, el objetivo es eliminar la dependencia del proceso de división aleatoria, que puede provocar que las instancias de test más difíciles siempre caigan en el mismo conjunto. Este procedimiento descrito en el párrafo anterior se denomina como *repeated k-fold* y se muestra en el siguiente ejemplo de código:
###Code
from sklearn.model_selection import RepeatedKFold

# Parameters
rd = 42
particiones = 5
repeticiones = 3

# Very similar to the previous splitter, but with an additional
# parameter giving the number of repetitions of the k-fold process.
rkf = RepeatedKFold(n_splits=particiones, n_repeats=repeticiones, random_state=rd)

for index, (train, test) in enumerate(rkf.split(X, y)):
    # Every `particiones` splits a new repetition begins; announce it.
    repeticion, fold = divmod(index, particiones)
    if fold == 0:
        print("Repetición ", repeticion + 1)
    print("%s %s" % (train, test))
    # Materialize the train/test partitions for the current split.
    X_train = X.iloc[train]
    X_test = X.iloc[test]
    y_train = y.iloc[train]
    y_test = y.iloc[test]
###Output
Repetición 1
[ 0 1 3 4 5 7 8 12 13 14 15 16 17 18 19 20 21 22
23 24 25 26 27 28 31 32 33 34 35 36 37 38 40 41 42 43
44 45 46 47 48 49 50 51 52 53 54 56 57 58 59 60 61 62
63 64 65 66 67 68 69 71 74 80 85 87 88 89 91 92 93 94
95 96 97 98 99 100 102 103 105 106 107 108 111 112 113 114 115 116
117 119 120 121 122 123 124 125 126 127 128 129 130 133 134 135 136 137
138 139 141 142 143 146 147 149 150 151 152 154 155 156 157 159 160 161
162 164 166 168 169 170 171 172 173 174 175 176 178 179 180 181 183 184
185 186 187 189 190 191 192 193 194 195 197 198 199 200 201 202 205 206
207 209 210 212 213 214 215 216 217 218 219 220 221 223 224 225 226 227
229 230 231 232 233 234 236 237 239 240 241 242 243 244 245 246 247 248
251 252 253 254 256 258 259 260 261 262 263 266 267 268 269 270 272 273
276 277 278 279 280 282 283 285 286 287 288 289 290 291 292 293 294 295
296 297 298 299 300 301 302 303 304 306 307 308 309 310 311 312 313 314
315 316 317 318 319 321 323 324 325 326 327 328 330 335 336 337 338 339
340 341 342 343 344 345 346 347 348 349 350 351 352 354 355 356 357 358
359 360 361 363 364 365 366 367 368 370 371 372 373 374 375 376 377 378
379 381 383 385 386 387 388 389 390 391 392 396 397 398 399 400 401 402
403 404 405 406 407 409 410 411 412 413 414 415 416 417 418 419 420 421
423 426 427 428 429 430 432 433 434 435 436 437 438 439 440 442 443 444
445 446 447 448 449 450 451 452 453 454 455 456 458 459 460 461 463 465
466 467 469 470 471 472 473 474 475 476 478 479 480 481 483 484 485 487
488 489 490 491 492 493 494 495 496 497 498 499 501 502 504 505 506 507
508 509 510 512 513 514 515 516 517 518 519 521 522 523 524 525 529 533
534 536 537 539 541 542 543 544 545 546 547 548 550 552 553 554 558 559
560 562 563 566 568] [ 2 6 9 10 11 29 30 39 55 70 72 73 75 76 77 78 79 81
82 83 84 86 90 101 104 109 110 118 131 132 140 144 145 148 153 158
163 165 167 177 182 188 196 203 204 208 211 222 228 235 238 249 250 255
257 264 265 271 274 275 281 284 305 320 322 329 331 332 333 334 353 362
369 380 382 384 393 394 395 408 422 424 425 431 441 457 462 464 468 477
482 486 500 503 511 520 526 527 528 530 531 532 535 538 540 549 551 555
556 557 561 564 565 567]
[ 1 2 3 4 5 6 8 9 10 11 12 13 14 16 20 21 23 26
27 28 29 30 32 34 35 36 37 38 39 40 41 43 44 45 47 48
50 51 52 53 55 58 59 61 62 64 65 67 70 71 72 73 74 75
76 77 78 79 80 81 82 83 84 85 86 87 90 91 92 94 95 96
97 98 99 100 101 102 103 104 105 106 107 109 110 111 112 115 116 118
119 120 121 122 123 125 127 128 129 130 131 132 133 134 135 136 138 139
140 142 143 144 145 146 147 148 150 151 152 153 156 157 158 159 160 161
162 163 164 165 166 167 168 169 170 171 175 177 178 179 180 182 183 186
187 188 189 190 191 193 194 196 197 198 200 201 202 203 204 205 206 207
208 211 212 213 214 215 216 217 219 220 221 222 223 224 225 226 228 229
230 232 233 235 236 237 238 239 240 241 242 243 246 249 250 251 252 253
254 255 256 257 258 259 260 262 263 264 265 266 267 269 270 271 273 274
275 276 278 279 281 282 283 284 285 286 288 291 292 293 294 295 296 297
299 300 302 303 305 306 307 308 309 312 313 314 315 316 317 318 320 321
322 323 324 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340
342 343 344 345 347 348 349 350 351 352 353 354 356 357 358 359 360 361
362 363 365 366 367 368 369 370 371 372 373 375 376 377 378 379 380 382
383 384 385 386 387 388 389 391 392 393 394 395 397 400 401 403 405 406
408 409 412 413 415 416 417 418 419 420 422 423 424 425 427 429 430 431
432 433 435 436 437 438 439 440 441 443 444 445 447 448 450 451 452 454
455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 471 472 473
474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
492 493 496 499 500 502 503 504 505 506 508 509 510 511 513 514 515 516
518 519 520 521 522 524 525 526 527 528 529 530 531 532 533 534 535 536
537 538 540 543 544 546 548 549 551 552 554 555 556 557 558 559 560 561
563 564 565 566 567] [ 0 7 15 17 18 19 22 24 25 31 33 42 46 49 54 56 57 60
63 66 68 69 88 89 93 108 113 114 117 124 126 137 141 149 154 155
172 173 174 176 181 184 185 192 195 199 209 210 218 227 231 234 244 245
247 248 261 268 272 277 280 287 289 290 298 301 304 310 311 319 325 341
346 355 364 374 381 390 396 398 399 402 404 407 410 411 414 421 426 428
434 442 446 449 453 470 494 495 497 498 501 507 512 517 523 539 541 542
545 547 550 553 562 568]
[ 0 1 2 4 6 7 8 9 10 11 12 13 14 15 17 18 19 20
21 22 24 25 27 28 29 30 31 32 33 34 35 39 40 41 42 43
44 46 47 49 51 52 53 54 55 56 57 58 60 61 62 63 64 65
66 68 69 70 71 72 73 75 76 77 78 79 80 81 82 83 84 85
86 87 88 89 90 91 93 95 98 99 100 101 102 104 105 106 107 108
109 110 112 113 114 115 117 118 120 121 124 125 126 127 128 129 130 131
132 133 134 135 136 137 138 140 141 142 144 145 148 149 151 153 154 155
156 158 159 160 161 162 163 164 165 166 167 169 170 171 172 173 174 176
177 178 179 181 182 184 185 186 187 188 189 190 191 192 195 196 197 199
200 201 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
219 221 222 223 224 226 227 228 230 231 232 233 234 235 236 238 240 241
242 243 244 245 247 248 249 250 251 252 254 255 256 257 258 259 260 261
264 265 267 268 269 270 271 272 273 274 275 276 277 279 280 281 282 283
284 285 287 288 289 290 292 294 295 298 300 301 303 304 305 306 308 309
310 311 313 314 315 317 319 320 322 325 326 327 329 330 331 332 333 334
337 339 341 342 343 344 345 346 347 349 350 351 353 354 355 356 358 359
361 362 364 366 369 371 372 373 374 376 378 379 380 381 382 383 384 385
387 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405
406 407 408 409 410 411 412 413 414 416 417 418 419 421 422 424 425 426
427 428 429 430 431 434 435 437 438 441 442 443 445 446 447 448 449 451
452 453 454 455 456 457 458 459 460 461 462 464 465 466 467 468 470 472
474 475 476 477 478 479 482 483 484 486 488 491 492 493 494 495 497 498
500 501 503 504 505 506 507 508 509 510 511 512 514 515 516 517 518 520
522 523 524 525 526 527 528 529 530 531 532 534 535 537 538 539 540 541
542 545 547 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563
564 565 566 567 568] [ 3 5 16 23 26 36 37 38 45 48 50 59 67 74 92 94 96 97
103 111 116 119 122 123 139 143 146 147 150 152 157 168 175 180 183 193
194 198 202 220 225 229 237 239 246 253 262 263 266 278 286 291 293 296
297 299 302 307 312 316 318 321 323 324 328 335 336 338 340 348 352 357
360 363 365 367 368 370 375 377 386 388 415 420 423 432 433 436 439 440
444 450 463 469 471 473 480 481 485 487 489 490 496 499 502 513 519 521
533 536 543 544 546 548]
[ 0 1 2 3 5 6 7 8 9 10 11 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 29 30 31 33 34 36 37 38 39 40
42 43 45 46 48 49 50 52 54 55 56 57 58 59 60 62 63 64
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
84 86 87 88 89 90 91 92 93 94 96 97 99 101 102 103 104 105
106 108 109 110 111 113 114 116 117 118 119 121 122 123 124 126 128 130
131 132 135 137 138 139 140 141 143 144 145 146 147 148 149 150 152 153
154 155 156 157 158 160 161 162 163 165 166 167 168 172 173 174 175 176
177 180 181 182 183 184 185 187 188 189 190 191 192 193 194 195 196 198
199 201 202 203 204 205 207 208 209 210 211 212 214 216 217 218 220 222
225 227 228 229 230 231 234 235 236 237 238 239 241 243 244 245 246 247
248 249 250 251 252 253 255 257 259 260 261 262 263 264 265 266 268 269
270 271 272 273 274 275 276 277 278 279 280 281 284 286 287 288 289 290
291 293 295 296 297 298 299 300 301 302 303 304 305 307 308 309 310 311
312 313 315 316 318 319 320 321 322 323 324 325 326 328 329 330 331 332
333 334 335 336 337 338 339 340 341 343 344 345 346 348 350 352 353 355
357 360 362 363 364 365 366 367 368 369 370 372 374 375 377 378 379 380
381 382 384 385 386 387 388 389 390 391 393 394 395 396 398 399 401 402
404 407 408 409 410 411 413 414 415 418 420 421 422 423 424 425 426 427
428 431 432 433 434 435 436 439 440 441 442 444 445 446 449 450 453 454
455 457 458 459 461 462 463 464 466 468 469 470 471 473 474 475 476 477
478 480 481 482 483 484 485 486 487 489 490 491 493 494 495 496 497 498
499 500 501 502 503 504 507 508 510 511 512 513 516 517 519 520 521 522
523 526 527 528 529 530 531 532 533 535 536 537 538 539 540 541 542 543
544 545 546 547 548 549 550 551 552 553 554 555 556 557 560 561 562 563
564 565 566 567 568] [ 4 12 28 32 35 41 44 47 51 53 61 65 85 95 98 100 107 112
115 120 125 127 129 133 134 136 142 151 159 164 169 170 171 178 179 186
197 200 206 213 215 219 221 223 224 226 232 233 240 242 254 256 258 267
282 283 285 292 294 306 314 317 327 342 347 349 351 354 356 358 359 361
371 373 376 383 392 397 400 403 405 406 412 416 417 419 429 430 437 438
443 447 448 451 452 456 460 465 467 472 479 488 492 505 506 509 514 515
518 524 525 534 558 559]
[ 0 2 3 4 5 6 7 9 10 11 12 15 16 17 18 19 22 23
24 25 26 28 29 30 31 32 33 35 36 37 38 39 41 42 44 45
46 47 48 49 50 51 53 54 55 56 57 59 60 61 63 65 66 67
68 69 70 72 73 74 75 76 77 78 79 81 82 83 84 85 86 88
89 90 92 93 94 95 96 97 98 100 101 103 104 107 108 109 110 111
112 113 114 115 116 117 118 119 120 122 123 124 125 126 127 129 131 132
133 134 136 137 139 140 141 142 143 144 145 146 147 148 149 150 151 152
153 154 155 157 158 159 163 164 165 167 168 169 170 171 172 173 174 175
176 177 178 179 180 181 182 183 184 185 186 188 192 193 194 195 196 197
198 199 200 202 203 204 206 208 209 210 211 213 215 218 219 220 221 222
223 224 225 226 227 228 229 231 232 233 234 235 237 238 239 240 242 244
245 246 247 248 249 250 253 254 255 256 257 258 261 262 263 264 265 266
267 268 271 272 274 275 277 278 280 281 282 283 284 285 286 287 289 290
291 292 293 294 296 297 298 299 301 302 304 305 306 307 310 311 312 314
316 317 318 319 320 321 322 323 324 325 327 328 329 331 332 333 334 335
336 338 340 341 342 346 347 348 349 351 352 353 354 355 356 357 358 359
360 361 362 363 364 365 367 368 369 370 371 373 374 375 376 377 380 381
382 383 384 386 388 390 392 393 394 395 396 397 398 399 400 402 403 404
405 406 407 408 410 411 412 414 415 416 417 419 420 421 422 423 424 425
426 428 429 430 431 432 433 434 436 437 438 439 440 441 442 443 444 446
447 448 449 450 451 452 453 456 457 460 462 463 464 465 467 468 469 470
471 472 473 477 479 480 481 482 485 486 487 488 489 490 492 494 495 496
497 498 499 500 501 502 503 505 506 507 509 511 512 513 514 515 517 518
519 520 521 523 524 525 526 527 528 530 531 532 533 534 535 536 538 539
540 541 542 543 544 545 546 547 548 549 550 551 553 555 556 557 558 559
561 562 564 565 567 568] [ 1 8 13 14 20 21 27 34 40 43 52 58 62 64 71 80 87 91
99 102 105 106 121 128 130 135 138 156 160 161 162 166 187 189 190 191
201 205 207 212 214 216 217 230 236 241 243 251 252 259 260 269 270 273
276 279 288 295 300 303 308 309 313 315 326 330 337 339 343 344 345 350
366 372 378 379 385 387 389 391 401 409 413 418 427 435 445 454 455 458
459 461 466 474 475 476 478 483 484 491 493 504 508 510 516 522 529 537
552 554 560 563 566]
Repetición 2
[ 0 2 3 4 5 6 7 8 9 11 12 13 14 16 17 18 19 20
21 22 23 24 25 27 28 29 30 32 33 34 35 36 37 38 39 40
41 42 43 45 46 47 48 50 52 53 54 55 57 58 60 61 62 63
64 65 66 67 68 69 71 72 73 74 76 77 78 79 80 81 82 83
84 85 89 90 91 92 93 94 97 98 99 100 102 104 105 106 108 110
111 112 114 116 117 118 120 121 125 126 129 130 131 132 133 135 136 137
138 139 140 141 142 143 144 145 146 147 148 149 150 151 153 154 155 159
160 161 162 163 164 165 166 167 168 169 172 173 176 177 178 179 180 181
182 183 184 185 186 187 189 190 191 193 194 195 196 197 198 200 201 202
204 206 207 208 210 211 212 213 214 215 216 217 218 219 220 221 222 223
224 225 226 227 228 229 231 232 234 235 237 238 239 240 241 242 244 246
247 248 249 250 251 252 253 254 255 256 258 259 260 261 262 263 264 266
267 268 270 271 272 273 274 275 276 277 280 281 282 284 285 286 287 288
290 293 294 295 297 298 299 300 301 302 303 304 305 306 307 310 311 312
313 314 315 316 317 319 324 325 326 329 332 334 335 337 338 339 340 341
345 346 347 349 350 352 353 355 356 357 358 359 360 362 363 364 366 367
368 369 370 372 373 374 375 376 377 378 381 382 383 384 386 387 388 389
391 392 393 394 395 396 397 398 399 400 401 403 404 405 406 407 408 409
410 411 412 413 414 415 416 417 418 419 420 421 422 424 427 428 429 430
431 433 434 435 436 437 438 439 440 441 442 443 444 446 448 449 450 451
452 453 454 455 456 457 458 459 460 461 462 464 465 466 467 468 470 473
475 476 477 478 480 483 484 485 486 489 490 491 492 493 494 495 496 498
499 500 501 502 503 505 506 507 508 509 510 511 513 514 515 516 517 518
519 520 521 525 526 527 528 529 530 532 534 535 536 538 539 540 541 543
544 545 546 547 550 551 552 553 554 555 556 557 558 559 560 561 562 563
564 565 566 567 568] [ 1 10 15 26 31 44 49 51 56 59 70 75 86 87 88 95 96 101
103 107 109 113 115 119 122 123 124 127 128 134 152 156 157 158 170 171
174 175 188 192 199 203 205 209 230 233 236 243 245 257 265 269 278 279
283 289 291 292 296 308 309 318 320 321 322 323 327 328 330 331 333 336
342 343 344 348 351 354 361 365 371 379 380 385 390 402 423 425 426 432
445 447 463 469 471 472 474 479 481 482 487 488 497 504 512 522 523 524
531 533 537 542 548 549]
[ 0 1 2 4 5 6 7 8 9 10 11 13 14 15 17 18 19 21
22 24 25 26 27 28 29 31 33 34 37 38 40 41 42 43 44 45
46 47 49 50 51 52 53 55 56 57 59 60 62 64 65 68 69 70
71 72 73 74 75 77 79 80 81 82 84 86 87 88 89 90 91 92
94 95 96 97 98 101 102 103 104 105 106 107 108 109 110 111 112 113
114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 131 132
133 134 135 137 140 142 143 144 145 146 147 148 150 152 153 154 155 156
157 158 159 161 162 163 164 165 166 167 169 170 171 172 173 174 175 176
177 179 181 182 183 185 186 188 189 190 191 192 193 194 196 199 200 201
202 203 204 205 206 209 211 212 213 214 215 216 217 219 221 222 223 225
226 227 228 229 230 232 233 234 235 236 237 238 239 241 242 243 245 246
248 249 250 251 252 254 255 256 257 258 259 261 263 264 265 266 267 268
269 270 271 272 275 276 278 279 280 281 282 283 284 285 287 288 289 290
291 292 293 295 296 298 300 301 302 303 304 305 307 308 309 310 311 312
314 315 316 317 318 319 320 321 322 323 324 326 327 328 329 330 331 332
333 336 337 338 339 340 341 342 343 344 345 346 347 348 350 351 352 354
355 356 357 358 360 361 362 363 364 365 366 368 370 371 372 373 374 379
380 381 382 383 384 385 386 389 390 391 392 395 396 397 398 399 400 401
402 403 404 406 407 408 409 410 412 414 415 416 417 420 421 422 423 424
425 426 427 428 429 432 436 437 438 439 440 441 442 444 445 446 447 449
450 451 452 453 454 455 456 458 459 460 461 463 464 465 466 467 468 469
470 471 472 473 474 475 476 477 479 481 482 483 484 487 488 489 490 491
492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 510
511 512 515 516 517 518 521 522 523 524 525 526 528 529 530 531 532 533
534 535 537 540 541 542 544 545 547 548 549 550 552 553 554 555 556 560
561 562 563 564 567] [ 3 12 16 20 23 30 32 35 36 39 48 54 58 61 63 66 67 76
78 83 85 93 99 100 130 136 138 139 141 149 151 160 168 178 180 184
187 195 197 198 207 208 210 218 220 224 231 240 244 247 253 260 262 273
274 277 286 294 297 299 306 313 325 334 335 349 353 359 367 369 375 376
377 378 387 388 393 394 405 411 413 418 419 430 431 433 434 435 443 448
457 462 478 480 485 486 509 513 514 519 520 527 536 538 539 543 546 551
557 558 559 565 566 568]
[ 0 1 3 4 5 7 8 9 10 11 12 13 15 16 18 19 20 22
23 24 26 27 28 30 31 32 33 34 35 36 38 39 40 41 43 44
45 46 48 49 50 51 52 53 54 56 57 58 59 61 62 63 64 66
67 68 69 70 71 73 75 76 77 78 79 81 82 83 84 85 86 87
88 89 90 93 94 95 96 97 98 99 100 101 103 104 107 108 109 113
115 116 117 118 119 121 122 123 124 125 127 128 130 131 132 133 134 135
136 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 155
156 157 158 159 160 161 162 163 164 166 167 168 169 170 171 172 173 174
175 176 177 178 180 181 184 185 187 188 189 190 191 192 193 195 196 197
198 199 200 201 202 203 204 205 207 208 209 210 211 212 214 216 218 219
220 221 222 223 224 226 227 228 229 230 231 232 233 234 235 236 238 239
240 241 243 244 245 247 249 250 251 252 253 255 257 258 259 260 262 264
265 266 267 269 271 273 274 277 278 279 281 282 283 285 286 287 288 289
291 292 294 296 297 299 300 302 304 305 306 308 309 310 311 313 314 315
316 318 320 321 322 323 325 326 327 328 329 330 331 333 334 335 336 338
340 341 342 343 344 345 347 348 349 350 351 353 354 356 357 359 360 361
364 365 366 367 368 369 370 371 372 373 375 376 377 378 379 380 382 383
385 386 387 388 389 390 391 392 393 394 395 396 397 401 402 405 407 408
410 411 412 413 414 416 418 419 421 422 423 425 426 428 430 431 432 433
434 435 436 439 440 441 442 443 444 445 447 448 449 450 451 452 453 454
455 457 460 461 462 463 465 468 469 470 471 472 473 474 475 477 478 479
480 481 482 483 484 485 486 487 488 489 490 491 492 493 495 496 497 498
499 500 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
518 519 520 521 522 523 524 525 526 527 529 530 531 532 533 536 537 538
539 540 542 543 546 547 548 549 550 551 553 554 556 557 558 559 560 562
563 564 565 566 568] [ 2 6 14 17 21 25 29 37 42 47 55 60 65 72 74 80 91 92
102 105 106 110 111 112 114 120 126 129 137 154 165 179 182 183 186 194
206 213 215 217 225 237 242 246 248 254 256 261 263 268 270 272 275 276
280 284 290 293 295 298 301 303 307 312 317 319 324 332 337 339 346 352
355 358 362 363 374 381 384 398 399 400 403 404 406 409 415 417 420 424
427 429 437 438 446 456 458 459 464 466 467 476 494 501 528 534 535 541
544 545 552 555 561 567]
[ 0 1 2 3 6 8 10 12 13 14 15 16 17 18 19 20 21 23
24 25 26 28 29 30 31 32 35 36 37 38 39 41 42 43 44 46
47 48 49 51 54 55 56 57 58 59 60 61 63 65 66 67 68 69
70 72 73 74 75 76 78 79 80 83 85 86 87 88 90 91 92 93
95 96 98 99 100 101 102 103 105 106 107 108 109 110 111 112 113 114
115 116 119 120 122 123 124 125 126 127 128 129 130 131 132 133 134 136
137 138 139 140 141 143 146 147 148 149 150 151 152 154 156 157 158 159
160 162 163 164 165 166 167 168 169 170 171 172 173 174 175 178 179 180
181 182 183 184 185 186 187 188 189 190 192 193 194 195 197 198 199 201
202 203 205 206 207 208 209 210 212 213 214 215 216 217 218 219 220 222
224 225 226 228 229 230 231 232 233 234 236 237 240 241 242 243 244 245
246 247 248 251 252 253 254 256 257 260 261 262 263 264 265 268 269 270
272 273 274 275 276 277 278 279 280 283 284 286 289 290 291 292 293 294
295 296 297 298 299 300 301 302 303 304 306 307 308 309 311 312 313 316
317 318 319 320 321 322 323 324 325 326 327 328 330 331 332 333 334 335
336 337 338 339 341 342 343 344 345 346 347 348 349 351 352 353 354 355
358 359 360 361 362 363 364 365 366 367 368 369 371 373 374 375 376 377
378 379 380 381 382 383 384 385 386 387 388 390 392 393 394 395 397 398
399 400 401 402 403 404 405 406 408 409 411 413 414 415 416 417 418 419
420 421 422 423 424 425 426 427 429 430 431 432 433 434 435 437 438 440
443 444 445 446 447 448 450 454 455 456 457 458 459 460 462 463 464 466
467 468 469 471 472 474 475 476 477 478 479 480 481 482 483 485 486 487
488 491 494 495 496 497 499 500 501 504 505 507 509 510 512 513 514 516
517 518 519 520 522 523 524 527 528 529 531 533 534 535 536 537 538 539
541 542 543 544 545 546 548 549 551 552 553 555 557 558 559 561 562 563
564 565 566 567 568] [ 4 5 7 9 11 22 27 33 34 40 45 50 52 53 62 64 71 77
81 82 84 89 94 97 104 117 118 121 135 142 144 145 153 155 161 176
177 191 196 200 204 211 221 223 227 235 238 239 249 250 255 258 259 266
267 271 281 282 285 287 288 305 310 314 315 329 340 350 356 357 370 372
389 391 396 407 410 412 428 436 439 441 442 449 451 452 453 461 465 470
473 484 489 490 492 493 498 502 503 506 508 511 515 521 525 526 530 532
540 547 550 554 556 560]
[ 1 2 3 4 5 6 7 9 10 11 12 14 15 16 17 20 21 22
23 25 26 27 29 30 31 32 33 34 35 36 37 39 40 42 44 45
47 48 49 50 51 52 53 54 55 56 58 59 60 61 62 63 64 65
66 67 70 71 72 74 75 76 77 78 80 81 82 83 84 85 86 87
88 89 91 92 93 94 95 96 97 99 100 101 102 103 104 105 106 107
109 110 111 112 113 114 115 117 118 119 120 121 122 123 124 126 127 128
129 130 134 135 136 137 138 139 141 142 144 145 149 151 152 153 154 155
156 157 158 160 161 165 168 170 171 174 175 176 177 178 179 180 182 183
184 186 187 188 191 192 194 195 196 197 198 199 200 203 204 205 206 207
208 209 210 211 213 215 217 218 220 221 223 224 225 227 230 231 233 235
236 237 238 239 240 242 243 244 245 246 247 248 249 250 253 254 255 256
257 258 259 260 261 262 263 265 266 267 268 269 270 271 272 273 274 275
276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
294 295 296 297 298 299 301 303 305 306 307 308 309 310 312 313 314 315
317 318 319 320 321 322 323 324 325 327 328 329 330 331 332 333 334 335
336 337 339 340 342 343 344 346 348 349 350 351 352 353 354 355 356 357
358 359 361 362 363 365 367 369 370 371 372 374 375 376 377 378 379 380
381 384 385 387 388 389 390 391 393 394 396 398 399 400 402 403 404 405
406 407 409 410 411 412 413 415 417 418 419 420 423 424 425 426 427 428
429 430 431 432 433 434 435 436 437 438 439 441 442 443 445 446 447 448
449 451 452 453 456 457 458 459 461 462 463 464 465 466 467 469 470 471
472 473 474 476 478 479 480 481 482 484 485 486 487 488 489 490 492 493
494 497 498 501 502 503 504 506 508 509 511 512 513 514 515 519 520 521
522 523 524 525 526 527 528 530 531 532 533 534 535 536 537 538 539 540
541 542 543 544 545 546 547 548 549 550 551 552 554 555 556 557 558 559
560 561 565 566 567 568] [ 0 8 13 18 19 24 28 38 41 43 46 57 68 69 73 79 90 98
108 116 125 131 132 133 140 143 146 147 148 150 159 162 163 164 166 167
169 172 173 181 185 189 190 193 201 202 212 214 216 219 222 226 228 229
232 234 241 251 252 264 300 302 304 311 316 326 338 341 345 347 360 364
366 368 373 382 383 386 392 395 397 401 408 414 416 421 422 440 444 450
454 455 460 468 475 477 483 491 495 496 499 500 505 507 510 516 517 518
529 553 562 563 564]
Repetición 3
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
18 19 24 25 26 28 31 33 34 35 36 38 39 40 41 43 44 45
46 48 49 50 51 52 53 54 56 58 60 61 62 63 64 66 67 68
71 72 73 74 75 76 77 79 80 81 83 84 85 86 87 88 91 92
95 96 97 98 100 101 102 103 105 106 107 108 109 110 111 112 113 114
115 116 118 119 120 121 122 123 125 126 127 128 129 130 131 132 133 136
137 139 140 141 142 143 145 146 147 148 150 151 152 153 155 156 157 158
159 160 161 163 164 165 166 167 169 170 171 172 174 175 176 177 178 179
180 181 182 183 184 185 186 187 188 189 192 193 194 195 196 197 198 199
200 201 202 203 208 209 210 211 212 213 214 215 216 217 218 219 220 221
222 223 224 225 226 228 231 233 234 236 238 239 240 241 242 243 244 245
246 247 248 249 250 251 252 253 254 255 256 258 260 262 263 265 266 267
270 271 274 275 277 278 279 280 281 282 283 284 285 286 287 288 289 290
291 292 293 294 295 296 299 301 302 303 304 305 306 307 308 309 310 311
312 313 314 316 317 318 319 320 321 322 323 324 325 326 327 329 331 332
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 353 354
355 356 358 359 360 362 363 365 366 367 369 370 371 374 375 376 377 378
379 380 381 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
399 400 401 402 403 404 405 406 409 410 411 412 413 414 415 417 418 419
420 422 423 424 427 428 429 430 432 434 435 436 437 438 439 441 442 443
444 445 450 451 452 453 456 457 458 459 461 463 465 466 467 468 471 472
473 474 475 476 477 478 479 480 485 487 488 489 491 492 494 498 499 500
501 502 503 504 506 507 508 509 510 511 512 513 514 515 516 517 518 519
520 521 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539
540 541 542 543 544 545 546 547 548 549 551 552 555 556 557 559 560 562
564 565 566 567 568] [ 20 21 22 23 27 29 30 32 37 42 47 55 57 59 65 69 70 78
82 89 90 93 94 99 104 117 124 134 135 138 144 149 154 162 168 173
190 191 204 205 206 207 227 229 230 232 235 237 257 259 261 264 268 269
272 273 276 297 298 300 315 328 330 333 334 351 352 357 361 364 368 372
373 382 398 407 408 416 421 425 426 431 433 440 446 447 448 449 454 455
460 462 464 469 470 481 482 483 484 486 490 493 495 496 497 505 522 523
550 553 554 558 561 563]
[ 0 1 3 6 7 8 10 11 13 15 16 17 18 19 20 21 22 23
24 25 26 27 28 29 30 31 32 33 35 36 37 38 39 40 41 42
43 44 45 46 47 48 50 52 53 54 55 56 57 58 59 60 62 63
65 66 68 69 70 71 72 74 75 77 78 79 80 81 82 83 84 85
86 87 88 89 90 91 93 94 95 96 97 99 100 101 102 103 104 105
107 108 110 111 112 113 114 115 116 117 118 119 121 124 126 127 128 129
130 131 132 133 134 135 136 138 140 142 143 144 145 146 147 148 149 150
151 152 153 154 155 157 158 160 161 162 163 164 165 166 167 168 171 172
173 174 175 176 177 178 179 180 181 186 187 189 190 191 192 193 194 196
197 198 199 200 201 204 205 206 207 208 209 210 214 215 216 218 220 221
222 223 225 226 227 228 229 230 231 232 233 234 235 236 237 238 241 243
245 246 248 249 254 255 256 257 259 260 261 262 263 264 265 266 267 268
269 271 272 273 274 276 277 279 280 281 283 284 285 286 287 288 290 291
292 294 295 296 297 298 300 301 302 303 304 306 307 308 309 310 311 312
314 315 316 317 318 319 320 322 323 325 326 327 328 329 330 332 333 334
339 340 341 342 345 346 348 350 351 352 354 355 357 358 359 360 361 362
363 364 365 366 367 368 369 370 372 373 374 375 376 377 378 379 380 381
382 383 384 385 387 388 389 390 392 393 394 395 396 397 398 399 401 402
403 404 406 407 408 409 410 411 412 414 415 416 417 418 419 420 421 423
425 426 428 430 431 432 433 434 435 437 438 439 440 442 443 444 445 446
447 448 449 450 451 454 455 457 459 460 461 462 464 465 466 467 468 469
470 471 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488
490 491 492 493 495 496 497 499 500 502 503 505 508 509 510 511 514 516
517 518 522 523 524 525 527 528 530 531 534 535 536 538 539 540 541 542
543 544 545 546 547 548 549 550 551 553 554 555 556 557 558 559 560 561
563 565 566 567 568] [ 2 4 5 9 12 14 34 49 51 61 64 67 73 76 92 98 106 109
120 122 123 125 137 139 141 156 159 169 170 182 183 184 185 188 195 202
203 211 212 213 217 219 224 239 240 242 244 247 250 251 252 253 258 270
275 278 282 289 293 299 305 313 321 324 331 335 336 337 338 343 344 347
349 353 356 371 386 391 400 405 413 422 424 427 429 436 441 452 453 456
458 463 472 489 494 498 501 504 506 507 512 513 515 519 520 521 526 529
532 533 537 552 562 564]
[ 0 1 2 3 4 5 6 7 8 9 10 12 13 14 16 17 18 19
20 21 22 23 24 26 27 28 29 30 31 32 33 34 35 37 39 40
41 42 47 48 49 50 51 52 53 54 55 57 58 59 61 62 64 65
66 67 69 70 73 74 75 76 77 78 80 81 82 83 84 86 87 89
90 91 92 93 94 95 96 97 98 99 104 105 106 108 109 111 112 113
114 116 117 118 119 120 122 123 124 125 126 127 130 131 132 133 134 135
136 137 138 139 140 141 143 144 145 146 147 149 150 152 153 154 155 156
157 159 160 161 162 164 165 167 168 169 170 173 174 176 177 178 179 181
182 183 184 185 187 188 190 191 193 194 195 196 200 201 202 203 204 205
206 207 208 209 210 211 212 213 214 217 218 219 221 222 223 224 225 227
228 229 230 232 233 234 235 236 237 238 239 240 242 243 244 245 246 247
248 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 268
269 270 272 273 275 276 277 278 279 280 282 284 285 286 287 289 290 292
293 295 296 297 298 299 300 301 302 304 305 308 310 312 313 315 317 318
320 321 322 323 324 325 326 327 328 329 330 331 333 334 335 336 337 338
340 341 343 344 345 346 347 348 349 351 352 353 354 355 356 357 358 359
360 361 363 364 365 367 368 369 370 371 372 373 377 378 379 380 382 384
385 386 387 388 390 391 392 395 396 397 398 399 400 402 403 404 405 406
407 408 409 412 413 416 417 419 421 422 423 424 425 426 427 428 429 430
431 433 434 435 436 438 439 440 441 442 446 447 448 449 452 453 454 455
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 472 474 475
476 477 478 479 480 481 482 483 484 485 486 489 490 492 493 494 495 496
497 498 499 500 501 502 503 504 505 506 507 509 510 511 512 513 515 516
517 519 520 521 522 523 524 525 526 527 528 529 531 532 533 535 536 537
538 539 540 542 543 544 545 549 550 551 552 553 554 558 559 560 561 562
563 564 566 567 568] [ 11 15 25 36 38 43 44 45 46 56 60 63 68 71 72 79 85 88
100 101 102 103 107 110 115 121 128 129 142 148 151 158 163 166 171 172
175 180 186 189 192 197 198 199 215 216 220 226 231 241 249 266 267 271
274 281 283 288 291 294 303 306 307 309 311 314 316 319 332 339 342 350
362 366 374 375 376 381 383 389 393 394 401 410 411 414 415 418 420 432
437 443 444 445 450 451 471 473 487 488 491 508 514 518 530 534 541 546
547 548 555 556 557 565]
[ 0 2 4 5 7 9 11 12 14 15 17 18 20 21 22 23 24 25
27 28 29 30 31 32 34 36 37 38 42 43 44 45 46 47 48 49
50 51 54 55 56 57 59 60 61 62 63 64 65 67 68 69 70 71
72 73 74 76 77 78 79 82 83 85 86 88 89 90 92 93 94 98
99 100 101 102 103 104 105 106 107 109 110 112 113 115 116 117 120 121
122 123 124 125 126 128 129 130 131 132 134 135 136 137 138 139 140 141
142 143 144 147 148 149 151 154 155 156 157 158 159 160 161 162 163 165
166 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184
185 186 188 189 190 191 192 193 195 197 198 199 201 202 203 204 205 206
207 209 210 211 212 213 214 215 216 217 219 220 221 224 226 227 229 230
231 232 235 237 239 240 241 242 243 244 246 247 249 250 251 252 253 256
257 258 259 261 263 264 265 266 267 268 269 270 271 272 273 274 275 276
278 281 282 283 284 285 287 288 289 291 293 294 295 296 297 298 299 300
303 304 305 306 307 308 309 311 312 313 314 315 316 317 318 319 320 321
322 323 324 326 328 330 331 332 333 334 335 336 337 338 339 341 342 343
344 345 346 347 348 349 350 351 352 353 355 356 357 358 359 360 361 362
364 365 366 368 370 371 372 373 374 375 376 377 378 379 381 382 383 384
385 386 389 390 391 393 394 398 400 401 404 405 406 407 408 410 411 412
413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430
431 432 433 434 435 436 437 439 440 441 442 443 444 445 446 447 448 449
450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 466 468 469
470 471 472 473 474 475 476 477 481 482 483 484 486 487 488 489 490 491
493 494 495 496 497 498 500 501 504 505 506 507 508 509 511 512 513 514
515 516 518 519 520 521 522 523 524 525 526 528 529 530 532 533 534 536
537 540 541 543 546 547 548 549 550 551 552 553 554 555 556 557 558 561
562 563 564 565 568] [ 1 3 6 8 10 13 16 19 26 33 35 39 40 41 52 53 58 66
75 80 81 84 87 91 95 96 97 108 111 114 118 119 127 133 145 146
150 152 153 164 167 187 194 196 200 208 218 222 223 225 228 233 234 236
238 245 248 254 255 260 262 277 279 280 286 290 292 301 302 310 325 327
329 340 354 363 367 369 380 387 388 392 395 396 397 399 402 403 409 438
465 467 478 479 480 485 492 499 502 503 510 517 527 531 535 538 539 542
544 545 559 560 566 567]
[ 1 2 3 4 5 6 8 9 10 11 12 13 14 15 16 19 20 21
22 23 25 26 27 29 30 32 33 34 35 36 37 38 39 40 41 42
43 44 45 46 47 49 51 52 53 55 56 57 58 59 60 61 63 64
65 66 67 68 69 70 71 72 73 75 76 78 79 80 81 82 84 85
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
106 107 108 109 110 111 114 115 117 118 119 120 121 122 123 124 125 127
128 129 133 134 135 137 138 139 141 142 144 145 146 148 149 150 151 152
153 154 156 158 159 162 163 164 166 167 168 169 170 171 172 173 175 180
182 183 184 185 186 187 188 189 190 191 192 194 195 196 197 198 199 200
202 203 204 205 206 207 208 211 212 213 215 216 217 218 219 220 222 223
224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
242 244 245 247 248 249 250 251 252 253 254 255 257 258 259 260 261 262
264 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282
283 286 288 289 290 291 292 293 294 297 298 299 300 301 302 303 305 306
307 309 310 311 313 314 315 316 319 321 324 325 327 328 329 330 331 332
333 334 335 336 337 338 339 340 342 343 344 347 349 350 351 352 353 354
356 357 361 362 363 364 366 367 368 369 371 372 373 374 375 376 380 381
382 383 386 387 388 389 391 392 393 394 395 396 397 398 399 400 401 402
403 405 407 408 409 410 411 413 414 415 416 418 420 421 422 424 425 426
427 429 431 432 433 436 437 438 440 441 443 444 445 446 447 448 449 450
451 452 453 454 455 456 458 460 462 463 464 465 467 469 470 471 472 473
478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495
496 497 498 499 501 502 503 504 505 506 507 508 510 512 513 514 515 517
518 519 520 521 522 523 526 527 529 530 531 532 533 534 535 537 538 539
541 542 544 545 546 547 548 550 552 553 554 555 556 557 558 559 560 561
562 563 564 565 566 567] [ 0 7 17 18 24 28 31 48 50 54 62 74 77 83 86 105 112 113
116 126 130 131 132 136 140 143 147 155 157 160 161 165 174 176 177 178
179 181 193 201 209 210 214 221 243 246 256 263 265 284 285 287 295 296
304 308 312 317 318 320 322 323 326 341 345 346 348 355 358 359 360 365
370 377 378 379 384 385 390 404 406 412 417 419 423 428 430 434 435 439
442 457 459 461 466 468 474 475 476 477 500 509 511 516 524 525 528 536
540 543 549 551 568]
###Markdown
**Nota importante:** El número total de particiones y / o iteraciones no se puede estimar a priori y dependerá principalmente del número de instancias de que dispongamos. El caso más común es utilizar 10 particiones por defecto y 3 repeticiones. Sin embargo, en caso de pocas instancias se puede bajar a 5 particiones y 3 ó 5 repeticiones. **4.5 Validación cruzada estratificada (solo clasificación)**Esta alternativa es una variación muy adecuada para clasificación del *k-fold* que devuelve particiones de tipo estratificadas. Este término significa que cada conjunto contiene aproximadamente el mismo porcentaje de muestras de cada clase objetivo que el conjunto completo.Esto resulta imprescindible para evitar el posible sesgo en los modelos y obtener una conclusiones equívocas. Es especialmente relevante cuando se trabaja con datos no balanceados, es decir, con un porcentaje muy pequeño de muestras de una de las clases. Tal como se indicó al comienzo de esta actividad, este caso de estudio se observa con frecuencia en problemas del contexto de biología y/o salud. En el siguiente ejemplo, se implementa la solución utilizando esta nueva versión mejorada de *k-fold*. El formato de llamada es prácticamente equivalente al caso anterior de validación cruzada. Para entender las diferencias en su comportamiento, se realiza una comparativa entre ambos esquemas de particionamiento, comprobando la distribución final de ejemplos que hay en cada clase. Para ello se utiliza una instrucción de `numpy` llamada ```bincount``` que simplemente cuenta el número de ocurrencias de cada valor dentro de la lista.
###Code
from sklearn.model_selection import StratifiedKFold, KFold
import numpy as np

# Experiment parameters.
rd = 42
particiones = 5

# Stratified split: preserves class proportions in every fold.
skf = StratifiedKFold(n_splits=particiones, shuffle=True, random_state=rd)
# Plain K-fold split, used as the baseline for the comparison below.
kf = KFold(n_splits=particiones, shuffle=True, random_state=rd)

# Report the per-fold class histogram of each splitter.
print("Comprobar distribución de clases en SKF:")
for tr_idx, te_idx in skf.split(X, y):
    X_train, X_test = X.iloc[tr_idx], X.iloc[te_idx]
    y_train, y_test = y.iloc[tr_idx], y.iloc[te_idx]
    print(f'train - {np.bincount(y_train.iloc[:, 0])} | test - {np.bincount(y_test.iloc[:, 0])}')

print("\nComprobar distribución de clases en KF:")
for tr_idx, te_idx in kf.split(X, y):
    X_train, X_test = X.iloc[tr_idx], X.iloc[te_idx]
    y_train, y_test = y.iloc[tr_idx], y.iloc[te_idx]
    print(f'train - {np.bincount(y_train.iloc[:, 0])} | test - {np.bincount(y_test.iloc[:, 0])}')
###Output
Comprobar distribución de clases en SKF:
train - [169 286] | test - [43 71]
train - [169 286] | test - [43 71]
train - [170 285] | test - [42 72]
train - [170 285] | test - [42 72]
train - [170 286] | test - [42 71]
Comprobar distribución de clases en KF:
train - [169 286] | test - [43 71]
train - [175 280] | test - [37 77]
train - [169 286] | test - [43 71]
train - [169 286] | test - [43 71]
train - [166 290] | test - [46 67]
###Markdown
En el ejemplo anterior se ha mostrado la distribución de instancias en las dos clases para las particiones de test. A pesar que el conjunto de datos de *breast cancer* no presenta un alto desequilibrio de clases, sí que se ha observado que se puede crear sin quererlo un sesgo hacia alguna de las mismas (por ejemplo en la segunda de las particiones). **REFERENCIAS BIBLIOGRÁFICAS**- Han, J., Kamber, M., Pei, J. (2011). Data Mining: Concepts and Techniques. San Francisco, CA, USA: Morgan Kaufmann Publishers. ISBN: 0123814790, 9780123814791- Hastie, T., Tibshirani, R., Friedman, J. H. (2001). The Elements of Statistical Learning. Springer Verlag.- Witten, I. H., Frank, E., Hall, M. A., Pal, C. J. (2017). Data mining: practical machine learning tools and techniques. Amsterdam; London: Morgan Kaufmann. ISBN: 9780128042915 0128042915- Scikit-Learn: Cross-validation: evaluating estimator performance https://scikit-learn.org/stable/modules/cross_validation.html (visitado el 25 de Junio de 2020) **Referencias adicionales**- Alpaydin, E. (2016). Machine Learning: The New AI. MIT Press. ISBN: 9780262529518- Fernández Hilario, A., García López, S., Galar, M., Prati, R.C., Krawczyk, B., Herrera, F. (2018) Learning from Imbalanced Data Sets. Springer. ISBN: 978-3-319-98074-4, doi: 10.1007/978-3-319-98074-4- James, G., Witten, D., Hastie, T., Tibshirani, R. (2013). An introduction to statistical learning (Vol. 112). Springer.- Kohavi, R. (1995). A Study of Cross-Validation and Bootstrap for Accuracy Estimation nd Model Selection. Proceedings of the International Joint Conference on Artificial Intelligence IJCAI), 1995 (p./pp. 1137--1145).- Molnar, C. (2020). Interpretable Machine Learning A Guide for Making Black Box Models Explainable. https://christophm.github.io/interpretable-ml-book/ (consultado el 30 de Abril de 2020)- Rao, R. B. & Fung, G. (2008). On the Dangers of Cross-Validation. An Experimental Evaluation. SDM (p./pp. 588-596), : SIAM. 
ISBN: 978-1-61197-278-8 MOOC Machine Learning y Big Data para la Bioinformática (1ª edición) http://abierta.ugr.es
###Code
# Mount Google Drive into the Colab filesystem so the notebook can read and
# write files under /content/drive (prompts for authorization on first run).
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____ |
PumasExploration.ipynb | ###Markdown
Transforming: Age
###Code
# Bucket ages (AGEP) into generation labels.
d['age_cat'] = None
_generations = [
    (d.AGEP <= 20, 'genz'),
    ((d.AGEP > 20) & (d.AGEP <= 36), 'millennial'),
    ((d.AGEP > 36) & (d.AGEP <= 52), 'genx'),
    ((d.AGEP > 52) & (d.AGEP <= 71), 'boomer'),
    (d.AGEP > 71, 'silent'),
]
for mask, label in _generations:
    d.loc[mask, 'age_cat'] = label

# Sanity check: every person should have received a generation label.
d.age_cat.isnull().sum()
# Weighted population per generation, largest first.
d.groupby('age_cat').PWGTP.sum().sort_values(ascending=False)
d.head()
###Output
_____no_output_____
###Markdown
Race
###Code
# Collapse the nine RAC1P codes into four coarse race categories:
# 1 = white alone, 2 = Black/African-American alone, 3-8 = other alone,
# 9 = two or more races.
race_map = {1: 'white_alone', 2: 'aa_alone', 9: 'multiple'}
race_map.update({code: 'other_alone' for code in range(3, 9)})

# Codes actually present in the data (should all be covered by race_map).
np.sort(d.RAC1P.unique())
d['race_cat'] = d.RAC1P.map(race_map)
# Weighted population per race category, largest first.
d.groupby('race_cat').PWGTP.sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Sex
###Code
# Codes present in the SEX column.
np.sort(d.SEX.unique())
# Build {1: 'male', 2: 'female'} from the ordered label pair.
sex_map = dict(enumerate(('male', 'female'), start=1))
d['sex_cat'] = d.SEX.map(sex_map)
# Weighted population per sex, largest first.
d.groupby('sex_cat').PWGTP.sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Marital Status
###Code
# Marital-status codes present in the data.
np.sort(d.MAR.unique())
# 1 = married, 2-4 = widowed/divorced/separated, 5 = never married.
mar_map = {1: 'married', 5: 'never_married'}
mar_map.update(dict.fromkeys((2, 3, 4), 'prev_married'))
d['mar_cat'] = d.MAR.map(mar_map)
# Weighted population per marital category, largest first.
d.groupby('mar_cat').PWGTP.sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Education
###Code
# Education codes (SCHL); NaN means no schooling reported.
np.sort(d.SCHL.unique())
# Degree holders are codes > 20; everyone else — including NaN, for which
# the > 20 comparison is False — stays in the default 'no_deg' bucket.
d['edu_cat'] = 'no_deg'
d.loc[d.SCHL > 20, 'edu_cat'] = 'deg'
# Weighted share of the population in each education bucket.
d.groupby('edu_cat').PWGTP.sum().sort_values(ascending=False)/d.PWGTP.sum()
###Output
_____no_output_____
###Markdown
Neighborhood
###Code
# Weighted population of each (PUMA, state) area, largest first.
area_size = d.groupby(['PUMA','ST']).PWGTP.sum().sort_values(ascending=False)
area_size.head()
type(area_size)
area_size = area_size.to_frame()
area_size.head(3)
# Running total over areas ordered by size; used to carve the population
# into urban / suburban / rural tiers by cumulative share.
area_size['running_sum'] = area_size.PWGTP.cumsum()
area_size.head(3)
# Target: largest areas covering ~26% of people are "urban".
urban_t_cutoff = int(.26*d.PWGTP.sum())
urban_cutoff = area_size.loc[area_size.running_sum<urban_t_cutoff,'running_sum'].max()
# Next ~53% of people are "suburban"; the remainder is "rural".
suburban_t_cutoff = urban_cutoff + int(.53*d.PWGTP.sum())
suburban_cutoff = area_size.loc[area_size.running_sum<suburban_t_cutoff,'running_sum'].max()
urban_t_cutoff,urban_cutoff
suburban_t_cutoff,suburban_cutoff
# Assign the tier to each area based on where its running sum falls.
area_size['nei_cat'] = None
area_size.loc[area_size.running_sum<=urban_cutoff,'nei_cat'] = 'urban'
area_size.loc[(area_size.running_sum>urban_cutoff) &
              (area_size.running_sum<=suburban_cutoff),'nei_cat'] = 'suburban'
area_size.loc[area_size.running_sum>suburban_cutoff,'nei_cat'] = 'rural'
# Check the realized population shares of the three tiers.
area_size.groupby('nei_cat').PWGTP.sum()/d.PWGTP.sum()
# Propagate each person's (PUMA, ST) tier back onto the person-level frame.
area_size_map = area_size.nei_cat.to_dict()
d['nei_cat'] = [area_size_map[(puma,st)] for puma,st in d[['PUMA','ST']].values]
d.groupby('nei_cat').PWGTP.sum()/d.PWGTP.sum()
###Output
_____no_output_____
###Markdown
Income
###Code
# Personal income (PINCP) distribution; NaN means no reported income.
d.PINCP.describe()
d.PINCP.isnull().sum()
# Default bucket covers missing income and anything under $40k; comparisons
# against NaN are False, so NaN rows simply keep the default.
d['income_cat'] = 'no_or_low'
d.loc[(d.PINCP >= 40000) & (d.PINCP < 160000), 'income_cat'] = 'middle'
d.loc[d.PINCP >= 160000, 'income_cat'] = 'high'
# Weighted share of the population in each income bucket.
d.groupby('income_cat').PWGTP.sum()/d.PWGTP.sum()
###Output
_____no_output_____
###Markdown
Employment
###Code
# Missing-value counts and distributions for weeks (WKW) and hours (WKHP) worked.
d.WKW.isnull().sum(), d.WKHP.isnull().sum()
d.WKW.describe()
d.WKHP.describe()
# Full-time employment: WKW codes 1-3 (most of the year worked) at 35+ hours/week.
is_full_time = d.WKW.isin([1, 2, 3]) & (d.WKHP >= 35)
d['fempl_cat'] = 'no'
d.loc[is_full_time, 'fempl_cat'] = 'yes'
# Weighted share of full-time workers.
d.groupby('fempl_cat').PWGTP.sum()/d.PWGTP.sum()
d.groupby('fempl_cat').PWGTP.sum()/d.PWGTP.sum()
d.head()
# Number of distinct demographic groups (product of category cardinalities),
# with and without an extra value per category.
5*4*2*3*2*3*3*2
6*5*3*4*3*4*4*3
def get_group_sizes(population, all_the_cats, all_cats_values):
    """Compute weighted sizes and ranks for every demographic grouping.

    For every non-empty subset of the category columns in ``all_the_cats``,
    groups ``population`` by those columns and records, per observed
    combination of values, its total person weight (``PWGTP``) and its rank
    among the groups of that subset (1 = largest, ties share the min rank).

    Args:
        population: DataFrame with a ``PWGTP`` weight column plus one column
            per entry of ``all_the_cats``.
        all_the_cats: np.ndarray of category column names.
        all_cats_values: np.ndarray parallel to ``all_the_cats`` holding the
            possible values of each category (used only to count the
            theoretical number of groups).

    Returns:
        Dict keyed by a tuple of booleans marking which categories are
        active. Each value is a dict with:
            'n_populated_groups': number of value combinations observed,
            'n_groups': theoretical number of value combinations,
            'vals': {value-tuple: (total PWGTP, rank)}.
    """
    # Generalized: the subset enumeration follows the number of supplied
    # categories instead of being hard-coded to 8.
    n_cats = len(all_the_cats)
    group_sizes = {}
    for indicators in itertools.product(*([(False, True)] * n_cats)):
        mask = np.array(indicators)
        cats_on = all_the_cats[mask]
        if not len(cats_on):
            continue  # the empty subset defines no grouping
        # Theoretical group count = product of the active cardinalities.
        num_groups = 1
        for vals in all_cats_values[mask]:
            num_groups *= len(vals)
        groups = population.groupby(cats_on.tolist()).PWGTP.sum().sort_values(ascending=False).to_frame()
        groups['grank'] = groups.PWGTP.rank(method='min', ascending=False)
        vals_out = {}
        for index, row in groups.iterrows():
            # Single-column groupbys yield scalar labels; normalize to tuples.
            key = index if isinstance(index, tuple) else (index,)
            vals_out[key] = (row['PWGTP'], row['grank'])
        group_sizes[indicators] = {
            'n_populated_groups': len(groups),
            'n_groups': num_groups,
            'vals': vals_out,
        }
    return group_sizes
# The eight engineered category columns and their possible values.
all_the_cats = np.array(['age_cat','race_cat','sex_cat','mar_cat',
                     'edu_cat','nei_cat','income_cat','fempl_cat'])
# NOTE(review): the tuples have different lengths, so numpy stores this as an
# object array of tuples; newer numpy versions may require dtype=object here.
all_cats_values = np.array([('genz','millennial','genx','boomer','silent'),
                            ('white_alone','aa_alone','other_alone','multiple',),
                            ('male','female'),
                            ('married','prev_married','never_married',),
                            ('no_deg','deg'),
                            ('urban','suburban','rural'),
                            ('no_or_low','middle','high'),
                            ('yes','no')])
# Group sizes over the full population (Gen Z included).
group_sizes_with_z = get_group_sizes(d,all_the_cats,all_cats_values)
# Same computation with Gen Z removed from both the data and the age values.
all_cats_values_sans_z = np.array([('millennial','genx','boomer','silent'),
                            ('white_alone','aa_alone','other_alone','multiple',),
                            ('male','female'),
                            ('married','prev_married','never_married',),
                            ('no_deg','deg'),
                            ('urban','suburban','rural'),
                            ('no_or_low','middle','high'),
                            ('yes','no')])
sub = d[d['age_cat']!='genz']
group_sizes_without_z = get_group_sizes(sub,all_the_cats,all_cats_values_sans_z)
# Interactive exploration of the computed group-size dictionaries.
len(group_sizes_with_z),len(group_sizes_without_z)
list(group_sizes_with_z[(True,False,False,False,False,True,False,False)].items())
# WARNING: `d` is reassigned here — it was the person-level DataFrame above,
# and from this point on it refers to a group-size entry (later a dict).
d = group_sizes_with_z[(True,False,False,False,False,True,False,False)]
d['vals'] = list(d['vals'].items())
list(d.items())
list(group_sizes_without_z[(True,False,False,False,False,True,False,False)]['vals'].values())
len(group_sizes_with_z),len(group_sizes_without_z)
import pickle
import pickle
# Persist both dictionaries for the JS-export step below.
with open('pumas_out.pkl','wb') as f:
    pickle.dump({'with_z':group_sizes_with_z,'without_z':group_sizes_without_z},f)
# NOTE(review): these lookups use value tuples with None placeholders, which
# do not match the keys produced by get_group_sizes — presumably leftovers
# from an earlier key scheme; verify before reuse.
group_sizes_without_z[('genx',None,'male','married','deg','suburban','middle',None)]
group_sizes_with_z[('genx',None,'male','married','deg','suburban','middle',None)]
# Scratch work on a `groups` frame left over from interactive function runs.
groups.PWGTP.rank(method='min',ascending=False)
groups.PWGTP.rank(method='min',ascending=True,pct=True)
groups['grank'] = groups.PWGTP.rank(method='min',ascending=False)
groups['gpct'] = (groups['grank']-1)/len(groups)
groups
import pickle
import json
# Reload and validate: every grouping must account for the full weighted
# population (a fixed total, with and without Gen Z respectively).
with open('pumas_out.pkl','rb') as f:
    d = pickle.load(f)
for m in d['with_z'].values():
    n = sum(f[0] for f in m['vals'].values())
    #print(n)
    if n!=325719178:
        print('oh no')
for m in d['without_z'].values():
    n = sum(f[0] for f in m['vals'].values())
    #print(n)
    if n!=238771628:
        print('oh no')
# Trim the payload to a single grouping for a small export experiment.
d = {'with_z':d['with_z']}
d['with_z'] = {(True,False,False,False,False,True,False,False):
               d['with_z'][(True,False,False,False,False,True,False,False)]}
d['with_z'][(True,False,False,False,False,True,False,False)]['vals']
d
# Serialize the nested dict into JavaScript `new Map([...])` literal source.
# Tuple keys are flattened to comma-joined strings; leaf values go through
# json.dumps. The commented `break` lines were used to truncate output while
# debugging.
out_str = 'const data = new Map(['
for u1,key1 in enumerate(d.keys()):
    #if u1>0: break
    if u1>0:
        out_str += ','
    out_str += '["' + key1 + '",new Map(['
    for u2,key2 in enumerate(d[key1].keys()):
        #if u2>0: break
        if u2>0:
            out_str += ','
        out_str += '["%s",new Map([' % (','.join('true' if k else 'false' for k in key2),)
        for u3,key3 in enumerate(d[key1][key2].keys()):
            #if u3>0: break
            if u3>0:
                out_str += ','
            out_str += '["' + key3 + '",'
            if key3!='vals':
                out_str += str(d[key1][key2][key3]) + ']'
            else:
                out_str += 'new Map(['
                for u4,key4 in enumerate(d[key1][key2][key3].keys()):
                    #if u4>0: break
                    if u4>0:
                        out_str += ','
                    out_str += '["%s",%s]' % (','.join(k for k in key4),
                                              json.dumps(d[key1][key2][key3][key4]),)
                out_str += '])]'
        out_str += '])]'
    out_str += '])]'
out_str += ']);'
out_str
# Write the generated JavaScript source to disk.
with open('d.js','w') as f:
    f.write(out_str)
# Abandoned alternative: embed "new Map(...)" strings directly in the dict
# before dumping (kept for reference).
# for top_key in ['with_z','without_z']:
#     v1 = d[top_key]
#     for indicators in v1.keys():
#         v2 = v1[indicators]
#         v2['vals'] = 'new Map(%s)' % ([[list(key),list(value)]
#                                        for key,value in v2['vals'].items()],)
#     v1[indicators] = 'new Map(%s)' % ([[key,value]
#                                        for key,value in v1[indicators].items()],)
# d[top_key] = 'new Map(%s)' % ([[list(key),value] for key,value in d[top_key].items()],)
# d = [[key,value] for key,value in d.items()]
# Convert one entry's vals to JSON-friendly nested lists and dump everything.
p = d['with_z'][(True,True,True,True,True,True,True,True)]
p['vals'] = [[list(key),list(value)] for key,value in p['vals'].items()]
import json
with open('j.json','w') as f:
    json.dump(d,f,indent=6)
# Scratch experiments with the list-of-pairs representation of `d`.
with open('test.js','w') as f:
    f.write(d[0][1])
json.dumps(d[0][1][0])
import ast
ast.literal_eval(str(d[0][1][0]))
d[0][0]
_____no_output_____ |
1_mosaic_data_attention_experiments/3_stage_wise_training/alternate_minimization/on CIFAR data/old_notebooks/alternate_minimization_2_lr_scheduling_every_20_rmsprop_5.ipynb | ###Markdown
load mosaic data
###Code
# Normalize CIFAR-10 images to [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Three classes act as mosaic foregrounds; the other seven as backgrounds.
foreground_classes = {'plane', 'car', 'bird'}
#foreground_classes = {'bird', 'cat', 'deer'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
#background_classes = {'plane', 'car', 'dog', 'frog', 'horse','ship', 'truck'}
# Class ids of the three foreground classes (fg1 is the smallest).
fg1,fg2,fg3 = 0,1,2
dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
# Walk 5000 batches (the full 50k training set) and split every image into
# the foreground or background pool according to its class.
for i in range(5000):
  images, labels = dataiter.next()
  for j in range(batch_size):
    if(classes[labels[j]] in background_classes):
      img = images[j].tolist()
      background_data.append(img)
      background_label.append(labels[j])
    else:
      img = images[j].tolist()
      foreground_data.append(img)
      foreground_label.append(labels[j])
# Materialize the pools as tensors for fast indexing later.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx,fg_idx,fg):
  """Assemble one 9-patch mosaic image and its label.

  bg_idx : indexes into background_data[] used as the 8 background patches
  fg_idx : index into foreground_data[] used as the single foreground patch
  fg : position (0-8) at which the foreground patch is placed

  Returns (image_list, label): a (9, 3, 32, 32) tensor of stacked patches
  and the foreground class shifted into the 0..2 range.
  """
  image_list=[]
  j=0
  for i in range(9):
    if i != fg:
      image_list.append(background_data[bg_idx[j]])#.type("torch.DoubleTensor"))
      j+=1
    else:
      image_list.append(foreground_data[fg_idx])#.type("torch.DoubleTensor"))
  label = foreground_label[fg_idx]-fg1 # shift so the foreground classes map to 0,1,2 (fg1 is the smallest foreground class id)
  #image_list = np.concatenate(image_list ,axis=0)
  image_list = torch.stack(image_list)
  return image_list,label
desired_num = 30000
mosaic_list_of_images =[]     # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[]                  # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[]               # label of mosaic image = foreground class present in that mosaic
# Per-iteration seeding makes each mosaic reproducible from its index i.
for i in range(desired_num):
  np.random.seed(i)
  bg_idx = np.random.randint(0,35000,8)   # 8 random background patches
  fg_idx = np.random.randint(0,15000)     # 1 random foreground patch
  fg = np.random.randint(0,9)             # its position in the 3x3 grid
  fore_idx.append(fg)
  image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
  mosaic_list_of_images.append(image_list)
  mosaic_label.append(label)
class MosaicDataset(Dataset):
    """Dataset of mosaic images.

    Each item is the triple ``(mosaic, label, fore_idx)``: the stack of 9
    patch images, the foreground class label, and the position (0-8) of the
    foreground patch inside the mosaic.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        """Store the parallel sequences of mosaics, labels and positions."""
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        # One sample per label entry.
        return len(self.label)

    def __getitem__(self, idx):
        sample = (self.mosaic[idx], self.label[idx], self.fore_idx[idx])
        return sample
batch = 250
# Wrap the 30k training mosaics in a shuffled DataLoader.
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
test_images =[]     #list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx_test =[]   #list of indexes at which foreground image is present in a mosaic image
test_label=[]       # label of mosaic image = foreground class present in that mosaic
# Seeds offset by 30000 so test mosaics never repeat a training seed.
for i in range(10000):
  np.random.seed(i+30000)
  bg_idx = np.random.randint(0,35000,8)
  fg_idx = np.random.randint(0,15000)
  fg = np.random.randint(0,9)
  fore_idx_test.append(fg)
  image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
  test_images.append(image_list)
  test_label.append(label)
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
###Markdown
models
###Code
class Module1(nn.Module):
    """The "where" (attention) network.

    Scores each of the 9 patches of a mosaic with a small CNN, softmaxes the
    scores into attention weights (alphas), and returns the attention-weighted
    average patch together with the alphas.

    forward(z) -> (y, x):
        z : (B, 9, 3, 32, 32) mosaic batch
        y : (B, 3, 32, 32) weighted average of the 9 patches
        x : (B, 9) attention weights summing to 1 per sample
    """
    def __init__(self):
        super(Module1, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 1)

    def forward(self, z):
        # Derive batch size, dtype and device from the input instead of
        # relying on the global `batch` and a hard-coded "cuda" device
        # (the original also had a dead `x1 = x[:,0]; torch.mul(...)`
        # statement whose result was discarded — removed).
        scores = torch.zeros([z.shape[0], 9], dtype=z.dtype, device=z.device)
        for i in range(9):
            scores[:, i] = self.helper(z[:, i])[:, 0]
        x = F.softmax(scores, dim=1)  # attention weights (alphas)
        # Attention-weighted average of the 9 patches.
        y = torch.zeros_like(z[:, 0])
        for i in range(9):
            y = y + torch.mul(x[:, i][:, None, None, None], z[:, i])
        return y, x

    def helper(self, x):
        """Score a single (B, 3, 32, 32) patch with the shared CNN."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x
class Module2(nn.Module):
    """The "what" network: classifies a 3x32x32 averaged image into 3 classes."""
    def __init__(self):
        super(Module2, self).__init__()
        # Two conv/pool blocks followed by a four-layer classifier head.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 3)

    def forward(self, y):
        # Convolutional feature extraction.
        h = self.pool(F.relu(self.conv1(y)))
        h = self.pool(F.relu(self.conv2(h)))
        h = h.view(-1, 16 * 5 * 5)
        # Fully connected head; the final layer emits raw class logits.
        for fc in (self.fc1, self.fc2, self.fc3):
            h = F.relu(fc(h))
        return self.fc4(h)
# Build both networks in double precision on the GPU; reseeding before each
# makes the weight initialization reproducible.
torch.manual_seed(1234)
where_net = Module1().double()
where_net = where_net.to("cuda")
# print(net.parameters)
torch.manual_seed(1234)
what_net = Module2().double()
what_net = what_net.to("cuda")
def calculate_attn_loss(dataloader,what,where,criter):
    """Evaluate the where->what pipeline over a whole dataloader.

    Runs both networks in eval mode with gradients disabled and returns
    (mean loss per batch, analyse_data summary, #correct, #total, accuracy).
    """
    what.eval()
    where.eval()
    # Use whatever device the "what" network lives on instead of
    # hard-coding "cuda", so evaluation also works on CPU.
    device = next(what.parameters()).device
    r_loss = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    correct = 0
    tot = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to(device), labels.to(device)
            avg, alpha = where(inputs)          # attention-weighted image + alphas
            outputs = what(avg)                 # class logits
            _, predicted = torch.max(outputs.data, 1)
            correct += sum(predicted == labels)
            tot += len(predicted)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss = criter(outputs, labels)
            r_loss += loss.item()
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # Bug fix: average over the number of batches (i+1); the original
    # divided by the last enumerate index i, i.e. n_batches - 1.
    return r_loss/(i+1), analysis, correct.item(), tot, correct.item()/tot
def analyse_data(alphas, lbls, predicted, f_idx):
    """Tally focus/prediction outcomes for a batch.

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth]: counts of the four
    (focus on true patch?) x (prediction correct?) combinations, followed by
    how many samples had a dominant attention weight (>= 0.5) and how many
    did not.
    """
    ftpt = ffpt = ftpf = ffpf = amth = alth = 0
    for alpha_row, lbl, prd, true_pos in zip(alphas, lbls, predicted, f_idx):
        focus = np.argmax(alpha_row)
        # Dominant-attention bookkeeping.
        if alpha_row[focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        focus_ok = focus == true_pos
        pred_ok = prd == lbl
        if focus_ok and pred_ok:
            ftpt += 1
        elif pred_ok:
            ffpt += 1
        elif focus_ok:
            ftpf += 1
        else:
            ffpf += 1
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]
###Output
_____no_output_____
###Markdown
training
###Code
# Instantiate one RMSprop optimizer per sub-network so they can be stepped
# in alternation, each with a plateau-based LR scheduler on the test loss.
optimizer_where = optim.RMSprop(where_net.parameters(),lr =0.0009)#,nesterov=True)
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.0009)#,nesterov=True)
scheduler_where = optim.lr_scheduler.ReduceLROnPlateau(optimizer_where, mode='min', factor=0.5, patience=2,min_lr=5e-5,verbose=True)
scheduler_what = optim.lr_scheduler.ReduceLROnPlateau(optimizer_what, mode='min', factor=0.5, patience=2,min_lr=5e-5, verbose=True)
criterion = nn.CrossEntropyLoss()
acti = []
# Per-epoch FTPT analysis rows and loss curves, for train and test.
analysis_data_tr = []
analysis_data_tst = []
loss_curi_tr = []
loss_curi_tst = []
epochs = 120
# The two networks alternate training in blocks of this many epochs.
every_what_epoch = 20
# calculate zeroth epoch loss and FTPT values (before any training)
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tst.append(running_loss)
analysis_data_tst.append(anlys_data)
# training starts: alternate minimization — for every_what_epoch epochs only
# the "what" net is updated, then only the "where" net, and so on.
for epoch in range(epochs): # loop over the dataset multiple times
  ep_lossi = []
  running_loss = 0.0
  what_net.train()
  where_net.train()
  # Announce which sub-network is being updated in this phase.
  if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
    print(epoch+1,"updating what_net, where_net is freezed")
    print("--"*40)
  elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
    print(epoch+1,"updating where_net, what_net is freezed")
    print("--"*40)
  for i, data in enumerate(train_loader, 0):
    # get the inputs
    inputs, labels,_ = data
    inputs = inputs.double()
    inputs, labels = inputs.to("cuda"),labels.to("cuda")
    # zero the parameter gradients
    optimizer_where.zero_grad()
    optimizer_what.zero_grad()
    # forward + backward + optimize
    avg, alpha = where_net(inputs)
    outputs = what_net(avg)
    loss = criterion(outputs, labels)
    # print statistics
    running_loss += loss.item()
    loss.backward()
    # Gradients flow through both nets, but only one optimizer steps,
    # which effectively freezes the other network during this phase.
    if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
      optimizer_what.step()
    elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
      optimizer_where.step()
  # End-of-epoch evaluation on both splits.
  running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
  analysis_data_tr.append(anls_data)
  loss_curi_tr.append(running_loss_tr)   #loss per epoch
  print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy))
  running_loss_tst,anls_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
  analysis_data_tst.append(anls_data)
  loss_curi_tst.append(running_loss_tst)   #loss per epoch
  print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tst,correct,total,accuracy))
  # Early stop once the training loss is essentially zero.
  if running_loss_tr<=0.05:
    break
  # Step the scheduler of whichever network is currently training.
  if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
    scheduler_what.step(running_loss_tst)
  elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
    scheduler_where.step(running_loss_tst)
print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
# Plot train vs test cross-entropy loss over epochs and save to PDF.
fig = plt.figure(figsize = (12,8))
#vline_list = np.arange(every_what_epoch, epoch + every_what_epoch, every_what_epoch)
# train_loss = np.random.randn(340)
# test_loss = np.random.randn(340)
# epoch+2 points: the zeroth (pre-training) evaluation plus one per epoch.
epoch_list = np.arange(0, epoch+2)
plt.plot(epoch_list,loss_curi_tr, label='train_loss')
plt.plot(epoch_list,loss_curi_tst, label='test_loss')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("CE Loss")
#plt.vlines(vline_list,min(min(loss_curi_tr),min(loss_curi_tst)), max(max(loss_curi_tst),max(loss_curi_tr)),linestyles='dotted')
plt.title("train loss vs test loss")
plt.show()
fig.savefig("train_test_loss_plot.pdf")
analysis_data_tr
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
# Assemble per-epoch FTPT counts into tidy train/test DataFrames.
# analyse_data rows are [ftpt, ffpt, ftpf, ffpf, amth, alth], so the last
# two columns are the attention-dominance counts.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test  = pd.DataFrame()
df_train[columns[0]] = np.arange(0,epoch+2)
df_train[columns[1]] = analysis_data_tr[:,-2]
df_train[columns[2]] = analysis_data_tr[:,-1]
df_train[columns[3]] = analysis_data_tr[:,0]
df_train[columns[4]] = analysis_data_tr[:,1]
df_train[columns[5]] = analysis_data_tr[:,2]
df_train[columns[6]] = analysis_data_tr[:,3]
df_test[columns[0]] = np.arange(0,epoch+2)
df_test[columns[1]] = analysis_data_tst[:,-2]
df_test[columns[2]] = analysis_data_tst[:,-1]
df_test[columns[3]] = analysis_data_tst[:,0]
df_test[columns[4]] = analysis_data_tst[:,1]
df_test[columns[5]] = analysis_data_tst[:,2]
df_test[columns[6]] = analysis_data_tst[:,3]
df_train
df_test
# Attention dominance on the training set: how many samples had a
# max attention weight above / below 0.5, per epoch.
plt.figure(figsize=(12,12))
plt.plot(df_train[columns[0]],df_train[columns[1]], label='argmax > 0.5')
plt.plot(df_train[columns[0]],df_train[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
#plt.vlines(vline_list,min(min(df_train[columns[1]]),min(df_train[columns[2]])), max(max(df_train[columns[1]]),max(df_train[columns[2]])),linestyles='dotted')
plt.show()
plt.figure(figsize=(12,12))
plt.plot(df_train[columns[0]],df_train[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_train[columns[0]],df_train[columns[6]], label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
#plt.vlines(vline_list,min(min(df_train[columns[3]]),min(df_train[columns[4]]),min(df_train[columns[5]]),min(df_train[columns[6]])), max(max(df_train[columns[3]]),max(df_train[columns[4]]),max(df_train[columns[5]]),max(df_train[columns[6]])),linestyles='dotted')
plt.show()
# Test-set argmax curves. The original cell's title and y-label said
# "Training" although the plotted frame is df_test -- a copy-paste mistake,
# corrected here.
plt.figure(figsize=(12,12))
plt.plot(df_test[columns[0]],df_test[columns[1]], label='argmax > 0.5')
plt.plot(df_test[columns[0]],df_test[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("test data")
plt.title("On Test set")
#plt.vlines(vline_list,min(min(df_test[columns[1]]),min(df_test[columns[2]])), max(max(df_test[columns[1]]),max(df_test[columns[2]])),linestyles='dotted')
plt.show()
# Test-set focus/prediction agreement curves. The title and y-label are
# corrected from "Training" to "Test" (the plotted frame is df_test).
plt.figure(figsize=(12,12))
plt.plot(df_test[columns[0]],df_test[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_test[columns[0]],df_test[columns[6]], label ="focus_false_pred_false ")
plt.title("On Test set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("test data")
#plt.vlines(vline_list,min(min(df_test[columns[3]]),min(df_test[columns[4]]),min(df_test[columns[5]]),min(df_test[columns[6]])), max(max(df_test[columns[3]]),max(df_test[columns[4]]),max(df_test[columns[5]]),max(df_test[columns[6]])),linestyles='dotted')
plt.show()
###Output
_____no_output_____ |
0.9/_downloads/6e4a95fe0199dca955baf42264810b20/initial-sampling-method.ipynb | ###Markdown
Comparing initial sampling methodsHolger Nahrstaedt 2020 Sigurd Carlsen October 2019.. currentmodule:: skoptWhen doing baysian optimization we often want to reserve some of theearly part of the optimization to pure exploration. By default theoptimizer suggests purely random samples for the first n_initial_points(10 by default). The downside to this is that there is no guarantee thatthese samples are spread out evenly across all the dimensions.Sampling methods as Latin hypercube, Sobol', Halton and Hammerslytake advantage of the fact that we know beforehand how many randompoints we want to sample. Then these points can be "spread out" insuch a way that each dimension is explored.See also the example on an integer space`sphx_glr_auto_examples_initial_sampling_method_integer.py`
###Code
print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
from skopt.space import Space
from skopt.sampler import Sobol
from skopt.sampler import Lhs
from skopt.sampler import Halton
from skopt.sampler import Hammersly
from skopt.sampler import Grid
from scipy.spatial.distance import pdist
def plot_searchspace(x, title):
    """Scatter-plot a set of 2-D sample points on fixed axes.

    Each point is drawn twice: once as a small dot and once as a large
    translucent disc, so overlapping samples remain visible.
    """
    pts = np.array(x)
    fig, ax = plt.subplots()
    ax.plot(pts[:, 0], pts[:, 1], 'bo', label='samples')
    ax.plot(pts[:, 0], pts[:, 1], 'bo', markersize=80, alpha=0.5)
    # ax.legend(loc="best", numpoints=1)
    ax.set_xlabel("X1")
    ax.set_xlim([-5, 10])
    ax.set_ylabel("X2")
    ax.set_ylim([0, 15])
    ax.set_title(title)
# Shared settings: 10 points drawn from a 2-D box [-5, 10] x [0, 15].
n_samples = 10
space = Space([(-5., 10.), (0., 15.)])
# space.set_transformer("normalize")
###Output
_____no_output_____
###Markdown
Random sampling
###Code
# Baseline: plain uniform random samples. Pairwise distances of every method
# are collected in pdist_data for the boxplot comparison at the end.
x = space.rvs(n_samples)
plot_searchspace(x, "Random samples")
pdist_data = []
x_label = []
pdist_data.append(pdist(x).flatten())
x_label.append("random")
###Output
_____no_output_____
###Markdown
Sobol'
###Code
# Sobol' low-discrepancy sequence.
sobol = Sobol()
x = sobol.generate(space.dimensions, n_samples)
plot_searchspace(x, "Sobol'")
pdist_data.append(pdist(x).flatten())
x_label.append("sobol'")
###Output
_____no_output_____
###Markdown
Classic Latin hypercube sampling
###Code
# Classic LHS: one sample per stratum, no optimization criterion.
lhs = Lhs(lhs_type="classic", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'classic LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("lhs")
###Output
_____no_output_____
###Markdown
Centered Latin hypercube sampling
###Code
# Centered LHS: samples placed at the center of each stratum.
lhs = Lhs(lhs_type="centered", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'centered LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("center")
###Output
_____no_output_____
###Markdown
Maximin optimized hypercube sampling
###Code
# LHS optimized to maximize the minimum pairwise distance (10000 iterations).
lhs = Lhs(criterion="maximin", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'maximin LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("maximin")
###Output
_____no_output_____
###Markdown
Correlation optimized hypercube sampling
###Code
# LHS optimized under the correlation criterion.
lhs = Lhs(criterion="correlation", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'correlation LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("corr")
###Output
_____no_output_____
###Markdown
Ratio optimized hypercube sampling
###Code
# LHS optimized under the ratio criterion.
lhs = Lhs(criterion="ratio", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'ratio LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("ratio")
###Output
_____no_output_____
###Markdown
Halton sampling
###Code
# Halton low-discrepancy sequence.
halton = Halton()
x = halton.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Halton')
pdist_data.append(pdist(x).flatten())
x_label.append("halton")
###Output
_____no_output_____
###Markdown
Hammersly sampling
###Code
# Hammersly low-discrepancy sequence.
hammersly = Hammersly()
x = hammersly.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Hammersly')
pdist_data.append(pdist(x).flatten())
x_label.append("hammersly")
###Output
_____no_output_____
###Markdown
Grid sampling
###Code
# Regular grid sampling (borders included).
grid = Grid(border="include", use_full_layout=False)
x = grid.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Grid')
pdist_data.append(pdist(x).flatten())
x_label.append("grid")
###Output
_____no_output_____
###Markdown
Pdist boxplot of all methodsThis boxplot shows the distance between all generated points usingEuclidian distance. The higher the value, the better the sampling method.It can be seen that random has the worst performance
###Code
# Boxplot of all collected pairwise-distance samples, one box per method.
fig, ax = plt.subplots()
ax.boxplot(pdist_data)
plt.grid(True)
plt.ylabel("pdist")
_ = ax.set_ylim(0, 12)
_ = ax.set_xticklabels(x_label, rotation=45, fontsize=8)
###Output
_____no_output_____ |
review_wo1ever/180827_3-one-hot-encoding.ipynb | ###Markdown
범주형 데이터 다루기 - 원핫인코딩(One Hot Encoding)데이터에는 수치형 데이터와 텍스트 데이터나 범주형 데이터가 있다. 머신러닝이나 딥러닝 알고리즘은 수치로 된 데이터만 이해할 수 있다.그래서 기계가 이해할 수 있는 형태로 데이터를 변환해 주어야 하는데 범주형 데이터는 원핫인코딩 형태로 변환해 준다.원핫인코딩이란 해당되는 하나의 데이터만 1로 변경해 주고 나머지는 0으로 채워주는 것을 뜻한다.예를 들어 과일이라는 컬럼에 사과, 배, 감이 들어있다고 하자, 이 때 각각의 과일인 사과, 배, 감으로 컬럼을 만들어 주고 해당 되는 과일에만 1로 표기를 해주고 나머지 과일은 0으로 표기해 주는 것이다. 원핫인코딩 전|과일||:---||사과||배||감||사과| 원핫인코딩 후|과일|과일_사과|과일_배|과일_감||:---|:---:|---:|:---||사과| 1| 0| 0||배| 0| 1| 0||감| 0| 0| 1||사과| 1| 0| 0|원핫인코딩은 파이썬코드로 직접 구현해 줄 수도 있으며, 판다스나 사이킷런을 사용해서 변환해 줄 수도 있다.여기에서는 캐글의 타이타닉 데이터를 사용해서 원핫인코딩을 설명한다.데이터 다운로드 : https://www.kaggle.com/c/titanic/data
###Code
import pandas as pd
import numpy as np
print(pd.__version__)
print(np.__version__)
# Load the data with pandas.
# We use Kaggle's Titanic dataset here.
# Download: https://www.kaggle.com/c/titanic/data
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
print(train.shape)
print(test.shape)
train.dtypes
test.dtypes
train.columns
# Summary statistics for the numeric columns:
# counts, min/max, mean, quartiles, etc.
train.describe()
# Extract only the object-dtype columns.
# Decide which of these are categorical and should be encoded.
# Besides one-hot encoding, the TF / TF-IDF encodings from NLP are also options;
# think about which encoding fits each column.
obj_df = train.select_dtypes(include=['object']).copy()
obj_df.head()
# Show rows that have any missing data.
# Cabin has the most missing values.
# Handling missing values is covered separately.
obj_df[obj_df.isnull().any(axis=1)].head(5)
# Check whether this column is suitable as categorical data.
obj_df["Cabin"].value_counts().head(5)
# Copy the data so we can compare before/after processing.
train_c_df = train.copy()
test_c_df = test.copy()
###Output
_____no_output_____
###Markdown
성별
###Code
# Distribution of the raw 'Sex' values.
train['Sex'].value_counts()
# Manual binary encoding, applied in place on both splits: male -> 0, female -> 1.
train.loc[train["Sex"] == "male", "Sex"] = 0
train.loc[train["Sex"] == "female", "Sex"] = 1
test.loc[test["Sex"] == "male", "Sex"] = 0
test.loc[test["Sex"] == "female", "Sex"] = 1
# Split male/female into 0/1 (False/True).
# train['Sex'] = train['Sex'].apply(lambda s: 1 if s == 'female' else 0)
# test['Sex'] = test['Sex'].apply(lambda s: 1 if s == 'female' else 0)
train.head()
# Cast to str so missing Embarked values become the literal 'nan' string,
# matching the 'nan' class used by the LabelEncoder helper below.
train_c_df["Embarked"] = train_c_df["Embarked"].astype(str)
###Output
_____no_output_____
###Markdown
사이킷런의 LabelEncoder로 원핫인코딩해준다.
###Code
# 카테고리 데이터를 인코딩 해준다.
from sklearn.preprocessing import LabelEncoder
# 성별을 0과 1로 인코딩
def gender_to_int(data):
    """Append an integer 'Gender' column derived from the 'Sex' column.

    Note: LabelEncoder orders classes alphabetically, so the encoding is
    'female' -> 0, 'male' -> 1 (the reverse of the manual mapping used
    earlier in the notebook).
    """
    encoder = LabelEncoder()
    encoder.fit(["male", "female"])
    data["Gender"] = encoder.transform(data["Sex"])
    return data
def embarked_to_int(data):
    """Append an integer 'Embarked_label' column encoding 'Embarked'.

    LabelEncoder sorts its classes, giving 'C' -> 0, 'Q' -> 1, 'S' -> 2,
    'nan' -> 3 (uppercase letters sort before lowercase 'nan').
    """
    encoder = LabelEncoder()
    encoder.fit(["S", "C", "Q", "nan"])
    data["Embarked_label"] = encoder.transform(data["Embarked"])
    return data
# Apply the LabelEncoder helpers to the copied frames.
train_c_df = gender_to_int(train_c_df)
test_c_df = gender_to_int(test_c_df)
# NOTE(review): embarked_to_int is applied to the train copy only -- the test
# copy never gets an 'Embarked_label' column; confirm that is intended.
train_c_df = embarked_to_int(train_c_df)
train_c_df.head(10)
###Output
_____no_output_____
###Markdown
승선위치
###Code
# Distribution of boarding ports.
train['Embarked'].value_counts()
# Hand-rolled one-hot columns for Embarked: one boolean indicator per port.
train_c_df["Embarked_C"] = train_c_df["Embarked"] == "C"
train_c_df["Embarked_S"] = train_c_df["Embarked"] == "S"
train_c_df["Embarked_Q"] = train_c_df["Embarked"] == "Q"
# Compare column counts before/after adding the indicator columns.
print(train.shape)
print(train_c_df.shape)
train_c_df[["Embarked", "Embarked_C", "Embarked_S", "Embarked_Q"]].head(10)
###Output
(891, 12)
(891, 16)
###Markdown
판다스의 get_dummies로 원핫인코딩
###Code
# 기계가 데이터를 이해할 수 있도록
# 카테고리 데이터를 one-hot-encoding 해준다.
def dummy_data(data, columns):
    """Replace each listed categorical column with one-hot indicator columns.

    For every name in `columns`, indicator columns prefixed with that name
    (one per observed category) are appended, and the original column is
    dropped. Returns the transformed frame.
    """
    for col in columns:
        dummies = pd.get_dummies(data[col], prefix=col)
        data = pd.concat([data, dummies], axis=1)
        data = data.drop(col, axis=1)
    return data
dummy_columns = ["Sex", "Pclass", "Embarked"]
train_dummy = dummy_data(train, dummy_columns)
test_dummy = dummy_data(test, dummy_columns)
print('원핫인코딩 전 shape')
print('train',train.shape)
print('test', test.shape)
print('get_dummies로 원핫인코딩 후 shape')
print('train', train_dummy.shape)
print('test', test_dummy.shape)
train_dummy.head()
###Output
_____no_output_____
###Markdown
* 이렇게 인코딩 된 데이터를 그대로 사용하게 된다면 사용하지 않는 컬럼을 drop 해주는 방법으로 피처를 생성해 준다.
###Code
# 사용하지 않을 컬럼을 제거해 피처로 사용할 컬럼만 남겨둔다.
def drop_not_concerned(data, columns):
    """Return a copy of `data` with the listed columns removed."""
    return data.drop(columns=columns)
# Keep only feature columns: drop identifiers / free text, then the target.
not_concerned_columns = ["PassengerId", "Name", "Ticket", "Cabin"]
X_train = drop_not_concerned(train_dummy, not_concerned_columns)
X_train = X_train.drop('Survived', axis=1)
X_test = drop_not_concerned(test_dummy, not_concerned_columns)
X_train.head()
###Output
_____no_output_____ |
ML Models/Logistic Regression/Logistic Regression Practice.ipynb | ###Markdown
This project consists of a fake advertising data set, indicating whether or not a particular internet user clicked on an Advertisement on a company website. We will try to create a model that will predict whether or not they will click on an ad based off the features of that user.This data set contains the following features:* 'Daily Time Spent on Site': consumer time on site in minutes* 'Age': customer age in years* 'Area Income': Avg. Income of geographical area of consumer* 'Daily Internet Usage': Avg. minutes a day consumer is on the internet* 'Ad Topic Line': Headline of the advertisement* 'City': City of consumer* 'Male': Whether or not consumer was male* 'Country': Country of consumer* 'Timestamp': Time at which consumer clicked on Ad or closed window* 'Clicked on Ad': 0 or 1 indicating clicking on Ad
###Code
# importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# importing the data
ad_data = pd.read_csv('advertising.csv')
# checking data headers
ad_data.head()
#checking the data info
ad_data.info()
# checking data description
ad_data.describe()
# Eploratory Data Analysis, Creating a histogram of the Age
sns.set_style('whitegrid')
ad_data['Age'].hist(bins=50)
plt.xlabel('Age')
# Plotting Area Income vs Age
sns.jointplot(x='Age', y='Area Income', data = ad_data)
# Plotting Daily time spent on site vs Age
sns.jointplot(x='Age', y='Daily Time Spent on Site', data =ad_data, kind='kde', color ='red')
# plotting Daily Time Spent on Site vs Daily Internet Usage
sns.jointplot(x='Daily Internet Usage', y='Daily Time Spent on Site', data =ad_data, color ='blue')
# creating a pairplot with the hue defined by the 'Clicked on Ad' column feature.
sns.pairplot(ad_data, hue = 'Clicked on Ad', palette='gist_rainbow')
# selecting features to train the model by splitting the data into testing and training samples
from sklearn.model_selection import train_test_split
X = ad_data.drop(['Ad Topic Line','City','Country','Timestamp'],axis=1)
y = ad_data['Clicked on Ad']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#Training and fitting a logistic regression model on the training set
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train,y_train)
# predictions
prediction = lr.predict(X_test)
# Printing a classification report
from sklearn.metrics import classification_report
print(classification_report(y_test,prediction))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 162
1 1.00 1.00 1.00 168
accuracy 1.00 330
macro avg 1.00 1.00 1.00 330
weighted avg 1.00 1.00 1.00 330
|
3. Facial Keypoint Detection, Complete Pipeline_Aoxue.ipynb | ###Markdown
Face and Facial Keypoint detectionAfter you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.3. Use your trained model to detect facial keypoints on the image.--- In the next python cell we load in required libraries for this section of the project.
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
###Output
_____no_output_____
###Markdown
Select an image Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
###Code
import cv2
# load in color image for face detection
image = cv2.imread('images/obamas.jpg')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image (now in RGB order, so matplotlib shows true colors)
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
###Output
_____no_output_____
###Markdown
Detect all faces in an imageNext, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.An example of face detection on a variety of images is shown below.
###Code
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
# (args: scaleFactor=1.2, minNeighbors=2; each detection is (x, y, w, h) in pixels)
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
# draw a rectangle around each detected face
# you may also need to change the width of the rectangle drawn depending on image resolution
cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
###Output
_____no_output_____
###Markdown
Loading in a trained modelOnce you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.First, load your best model by its filename.
###Code
import torch
from models import Net
net = Net()
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
# net.load_state_dict(torch.load('saved_models/keypoints_model_1.pt'))
# Load the trained weights from disk.
net.load_state_dict(torch.load('saved_models/keypoints_model_1.pt'))
## print out your net and prepare it for testing (uncomment the line below)
# eval() switches the net to inference mode (disables dropout / batch-norm updates).
net.eval()
###Output
_____no_output_____
###Markdown
Keypoint detectionNow, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images. TODO: Transform each detected face into an input TensorYou'll need to perform the following steps for each detected face:1. Convert the face from RGB to grayscale2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)4. Reshape the numpy image into a torch image.You may find it useful to consult the transformation code in `data_load.py` to help you perform these processing steps. TODO: Detect and display the predicted keypointsAfter each face has been appropriately converted into an input Tensor for your network to see as input, you'll wrap that Tensor in a Variable() and can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
###Code
def show_all_keypoints(image, keypoints):
"""
Visualize a face image with its predicted facial keypoints overlaid.

image     : torch tensor (transposed as C x H x W below; grayscale face crop).
keypoints : torch tensor of normalized keypoint coordinates (68 x/y pairs).
"""
plt.figure(figsize=(5,5))
keypoints = keypoints.data.numpy()
keypoints = keypoints *3.2+100 # Undo normalization so keypoints land on the image; assumes training scaled points as (pts - 100) / 3.2 -- TODO confirm against data_load.py
keypoints = np.reshape(keypoints, (68, -1)) # reshape to 68 (x, y) keypoints for the face
image = image.numpy()
image = np.transpose(image, (1, 2, 0)) # Convert to numpy image shape (H x W x C)
image = np.squeeze(image) # drop the single channel axis for grayscale display
plt.imshow(image, cmap='gray')
plt.scatter(keypoints[:, 0], keypoints[:, 1], s=20, marker='.', c='m')
# Run the trained net on each Haar-detected face and plot its keypoints.
from torch.autograd import Variable
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
# Select the region of interest that is the face in the image
# (the crop expands the Haar box with extra margin above and on the sides)
# roi = image_copy[y:y + int(1.5 * h), x - int(0.4 * w):x + int(1.1 * w)]
roi = image_copy[y-60:y + int(1.4* h), x - int(0.4 * w):x + int(1.1 * w)]
# roi = image_copy[y-30:y+h+60, x-30:x+w+60]
# plt.imshow(roi)
## TODO: Convert the face region from RGB to grayscale
roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
#plt.imshow(roi, cmap = 'gray')
## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
roi = roi / 255.
#plt.imshow(roi, cmap = 'gray')
## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
roi = cv2.resize(roi, (224, 224))
#plt.imshow(roi, cmap = 'gray')
## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
roi = np.expand_dims(roi, 0)
roi = np.expand_dims(roi, 0) # (1, 1, 224, 224)
# roi = np.reshape(roi, (1, 1, 224, 224)) # Option 2
#print(roi.shape)
## TODO: Make facial keypoint predictions using your loaded, trained network
## perform a forward pass to get the predicted facial keypoints
roi_torch = Variable(torch.from_numpy(roi)) # Converting numpy to torch variable (Variable is a legacy no-op wrapper in modern PyTorch)
#print(roi_torch.shape)
roi_torch = roi_torch.type(torch.FloatTensor)
keypoints = net(roi_torch) # Forward pass
## TODO: Display each detected face and the corresponding keypoints
show_all_keypoints(roi_torch.squeeze(0), keypoints)
###Output
_____no_output_____ |
Lectures/Lecture14/Lecture14_Feb05.ipynb | ###Markdown
Lecture 14: Number Theory/Cryptography (2 of ...?) Please note: This lecture will be recorded and made available for viewing online. If you do not wish to be recorded, please adjust your camera settings accordingly. Reminders/Announcements:- Assignment 4 has been collected. Assignment 5 out soon. (has been pushed)- Final Project Topics are available. Take a look!- Halfway point! https://forms.gle/fV8MiyZ922fJkXHY7 Last Time...Recall last time we discussed the *multiplicative order* of elements mod $N$. Namely, if $a$ is an integer coprime to $N$, then the multiplicative order of $a$ mod $N$ is the smallest positive integer $d$ with$$a^d \equiv 1 \mod N.$$
###Code
# Example: the multiplicative order of 4 mod 53 (Sage).
N = 53
a = 4
# gcd must be 1 for a to have a multiplicative order mod N.
gcd(a,N)
mod(a,N).multiplicative_order()
# Verify: if 26 is the order, a^26 mod 53 is 1.
power_mod(a,26,N)
###Output
_____no_output_____
###Markdown
Euler's Theorem told us that $d$ is always bounded above by $\phi(N)$, where $\phi$ is the Euler totient function. In fact, $d$ will always divide $\phi(N)$.
###Code
# phi(53) = 52 bounds every multiplicative order mod 53 (Euler's Theorem).
euler_phi(N)
# The order found above divides phi(53) = 52: remainder is 0.
52%26
###Output
_____no_output_____
###Markdown
Finally, we learned that if $p$ is prime, there *is always* a number $a$ with multiplicative order mod $p$ equal to $\phi(p)$. Such an integer is called a *primitive root* mod $p$.
###Code
# A primitive root mod 19: its powers cycle through all nonzero residues.
p = 19
euler_phi(p)
primitive_root(p)
# Print successive powers of 2 mod 19 (2 is a primitive root here).
for i in range(19):
print(power_mod(2,i,19))
###Output
_____no_output_____
###Markdown
***** Participation Check ***************************In the code cell below, find and display *all of the primitive roots* mod $53$.
###Code
# your code here
###Output
_____no_output_____
###Markdown
In the code cell below, compute $\phi(52)$, where $\phi$ is Euler's totient function. How does this relate to the answer in the previous part?
###Code
# your code here
# phi(52) counts the primitive roots mod 53 (there are phi(p-1) of them).
euler_phi(52)
###Output
_____no_output_____
###Markdown
General fact: If $p$ is prime, there are $\phi(p-1)$ primitive roots mod $p$. *********************************************************** This Time...We want to start talking about "difficult" things from the lens of computation. The reason is that "difficult" things will often directly lead to cryptographic protocols. Our end goal: how can two users communicate securely through public channels? If they are allowed to conspire before hand, this is very easy to imagine. People collude together in secret and agree on a method to encode and decode their message; for instance, a "one-time pad" is a common way of doing this (or at least; *used* to be a common way of doing this):
###Code
# The secret message, written as space-separated 8-bit ASCII codes.
binaryMsg = '01110100 01101000 01101001 01110011 00100000 01101001 01110011 00100000 01101101 01111001 00100000 01110011 01100101 01100011 01110010 01100101 01110100 00100000 01101101 01100101 01110011 01110011 01100001 01100111 01100101 00100001'
# Decode: one byte per token -> one character each.
chars = binaryMsg.split()
chars = [chr(int(char,2)) for char in chars]
print(''.join(chars))
import random
# One random key bit per message bit (spaces are skipped).
myPad = [random.randint(0,1) for char in binaryMsg if char in ['0','1']] #This is our shared secret key
show(myPad[0:10])
# Encrypt: add each message bit to the matching pad bit mod 2 (i.e. XOR).
encodedMsg = ''
i = 0
for char in binaryMsg:
if char == ' ':
encodedMsg += char
else:
encodedMsg += str((int(char)+myPad[i])%2)
i+=1
encodedMsg
binaryMsg
# Without the pad, the ciphertext decodes to gibberish.
chars = encodedMsg.split()
chars = [chr(int(char,2)) for char in chars]
print(''.join(chars))
# Decrypt: XOR with the same pad again restores the original bits.
decodedMsg = ''
i = 0
for char in encodedMsg:
if char == ' ':
decodedMsg += char
else:
decodedMsg += str((int(char)+myPad[i])%2)
i+=1
chars = decodedMsg.split()
chars = [chr(int(char,2)) for char in chars]
print(''.join(chars))
###Output
_____no_output_____
###Markdown
A one time pad is effectively impossible to decode, as long as:- The key is truly random- The key is kept secret- The key is as long as the plaintext- The key is never reused.What if those individuals had never met before, so they couldn't even agree on a secret code or password to use?This seems impossible! To communicate securely they need some sort of key, but how can they share the key without first being able to communicate securely?!In fact, it's very easy to imagine how to do this:![](crypto2.png)![](crypto1.png)In other words: - Alice encrypts. - Then Bob doubly encrypts. - Then Alice partially decrypts. - Then Bob fully decrypts. At every stage, Eve cannot unlock the box to read what's inside. This is a depiction of *asymmetric key cryptography*. What's even more impressive is that we can even do better than this. We can communicate publicly and actually create a shared key, without any eavesdroppers getting the info. Discrete LogarithmsRecall the standard logarithm and exponential functions from calculus:
###Code
# Overlay log(x), exp(x), and y = x: log and exp mirror each other across y = x.
plot(log(x), (x, 0, 7))+plot(exp(x), (x, -6, 2), color = 'green')+plot(x, (-6,6), color = 'red')
a = exp(7.53)
# log inverts exp: this recovers 7.53.
RR(log(a))
# Note: in Sage, ^ is exponentiation, so b = 10^15.
b = 10^15
RR(log(b))
# Change of base: ln(b)/ln(10) is log base 10, i.e. ~15.
RR(log(b))/RR(log(10))
###Output
_____no_output_____
###Markdown
There are many fast algorithms for approximating $\log(a)$ for any constant $a$ (for example, there are power series approximations which can be computed fairly easily)Given the discussion from Wednesday; what if we tried to extend these notions to modular arithmetic? (By the way: for those with algebra background, this conversation directly generalizes to *finite fields*, but I want to try and avoid using that term)Let $p$ be a prime number and let $a$ be a primitive root $\mod p$. We already know exponentiation:
###Code
# Powers of 2 mod 19: modular exponentiation is fast and easy to compute.
for i in range(0,19):
print(power_mod(2,i,19))
###Output
_____no_output_____
###Markdown
We will define the *discrete logarithm of $m$ with respect to $a \mod p$* to be the unique integer $d$ in $\{0,1,\dots,p-2\}$ such that $a^d \equiv m\mod p$.
###Code
# Build a random discrete-log instance: prime p, primitive root a,
# secret exponent; m = a^exponent mod p is the public value.
p = random_prime(2000)
print(p)
a = primitive_root(p)
print(a)
import random
exponent = random.randint(0,p-2)
m = power_mod(a,exponent,p)
print(m)
# Sage solves the discrete log of m base a; it should equal `exponent`.
mod(m,p).log(mod(a,p))
print(exponent)
# Sanity check from one particular run (log came out 544); depends on randomness.
print(power_mod(a,544, p))
###Output
_____no_output_____
###Markdown
This looks super easy! What's the big deal?Thanks for asking. The big deal is that discrete logarithms are *hard* to compute in general! What I mean by "hard" is that there is no efficient algorithm which can compute discrete logarithms in the general case. It is conjectured that this is a *NP-Intermediate* problem. A quick word on what this (heuristically) means. When we solve problems using a computer, we are interested in the number of operations it takes for our computer to spit out an answer. This is directly related to the *runtime* of the algorithm. The runtime of the algorithm is dependent on the size of the input. It's very fast to factor numbers that are smaller than $100$. It's very slow to factor numbers that are larger than $100000$. Etc.What is the size of the input? Well this depends on what your input is. For a number $N$, the size of the input is (roughly) the number of digits required to write that number down. This is basically logarithmic in $N$.
###Code
# Input size of a number N is its digit count, i.e. roughly log10(N).
N = 100
print(float(N.log(10)))
N.ndigits()
N = 55124
print(float(N.log(10)))
N.ndigits()
N = 9992934923949
print(float(N.log(10)))
N.ndigits()
###Output
_____no_output_____
###Markdown
Let's try to solve the discrete logarithm problem in the most naive way possible, and then discuss the runtime. ***** Participation Check ***************************In the code cell below, write a function which - takes as input a prime number $p$, a primitive root $a\mod p$, and an integer $N$ which is coprime to $p$.- iterates through the exponents $0,1,2,\dots,p-2$ and, at each step, tests if $a^e \equiv N\mod p$- once it finds such an exponent $e$, return $e$.
###Code
def discreteLog(p, a, N):
    """Return the discrete logarithm of N with respect to a modulo p.

    Brute-force search as described above: iterate over candidate exponents
    e = 0, 1, ..., p-2 and return the first e with a^e == N (mod p).
    This is O(p) modular exponentiations -- exponential in the number of
    digits of p, which is exactly why discrete logs are considered hard.

    Returns None if no exponent works (cannot happen when a is a primitive
    root mod p and N is coprime to p).
    """
    target = N % p
    for e in range(p - 1):
        if pow(a, e, p) == target:
            return e
    return None
###Output
_____no_output_____
###Markdown
*********************************************************** What is the runtime of this algorithm? Well in general we are just iterating through the values $0,1,2,\dots,p-2$ and doing a test at each step. If you implemented this as efficiently as possible, this would take ~$p$ operations in the worst case. We say this algorithm has a $O(p)$ runtime.But remember! The input size is *logarithmic* in $p$! What is the comparison of input to runtime? I.e. what is the comparison of $\log(p)$ to $p$? Exponential!There are many more algorithms for computing discrete logarithms. All of them are known to be exponential (although some of them are very fast on certain special inputs). A major question in computability theory is whether or not there exists a *polynomial time algorithm* for computing the discrete logarithm in general inputs.
###Code
# Same experiment with a ~30-digit prime: Sage's generic discrete log
# takes measurable time at this size.
p = random_prime(10^30)
print(p)
a = primitive_root(p)
print(a)
import random
exponent = random.randint(0,p-1)
m = power_mod(a,exponent,p)
print(m)
import time
t = time.time()
print(mod(m,p).log(mod(a,p)))
print('ran in roughly: ',time.time()-t,' seconds')
print(exponent)
###Output
_____no_output_____
###Markdown
Practical implementations of Diffie-Hellman-Merkle use 1000 bit primes (or larger), for which this method would be entirely impossible. Diffie-Hellman-Merkle Key ExchangeLet's use this to our advantage to generate a key for secure communication.(Historical note: you will often see this simply referred to as the *Diffie-Hellman key exchange*, as the math behind this was originally *published* by Whitfield Diffie and Martin Hellman. But Ralph Merkle was integral to the process. You can read more about this in an interview of Hellman, transcribed here: https://conservancy.umn.edu/bitstream/handle/11299/107353/oh375mh.pdf;jsessionid=0DBC6185AFF7B816D0F1D85C0911D058?sequence=1)Let's say Alice and Bob want to communicate securely. To do so, they want to establish a key, or a "shared secret" that they can use to encode future messages.Here is the idea:- Step 1: Alice and Bob publicly choose a large prime $p$ and a multiplicative generator $g\mod p$.- Step 2: Alice and Bob independently (and secretly) choose an integer in the range $0,\dots,p-2$. These are called their *private keys*. Alice's will be called $a$ and Bob's will be called $b$.- Step 3: Alice and Bob publicly transmit $A = g^a\mod p$ and $B = g^b \mod p$.- Step 4: Alice receives $B$ and computes $B^a \mod p$. Bob receives $A$ and computes $A^b\mod p$. Modulo $p$,$$B^a \equiv (g^b)^a \equiv g^{ab} \equiv (g^a)^b\equiv A^b,$$so Alice and Bob have created a shared key $K$.If Eve wanted to break this protocol, she would have to be able to recreate $K$ from $g$, $g^a$, and $g^b$. This is believed to be as difficult as the discrete logarithm problem in general.
###Code
# Diffie-Hellman-Merkle key exchange demo.
# Public stuff
p = random_prime(2^64)
g = primitive_root(p)
# Alice's private and public keys
a = Integers().random_element(p-2)
A = power_mod(g,a,p)
# Bob's private and public keys
b = Integers().random_element(p-2)
B = power_mod(g,b,p)
# Alice computes the shared secret
K_alice = power_mod(B,a,p)
# Bob computes the shared secret
K_bob = power_mod(A,b,p)
# Finally, check that they are the same
# (both equal g^(a*b) mod p, so this prints True)
print(K_alice == K_bob)
K_alice
K_bob
###Output
_____no_output_____
###Markdown
Weak PrimesThe Diffie-Hellman-Merkle is a very good algorithm in general. That doesn't mean you can apply the method blindly though. Here is an example of a terrible prime number:
###Code
# A deliberately weak prime: p-1 splits into two coprime factors of size ~sqrt(p).
p = 1298074214633668968809363113301611
###Output
_____no_output_____
###Markdown
Why is it terrible? Well, look at it
###Code
# Inspect the factorization of p-1; the coprime split enables the attack below.
factor(p-1)
###Output
_____no_output_____
###Markdown
Whenever $p-1 = q_1*q_2$ for $q_1, q_2$ relatively prime factors of size $\approx \sqrt{p}$, the following happens. Recall Euler's Theorem: $x^{p-1}\mod p = 1$ for any $x$. Thus $(x^{q_1})^{q_2}\mod p = 1$ for any $x$. That is, if we only look at elements with are $q_1$-powers of something, they have order $q_2$. An analogous thing happens if we switch $q_1$ and $q_2$. Suppose we are trying to solve the mod $p$ discrete log problem for $A = g^a$, i.e. we want to recover $a$.The idea is to recover $a\mod q_1$ and $a\mod q_2$, from which we can use the Chinese Remainder Theorem to recover $a$.- Find the discrete logarithm of $A^{q_1}$ with respect to $g^{q_1}$, i.e. $a_2$ such that $g^{a_2*q_1} = A^{q_1}$. This implies that $a_2*q_1 \equiv a*q_1\mod p-1$, i.e. $a_2\equiv a\mod q_2$.- Find the discrete logarithm of $A^{q_2}$ with respect to $g^{q_2}$, i.e. $a_1$ such that $g^{a_1*q_2} = A^{q_2}$. This implies that $a_1*q_2 \equiv a*q_2\mod p-1$, i.e. $a_1\equiv a\mod q_1$.- Compute $a = CRT(a_1, a_2, q_1, q_2)$.
###Code
# Attack: recover the secret exponent mod q1 and mod q2 separately by
# working in the two small subgroups, then combine with CRT.
q1 = 2 * 3 * 5 * 2487977 * 482705387
q2 = 36028797018963913
exponent = 983902092654374580967281794418725
g = primitive_root(p)
print(g)
print(power_mod(g,exponent,p))
A = power_mod(g,exponent,p)
# Raising to the q1 (resp. q2) power collapses the order to q2 (resp. q1).
A1 = power_mod(A,q1,p)
A2 = power_mod(A,q2,p)
g1 = power_mod(g,q1,p)
g2 = power_mod(g,q2,p)
import time
t = time.time()
# a1 == exponent mod q2 (log in the order-q2 subgroup); a2 == exponent mod q1.
a1 = mod(A1,p).log(mod(g1,p))
a2 = mod(A2,p).log(mod(g2,p))
print(time.time()-t)
print(a1)
print(a2)
# CRT pairs a2 with modulus q1 and a1 with modulus q2 to rebuild the exponent.
crt([a2,a1],[q1,q2])
exponent
# For comparison: the direct discrete log on the full group is much slower.
import time
t = time.time()
print(mod(A,p).log(mod(g,p)))
print(time.time()-t)
###Output
_____no_output_____ |
Nicolae_Dubenco_DS4_121.ipynb | ###Markdown
_Lambda School Data Science_ Join and Reshape datasetsObjectives- concatenate data with pandas- merge data with pandas- understand tidy data formatting- melt and pivot data with pandasLinks- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data) - Combine Data Sets: Standard Joins - Tidy Data - Reshaping Data- Python Data Science Handbook - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables Reference- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html) Download dataWe’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)!
###Code
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
###Output
-rw-r--r-- 1 502 staff 2.6K May 2 2017 aisles.csv
-rw-r--r-- 1 502 staff 270 May 2 2017 departments.csv
-rw-r--r-- 1 502 staff 551M May 2 2017 order_products__prior.csv
-rw-r--r-- 1 502 staff 24M May 2 2017 order_products__train.csv
-rw-r--r-- 1 502 staff 104M May 2 2017 orders.csv
-rw-r--r-- 1 502 staff 2.1M May 2 2017 products.csv
###Markdown
Join Datasets Goal: Reproduce this exampleThe first two orders for user id 1:
###Code
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
###Output
_____no_output_____
###Markdown
Load dataHere's a list of all six CSV filenames
###Code
!ls -lh *.csv
###Output
-rw-r--r-- 1 502 staff 2.6K May 2 2017 aisles.csv
-rw-r--r-- 1 502 staff 270 May 2 2017 departments.csv
-rw-r--r-- 1 502 staff 551M May 2 2017 order_products__prior.csv
-rw-r--r-- 1 502 staff 24M May 2 2017 order_products__train.csv
-rw-r--r-- 1 502 staff 104M May 2 2017 orders.csv
-rw-r--r-- 1 502 staff 2.1M May 2 2017 products.csv
###Markdown
For each CSV- Load it with pandas- Look at the dataframe's shape- Look at its head (first rows)- `display(example)`- Which columns does it have in common with the example we want to reproduce? aisles
###Code
!head aisles.csv
!wc aisles.csv
import pandas as pd
aisles = pd.read_csv('aisles.csv')
aisles.shape
aisles.head()
display(example)
###Output
_____no_output_____
###Markdown
Aisles doesn't have any data we need to reproduce the example! departments
###Code
!head departments.csv
departments = pd.read_csv('departments.csv')
departments.shape
departments
###Output
_____no_output_____
###Markdown
Departments also doesn't have the data we need. Onwards! order_products__prior
###Code
!ls -lh
!head order_products__prior.csv
order_products__prior = pd.read_csv('order_products__prior.csv')
order_products__prior.shape
!free -m
order_products__prior.head()
order_products__prior.isna().sum()
help(order_products__prior.isna)
order_products__prior.dtypes
dir(order_products__prior)
help(order_products__prior.groupby)
order_products__prior.groupby('order_id')['product_id'].count()
order_products__prior.groupby('order_id')['product_id'].count().mean()
display(example)
###Output
_____no_output_____
###Markdown
order_products__prior will help us figure out our order id and product id columns, but we still need more. order_products__train
###Code
order_products__train = pd.read_csv('order_products__train.csv')
order_products__train.shape
order_products__train.head()
###Output
_____no_output_____
###Markdown
order_products__train has the same columns as order_products__prior. orders
###Code
orders = pd.read_csv('orders.csv')
orders.head()
orders.shape
display(example)
###Output
_____no_output_____
###Markdown
orders data will give us:- user id- order number- order dow/hour of dayorder id/product id/cart order are in prior. So at this point, all we're missing is product name! products
###Code
products = pd.read_csv('products.csv')
products.shape
products.head()
###Output
_____no_output_____
###Markdown
And now we have product name, and the gang is all here! Let's put it together. Concatenate order_products__prior and order_products__train
###Code
order_products__prior.shape
order_products__train.shape
order_products = pd.concat([order_products__prior, order_products__train])
order_products.shape
assert 1 == 1
assert 1 == 0
assert len(order_products) == len(order_products__prior) + len(order_products__train)
help(pd.concat)
order_products['order_id'] == 2539329
condition = order_products['order_id'] == 2539329
order_products[condition]
display(example)
###Output
_____no_output_____
###Markdown
Get a subset of orders — the first two orders for user id 1 From `orders` dataframe:- user_id- order_id- order_number- order_dow- order_hour_of_day
###Code
orders.shape
orders[orders['user_id'] == 1]
orders[orders['user_id'] == 1][orders['order_number'] <= 2]
# Boolean mask: the first two orders placed by user 1.
condition = (orders['user_id'] == 1) & (orders['order_number'] <= 2)
# Columns needed to reproduce the example screenshot.
columns = ['user_id',
           'order_id',
           'order_number',
           'order_dow',
           'order_hour_of_day']
# .loc with a mask plus an explicit column list avoids chained indexing.
subset = orders.loc[condition, columns]
subset
display(example)
###Output
_____no_output_____
###Markdown
Merge dataframes Merge the subset from `orders` with columns from `order_products`
###Code
columns = ['order_id', 'add_to_cart_order', 'product_id']
order_products[columns].head()
s1 = {1, 2, 3}
s2 = {2, 3, 4}
# What's in both s1 and s2? 2 and 3
s1.intersection(s2)
merged = pd.merge(subset, order_products[columns],
how='inner', on='order_id')
merged
display(example)
subset.shape, order_products.shape, merged.shape
###Output
_____no_output_____
###Markdown
Merge with columns from `products`
###Code
products.head()
final = pd.merge(merged, products[['product_id', 'product_name']],
how='inner', on='product_id')
final
display(example)
final = final.sort_values(by=['order_number', 'add_to_cart_order'])
final.columns = [column.replace('_', ' ') for column in final]
final
###Output
_____no_output_____
###Markdown
Reshape Datasets Why reshape data? Some libraries prefer data in different formatsFor example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).> "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.htmlorganizing-datasets) This format is alternately called “long-form” or “tidy” data and is described in detail by Hadley Wickham. The rules can be simply stated:> - Each variable is a column- Each observation is a row> A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot." Data science is often about putting square pegs in round holesHere's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling! Hadley Wickham's ExamplesFrom his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
# Wickham's "messy" Table 1: one column per treatment, people as the index.
_people = ['John Smith', 'Jane Doe', 'Mary Johnson']
_results = [[np.nan, 2],
            [16, 11],
            [3, 1]]
table1 = pd.DataFrame(_results, index=_people,
                      columns=['treatmenta', 'treatmentb'])
# Table 2 holds the same data transposed (treatments as rows).
table2 = table1.T
###Output
_____no_output_____
###Markdown
"Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild. The table has two columns and three rows, and both rows and columns are labelled."
###Code
table1
###Output
_____no_output_____
###Markdown
"There are many ways to structure the same underlying data. Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
###Code
table2
###Output
_____no_output_____
###Markdown
"Table 3 reorganises Table 1 to make the values, variables and obserations more clear.Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."| name | trt | result ||--------------|-----|--------|| John Smith | a | - || Jane Doe | a | 16 || Mary Johnson | a | 3 || John Smith | b | 2 || Jane Doe | b | 11 || Mary Johnson | b | 1 | Table 1 --> TidyWe can use the pandas `melt` function to reshape Table 1 into Tidy format.
###Code
table1.columns.tolist()
table1.index.tolist()
# Melt Table 1 into tidy (long) format: one row per (person, treatment) pair.
tidy = (table1
        .reset_index()
        .melt(id_vars='index')
        .rename(columns={'index': 'name',
                         'variable': 'trt',
                         'value': 'result'}))
# Strip the shared 'treatment' prefix, leaving just 'a' / 'b'.
tidy['trt'] = tidy['trt'].str.replace('treatment', '')
# Display with the person's name as the index (does not modify `tidy`).
tidy.set_index('name')
###Output
_____no_output_____
###Markdown
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.melt.html
###Code
# We can replace values in a dataframe
tidy['trt'].replace('a', 0).replace('b', 1)
# Can also do this with a mapping dictionary
tidy['trt'].map({'a': 0, 'b': 1})
# Can also use astype
(tidy['trt'] == 'b').astype(int)
# Or can use a lambda - overkill here, but useful for flexibility
tidy['trt'].apply(lambda x: ord(x) - ord('a'))
###Output
_____no_output_____
###Markdown
Table 2 --> Tidy
###Code
##### LEAVE BLANK --an assignment exercise #####
###Output
_____no_output_____
###Markdown
Tidy --> Table 1The `pivot_table` function is the inverse of `melt`.
###Code
table1
tidy
tidy.pivot_table(index='name', columns='trt', values='result')
###Output
_____no_output_____
###Markdown
Tidy --> Table 2
###Code
##### LEAVE BLANK --an assignment exercise #####
###Output
_____no_output_____
###Markdown
Seaborn exampleThe rules can be simply stated:- Each variable is a column- Each observation is a rowA helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
###Code
sns.catplot(x='trt', y='result', col='name',
kind='bar', data=tidy, height=2);
###Output
_____no_output_____
###Markdown
Now with Instacart data
###Code
products = pd.read_csv('products.csv')
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
pd.read_csv('order_products__train.csv')])
orders = pd.read_csv('orders.csv')
###Output
_____no_output_____
###Markdown
Goal: Reproduce part of this exampleInstead of a plot with 50 products, we'll just do two — the first products from each list- Half And Half Ultra Pasteurized- Half Baked Frozen Yogurt
###Code
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
###Output
_____no_output_____
###Markdown
So, given a `product_name` we need to calculate its `order_hour_of_day` pattern. Subset and MergeOne challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge.
###Code
product_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized']
products.columns.tolist()
orders.columns.tolist()
order_products.columns.tolist()
merged = (products[['product_id', 'product_name']]
.merge(order_products[['order_id', 'product_id']])
.merge(orders[['order_id', 'order_hour_of_day']]))
products.shape, order_products.shape, orders.shape, merged.shape
merged.head()
# What conditon will filter `merged` to just the 2 products
# that we care about?
# This is equivalent ...
condition = ((merged['product_name']=='Half Baked Frozen Yogurt') |
(merged['product_name']=='Half And Half Ultra Pasteurized'))
# ... to this:
product_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized']
condition = merged['product_name'].isin(product_names)
subset = merged[condition]
subset.sample(n=5)
subset.shape
###Output
_____no_output_____
###Markdown
4 ways to reshape and plot 1. value_counts
###Code
froyo = subset[subset['product_name']=='Half Baked Frozen Yogurt']
cream = subset[subset['product_name']=='Half And Half Ultra Pasteurized']
(cream['order_hour_of_day']
.value_counts(normalize=True)
.sort_index()
.plot())
(froyo['order_hour_of_day']
.value_counts(normalize=True)
.sort_index()
.plot());
###Output
_____no_output_____
###Markdown
2. crosstab
###Code
pd.crosstab(subset['order_hour_of_day'],
subset['product_name'])
(pd.crosstab(subset['order_hour_of_day'],
subset['product_name'],
normalize='columns') * 100).plot();
###Output
_____no_output_____
###Markdown
3. Pivot Table
###Code
subset.pivot_table(index='order_hour_of_day',
columns='product_name',
values='order_id',
aggfunc=len).plot();
###Output
_____no_output_____
###Markdown
4. melt
###Code
table = pd.crosstab(subset['order_hour_of_day'],
subset['product_name'],
normalize=True)
melted = (table
.reset_index()
.melt(id_vars='order_hour_of_day')
.rename(columns={
'order_hour_of_day': 'Hour of Day Ordered',
'product_name': 'Product',
'value': 'Percent of Orders by Product'
}))
sns.relplot(x='Hour of Day Ordered',
y='Percent of Orders by Product',
hue='Product',
data=melted,
kind='line');
display(example)
###Output
_____no_output_____
###Markdown
Preparation for assignment
###Code
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
import pandas as pd
aisles = pd.read_csv('aisles.csv')
departments = pd.read_csv('departments.csv')
order_products__prior = pd.read_csv('order_products__prior.csv')
order_products__train = pd.read_csv('order_products__train.csv')
orders = pd.read_csv('orders.csv')
products = pd.read_csv('products.csv')
print(aisles.shape)
print(departments.shape)
print(order_products__prior.shape)
print(order_products__train.shape)
print(orders.shape)
print(products.shape)
# tables_csv = ['aisles', 'departments', 'orders_products__prior', 'orders_products__train', 'orders', 'products']
# def imports_csv(tables):
# for i in tables:
# i = pd.read_csv(i + '.csv')
# print(str(i))
# print(i.shape)
# print(i.head())
# return i
# imports_csv(tables_csv)
order_products = pd.concat([order_products__prior, order_products__train])
assert len(order_products) == len(order_products__prior) + len(order_products__train)
###Output
_____no_output_____
###Markdown
Assignment Join Data SectionThese are the top 10 most frequently ordered products. How many times was each ordered? 1. Banana2. Bag of Organic Bananas3. Organic Strawberries4. Organic Baby Spinach 5. Organic Hass Avocado6. Organic Avocado7. Large Lemon 8. Strawberries9. Limes 10. Organic Whole MilkFirst, write down which columns you need and which dataframes have them.Next, merge these into a single dataframe.Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products. Reshape Data Section- Replicate the lesson code- Complete the code cells we skipped near the beginning of the notebook- Table 2 --> Tidy- Tidy --> Table 2- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960. I need the next columns: * product_name, product_id* order_id, product_id
###Code
flights = sns.load_dataset('flights')
##### YOUR CODE HERE #####
###Output
_____no_output_____ |
introducao_logica_com_ttg_gabarito.ipynb | ###Markdown
Noções de lógica com o *ttg - truth-table-generator*Gabarito dos exercícios propostos no notebook teórico. Execute a célula abaixo para importar o construtor de tabelas `Truths` do *ttg*.
###Code
from ttg import Truths
###Output
_____no_output_____
###Markdown
É importante ler a [documentação do pacote](https://github.com/chicolucio/truth-table-generator) para entender a utilização do mesmo. O *ttg* permite apresentar as tabelas de forma mais agradável visualmente nos Jupyter Notebooks utilizando *pandas*. Execute a célula abaixo onde são declaradas duas funções para aplicar cores nas tabelas verdade. Você pode alterar as funções caso queira cores diferentes ou outros efeitos não implementados. Fica o convite para fazer alterações e as compartilhar.
###Code
def color_false_red(val):
    """Return a CSS color rule for a truth-table cell.

    Falsy values (False / 0) are colored red, truthy values (True / 1)
    green, for use with ``Styler.applymap``.
    """
    # Truthiness test instead of `val == False` (flagged by linters as
    # E712); behaves the same for the bool / 0 / 1 cells ttg produces.
    color = 'red' if not val else 'green'
    return 'color: %s' % color
def df_style(logic, hl_rows=None, hl_cols=None):
    """Apply display styling to a ttg truth table.

    Renders the truth table of a logical expression as a centered pandas
    Styler with no index column, highlights the requested rows/columns in
    light yellow, and finally colors False cells red and True cells green
    via ``color_false_red``.

    Parameters
    ----------
    logic : ttg.Truths
        Truth-table object exposing ``as_pandas()``.
    hl_rows : list, optional
        Row labels to highlight in yellow (default: none).
    hl_cols : list, optional
        Column labels to highlight in yellow (default: none).

    Returns
    -------
    pandas Styler ready for notebook display.
    """
    # None sentinels instead of mutable default arguments ([]), which are
    # shared across calls and a classic Python pitfall.
    if hl_rows is None:
        hl_rows = []
    if hl_cols is None:
        hl_cols = []
    d = logic.as_pandas().style.set_table_styles([{
        'selector': 'th',
        'props': [('font-size', '10pt')]
    }]).set_properties(**{
        'text-align': 'center',
        'font-size': '115%'
    }).apply(
        # axis=1: x is a row Series, x.name is the row label.
        lambda x:
        ['background: lightyellow' if x.name in hl_rows else '' for i in x],
        axis=1).apply(
        # axis=0: x is a column Series, x.name is the column label.
        lambda x:
        ['background: lightyellow' if x.name in hl_cols else '' for i in x],
        axis=0).hide_index()  # NOTE(review): hide_index() was removed in pandas 2.0; newer pandas needs hide(axis='index')
    d = d.applymap(color_false_red)
    return d
###Output
_____no_output_____
###Markdown
Exercícios complementaresGeralmente quando são feitas tabelas verdade manualmente, são construídas diversas colunas, uma para cada etapa da análise da proposição. Assim, coloquei nas resoluções dos primeiros exercícios também desta forma, para auxiliar a compreensão. No entanto, o *ttg* também resolveria a proposição caso apenas a mesma fosse passada, conforme exercícios mais ao final. **Exercício 1**: Construa a tabela verdade para as proposições e as classifique como tautologia, contradição ou contingência.a. $\neg p \lor \neg q$
###Code
exercicio01a = Truths(['p', 'q'], ['~p', '~q', '(~p) or (~q)'], ints=False)
df_style(exercicio01a)
exercicio01a.valuation()
###Output
_____no_output_____
###Markdown
b. $ [ (p \land \neg q) \lor \neg r] \land [ (\neg p \lor q) \land r ] $
###Code
exercicio01b = Truths(
['p', 'q', 'r'],
['~p', '~q', '~r', 'p and (~q)', '(~p) or (~q)',
'(p and (~q)) or (~r)','((~p) or q) and r',
'((p and (~q)) or (~r)) and (((~p) or q) and r)'],
ints=False)
df_style(exercicio01b)
exercicio01b.valuation()
###Output
_____no_output_____
###Markdown
c. $(p \land q) \rightarrow (p \lor q)$
###Code
exercicio01c = Truths(['p', 'q'],
['p and q', 'p or q', '(p and q) => (p or q)'],
ints=False)
df_style(exercicio01c)
exercicio01c.valuation()
###Output
_____no_output_____
###Markdown
d. $(p \land q) \lor r$
###Code
exercicio01d = Truths(['p', 'q', 'r'],
['p and q', '(p and q) or r'],
ints=False)
df_style(exercicio01d)
exercicio01d.valuation()
###Output
_____no_output_____
###Markdown
e. $(p \land q) \rightarrow p$
###Code
exercicio01e = Truths(['p', 'q'], ['p and q', '(p and q) => p'], ints=False)
df_style(exercicio01e)
exercicio01e.valuation()
###Output
_____no_output_____
###Markdown
f. $p \rightarrow (p \lor q)$
###Code
exercicio01f = Truths(['p', 'q'], ['p or q', 'p => (p or q)'], ints=False)
df_style(exercicio01f)
exercicio01f.valuation()
###Output
_____no_output_____
###Markdown
g. $ [ p \land (p \rightarrow q) ] \rightarrow q $
###Code
exercicio01g = Truths(['p', 'q'],
['p => q', 'p and (p => q)', '(p and (p => q)) => q'],
ints=False)
df_style(exercicio01g)
exercicio01g.valuation()
###Output
_____no_output_____
###Markdown
h. $ [ (p \rightarrow q) \land \neg q ] \rightarrow \neg q $
###Code
exercicio01h = Truths(
['p', 'q'],
['~q', 'p => q', '(p => q) and (~q)', '((p => q) and (~q)) => (~q)'],
ints=False)
df_style(exercicio01h)
exercicio01h.valuation()
###Output
_____no_output_____
###Markdown
i. $ (p \land q) \land \neg p $
###Code
exercicio01i = Truths(['p', 'q'],
['~p', 'p and q', '(p and q) and (~p)'],
ints=False)
df_style(exercicio01i)
exercicio01i.valuation()
###Output
_____no_output_____
###Markdown
j. $ [ (p \lor \neg q) \land r ] \land [ (p \land q) \lor \neg r ] $
###Code
exercicio01j = Truths(
['p', 'q', 'r'],
['~q', '~r', 'p or (~q)', '(p or (~q)) and r', 'p and q',
'(p and q) or (~r)', '((p or (~q)) and r) and ((p and q) or (~r))'],
ints=False)
df_style(exercicio01j)
exercicio01j.valuation()
###Output
_____no_output_____
###Markdown
k. $ [ (p \leftrightarrow q) \rightarrow r ] \leftrightarrow [ \neg (p \land r) \rightarrow q ] $
###Code
exercicio01k = Truths(
['p', 'q', 'r'],
['p = q', '(p = q) => r', 'p and r', '~(p and r)', '(~(p and r)) => q',
'((p = q) => r) = ((~(p and r)) => q)'],
ints=False)
df_style(exercicio01k)
exercicio01k.valuation()
###Output
_____no_output_____
###Markdown
**Exercício 2**: Sendo $p$ uma proposição de valor lógico verdadeiro e $q$ uma proposição de valor lógico falso, qual o valor lógico da proposição composta $R: (p \land \neg q) \rightarrow q$?
###Code
exercicio02 = Truths(['p', 'q'],
['~q', 'p and (~q)', '(p and (~q)) => q'],
ints=False)
df_style(exercicio02, hl_rows=[2])
# Falsa, conforme linha destacada.
###Output
_____no_output_____
###Markdown
**Exercício 3**: Atribua valor lógico verdadeiro ou falso a cada uma das afirmações a seguir:a. Se Marte é um planeta então $3 = 7 - 4$.b. A soma de dois números pares é um número par e $7^2 = 49$.c. $3=5$ se e somente se o urso é um animal invertebrado.d. Se $10^2 = 100$ então todo número inteiro é natural.e. $2 = 3^2 - 7$ ou a Terra é plana.f. $3 > 1$ e $4 > 2$g. $3 > 1$ ou $3 = 1$ Letra a:| Proposição | Texto | Valor lógico || -------- | ---------- | ---------- || $p$ | Marte é um planeta | V || $q$ | $3 = 7 - 4$ | V |
###Code
exercicio03a = Truths(['p', 'q'], ['p => q'], ints=False)
df_style(exercicio03a, hl_rows=[1])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra b:| Proposição | Texto | Valor lógico || -------- | ---------- | ---------- || $p$ | A soma de dois números pares é um número par | V || $q$ | $7^2 = 49$ | V |
###Code
exercicio03b = Truths(['p', 'q'], ['p and q'], ints=False)
df_style(exercicio03b, hl_rows=[1])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra c:| Proposição | Texto | Valor lógico || -------- | ---------- | ---------- || $p$ | $3 = 5$ | F || $q$ | Urso é um animal invertebrado | F |
###Code
exercicio03c = Truths(['p', 'q'], ['p = q'], ints=False)
df_style(exercicio03c, hl_rows=[4])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra d:| Proposição | Texto | Valor lógico || -------- | ---------- | ---------- || $p$ | $10^2 = 100$ | V || $q$ | Todo número inteiro é natural | F |
###Code
exercicio03d = Truths(['p', 'q'], ['p => q'], ints=False)
df_style(exercicio03d, hl_rows=[2])
# Falsa, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra e:| Proposição | Texto | Valor lógico || -------- | ---------- | ---------- || $p$ | $2 = 3^2 -7$ | V || $q$ | A Terra é plana | F |
###Code
exercicio03e = Truths(['p', 'q'], ['p or q'], ints=False)
df_style(exercicio03e, hl_rows=[2])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra f:| Proposição | Texto | Valor lógico || --------| -------| ----------|| $p$| 3 > 1|V|| $q$| 4 > 2|V|
###Code
exercicio03f = Truths(['p', 'q'], ['p and q'], ints=False)
df_style(exercicio03f, hl_rows=[1])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
Letra g:| Proposição | Texto| Valor lógico || -------- | ---------------| ---------- || $p$ | 3 > 1 | V || $q$ | 3 = 1 | F |
###Code
exercicio03g = Truths(['p', 'q'], ['p or q'], ints=False)
df_style(exercicio03g, hl_rows=[2])
# Verdadeira, conforme linha destacada.
###Output
_____no_output_____
###Markdown
**Exercício 4**: Sejam:- $p$: Londres é a capital da Inglaterra.- $q$: A Torre Eiffel situa-se em Londres.- $r$: O meridiano de Greenwich passa por Londres.Traduza para a linguagem natural cada uma das proposições compostas abaixo e determine o respectivo valor lógico.a. $\neg q \land \neg p$R.: Londres não é capital da Inglaterra e a Torre Eiffel não situa-se em Londres.b. $\neg q \lor \neg p$R.: A Torre Eiffel não situa-se em Londres ou Londres não é capital da Inglaterra.c. $\neg (p \land q)$R.: Londres não é a capital da Inglaterra ou a Torre Eiffel não situa-se em Londres. Outra possibilidade: Não é verdade que Londres é a capital da Inglaterra e a Torre Eiffel situa-se em Londres.d. $\neg p \lor r$R.: Londres não é a capital da Inglaterra ou o meridiano de Greenwich passa por Londres.Valores lógicos na linha destacada na próxima tabela.
###Code
exercicio04 = Truths(
['p', 'q', 'r'],
['(~p) and (~q)', '(~p) or (~q)', '~(p and q)', '(~p) or r'],
ints=False)
df_style(exercicio04, hl_rows=[3])
###Output
_____no_output_____
###Markdown
**Exercício 5**: Prove que uma condicional é equivalente a $\neg (p \land q)$
###Code
exercicio05 = Truths(['p', 'q'], ['~(p and (~q))', 'p => q'], ints=False)
df_style(exercicio05, hl_cols=['~(p and (~q))', 'p => q'])
# Repare que as colunas das proposições são iguais.
###Output
_____no_output_____
###Markdown
**Exercício 6**: Comprove que $\neg (p \rightarrow q)$ é equivalente a $p \land \neg q$
###Code
exercicio06 = Truths(['p', 'q'], ['~(p => q)', 'p and (~q)'], ints=False)
df_style(exercicio06, hl_cols=['~(p => q)', 'p and (~q)'])
# Repare que as colunas das proposições são iguais.
###Output
_____no_output_____
###Markdown
**Exercício 7**: Mostre simbolicamente que são logicamente equivalentes: "Se um aluno estuda, então ele é aprovado" e "Não é verdade que um aluno estuda e não é aprovado".Resposta:| Proposição | Texto | | -------- | ---------- | | $p$ | Um aluno estuda | | $q$ | O aluno é aprovado |
###Code
exercicio07 = Truths(['p', 'q'], ['p => q', '~(p and (~q))'], ints=False)
df_style(exercicio07, hl_cols=['p => q', '~(p and (~q))'])
# Repare que as colunas das proposições são iguais.
###Output
_____no_output_____
###Markdown
**Exercício 8**: Mostre simbolicamente que a negação de "Se um aluno estuda, então ele é aprovado" é "Há alunos que estudam e não são aprovados".Resposta:| Proposição | Texto | | -------- | ---------- | | $p$ | Um aluno estuda | | $q$ | O aluno é aprovado |
###Code
exercicio08 = Truths(['p', 'q'], ['p => q', 'p and (~q)'], ints=False)
df_style(exercicio08)
# Repare que as colunas das proposições são opostas.
###Output
_____no_output_____
###Markdown
**Exercício 9**: Considere a proposição: "Se o Edson se candidatar a presidente, então ele se elegerá". Em qual dos casos abaixo essa proposição condicional deve ser considerada falsa?a. O Edson se candidatou a presidente e se elegeu.b. O Edson se candidatou a presidente e não se elegeu.c. O Edson não se candidatou a presidente.Resposta:| Proposição | Texto | | -------- | ---------- | | $p$ | O Edson se candidatar a presidente | | $q$ | O Edson se eleger |
###Code
exercicio09 = Truths(['p', 'q'],
['p => q', 'p and q', 'p and (~q)', '~p'],
ints=False)
df_style(exercicio09, hl_cols=['p => q', 'p and (~q)'])
# Repare que as colunas das proposições destacadas são opostas.
# A primeira coluna destacada se refere ao enunciado.
# A segunda se refere à letra b. Logo a resposta é letra b.
###Output
_____no_output_____
###Markdown
**Exercício 10**: Considere a condicional: "Se o seu dente está cariado, você precisa de um dentista".a. Suponha que "o seu dente não está cariado e você precisa de um dentista". Isto significa uma negação da anterior?
###Code
exercicio10a = Truths(['p', 'q'], ['p => q', '(~p) and q'], ints=False)
df_style(exercicio10a)
# As colunas não são completamente opostas, logo não é negação.
###Output
_____no_output_____
###Markdown
b. Escreva uma proposição que não seja condicional e que corresponde à negação da proposição acima. Negação da condicional: $\neg (p \rightarrow q) \leftrightarrow p \land \neg q$Logo: O seu dente está cariado e você não precisa de um dentista. **Exercício 11**: Escreva na linguagem simbólica e verifique se são logicamente equivalentes as proposições "Se eu me chamo João, então eu passo no vestibular", e "Eu passo no vestibular ou não me chamo João". R.:| Proposição | Texto | | -------- | ---------- | | $p$ | Eu me chamo João | | $q$ | Eu passo no vestibuar |
###Code
exercicio11 = Truths(['p', 'q'], ['p => q', '(~p) or q'], ints=False)
df_style(exercicio11)
# Repare que as colunas das proposições são iguais.
###Output
_____no_output_____
###Markdown
**Exercício 12**: Sendo a proposição $p \rightarrow (r \lor s)$ falsa e a proposição $(q \land \neg s) \rightarrow p$ verdadeira, classifique em verdadeira ou falsa as afirmações $p$, $q$, $r$ e $s$.
###Code
exercicio12 = Truths(['p', 'q', 'r', 's'],
['p => (r or s)', '(q and (~s)) => p'],
ints=False)
df_style(exercicio12, hl_rows=[4, 8])
###Output
_____no_output_____
###Markdown
Conforme as linhas destacadas na tabela acima, temos:| Proposição | Valor lógico || ------ | ------- || $p$ | V || $q$ | V ou F || $r$ | F || $s$ | F | **Exercício 13**: Sabendo que as proposições $p$ e $q$ são verdadeiras e que a proposição $r$ é falsa, determine o valor lógico da seguinte proposição: $(\neg p \downarrow q) \land (q \uparrow \neg r)$
###Code
exercicio13 = Truths(['p', 'q', 'r'], ['((~p) nor q) and (q nand (~r))'], ints=False)
df_style(exercicio13, hl_rows=[2])
# Falso, conforme linha destacada.
###Output
_____no_output_____ |
labs24_notebooks/misc/DwellTime_FirstLook.ipynb | ###Markdown
Investigating Dwell Time Loading data
###Code
import requests
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Yesterday's full data pulled from api
###Code
url = 'http://sfmta-ds.eba-hqpuyrup.us-east-1.elasticbeanstalk.com/daily-general-json'
json = requests.get(url, params={'day': '2020-05-24'}).json()
# making df
df = pd.DataFrame(data=json)
# paring down to a couple of buses
# (most reports and second-most reports)
# on a single route to simplify
# and making sure we're sorted by time (stupid-check)
nbus = df[df['rid']=='NBUS']
nbus_highest = nbus[nbus['vid']==(nbus['vid'].value_counts().index[0])].sort_values('timestamp')
nbus_second = nbus[nbus['vid']==(nbus['vid'].value_counts().index[1])].sort_values('timestamp')
###Output
_____no_output_____
###Markdown
Engineering Dwell Time In Minutes (sort of)
###Code
def wrangle_bus(df):
    """Prep the report dataframe for a single bus.

    Converts raw timestamps to pandas Timestamps, adds an
    'adjusted_timestamp' column (report time minus the report's age in
    seconds), and naively estimates dwell as a running count of
    consecutive *stationary* check-ins (kph <= 0); any moving check-in
    (kph > 0) resets the count to 0.

    Parameters
    ----------
    df : pandas.DataFrame
        Reports for one vehicle with 'timestamp', 'age', 'kph', 'rid'
        and 'vid' columns. Mutated in place before the final column drop.

    Returns
    -------
    pandas.DataFrame
        The data with 'adjusted_timestamp' and 'dwell' added and the
        'age', 'rid' and 'vid' columns dropped.
    """
    # Vectorized conversion replaces the original per-row
    # pd.Timestamp/pd.Timedelta list comprehensions.
    timestamps = pd.to_datetime(df['timestamp'])
    df['adjusted_timestamp'] = timestamps - pd.to_timedelta(df['age'], unit='s')
    df['timestamp'] = timestamps
    # Running count of consecutive stationary check-ins. Using `else`
    # (rather than `elif speed > 0`) also covers NaN speeds, which
    # previously produced a too-short list and a length-mismatch error.
    dwell_count = 0
    dwell_totals = []
    for speed in df['kph']:
        if speed <= 0:
            dwell_count += 1
            dwell_totals.append(dwell_count)
        else:
            dwell_totals.append(0)
            dwell_count = 0
    df['dwell'] = dwell_totals
    return df.drop(columns=['age', 'rid', 'vid'])
# Scratch cell: sort a small list and show the ascending result.
nums = [2, 1, 3]
nums = sorted(nums)
print(nums)
nbus_highest_wrangled = wrangle_bus(nbus_highest)
nbus_highest_wrangled.head()
nbus_second_wrangled = wrangle_bus(nbus_second)
nbus_second_wrangled.head()
###Output
_____no_output_____
###Markdown
Visualizations Inbound
###Code
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20,10))
plt.xticks(rotation=15)
plt.grid()
sns.lineplot('timestamp', 'dwell', data=nbus_highest_wrangled[nbus_highest_wrangled['direction']=='NBUS_I_F00'])
sns.lineplot('timestamp', 'dwell', data=nbus_second_wrangled[nbus_second_wrangled['direction']=='NBUS_I_F00'])
plt.legend(labels=['8830','8852']);
###Output
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
###Markdown
Outbound
###Code
plt.figure(figsize=(20,10))
plt.xticks(rotation=15)
plt.grid()
sns.lineplot('timestamp', 'dwell', data=nbus_highest_wrangled[nbus_highest_wrangled['direction']=='NBUS_O_F00'])
sns.lineplot('timestamp', 'dwell', data=nbus_second_wrangled[nbus_second_wrangled['direction']=='NBUS_O_F00'])
plt.legend(labels=['8830','8852']);
###Output
_____no_output_____
###Markdown
Combined
###Code
# bus with most trips
plt.figure(figsize=(20,10))
plt.xticks(rotation=15)
plt.grid()
sns.lineplot('timestamp', 'dwell', data=nbus_highest_wrangled,
style='direction', hue='direction');
# bus with second-most trips
plt.figure(figsize=(20,10))
plt.xticks(rotation=15)
plt.grid()
sns.lineplot('timestamp', 'dwell', data=nbus_second_wrangled,
style='direction', hue='direction');
###Output
_____no_output_____ |
03-datalakes-spark/02-data-wranging-with-spark/1_procedural_vs_functional_in_python.ipynb | ###Markdown
Procedural ProgrammingThis notebook contains the code from the previous screencast. The code counts the number of times a song appears in the log_of_songs variable. You'll notice that the first time you run `count_plays("Despacito")`, you get the correct count. However, when you run the same code again `count_plays("Despacito")`, the results are no longer correct.This is because the global variable `play_count` stores the results outside of the count_plays function. InstructionsRun the code cells in this notebook to see the problem with
###Code
log_of_songs = [
"Despacito",
"Nice for what",
"No tears left to cry",
"Despacito",
"Havana",
"In my feelings",
"Nice for what",
"Despacito",
"All the stars"
]
# module-level counter -- deliberately global to demonstrate the lesson's bug
play_count = 0
def count_plays(song_title):
    """Count occurrences of song_title in log_of_songs.

    NOTE: this is the notebook's intentionally broken example of procedural
    state -- `play_count` is a global that is never reset, so a second call
    with the same title keeps accumulating and returns a wrong total.
    Do not "fix" it; the surrounding text relies on the faulty behavior.
    """
    global play_count
    for song in log_of_songs:
        if song == song_title:
            play_count = play_count + 1
    return play_count
count_plays("Despacito")
count_plays("Despacito")
###Output
_____no_output_____ |
03_detection_utils.ipynb | ###Markdown
detection_utils> utilities for loading `.weights` file and drawing of results for yolov3 hand detection model- note [darknet](https://github.com/AlexeyAB/darknet) is used to train the hand detection model- the training is not documented, but the result of the training is as follows
###Code
#hide
from nbdev.showdoc import *
###Output
_____no_output_____
###Markdown
taken from [here](https://machinelearningmastery.com/how-to-perform-object-detection-with-yolov3-in-keras/) with some methods heavily modified in the following cells code to extract and rectify bounding boxes from yolov3 detection model
###Code
#export
import struct
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D, add, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from numpy import expand_dims
import numpy as np
class BoundBox:
    """Axis-aligned bounding box with lazily computed best class label/score."""
    def __init__(self, xmin, ymin, xmax, ymax, objness=None, classes=None):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.objness = objness
        self.classes = classes
        # -1 is the "not yet computed" sentinel for both cached values
        self.label = -1
        self.score = -1

    def get_label(self):
        """Return (and cache) the index of the highest-probability class."""
        if self.label < 0:
            self.label = np.argmax(self.classes)
        return self.label

    def get_score(self):
        """Return (and cache) the probability of the best class."""
        if self.score < 0:
            self.score = self.classes[self.get_label()]
        return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
    """Decode one raw YOLOv3 output grid into a list of BoundBox objects.

    Parameters
    ----------
    netout : np.ndarray of shape (grid_h, grid_w, nb_box*(5+nb_class))
        Raw network output for one detection scale (modified in place).
    anchors : list of int
        Flat [w0, h0, w1, h1, w2, h2] anchor sizes in pixels for this scale.
    obj_thresh : float
        Minimum objectness score for a candidate box to be kept.
    net_h, net_w : int
        Network input height/width used to normalise anchor sizes.

    Returns
    -------
    list of BoundBox with coordinates in normalised [0, 1] image units.
    """
    grid_h, grid_w = netout.shape[:2]
    nb_box = 3  # YOLOv3 predicts 3 anchor boxes per grid cell
    netout = netout.reshape((grid_h, grid_w, nb_box, -1))
    nb_class = netout.shape[-1] - 5
    boxes = []
    # squash x/y offsets and objectness/class logits into (0, 1)
    netout[..., :2] = _sigmoid(netout[..., :2])
    netout[..., 4:] = _sigmoid(netout[..., 4:])
    # class confidence = objectness * class probability, zeroed below threshold
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
    netout[..., 5:] *= netout[..., 5:] > obj_thresh
    for i in range(grid_h * grid_w):
        row = i // grid_w  # bug fix: integer division (was float `/` then int())
        col = i % grid_w
        for b in range(nb_box):
            # 4th element is objectness score
            objectness = netout[row][col][b][4]
            # bug fix: the original `objectness.all() <= obj_thresh` compared a
            # boolean to a float and therefore only skipped zero-objectness
            # cells; this applies the threshold as intended.  Final detections
            # are unchanged because class scores below the threshold are
            # already zeroed above and filtered again in get_boxes().
            if objectness <= obj_thresh:
                continue
            # first 4 elements are x, y, w, and h
            x, y, w, h = netout[row][col][b][:4]
            x = (col + x) / grid_w  # center position, unit: image width
            y = (row + y) / grid_h  # center position, unit: image height
            w = anchors[2 * b + 0] * np.exp(w) / net_w  # unit: image width
            h = anchors[2 * b + 1] * np.exp(h) / net_h  # unit: image height
            # last elements are class probabilities
            classes = netout[row][col][b][5:]
            box = BoundBox(x - w/2, y - h/2, x + w/2, y + h/2, objectness, classes)
            boxes.append(box)
    return boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
    """Rescale normalised box coordinates in place to pixel units of the
    original image.

    The offset/scale terms are kept from the reference implementation;
    because new_w == net_w and new_h == net_h they reduce to offset 0 and
    scale 1, so each coordinate is simply multiplied by the corresponding
    image dimension and truncated to int.
    """
    new_w, new_h = net_w, net_h
    # the offsets/scales are loop-invariant, so compute them once
    x_offset, x_scale = (net_w - new_w) / 2. / net_w, float(new_w) / net_w
    y_offset, y_scale = (net_h - new_h) / 2. / net_h, float(new_h) / net_h
    for box in boxes:
        box.xmin = int((box.xmin - x_offset) / x_scale * image_w)
        box.xmax = int((box.xmax - x_offset) / x_scale * image_w)
        box.ymin = int((box.ymin - y_offset) / y_scale * image_h)
        box.ymax = int((box.ymax - y_offset) / y_scale * image_h)
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
    """Intersection-over-union of two BoundBox-like objects.

    Uses `_interval_overlap` for the 1-D overlaps along each axis.
    NOTE(review): divides by the union area -- if both boxes are degenerate
    (zero area, zero overlap) this raises ZeroDivisionError; confirm inputs
    are always non-degenerate.
    """
    intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
    intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
    intersect = intersect_w * intersect_h
    # union = area1 + area2 - intersection
    w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
    w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
    union = w1*h1 + w2*h2 - intersect
    return float(intersect) / union
def do_nms(boxes, nms_thresh):
    """Per-class non-maximum suppression, performed in place.

    For each class, boxes are visited in descending class-score order; every
    lower-scoring box whose IoU with a kept box is >= nms_thresh has its
    score for that class zeroed.  Boxes are suppressed (score set to 0),
    never removed from the list.
    """
    if len(boxes) > 0:
        nb_class = len(boxes[0].classes)
    else:
        # nothing to suppress
        return
    for c in range(nb_class):
        # indices sorted by descending score for class c
        sorted_indices = np.argsort([-box.classes[c] for box in boxes])
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            # already suppressed for this class -- skip
            if boxes[index_i].classes[c] == 0: continue
            for j in range(i+1, len(sorted_indices)):
                index_j = sorted_indices[j]
                # suppress the overlapping lower-scored box for this class
                if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
                    boxes[index_j].classes[c] = 0
# load and prepare an image
def load_image_pixels(img_path, shape):
    """Load an image as a normalised (1, h, w, 3) batch plus its original size.

    Returns (pixels, original_width, original_height); pixel values are
    float32 scaled to [0, 1], with a leading batch axis for model.predict.
    """
    # read once at native resolution just to record the original dimensions
    original = load_img(img_path)
    width, height = original.size
    # reload resized to the network input shape, then scale to [0, 1]
    pixels = img_to_array(load_img(img_path, target_size=shape)).astype('float32')
    pixels /= 255.0
    # prepend the batch dimension
    return expand_dims(pixels, 0), width, height
# get all of the results above a threshold
def get_boxes(boxes, labels, thresh):
    """Collect every (box, label, score%) whose class score exceeds thresh.

    A single box may appear once per qualifying label, which is why the
    label loop deliberately does not break after the first match.
    """
    v_boxes, v_labels, v_scores = [], [], []
    for box in boxes:
        for idx, label in enumerate(labels):
            score = box.classes[idx]
            if score > thresh:
                v_boxes.append(box)
                v_labels.append(label)
                v_scores.append(score * 100)
    return v_boxes, v_labels, v_scores
def _conv_block(inp, convs, skip=True):
    """Stack Conv2D (+BatchNorm +LeakyReLU) layers described by `convs`.

    Each dict in `convs` supplies filter/kernel/stride/bnorm/leaky plus the
    darknet layer index used to name the layer (WeightReader matches on
    those names).  When `skip` is True, the activation two entries before
    the end of `convs` is added to the final output (residual connection).
    """
    x = inp
    count = 0
    for conv in convs:
        # remember the activation two layers before the end for the skip add
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x
def make_yolov3_model():
    """Build the 106-layer YOLOv3 Keras graph for single-class detection.

    Accepts any (h, w, 3) input and returns three detection heads
    (yolo_82, yolo_94, yolo_106) at coarse, medium and fine resolution.
    Each head has 18 filters = 3 anchors * (5 + 1 class), i.e. one 'hand'
    class.  Layers are named with darknet layer indices so WeightReader
    can map a darknet `.weights` file onto them.
    """
    input_image = Input(shape=(None, None, 3))
    # Layer 0 => 4
    x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
                                  {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
                                  {'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
                                  {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
    # Layer 5 => 8
    x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
                        {'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
                        {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
    # Layer 9 => 11
    x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
                        {'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
    # Layer 12 => 15
    x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
                        {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
                        {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
    # Layer 16 => 36
    for i in range(7):
        x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
                            {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
    # fine-resolution feature map, routed to the stride-8 head below
    skip_36 = x
    # Layer 37 => 40
    x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
    # Layer 41 => 61
    for i in range(7):
        x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
                            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
    # medium-resolution feature map, routed to the stride-16 head below
    skip_61 = x
    # Layer 62 => 65
    x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
                        {'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
    # Layer 66 => 74
    for i in range(3):
        x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
                            {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
    # Layer 75 => 79
    x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
                        {'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
                        {'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
                        {'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
    # Layer 80 => 82 -- coarse detection head (no bnorm/leaky on the output conv)
    yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
                              {'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
    # Layer 83 => 86 -- upsample and concatenate with the stride-16 features
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])
    # Layer 87 => 91
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
                        {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
                        {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
    # Layer 92 => 94 -- medium detection head
    yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
                              {'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
    # Layer 95 => 98 -- upsample and concatenate with the stride-8 features
    x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])
    # Layer 99 => 106 -- fine detection head
    yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
                               {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
                               {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
                               {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
                               {'filter': 18, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
    model = Model(input_image, [yolo_82, yolo_94, yolo_106])
    return model
###Output
_____no_output_____
###Markdown
loading yolov3 `.weights` file
###Code
#export
class WeightReader:
    """Parses a darknet `.weights` binary and copies its parameters into a
    Keras YOLOv3 model whose layers are named conv_<i>/bnorm_<i>."""
    def __init__(self, weight_file):
        """Read the file header and buffer all remaining weights as float32."""
        with open(weight_file, 'rb') as w_f:
            # header: major/minor/revision version numbers (int32 each)
            major, = struct.unpack('i', w_f.read(4))
            minor, = struct.unpack('i', w_f.read(4))
            revision, = struct.unpack('i', w_f.read(4))
            # newer files store the "images seen" counter as int64, older as int32
            if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
                w_f.read(8)
            else:
                w_f.read(4)
            # NOTE(review): `transpose` is computed but never used below
            transpose = (major > 1000) or (minor > 1000)
            binary = w_f.read()
        # sequential read cursor into the flat weight buffer
        self.offset = 0
        self.all_weights = np.frombuffer(binary, dtype='float32')
    def read_bytes(self, size):
        """Return the next `size` float32 values and advance the cursor."""
        self.offset = self.offset + size
        return self.all_weights[self.offset-size:self.offset]
    def load_weights(self, model):
        """Walk darknet layer indices 0..105 and copy conv/batch-norm
        parameters into the correspondingly named Keras layers."""
        for i in range(106):
            try:
                conv_layer = model.get_layer('conv_' + str(i))
                # print("loading weights of convolution #" + str(i))
                if i not in [81, 93, 105]:
                    # every conv except the three detection heads carries batch norm
                    norm_layer = model.get_layer('bnorm_' + str(i))
                    size = np.prod(norm_layer.get_weights()[0].shape)
                    beta = self.read_bytes(size) # bias
                    gamma = self.read_bytes(size) # scale
                    mean = self.read_bytes(size) # mean
                    var = self.read_bytes(size) # variance
                    weights = norm_layer.set_weights([gamma, beta, mean, var])
                if len(conv_layer.get_weights()) > 1:
                    # conv with bias (the detection heads)
                    bias = self.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
                    kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
                    # darknet stores kernels (out, in, h, w); Keras wants (h, w, in, out)
                    kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
                    kernel = kernel.transpose([2,3,1,0])
                    conv_layer.set_weights([kernel, bias])
                else:
                    kernel = self.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
                    kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
                    kernel = kernel.transpose([2,3,1,0])
                    conv_layer.set_weights([kernel])
            except ValueError:
                # darknet index with no conv layer (shortcut/route/upsample): skip
                # print("no convolution #" + str(i))
                pass
    def reset(self):
        """Rewind the read cursor to the start of the weight buffer."""
        self.offset = 0
###Output
_____no_output_____
###Markdown
helper method for loading detection and classification model and saving plots
###Code
#export
def load_model(path, weights=False):
    """Load a detection model from `path`.

    With weights=True, `path` is a darknet `.weights` file poured into a
    freshly built YOLOv3 graph; otherwise it is a saved Keras model.
    """
    print('loading model...\r', end='')
    if not weights:
        model = tf.keras.models.load_model(path)
    else:
        model = make_yolov3_model()
        WeightReader(path).load_weights(model)
    print('model loaded!')
    return model
###Output
_____no_output_____
###Markdown
generates boxes, and alphabet label if classifier is specified
###Code
#export
import cv2
from asl_detection.classification.utils import *
from tensorflow.keras.applications.mobilenet import MobileNet
import matplotlib.gridspec as gridspec
def generate_boxes(image,
                   v_boxes,
                   v_labels,
                   v_scores,
                   figsize,
                   save_dir,
                   classifier):
    """Convert detected boxes into drawable results.

    Returns a dict with:
      'coordinates' -- [x, y, width, height] per box;
      'labels'      -- "label (score)" strings (from the classifier's
                        prediction on the crop when `classifier` is given);
      'hands', 'heatmaps', 'overlays' -- per-box crop/heatmap/overlay
                        arrays, populated only when `classifier` is given.
    `figsize` and `save_dir` are accepted but not used here.

    NOTE(review): a new Classifier and MobileNet feature extractor are
    rebuilt inside the loop for every box -- expensive; confirm whether
    this can be hoisted out of the loop.
    """
    coordinates, labels, hands, heatmaps, overlays = [], [], [], [], []
    for i in range(len(v_boxes)):
        box = v_boxes[i]
        # get coordinates
        y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
        # calculate width and height of the box
        width, height = x2 - x1, y2 - y1
        coordinates.append([x1, y1, width, height])
        if not classifier:
            # draw text and score in top left corner
            label = "%s (%.3f)" % (v_labels[i], v_scores[i])
            labels.append(label)
        else:
            # crop the detected hand and resize to the classifier's input size
            hand = cv2.resize(image[y1:y2+1, x1:x2+1], (224, 224))
            hand = hand.reshape(1, 224, 224, 3)
            hands.append(hand)
            model = Classifier()
            model.classifier = classifier
            # feature extractor truncated 6 layers from the top for heatmaps
            model.feature_extractor = MobileNet(input_shape = (224, 224, 3), include_top=True,weights ='imagenet')
            output = model.feature_extractor.layers[-6].output
            model.feature_extractor = tf.keras.Model(model.feature_extractor.inputs, output)
            heatmap_result = model.generate_heat_map(hand)
            # draw text and score in top left corner
            label = "%s (%.3f)" % (heatmap_result['label'], v_scores[i])
            labels.append(label)
            heatmaps.append(heatmap_result['heatmap'])
            overlays.append(heatmap_result['overlay'])
    results = {'coordinates': coordinates, 'labels': labels, 'hands': hands, 'heatmaps': heatmaps, 'overlays': overlays}
    return results
###Output
_____no_output_____
###Markdown
draws the above results and saves if specfied
###Code
#export
from matplotlib.patches import Rectangle
import asl_detection.save
def _draw_boxes(img_path,
                v_boxes,
                v_labels,
                v_scores,
                figsize,
                save_dir,
                classifier,
                show_classes):
    """Render detections for one image.

    Draws white bounding boxes and labels on the image; when `show_classes`
    is True (requires `classifier`) it also lays out each cropped hand with
    its heatmap and overlay in a grid.  Saves the figure when `save_dir`
    is given.

    Raises
    ------
    ValueError
        If show_classes is requested without a classifier.
    """
    if show_classes and classifier == None:
        print('No classifer, cannot display classes')
        # bug fix: a bare `raise` outside an except block raised
        # RuntimeError('No active exception to re-raise'); raise an
        # explicit, meaningful exception instead.
        raise ValueError('cannot show classes without a classifier')
    # load the image as a float array scaled to [0, 1]
    image = load_img(img_path)
    image = img_to_array(image).astype('float32')/255.
    results = generate_boxes(image,
                             v_boxes,
                             v_labels,
                             v_scores,
                             figsize,
                             save_dir,
                             classifier)
    f = plt.figure(figsize=figsize)
    if not show_classes:
        plt.imshow(image)
        # get the context for drawing boxes
        ax = plt.gca()
        for i, label in enumerate(results['labels']):
            x1, y1, width, height = results['coordinates'][i]
            # create the shape
            rect = Rectangle((x1, y1), width, height, fill=False, color='white')
            # draw the box and its label just above the top-left corner
            ax.add_patch(rect)
            plt.text(x1-10, y1-10, label, color='white', fontsize=20)
    else:
        # detection panel spans the top two rows of a 4x5 grid
        gridspec.GridSpec(4, 5)
        plt.subplot2grid((4, 5), (0, 0), colspan=5, rowspan=2)
        ax = plt.gca()
        plt.imshow(image)
        for i, label in enumerate(results['labels']):
            x1, y1, width, height = results['coordinates'][i]
            rect = Rectangle((x1, y1), width, height, fill=False, color='white')
            ax.add_patch(rect)
            plt.text(x1-10, y1-10, label, color='white', fontsize=20)
        # one row per detected hand: crop, heatmap, overlay
        for i, hand in enumerate(results['hands']):
            plt.subplot2grid((4, 5), (i+2, 1))
            plt.imshow(hand.reshape(224, 224, 3))
            plt.title(results['labels'][i])
            plt.subplot2grid((4, 5), (i+2, 2))
            plt.title('heatmap')
            plt.imshow(results['heatmaps'][i].reshape(224, 224, 3))
            plt.subplot2grid((4, 5), (i+2, 3))
            plt.title('overlay')
            plt.imshow(results['overlays'][i].reshape(224, 224, 3))
    # show the plot
    f.tight_layout()
    plt.show()
    plt.close()
    if save_dir:
        asl_detection.save.save(save_dir, 'detection', fig=f)
###Output
_____no_output_____
###Markdown
wrapper that passes intermediate results into aforementioned methods and draws the boxes
###Code
#export
import os
def draw_boxes(hand_detector,
               img_paths,
               save_dir=None,
               classifier=None,
               show_classes=False,
               class_threshold=.6,
               nms_thresh=.6,
               figsize=(10, 10)):
    """Run the YOLOv3 hand detector on each image path and plot the results.

    Parameters
    ----------
    hand_detector : Keras model returning the three YOLOv3 output grids
    img_paths : iterable of image file paths
    save_dir : directory to save figures into (None = do not save)
    classifier : optional hand-sign classifier used to label the crops
    show_classes : also render per-hand crops/heatmaps (needs `classifier`)
    class_threshold : minimum class confidence to keep a detection
    nms_thresh : IoU threshold for non-maximum suppression
    figsize : matplotlib figure size
    """
    # define the expected input shape for the model
    input_w, input_h = 416, 416
    # define the anchors: (w, h) pairs per output scale, coarse -> fine
    anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
    for img_path in img_paths:
        # load and prepare image
        image, image_w, image_h = load_image_pixels(img_path, (input_w, input_h))
        yhat = hand_detector.predict(image)
        # define the probability threshold for detected objects
        boxes = list()
        for i in range(len(yhat)):
            # decode the output of the network
            boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
        # correct the sizes of the bounding boxes for the shape of the image
        correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
        # suppress non-maximal boxes
        do_nms(boxes, nms_thresh)
        # define the labels (single 'hand' class)
        labels = ['hand']
        # get the details of the detected objects
        v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
        _draw_boxes(img_path,
                    v_boxes,
                    v_labels,
                    v_scores,
                    figsize,
                    save_dir,
                    classifier=classifier,
                    show_classes=show_classes)
###Output
_____no_output_____ |
JewelsandStones.ipynb | ###Markdown
Jewels and StonesYou're given strings J representing the types of stones that are jewels, and S representing the stones you have. Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.The letters in J are guaranteed distinct, and all characters in J and S are letters. Letters are case sensitive, so "a" is considered a different type of stone from "A".Example 1:Input: J = "aA", S = "aAAbbbb"Output: 3Example 2:Input: J = "z", S = "ZZ"Output: 0Note:S and J will consist of letters and have length at most 50.The characters in J are distinct.[Jewels and Stones](https://leetcode.com/problems/jewels-and-stones/)
###Code
class Solution:
    """LeetCode 771 -- Jewels and Stones."""
    def numJewelsInStones(self, J, S):
        """Count how many stones in S are jewel types listed in J
        (case sensitive).

        :type J: str
        :type S: str
        :rtype: int
        """
        # set lookup makes each membership test O(1)
        jewel_types = set(J)
        return sum(1 for stone in S if stone in jewel_types)
###Output
_____no_output_____ |
1-Lessons/Lesson19/PsuedoLesson/.ipynb_checkpoints/Lesson19-Dev-checkpoint.ipynb | ###Markdown
Data Modeling: Regression ApproachRegression is a basic and commonly used type of predictive analysis. The overall idea of regression is to assess: - does a set of predictor/explainatory variables (features) do a good job in predicting an outcome (dependent/response) variable? - Which explainatory variables (features) in particular are significant predictors of the outcome variable, and in what way do they–indicated by the magnitude and sign of the beta estimates–impact the outcome variable? - What is the estimated(predicted) value of the response under various excitation (explainatory) variable values?- What is the uncertainty involved in the prediction?These regression estimates are used to explain the relationship between one dependent variable and one or more independent variables. The simplest form is a linear regression equation with one dependent(response) and one independent(explainatory) variable is defined by the formula $y_i = \beta_0 + \beta_1*x_i$, where $y_i$ = estimated dependent(response) variable value, $\beta_0$ = constant(intercept), $\beta_1$ = regression coefficient (slope), and $x_i$ = independent(predictor) variable value Outline- Linear Regression – purpose- Background and simple by-hand examples- Linear systems representation(s) Objectives- Create linear regression models from data using primitive python- Create linear regression models from data using NumPy and Pandas tools- Create presentation-quality graphs and charts for reporting results Computational Thinking|Description|Computational Thinking Concept||:---|:---||Linear Model|Abstraction||Response and Explanatory Variables|Decomposition||Primitive arrays: vectors and matrices|Data Representation||NumPy arrays: vectors and matrices|Data Representation| Fundamental Questions- What is regression used for?- Why is it useful?Three major uses for regression analysis are (1) determining the strength of predictors, (2) forecasting an effect, and (3) trend forecasting.First, the regression 
might be used to identify the strength of the effect that the independent variable(s) have on a dependent variable. Typical questions are what is the strength of relationship between dose and effect, sales and marketing spending, or age and income.Second, it can be used to forecast effects or impact of changes. That is, the regression analysis helps us to understand how much the dependent variable changes with a change in one or more independent variables. A typical question is, “how much additional sales income do I get for each additional $1000 spent on marketing?”Third, regression analysis predicts trends and future values. The regression analysis can be used to get point estimates. A typical question is, “what will the price of gold be in 6 months?” Consider the image below from a Texas Instruments Calculator user manual![](ti-manual.png)In the context of our class, the straight solid line is the `Data Model` whose equation is$Y = \beta_0 + \beta_1*X$.The ordered pairs $(x_i,y_i)$ in the scatterplot are the observation (or training set).As depicted here $Y$ is the response to different values of the explainitory variable $X$. The typical convention is response on the up-down axis, but not always.The model parameters are $\beta_0$ and $\beta_1$ ; once known can estimate (predict) response to (as yet) unobserved values of $x$Classically, the normal equations are evaluated to find the model parameters:$\beta_1 = \frac{\sum x\sum y~-~N\sum xy}{(\sum x)^2~-~N\sum x^2}$ and $\beta_0 = \bar y - \beta_1 \bar x$ Classical Regression by Normal EquationsWe will illustrate the classical approach to finding the slope and intercept using the normal equations first a plotting function, then we will use the values from the Texas Instruments TI-55 user manual.
###Code
### Lets Make a Plotting Package
def makeAbear(xvalues,yvalues,xleft,yleft,xright,yright,xlab,ylab,title):
    """Scatter-plot (xvalues, yvalues) and overlay a dashed red reference
    line from (xleft, yleft) to (xright, yright), with labels and a title."""
    # plotting function dependent on matplotlib installed above
    # xvalues, yvalues == data pairs to scatterplot; FLOAT
    # xleft,yleft == left endpoint line to draw; FLOAT
    # xright,yright == right endpoint line to draw; FLOAT
    # xlab,ylab == axis labels, STRINGS!!
    # title == Plot title, STRING
    import matplotlib.pyplot
    matplotlib.pyplot.scatter(xvalues,yvalues)
    # NOTE: color="red" overrides the 'k' (black) in the 'k--' format string,
    # so the reference line renders as a red dashed line
    matplotlib.pyplot.plot([xleft, xright], [yleft, yright], 'k--', lw=2, color="red")
    matplotlib.pyplot.xlabel(xlab)
    matplotlib.pyplot.ylabel(ylab)
    matplotlib.pyplot.title(title)
    matplotlib.pyplot.show()
    return
# Make two lists
sample_length = [101.3,103.7,98.6,99.9,97.2,100.1]
sample_weight = [609,626,586,594,579,605]
# We will assume weight is the explainatory variable, and it is to be used to predict length.
makeAbear(sample_weight, sample_length,580,96,630,106,'Weight (g)','Length (cm)','Length versus Weight for \n NASA CF0132 Fiber Reinforced Polymer')
###Output
_____no_output_____
###Markdown
Notice the dashed line: we supplied only two (x, y) pairs to plot it, so let's put it under the microscope and find where it came from.
###Code
def myline(slope, intercept, value1, value2):
    '''Returns a tuple ([x1,x2],[y1,y2]) from y=slope*value+intercept'''
    # evaluate the line at the two requested x locations
    xs = [value1, value2]
    ys = [slope * v + intercept for v in xs]
    return (xs, ys)
slope = 0.129 #0.13
intercept = 22.813 # 23
xlow = 580
xhigh = 630
object = myline(slope,intercept,xlow,xhigh)
xone = object[0][0]; xtwo = object[0][1]; yone = object[1][0]; ytwo = object[1][1]
makeAbear(sample_weight, sample_length,xone,yone,xtwo,ytwo,'Weight (g)','Length (cm)','Length versus Weight for \n NASA CF0132 Fiber Reinforced Polymer')
print(xone,yone)
print(xtwo,ytwo)
# Evaluate the normal equations
sumx = 0.0
sumy = 0.0
sumxy = 0.0
sumx2 = 0.0
sumy2 = 0.0
for i in range(len(sample_weight)):
sumx = sumx + sample_weight[i]
sumx2 = sumx2 + sample_weight[i]**2
sumy = sumy + sample_length[i]
sumy2 = sumy2 + sample_length[i]**2
sumxy = sumxy + sample_weight[i]*sample_length[i]
b1 = (sumx*sumy - len(sample_weight)*sumxy)/(sumx**2-len(sample_weight)*sumx2)
b0 = sumy/len(sample_length) - b1* (sumx/len(sample_weight))
lineout = ("Linear Model is y=%.3f" % b1) + ("x + %.3f" % b0)
print("Linear Model is y=%.3f" % b1 ,"x + %.3f" % b0)
slope = 0.129 #0.129
intercept = 22.813 # 22.813
xlow = 580
xhigh = 630
object = myline(slope,intercept,xlow,xhigh)
xone = object[0][0]; xtwo = object[0][1]; yone = object[1][0]; ytwo = object[1][1]
makeAbear(sample_weight, sample_length,xone,yone,xtwo,ytwo,'Weight (g)','Length (cm)','Length versus Weight for \n NASA CF0132 Fiber Reinforced Polymer')
###Output
_____no_output_____
###Markdown
Where do these normal equations come from?Consider our linear model $y = \beta_0 + \beta_1 \cdot x + \epsilon$. Where $\epsilon$ is the error in the estimate. If we square each error and add them up (for our training set) we will have $\sum \epsilon^2 = \sum (y_i - \beta_0 - \beta_1 \cdot x_i)^2 $. Our goal is to minimize this error by our choice of $\beta_0 $ and $ \beta_1 $The necessary and sufficient conditions for a minimum is that the first partial derivatives of the `error` as a function must vanish (be equal to zero). We can leverage that requirement as$\frac{\partial(\sum \epsilon^2)}{\partial \beta_0} = \frac{\partial{\sum (y_i - \beta_0 - \beta_1 \cdot x_i)^2}}{\partial \beta_0} = - \sum 2[y_i - \beta_0 + \beta_1 \cdot x_i] = -2(\sum_{i=1}^n y_i - n \beta_0 - \beta_1 \sum_{i=1}^n x_i) = 0 $and$\frac{\partial(\sum \epsilon^2)}{\partial \beta_1} = \frac{\partial{\sum (y_i - \beta_0 - \beta_1 \cdot x_i)^2}}{\partial \beta_1} = - \sum 2[y_i - \beta_0 + \beta_1 \cdot x_i]x_i = -2(\sum_{i=1}^n x_i y_i - n \beta_0 \sum_{i=1}^n x_i - \beta_1 \sum_{i=1}^n x_i^2) = 0 $Solving the two equations for $\beta_0$ and $\beta_1$ produces the normal equations (for linear least squares), which leads to$\beta_1 = \frac{\sum x\sum y~-~n\sum xy}{(\sum x)^2~-~n\sum x^2}$$\beta_0 = \bar y - \beta_1 \bar x$ Lets consider a more flexible way by fitting the data model using linear algebra instead of the summation notation. 
Computational Linear Algebra We will start again with our linear data model$y_i = \beta_0 + \beta_1 \cdot x_i + \epsilon_i$ then replace with vectors as\begin{gather}\mathbf{Y}=\begin{pmatrix}y_1 \\y_2 \\\vdots \\y_n \\\end{pmatrix}\end{gather}\begin{gather}\mathbf{\beta}=\begin{pmatrix}\beta_0 \\\beta_1 \\\end{pmatrix}\end{gather}\begin{gather}\mathbf{X}=\begin{pmatrix}1 & x_1 \\1 & x_2 \\\vdots \\1 & x_n \\\end{pmatrix}\end{gather}\begin{gather}\mathbf{\epsilon}=\begin{pmatrix}\epsilon_1 \\\epsilon_2 \\\vdots \\\epsilon_n \\\end{pmatrix}\end{gather}So our system can now be expressed in matrix-vector form as$\mathbf{Y}=\mathbf{X}\mathbf{\beta}+\mathbf{\epsilon}$ if we perfrom the same vector calculus as before we will end up with a result where pre-multiply by the transpose of $\mathbf{X}$ we will have a linear system in $\mathbf{\beta}$ which we can solve using Gaussian reduction, or LU decomposition or some other similar method.The resulting system (that minimizes $\mathbf{\epsilon^T}\mathbf{\epsilon}$) is$\mathbf{X^T}\mathbf{Y}=\mathbf{X^T}\mathbf{X}\mathbf{\beta}$ and solving for the parameters gives$\mathbf{\beta}=(\mathbf{X^T}\mathbf{X})^{-1}\mathbf{X^T}\mathbf{Y}$So lets apply it to our example - what follows is mostly in python primative
###Code
# linearsolver with pivoting adapted from
# https://stackoverflow.com/questions/31957096/gaussian-elimination-with-pivoting-in-python/31959226
def linearsolver(A,b):
    """Solve the square linear system A x = b by Gaussian elimination
    with partial pivoting.

    Parameters
    ----------
    A : list[list[float]] -- n x n coefficient matrix (not modified)
    b : list[float]       -- right-hand-side vector of length n (not modified)

    Returns
    -------
    list[float] -- the solution vector x
    """
    n = len(A)
    # bug fix: the original appended b's entries directly into A's rows,
    # destroying the caller's matrix; work on an augmented copy instead
    M = [list(A[i]) + [b[i]] for i in range(n)]
    # row reduction with pivots
    for k in range(n):
        for i in range(k,n):
            # swap in the row with the larger pivot candidate
            if abs(M[i][k]) > abs(M[k][k]):
                M[k], M[i] = M[i],M[k]
        for j in range(k+1,n):
            q = float(M[j][k]) / M[k][k]
            for m in range(k, n+1):
                M[j][m] -= q * M[k][m]
    # allocate space for result
    x = [0 for i in range(n)]
    # back-substitution
    x[n-1] = float(M[n-1][n])/M[n-1][n-1]
    for i in range(n-2,-1,-1):
        z = sum(float(M[i][j])*x[j] for j in range(i+1,n))
        x[i] = float(M[i][n] - z)/M[i][i]
    # return result
    return(x)
#######
# matrix multiply script
def mmult(amatrix,bmatrix,rowNumA,colNumA,rowNumB,colNumB):
    """Return the rowNumA x colNumB product of amatrix (rowNumA x colNumA)
    and bmatrix (rowNumB x colNumB), as nested lists."""
    product = [[0 for _ in range(colNumB)] for _ in range(rowNumA)]
    for r in range(rowNumA):
        for c in range(colNumB):
            # dot product of row r of A with column c of B
            total = 0
            for k in range(colNumA):
                total += amatrix[r][k] * bmatrix[k][c]
            product[r][c] = total
    return(product)
# matrix vector multiply script
def mvmult(amatrix,bvector,rowNumA,colNumA):
    """Multiply a matrix (nested lists) by a vector; return a list of length rowNumA."""
    return [
        sum(amatrix[i][j] * bvector[j] for j in range(colNumA))
        for i in range(rowNumA)
    ]
# Ordinary least squares via the normal equations, using the hand-rolled
# matrix helpers above: build X = [1 | weight] and solve
# (X^T X) beta = (X^T y) for [intercept, slope].
# NOTE(review): relies on sample_weight, sample_length, myline and makeAbear
# being defined in earlier notebook cells -- confirm they are in scope.
colNumX=2 #  columns of the design matrix: constant term + one regressor
rowNumX=len(sample_weight)
xmatrix = [[1 for j in range(colNumX)]for i in range(rowNumX)]   # X, pre-filled with ones
xtransp = [[1 for j in range(rowNumX)]for i in range(colNumX)]   # X^T kept explicitly
yvector = [0 for i in range(rowNumX)]
for irow in range(rowNumX):
    # second column of X (second row of X^T) carries the regressor; y is length
    xmatrix[irow][1]=sample_weight[irow]
    xtransp[1][irow]=sample_weight[irow]
    yvector[irow]  =sample_length[irow]
xtx = [[0 for j in range(colNumX)]for i in range(colNumX)]
xty = []
xtx = mmult(xtransp,xmatrix,colNumX,rowNumX,rowNumX,colNumX)  # X^T X (2x2)
xty = mvmult(xtransp,yvector,colNumX,rowNumX)                 # X^T y (length 2)
beta = []
#solve XtXB = XtY for B
beta = linearsolver(xtx,xty) #Solve the linear system
slope = beta[1] #0.129
intercept = beta[0] # 22.813
# plotting window along the weight axis
xlow = 580
xhigh = 630
object = myline(slope,intercept,xlow,xhigh)  # NOTE(review): name shadows the builtin `object`
xone = object[0][0]; xtwo = object[0][1]; yone = object[1][0]; ytwo = object[1][1]
makeAbear(sample_weight, sample_length,xone,yone,xtwo,ytwo,'Weight (g)','Length (cm)','Length versus Weight for \n NASA CF0132 Fiber Reinforced Polymer')
beta
###Output
_____no_output_____
###Markdown
What's the Value of the Computational Linear Algebra? The value comes when we have more explanatory variables, and we may want to deal with curvature.
###Code
# Make two lists
# Toy data: 5 points used to contrast a linear fit with a quadratic fit.
yyy = [0,0,1,1,3]
xxx = [-2,-1,0,1,2]
# hand-guessed line, plotted first for comparison
slope = 0.5 #0.129
intercept = 1 # 22.813
xlow = -3
xhigh = 3
object = myline(slope,intercept,xlow,xhigh)  # NOTE(review): shadows the builtin `object`
xone = object[0][0]; xtwo = object[0][1]; yone = object[1][0]; ytwo = object[1][1]
makeAbear(xxx, yyy,xone,yone,xtwo,ytwo,'xxx','yyy','yyy versus xxx')
# ---- linear least-squares fit (two columns: constant + x) ----
colNumX=2 #
rowNumX=len(xxx)
xmatrix = [[1 for j in range(colNumX)]for i in range(rowNumX)]
xtransp = [[1 for j in range(rowNumX)]for i in range(colNumX)]
yvector = [0 for i in range(rowNumX)]
for irow in range(rowNumX):
    xmatrix[irow][1]=xxx[irow]
    xtransp[1][irow]=xxx[irow]
    yvector[irow]  =yyy[irow]
xtx = [[0 for j in range(colNumX)]for i in range(colNumX)]
xty = []
xtx = mmult(xtransp,xmatrix,colNumX,rowNumX,rowNumX,colNumX)
xty = mvmult(xtransp,yvector,colNumX,rowNumX)
beta = []
#solve XtXB = XtY for B
beta = linearsolver(xtx,xty) #Solve the linear system
slope = beta[1] #0.129
intercept = beta[0] # 22.813
xlow = -3
xhigh = 3
object = myline(slope,intercept,xlow,xhigh)
xone = object[0][0]; xtwo = object[0][1]; yone = object[1][0]; ytwo = object[1][1]
makeAbear(xxx, yyy,xone,yone,xtwo,ytwo,'xxx','yyy','yyy versus xxx')
# ---- quadratic fit: add an x**2 column to the design matrix ----
colNumX=3 #
rowNumX=len(xxx)
xmatrix = [[1 for j in range(colNumX)]for i in range(rowNumX)]
xtransp = [[1 for j in range(rowNumX)]for i in range(colNumX)]
yvector = [0 for i in range(rowNumX)]
for irow in range(rowNumX):
    xmatrix[irow][1]=xxx[irow]
    xmatrix[irow][2]=xxx[irow]**2
    xtransp[1][irow]=xxx[irow]
    xtransp[2][irow]=xxx[irow]**2
    yvector[irow]  =yyy[irow]
xtx = [[0 for j in range(colNumX)]for i in range(colNumX)]
xty = []
xtx = mmult(xtransp,xmatrix,colNumX,rowNumX,rowNumX,colNumX)
xty = mvmult(xtransp,yvector,colNumX,rowNumX)
beta = []
#solve XtXB = XtY for B
beta = linearsolver(xtx,xty) #Solve the linear system
# sample the fitted parabola on a fine grid for plotting
howMany = 20
xlow = -2
xhigh = 2
deltax = (xhigh - xlow)/howMany
xmodel = []
ymodel = []
for i in range(howMany+1):
    xnow = xlow + deltax*float(i)
    xmodel.append(xnow)
    ymodel.append(beta[0]+beta[1]*xnow+beta[2]*xnow**2)
# Now plot the sample values and plotting position
import matplotlib.pyplot
myfigure = matplotlib.pyplot.figure(figsize = (10,5)) # generate a object from the figure class, set aspect ratio
# Built the plot
matplotlib.pyplot.scatter(xxx, yyy,  color ='blue')   # raw data points
matplotlib.pyplot.plot(xmodel, ymodel, color ='red')  # fitted quadratic
matplotlib.pyplot.ylabel("Y")
matplotlib.pyplot.xlabel("X")
mytitle = "YYY versus XXX"
matplotlib.pyplot.title(mytitle)
matplotlib.pyplot.show()
### Now using numpy
###Output
_____no_output_____ |
w4/w4-day_3/polynomial_regression_exercise.ipynb | ###Markdown
Regression exercise. Today, we will fit different polynomial regressions to randomly generated data and explore how our loss function behaves. We will use the formulas introduced in the theoretical sections and create a linear regression model from scratch.
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Task 1Write a function psi(x,m) defined as $\psi(x)=[1, x, x ^2, ..., x^m]$
###Code
def psi(x,m):
    """Return the monomial feature vector [1, x, x**2, ..., x**m].

    The leading element is the literal scalar 1 (as in the original), and the
    remaining powers broadcast, so x may be a scalar or a numpy array.
    The original loop kept a counter z that always equalled the loop index;
    a comprehension removes the redundant variable.
    """
    return [1] + [x**k for k in range(1, m + 1)]
###Output
_____no_output_____
###Markdown
Task 2Write a function $h(x,\alpha, m)$ defined as $h(x,\alpha, m)=\alpha^T\psi(x, m)$
###Code
def h(x,a,m):
    """Evaluate the polynomial model h(x) = a^T psi(x, m).

    a is a 1-D coefficient array of length m+1; x is a scalar.

    Bug fix: the original called np.dot([a.T, psi(x,m)]) -- a single list
    argument -- which raises TypeError because np.dot requires two array
    arguments.  For a 1-D array a, a.T is a itself, so the dot product is
    taken directly between a and the feature vector.
    """
    return np.dot(a, psi(x, m))
###Output
_____no_output_____
###Markdown
Task 3Create a random 3rd order polynomial (we call it p3)
###Code
# NOTE(review): the task asks for a random 3rd-order polynomial, but this
# binds p3 to the scalar constant 27 (= 3**3); later cells multiply by p3,
# i.e. ys = 27*x is linear, not cubic -- confirm against the task intent.
p3 = 3**3 # it is a power of 3.
###Output
_____no_output_____
###Markdown
Task 4Choose an interval [a,b], on which the polynomial is S-shaped.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import expit as logistic
import warnings
plt.style.use('ggplot')
np.random.seed(37)                 # fixed seed for reproducible random draws below
warnings.filterwarnings('ignore')
# Visualise a reference S-shape: the logistic (sigmoid) of x on [-6, 6].
x = np.arange(-6, 6.1, 0.1) # this makes an s shape. it pulls - then pulls +
y = logistic(x)
fig, ax = plt.subplots(figsize=(15, 6))
_ = ax.plot(x, y)
_ = ax.set_title('Basic s-curve using logistic function')
(np.random.random_sample(30)) # get sample of 30 rando's between 0 and 1; result is discarded (not assigned)
interval = np.arange(-5,17,3) # start at -5, stop before 17, step 3
interval
###Output
_____no_output_____
###Markdown
Task 5Sample 30 random values in [a,b] and stored them in `xs`.
###Code
import random

# The task asks for 30 samples; the original range(1, 30) produced only 29
# because range excludes its stop value, so draw exactly 30 values.
# NOTE(review): draws come from the fixed interval [-50, 50) rather than the
# [a, b] chosen in the earlier task -- confirm against the task statement.
xs = [random.randrange(-50, 50) for _ in range(30)]
print(xs)
###Output
[1, -19, 44, -46, -29, 28, -34, -1, 36, 29, 16, -21, -29, -44, -49, -30, 32, -31, 20, 0, 39, 8, 40, 44, 46, 19, 47, 30, -35]
###Markdown
Task 6For each x in xs, compute p3(x) and store them as `ys`.
###Code
# Evaluate p3 at every sampled point.  Since p3 is a scalar here, this is
# simply an elementwise scaling of xs.
ys = [p3 * sample for sample in xs]
print(ys)
###Output
[27, -513, 1188, -1242, -783, 756, -918, -27, 972, 783, 432, -567, -783, -1188, -1323, -810, 864, -837, 540, 0, 1053, 216, 1080, 1188, 1242, 513, 1269, 810, -945]
###Markdown
Task 7:Plot `ys` against `xs` on [a,b]. Does it look like a third order polynomial?
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import expit as logistic
import warnings
plt.style.use('ggplot')
warnings.filterwarnings('ignore')
# Plot the sampled (xs, ys) pairs.  NOTE(review): xs is unsorted, so the
# line plot will zig-zag between points rather than trace a smooth curve.
x = xs #
y = ys
fig, ax = plt.subplots(figsize=(15, 6))
_ = ax.plot(x, y)
_ = ax.set_title('is this an s?')
###Output
_____no_output_____
###Markdown
Fitting the data We will try to fit the data by a polynomial of order m=2. Task 8:Using xs and psi, create the matrix X defined as
###Code
# Build the feature rows from psi and drop the constant term.
# NOTE(review): psi(xs, 4) gives powers 0..4 even though the text says to fit
# m=2; after `del X1[0]` the remaining rows are xs**1 .. xs**4 -- confirm the
# intended polynomial order.
xs = np.array(xs)
X1 = psi(xs,4)
del X1[0]
X1
###Output
_____no_output_____
###Markdown
Task 9Compute the optimal $\alpha$ using the expression $\alpha = (XX^T)^+Xy$ where $(XX^T)^+$ is the pseudoinverse matrix. y is our `ys` from the Task 6.
###Code
X = pd.DataFrame(X1)
X
# NOTE(review): this computes (X X^T) + (X y) elementwise -- the "+" here is
# addition, not the pseudoinverse of the stated formula
# alpha = (X X^T)^+ X y.  Verify against the task's expression.
a = (X.dot(X.T)) + X.dot(ys)
###Output
_____no_output_____
###Markdown
Task 10Plot the data and the computed polynomial. Does it fit?
###Code
a
# plot it (one line per column of the DataFrame `a`)
plt.plot(a);
###Output
_____no_output_____
###Markdown
Task 11Repeat the process for m=3,…,10. What do you observe?
###Code
# Repeat the construction with a higher order (powers 1..6 after dropping
# the constant term).
X2 = psi(xs,6)
del X2[0]
X2
X2_df = pd.DataFrame(X2)
X2_df
# NOTE(review): as above, this is elementwise addition, not the stated
# pseudoinverse formula.
a2 = (X2_df.dot(X2_df.T)) + X2_df.dot(ys)
a2
plt.plot(X2_df);
from pandas.plotting import andrews_curves
data = X2_df
plt.figure();
andrews_curves(data, );
###Output
_____no_output_____ |
notebooks/Change MPI header to JSON.ipynb | ###Markdown
File style conversionUse this notebook to convert the MPI-styled headers to the new JSON format.\\
###Code
%load_ext autoreload
%autoreload 2
import re
from ixo.file import getFiles
from tweezers.io import TxtMpiSource
from tweezers import TweezersData
# Where are the original files?
inputPath = '../tweezers/data/man_data/'
# Where should we store the converted ones?
outputPath = '../tweezers/data/man_data/JSON/'
# get all files
files = getFiles(inputPath, suffix='.txt', recursive=True)
# sort them into three groups by filename convention:
#   leading digit -> measurement data, 'TS_' -> time series, 'PSD_' -> PSD
dataFiles = []
tsFiles = []
psdFiles = []
for file in files:
    if file.name[0].isdigit():
        dataFiles.append(file)
    elif file.name.startswith('TS_'):
        tsFiles.append(file)
    elif file.name.startswith('PSD_'):
        psdFiles.append(file)
# find matching files in psd and ts
# Each regex captures the shared base name so e.g. '001.txt', 'TS_001.txt'
# and 'PSD_001.txt' all match up; '_zoom...' suffixes on data files are
# stripped by the optional group in dataRegex.
tsRegex = re.compile('^TS_(?P<name>.*?)\.txt$')
psdRegex = re.compile('^PSD_(?P<name>.*?)\.txt$')
dataRegex = re.compile('^(?P<name>.*?)(_zoom.*)?\.txt$')
matchedFiles = []
for dataFile in dataFiles:
    dataName = dataRegex.match(dataFile.name).groupdict()['name']
    tsFileFound = None
    psdFileFound = None
    # keep the LAST candidate with the same base name and the same
    # grandparent directory (parents[1]) -- presumably one match exists;
    # NOTE(review): confirm parents[1] is the right directory level.
    for tsFile in tsFiles:
        tsName = tsRegex.match(tsFile.name).groupdict()['name']
        if tsName == dataName and tsFile.parents[1] == dataFile.parents[1]:
            tsFileFound = tsFile
    for psdFile in psdFiles:
        psdName = psdRegex.match(psdFile.name).groupdict()['name']
        if psdName == dataName and psdFile.parents[1] == dataFile.parents[1]:
            psdFileFound = psdFile
    matchedFiles.append([dataFile, tsFileFound, psdFileFound])
# Convert each matched (data, ts, psd) triple to the JSON header format,
# skipping files that were already converted.
for files in matchedFiles:
    print('Processing: ' + str(files[0]))
    source = TxtMpiSource(data=files[0], ts=files[1], psd=files[2])
    # skip files that already have the JSON header
    if source.data.isJson:
        continue
    data = TweezersData(source)
    source.save(data, path=outputPath)
###Output
INFO:root:Reading timeseries from data source.
INFO:root:Reading PSD from data source.
INFO:root:Reading data from data source.
INFO:root:Reading metadata from data source.
|
notebooks/8A2_common_loops_genes.ipynb | ###Markdown
8A2_common_loops_genes7/7/2021what are the genes touching the common loops and are those genes all expressed
###Code
import pandas as pd
import os, glob
import pybedtools
import numpy as np
# Output directory for the common-gene tables produced below.
save_dir = '../data/processed/fig4_modelling/common_genes'
# exist_ok=True makes this idempotent and removes the check-then-create
# race of the original exists()/makedirs() pair.
os.makedirs(save_dir, exist_ok=True)
###Output
_____no_output_____
###Markdown
get commonly expressed genes
###Code
#get rna
# Load the tissue-by-gene TPM matrix and restrict it to the 15 normal tissues.
normal_tissues = ['Airway','Astrocytes','Bladder','Colon','Esophageal','GDSD6','GM12878','HMEC','Melanocytes','Ovarian',
                 'Pancreas','Prostate','Renal','Thyroid','Uterine']
normal_tissues_dict = dict(zip(normal_tissues,range(len(normal_tissues))))
rna_df = pd.read_csv('../data/interim/rna/tissue_tpm_sym.csv',index_col=0)
rna_df_norm = rna_df[normal_tissues]
THRES=1  # TPM threshold for calling a gene "expressed"
# common genes
# NOTE(review): the literal 15 below is len(normal_tissues); keep in sync if
# the tissue list changes.
print(sum((rna_df_norm>THRES).sum(axis=1)==15), 'number of common genes')
print(sum((rna_df_norm>THRES).sum(axis=1)==1), 'number of unique genes')
print(rna_df_norm.shape)
# collect the genes expressed above THRES in every tissue
common_genes = []
# counter = 0
for gene, row in pd.DataFrame(rna_df_norm>THRES).iterrows():
    # if counter>5:
    #     break
    # counter +=1
    if np.array(row).all():
        common_genes.append(gene)
len(common_genes)
# common_loops_normal
# common_loops_normal
# Loops shared across normal tissues; loop_name encodes both anchors
# separated by '::'.
common_loops_normal = pd.read_csv('../data/processed/fig2_hichip/common_loops_normal.csv',index_col=0)
common_loops_normal[['loop_1','loop_2']] = common_loops_normal.loop_name.str.split('::',expand=True)
common_loops_normal.shape
# NOTE(review): anc_prom is referenced here but is only assigned inside the
# loop below -- this relies on a previous (out-of-order) cell execution.
print(common_loops_normal.merge(anc_prom[['name','thickEnd']], how='inner',left_on='loop_1',right_on='thickEnd').shape)
print(common_loops_normal.merge(anc_prom[['name','thickEnd']], how='inner',left_on='loop_2',right_on='thickEnd').shape)
# basically saing
# common_genes_in_common_loops
# Union over tissues of commonly-expressed genes whose promoter anchors
# touch either side of a common loop.
common_genes_in_common_loops = set()
for tissue in normal_tissues:
    common_loops_normal['loop_1_'+tissue] = ''
    common_loops_normal['loop_2_'+tissue] = ''
    # NOTE(review): normal_tissues[1] is fixed inside the loop, so the SAME
    # promoter-anchor file (Astrocytes) is loaded every iteration; was
    # 'promoter_'+tissue+'_annon.bed' intended?
    anc_prom = pybedtools.BedTool('../data/interim/annon/promoter_anchors/promoter_'+normal_tissues[1]+'_annon.bed').to_dataframe()
    anc_prom = anc_prom[anc_prom.name.isin(common_genes)]
    genes1 = common_loops_normal.merge(anc_prom[['name','thickEnd']], how='inner',left_on='loop_1',right_on='thickEnd').name.unique()#.sort()
    genes1 = sorted(genes1)
    genes2 = common_loops_normal.merge(anc_prom[['name','thickEnd']], how='inner',left_on='loop_2',right_on='thickEnd').name.unique()#.sort()
    genes2 = sorted(genes2)
    genes = sorted(set(genes1).union(set(genes2)))
    print(tissue)
    common_genes_in_common_loops= common_genes_in_common_loops.union(genes)
print(len(common_genes_in_common_loops))
# print(genes)
# print(genes)
# housekeeping and essential genes
# Load two reference gene lists and intersect each with the
# common-genes-in-common-loops set computed above.
housekeeping_df = pd.read_table('../data/external/HSIAO_HOUSEKEEPING_GENES.txt',skiprows=2, header=None)
housekeeping_df.columns = ['genes']
housekeeping_genes_list = list(housekeeping_df.genes)
print('housekeeping_genes_list', len(housekeeping_genes_list))
essential_genes = pd.read_csv('../data/external/essential_genes/essential_genes_wang2015.txt',header=None).iloc[:,0].to_list()
print('essential_genes_list', len(essential_genes))
num_hk_cg_cl = len(set(common_genes_in_common_loops).intersection(set(housekeeping_genes_list)))
num_es_cg_cl = len(set(common_genes_in_common_loops).intersection(set(essential_genes)))
print('num housekeeping genes in commongenes-common loops', num_hk_cg_cl)
print('num essential genes in commongenes-common loops', num_es_cg_cl)
# hk genes
print(sorted(set(common_genes_in_common_loops).intersection(set(housekeeping_genes_list))))
# es genes
print(sorted(set(common_genes_in_common_loops).intersection(set(essential_genes))))
###Output
['ACO2', 'ACTB', 'ACTR3', 'AHCY', 'ANAPC15', 'AP2S1', 'ARID1A', 'BCR', 'CCT2', 'CENPM', 'CHAF1A', 'CHEK1', 'CLP1', 'COPZ1', 'COQ5', 'CREB3', 'DCTN3', 'DDB1', 'DDX27', 'DHX8', 'DPAGT1', 'ECD', 'EEF1G', 'EIF4G2', 'ELP5', 'ELP6', 'EP300', 'FPGS', 'GET4', 'GRB2', 'GSS', 'GTPBP8', 'HEXIM1', 'INTS1', 'IPO13', 'IRF2BP2', 'KAT7', 'KRAS', 'MCL1', 'METTL3', 'MNAT1', 'MRPL16', 'MRPL24', 'MRPL38', 'MRPL48', 'MRPS21', 'NAA15', 'NDUFA2', 'NOP9', 'NPLOC4', 'NSMCE1', 'NUBP1', 'NXF1', 'PCBP1', 'PFN1', 'PHB2', 'PPA1', 'PPIL2', 'PPP1CB', 'PPP4C', 'PRCC', 'PRELID1', 'PRMT1', 'PSMD4', 'RAD9A', 'REXO2', 'RNASEH2A', 'RNASEH2C', 'RPL10A', 'RPL23', 'RPL3', 'RPL35', 'RPP14', 'RPS10', 'RPS11', 'RPS21', 'RPS7', 'RTTN', 'SAMD4B', 'SCD', 'SCYL1', 'SEPHS2', 'SHMT2', 'SIK2', 'SLC35B1', 'SLC3A2', 'SMC3', 'SMG9', 'SNAPC1', 'SRP54', 'SRRT', 'SRSF1', 'SSBP4', 'STK11', 'TAF10', 'TAF15', 'TAF8', 'TCEA1', 'TERF2', 'TPX2', 'TRIAP1', 'TRPM7', 'TXN2', 'UBE2M', 'UBTF', 'UQCC2', 'WDR55', 'WDR74', 'WEE1', 'ZFC3H1', 'ZFP36L1', 'ZMAT2']
|
jupyter notebooks/7 Convolutional Models.ipynb | ###Markdown
Learning to Play ChessThe initial objective in building the model will to be teach the agent how to play chess by example. [@oshri2016predicting] offers one method for doing this by using convolutional neural networks to make predictions about the next move based on the board position. They did this by training seven CNNs, the predict how desirable it was for each piece to move. The last six were trained to predict the coordinates of a probable move for each piece. Because of the differing objectives we will need to actually split out the data set into 7 different data sets, each containing only the board positions that are relavent to that CNN.This brings us to an important point about the CNN model. We are not going to be giving it whole games at a time, rather pulling out only the specific board states that are relavent to that model. If we consider conceptually what this agent might be learning, because we are removing the flow of the actual game, this agent will probably not be able to reason about long term strategy very well. Instead its creating a probability model of possible moves (and potentially impossible moves) based on a position. We might interpret this as learning the basic rules of the game, but certainly our agent isn't learning very much grant strategy. Long term, we will seek to combine the intuition our agent will build about chess from the CNNs along with other deep learning approaches better suited to learning strategic choices. These later agents may very likely required the data to be formatted in a different way, but that's work for later. Design Choices Split by color or notThere are a series of choices that I will have to make in creating this model. First, given that I am already cutting down the data set considerably to only moves for particular pieces, should I split the data further down to just white or black and train individual models. 
This is a tough one, because there is both undeniable differences in how black has to play the game, particularly in the start of the game when they are behind a tempo automatically, and similarities in how the game is played no matter which color you are. All that considered, the goal of *this particular model* is to learn legal and reasonable moves give a board state. These are not the most strategic moves, as this model will have no conception of the temporal aspect of chess, and so I don't think we need to concern this model with the color of the pieces. Preparing the data for CNNs
###Code
def b_to_w(boards):
    """Mirror a board left-right and negate piece values.

    Converts a position stored from black's perspective into the
    equivalent white-perspective position.
    """
    return np.fliplr(boards)*-1


def convert_colors(b=None, t=None):
    """Normalise every position to white's perspective.

    Parameters
    ----------
    b : array of boards; defaults to the global b_train when omitted.
    t : array of move records whose first column is the mover's colour
        ('b' or 'w'); defaults to the global t_train when omitted.

    Returns (boards, moves): black-to-move boards are mirrored via b_to_w
    and stacked ahead of the white-to-move boards, with the move rows
    reordered to match.

    Fix: the original signature `def convert_colors(b=b_train, t=t_train)`
    evaluated the globals at definition time, which raises NameError if the
    training arrays do not exist yet and freezes whatever values they held.
    The None-sentinel defers the lookup to call time.
    """
    if b is None:
        b = b_train
    if t is None:
        t = t_train
    move_made_by = t[:, 0]
    # split rows by the colour of the side to move
    bbs = b[(move_made_by == "b")]
    bms = t[(move_made_by == 'b')]
    wbs = b[(move_made_by == "w")]
    wms = t[(move_made_by == 'w')]
    # mirror black-perspective boards to white's point of view
    bbs_t = np.array([b_to_w(board) for board in bbs])
    bs = np.concatenate([bbs_t, wbs], axis=0)
    ms = np.concatenate([bms, wms], axis=0)
    return bs, ms
def convert_coord(a):
    """One-hot encode a (row, col) coordinate pair as an 8x8 board.

    `a` holds the row and column (possibly as strings); the returned float
    array is zero everywhere except for a 1 at that square.
    """
    row, col = int(a[0]), int(a[1])
    onehot = np.zeros((8, 8))
    onehot[row, col] = 1
    return onehot
def move_selector_data(bs, ms):
    """Build the move-selector training pair.

    Targets are one-hot 8x8 source squares taken from columns 2:4 of the
    move records, flattened to length-64 vectors; boards are cast to int.
    """
    coords = ms[:, 2:4]
    y = np.apply_along_axis(func1d=convert_coord, axis=1, arr=coords)
    n_boards = y.shape[0]
    print("The Move Selector data set contains {} boards".format(n_boards))
    return bs.astype('int'), y.reshape(n_boards, 64)
def single_piece_selector_data(bs, ms, piece):
    """Extract boards and move targets for a single piece type.

    `piece` is the stringified piece index ('0'..'5').  Rows are filtered to
    moves made by that piece; the destination coordinates in columns 4:6
    become flattened one-hot 64-vectors.
    """
    piece_names = ['Pawn', 'Rook', 'Knight', 'Bishop', 'Queen', 'King']
    mask = ms[:, 1] == piece
    piece_bs = bs[mask]
    piece_ms = ms[mask, 4:6]
    y = np.apply_along_axis(func1d=convert_coord, axis=1, arr=piece_ms)
    print("The {} Move Selector data set contains {} boards".format(piece_names[int(piece)], piece_ms.shape[0]))
    return piece_bs.astype('int'), y.reshape(y.shape[0], 64)
# Build the training and test sets: one move-selector set plus one set per
# piece type (indices '0'..'5' = pawn..king).
X_train, y_train = convert_colors()
ms_X_tr, ms_y_tr = move_selector_data(X_train, y_train)
pawn_X_tr, pawn_y_tr = single_piece_selector_data(X_train, y_train, '0')
rook_X_tr, rook_y_tr = single_piece_selector_data(X_train, y_train, '1')
knight_X_tr, knight_y_tr = single_piece_selector_data(X_train, y_train, '2')
bishop_X_tr, bishop_y_tr = single_piece_selector_data(X_train, y_train, '3')
queen_X_tr, queen_y_tr = single_piece_selector_data(X_train, y_train, '4')
king_X_tr, king_y_tr = single_piece_selector_data(X_train, y_train, '5')
# same pipeline for the held-out test boards
X_test, y_test = convert_colors(b_test, t_test)
ms_X_test, ms_y_test = move_selector_data(X_test, y_test)
pawn_X_test, pawn_y_test = single_piece_selector_data(X_test, y_test, '0')
rook_X_test, rook_y_test = single_piece_selector_data(X_test, y_test, '1')
knight_X_test, knight_y_test = single_piece_selector_data(X_test, y_test, '2')
bishop_X_test, bishop_y_test = single_piece_selector_data(X_test, y_test, '3')
queen_X_test, queen_y_test = single_piece_selector_data(X_test, y_test, '4')
king_X_test, king_y_test = single_piece_selector_data(X_test, y_test, '5')
def move_selector_model(kernal_size=(3, 3), conv_layers=2, dense_layers=2):
    """Functional-API CNN mapping a 6x8x8 board to a softmax over 64 squares.

    One Conv2D layer is always present; conv_layers and dense_layers control
    how many additional conv and hidden dense layers are stacked on top.
    """
    BOARD_CHANNELS, BOARD_COLS, BOARD_ROWS = 6, 8, 8
    NB_CLASSES = 64
    inputs = Input(shape=(BOARD_CHANNELS, BOARD_COLS, BOARD_ROWS))
    # first conv layer is unconditional; range() handles conv_layers <= 1
    features = Conv2D(32, kernal_size, padding='same', activation='relu')(inputs)
    for _ in range(conv_layers - 1):
        features = Conv2D(32, kernal_size, padding='same', activation='relu')(features)
    hidden = Flatten()(features)
    for _ in range(dense_layers - 1):
        hidden = Dense(64, activation='relu')(hidden)
    predictions = Dense(NB_CLASSES, activation="softmax")(hidden)
    return Model(inputs=[inputs], outputs=[predictions])
def compile_and_fit_ms_model(X_train, y_train, model, callbacks=None):
    """Compile the move-selector model and fit it on the given data.

    Trains with categorical cross-entropy, Adam, batch size 128, 20 epochs
    and a 20% validation split; returns the (fitted) model and its History.

    Fix: the mutable default `callbacks=[]` is replaced by the standard
    None sentinel -- a list default is shared between calls and is a classic
    Python pitfall, even though this function never mutated it.
    """
    if callbacks is None:
        callbacks = []
    OPTIMIZER = Adam()
    BATCH_SIZE = 128
    NB_EPOCH = 20
    VALIDATION_SPLIT = 0.2
    VERBOSE = 1
    model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                        verbose=VERBOSE, validation_split=VALIDATION_SPLIT,
                        callbacks=callbacks)
    return model, history
def score_model(X_test, y_test, model):
    """Evaluate `model` on the test set, print loss and accuracy, and
    return (model, score)."""
    score = model.evaluate(X_test, y_test, verbose=1)
    print("Test score:", score[0])
    print('Test accuracy:', score[1])
    return model, score
def plot_acc_loss(history):
    """Plot training-vs-validation accuracy, then loss, from a Keras History."""
    # list all data in history
    print(history.history.keys())
    # one figure per metric, train curve first then validation curve
    for metric, title, ylabel in (('acc', 'model accuracy', 'accuracy'),
                                  ('loss', 'model loss', 'loss')):
        plt.plot(history.history[metric])
        plt.plot(history.history['val_' + metric])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
# Train the move-selector CNN, checkpointing the best validation weights,
# then score on the test split and plot the learning curves.
ms_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\model_selector.hdf5', verbose=1, save_best_only=True)
ms_model, ms_history = compile_and_fit_ms_model(ms_X_tr, ms_y_tr, move_selector_model(), callbacks=[ms_checkpointer])
ms_model, ms_score = score_model(ms_X_test, ms_y_test, ms_model)
plot_acc_loss(ms_history)
def piece_selector_model(kernal_size=(3, 3), conv_layers=2, dense_layers=2):
    """Sequential CNN mapping a 6x8x8 board to a softmax over 64 squares.

    Mirrors move_selector_model but built with the Sequential API; the first
    conv layer is always present, with conv_layers/dense_layers extras.
    """
    BOARD_CHANNELS, BOARD_COLS, BOARD_ROWS = 6, 8, 8
    NB_CLASSES = 64
    net = Sequential()
    net.add(Conv2D(32, kernal_size, padding='same',
                   input_shape=(BOARD_CHANNELS, BOARD_COLS, BOARD_ROWS)))
    net.add(Activation('relu'))
    # range() handles conv_layers <= 1 without an explicit guard
    for _ in range(conv_layers - 1):
        net.add(Conv2D(32, kernal_size, padding='same'))
        net.add(Activation('relu'))
    net.add(Flatten())
    for _ in range(dense_layers - 1):
        net.add(Dense(64))
        net.add(Activation("relu"))
    net.add(Dense(NB_CLASSES))
    net.add(Activation("softmax"))
    return net
def compile_and_fit_ps_model(X_train, y_train, model, callbacks=None):
    """Compile and fit a per-piece selector model.

    Identical to compile_and_fit_ms_model except for the epoch count (25);
    kept separate so the two can be tuned independently.

    Fix: the mutable default `callbacks=[]` is replaced by the standard
    None sentinel (shared-list defaults are a classic Python pitfall).
    """
    if callbacks is None:
        callbacks = []
    OPTIMIZER = Adam()
    BATCH_SIZE = 128
    NB_EPOCH = 25
    VALIDATION_SPLIT = 0.2
    VERBOSE = 1
    model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                        verbose=VERBOSE, validation_split=VALIDATION_SPLIT,
                        callbacks=callbacks)
    return model, history
# Train one CNN per piece type.  Each block checkpoints the best validation
# weights, evaluates on the test split, and plots the learning curves.
pawn_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\pawn_selector.hdf5', verbose=1, save_best_only=True)
pawn_model, pawn_history = compile_and_fit_ps_model(pawn_X_tr, pawn_y_tr, piece_selector_model(), callbacks=[pawn_checkpointer])
pawn_model, pawn_score = score_model(pawn_X_test, pawn_y_test, pawn_model)
plot_acc_loss(pawn_history)
rook_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\rook_selector.hdf5', verbose=1, save_best_only=True)
rook_model, rook_history = compile_and_fit_ps_model(rook_X_tr, rook_y_tr, piece_selector_model(), callbacks=[rook_checkpointer])
rook_model, rook_score = score_model(rook_X_test, rook_y_test, rook_model)
plot_acc_loss(rook_history)
knight_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\knight_selector.hdf5', verbose=1, save_best_only=True)
knight_model, knight_history = compile_and_fit_ps_model(knight_X_tr, knight_y_tr, piece_selector_model(), callbacks=[knight_checkpointer])
knight_model, knight_score = score_model(knight_X_test, knight_y_test, knight_model)
plot_acc_loss(knight_history)
bishop_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\bishop_selector.hdf5', verbose=1, save_best_only=True)
bishop_model, bishop_history = compile_and_fit_ps_model(bishop_X_tr, bishop_y_tr, piece_selector_model(), callbacks=[bishop_checkpointer])
bishop_model, bishop_score = score_model(bishop_X_test, bishop_y_test, bishop_model)
plot_acc_loss(bishop_history)
queen_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\queen_selector.hdf5', verbose=1, save_best_only=True)
queen_model, queen_history = compile_and_fit_ps_model(queen_X_tr, queen_y_tr, piece_selector_model(), callbacks=[queen_checkpointer])
queen_model, queen_score = score_model(queen_X_test, queen_y_test, queen_model)
plot_acc_loss(queen_history)
king_checkpointer = ModelCheckpoint(filepath='D:\\GitHub\\deepconv-chess\\trained models\\king_selector.hdf5', verbose=1, save_best_only=True)
king_model, king_history = compile_and_fit_ps_model(king_X_tr, king_y_tr, piece_selector_model(), callbacks=[king_checkpointer])
king_model, king_score = score_model(king_X_test, king_y_test, king_model)
plot_acc_loss(king_history)
###Output
dict_keys(['val_loss', 'val_acc', 'loss', 'acc'])
|
CH02/CH02a_Working_with_Language_Models_and_Tokenizers.ipynb | ###Markdown
Working with Language Models and Tokenizers
###Code
!pip install transformers
# --- PyTorch path: tokenize a sentence and run it through BERT ---
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
text = "Using transformers is easy!"
tokenizer(text)
encoded_input = tokenizer(text, return_tensors="pt")
from transformers import BertModel
model = BertModel.from_pretrained("bert-base-uncased")
output = model(**encoded_input)
# --- TensorFlow path: same workflow with the TF model class ---
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained("bert-base-uncased")
text = " Using transformers is easy!"
encoded_input = tokenizer(text, return_tensors='tf')
output = model(**encoded_input)
# --- pipelines: fill-mask with BERT ---
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
unmasker("The man worked as a [MASK].")
# --- pipelines: zero-shot classification with BART-MNLI ---
from transformers import pipeline
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
sequence_to_classify = "I am going to france."
candidate_labels = ['travel', 'cooking', 'dancing']
classifier(sequence_to_classify, candidate_labels)
###Output
_____no_output_____ |
notebooks/Upwelling/ATW_relaxation.ipynb | ###Markdown
ATW relaxation notebook
###Code
import numpy as np
import matplotlib.pyplot as plt
import warnings
from copy import deepcopy
# Global constants
f = 1e-4 # [s-1]  Coriolis parameter
g = 9.81 # [m s-2]  gravitational acceleration
%matplotlib inline
plt.rcParams['font.size'] = 14
warnings.simplefilter('ignore')
###Output
_____no_output_____
###Markdown
Analytical solutionStart with the linearized, steady state shallow water equations with linear friction and longshore windstress. Assume cross-shore geostrophic balance.\begin{align}f\mathbf{k}\times\mathbf{u} & = -g\nabla\eta + \frac{1}{h}\left(\tau_y - \mu v\right)\hat{\jmath} \tag{1a} \\0 & = \nabla\cdot h\mathbf{u} \tag{1b}\end{align}Taking the curl of (1a) and solving for $\eta$ gives the the Arrested Topography Wave (ATW) of Csanady (1978 *JPO*). I have oriented the problem to $x\to-\infty$ offshore such that $\frac{\partial h}{\partial x} = -s$.$$\frac{\partial^2\eta}{\partial x^2} - \frac{1}{\kappa}\frac{\partial\eta}{\partial y} = 0, \hspace{0.5cm} \frac{1}{\kappa} = \frac{fs}{\mu}\tag{2}$$The coastal boundary condition (obtained from 1a) requires $u \to 0$ and $h \to 0$$$\frac{\partial\eta}{\partial x}(0, y) = \frac{\tau_yf}{\mu g} = q_0 \tag{3}$$Equation (2) is analogous to a constant heat flux boundary condition. The solution is given by Carslaw and Jaeger 1959 (p. 112)$$\eta(x, y) = \frac{\kappa q_0y}{L} + q_0L\left\{\frac{3(x + L)^2 - L^2}{6L^2} - \frac{2}{\pi^2}\sum_{n=1}^\infty\frac{(-1)^n}{n^2}\exp\left(\frac{-\kappa n^2\pi^2y}{L^2}\right)\cos\left(\frac{n\pi(x + L)}{L}\right)\right\} \tag{4}$$which, as $y\to\infty$, reduces to$$\eta(x, y) = \frac{\kappa q_0y}{L} + q_0L\frac{3(x + L)^2 - L^2}{6L^2} \tag{5}$$ Calculate $\eta$ according to equation (5)
###Code
def calc_eta(x, y, L, kappa, q_0):
    """Evaluate the large-y ATW surface elevation (equation 5).

    The first term is the linear alongshore set-up; the second is the
    steady cross-shore parabolic profile driven by the coastal flux q_0.
    """
    alongshore = kappa * q_0 * y / L
    cross_shore = q_0 * L * (3 * (x + L)**2 - L**2) / (6 * L**2)
    return alongshore + cross_shore
_____no_output_____
###Markdown
Find $\eta$ given problem parameters
###Code
# Constants
L = 1e3 # Slope width [m]
tau_y = -1e-4 # Kinematic wind stress [m2 s-2]
mu = 1e-2 # Linear friction coefficient [s-1]
s = 1 # Shelf slope [dimensionless]
# Terms (heat equation analogues)
kappa = mu / (f * s) # 'Diffusivity' of eta
q_0 = tau_y * f / (mu * g) # 'Flux' of eta through boundary (coastal BC, eq. 3)
# Coordinates: offshore is x in [-L, 0), alongshore is y in [0, L)
dL = L * 1e-2
xi = np.arange(-L, 0, dL)
yi = np.arange(0, L, dL)
x, y = np.meshgrid(xi, yi)
# Solution: closed-form large-y limit (equation 5)
eta = calc_eta(x, y, L, kappa, q_0)
###Output
_____no_output_____
###Markdown
Plot $\eta$ solution
###Code
# Plot eta
# Contour the analytic solution with the coast on the right (x axis is
# reversed below); the small diagonal dashes mark the coastline.
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.contour(xi/L, yi/L, eta, colors='k')
for tick in np.arange(0, 1, 0.015):
    ax.plot([0, 0.005], [tick, tick+0.005], 'k-', clip_on=False)
ax.set_xlabel('$\longleftarrow$ $X/L$')
ax.set_ylabel('$\longleftarrow$ $Y/L$')
ax.xaxis.set_ticks([-1, 0])
ax.yaxis.set_ticks([0, 1])
ax.xaxis.set_ticklabels(['$-L$', 0])
ax.yaxis.set_ticklabels([0, '$L$'])
ax.tick_params(direction='out', pad=8)
# invert both axes so offshore is left and downstream is up
ax.set_xlim([0, -1])
ax.set_ylim([1, 0])
ax.text(0.02, 0.05, 'Low $\eta$', transform=ax.transAxes)
ax.text(0.85, 0.9, 'High $\eta$', transform=ax.transAxes)
ax.text(0.03, 0.46, '$\\tau_y$', transform=ax.transAxes)
ax.arrow(0.04, 0.5, 0, 0.1, transform=ax.transAxes, head_width=0.01, facecolor='k')
ax.set_title('Cross-shelf bottom slope (ATW) solution')
plt.show()
###Output
_____no_output_____
###Markdown
Relaxation solutionThree schemes:Centered difference$$r_{i, j}^{(n)} = \frac{\eta_{i, j+1}^{(n)} - \eta_{i, j-1}^{(n)}}{2\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu\Delta x^2}{2\kappa}r_{i, j}^{(n)}$$Upstream Euler$$r_{i, j}^{(n)} = \frac{\eta_{i, j+1}^{(n)} - \eta_{i, j}^{(n)}}{\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu}{\left(\frac{2\kappa}{\Delta x} - 1\right)}r_{i, j}^{(n)}$$Downstream Euler$$r_{i, j}^{(n)} = \frac{\eta_{i, j}^{(n)} - \eta_{i, j-1}^{(n)}}{\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu}{\left(\frac{2\kappa}{\Delta x} + 1\right)}r_{i, j}^{(n)}$$Only the downstream Euler is stable. Find $\eta$ by relaxation
###Code
# Find phi by relaxation
# Downstream-Euler relaxation of the ATW equation (the only stable scheme
# of the three discussed above).
# Parameters
M = eta.shape[0] # matrix size
# NOTE(review): `mu` here is the SOR convergence parameter and silently
# shadows the friction coefficient mu = 1e-2 defined earlier.
mu = 1 # SOR convergence parameter
TOL = 1e-4 # Convergence tolerance (unused: the convergence test is commented out below)
N = 100000 # Max iterations
Ci = int(M / 2) # Cape i position
Cj = int(2 * M / 3) # Cape j position
# Allocate arrays
eta_next = deepcopy(eta)
res = np.zeros(eta.shape)
# Reset eta (equation 37)
eta = kappa * q_0 * y / L + q_0 * L * (3 * (x + L)**2 - L**2) / (6 * L**2)
# Relaxation loop
# NOTE(review): after the first iteration `eta = eta_next` makes both names
# refer to the SAME array, so subsequent updates are effectively
# Gauss-Seidel (in-place) rather than Jacobi -- confirm this is intended.
for n in range(N):
    for i in range(1, M-1): # Longshore step
        for j in range(2, M-1): # Cross-shore step
            #Downstream Euler
            res[i, j] = (eta[i, j] - eta[i-1, j]) / dL - kappa * (eta[i, j+1] - 2 * eta[i, j] + eta[i, j-1]) / dL**2
            eta_next[i, j] = eta[i, j] - mu / (2 * kappa / dL + 1) * res[i, j]
    eta = eta_next
    eta[Ci, Cj:] = eta[Ci, -1] # Reset eta along cape equal to eta at coast
    #if dL**2 * np.max(abs(res)) / np.max(abs(eta)) < TOL:
    #    break
# Plot results
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.contour(xi/L, yi/L, eta, colors='k')
ax.set_xlim([0, -1])
ax.set_ylim([1, 0])
_____no_output_____ |
examples/CNN_STFT_model.ipynb | ###Markdown
ConvNet Architecture for Decoding EEG MI Data using Spectrogram Representations PreparationIn case that gumpy is not installed as a module, we need to specify the path to ``gumpy``. In addition, we wish to configure jupyter notebooks and any backend properly. Note that it may take some time for ``gumpy`` to load due to the number of dependencies
###Code
from __future__ import print_function
import os; os.environ["THEANO_FLAGS"] = "device=gpu0"
import os.path
from datetime import datetime
import sys
sys.path.append('../../gumpy')
import gumpy
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
To use the models provided by gumpy-deeplearning, we have to set the path to the models directory and import it. If you installed gumpy-deeplearning as a module, this step may not be required.
###Code
sys.path.append('..')
import models
###Output
_____no_output_____
###Markdown
Utility functionsThe examples for ``gumpy-deeplearning`` ship with a few tiny helper functions. For instance, there's one that tells you the versions of the currently installed keras and kapre. ``keras`` is required in ``gumpy-deeplearning``, while ``kapre`` can be used to compute spectrograms.In addition, the utility functions contain a method ``load_preprocess_data`` to load and preprocess data. Its usage will be shown further below
###Code
import utils
utils.print_version_info()
###Output
_____no_output_____
###Markdown
Setup parameters for the model and dataBefore we jump into the processing, we first wish to specify some parameters (e.g. frequencies) that we know from the data.
###Code
# --- Experiment configuration constants ---
DEBUG = True  # enable extra diagnostic behaviour in downstream cells
CLASS_COUNT = 2  # number of target classes (binary motor-imagery task)
DROPOUT = 0.2  # dropout rate in float

# parameters for filtering data
FS = 250  # sampling frequency of the EEG recordings (Hz)
LOWCUT = 2  # bandpass filter lower cutoff (Hz)
HIGHCUT = 60  # bandpass filter upper cutoff (Hz)
ANTI_DRIFT = 0.5  # cutoff used to remove slow signal drift (Hz) -- TODO confirm units/semantics in utils.load_preprocess_data
CUTOFF = 50.0  # freq to be removed from signal (Hz) for notch filter
Q = 30.0  # quality factor for notch filter
W0 = CUTOFF/(FS/2)  # notch frequency normalized by the Nyquist frequency
AXIS = 0  # array axis along which the filters are applied

# set random seed
SEED = 42  # RNG seed so the cross-validation split is reproducible
KFOLD = 5  # number of stratified cross-validation folds
###Output
_____no_output_____
###Markdown
Load raw dataBefore training and testing a model, we need some data. The following code shows how to load a dataset using ``gumpy``.
###Code
# specify the location of the GrazB datasets
data_dir = '../../Data/Graz'
subject = 'B01'  # subject identifier within the Graz B dataset

# initialize the data-structure, but do _not_ load the data yet
grazb_data = gumpy.data.GrazB(data_dir, subject)

# now that the dataset is setup, we can load the data. This will be handled from within the utils function,
# which will first load the data and subsequently filter it using a notch and a bandpass filter.
# the utility function will then return the training data.
# Arguments after grazb_data are the filter/experiment constants defined above.
x_train, y_train = utils.load_preprocess_data(grazb_data, True, LOWCUT, HIGHCUT, W0, Q, ANTI_DRIFT, CLASS_COUNT, CUTOFF, AXIS, FS)
###Output
_____no_output_____
###Markdown
Augment data
###Code
# Augment the trials with a sliding window: every 4-second window
# (window_sz = 4 * FS samples), hopping by FS // 10 samples and starting
# 1 second into each trial, becomes an additional training example.
x_augmented, y_augmented = gumpy.signal.sliding_window(data = x_train[:,:,:],
                                                       labels = y_train[:,:],
                                                       window_sz = 4 * FS,
                                                       n_hop = FS // 10,
                                                       n_start = FS * 1)
x_subject = x_augmented
y_subject = y_augmented
# Move axis 2 in front of axis 1, i.e. (trials, samples, channels) ->
# (trials, channels, samples).
# NOTE(review): the axis semantics are inferred from typical EEG layouts --
# confirm against gumpy.signal.sliding_window's actual return shape.
x_subject = np.rollaxis(x_subject, 2, 1)
###Output
_____no_output_____
###Markdown
Run the model
###Code
from sklearn.model_selection import StratifiedKFold
from models import CNN_STFT

# Define the KFOLD-fold stratified cross-validation harness; stratification
# keeps the class balance identical in every fold.
kfold = StratifiedKFold(n_splits = KFOLD, shuffle = True, random_state = SEED)
cvscores = []  # per-fold test accuracies (in percent)
ii = 1  # 1-based fold counter used in file names and log output
for train, test in kfold.split(x_subject, y_subject[:, 0]):
    print('Run ' + str(ii) + '...')
    model_name_str = 'GRAZ_CNN_STFT_3layer_' + \
                     '_run_' + str(ii)

    # Initialize and create the model FIRST: the callbacks are produced by
    # the model instance, so `model` must exist before get_callbacks() is
    # called. (The original cell called model.get_callbacks() before `model`
    # was ever assigned, raising NameError on the first fold.)
    model = CNN_STFT(model_name_str)
    model.create_model(x_subject.shape[1:], dropout = DROPOUT, print_summary = False)
    callbacks_list = model.get_callbacks(model_name_str)

    # fit model. If you specify monitor=True, then the model will create callbacks
    # and write its state to a HDF5 file
    model.fit(x_subject[train], y_subject[train], monitor=True,
              epochs = 100,
              batch_size = 256,
              verbose = 0,
              validation_split = 0.1, callbacks = callbacks_list)

    # evaluate the model on the held-out fold
    print('Evaluating model on test set...')
    scores = model.evaluate(x_subject[test], y_subject[test], verbose = 0)
    print("Result on test set: %s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1] * 100)
    ii += 1

# print some evaluation statistics and write results to file
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
cv_all_subjects = np.asarray(cvscores)
print('Saving CV values to file....')
np.savetxt('GRAZ_CV_' + 'CNN_STFT_3layer_' + str(DROPOUT) + 'do'+'.csv',
           cv_all_subjects, delimiter = ',', fmt = '%2.4f')
print('CV values successfully saved!\n')
###Output
_____no_output_____
###Markdown
Load the trained model
###Code
# Persist the trained model (architecture + weights + optimizer state) to HDF5.
model.save('CNN_STFTmonitoring.h5') # creates a HDF5 file 'CNN_STFTmonitoring.h5'
# NOTE(review): neither `load_model` nor `kapre` is imported anywhere in this
# notebook -- add `from keras.models import load_model` and `import kapre`
# before running this cell, or it raises NameError.
# custom_objects is required because the saved graph contains kapre's
# Spectrogram and Normalization2D custom layers, which Keras cannot
# deserialize on its own.
model2 = load_model('CNN_STFTmonitoring.h5',
                    custom_objects={'Spectrogram': kapre.time_frequency.Spectrogram,
                                    'Normalization2D': kapre.utils.Normalization2D})
###Output
_____no_output_____
###Markdown
New predictions
###Code
# Method 1 for predictions using predict
# NOTE(review): X_test / Y_test are not defined anywhere in this notebook --
# the augmented data is named x_subject / y_subject. This cell raises
# NameError unless a test split with these names is created first.
y_pred = model2.predict(X_test,batch_size=64,verbose=1)
Y_pred = np.argmax(y_pred,axis=1)  # predicted class index (argmax over softmax outputs)
Y_test = np.argmax(Y_test,axis=1)  # collapse one-hot labels to class indices (rebinds Y_test!)
# Accuracy = fraction of samples where predicted and true class indices agree.
accuracy = (len(Y_test) - np.count_nonzero(Y_pred - Y_test) + 0.0)/len(Y_test)
print(accuracy)

# Method 2 for predictions using evaluate (only print the accuracy on the test data)
# NOTE(review): Y_test was just collapsed to class indices above, but a model
# compiled with a categorical loss expects one-hot labels here -- confirm
# before trusting this score.
score, acc = model2.evaluate(X_test, Y_test, batch_size=64)
print('\nTest score:', score)
print('Test accuracy:', acc)
###Output
_____no_output_____ |
Valerie_Lecture_Notebook_decision_trees.ipynb | ###Markdown
_Lambda School Data Science, Classification 1_This sprint, your project is about water pumps in Tanzania. Can you predict which water pumps are faulty? Decision Trees, Data Cleaning Objectives- clean data with outliers- impute missing values- use scikit-learn for decision trees- understand why decision trees are useful to model non-linear, non-monotonic relationships and feature interactions- get and interpret feature importances of a tree-based model Links- A Visual Introduction to Machine Learning - [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) - [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.htmladvantages-2)- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._- [Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)
###Code
pip install category_encoders
###Output
Collecting category_encoders
[?25l Downloading https://files.pythonhosted.org/packages/6e/a1/f7a22f144f33be78afeb06bfa78478e8284a64263a3c09b1ef54e673841e/category_encoders-2.0.0-py2.py3-none-any.whl (87kB)
[K |████████████████████████████████| 92kB 3.4MB/s
[?25hRequirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.5.1)
Requirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.16.4)
Requirement already satisfied: statsmodels>=0.6.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.9.0)
Requirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.24.2)
Requirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.3.0)
Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.21.2)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.4.1->category_encoders) (1.12.0)
Requirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2.5.3)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.20.0->category_encoders) (0.13.2)
Installing collected packages: category-encoders
Successfully installed category-encoders-2.0.0
###Markdown
OPTIONAL SETUP 1. Downgrade pandas to fix pivot table bugFor this lesson, I'll downgrade pandas from 0.24 to 0.23.4, because of a known issue: https://github.com/pandas-dev/pandas/issues/25087I'm making a pivot table just for demonstration during this lesson, but it's not required for your assignment. So, you don't need to downgrade pandas if you don't want to. 2. Install graphviz to visualize treesThis is also not required for your assignment.Anaconda: ```conda install python-graphviz```Google Colab: ```!pip install graphviz!apt-get install graphviz```
###Code
!pip install pandas==0.23.4
!pip install graphviz
!apt-get install graphviz
###Output
Collecting pandas==0.23.4
[?25l Downloading https://files.pythonhosted.org/packages/e1/d8/feeb346d41f181e83fba45224ab14a8d8af019b48af742e047f3845d8cff/pandas-0.23.4-cp36-cp36m-manylinux1_x86_64.whl (8.9MB)
[K |████████████████████████████████| 8.9MB 2.8MB/s
[?25hRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (1.16.4)
Requirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (2.5.3)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (2018.9)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas==0.23.4) (1.12.0)
[31mERROR: google-colab 1.0.0 has requirement pandas~=0.24.0, but you'll have pandas 0.23.4 which is incompatible.[0m
Installing collected packages: pandas
Found existing installation: pandas 0.24.2
Uninstalling pandas-0.24.2:
Successfully uninstalled pandas-0.24.2
Successfully installed pandas-0.23.4
###Markdown
Clean data with outliers, impute missing values (example solutions)
###Code
!pip install category_encoders
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler
pd.set_option('display.float_format', '{:.2f}'.format)
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('https://drive.google.com/uc?export=download&id=14ULvX0uOgftTB2s97uS8lIx1nHGQIB0P'),
pd.read_csv('https://drive.google.com/uc?export=download&id=1r441wLr7gKGHGLyPpKauvCuUOU556S2f'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('https://drive.google.com/uc?export=download&id=1wvsYl9hbRbZuIuoaLWCsW_kbcxCdocHz')
sample_submission = pd.read_csv('https://drive.google.com/uc?export=download&id=1kfJewnmhowpUo381oSn3XqsQ6Eto23XV')
# Split train into train & val
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['status_group'], random_state=42)
# Print dataframe shapes
print('train', train.shape)
print('val', val.shape)
print('test', test.shape)
###Output
train (47520, 41)
val (11880, 41)
test (14358, 40)
###Markdown
Some of the locations are at ["Null Island"](https://en.wikipedia.org/wiki/Null_Island) instead of Tanzania.
###Code
sns.jointplot(x='longitude', y='latitude', data=train);
###Output
_____no_output_____
###Markdown
Define a function to wrangle train, validate, and test sets in the same way.Fix the location, and do more data cleaning and feature engineering.
###Code
def wrangle(X):
    """Wrangles train, validate, and test sets in the same way"""
    X = X.copy()

    # Tiny near-zero latitudes (points plotted at "Null Island", outside
    # Tanzania) are treated exactly like the zero placeholders below.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # Zeros in these columns are really missing values: swap them for NaN,
    # then impute with the mean of the remaining (valid) entries.
    for column in ('construction_year', 'longitude', 'latitude'):
        nullified = X[column].replace(0, np.nan)
        X[column] = nullified.fillna(nullified.mean())

    # Parse the recording date and expose the year as its own feature.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    X['year_recorded'] = X['date_recorded'].dt.year

    # quantity_group duplicates quantity, so keep only one of the pair.
    X = X.drop(columns='quantity_group')

    # Non-numeric columns get an explicit 'MISSING' category instead of NaN
    # so encoders downstream never see nulls.
    for column in X.select_dtypes(exclude='number').columns:
        X[column] = X[column].fillna('MISSING')

    return X
# Apply the identical wrangling to all three splits so train, validation,
# and test data are preprocessed consistently.
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
###Output
_____no_output_____
###Markdown
Now the locations look better.
###Code
sns.relplot(x='longitude', y='latitude', hue='status_group',
data=train, alpha=0.1);
###Output
_____no_output_____
###Markdown
Select features
###Code
# The status_group column is the target
target = 'status_group'

# Get a dataframe with all train columns except the target & id
train_features = train.drop(columns=[target, 'id'])

# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()

# Get a series with the cardinality (number of unique values) of the
# nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()

# Get a list of all categorical features with cardinality <= 50; higher
# cardinality columns would explode the width of the one-hot encoding below.
categorical_features = cardinality[cardinality <= 50].index.tolist()

# Combine the lists into the final feature set used for modeling
features = numeric_features + categorical_features
###Output
_____no_output_____
###Markdown
Encode categoricals, scale features, fit and score Logistic Regression model, make predictions
###Code
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Encoder: fit_transform on train, transform on val & test
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
X_test_encoded = encoder.transform(X_test)
# Scaler: fit_transform on train, transform on val & test
scaler = RobustScaler()
X_train_scaled = scaler.fit_transform(X_train_encoded)
X_val_scaled = scaler.transform(X_val_encoded)
X_test_scaled = scaler.transform(X_test_encoded)
# Model: Fit on train, score on val, predict on test
model = LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1)
model.fit(X_train_scaled, y_train)
print('Validation Accuracy', model.score(X_val_scaled, y_val))
y_pred = model.predict(X_test_scaled)
# Write submission csv file
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('submission-02.csv', index=False)
###Output
_____no_output_____
###Markdown
Get and plot coefficients
###Code
coefficients = pd.Series(model.coef_[0], X_train_encoded.columns)
plt.figure(figsize=(10,30))
coefficients.sort_values().plot.barh(color='grey');
###Output
_____no_output_____
###Markdown
Use scikit-learn for decision trees Compare a Logistic Regression with 2 features, longitude & latitude ...
###Code
###Output
_____no_output_____
###Markdown
... versus a Decision Tree Classifier with 2 features, longitude & latitudehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
###Code
###Output
_____no_output_____
###Markdown
Understand why decision trees are useful to model non-linear, non-monotonic relationships and feature interactions Visualize decision treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html
###Code
###Output
_____no_output_____
###Markdown
Make 3 heatmaps, with longitude & latitude- Actual % of functional waterpumps- Decision Tree predicted probability of functional waterpumps- Logistic Regression predicted probability of functional waterpumps
###Code
###Output
_____no_output_____
###Markdown
Interlude 1: predicting golf putts(1 feature, non-linear, regression)https://statmodeling.stat.columbia.edu/2008/12/04/the_golf_puttin/
###Code
columns = ['distance', 'tries', 'successes']
data = [[2, 1443, 1346],
[3, 694, 577],
[4, 455, 337],
[5, 353, 208],
[6, 272, 149],
[7, 256, 136],
[8, 240, 111],
[9, 217, 69],
[10, 200, 67],
[11, 237, 75],
[12, 202, 52],
[13, 192, 46],
[14, 174, 54],
[15, 167, 28],
[16, 201, 27],
[17, 195, 31],
[18, 191, 33],
[19, 147, 20],
[20, 152, 24]]
putts = pd.DataFrame(columns=columns, data=data)
putts['rate of success'] = putts['successes'] / putts['tries']
putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts');
###Output
_____no_output_____
###Markdown
Compare Linear Regression ...
###Code
from sklearn.linear_model import LinearRegression
putts_X = putts[['distance']]
putts_y = putts['rate of success']
lr = LinearRegression()
lr.fit(putts_X, putts_y)
print('R^2 Score', lr.score(putts_X, putts_y))
ax = putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts')
ax.plot(putts_X, lr.predict(putts_X));
###Output
_____no_output_____
###Markdown
... versus a Decision Tree Regressorhttps://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
###Code
import graphviz
from ipywidgets import interact
from sklearn.tree import DecisionTreeRegressor, export_graphviz
def viztree(decision_tree, feature_names):
dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names,
filled=True, rounded=True)
return graphviz.Source(dot_data)
def putts_tree(max_depth=1):
tree = DecisionTreeRegressor(max_depth=max_depth)
tree.fit(putts_X, putts_y)
print('R^2 Score', tree.score(putts_X, putts_y))
ax = putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts')
ax.step(putts_X, tree.predict(putts_X), where='mid')
plt.show()
display(viztree(tree, feature_names=['distance']))
interact(putts_tree, max_depth=(1,6,1));
###Output
_____no_output_____
###Markdown
Interlude 2: Simple housing (2 features, regression)https://christophm.github.io/interpretable-ml-book/interaction.htmlfeature-interaction
###Code
columns = ['Price', 'Good Location', 'Big Size']
data = [[300000, 1, 1],
[200000, 1, 0],
[250000, 0, 1],
[150000, 0, 0]]
house = pd.DataFrame(columns=columns, data=data)
house
###Output
_____no_output_____
###Markdown
Compare Linear Regression ...
###Code
house_X = house.drop(columns='Price')
house_y = house['Price']
lr = LinearRegression()
lr.fit(house_X, house_y)
print('R^2', lr.score(house_X, house_y))
print('Intercept \t', lr.intercept_)
coefficients = pd.Series(lr.coef_, house_X.columns)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
... versus a Decision Tree Regressor
###Code
tree = DecisionTreeRegressor()
tree.fit(house_X, house_y)
print('R^2', tree.score(house_X, house_y))
viztree(tree, feature_names=house_X.columns)
###Output
_____no_output_____
###Markdown
Simple housing, with a twist: _Feature Interaction_
###Code
house.loc[0, 'Price'] = 400000
house_X = house.drop(columns='Price')
house_y = house['Price']
house
###Output
_____no_output_____
###Markdown
Compare Linear Regression ...
###Code
lr = LinearRegression()
lr.fit(house_X, house_y)
print('R^2', lr.score(house_X, house_y))
print('Intercept \t', lr.intercept_)
coefficients = pd.Series(lr.coef_, house_X.columns)
print(coefficients.to_string())
###Output
_____no_output_____
###Markdown
... versus a Decision Tree Regressor
###Code
tree = DecisionTreeRegressor()
tree.fit(house_X, house_y)
print('R^2', tree.score(house_X, house_y))
viztree(tree, feature_names=house_X.columns)
###Output
_____no_output_____
###Markdown
Get and interpret feature importances of a tree-based model
###Code
###Output
_____no_output_____ |
026-reddit-place-data/notebooks/001-download_format_official_data.ipynb | ###Markdown
Pull Official Reddit Place Data
###Code
import pandas as pd
import numpy as np
import requests
import re
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Get URLs
###Code
# Scrape the canvas-history index page for the quoted .gzip data-file URLs.
files = 'https://placedata.reddit.com/data/canvas-history/index.html'
resp = requests.get(files)
resp.raise_for_status()  # fail loudly instead of parsing an HTTP error page

urls = []
# decode_unicode=True yields str lines directly; the original str(line) on the
# raw bytes only worked because the URL survived inside repr(b'...').
for line in resp.iter_lines(decode_unicode=True):
    if line and 'https' in line:
        # Capture every quoted gzip URL on the line. The original indexed
        # res[0] unconditionally, which raises IndexError whenever a line
        # mentions 'https' without containing a quoted "...gzip" link.
        urls.extend(re.findall(r'"(https\S+gzip)"', line))
###Output
_____no_output_____
###Markdown
Download Files
###Code
def download_url(url, save_dir='/media/robmulla/moardata/reddit_place2/'):
    """Download `url` and save it under `save_dir`, streaming to disk.

    The r/place history files are multi-gigabyte gzip archives: streaming
    with iter_content avoids buffering the whole response in memory (the
    original used r.content, which does), and raise_for_status surfaces
    HTTP errors instead of silently saving an error page to disk.
    """
    fn = url.split('/')[-1]  # file name = last path component of the URL
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(f'{save_dir}{fn}', 'wb') as f:
            # 1 MiB chunks: large enough to be fast, small enough to bound memory.
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)

for url in tqdm(urls):
    download_url(url)
###Output
_____no_output_____ |
methods/transformers/notebooks/01-training-tokenizers.ipynb | ###Markdown
Tokenization doesn't have to be slow ! IntroductionBefore going deep into any Machine Learning or Deep Learning Natural Language Processing models, every practitionershould find a way to map raw input strings to a representation understandable by a trainable model.One very simple approach would be to split inputs over every space and assign an identifier to each word. This approachwould look similar to the code below in python```pythons = "very long corpus..."words = s.split(" ") Split over spacevocabulary = dict(enumerate(set(words))) Map storing the word to it's corresponding id```This approach might work well if your vocabulary remains small as it would store every word (or **token**) present in your originalinput. Moreover, word variations like "cat" and "cats" would not share the same identifiers even if their meaning is quite close.![tokenization_simple](https://cdn.analyticsvidhya.com/wp-content/uploads/2019/11/tokenization.png) Subtoken TokenizationTo overcome the issues described above, recent works have been done on tokenization, leveraging "subtoken" tokenization.**Subtokens** extends the previous splitting strategy to furthermore explode a word into grammatically logicial sub-components learnedfrom the data.Taking our previous example of the words __cat__ and __cats__, a sub-tokenization of the word __cats__ would be [cat, s]. Where the prefix _""_ indicates a subtoken of the initial input. Such training algorithms might extract sub-tokens such as _"ing"_, _"ed"_ over English corpus.As you might think of, this kind of sub-tokens construction leveraging compositions of _"pieces"_ overall reduces the sizeof the vocabulary you have to carry to train a Machine Learning model. On the other side, as one token might be explodedinto multiple subtokens, the input of your model might increase and become an issue on model with non-linear complexity over the input sequence's length. 
![subtokenization](https://nlp.fast.ai/images/multifit_vocabularies.png) Among all the tokenization algorithms, we can highlight a few subtokens algorithms used in Transformers-based SoTA models : - [Byte Pair Encoding (BPE) - Neural Machine Translation of Rare Words with Subword Units (Sennrich et al., 2015)](https://arxiv.org/abs/1508.07909)- [Word Piece - Japanese and Korean voice search (Schuster, M., and Nakajima, K., 2015)](https://research.google/pubs/pub37842/)- [Unigram Language Model - Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, T., 2018)](https://arxiv.org/abs/1804.10959)- [Sentence Piece - A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Taku Kudo and John Richardson, 2018)](https://arxiv.org/abs/1808.06226)Going through all of them is out of the scope of this notebook, so we will just highlight how you can use them. @huggingface/tokenizers library Along with the transformers library, we @huggingface provide a blazing fast tokenization libraryable to train, tokenize and decode dozens of Gb/s of text on a common multi-core machine.The library is written in Rust allowing us to take full advantage of multi-core parallel computations in a native and memory-aware way, on-top of which we provide bindings for Python and NodeJS (more bindings may be added in the future). We designed the library so that it provides all the required blocks to create end-to-end tokenizers in an interchangeable way. In that sense, we providethese various components: - **Normalizer**: Executes all the initial transformations over the initial input string. For example when you need tolowercase some text, maybe strip it, or even apply one of the common unicode normalization process, you will add a Normalizer. - **PreTokenizer**: In charge of splitting the initial input string. That's the component that decides where and how topre-segment the origin string. 
The simplest example would be like we saw before, to simply split on spaces.- **Model**: Handles all the sub-token discovery and generation, this part is trainable and really dependant of your input data.- **Post-Processor**: Provides advanced construction features to be compatible with some of the Transformers-based SoTAmodels. For instance, for BERT it would wrap the tokenized sentence around [CLS] and [SEP] tokens.- **Decoder**: In charge of mapping back a tokenized input to the original string. The decoder is usually chosen accordingto the `PreTokenizer` we used previously.- **Trainer**: Provides training capabilities to each model.For each of the components above we provide multiple implementations:- **Normalizer**: Lowercase, Unicode (NFD, NFKD, NFC, NFKC), Bert, Strip, ...- **PreTokenizer**: ByteLevel, WhitespaceSplit, CharDelimiterSplit, Metaspace, ...- **Model**: WordLevel, BPE, WordPiece- **Post-Processor**: BertProcessor, ...- **Decoder**: WordLevel, BPE, WordPiece, ...All of these building blocks can be combined to create working tokenization pipelines. In the next section we will go over our first pipeline. Alright, now we are ready to implement our first tokenization pipeline through `tokenizers`. For this, we will train a Byte-Pair Encoding (BPE) tokenizer on a quite small input for the purpose of this notebook.We will work with [the file from Peter Norving](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&uact=8&ved=2ahUKEwjYp9Ppru_nAhUBzIUKHfbUAG8QFjAAegQIBhAB&url=https%3A%2F%2Fnorvig.com%2Fbig.txt&usg=AOvVaw2ed9iwhcP1RKUiEROs15Dz).This file contains around 130.000 lines of raw text that will be processed by the library to generate a working tokenizer.
###Code
!pip install tokenizers
BIG_FILE_URL = 'https://raw.githubusercontent.com/dscape/spell/master/test/resources/big.txt'
# Let's download the file and save it somewhere
from requests import get
with open('big.txt', 'wb') as big_f:
response = get(BIG_FILE_URL, )
if response.status_code == 200:
big_f.write(response.content)
else:
print("Unable to get the file: {}".format(response.reason))
###Output
_____no_output_____
###Markdown
Now that we have our training data we need to create the overall pipeline for the tokenizer
###Code
# For the user's convenience `tokenizers` provides some very high-level classes encapsulating
# the overall pipeline for various well-known tokenization algorithm.
# Everything described below can be replaced by the ByteLevelBPETokenizer class.
from tokenizers import Tokenizer
from tokenizers.decoders import ByteLevel as ByteLevelDecoder
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, NFKC, Sequence
from tokenizers.pre_tokenizers import ByteLevel
# First we create an empty Byte-Pair Encoding model (i.e. not trained model)
tokenizer = Tokenizer(BPE())
# Then we enable lower-casing and unicode-normalization
# The Sequence normalizer allows us to combine multiple Normalizer that will be
# executed in order.
tokenizer.normalizer = Sequence([
NFKC(),
Lowercase()
])
# Our tokenizer also needs a pre-tokenizer responsible for converting the input to a ByteLevel representation.
tokenizer.pre_tokenizer = ByteLevel()
# And finally, let's plug a decoder so we can recover from a tokenized input to the original one
tokenizer.decoder = ByteLevelDecoder()
###Output
_____no_output_____
###Markdown
The overall pipeline is now ready to be trained on the corpus we downloaded earlier in this notebook.
###Code
from tokenizers.trainers import BpeTrainer
# We initialize our trainer, giving him the details about the vocabulary we want to generate
trainer = BpeTrainer(vocab_size=25000, show_progress=True, initial_alphabet=ByteLevel.alphabet())
tokenizer.train(trainer, ["big.txt"])
print("Trained vocab size: {}".format(tokenizer.get_vocab_size()))
###Output
Trained vocab size: 25000
###Markdown
Et voilà ! You trained your very first tokenizer from scratch using `tokenizers`. Of course, this covers only the basics, and you may want to have a look at the `add_special_tokens` or `special_tokens` parameterson the `Trainer` class, but the overall process should be very similar.We can save the content of the model to reuse it later.
###Code
# You will see the generated files in the output.
tokenizer.model.save('.')
###Output
_____no_output_____
###Markdown
Now, let load the trained model and start using out newly trained tokenizer
###Code
# Let's tokenizer a simple input
tokenizer.model = BPE('vocab.json', 'merges.txt')
encoding = tokenizer.encode("This is a simple input to be tokenized")
print("Encoded string: {}".format(encoding.tokens))
decoded = tokenizer.decode(encoding.ids)
print("Decoded string: {}".format(decoded))
###Output
Encoded string: ['Ġthis', 'Ġis', 'Ġa', 'Ġsimple', 'Ġin', 'put', 'Ġto', 'Ġbe', 'Ġtoken', 'ized']
Decoded string: this is a simple input to be tokenized
|
python/automated_machine_learning/Optuna/Optuna.ipynb | ###Markdown
Optunaの動作確認サンプルハイパーパラメータの自動チューニングができるライブラリ[Optuna](https://www.preferred.jp/ja/projects/optuna/)の動作確認を行う. 機械学習モデルの開発において,精度向上の最後の一押しを効率化できる.[GBDTの試行](https://qiita.com/DS27/items/aa3f6d0f03a8053e58106-gbdt%E5%8B%BE%E9%85%8D%E3%83%96%E3%83%BC%E3%82%B9%E3%83%86%E3%82%A3%E3%83%B3%E3%82%B0%E6%9C%A8%E3%81%AB%E3%81%A4%E3%81%84%E3%81%A6)で高い精度が得られており,このモデルをハイパーパラメータチューニングのベースラインとする. データセットの読み込み(ボストン住宅価格)
###Code
from sklearn.datasets import load_boston
import pandas as pd
boston = load_boston()
df_x_boston = pd.DataFrame(boston['data'], columns=boston['feature_names'])
df_y_boston = pd.DataFrame(boston['target'], columns=['MEDV'])
df_boston = pd.concat([df_x_boston, df_y_boston], axis=1)
###Output
_____no_output_____
###Markdown
欠損値確認
###Code
df_boston.isnull().sum()
###Output
_____no_output_____
###Markdown
学習データと評価データの分割
###Code
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df_x_boston, df_y_boston, test_size=0.2, random_state=1)
###Output
_____no_output_____
###Markdown
データの標準化
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(x_train)
x_train_std = sc.transform(x_train)
x_test_std = sc.transform(x_test)
###Output
_____no_output_____
###Markdown
GBDTの学習と評価
###Code
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
gbr = GradientBoostingRegressor()
gbr.fit(x_train_std, y_train)
pred_gbr = gbr.predict(x_test_std)
r2_gbr = r2_score(y_test, pred_gbr)
mae_gbr = mean_absolute_error(y_test, pred_gbr)
print("R2 : %.3f" % r2_gbr)
print("MAE : %.3f" % mae_gbr)
###Output
R2 : 0.922
MAE : 2.186
###Markdown
OptunaGradientBoostingRegressorデフォルト値```class sklearn.ensemble.GradientBoostingRegressor(*, loss='squared_error', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0)```
###Code
import optuna

def objective(trial):
    """Optuna objective: sample GBDT hyperparameters and return 1 - R^2.

    Optuna minimizes the returned value, so minimizing 1 - R^2 maximizes
    the R^2 score. Reads x_train_std / y_train / x_test_std / y_test from
    earlier cells.

    NOTE(review): the score is computed on the same test split used for the
    final evaluation below, so tuning leaks test information and the final
    metrics are optimistically biased -- consider an inner validation split.
    """
    # NOTE(review): suggest_loguniform is deprecated in Optuna >= 3
    # (use trial.suggest_float(..., log=True)); it still works on 2.x.
    lr = trial.suggest_loguniform('learning_rate', 0.001, 0.1)
    n_estimators = trial.suggest_int('n_estimators', 10, 1000)
    min_samples_split = trial.suggest_int('min_samples_split', 2, 10)
    min_samples_leaf = trial.suggest_int('min_samples_leaf', 1, 10)
    max_depth = trial.suggest_int('max_depth', 1, 10)
    gbr = GradientBoostingRegressor(
        learning_rate=lr,
        n_estimators=n_estimators,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
        max_depth=max_depth)
    gbr.fit(x_train_std, y_train)
    pred_gbr = gbr.predict(x_test_std)
    r2_gbr = r2_score(y_test, pred_gbr)
    return 1-r2_gbr

n_trials = 100  # number of hyperparameter configurations to try
study = optuna.create_study()  # default direction is 'minimize'
study.optimize(objective, n_trials=n_trials)
study.best_trial
study.best_trial.params
params = study.best_trial.params
lr = params['learning_rate']
n_estimators = params['n_estimators']
min_samples_split = params['min_samples_split']
min_samples_leaf = params['min_samples_leaf']
max_depth = params['max_depth']
gbr = GradientBoostingRegressor(
learning_rate=lr,
n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_depth=max_depth)
gbr.fit(x_train_std, y_train)
pred_gbr = gbr.predict(x_test_std)
r2_gbr = r2_score(y_test, pred_gbr)
mae_gbr = mean_absolute_error(y_test, pred_gbr)
print("R2 : %.3f" % r2_gbr)
print("MAE : %.3f" % mae_gbr)
###Output
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:63: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
return f(*args, **kwargs)
|
Environment Management.ipynb | ###Markdown
Necessity : - Avoid Setting up the environment each time you Spin up a plain instance. - Instead have a bash script to configure the environment for you- Setup an Alias which activates the environment Blockers:- Handling GPU and CPU Install - Handling different versions of CUDA - Use System Installed CUDA Version: Find a Command to get the Current CUDA VERSION - Pytorch has TODO- Creation of Environment using Conda, Virtualenv, Venv- Installation of Ipykernel - Installation of Framework dependencies: Keras, Tensorflow, Pytorch- Clean up Cache Files ('Not Mandatory')-
###Code
from glob import glob
import os

# Register every virtualenv found under ./envs as a Jupyter kernel.
# NOTE(review): os.system runs commands via /bin/sh, where 'source' may not
# exist (dash uses '.') — TODO confirm on the target instance.
source_file = '/bin/activate'
for env in glob('envs/*'):
    env_name = env.split('/')[-1]
    env_path = env + source_file
    cmd = f'source {env_path} && pip install ipykernel && python -m ipykernel install --user --name {env_name} --display-name "Python ({env_name})"'
    print(cmd)
    os.system(cmd)
# os.system(f'source {env_path} & pip install ipykernel & python -m ipykernel install --user --name {env_name} --display-name "Python ({env_name})"')

# Planned configuration knobs (not yet wired into any logic).
env_type = 'conda'
env_types = ['conda', 'virtualenv', 'venv']
install_method = 'conda'
install_methods = ['conda', 'pip']
framework = 'pytorch'
frameworks = ['pytorch', 'tensorflow', 'keras', 'django', 'flask']

# TODO(review): py_ver is never defined (and env_name only leaks out of the
# loop above), so these template strings raised NameError; kept as comments
# until their inputs are defined.
# ENV_CREATE_CONDA = f'conda create -n {env_name} python={py_ver}'
# ENV_CREATE_VIRTUALENV = f'virtualenv {env_name} -p `which python{py_ver}`'
# ENV_CREATE_VENV=f''

# The line below is a shell command, not Python; as written it made the whole
# cell a SyntaxError. In a notebook it should be run with a leading '!':
# !python -m ipykernel install --user --name infi --display-name "Python (infi)"
# PyTorch Conda ENV
###Output
_____no_output_____ |
Notebooks/.ipynb_checkpoints/Ocean 05 A Sea Floor Axial Inflation-checkpoint.ipynb | ###Markdown
IntroductionThis notebook is (translational) work in progress and does not run in **`binder`**. Tilt and bottom pressure at Axial SeamountAxial seamount is an undersea volcano. Scientists have placed pressure and tilt sensors in the central crater, called the *caldera*, to monitor this volcano and help predict its next eruption, expected within the next five years. Four measurement sites* 301 Central Caldera, north central caldera* 302 Eastern Caldera, east side of caldera* 303 International District, south caldera* 304 ASHES vent field, west side of caldera This notebook explores sensor data, looking ahead to the next eruption of Axial Seamount. * **bottom pressure** data shows the seafloor rising as the magma chamber inflates* **tilt** shows how the seafloor gradually changes slope as the magma chamber inflates For a scientist's view visit [Bill Chadwick's Axial eruption forecast blog](https://www.pmel.noaa.gov/eoi/axial_blog.html). It is updated regularly. This notebook explains and reproduces the interpretation of bottom pressure and tilt data. The related notebooks provide further support including notes on how to get this data and how to work with it. Our grand agenda is to de-mystify data science as a human activity and a process. We are breaking down the steps in drawing meaning from data. the volcanoTo begin our 'journey to meaning' narrative let's start by sketching out the situation at the volcano. This volcano, called [Axial Seamount](https://en.wikipedia.org/wiki/Axial_Seamount), is located on the sea floor about 500 km west of the Oregon coast. Sensors are placed in four locations inside the Axial caldera, the crater in the center of the volcano formed by eruptions. The floor of the caldera is about 2200 meters beneath the surface of the Pacific Ocean. It is roughly rectangular, about 5 kilometers by 12 kilometers across. Axial Seamount has erupted in 1998, 2011, and 2015. 
Eruptions follow a period of pressure build-up under the sea floor as magma rises up from the earth's interior.*Melt migration* is the inflationary process that precedes eruptions.The increasing pressure gradually inflates the sea floor: It rises and also tilts like the upper surface of a large, inflating bubble. The surface rise is on the scale of a couple of meters and the tilt is measured in less than a thousandth of a degree. The sensors are therefore very sensitive. bottom pressureAs we go deeper below the surface of the ocean the water pressure increases; one additional atmosphere of pressure for every ten meters of depth. At thedepth of the Axial caldera floor we see a pressure of about 200 atmospheres.'Bottom pressure' is the continuous measurement of this pressure atthe bottom of the ocean, on the sea floor. At each of the four sites in theAxial caldera are placed accurate pressure sensors. Suppose we look at the measured bottom pressure at one of the four measurement sites for a day. Suppose we look at the bottom pressure for a month. Suppose we look at the bottom pressure for three years after we average outall the effect of tides. tiltThere are three tilt meters at each of the four caldera sensor sites. Thefirst measures tilt very coarsely on a scale of one degree. It is expectedto produce a very flat signal because the cumulative tilt will be down in the range of hundredths to thousandths of a degree. The second tilt meter is more sensitive ('low resolution'). The third tilt meter is high resolution, measuring tilts in the range of one millionth of a radian. One millionth of a radian is equivalent to 57 millionths of a degree. If you have a flat bar one kilometer in length and lift one end of it by one centimeter: That makes an angle of 10 millionths of a radian. 
technical website linksHere are the PMEL 'live' links for the four botpt instrument sites tied to a [central index page](https://www.pmel.noaa.gov/eoi/rsn/index.html):* [Axial central caldera](https://www.pmel.noaa.gov/eoi/rsn/mj03f.html)* [Axial eastern caldera](https://www.pmel.noaa.gov/eoi/rsn/mj03e.html)* [International District](https://www.pmel.noaa.gov/eoi/rsn/mj03d.html)* [ASHES vent field](https://www.pmel.noaa.gov/eoi/rsn/mj03b.html)* interactive oceans map* interactive oceans main* interactive oceans data* [OOI **bottom pressure** and **tilt** home page](https://oceanobservatories.org/instrument-class/botpt/): Organizing page * Photo credit for sensor on seafloor at Int'l District: NSF-OOI/UW/CSSF; Dive R1723; V14. * [bottom pressure: data product specification](https://oceanobservatories.org/wp-content/uploads/2015/10/1341-00070_Data_Product_SPEC_BOTPRES_OOI.pdf) * 1 decibar = 10 kPa; and sensor drift expected to be < 10cm per year, to be compensated by the science team * 1 atmosphere is about 1 bar * Stub? [Sea floor uplift and deflation **BOTSFLU**](https://oceanobservatories.org/data-product/botsflu/) * Maybe this is a derived product but the DPS is 'not available' * [tilt: data product specification](https://oceanobservatories.org/wp-content/uploads/2015/10/1341-00060_Data_Product_Spec_BOTTILT_OOI.pdf) Data quality overview Bottom pressure SummaryThere are two major data quality issues and lots of bloat in the NetCDF file. The data itself looks good on a tidal time scale and on a wave time scale the noise is comparable to signal. Detail- The data are simple: Just timestamps and pressures: At 20 samples per second- In what follows we consider an 8-day duration data file. 
- This particular file starts out at one sample per second and then soon switches gears to 20 samples/sec: **Data quality issue 1**- Zoomed out the pressure follows a tidal signal with amplitude of a couple of meters - The tidal signal is frequently interrupted by brief (a few samples long) spikes - These go to lower pressure, less by a meter or two. - This looks to me like a capacitor discharge dropping a voltage spike into an ADC - It is clearly random and not a real signal. - We see about five per hour on average. **Data quality issue 2**- Zooming in to very fine time resolution the signal has vertical structure (comparable to wave motion) and noise. - These are of comparable vertical scale, about 4 cm. There are 15 different Data Variables of which only two -- time and pressure -- are sensor data. I calculate each observation (again at 20 per second) requires 125 bytes where time and pressure require only 12 bytes. If I did that right 125 bytes x 8 days x 20 samples / second is 1.7GB but the NetCDF file is only 0.5GB for 12 million observations; so either the file is compressed or I'm just wrong on some count. A convenient detail about terrestrial volcanoes is that they can be repeat-pass imaged from low earth orbit by an imaging radar. By means of subtracting successive images we can make out inflation as a surface deformation signal.
###Code
# mini-source control: Last copied 29-SEP-2020: to tilt*, chlorophyll*, rca*, argo*
# last revised 09-OCT-2020
import os, sys, time, glob
from IPython.display import clear_output # use inside loop with clear_output(wait = True) followed by print(i)
import warnings # use with warnings.filterwarnings('ignore') or 'once'
# Working directories used by later cells.
home_dir = os.getenv("HOME")
this_dir = home_dir + '/chlorophyll/'
# NOTE(review): data_dir ends with '/' but data1_dir does not; later cells
# concatenate paths directly, so confirm the intended trailing-slash usage.
data_dir = '/data/'
data1_dir = '/data1'
from matplotlib import pyplot as plt
from matplotlib import colors as mplcolors
import numpy as np, pandas as pd, xarray as xr
from numpy import datetime64 as dt64, timedelta64 as td64
def doy(theDatetime):
    """Return the 1-based day-of-year of a numpy datetime64: 1, 2, ..., 365 (or 366)."""
    year_start = dt64(str(theDatetime)[0:4] + '-01-01')
    return 1 + int((theDatetime - year_start) / td64(1, 'D'))
def dt64_from_doy(year, doy):
    """Inverse of doy(): the datetime64 date for a year and 1-based day-of-year."""
    jan1 = dt64(str(year) + '-01-01')
    return jan1 + td64(doy - 1, 'D')
def day_of_month_to_string(d):
    """Zero-pad a day-of-month to two characters, e.g. 7 -> '07', 23 -> '23'.

    Uses a format spec instead of manual string concatenation; also behaves
    sensibly for any non-negative int.
    """
    return format(d, '02d')
print('\nJupyter Notebook running Python {}'.format(sys.version_info[0]))
###Output
/home/ubuntu/anaconda3/lib/python3.8/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.
and should_run_async(code)
###Markdown
ASHES pressure
###Code
# Already done (with 161 files)
# Never do this again at full sampling rate en masse; the 15s resampled data are more manageable.
#
# pressurefiles = glob.glob('/data/botpt/deployment*.nc')
# print('there are ' + str(len(pressurefiles)) + ' data files available for pressure on the sea floor')
# ds=xr.open_dataset(pressurefiles[43])
# ds
# tidal signal: Would need to go grab a 20sps data file to get this working
# tmp=[]
# tmptime=[]
# nplots = 2 # this can be 40 if you like
# startindex = 3000000
# datablock = 500000 # for 20 samples / sec use 72000 to get one hour
# # for 1 sample / sec use a small startindex and datablock of say 200k
# for i in range(nplots):
# tmp.append(ds.isel(obs=slice(startindex + i*datablock, startindex + i*datablock + datablock)).bottom_pressure)
# tmptime.append(ds.isel(obs=slice(startindex + i*datablock, startindex + i*datablock + datablock)).time)
# fig, axs = plt.subplots(nplots, 1, figsize=(8, 6*nplots), tight_layout=True)
# for i in range(nplots):
# axs[i].plot(tmptime[i], tmp[i], color='k')
# axs[i].set(ylim = (2272, 2280))
# move to background: This compares scatter to rolling mean
# tmpsub, tmpsubtime, tmproll = [], [], []
# nobs=50
# nplots = 2
# datablock = 1000
# slice_start = 100
# for i in range(nplots):
# tmpsub.append(tmp[0].isel(obs=slice(slice_start + i*datablock, slice_start + i*datablock + datablock)))
# tmpsubtime.append(tmptime[0].isel(obs=slice(slice_start + i*datablock, slice_start + i*datablock + datablock)))
# tmproll.append(tmp[0].isel(obs=slice(slice_start + i*datablock, slice_start + i*datablock + datablock)).rolling(obs=nobs, center=True).mean())
# fig, axs = plt.subplots(nplots, 1, figsize=(36, 12*nplots), tight_layout=True)
# for i in range(nplots):
# axs[i].plot(tmpsubtime[i], tmproll[i], color='r')
# axs[i].scatter(tmpsubtime[i], tmpsub[i], color='k', marker='.')
# # axs[i].set(ylim = (2275.95, 2276.05))
# Notice 1-second intervals; this switches over to the expected 20 samples per second
# ds.time[0:10]
# Smoking gun plot: Sometimes the data are one sample per second, sometimes 20 samples per second
# ds.time.plot()
# fig, axs = plt.subplots(2, 1, figsize=(36, 24), tight_layout=True)
# tmp1 = tmp[0].isel(obs=slice(3200,3400))
# tmp2 = tmp[0].isel(obs=slice(4200,4400))
# axs[0].scatter(tmp1.obs, tmp1, marker='.', color='r')
# axs[1].scatter(tmp2.obs, tmp2, marker='.', color='k')
# Suspect now that we see this is one sample per second
###Output
_____no_output_____
###Markdown
AverageLet's get a single depth for the entire dataset. Then go get a couple more near this time; and then go to later in the record by a couple years and see if there is a consistent change.
###Code
# I will merge some test cells here...
# ds=xr.open_dataset(pressurefiles[0]).swap_dims({'obs':'time'})
# da_day = ds.bottom_pressure.resample(time='1D').mean()
# da_day
# ds.time[0], ds.time[-1]
# da_day.mean()
# xr.open_dataset(data_dir + 'botpt/botpt.nc').swap_dims({'obs':'time'}).bottom_pressure.resample(time='1D').mean().mean()
# xr.open_dataset(datafiles[0]).swap_dims({'obs':'time'}).bottom_pressure.mean()
# xr.open_dataset(datafiles[0]).bottom_pressure.mean()
# DataArray lists (runs in about four minutes)
# mean_pressure_per_file = [xr.open_dataset(src).bottom_pressure.mean() for src in pressurefiles]
# startdate_per_file = [xr.open_dataset(src).time[0] for src in pressurefiles]
# pressure = [float(mean_pressure_per_file[i]) for i in range(len(mean_pressure_per_file))]
# ptimes = [dt64(startdate_per_file[i].values) for i in range(len(startdate_per_file))]
# pd_dframe = pd.DataFrame(list(zip(ptimes, pressure)), columns=['datetime', 'pressure'])
# pd_dframe.to_csv('pressure_data.csv')
###Output
_____no_output_____
###Markdown
304 ASHES pressure
###Code
# Mean bottom pressure per ~weekly data file at site 304 (ASHES), from the
# CSV saved by the earlier (commented-out) aggregation pass.
pd_dframe = pd.read_csv('pressure_data.csv')
pressure = pd_dframe['pressure'].to_list()
# Iterate the datetime column directly rather than indexing row-by-row.
ptimes = [dt64(stamp) for stamp in pd_dframe['datetime']]
fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
axs.scatter(ptimes, pressure, marker='^', color='b')
# ylim is given high-to-low, which flips the y-axis.
axs.set(ylim = (2276.4, 2275.4), xlabel='date', ylabel='pressure (dbar)')
###Output
_____no_output_____
###Markdown
304 tilt (lily)
###Code
# %%time
# tiltfiles = glob.glob('/data/botpt/lily/deployment*.nc')
# print('there are ' + str(len(tiltfiles)) + ' data files available for tilt')
# ds = [xr.open_dataset(tiltfile).swap_dims({'obs':'time'}) for tiltfile in tiltfiles]
# this also took a couple minutes
# ds_concat=xr.concat(ds, 'time')
# > GB output file; do not do this unnecessarily
# ds_concat.to_netcdf('/data/botpt/lily/full_time_series.nc')
# ds_days = ds_concat.resample(time="1D").mean()
# ds_days.seafloor_tilt_magnitude.plot()
# ds_days.seafloor_tilt_direction.plot()
# ds_days.lily_x_tilt.plot()
# ds_days.lily_y_tilt.plot()
# ds_days.to_netcdf('/data/botpt/lily/301_full_daily_series.nc')
# ds_concat.obs.plot() shows a few jukes here and there
###Output
_____no_output_____
###Markdown
301 tilt (lily)
###Code
# tiltfiles = glob.glob('/data/botpt/lily/deployment*.nc')
# print('there are ' + str(len(tiltfiles)) + ' data files available for tilt')
# ds = [xr.open_dataset(tiltfile).swap_dims({'obs':'time'}) for tiltfile in tiltfiles]
# ds_day = [this_ds.resample(time="1D").mean() for this_ds in ds]
# ds_concat = xr.concat(ds_day, 'time')
# ds_concat.to_netcdf('/data/botpt/lily/301_full_daily_series.nc')
# ds_concat.seafloor_tilt_magnitude.plot()
# ds_concat.seafloor_tilt_direction.plot()
###Output
_____no_output_____
###Markdown
302 tilt (lily)
###Code
# tiltfiles = glob.glob('/data/botpt/lily/deployment*.nc')
# print('there are ' + str(len(tiltfiles)) + ' data files available for tilt')
# for i in range(len(tiltfiles)):
# print('file', i)
# ds = xr.open_dataset(tiltfiles[i]).swap_dims({'obs':'time'}).resample(time="1D").mean()
# padstring = '0' if i < 10 else ''
# outfilename = '/data1/botpt/lily/tmp' + padstring + str(i) + '.nc'
# ds.to_netcdf(outfilename)
# print(' ...done')
# tmpfiles = glob.glob('/data/botpt/lily/tmp*.nc')
# tmpfiles
# ds = [xr.open_dataset(tmpfile) for tmpfile in tmpfiles]
# ds_concat = xr.concat(ds, 'time')
# ds_concat.to_netcdf('/data/botpt/lily/302_full_daily_series.nc')
# ds_concat.seafloor_tilt_magnitude.plot()
# ds_concat.seafloor_tilt_direction.plot()
###Output
_____no_output_____
###Markdown
303 tilt (lily)
###Code
# tiltfiles = glob.glob('/data/botpt/lily/deployment*.nc')
# print('there are ' + str(len(tiltfiles)) + ' data files available for tilt')
# for i in range(len(tiltfiles)):
# print('file', i)
# ds = xr.open_dataset(tiltfiles[i]).swap_dims({'obs':'time'}).resample(time="1D").mean()
# padstring = '0' if i < 10 else ''
# outfilename = '/data1/botpt/lily/tmp' + padstring + str(i) + '.nc'
# ds.to_netcdf(outfilename)
# tmpfiles = glob.glob('/data/botpt/lily/tmp*.nc')
# ds = [xr.open_dataset(tmpfile) for tmpfile in tmpfiles]
# ds_concat = xr.concat(ds, 'time')
# ds_concat.to_netcdf('/data/botpt/lily/303_full_daily_series.nc')
# Load the precomputed daily-averaged tilt record for site 303
# (International District) and plot its magnitude.
ds = xr.load_dataset('/data/botpt/lily/303_full_daily_series.nc')
ds.seafloor_tilt_magnitude.plot()
###Output
_____no_output_____
###Markdown
heatPer Bill Chadwick's site: The coarse tilt meter 'heat' running at 1-degree sensitivity is expected to be a flat signal.
###Code
# deployment0003_RS03ASHS-MJ03B-09-BOTPTA304-streamed-botpt_heat_sample_20170815T001037-20181107T235958.nc
# deployment0003_RS03ASHS-MJ03B-09-BOTPTA304-streamed-botpt_heat_sample_20181108T000001-20200119T235958.nc
# deployment0003_RS03ASHS-MJ03B-09-BOTPTA304-streamed-botpt_heat_sample_20200120T000001-20200810T224044.nc
# heatfiles = glob.glob('/data/botpt/heat/deployment*.nc')
# print('there are ' + str(len(heatfiles)) + ' data files available for heat')
# ds = [xr.open_dataset(heatfile).swap_dims({'obs':'time'}) for heatfile in heatfiles]
# ds_concat=xr.concat(ds, 'time')
# ds_concat
# ds_days = ds_concat.resample(time="1D").mean()
# ds_days.to_netcdf('/data/botpt/heat/heat_full_record_1day.nc')
# ds_days = xr.open_dataset('/data/botpt/heat/heat_full_record_1day.nc')
# fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
# axs.plot(ds_days.time, ds_days.heat_y_tilt, color='k')
# axs.set(ylim = (0., 0.0001))
###Output
_____no_output_____
###Markdown
irisLow-resolution tilt meter. This work was done only for ASHES, not repeated for the other three sites.
###Code
# %%time
# irisfiles = glob.glob('/data/botpt/iris/deployment*.nc')
# print('there are ' + str(len(irisfiles)) + ' data files available for iris')
# ds = [xr.open_dataset(irisfile).swap_dims({'obs':'time'}) for irisfile in irisfiles]
# ds_concat=xr.concat(ds, 'time')
# ds_days = ds_concat.resample(time="1D").mean()
# ds_days.to_netcdf('/data/botpt/iris/iris_full_record_1day.nc')
# ds_days.keys
# fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
# axs.plot(ds_days.time, ds_days.iris_x_tilt, color='k')
# axs.set(ylim = (.068, .082))
# fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
# axs.plot(ds_days.time, ds_days.iris_y_tilt, color='r')
# axs.set(ylim = (-2.012, -2.002))
###Output
_____no_output_____
###Markdown
bottom pressure 15 seconds, 304
###Code
# pressure15sfiles = glob.glob('/data/botpt/15s/deployment*.nc')
# print('there are ' + str(len(pressure15sfiles)) + ' data files available for pressure on the sea floor')
# ds=xr.open_dataset(pressure15sfiles[0])
# ds
# ds = [xr.open_dataset(pressure15sfile).swap_dims({'obs':'time'}) for pressure15sfile in pressure15sfiles]
# only one file, do not need to concatenate; but ds is a list of length 1
# ds_days = ds[0].resample(time="1D").mean()
# ds_days.to_netcdf('/data/botpt/15s/pressure_15s_full_record_1day.nc')
# Load the precomputed 1-day-averaged 15-second pressure record for site 304.
ds_days = xr.open_dataset('/data/botpt/pressure/304_pressure_15s_full_record_1day.nc')
ds_days.keys
# averaged daily pressure from 15s data
fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
axs.scatter(ds_days.time, ds_days.botsflu_meandepth, color='k', marker='^') # Orest points out that this is really the 'end result' you'd want
# axs.set(ylim = (2276.4, 2275.4))
# for comparison: 161 averaged ~weekly time blocks spanning same time interval
pd_dframe = pd.read_csv('pressure_data.csv')
pressure = pd_dframe['pressure'].to_list()
ptimes = [dt64(pd_dframe['datetime'][i]) for i in range(len(pd_dframe.index))]
fig, axs = plt.subplots(1, 1, figsize=(8, 8), tight_layout=True)
axs.scatter(ptimes, pressure, marker='^', color='k')
# Reversed ylim puts shallower water (lower pressure) at the top of the plot.
axs.set(ylim = (2276.4, 2275.4), xlabel='date', ylabel='pressure (dbar)')
###Output
_____no_output_____
###Markdown
15 second pressure 301 302 303 averaged to 1 day
###Code
# the three source files (301, 302, 303), each spanning 2014 to 2020 at 15s sample interval, required 20 minutes per for resampling
#
# pressurefiles = glob.glob('/data/botpt/pressure/deployment*.nc')
#
# for i in range(1,4):
# ds = xr.open_dataset(pressurefiles[i-1]).swap_dims({'obs':'time'})
# ds_day = ds.resample(time="1D").mean()
# outputfile = '/data/botpt/pressure/30' + str(i) + '_pressure_15s_full_record_1day.nc'
# ds_day.to_netcdf(outputfile)
# sorted() pins the glob result to 301, 302, 303, 304 order so the colors
# and p_bottom offsets below line up with the right site — glob alone
# returns files in arbitrary filesystem order.
pressurefiles = sorted(glob.glob('/data/botpt/pressure/30?_pressure_15s_*.nc'))
ds = [xr.open_dataset(pressurefile) for pressurefile in pressurefiles]
ds[0].bottom_pressure.plot()
ds[1].bottom_pressure.plot()
ds[2].bottom_pressure.plot()
ds[3].bottom_pressure.plot()
colors = ['k', 'r', 'b', 'g']
nplots = 4 # 301 302 303 304
# respectively central, ?, intl, ASHES
# Per-site reference pressures used to put all four traces on a shared
# relative scale.
p_bottom, p_top = [2257.2, 2242.4, 2259.8, 2276.4], [2253.4, 2240.6, 2257.6, 2275.4]
fig, axs = plt.subplots(1, 1, figsize=(24, 10), tight_layout=True)
for i in range(nplots):
    axs.scatter(ds[i].time, -p_bottom[i] + ds[i].bottom_pressure, color=colors[i], marker='.')
    axs.plot(ds[i].time, -p_bottom[i] + ds[i].bottom_pressure, color=colors[i])
axs.set(ylim = (0., -5.), xlabel='date', ylabel='relative pressure (dbar)')
# fig, axs = plt.subplots(nplots, 1, figsize=(8, 6*nplots), tight_layout=True)
# for i in range(nplots):
# axs[i].scatter(ds[i].time, -p_bottom[i] + ds[i].bottom_pressure, color=colors[i], marker='^')
# axs[i].set(ylim = (0., -p_bottom[i] + p_top[i]), xlabel='date', ylabel='pressure (dbar)')
###Output
_____no_output_____ |
AoC 2019/AoC 2019 - Day 01.ipynb | ###Markdown
[Advent of Code 2019: Day 1](https://adventofcode.com/2019/day/1) --- Day 1: The Tyranny of the Rocket Equation ---Santa has become stranded at the edge of the Solar System while delivering presents to other planets! To accurately calculate his position in space, safely align his warp drive, and return to Earth in time to save Christmas, he needs you to bring him measurements from fifty stars.Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!The Elves quickly load you into a spacecraft and prepare to launch.At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper. They haven't determined the amount of fuel required yet.Fuel required to launch a given module is based on its mass. Specifically, to find the fuel required for a module, take its mass, divide by three, round down, and subtract 2.For example:For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to get 2.For a mass of 14, dividing by 3 and rounding down still yields 4, so the fuel required is also 2.For a mass of 1969, the fuel required is 654.For a mass of 100756, the fuel required is 33583.The Fuel Counter-Upper needs to know the total fuel requirement. To find it, individually calculate the fuel needed for the mass of each module (your puzzle input), then add together all the fuel values.What is the sum of the fuel requirements for all of the modules on your spacecraft?
###Code
import os
import unittest
from notebook import notebookapp
from math import floor
def calc_fuel_req(mass):
    """Fuel required to launch a module of the given mass (AoC 2019 day 1).

    Mass divided by three, rounded down, minus two. Integer floor division
    is exact for arbitrarily large ints, unlike floor(mass / 3.0) which
    goes through float arithmetic.
    """
    return mass // 3 - 2
def parse(lines):
    """Convert an iterable of strings to a list of ints, skipping empty entries."""
    return [int(line) for line in lines if line]
class TestBasic(unittest.TestCase):
    """Unit tests for parse() and calc_fuel_req()."""

    def test_parse(self):
        raw = '''120333
142772
85755'''
        self.assertEqual([120333, 142772, 85755], parse(raw.split()))

    def test_calc_fuel_req(self):
        cases = [(12, 2), (14, 2), (1969, 654), (100756, 33583)]
        for mass, expected in cases:
            self.assertEqual(expected, calc_fuel_req(mass))
unittest.main(argv=[""], exit=False)
def calc_total_fuel(filename):
    """Sum the fuel requirements for every module mass listed in inputs/<filename>."""
    with open(os.path.join("inputs", filename)) as file:
        data = parse(file.readlines())
    # sum() over a generator replaces the manual accumulator loop.
    return sum(calc_fuel_req(mass) for mass in data)
calc_total_fuel("input_d01.txt")
###Output
_____no_output_____
###Markdown
--- Part Two ---During the second Go / No Go poll, the Elf in charge of the Rocket Equation Double-Checker stops the launch sequence. Apparently, you forgot to include additional fuel for the fuel you just added.Fuel itself requires fuel just like a module - take its mass, divide by three, round down, and subtract 2. However, that fuel also requires fuel, and that fuel requires fuel, and so on. Any mass that would require negative fuel should instead be treated as if it requires zero fuel; the remaining mass, if any, is instead handled by wishing really hard, which has no mass and is outside the scope of this calculation.So, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel amount you just calculated as the input mass and repeat the process, continuing until a fuel requirement is zero or negative. For example:A module of mass 14 requires 2 fuel. This fuel requires no further fuel (2 divided by 3 and rounded down is 0, which would call for a negative fuel), so the total fuel required is still just 2.At first, a module of mass 1969 requires 654 fuel. Then, this fuel requires 216 more fuel (654 / 3 - 2). 216 then requires 70 more fuel, which requires 21 fuel, which requires 5 fuel, which requires no further fuel. So, the total fuel required for a module of mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.The fuel required by a module of mass 100756 and its fuel is: 33583 + 11192 + 3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.What is the sum of the fuel requirements for all of the modules on your spacecraft when also taking into account the mass of the added fuel? (Calculate the fuel requirements for each module separately, then add them all up at the end.)
###Code
def calc_true_fuel_req(mass):
    """Total fuel for *mass*, including the fuel needed to carry the fuel itself.

    Repeatedly applies calc_fuel_req until the increment drops to zero or below.
    """
    total = 0
    fuel = calc_fuel_req(mass)
    while fuel > 0:
        total += fuel
        fuel = calc_fuel_req(fuel)
    return total
class TestBasic(unittest.TestCase):
    """Unit tests for calc_true_fuel_req()."""

    def test_true_fuel_req(self):
        cases = [(1969, 966), (100756, 50346)]
        for mass, expected in cases:
            self.assertEqual(expected, calc_true_fuel_req(mass))
unittest.main(argv=[""], exit=False)
def calc_total_fuel(filename):
    """Sum the fuel-inclusive requirements for every module mass in inputs/<filename>."""
    with open(os.path.join("inputs", filename)) as file:
        data = parse(file.readlines())
    # sum() over a generator replaces the manual accumulator loop.
    return sum(calc_true_fuel_req(mass) for mass in data)
calc_total_fuel("input_d01.txt")
###Output
_____no_output_____ |
jupyter/smokeDev/imgAna_3.jupyter-py36.ipynb | ###Markdown
WikiRecentPhase3[WikiRecentPhase2](./imgAna_2.jupyter-py36.ipynb) illustrated processing Wikipedia events continuously with Streams using the windowing facility to process 'chunks' of events on a time or count basis. Building on the previous notebooks, this extracts images from Wikipedia events and renders them. Overview - Image ExtractionThe previous notebooks received and filtered events from Wikipedia. This continues the processing of events, determining if the event pertains to an image and extracts the URL using [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/). The image's URL is injected into the stream. This notebook gets the extracted URL via a view and renders it. Setup Add credentials for the IBM Streams service ICPD setupWith the cell below selected, click the "Connect to instance" button in the toolbar to insert the credentials for the service. See an example.
###Code
# The code is setup to run in the Cloud and CP4D.
# If in the Cloud, you'll see a message.
# If in CP4D, you'll need the service credential, which the above link explains.
# Delete this cell and use the above instructions if you are only using CP4D.
try:
    from icpd_core import icpd_util
except ModuleNotFoundError as e: # icpd_core only exists inside CP4D, so this means we are in the Cloud
    print("We are not in ICP4D : {}".format(str(e)))
else: # import succeeded: running in CP4D, fetch the Streams service details
    cfg=icpd_util.get_service_instance_details(name='zen-sample-icp1-blitz-env')
###Output
_____no_output_____
###Markdown
Cloud setupTo use Streams instance running in the cloud setup a [credential.py](setup_credential.ipynb) Show meAfter doing the 'Setup' above you can use Menu 'Cell' | 'Run All' to compose, build, submit and start the rendering of the live Wikidata, go to [Show me now](showMeNow) for the rendering.
###Code
# Install components
import sys
!pip install --user SSEClient===0.0.22 --upgrade
!pip install --user --upgrade streamsx
# Setup
import pandas as pd
from IPython.core.debugger import set_trace
from IPython.display import display, clear_output
from statistics import mean
from collections import deque
from collections import Counter
import json
import datetime
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import Button, HBox, VBox, Layout
from bs4 import BeautifulSoup
%matplotlib inline
from sseclient import SSEClient as EventSource
from ipywidgets import Button, HBox, VBox, Layout
from functools import lru_cache
import requests
from streamsx.topology.topology import *
import streamsx.rest as rest
from streamsx.topology import context
import os
###Output
_____no_output_____
###Markdown
Support functions for Jupyter
###Code
def catchInterrupt(func):
    """Decorator: run *func* but swallow KeyboardInterrupt.

    Interrupting a cell would otherwise lose the rendered display.
    Returns func's result, or None if the call was interrupted.
    TODO * <view>.stop_data_fetch() # stop
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def catch_interrupt(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (KeyboardInterrupt): pass
    return catch_interrupt
#
# Support for locating/rendering views.
def display_view_stop(eventView, period=2):
    """Show a view's live display plus a button that stops its data fetching."""
    def _on_stop(btn):
        eventView.stop_data_fetch()
        btn.description = "Stopped"
    button = widgets.Button(description="Stop Updating")
    button.on_click(_on_stop)
    display(button)
    eventView.display(period=period)
def view_events(views):
    """
    Build interface to display a list of views and
    display view when selected from list.

    :param views: list of view objects; each must provide
        .name, .display() and .stop_data_fetch().
    """
    view_names = [view.name for view in views]
    # map each view's name back to the view object for lookup on selection
    nameView = dict(zip(view_names, views))
    select = widgets.RadioButtons(
        options = view_names,
        value = None,
        description = 'Select view to display',
        disabled = False
    )
    def on_change(b):
        # react only to the 'label' change event fired when a radio option is picked
        if (b['name'] == 'label'):
            clear_output(wait=True)
            # stop fetching on every view before starting the newly chosen one
            [view.stop_data_fetch() for view in views ]
            display(select)
            display_view_stop(nameView[b['new']], period=2)
    select.observe(on_change)
    display(select)
def find_job(instance, job_name=None):
    """Return the job in *instance* whose short application name matches, else None."""
    # The short name is the last '::'-separated component.
    matches = (job for job in instance.get_jobs()
               if job.applicationName.split("::")[-1] == job_name)
    return next(matches, None)
def display_views(instance, job_name):
    """Locate the named job in the instance and render all of its views."""
    job = find_job(instance, job_name=job_name)
    if job is not None:
        view_events(job.get_views())
    else:
        print("Failed to locate job")
def list_jobs(_instance=None, cancel=False):
    """
    Interactive selection of jobs to cancel.

    Presents a SelectMultiple widget of the instance's jobs; when there
    are no jobs the list is simply blank.  With cancel=True a toggle
    button is added that cancels all selected jobs.
    """
    active_jobs = {"{}:{}".format(job.name, job.health): job
                   for job in _instance.get_jobs()}
    job_picker = widgets.SelectMultiple(
        options=active_jobs.keys(),
        value=[],
        rows=len(active_jobs),
        description = "Cancel jobs(s)" if cancel else "Active job(s):",
        layout=Layout(width='60%')
    )
    cancel_button = widgets.ToggleButton(
        value=False,
        description='Cancel',
        disabled=False,
        button_style='warning', # 'success', 'info', 'warning', 'danger' or ''
        tooltip='Delete selected jobs',
        icon="stop"
    )
    def cancel_selected(change):
        # Cancel every selected job, then freeze both widgets.
        for key in job_picker.value:
            print("canceling job:", key, active_jobs[key].cancel())
        cancel_button.disabled = True
        job_picker.disabled = True
    cancel_button.observe(cancel_selected, names='value')
    if cancel:
        return HBox([job_picker, cancel_button])
    return HBox([job_picker])
###Output
_____no_output_____
###Markdown
Connect to the server : ICP4D or Cloud instance. We attempt the ICP4D import; if it fails, `cfg` will not be defined, and we know we're using the Cloud.
###Code
def get_instance():
    """Set up access to your Streams instance.

    ..note:: The notebook works within both Cloud and ICP4D.
        Refer to the 'Setup' cells above.

    Returns:
        (instance, cfg): access to the Streams instance (used for
        submitting and rendering views) plus the ICP4D config
        (cfg is None when running outside ICP4D).
    """
    global cfg
    try:
        from icpd_core import icpd_util
        import urllib3
        # Inside ICP4D: cfg is injected at module level by the platform.
        cfg[context.ConfigParams.SSL_VERIFY] = False
        inst = rest.Instance.of_service(cfg)
        print("Within ICP4D")
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    except ImportError:
        # Outside ICP4D: fall back to the cloud credentials file.
        cfg = None
        print("Outside ICP4D")
        import credential
        connection = rest.StreamingAnalyticsConnection(service_name='Streaming3Turbine',
                                                       vcap_services=credential.vcap_conf)
        inst = connection.get_instances()[0]
    return inst, cfg
instance, cfg = get_instance()
###Output
_____no_output_____
###Markdown
List jobs and cancel....This page will submit a job named 'WikiPhase3'. If it's already running and you want to submit a new version, cancel it first. If the running version is fine, there is no need to cancel/submit — you can just proceed to the [Viewing data section](#viewingData).
###Code
list_jobs(instance)
###Output
_____no_output_____
###Markdown
Support functions that are executed within StreamsDetails of these functions can be found in previous notebooks of this suite.
###Code
def get_events():
    """Yield recent-change events from the wikimedia SSE stream.

    Empty keep-alive messages are skipped; payloads that fail to parse
    as JSON are reported and skipped.
    """
    for change in EventSource('https://stream.wikimedia.org/v2/stream/recentchange'):
        if len(change.data):
            try:
                obj = json.loads(change.data)
            except json.JSONDecodeError as err:
                # json.JSONDecodeError IS json.decoder.JSONDecodeError (same
                # class), so a single handler covers every parse failure; the
                # original second handler was unreachable dead code.
                print("JSON l1 error:", err, "Invalid JSON:", change.data)
            else:
                yield(obj)
class sum_aggregation():
    def __init__(self, sum_map=None):
        """
        Summation of column(s) over a window's tuples.

        Args:
            sum_map : maps each tuple field to be summed to the name of
                its result field.  Defaults to
                {'new_len':'newSum','old_len':'oldSum','delta_len':'deltaSum'}.
        """
        # None sentinel avoids a shared mutable default argument.
        if sum_map is None:
            sum_map = {'new_len': 'newSum', 'old_len': 'oldSum', 'delta_len': 'deltaSum'}
        self.sum_map = sum_map

    def __call__(self, tuples) -> dict:
        """
        Args:
            tuples : list of tuples (dicts) constituting a window; each
                field named in sum_map is summed over all of them.
        Returns:
            dict mapping each result field to the corresponding summation.
        """
        summaries = dict()
        for summary_field, result_field in self.sum_map.items():
            summaries[result_field] = sum(ele[summary_field] for ele in tuples)
        return summaries
import collections
class tally_fields(object):
    def __init__(self, top_count=3, fields=None):
        """
        Tally (count) selected fields over a list of tuples.

        Args:
            top_count : number of most-common values to report per field.
            fields : fields of the tuples to be tallied; defaults to
                ['user', 'wiki', 'title'].
        """
        # None sentinel avoids a shared mutable default list.
        self.fields = ['user', 'wiki', 'title'] if fields is None else fields
        self.top_count = top_count

    def __call__(self, tuples) -> dict:
        """
        Args:
            tuples : list of tuples (dicts) to tally; None values are ignored.
        Returns:
            dict mapping each field to its top_count most common
            (value, count) pairs.
        """
        tallies = dict()
        for field in self.fields:
            # Loop variable named 'tup' so the builtin 'tuple' is not shadowed.
            values = [tup[field] for tup in tuples if tup[field] is not None]
            tallies[field] = collections.Counter(values).most_common(self.top_count)
        return tallies
import csv
class wiki_lang():
    """
    Augment the tuple to include the language of the wiki event.
    The mapping is loaded at build time and utilized at runtime.
    """
    def __init__(self, fname="wikimap.csv"):
        """Load the dbname -> language mapping from the given CSV file."""
        self.wiki_map = dict()
        with open(fname, mode='r') as csv_file:
            for row in csv.DictReader(csv_file):
                self.wiki_map[row['dbname']] = row

    def __call__(self, event):
        """Use the 'wiki' field to look up the page's code, language and native name.

        Args:
            event: tuple (dict) with a 'wiki' field.  (Parameter renamed
                from 'tuple' so the builtin is not shadowed; this callable
                is invoked positionally by the Streams map operator.)
        Returns:
            the input tuple with 'code', 'language' and 'native' fields
            added (all None when the wiki is unknown).
        """
        row = self.wiki_map.get(event['wiki'])
        if row is not None:
            event['code'] = row['code']
            event['language'] = row['in_english']
            event['native'] = row['name_language']
        else:
            event['code'] = event['language'] = event['native'] = None
        return event
###Output
_____no_output_____
###Markdown
Shredding web pagesThe next phase of the Stream will be to check if the event is associated with an image, if it is extract the image URL. - find possible link to image- build url and use to fetch page, shred, searching for an image link- shredding can go down mulitple levels.
###Code
# moved to scripts directory, issues with having function definine in notebook.
import sys
if '../scripts' not in sys.path:
sys.path.insert(0, '../scripts')
from streams_operations import soup_image_extract
###Output
_____no_output_____
###Markdown
Compose, build and submit the Streams application.The following Code cell composed the Streams application depicted here:![stillPhase3.jpg](images/stillPhase3.jpg) This is notebook is an extention of the previous, I'll only discuss processing beyond 'langAugment' for details regarding prior processing refer to previous [notebook](./imgAna_2.ipynb)s.The events output by the map named 'langAugment' are limited to those with of type 'edit' and bot is 'False'. The fields are: code, delta_len, language, native, new_len, old_len, timestamp,title, user and wiki. This phase uses the 'title' field to build a url of a webpage, the webpage is feched and processed looking for a image URL. The map method named 'imageSoup' invokes soup_image_extract() where it uses the 'title' field attempting to locate an image. If no image is found, None is returned and nothing flows out of the operator. If an image is found then the output includes a 'img_desc' field. A filter is applied to the 'img_desc' for content, if it does have content the tuple procedes to the view 'soupActive' where it can be viewed.
###Code
list_jobs(instance, cancel=True)
def WikiPhase3(jobName=None, wiki_lang_fname=None):
    """
    Compose the Streams topology: wiki SSE events -> human-edit filter ->
    pared columns -> windowed aggregations/tallies -> language augmentation
    -> image-link extraction (BeautifulSoup), with a view at each stage.

    Args:
        jobName : name given to the composed Topology/job.
        wiki_lang_fname : unused here; the wikimap.csv path is chosen
            below from the global ``cfg`` instead — TODO confirm intent.

    Returns:
        dict with 'topo' (the Topology to submit) and 'view' (empty
        placeholder dict).
    """
    topo = Topology(name=jobName)
    ### make sure we sseclient in Streams environment.
    topo.add_pip_package('SSEClient===0.0.22')
    topo.add_pip_package('bs4')
    ## wiki events
    wiki_events = topo.source(get_events, name="wikiEvents")
    ## select events generated by humans (type 'edit' and not a bot)
    human_filter = wiki_events.filter(lambda x: x['type']=='edit' and x['bot'] is False, name='humanFilter')
    # pare down the humans set of columns
    pared_human= human_filter.map(lambda x : {'timestamp':x['timestamp'],
                              'new_len':x['length']['new'],
                              'old_len':x['length']['old'],
                              'delta_len':x['length']['new'] - x['length']['old'],
                              'wiki':x['wiki'],'user':x['user'],
                              'title':x['title']},
                 name="paredHuman")
    pared_human.view(buffer_time=1.0, sample_size=200, name="paredEdits", description="Edits done by humans")
    ## Define window(count)& aggregate: last 100 tuples, fire every 20
    sum_win = pared_human.last(100).trigger(20)
    sum_aggregate = sum_win.aggregate(sum_aggregation(sum_map={'new_len':'newSum','old_len':'oldSum','delta_len':'deltaSum' }), name="sumAggregate")
    sum_aggregate.view(buffer_time=1.0, sample_size=200, name="aggEdits", description="Aggregations of human edits")
    ## Define window(count) & tally edits: last 100 tuples, fire every 10
    tally_win = pared_human.last(100).trigger(10)
    tally_top = tally_win.aggregate(tally_fields(fields=['user', 'title'], top_count=10), name="talliesTop")
    tally_top.view(buffer_time=1.0, sample_size=200, name="talliesCount", description="Top count tallies: user,titles")
    ## augment filterd/pared edits with language
    # cfg is None outside ICP4D (see get_instance); path differs per platform.
    if cfg is None:
        lang_augment = pared_human.map(wiki_lang(fname='../datasets/wikimap.csv'), name="langAugment")
    else:
        lang_augment = pared_human.map(wiki_lang(fname=os.environ['DSX_PROJECT_DIR']+'/datasets/wikimap.csv'), name="langAugment")
    lang_augment.view(buffer_time=1.0, sample_size=200, name="langAugment", description="Language derived from wiki")
    ## Define window(time) & tally language: 2-minute window, fire every 5 tuples
    time_lang_win = lang_augment.last(datetime.timedelta(minutes=2)).trigger(5)
    time_lang = time_lang_win.aggregate(tally_fields(fields=['language'], top_count=10), name="timeLang")
    time_lang.view(buffer_time=1.0, sample_size=200, name="talliesTime", description="Top timed tallies: language")
    ## attempt to extract image using beautifulsoup add img_desc[{}] field
    soup_image = lang_augment.map(soup_image_extract(field_name="title", url_base="https://www.wikidata.org/wiki/"),name="imgSoup")
    # only tuples where an image link was actually found flow on
    soup_active = soup_image.filter(lambda x: x['img_desc'] is not None and len(x['img_desc']) > 0, name="soupActive")
    soup_active.view(buffer_time=1.0, sample_size=200, name="soupActive", description="Image extracted via Bsoup")
    soup_active.publish(topic="soup_active")
    return ({"topo":topo,"view":{ }})
###Output
_____no_output_____
###Markdown
Submitting job : ICP or Cloud
###Code
import os
# os.environ["JAVA_HOME"]="/Library/Java/JavaVirtualMachines/jdk-13.0.1.jdk/Contents/Home"
# Raises KeyError if JAVA_HOME is not set in the environment (intentional sanity check).
os.environ["JAVA_HOME"]
# set in .bashrc and .zshrc
# Compose the topology; submission target depends on cfg (ICP4D vs Cloud).
resp = WikiPhase3(jobName="WikiPhase3")
if cfg is not None:
    # Disable SSL certificate verification if necessary
    cfg[context.ConfigParams.SSL_VERIFY] = False
    submission_result = context.submit("DISTRIBUTED",resp['topo'], config=cfg)
if cfg is None:
    import credential
    cloud = {
        context.ConfigParams.VCAP_SERVICES: credential.vcap_conf,
        context.ConfigParams.SERVICE_NAME: "Streaming3Turbine",
        context.ContextTypes.STREAMING_ANALYTICS_SERVICE:"STREAMING_ANALYTIC",
        context.ConfigParams.FORCE_REMOTE_BUILD: True,
    }
    submission_result = context.submit("STREAMING_ANALYTICS_SERVICE",resp['topo'],config=cloud)
# The submission_result object contains information about the running application, or job
if submission_result.job:
    print("JobId: ", submission_result['id'] , "Name: ", submission_result['name'])
###Output
_____no_output_____
###Markdown
Viewing data The running application has a number of views to see what data is moving through the stream. The following cell will fetch the views' queue and display its data when selected. |view name | description of data in the view | bot ||---------|-------------|------||aggEdits | summarised fields | False ||langAugment | mapped augmented fields | False ||paredEdits | selected fields | False ||talliesCount | last 100 messages tallied | False | |talliesTimes | 2 minute windowed | False ||soupActive | extracted image links| False | You will want to stop fetching the view data when done. Access Views / Render Views UI
###Code
# Render the views.....
display_views(instance, job_name="WikiPhase3")
###Output
_____no_output_____
###Markdown
Render image submitted to wiki feed Build a dashboard to display images that are being submitted to Wikipedia. It's not uncommon to see the same image multiple times. An image (any content) may need to be vetted for quality, copyright, pornography etc... Each vet stage generates another event on the Stream. A variety of images are submitted; unfortunately not all images are rendered in all browsers. I found that the Safari browser can render .tif files.
###Code
# Notebook support
def render_image(image_url=None, output_region=None):
    """Fetch *image_url* and write the image into an output region.

    Args:
        image_url: URL of the image to fetch.
        output_region: ipywidgets output region to render into.

    .. note:: The image is first staged in a separate Output widget; if
       this is not done the image is rendered both in the page and in
       the output region.
    """
    try:
        # Keep the try body minimal: only the network request can
        # legitimately fail here.
        response = requests.get(image_url)
    except requests.exceptions.RequestException:
        # Narrowed from a bare 'except:' which also swallowed programming
        # errors and KeyboardInterrupt.
        print("Error on request : ", image_url)
    else:
        if response.status_code == 200:
            stage = widgets.Output(layout={'border': '1px solid green'})
            with output_region:
                stage.append_display_data(widgets.Image(
                    value=response.content,
                    #format='jpg',
                    width=300,
                    height=400,
                ))
            output_region.clear_output(wait=True)
ana_stage = []

def display_image(tup, image_region=None, title_region=None, url_region=None):
    """Render the first image description of *tup*, updating the dashboard widgets."""
    descs = tup['img_desc']
    if descs is None or len(descs) == 0:
        # Nothing to show for this tuple.
        return
    desc = descs[0]
    ana_stage.append(desc)
    title_region.value = "Img Title:{}".format(desc['title'])
    url_region.value = "{}".format(desc['img'])
    render_image(image_url=desc['img'], output_region=image_region)
###Output
_____no_output_____
###Markdown
Show me now
###Code
## Setup the Dashboard - display images sent to Wikipedia
## Next cell populates the 'Dashboard'.....
# One label for the event title, one for the image URL, an Output region
# for the rendered image, and a label for the image title.
status_widget = widgets.Label(value="Status", layout={'border': '1px solid green','width':'30%'})
url_widget = widgets.Label(value="Img URL", layout={'border': '1px solid green','width':'100%'})
image_widget = widgets.Output(layout={'border': '1px solid red','width':'30%','height':'270pt'})
title_widget = widgets.Label(value="Title", layout={'border': '1px solid green','width':'30%'})
dashboard = widgets.VBox([status_widget, image_widget, title_widget, url_widget])
display(dashboard)
# Notebook support
# setup
# Start pulling tuples from the job's 'soupActive' view defined in WikiPhase3.
_view = instance.get_views(name="soupActive")[0]
_view.start_data_fetch()
@catchInterrupt
def server_soup(count=25):
    """Fetch tuples from the 'soupActive' view and display their images.

    Args:
        count: number of fetch iterations; a negative count loops forever.
    """
    remaining = count
    while remaining != 0:
        remaining -= 1
        for tup in _view.fetch_tuples(max_tuples=100, timeout=2):
            status_widget.value = tup['title']
            display_image(tup,
                          image_region=image_widget,
                          title_region=title_widget,
                          url_region=url_widget)
server_soup()
###Output
_____no_output_____
###Markdown
Cancel jobs when your done
###Code
list_jobs(instance, cancel=True)
###Output
_____no_output_____ |
Cloud_testing/CCMP_TC_patterns.ipynb | ###Markdown
Where are the strong storms occurring? Is there any shift in latitudinal extent?
###Code
# Count strong-wind observations per latitude for several wind-speed
# thresholds.
# NOTE(review): the original cell did not compile (missing ':' on the for
# statement, unindented loop body, incomplete assignments 'mn_yr['year']='
# and an undefined 'm'); reconstructed so the loop actually uses the
# threshold variable 'iwnd' instead of the hard-coded 25.
d, x = [], []
for iwnd in range(20, 36, 5):
    high_val = ds.wspd.where(ds.wspd > iwnd)
    high_cnt = high_val / high_val          # 1 where above threshold, NaN elsewhere
    high_lat = high_cnt.sum({'longitude'})  # count of exceedances per latitude
    d.append(high_lat.compute())
    x.append(iwnd)
mn_yr = xr.concat(d, dim='year')
mn_yr['year'] = x
glb_mn = np.mean(mn_yr)
high_lat.transpose().plot(cmap='YlOrBr', vmin=0, vmax=10)
plt.savefig('../../figures/num_storms.png')
# maybe era5 reanalysis doesn't see strong wind events, try something else?
from intake import open_catalog
# Browse the top-level Pangeo catalog to see what atmosphere datasets exist.
cat = open_catalog("https://raw.githubusercontent.com/pangeo-data/pangeo-datastore/master/intake-catalogs/master.yaml")
list(cat['atmosphere'])
# Open the atmosphere sub-catalog directly and load ERA5 as a dask-backed dataset.
from intake import open_catalog
cat = open_catalog("https://raw.githubusercontent.com/pangeo-data/pangeo-datastore/master/intake-catalogs/atmosphere.yaml")
ds = cat["era5_hourly_reanalysis_single_levels_sa"].to_dask()
ds
ds.u10[0,:,:].plot()
#ts = ds.sel(lon=-122.66,lat=38.45,method='nearest')
# Pick the grid point nearest Santa Rosa, CA (longitude given in 0-360 form as -237.34).
ts = ds.sel(longitude=-237.34,latitude=38.45,method='nearest')
# Daily means, materialized into memory.
dy = ts.resample(time='1D').mean().load()
###Output
_____no_output_____ |
Assignment 3/iNeuron_Python_Assignment_3.ipynb | ###Markdown
1.1 Write a Python Program to implement your own myreduce() function which works exactly like Python's built-in function reduce() Multiply all the numbers in a list
###Code
#program to multiply all numbers in a list
# Build the list 1..num from the user's input.
num=int(input("Please insert the initiation number: "))
numlist=list(range(1,num+1))
def multiplynums(numlist):
    """Return the product of every number in *numlist* (1 for an empty list)."""
    result = 1
    for value in numlist:
        result *= value
    return result
# Compute and report the product of the generated list.
product_nums_list=multiplynums(numlist)
print("The list of numbers is", numlist)
print("The product is", product_nums_list)
###Output
Please insert the initiation number: 88
The list of numbers is [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88]
The product is 185482642257398439114796845645546284380220968949399346684421580986889562184028199319100141244804501828416633516851200000000000000000000
###Markdown
1.2 Write a Python program to implement your own myfilter() function which works exactly like Python's built-in function filter() Filter the even and odd number from list
###Code
mylist=[2,4,6,8,11,14,1,5,17,18,24,567,5677,88534,23,45]
# Partition into evens and odds with comprehensions instead of an explicit loop.
evenlist = [n for n in mylist if n % 2 == 0]
oddlist = [n for n in mylist if n % 2 != 0]
print(f"Even list: {evenlist}")
print(f"Odd list: {oddlist}")
#Filter the even and odd number from list with a custom filter function
# Build the list 1..num from the user's input.
num=int(input("Please insert the initiation number: "))
numlist=list(range(1,num+1))
def myfilter(numlist):
    """Split *numlist* into its even and odd members.

    Returns:
        (evenlist, oddlist) tuple of two lists.
    """
    evenlist = [n for n in numlist if n % 2 == 0]
    oddlist = [n for n in numlist if n % 2 != 0]
    return evenlist, oddlist
# myfilter returns an (evens, odds) tuple; report both halves.
lists=myfilter(numlist)
print("List of numbers:",numlist)
print("List of Even numbers:",lists[0])
print("List of Odd numbers:",lists[1])
###Output
Please insert the initiation number: 88
List of numbers: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88]
List of Even numbers: [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88]
List of Odd numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87]
###Markdown
Implement List comprehensions to produce the following lists.Write List comprehensions to produce the following Lists ['x', 'xx', 'xxx', 'xxxx', 'y', 'yy', 'yyy', 'yyyy', 'z', 'zz', 'zzz', 'zzzz']['x', 'y', 'z', 'xx', 'yy', 'zz', 'xxx', 'yyy', 'zzz', 'xxxx', 'yyyy', 'zzzz'][[2], [3], [4], [3], [4], [5], [4], [5], [6]][[2, 3, 4, 5], [3, 4, 5, 6],[4, 5, 6, 7], [5, 6, 7, 8]][(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
###Code
# 'x','xx',...,'y','yy',... : letter varies slowest, repetition fastest.
list1=['x', 'y', 'z']
modlist1=[i*k for i in list1 for k in range(1,5)]
modlist1
# 'x','y','z','xx','yy','zz',... : repetition varies slowest.
list2=['x', 'y', 'z']
# Fixed: iterate list2 here — the original iterated list1, leaving list2 unused.
modlist2=[i*k for k in range(1,5) for i in list2]
modlist2
# Single-element lists [x+y] for x in 2..4, y in 0..2.
mylist3=[[x+y] for x in range(2,5) for y in range(3)]
mylist3
# Four consecutive integers starting at each i in 2..5.
mylist4=[[i, i+1, i+2, i+3] for i in range(2,6)]
mylist4
# Pairs (j,i): j varies fastest, i slowest.
mylist5=[(j,i) for i in [1,2,3] for j in [1,2,3]]
mylist5
###Output
_____no_output_____ |
types_of_features_and_image_segmentation/Contour detection and features.ipynb | ###Markdown
Finding Contours Import resources and display image
###Code
import numpy as np
import matplotlib.pyplot as plt
import cv2
%matplotlib inline
# Read in the image
# (cv2.imread returns None rather than raising when the path is missing)
image = cv2.imread('images/thumbs_up_down.jpg')
# Change color to RGB (from BGR) so matplotlib displays the colors correctly
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
###Output
_____no_output_____
###Markdown
Produce a binary image for finding contours
###Code
# Convert to grayscale
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Create a binary thresholded image
# THRESH_BINARY_INV: pixels brighter than 225 become 0, the rest become 255,
# so the dark hands end up as white foreground on a black background.
retval, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
plt.imshow(binary, cmap='gray')
###Output
_____no_output_____
###Markdown
Find and draw the contours
###Code
# Find contours from thresholded, binary image
# NOTE(review): the 3-value unpacking matches the OpenCV 3.x API; OpenCV 4.x
# returns only (contours, hierarchy) -- confirm the installed cv2 version.
retval, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw all contours on a copy of the original image
contours_image = np.copy(image)
# -1 draws every contour; (0,255,0) is green, line thickness 3 px.
contours_image = cv2.drawContours(contours_image, contours, -1, (0,255,0), 3)
plt.imshow(contours_image)
###Output
_____no_output_____
###Markdown
Contour FeaturesEvery contour has a number of features that you can calculate, including the area of the contour, it's orientation (the direction that most of the contour is pointing in), it's perimeter, and many other properties outlined in [OpenCV documentation, here](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.html).In the next cell, you'll be asked to identify the orientations of both the left and right hand contours. The orientation should give you an idea of which hand has its thumb up and which one has its thumb down! OrientationThe orientation of an object is the angle at which an object is directed. To find the angle of a contour, you should first find an ellipse that fits the contour and then extract the `angle` from that shape. ```python Fit an ellipse to a contour and extract the angle from that ellipse(x,y), (MA,ma), angle = cv2.fitEllipse(selected_contour)```**Orientation values**These orientation values are in degrees measured from the x-axis. A value of zero means a flat line, and a value of 90 means that a contour is pointing straight up!So, the orientation angles that you calculated for each contour should be able to tell us something about the general position of the hand. The hand with it's thumb up, should have a higher (closer to 90 degrees) orientation than the hand with it's thumb down. TODO: Find the orientation of each contour
###Code
## TODO: Complete this function so that
## it returns the orientations of a list of contours
## The list should be in the same order as the contours
## i.e. the first angle should be the orientation of the first contour
def orientations(contours):
    """
    Orientation

    :param contours: a list of contours
    :return: angles, the orientations of the contours (degrees measured
        from the x-axis), in the same order as the contours
    """
    # cv2.fitEllipse returns ((x, y), (MA, ma), angle); keep only the angle.
    return [cv2.fitEllipse(contour)[2] for contour in contours]
# ---------------------------------------------------------- #
# Print out the orientation values
angles = orientations(contours)
print('Angles of each contour (in degrees): ' + str(angles))
###Output
Angles of each contour (in degrees): [61.35833740234375, 82.27550506591797]
###Markdown
Bounding RectangleIn the next cell, you'll be asked to find the bounding rectangle around the *left* hand contour, which has its thumb up, then use that bounding rectangle to crop the image and better focus on that one hand!```python Find the bounding rectangle of a selected contourx,y,w,h = cv2.boundingRect(selected_contour) Draw the bounding rectangle as a purple boxbox_image = cv2.rectangle(contours_image, (x,y), (x+w,y+h), (200,0,200),2)```And to crop the image, select the correct width and height of the image to include.```python Crop using the dimensions of the bounding rectangle (x, y, w, h)cropped_image = image[y: y + h, x: x + w] ``` TODO: Crop the image around a contour
###Code
## TODO: Complete this function so that
## it returns a new, cropped version of the original image
def left_hand_crop(image, selected_contour):
    """
    Left hand crop

    :param image: the original image
    :param selected_contour: the contour that will be used for cropping
    :return: cropped_image, the cropped image around the left hand
    """
    # Bounding rectangle of the contour, then slice a copy of the image to it.
    x, y, w, h = cv2.boundingRect(selected_contour)
    cropped_image = np.copy(image)[y: y + h, x: x + w]
    return cropped_image
## TODO: Select the left hand contour from the list
## Replace this value
# NOTE(review): findContours makes no ordering guarantee, so index 1 may
# not always be the left hand -- verify visually.
selected_contour = contours[1]
# ---------------------------------------------------------- #
# If you've selected a contour
if(selected_contour is not None):
    # Call the crop function with that contour passed in as a parameter
    cropped_image = left_hand_crop(image, selected_contour)
    plt.imshow(cropped_image)
###Output
_____no_output_____ |
HW1/assignment1.ipynb | ###Markdown
CS217 : Assignment 1 ---Please edit the cell below to include your name and student ID **name: Shi Zeng**SID: 45167563 Jupyter Notebook Tutorials We will make extensive use of Python's numerical arrays (NumPy) and interactive plotting (Matplotlib) in Jupyter notebooks for the course assignments. This first assignment is intended as a gentle warm up in case you haven't used these tools before. Start by reading through the following tutorials:https://jupyter-notebook.readthedocs.io/en/stable/notebook.htmlstarting-the-notebook-serverhttps://nbviewer.jupyter.org/github/jupyter/notebook/blob/master/docs/source/examples/Notebook/Notebook%20Basics.ipynbhttps://nbviewer.jupyter.org/github/jupyter/notebook/blob/master/docs/source/examples/Notebook/Running%20Code.ipynbThis page gives a good introduction to NumPy and many examples of using NumPy along with Matplotlib:http://www.scipy-lectures.org/intro/numpy/numpy.htmlYou should also get comfortable with searching through the documentation as neededhttps://docs.scipy.org/doc/numpy-1.13.0/reference/index.htmlhttps://matplotlib.org/api/_as_gen/matplotlib.pyplot.html 1. Thin lense approximationDerive the thin lens equation we discussed in class in a geometric way using only the constraints that (1) rays entering parallel to the lense converge on the focal point (2) the lens is symmetric as shown in the diagram below. Prove that $\frac{1}{D} + \frac{1}{D'} = \frac{1}{f}$. You will likely want to draw your own diagram and introduce some additional variables in order to make your argument. Please add additional images and equations to the cell below as needed to support your argument. 
Answer ![Len_1.png](attachment:Len_1.png) The above pair of similar triangles shows that:$\frac{y}{y^{'}} = \frac{D}{D^{'}}$ ![Len_2.png](attachment:Len_2.png) The above pair of similar triangles shows that:$\frac{S}{y^{'}} = \frac{f}{T}$As $T = D^{'} - f$, and $S = y$ from the same rectangle:$\frac{y}{y^{'}} = \frac{f}{D^{'} - f}$Therefore:$ \frac{D}{D^{'}} = \frac{f}{D^{'} - f}$$D(D^{'} - f) = D^{'}f$Divide $DD^{'}$ at both sides:$1 - \frac{f}{D^{'}} = \frac{f}{D}$$\frac{1}{f} - \frac{1}{D^{'}} = \frac{1}{D}$$\frac{1}{f} = \frac{1}{D^{'}} + \frac{1}{D}$ 2. Cameras & Triangulation *** 2.1 Field of View Suppose your camera has an image sensor that is 640x480 pixels, a physical resolution of 10pixels/mm and a focal length of f=50mm. What is the horizontal field of view (in degrees)? What is the vertical field of view? Suppose you adjust the zoom on the camera, changing the focal length to 100mm. What is the new horizontal field of view? Answer:The vertical length of image sensor is $$\frac{480}{10} = 48(mm)$$The tangent of half of the field of view is $$\frac{48}{2} \frac{1}{50} = \frac{12}{25}$$The vertical field of view is $2 arctan(\frac{12}{25})$The horizontal length of image sensor is $$\frac{640}{10} = 64(mm)$$The tangent of half of the field of view is $$\frac{64}{2} \frac{1}{50} = \frac{16}{25}$$The horizontal field of view is $2 arctan(\frac{16}{25})$Changing the focal length to $100mm$The new horizontal field of view is $2 arctan(\frac{8}{25})$
###Code
import numpy as np
# Field of view = 2*arctan(half sensor extent / focal length), converted to degrees;
# the arctan arguments (12/25, 16/25, 8/25) come from the derivation above.
print("The vertical field of view is %.6f degrees." % (2 * np.arctan(12. / 25.) * 180 / np.pi))
print("The horizontal field of view is %.6f degrees." % (2 * np.arctan(16. / 25.) * 180 / np.pi))
print("The new horizontal field of view is %.6f degrees." % (2 * np.arctan(8. / 25.) * 180 / np.pi))
###Output
The vertical field of view is 51.282012 degrees.
The horizontal field of view is 65.238486 degrees.
The new horizontal field of view is 35.489343 degrees.
###Markdown
*** 2.2 Camera motionsYour camera starts out at the origin of the world coordinate system. You rotate it to the left about the y-axis by 45 degrees and then translate it right by 1 meter. Describe this motion of the camera concisely in terms of rotation matrices and translation vectors. Suppose there is a point with coordinates (1,1,1) meters in the world coordinate system. What will its coordinates be relative to the standard camera coordinate system after the camera has been moved? Answer: ![coord.png](attachment:coord.png) Rotate the camera to the left about the y-axis by 45 degrees, the points rotate right about the y-axis by 45 degrees to the camera coordinate system. Therefore, the rotation matrix is :$$R = \left[\begin{array}{ccc} cos(\frac{\pi}{4}) & 0 & sin(\frac{\pi}{4})\\0 & 1 & 0\\-sin(\frac{\pi}{4}) & 0 & cos(\frac{\pi}{4})\end{array}\right] = \left[\begin{array}{ccc}\frac{\sqrt{2}}{2} & 0 & \frac{\sqrt{2}}{2}\\0 & 1 & 0\\-\frac{\sqrt{2}}{2} & 0 & \frac{\sqrt{2}}{2}\end{array}\right]$$Here assume the "translation to the right by 1 meter" is __examined under the world coordinates__.The translation vector is $$t = \left[\begin{array}{ccc} cos(\frac{\pi}{4}) & 0 & sin(\frac{\pi}{4})\\0 & 1 & 0\\-sin(\frac{\pi}{4}) & 0 & cos(\frac{\pi}{4})\end{array}\right]\left[\begin{array}{c} 0\\0\\1\end{array}\right] = \left[\begin{array}{c} \frac{\sqrt{2}}{2}\\0\\\frac{\sqrt{2}}{2}\end{array}\right]$$The camera coordinate is $$p = RP + Rt = R(P + t)$$When $$P = \left[\begin{array}{c} 1\\1\\1\end{array}\right]$$The camera coordinate $$p = \left[\begin{array}{ccc} cos(\frac{\pi}{4}) & 0 & sin(\frac{\pi}{4})\\0 & 1 & 0\\-sin(\frac{\pi}{4}) & 0 & cos(\frac{\pi}{4})\end{array}\right] \left[\begin{array}{c} 1\\1\\1\end{array}\right] + \left[\begin{array}{c} \frac{\sqrt{2}}{2}\\0\\\frac{\sqrt{2}}{2}\end{array}\right]$$$$p = \left[\begin{array}{c} \frac{3\sqrt{2}}{2}\\1\\\frac{\sqrt{2}}{2}\end{array}\right]$$ 2.3 Estimate the extrinsic parameters for your eyesWork relative to 
a world coordinate system centered on the bridge of your nose with the z-axis pointed out of your face and x-axis to the right. Since eyes rotate in their sockets depending on how far away you focus (this phenomenon is called vergence) assume you are looking at a computer monitor that is 40 cm away from your face. You may find it interesting to find some red/blue glasses and modify the projection code to render anaglyphs of various 3D shapes overlaying the left and right eye views in red and blue. The following parameters should be passed into the project function mapping 3D point on computer monitor to perceived image of eyes.The distance between two eyes, $2b$: the translation matrix for left eye will be $[-b, 0, 0]$, the translation matrix for right eye will be $[b, 0, 0]$.The distance between face and the actual object, $z$ (given by 40cm): the rotated angle of right camera will be $-\theta = arctan(\frac{z}{b}) - \frac{\pi}{2}$, the rotated angle of left camera will be $\theta = \frac{\pi}{2} - arctan(\frac{z}{b})$.the right rotation matrix will be $$R_{right} = \left[\begin{array}{ccc}cos(\theta) & -sin(\theta) & 0 \\sin(\theta) & cos(\theta) & 0 \\0 & 0 & 1\end{array}\right]$$the left rotation matrix will be $$R_{left} = \left[\begin{array}{ccc}cos(\theta) & sin(\theta) & 0 \\-sin(\theta) & cos(\theta) & 0 \\0 & 0 & 1\end{array}\right]$$The focal length of both eyes $fL, fR$.The difference in height of both eyes to computer monitor focus point, so that the eye offset to world coordinate system can bu computed. 2.4 TriangulationWrite a function called **triangulate** that takes the coordinates of points in two images along with the camera parameters and returns the 3D coordinates of the points in world coordinates. We discussed a couple different approaches to trianguation in class. You are welcome to use whichever you prefer. 
The provided functions **project** and **test_triangulation** outline a reasonable way to represent the camera parameters and should be useful in experimenting with your code.
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
class Camera:
    """
    Simple container for pinhole-camera parameters.

    Attributes
    ----------
    f : float
        Focal length in units of pixels.
    c : 2x1 numpy.array
        Principal-point offset.
    R : 3x3 numpy.array
        Camera rotation (camera-to-world).
    t : 3x1 numpy.array
        Camera translation (camera center in world coordinates).
    """

    def __init__(self, f, c, R, t):
        self.f = f
        self.c = c
        self.R = R
        self.t = t

    def project(self, pts3):
        """
        Project 3D world points into this camera.

        Parameters
        ----------
        pts3 : 2D numpy.array (dtype=float)
            Coordinates of N points stored in an array of shape (3, N).

        Returns
        -------
        pts2 : 2D numpy.array (dtype=float)
            Image coordinates of the N points, shape (2, N).
        """
        assert pts3.shape[0] == 3
        # express the points in the camera frame: R^T (X - t)
        cam_pts = self.R.T @ (pts3 - self.t)
        # perspective division followed by focal-length scaling
        scaled = self.f * (cam_pts / cam_pts[2, :])
        # shift by the principal point and drop the homogeneous row
        pts2 = scaled[0:2, :] + self.c
        assert pts2.shape == (2, pts3.shape[1])
        return pts2
def generate_hemisphere(radius, center, npts):
    """
    Sample npts random points on the lower half of a sphere surface.

    Parameters
    ----------
    radius : float
        Hemisphere radius.
    center : numpy.array (dtype=float)
        3x1 vector giving the hemisphere center.
    npts : int
        Number of points to generate.

    Returns
    -------
    pts : 2D numpy.array (dtype=float)
        3 x npts array of point coordinates.
    """
    assert center.shape == (3, 1)
    # gaussian directions are uniform on the sphere once normalized
    pts = np.random.standard_normal((3, npts))
    norms = np.sqrt(np.sum(pts * pts, axis=0))
    pts = radius * pts / norms
    # fold all points into the z <= 0 half-space (relative to the origin)
    pts[2, :] = -np.abs(pts[2, :])
    # translate the hemisphere to the requested center
    return pts + center
# demo the generate_hemisphere function (disabled; flip the guard to True to run)
if False:
    x = generate_hemisphere(1,np.array([[0,0,0],]).T,2000)
    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') was removed in Matplotlib 3.6;
    # newer versions need fig.add_subplot(projection='3d').
    ax = fig.gca(projection='3d')
    ax.plot(x[0,:],x[1,:],x[2,:],'.')
    ax.axis('square')
    ax.set_zlim(-1,1)
def visualize(a, b):
    """Plot two point sets as open red circles in a single 3D axes."""
    # NOTE(review): this indexes points as rows (N,3), while the rest of the
    # file stores points as columns (3,N) -- confirm the expected layout.
    fig = plt.figure()
    axes3d = fig.add_subplot(1, 1, 1, projection='3d')
    for pts in (a, b):
        axes3d.plot(pts[:, 0], pts[:, 1], pts[:, 2], 'ro', fillstyle='none')
    plt.axis('equal')
    plt.xlabel('x')
    plt.ylabel('y')
def triangulate(pts2L, camL, pts2R, camR):
    """
    Triangulate points seen at pts2L / pts2R in a pair of cameras.

    For each correspondence, back-projects the two pixels to rays, solves a
    2-unknown least-squares problem for the depth along each ray, and returns
    the average of the two resulting world-space estimates.

    Parameters
    ----------
    pts2L : 2D numpy.array (dtype=float)
        (2, N) image coordinates observed by camL.
    pts2R : 2D numpy.array (dtype=float)
        (2, N) image coordinates observed by camR.
    camL, camR : Camera
        The "left" and "right" camera parameters (f, c, R, t).

    Returns
    -------
    pts3 : 2D numpy.array (dtype=float)
        (3, N) array of triangulated 3D points in world coordinates.
    """
    N = pts2L.shape[1]
    # pose of the right camera expressed in the left-camera frame
    invRL = np.linalg.inv(camL.R)
    R = invRL.dot(camR.R)
    t = invRL.dot(camR.t - camL.t)
    # back-project pixels to unit-depth rays in each camera frame
    pL = np.concatenate((pts2L - camL.c, np.full((1, N), camL.f)), axis=0) / camL.f
    pR = np.concatenate((pts2R - camR.c, np.full((1, N), camR.f)), axis=0) / camR.f
    # stack [rayL, -R rayR] so solving A z = t yields the depth along each ray
    camera = np.concatenate((pL[:, np.newaxis, :], (-R.dot(pR))[:, np.newaxis, :]), axis=1)
    for i in range(N):
        # FIX: pass rcond=None -- the original call emitted a NumPy
        # FutureWarning (visible in the notebook output) and its behavior
        # changed with newer NumPy defaults.
        z = np.linalg.lstsq(camera[:, :, i], t, rcond=None)[0]
        pL[:, i] = z[0] * pL[:, i]
        pR[:, i] = z[1] * pR[:, i]
    # map both depth-scaled rays back to world coordinates and average
    PL = camL.R.dot(pL) + camL.t
    PR = camR.R.dot(pR) + camR.t
    pts = (PL + PR) / 2
    return pts
%matplotlib notebook
#
# test your camera and triangulate function
#
# create a rotation matrix representing rotation around y-axis by amount theta
def roty(theta):
    """Return the 3x3 rotation matrix for a rotation of theta radians about the y-axis."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
#compute rotation angle so that the camera is looking directly at the sphere
# NOTE: this cell defines globals (b, d, theta, tL, tR, camL, camR, pts3,
# pts2L, pts2R, pts3t) that later experiment cells reuse.
b = 5 #baseline between cameras
d = 10 #distance to object
theta = np.arctan(b/d)
# camera centers at +/-b on the x-axis, rotated so both optical axes
# converge on the object at distance d
tL = np.array([[-b,0,0]]).T
tR = np.array([[b,0,0]]).T
camL = Camera(f=100,c=np.array([[50,50]]).T,t=tL,R=roty(theta))
camR = Camera(f=100,c=np.array([[50,50]]).T,t=tR,R=roty(-theta))
#generate 3D points
pts3 = generate_hemisphere(2, np.array([[0,0,d]]).T,500)
#project into each camera
pts2L = camL.project(pts3)
pts2R = camR.project(pts3)
#triangulate to recover 3d position
pts3t = triangulate(pts2L, camL, pts2R, camR)
#
# visualize results
#
# generate coordinates of a line segment running from the center
# of the camera to 3 units in front of the camera
lookL = np.hstack((tL,tL+camL.R @ np.array([[0,0,3]]).T))
lookR = np.hstack((tR,tR+camR.R @ np.array([[0,0,3]]).T))
# visualize the left and right image overlaid
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(pts2L[0,:],pts2L[1,:],'b.')
ax.plot(pts2R[0,:],pts2R[1,:],'r.')
plt.axis('equal')
#visualize 3D layout of points, camera positions
# and the direction the camera is pointing
ax = fig.add_subplot(2,2,2,projection='3d')
ax.plot(pts3[0,:],pts3[1,:],pts3[2,:],'.')
ax.plot(tR[0],tR[1],tR[2],'ro')
ax.plot(tL[0],tL[1],tL[2],'bo')
ax.plot(lookL[0,:],lookL[1,:],lookL[2,:],'b')
ax.plot(lookR[0,:],lookR[1,:],lookR[2,:],'r')
plt.axis('equal')
plt.xlabel('x')
plt.ylabel('y')
# overhead view showing points, camera
# positions, and direction camera is pointed
ax = fig.add_subplot(2,2,3)
ax.plot(pts3[0,:],pts3[2,:],'.')
ax.plot(tL[0],tL[2],'bo')
ax.plot(lookL[0,:],lookL[2,:],'b')
ax.plot(tR[0],tR[2],'ro')
ax.plot(lookR[0,:],lookR[2,:],'r')
plt.axis('equal')
plt.grid()
plt.xlabel('x')
plt.ylabel('z')
# compare reconstruction: blue dots = ground truth, red circles = estimate
ax = fig.add_subplot(2,2,4,projection='3d')
ax.plot(pts3[0,:],pts3[1,:],pts3[2,:],'b.')
ax.plot(pts3t[0,:],pts3t[1,:],pts3t[2,:],'ro',fillstyle='none')
plt.axis('equal')
plt.xlabel('x')
plt.ylabel('y')
###Output
//anaconda/envs/py3/lib/python3.6/site-packages/ipykernel_launcher.py:47: FutureWarning: `rcond` parameter will change to the default of machine precision times ``max(M, N)`` where M and N are the input matrix dimensions.
To use the future default and silence this warning we advise to pass `rcond=None`, to keep using the old, explicitly pass `rcond=-1`.
###Markdown
2.5 How sensitive is the 3D reconstruction to errors in the 2D point locations? You can approach this question empirically by generating a pair of test images and then adding random Gaussian noise to the 2D point locations. Plot out the error in the 3D recovery as a function of the noise in the 2D point locations for several different levels of noise. You can measure error as the sum of squared distances between the true locations and the recovered noisy locations. Insert additional cells as needed below in order to carry out your experiments and describe your result.
###Code
%matplotlib inline
# Sweep additive Gaussian noise (mean mu, std sigma) on the 2D detections and
# plot the resulting sum-of-squared-distance 3D reconstruction error.
x = np.arange(0, 1, 0.1).tolist()
y = []
fig = plt.figure(figsize=(10,10))
M = 5
for mu in range(M):
    for sigma in x:
        # NOTE(review): the *same* noise array is added to both the left and
        # right image points, so the per-view errors are fully correlated --
        # confirm this is intended (independent draws would be more realistic).
        error2D = np.random.normal(mu, sigma, (pts2L.shape))
        pts2Le = pts2L + error2D
        pts2Re = pts2R + error2D
        pts3te = triangulate(pts2Le,camL,pts2Re,camR)
        # sum of squared distances between true and recovered 3D points
        error3D = np.sum((pts3 - pts3te)**2)
        y.append(error3D)
    # one subplot per noise mean
    ax = fig.add_subplot(M,1,mu + 1)
    plt.plot(x, y)
    plt.xlabel('sigma')
    plt.ylabel('error')
    plt.title("mu = %.2f" % (mu))
    y = []
fig.tight_layout()
###Output
//anaconda/envs/py3/lib/python3.6/site-packages/ipykernel_launcher.py:47: FutureWarning: `rcond` parameter will change to the default of machine precision times ``max(M, N)`` where M and N are the input matrix dimensions.
To use the future default and silence this warning we advise to pass `rcond=None`, to keep using the old, explicitly pass `rcond=-1`.
###Markdown
2.6 How sensitive is the 3D reconstruction procedure to errors in the camera parameters? Compute reconstructions of the hemisphere where you vary (a) the image center (+/-10 pixels), (b) the focal length (+/-10 pixels), (c) the translation vector (+/-100mm), and (d) the rotation angle (+/-5 degrees). Vary each parameter of one of the cameras passed into triangulate while keeping the other camera and the images (xL,xR) fixed. Report in a plot or table, the reconstruction error as a function of these different parameters. What parameter is the reconstruction result quality most dependent on? Discuss how you might frame this question in a more general way that doesn’t depend on the units used to measure the camera parameters. Insert additional cells as needed below in order to carry out your experiments and describe your result.
###Code
def cmpL(wrong_cam, pts3, results, pts2L, pts2R, camR):
    """Triangulate with a perturbed LEFT camera and record the squared 3D error."""
    err = np.sum((pts3 - triangulate(pts2L, wrong_cam, pts2R, camR)) ** 2)
    print("Change Left: %.4f" % err)
    results.append(err)
    return results
def cmpR(wrong_cam, pts3, results, pts2L, pts2R, camL):
    """Triangulate with a perturbed RIGHT camera and record the squared 3D error."""
    err = np.sum((pts3 - triangulate(pts2L, camL, pts2R, wrong_cam)) ** 2)
    print("Change Right: %.4f" % err)
    results.append(err)
    return results
def cmpBoth(wrong_camL, wrong_camR, pts3, results, pts2L, pts2R):
    """Triangulate with BOTH cameras perturbed and record the squared 3D error."""
    err = np.sum((pts3 - triangulate(pts2L, wrong_camL, pts2R, wrong_camR)) ** 2)
    print("Change Both: %.4f" % err)
    results.append(err)
    return results
resultL = []
resultR = []
resultB = []

# Each entry perturbs a single calibration parameter of both cameras
# symmetrically: ("c", dc) shifts the principal point, ("f", df) the focal
# length, ("t", dx) pushes the camera centers apart/together along x, and
# ("r", dtheta) biases the rotation angle.  The list reproduces the original
# ten hand-written cases 0..9 in the same order, replacing ~80 lines of
# copy-pasted setup with one data-driven loop (identical printed output).
_five_deg = 5 * np.pi / 180
_perturbations = [
    ("c", np.array([[10, 0]]).T),    # 0: principal point +10 px in x
    ("c", np.array([[0, 10]]).T),    # 1: principal point +10 px in y
    ("c", np.array([[-10, 0]]).T),   # 2: principal point -10 px in x
    ("c", np.array([[0, -10]]).T),   # 3: principal point -10 px in y
    ("f", 10),                       # 4: focal length +10 px
    ("f", -10),                      # 5: focal length -10 px
    ("t", 100),                      # 6: baseline widened by 100 mm per side
    ("t", -100),                     # 7: baseline narrowed by 100 mm per side
    ("r", _five_deg),                # 8: rotation angle +5 degrees
    ("r", -_five_deg),               # 9: rotation angle -5 degrees
]

for case, (kind, delta) in enumerate(_perturbations):
    print(case)
    # start from the true parameters used to synthesize the images
    f_wrong = 100
    cL = np.array([[50, 50]]).T
    cR = np.array([[50, 50]]).T
    tL_wrong = tL
    tR_wrong = tR
    theta_wrong = theta
    if kind == "c":
        cL = cL + delta
        cR = cR + delta
    elif kind == "f":
        f_wrong = 100 + delta
    elif kind == "t":
        tL_wrong = tL + np.array([[-delta, 0, 0]]).T
        tR_wrong = tR + np.array([[delta, 0, 0]]).T
    else:  # "r"
        theta_wrong = theta + delta
    wrong_camL = Camera(f=f_wrong, c=cL, t=tL_wrong, R=roty(theta_wrong))
    wrong_camR = Camera(f=f_wrong, c=cR, t=tR_wrong, R=roty(-theta_wrong))
    resultL = cmpL(wrong_camL, pts3, resultL, pts2L, pts2R, camR)
    resultR = cmpR(wrong_camR, pts3, resultR, pts2L, pts2R, camL)
    resultB = cmpBoth(wrong_camL, wrong_camR, pts3, resultB, pts2L, pts2R)

# one error-vs-case plot per perturbation target
for _title, _series in (("Change Left Camera", resultL),
                        ("Change Right Camera", resultR),
                        ("Change Both Camera", resultB)):
    fig = plt.figure(figsize=(10, 10))
    plt.title(_title)
    plt.plot(np.arange(len(_series)), _series)
    plt.show()
###Output
0
Change Left: 863.7305
Change Right: 661.0927
Change Both: 676.1580
1
Change Left: 134.0450
Change Right: 135.4428
Change Both: 531.0574
2
###Markdown
The camera parameter that the reconstruction quality most depends on is the translation vector, for which it has the most error. The most stable camera parameter is the focal length.To frame the question independently of the camera parameter units, simply provide the intrinsic matrix $K$ that contains the pixel magnification factors, so that physical distance can be converted to pixels and vice versa. 3. Camera CalibrationWrite a function **calibrate** that takes the coordinates of points in an image and in 3D and returns estimated camera parameters. Your function should first use the linear technique to estimate the camera matrix and then decompose the matrix into intrinsic and extrinsic components using RQ matrix decomposition. Your function should output the same data structure used previously to represent a camera.
###Code
def calibrate(pts2, pts3):
    """
    Estimate camera parameters from 2D/3D correspondences (linear DLT).

    Builds the Direct-Linear-Transform system from the correspondences,
    solves for the 3x4 camera matrix via SVD, then factors it into
    intrinsic and extrinsic components using a QR-based (RQ-style)
    decomposition, fixing sign/scale ambiguities afterwards.

    Parameters
    ----------
    pts2 : 2D numpy.array (dtype=float)
        Image coordinates of N points stored in an array of shape (2, N).
        (Points are columns: the code indexes pts2[0,:], pts2[1,:] and uses
        pts2.shape[1] as N.)
    pts3 : 2D numpy.array (dtype=float)
        3D coordinates of the same N points, shape (3, N).

    Returns
    -------
    cam : Camera
        The estimated camera parameters.
    """
    N = pts2.shape[1]
    # assemble the per-point coefficient rows of the homogeneous system
    # A m = 0, two rows per correspondence (one per image coordinate)
    y = np.concatenate((np.full((1, N), 0), np.full((1, N), -1)), axis=0)
    y = np.concatenate((y, pts2[1, :][np.newaxis, :]), axis = 0).T
    x = np.concatenate((np.full((1, N), 1), np.full((1, N), 0)), axis=0)
    x = np.concatenate((x, -pts2[0, :][np.newaxis, :]), axis = 0).T
    # homogeneous 3D points replicated to match the (2N x 12) layout
    p3 = np.concatenate((pts3, np.full((1, N), 1)), axis=0).T[:, np.newaxis, :]
    p3 = np.repeat(p3, 3, axis=1)
    p3 = np.repeat(p3, 2, axis=0)
    p = np.ones(p3.shape)
    p[::2, :, :] = np.multiply(p[::2, :, :], y[:, :, np.newaxis])
    p[1::2, :, :] = np.multiply(p[1::2, :, :], x[:, :, np.newaxis])
    A = np.multiply(p3, p).reshape(2 * N, 12)
    # the camera matrix is the right singular vector associated with the
    # smallest singular value (best approximation to the null space of A)
    U, S, Vh = np.linalg.svd(A)
    M = Vh[np.argmin(S), :].reshape(3, 4)
    C1 = M[:, :-1]
    # RQ-style decomposition implemented via QR of the inverse of C1
    R, K = np.linalg.qr(np.linalg.inv(C1))
    K = np.linalg.inv(K)
    t = np.dot(np.linalg.inv(C1), M[:, -1][:, np.newaxis])
    # normalize so the homogeneous scale K[2,2] equals 1
    K = K / K[-1, -1]
    # force a positive diagonal in K; compensate in R and t
    D = np.diag(np.sign(np.diag(K)))
    K = np.dot(K, D)
    R = np.dot(D, R)
    t = np.dot(D, t)
    # resolve the overall sign ambiguity so R is a proper rotation (det = +1)
    t = np.dot(np.linalg.det(R), t)
    R = np.dot(np.linalg.det(R), R).T
    return Camera(f=K[0, 0], c=np.array([[K[0, -1], K[1, -1]]]).T, t=t, R=R)
###Output
_____no_output_____
###Markdown
3.1 Synthetic calibration testTest your method on a synthetic dataset using the cameras and points from the previous problem on triangulation. Attempt to recover camL and camR and compare your recovered values to the true parameters used to synthesize the test image. If you add Gaussian noise to the 2D point locations (but no the 3D locations) which camera parameter(s) are most affected? Which are most stable? Please justify your answer with some simple experiment.
###Code
def cmp(pts2, pts3, real_cam):
    """Calibrate from the given correspondences and print deviations from the known camera."""
    est = calibrate(pts2, pts3)
    print("Difference in focal length: %.4f" % (est.f - real_cam.f))
    print("Distance in camera center: %.4f" % np.sqrt(np.sum((est.c - real_cam.c)**2)))
    print("Difference in translation vector: \n", est.t - real_cam.t)
    print("Difference in rotation matrix: \n", est.R - real_cam.R)
# Compare the calibrated estimates against the ground-truth synthetic cameras
# (camL/camR and pts2L/pts2R/pts3 are defined by the triangulation cells above).
print("Left Camera")
cmp(pts2L, pts3, camL)
print("Right Camera")
cmp(pts2R, pts3, camR)
# x = np.arange(0, 1, 0.1).tolist()
# for mu in range(3, 5):
# for sigma in x:
# error2D = np.random.normal(mu, sigma, (pts2L.shape))
# pts2Le = pts2L + error2D
# pts2Re = pts2R + error2D
# print("Noise mu = %.4f, sigma = %.4f" % (mu, sigma))
# print("Left Camera")
# cmp(pts2Le, pts3, camL)
# print("Right Camera")
# cmp(pts2Re, pts3, camR)
###Output
Left Camera
[[-1.00000000e+02 -2.21900569e-12 5.00000000e+01]
[-0.00000000e+00 -1.00000000e+02 5.00000000e+01]
[ 0.00000000e+00 0.00000000e+00 1.00000000e+00]]
Difference in focal length: 0.0000
Distance in camera center: 0.0000
Difference in translation vector:
[[-2.40696352e-13]
[-2.57571742e-14]
[-1.50546242e-13]]
Difference in rotation matrix:
[[ 1.44328993e-15 3.52495810e-15 -1.94289029e-15]
[-1.08369572e-14 2.22044605e-16 1.37918684e-14]
[ 1.94289029e-15 -1.71822567e-14 1.22124533e-15]]
Right Camera
[[-1.00000000e+02 2.57950417e-12 5.00000000e+01]
[-0.00000000e+00 -1.00000000e+02 5.00000000e+01]
[ 0.00000000e+00 0.00000000e+00 1.00000000e+00]]
Difference in focal length: -0.0000
Distance in camera center: 0.0000
Difference in translation vector:
[[-1.77635684e-14]
[-6.43929354e-15]
[ 1.79634085e-13]]
Difference in rotation matrix:
[[ 1.76525461e-14 1.93733918e-14 3.49720253e-14]
[-1.17917136e-14 0.00000000e+00 1.97367937e-14]
[-3.49720253e-14 -1.23797103e-14 1.74305015e-14]]
###Markdown
The focal length is the most affected parameter in calibration.The rotation matrix is the most stable parameter in calibration. 3.2 Planar calibration testTest your calibration method on one of the calibration images provided on the course website. The corner points of the checkerboard have physical coordinates given by a grid with 8 vertices along one axis and 6 along the other axis with a spacing of 2.8cm.What intrinsic camera parameters do you recover? Are the camera pixels square? What ambiguities arise from calibrating using a single image of a planar calibration object? To answer this question you may need to consult one of the recommended textbooks for the class or other online resources.
###Code
# This code is a modification of the following tutorial:
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
%matplotlib inline
import cv2
import os
# physical board layout: an 8x6 inner-corner grid with 2.8 cm spacing
nx = 8
ny = 6
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
objp[:, :2] *= 2.8
# NOTE(review): all z-coordinates are set to 1 (not 0) -- presumably to
# dodge the planar z=0 degeneracy in the DLT-based calibrate(); confirm.
objp[:, -1] += 1
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
dirname = 'data/calib'
for a, b, c in os.walk(dirname):
    for fname in c:
        img = cv2.imread(os.path.join(dirname, fname))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret == True:
            objpoints.append(objp)
            # refine detected corners to sub-pixel accuracy
            corners2 = cv2.cornerSubPix(gray,corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (nx, ny), corners2,ret)
            plt.imshow(img)
            plt.show()
            break
    break
# the double break above means only the first detectable image is processed
objpoints = np.array(objpoints)
imgpoints = np.array(imgpoints)
# reshape to the (3,N) / (2,N) column layout expected by calibrate()
objpoints = objpoints[0].T
imgpoints = imgpoints[0].reshape(imgpoints.shape[1], 2).T
camera = calibrate(imgpoints, objpoints)
print(camera.c)
print(camera.R)
print(camera.f)
print(camera.t)
###Output
_____no_output_____
###Markdown
![plane.png](attachment:plane.png)Textbook 22.1.2If all calibration points lie on the same plane, the camera center has infinite many solutions lying on a line except for the intersection point of the object plane, with image-equivalent 2D projection results, which means that the distance from camera to the object is lost due to normalization. 3.3 Geometric recovery challengeIn the course files, you will find a pair of images of an object with unknown geometry taken with the same camera as the calibration images. Your goal is to estimate the dimensions of the imaged box.1. You will want to get a good estimate of the intrinsic camera parameters. Since our simple **calibrate** function doesn't work well for planar objects, I recommend using an existing calibration tool. One option for python is to use the opencv calibration functionality (see links and examples below). Another alternative is to use the MATLAB camera calibration toolbox and then import your estimates back into this notebook. 2. For the pair of images, you need to estimate the extrinsic camera parameters (R, t) that relate the camera views. You can do this using the locations of the outside corners of the checkerboard and known intrinsic parameters and peform a factorization of the Essential matrix, or alternately using non-linear optimization (e.g. using **scipy.optimize.least_squares**) to directly search for R,t which projects the points to the observed location. 3. Finally, use your triangulation routine to estimate the dimensions of the object imaged. You can select correspondences in the two images to triangulate by hand. There will be a prize for the person whose estimate the box geometry is closest to the true solution. Please document your approach and include any code you used with your homework submission.
###Code
# This code is a modification of the following tutorial:
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
%matplotlib notebook
import cv2
import os
import selectpoints
nx = 8
ny = 6
# NOTE(review): objpoints/imgpoints/criteria (and nx/ny) are declared but not
# used in this cell -- presumably copied from the calibration cell above.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
dirname = 'data/calib'
fname = '13.jpg'
# show the challenge image and collect k hand-clicked points
# (requires the interactive %matplotlib notebook backend)
img = cv2.imread(os.path.join(dirname, fname))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(img)
#selectpoints will show the axis and wait for the specified
# number of user clicks.
k = 4
spoints = selectpoints.select_k_points(ax,k)
###Output
_____no_output_____
###Markdown
--- Notes and resources: CalibrationOpenCV provides a set of tools for camera calibration and even a demo script which should work with a bit of modification. You will need to install the package (e.g., by running ***conda install opencv***) in order to use it in your notebook. Consult these references to get some idea of the functionality. The code snippet below demonstrates automatic detection of the chessboard corners in the image.https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.htmlhttps://github.com/opencv/opencv/blob/master/samples/python/calibrate.py
###Code
import cv2
nx = 8
ny = 6
# Make a list of calibration images
fname = 'data/calib/c1.jpg'
img = cv2.imread(fname)
plt.imshow(img)
# Convert to grayscale
# NOTE(review): cv2.imread returns a BGR image, but the conversion below uses
# COLOR_RGB2GRAY -- the grayscale channel weights are swapped; confirm intent.
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# If found, draw corners
if ret == True:
    # Draw and display the corners (modifies img in place)
    cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    plt.imshow(img)
    plt.show()
###Output
_____no_output_____
###Markdown
Interactive Point SelectionFor interactively selecting points in an image by hand (e.g. clicking on the corners of the box) I have provided a bit of code **selectpoints.py** to assist. Note: this will only work with the **%matplotlib notebook** directive which gives interactive plots
###Code
import selectpoints
image = plt.imread('data/challenge/13.jpg')
#display the image in the notebook using a grayscale colormap
# here we keep track of the image axis variable ax, which we
# need to pass along to the select_k_points function
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(image)
#selectpoints will show the axis and wait for the specified
# number of user clicks.
k = 4
spoints = selectpoints.select_k_points(ax,k)
# once you have finished clicking, you can then execute this cell to get
# the point coordinates
# get the x,y coordinates of the selected points
# (spoints.xs / spoints.ys are populated by the click callbacks)
xp = spoints.xs
yp = spoints.ys
print((xp,yp))
###Output
_____no_output_____ |
notebook/.ipynb_checkpoints/2020.03.30_feat_sel_kera-checkpoint.ipynb | ###Markdown
Setup
###Code
# Load the UniProt feature table, restrict to keratinization genes, and launch
# the experiment entry point.
# NOTE(review): `pd` and `main` are defined in earlier notebook cells not shown
# here -- presumably pandas and the training routine; confirm.
uniprot = pd.read_csv("/srv/home/wconnell/keiser/data/uniprot_mapping_ids/TCGA_rnaseq_uniprot_features.tab.gz", sep="\t")
keratin = uniprot[uniprot["Gene ontology (biological process)"] == "keratinization [GO:0031424]"]
disease = ['BRCA', 'LUAD', 'KIRC', 'THCA', 'PRAD', 'SKCM']
sample_type = ['Primary Tumor', 'Solid Tissue Normal']
# n_features: one input feature per keratinization protein
params = {"device":"3",
          "note":"true",
          "n_features":len(keratin),
          "steps":50,
          "embedding":2}
main(disease=disease, sample_type=sample_type, **params)
###Output
_____no_output_____ |
User_Interface/SeverSide_Flask/old deployment/Deployment.ipynb | ###Markdown
Voice analysis
###Code
#DataFlair - Extract features (mfcc, chroma, mel) from a sound file
def extract_feature(X, mfcc, chroma, mel,sample_rate):
result=np.array([])
if mfcc:
mfccs=np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result=np.hstack((result, mfccs))
if chroma:
stft=np.abs(librosa.stft(X))
chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result=np.hstack((result, chroma))
if mel:
mel=np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
result=np.hstack((result, mel))
return result
def predict_voice(file):
    """
    Predict the emotion label of a single audio file with the global voice_model.

    Parameters
    ----------
    file : str
        Path to the audio file.

    Returns
    -------
    The first (only) label predicted by voice_model.
    """
    # FIX: the original comment claimed a downsample "44.1kHz to 8kHz";
    # librosa.load with no sr= argument resamples to its default 22050 Hz.
    y, s = librosa.load(file)
    feature = extract_feature(X=y, mfcc=True, chroma=True, mel=True, sample_rate=s)
    # the classifier expects a 2-D batch, hence the single-element list
    x = [feature]
    y_pred = voice_model.predict(x)
    return y_pred[0]
###Output
_____no_output_____
###Markdown
Split Audio
###Code
def get_duration(fname):
    """Return the length of a WAV file in seconds (frame count / frame rate)."""
    # Wave_read objects support the context-manager protocol directly
    with wave.open(fname, 'r') as wav_file:
        return wav_file.getnframes() / float(wav_file.getframerate())
def get_emotion_dic(fname):
    """
    Split a WAV file into 3-second clips and tally the predicted emotion class
    of each clip.

    Parameters
    ----------
    fname : str
        Path to the input WAV file.

    Returns
    -------
    dict
        Counts for the 'bad' / 'medium' / 'good' classes.
    """
    duration = get_duration(fname)
    # frames/ holds the temporary 3-second clips; always start from a clean
    # slate (the original if/else duplicated the makedirs call and shadowed
    # the builtin `dir`)
    frames_dir = 'frames'
    if os.path.exists(frames_dir):
        shutil.rmtree(frames_dir)
    os.makedirs(frames_dir)
    emotion_dic = {'bad': 0, 'medium': 0, 'good': 0}
    originalAudio = AudioSegment.from_wav(fname)
    for i in range(0, int(duration), 3):
        # pydub slices in milliseconds
        clip = originalAudio[i * 1000:(i + 3) * 1000]
        clip.export('frames/' + str(i) + '.wav', format="wav")
        emotion = predict_voice('frames/' + str(i) + '.wav')
        emotion_dic[emotion] += 1
    return emotion_dic
###Output
_____no_output_____
###Markdown
**Answers Similarity**
###Code
!pip install transformers
from transformers import AutoTokenizer
!pip install pytorch-pretrained-bert
from pytorch_pretrained_bert import BertModel
def clean_text(text):
    """Lowercase *text* and strip bracketed spans, links, HTML tags,
    punctuation, newlines and words containing digits."""
    text = text.lower()
    # apply the removal patterns in the same fixed order
    for pattern in ('\[.*?\]',
                    'https?://\S+|www\.\S+',
                    '<.*?>+',
                    '[%s]' % re.escape(string.punctuation),
                    '\n',
                    '\w*\d\w*'):
        text = re.sub(pattern, '', text)
    return text
def clean2_text(text):
    """Clean *text* with clean_text, then drop a small stopword set
    ('a', 'an', 'the', 'of', 'that') and empty tokens."""
    cleaned = clean_text(text)
    kept = [w for w in cleaned.split(' ')
            if w not in ['a', 'an', 'the', 'of', 'that', '']]
    return ' '.join(kept)
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
class Similarity_Model(nn.Module):
    """
    BERT-based sentence-pair similarity classifier: a (optionally frozen)
    BERT encoder followed by dropout and a single sigmoid-activated linear
    unit producing a similarity score in [0, 1].
    """

    def __init__(self, output_dim, n_layers, hidden_dim, freeze_bert):
        super(Similarity_Model, self).__init__()
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        # FIX: the original read the *global* ``no_layers`` here instead of the
        # ``n_layers`` constructor argument, silently coupling the model to a
        # notebook variable defined in a later cell.
        self.no_layers = n_layers
        # BERT encoder; optionally freeze its weights so only the head trains
        self.bert_model = BertModel.from_pretrained('bert-base-uncased')
        if freeze_bert:
            for p in self.bert_model.parameters():
                p.requires_grad = False
        self.dropout = nn.Dropout(0.3)
        # 768 = hidden size of bert-base; map the pooled output to the score(s)
        self.fc = nn.Linear(768, output_dim)
        self.sig = nn.Sigmoid()

    def forward(self, input_ids, attention_masks, token_type_ids, hidden):
        """
        Score a batch of tokenized sentence pairs.

        ``hidden`` is accepted and returned unchanged for interface
        compatibility with an earlier LSTM variant of this model (the LSTM
        layers were already commented out in the original).
        Returns (scores, hidden) where scores has shape (batch_size,).
        """
        batch_size = input_ids.size(0)
        sequence_output, pooled_output = self.bert_model(
            input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids)
        out = self.dropout(pooled_output)
        out = self.fc(out)
        sig_out = self.sig(out)
        # keep only the last unit per sample (output_dim is expected to be 1)
        out = sig_out.view(batch_size, -1)
        out = out[:, -1]
        return out, hidden

    def init_hidden(self, batch_size):
        """Zero (h0, c0) LSTM state; retained for interface compatibility."""
        # NOTE(review): `device` is a notebook-level global -- confirm it is
        # defined before the first call.
        h0 = torch.zeros((self.no_layers, batch_size, self.hidden_dim)).to(device)
        c0 = torch.zeros((self.no_layers, batch_size, self.hidden_dim)).to(device)
        return (h0, c0)
# Mount Google Drive and load the pretrained similarity-model checkpoint.
from google.colab import drive
drive.mount('/content/drive')
# hyperparameters must match those used when the checkpoint was trained
no_layers = 1
output_dim = 1
hidden_dim = 128
Freeze_bert = False
Similar_Model = Similarity_Model(output_dim, no_layers, hidden_dim, Freeze_bert)
#moving to gpu
Similar_Model.to(device)
Similar_Model.eval()
# NOTE(review): torch.load with no map_location assumes the checkpoint's
# original device is available -- confirm for CPU-only runs.
Similar_Model.load_state_dict(torch.load("/content/drive/MyDrive/Colab Notebooks/Snli_TechSimilarity.pt"))
def predict_similarity(ans1, ans2):
    """Return the model's similarity score for two answers, as a string."""
    first = clean2_text(ans1)
    second = clean2_text(ans2)
    print(first, second)
    # tokenize the pair into a single BERT input with segment ids
    encoded_pair = tokenizer(first, second,
                             add_special_tokens=True,
                             return_tensors='pt'  # Return torch.Tensor objects
                             )
    inputs = encoded_pair['input_ids'].expand(1, -1).to(device)
    attention = encoded_pair['attention_mask'].expand(1, -1).to(device)
    token_ids = encoded_pair['token_type_ids'].expand(1, -1).to(device)
    # detach the (compatibility) hidden state from any prior graph
    h = tuple(each.data for each in Similar_Model.init_hidden(1))
    output, _ = Similar_Model(inputs, attention, token_ids, h)
    return str(output.item())
###Output
_____no_output_____
###Markdown
**Speech Recognition**
###Code
!pip install google-cloud
!pip install google-cloud-speech
import numpy as np
import scipy.io.wavfile as wav
import io as ar
import os
from google.cloud import speech
# NOTE(review): referencing a service-account key file from source is a
# security risk -- supply the credentials path via deployment configuration.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'avian-tract-283207-f1553ac44767.json'
# single shared client for all transcription requests
client = speech.SpeechClient()
def SpeechRecognition(audiof):
    """Transcribe an audio file with the Google Cloud Speech API and return the text."""
    # normalize the input into a LINEAR16 wav the API can consume
    samples, rate = librosa.load(audiof)
    soundfile.write('tmp.wav', samples, rate)
    with ar.open('/content/tmp.wav', 'rb') as audio_file:
        content = audio_file.read()
    audio = speech.RecognitionAudio(content=content)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        language_code='en-US')
    operation = client.long_running_recognize(config=config, audio=audio)
    print("Waiting for operation to complete...")
    response = operation.result(timeout=90)
    # concatenate the transcript fragments of every recognized segment
    return "".join(result.alternatives[0].transcript for result in response.results)
###Output
_____no_output_____
###Markdown
**Routes**
###Code
!pip install flask-ngrok
!pip install -U flask-cors
from flask import Flask, request, jsonify, render_template
from flask_ngrok import run_with_ngrok
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
#this contains the path of folder that to store the images in
app.config["IMAGE_UPLOADS"] = "/content"
app.config["AUDIO_UPLOADS"]="/content"
run_with_ngrok(app) # Start ngrok when app is run
#@app.route('/')
#def home():
# return render_template('index.html')
@app.route("/predict", methods=['post'])
def predict():
    """Classify the mood of faces in a base64-encoded webcam frame.

    Expects a form field 'image' holding a data URL
    ("data:image/...;base64,<payload>"). The frame is decoded, reduced to a
    130x130 grayscale image, faces are located (``find`` plus non-max
    suppression), aligned, and each face's predicted emotion is bucketed
    into 'bad' / 'medium' / 'good'.

    Returns
    -------
    JSON string with one bucket per detected face (space separated), or
    "medium" when no face is found or any step fails.
    """
    print("recieved data: ", request.form["image"])
    # Strip the "data:...;base64," prefix before decoding the payload.
    image_bytes = b64decode(request.form["image"].split(',')[1])
    # convert bytes to numpy array
    jpg_as_np = np.frombuffer(image_bytes, dtype=np.uint8)
    # decode numpy array into OpenCV BGR image
    image = cv2.imdecode(jpg_as_np, flags=1)
    url = request.method  # NOTE(review): unused local
    frameTemp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    frameTemp = cv2.resize(frameTemp, (130, 130))
    try:
        rects = find(frameTemp)
        rects = np.array(rects)
        # Merge overlapping detections (overlap threshold 0.2).
        rects = non_max_suppression_fast(rects, 0.2)
        output=""
        for rect in rects:
            res,ret=getFaceAlign(frameTemp[rect[0]:rect[1],rect[2]:rect[3]])
            if ret==1:
                #print(rect)
                io.imshow(res)
                pred = pr(res)
                ans = None
                # Collapse the emotion classes into three coarse buckets.
                if pred in ['Disgust','Fear','Sad','Angry']:
                    ans = 'bad'
                elif pred in ['Surprise' , 'Neutral']:
                    ans = 'medium'
                elif pred == 'Happy':
                    ans = 'good'
                output+= ans+" "
                print(ans)
        #return the emotion of the face's image to the html
        if output!="":
            return jsonify(output)
        return jsonify("medium")
    except:
        # Deliberate best-effort fallback: any failure degrades to "medium".
        return jsonify("medium")
@app.route('/predictVoice',methods=['POST'])
def predictVoice():
    """Score the emotion of an uploaded voice recording.

    Expects a multipart upload under the 'file' key. The file is saved to
    the configured upload folder, re-exported as a valid WAV (the raw
    upload may carry a corrupted header), and the emotion dictionary
    produced by ``get_emotion_dic`` is returned as JSON.
    """
    audio = request.files["file"]
    audiofile=audio.filename
    #save the audio in colab directory /content
    audio.save(os.path.join(app.config["AUDIO_UPLOADS"], audiofile))
    #dir of audio is in the current directory
    #convert audio to correct wav file
    newAudio = AudioSegment.from_file(audiofile)
    #overwrite corrupted audio with the corrected audio
    newAudio.export(audiofile, format="wav")
    emotion_dic=get_emotion_dic("/content/"+audiofile)
    print(emotion_dic)
    return jsonify(emotion_dic)
@app.route('/predictSimilarity', methods=['POST'])
def predictSimilarity():
    """Compare a typed reference answer with a spoken answer.

    Expects a multipart upload under 'file' (the spoken answer) plus form
    fields 'ans1' (reference answer text) and 'question'. The audio is
    saved, transcribed with ``SpeechRecognition``, and scored against
    'ans1' via ``predict_similarity``.

    Returns
    -------
    JSON list ``[question, answer1, prob]`` where ``prob`` is the
    similarity score as a string.
    """
    audio = request.files["file"]
    audiofile=audio.filename
    audio.save(os.path.join(app.config["AUDIO_UPLOADS"], audio.filename))
    # #convert audio to correct wav file
    # newAudio = AudioSegment.from_file(audiofile)
    # #overwrite corrupted audio with the corrected audio
    # newAudio.export(audiofile, format="wav")
    answer1 = request.form['ans1']
    question=request.form['question']
    print(answer1)
    # Transcribe the uploaded audio to text before scoring similarity.
    answer2 = SpeechRecognition("/content/"+audiofile)
    print(answer2)
    prob = predict_similarity(answer1, answer2)
    dic=[question, answer1,prob]
    return jsonify(dic)
app.run()
###Output
_____no_output_____ |
Music_Synthesis.ipynb | ###Markdown
Music Synthesis
TO-DO:
---
1. ~Figure out a way to represent data~
2. ~Build base LSTM~
3. ~Sample new sequences from the model~
4. ~Try Embeddings~
5. Try Stratified split of some sort to better represent the model
6. Try Advanced Sequence Generation Models
7. Try mixing genres
8. Try to introduce the temporal aspect Imports
###Code
import glob
from tqdm.notebook import tqdm
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from sklearn.model_selection import train_test_split
from music21 import note, chord, converter, stream, instrument, midi
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Specify device for the model to run on
device = "cuda" if torch.cuda.is_available() else "cpu"
###Output
_____no_output_____
###Markdown
Loading Dataset
###Code
!mkdir dataset
!unzip -q all_artists.zip
!unzip -q \*.zip -x all_artists.zip -d dataset
###Output
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
caution: excluded filename not matched: all_artists.zip
20 archives were successfully processed.
###Markdown
Preprocessing and Generating Dataset from raw midi
###Code
def get_notes(root):
    """Collect the note/chord sequence of every MIDI song under *root*.

    Parameters
    ----------
    root : str
        Folder containing ``.mid`` files (searched non-recursively).

    Returns
    -------
    list[str]
        Notes of all songs concatenated in file order: plain note names
        (e.g. ``'C'``) for single notes, and comma-joined normal-order
        pitch classes (e.g. ``'0,4,7'``) for chords.
    """
    # FIX: dropped the unused local `unprocessed_notes = None`.
    notes = []
    for midi_file in tqdm(glob.glob(f"{root}/*.mid")):
        song = converter.parse(midi_file)
        for component in song.recurse():
            if isinstance(component, note.Note):
                notes.append(component.name)
            elif isinstance(component, chord.Chord):
                # Encode a chord as its normal-order pitch classes.
                notes.append(','.join(str(n) for n in component.normalOrder))
    return notes
def get_info(notes):
    """Build the note vocabulary and both note<->int lookup tables.

    Parameters
    ----------
    notes : list[str]
        All notes present in the dataset (duplicates allowed).

    Returns
    -------
    tuple
        ``(unique_notes, note_to_int, int_to_note)``. The two dicts are
        exact inverses: if ``note_to_int[name] == i`` then
        ``int_to_note[i] == name``. Integer ids follow the sorted order
        of the unique note names.
    """
    unique_notes = list(set(notes))
    # FIX: the original created the enumerate object, consumed it, then had
    # to rebuild it for the second dict. Sort once and fill both inverse
    # mappings in a single pass (also avoids shadowing the music21 `note`
    # module with the loop variable).
    note_to_int = {}
    int_to_note = {}
    for i, name in enumerate(sorted(unique_notes)):
        note_to_int[name] = i
        int_to_note[i] = name
    return unique_notes, note_to_int, int_to_note
def get_dataloader(notes, note_to_int, sequence_length, vocab_length, batch_size=64):
    """
    Build train/test DataLoaders of (sequence -> next note) examples.

    Slides a window of ``sequence_length`` notes over the corpus; each
    window is one input sample whose label is the note that follows it.

    Input:
        notes - A list of all the notes present in our music dataset.
        note_to_int - A dictionary that maps a given note string to an int.
        sequence_length - Number of notes in each sequence.
        vocab_length - Number of unique notes in the dataset
                       (currently unused inside this function).
        batch_size - Batch size for training.
    Output:
        X - List of ALL encoded input sequences (plain lists of ints, not
            tensors); kept around for seeding the sampler later.
        train_loader - DataLoader over 85% of the data (shuffled split).
        test_loader - DataLoader over the remaining 15%.
    """
    X = []
    y = []
    # for the first x notes taken as input, get the next note as output
    for i in range(0, len(notes) - sequence_length):
        X.append([note_to_int[note] for note in notes[i : i + sequence_length]])
        y.append(note_to_int[notes[i + sequence_length]])
    # split (shuffled, so train/test share no positional bias)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, shuffle=True)
    X_train = torch.tensor(X_train)
    y_train = torch.tensor(y_train)
    X_test = torch.tensor(X_test)
    y_test = torch.tensor(y_test)
    train_dataset = TensorDataset(X_train, y_train)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=4)
    test_dataset = TensorDataset(X_test, y_test)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=4)
    return X, train_loader, test_loader
###Output
_____no_output_____
###Markdown
Create Model
###Code
class MusicSynthesis(nn.Module):
    """Stacked bidirectional-LSTM next-note classifier.

    Token ids -> embedding -> three bi-LSTM stacks -> last time step ->
    linear(64) -> dropout -> linear(num_classes) logits.
    """

    def __init__(self, num_embeddings, embedding_dim, hidden_size, num_layers, dropout, num_classes, input_size):
        """
        Parameters
        ----------
        num_embeddings : vocabulary size for the embedding table.
        embedding_dim : size of each note embedding vector.
        hidden_size : per-direction LSTM hidden size.
        num_layers : layers inside each of the three LSTM stacks.
        dropout : dropout probability applied before the final classifier.
        num_classes : number of output classes (== vocabulary size).
        input_size : unused; kept for backward interface compatibility.
        """
        super(MusicSynthesis, self).__init__()
        self.embedding = nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)
        self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size,
                            num_layers=num_layers, bidirectional=True, batch_first=True)
        self.lstm2 = nn.LSTM(input_size=hidden_size*2, hidden_size=hidden_size,
                             num_layers=num_layers, bidirectional=True, batch_first=True)
        self.lstm3 = nn.LSTM(input_size=hidden_size*2, hidden_size=hidden_size,
                             num_layers=num_layers, bidirectional=True, batch_first=True)
        self.linear = nn.Linear(hidden_size*2, 64)
        # BUG FIX: the `dropout` argument was silently ignored —
        # nn.Dropout() defaulted to p=0.5. Honor the configured value.
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(64, num_classes)

    def forward(self, x):
        """Return (batch, num_classes) logits for the next note."""
        x = self.embedding(x)
        h, (_, _) = self.lstm(x)
        h, (_, _) = self.lstm2(h)
        h, (_, _) = self.lstm3(h)
        # Classify from the final time step only.
        x = self.linear(h[:,-1,:])
        x = self.dropout(x)
        x = self.linear2(x)
        return x
###Output
_____no_output_____
###Markdown
Training Function
###Code
def get_accuracy(outputs, labels):
    """
    Fraction of predictions in a batch that match the true labels.

    Input:
        outputs - (batch, num_classes) logit tensor from the model.
        labels - (batch,) tensor of true class indices.
    Output:
        Accuracy of the given batch as a plain Python float (so lists of
        per-batch accuracies average cleanly with np.mean).
    """
    # FIX: softmax is monotonic, so taking argmax of the raw logits gives
    # the same prediction — the softmax call was redundant. Also return a
    # float instead of a 0-dim tensor.
    preds = torch.argmax(outputs, dim=1).detach().cpu()
    labels = labels.detach().cpu()
    correct = (preds == labels).sum().item()
    total = len(labels)
    return correct / total
def train_model(epochs, trainloader, testloader, model, optimizer, loss_function, vocab_size):
    """
    Base function to train the model in one go, good for orthogonality.

    Runs a full training pass followed by a no-grad validation pass each
    epoch, printing the per-epoch averages.

    Input:
        epochs - Number of training epochs.
        trainloader - DataLoader over the training split.
        testloader - DataLoader over the validation split.
        model - Model that needs to be trained.
        optimizer - Optimizer selected for the model.
        loss_function - Loss function for backpropagation.
        vocab_size - Number of unique notes in our dataset
                     (currently unused inside this function).
    Output:
        training_losses - per-epoch mean training loss
        training_accuracy - per-epoch mean training accuracy
        test_losses - per-epoch mean validation loss
        test_accuracy - per-epoch mean validation accuracy
        NOTE(review): FOUR lists are returned — callers that unpack only
        two values (e.g. `loss, accuracy = train_model(...)`) will raise.
    """
    training_losses = []
    training_accuracy = []
    test_losses = []
    test_accuracy = []
    for epoch in range(epochs):
        tr_epoch_loss = []
        tr_epoch_accuracy = []
        te_epoch_loss = []
        te_epoch_accuracy = []
        for data in tqdm(trainloader):
            inputs, labels = data
            labels.squeeze_()
            inputs = inputs.to(device)
            labels = labels.to(device)
            model.train()
            optimizer.zero_grad()
            outputs = model(inputs)
            outputs = outputs  # no-op; kept as-is
            loss = loss_function(outputs, labels)
            tr_epoch_accuracy.append(get_accuracy(outputs, labels))
            tr_epoch_loss.append(loss.item())
            loss.backward()
            optimizer.step()
        curr_tr_acc = np.mean(tr_epoch_accuracy)
        curr_tr_loss = np.mean(tr_epoch_loss)
        training_accuracy.append(curr_tr_acc)
        training_losses.append(curr_tr_loss)
        # Validation pass: gradients disabled, model switched to eval mode.
        with torch.no_grad():
            for data in testloader:
                inputs, labels = data
                labels = labels.squeeze()
                inputs = inputs.to(device)
                labels = labels.to(device)
                model.eval()
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                te_epoch_accuracy.append(get_accuracy(outputs, labels))
                te_epoch_loss.append(loss.item())
        curr_te_acc = np.mean(te_epoch_accuracy)
        curr_te_loss = np.mean(te_epoch_loss)
        test_accuracy.append(curr_te_acc)
        test_losses.append(curr_te_loss)
        print(f"Epoch {epoch + 1:003}")
        print(f"Training Loss = {curr_tr_loss:.3f} Training Accuracy = {curr_tr_acc*100:.3f}")
        print(f"Validation Loss = {curr_te_loss:.3f} Validation Accuracy = {curr_te_acc*100:.3f}")
    print("Training Done")
    return training_losses, training_accuracy, test_losses, test_accuracy
###Output
_____no_output_____
###Markdown
Testing
###Code
notes_dataset = get_notes("dataset")
# save to file
with open("notes_dataset.txt", "wb") as file_:
pickle.dump(notes_dataset, file_)
# open from an existing file
with open("notes_dataset.txt", "rb") as file_:
notes_dataset = pickle.load(file_)
vocab, note_to_int, int_to_note = get_info(notes_dataset)
SEQ_LEN = 100
VOCAB_LEN = len(vocab)
X, trainloader, testloader = get_dataloader(notes_dataset, note_to_int, SEQ_LEN, VOCAB_LEN)
model = MusicSynthesis(VOCAB_LEN, 50, hidden_size=256, num_layers=1,
dropout=0.3, num_classes=VOCAB_LEN, input_size=1)
model.to(device)
model.load_state_dict(torch.load("biLSTM-model-with-embeddings-256h-1l.pth"))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
EPOCHS = 10
loss, accuracy = train_model(epochs=EPOCHS, trainloader=trainloader, testloader=testloader, model=model,
optimizer=optimizer, loss_function=criterion, vocab_size=VOCAB_LEN)
MODEL_PATH = "./biLSTM-model-with-embeddings-256h-1l.pth"
torch.save(model.state_dict(), MODEL_PATH)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(2, 2)
ax1.plot(loss)
ax2.plot(accuracy)
ax1.set_xlabel("Loss")
ax2.set_xlabel("Accuracy")
plt.show()
X = torch.tensor(X)
###Output
_____no_output_____
###Markdown
Sampling
###Code
def sample(model, file_name, vocab_len, base_offset, sample_len):
    """
    Function to sample new sequences from the trained model.

    Inputs:
        model - Trained model
        file_name - Name of the output midi file (written as '<file_name>.mid')
        vocab_len - Number of unique notes in our dataset
                    (currently unused inside this function)
        base_offset - Time difference between each note
        sample_len - Number of notes/chords to sample
    Outputs:
        none

    Writes the sampled midi song onto a file specified by file_name.

    NOTE(review): relies on the module-level tensor ``X`` (seed sequences),
    ``int_to_note`` and ``device`` being defined.
    """
    offset = 0
    # Seed generation with a random training sequence from the global X.
    input = np.random.randint(0, len(X)-1)  # NOTE(review): shadows builtin input()
    network_input = (X[input]).unsqueeze(0).to(device)
    music = []
    notes = []
    model.eval()
    for i in range(sample_len):
        prediction = model(network_input)
        idx = torch.argmax(prediction, dim=1) # Get index of the predicted note
        result = int_to_note[idx.item()]
        notes.append(result)
        #Convert idx to a network appropriate tensor
        idx_tensor = torch.tensor([idx]).unsqueeze(-1).to(device)
        # Slide the window: append the prediction, drop the oldest note.
        network_input = torch.cat((network_input, idx_tensor), dim=1)
        network_input = network_input[:,1:network_input.shape[1] + 1]
    inp_notes = [int_to_note[int(i.item())] for i in (X[input])]
    # notes = inp_notes + notes
    # notes = [int_to_note[n] for n in notes]
    print(inp_notes)
    print(notes)
    # Convert the sampled note strings into music21 objects, advancing the
    # time offset by base_offset per event.
    for n in notes:
        chord_list = []
        if ',' in n: # n is a chord
            chord_list = n.split(",")
            chord_list = [int(c) for c in chord_list]
            new_chord = chord.Chord(chord_list)
            new_chord.offset = offset
            music.append(new_chord)
        else: # n is a note
            if n == '': continue
            new_note = note.Note(n)
            new_note.offset = offset
            music.append(new_note)
        offset += base_offset
    output_stream = stream.Stream(music)
    midi_file = midi.translate.streamToMidiFile(output_stream)
    midi_file.open(f'{file_name}.mid', 'wb')
    midi_file.write()
    midi_file.close()
sample(model, "outputs", VOCAB_LEN, 0.60, 100)
###Output
_____no_output_____ |
matrixalg/10_pruebas_varias.ipynb | ###Markdown
El producto punto es conmutativo.
###Code
import numpy as np
# Demonstrate that the dot product is commutative: x·y == y·x,
# so both prints emit the same value.
x = np.array([2, -5, -1])
y = np.array((3, 2, -3))
print(x.dot(y))
print(y.dot(x))
###Output
-1
|
Scratch Notebooks/previous_web_scraping_selenium.ipynb | ###Markdown
Scraping the UnscrapableSome sites are hard to scrape.Sometimes you get blocked.Sometimes the site is using a lot of fancy Javascript.We'll see a few examples of methods we can use as workarounds for the former and introduce the tool Selenium that lets us automate dynamic interactions with the browser, which can help with the latter. How much is too much?Sites have `robots.txt` pages that give guidelines about what they want to allow webcrawlers to access
###Code
import requests
url = 'http://www.github.com/robots.txt'
response = requests.get(url)
print(response.text)
###Output
_____no_output_____
###Markdown
Disallow: / means disallow everything (for all user-agents at the end that aren't covered earlier). Boxofficemojo is more accepting:
###Code
url = 'http://www.boxofficemojo.com/robots.txt'
response = requests.get(url)
print(response.text)
###Output
_____no_output_____
###Markdown
It's very common for sites to block you if you send too many requests in a certain time period. Sometimes all it takes to evade this is well-designed pauses in your scraping. 2 general ways:* pause after every request* pause after each n requests
###Code
#every request
import time
page_list = ['page1','page2','page3']
for page in page_list:
### scrape a website
### ...
print(page)
time.sleep(2)
#every 200 requests
import time
page_list = ['page1','page2','page3','page4','page5','page6']
for i, page in enumerate(page_list):
### scrape a website
### ...
print(page)
if (i+1 % 200 == 0):
time.sleep(320)
###Output
_____no_output_____
###Markdown
Or better yet, add a random delay (more human-like)
###Code
import random
for page in page_list:
### scrape a website
### ...
print(page)
time.sleep(.5+2*random.random())
###Output
_____no_output_____
###Markdown
How do I make requests look like a real browser?
###Code
import sys
import requests
from bs4 import BeautifulSoup
url = 'http://www.reddit.com'
user_agent = {'User-agent': 'Mozilla/5.0'}
response = requests.get(url, headers = user_agent)
###Output
_____no_output_____
###Markdown
We can generate a random user_agent
###Code
from fake_useragent import UserAgent
ua = UserAgent()
user_agent = {'User-agent': ua.random}
print(user_agent)
response = requests.get(url, headers = user_agent)
print(response.text)
###Output
_____no_output_____
###Markdown
Now to Selenium! What happens if I try to parse my gmail with `requests` and `BeautifulSoup`?
###Code
import requests
from bs4 import BeautifulSoup
gmail_url="https://mail.google.com"
soup=BeautifulSoup(requests.get(gmail_url).text, "lxml")
print(soup.prettify())
###Output
_____no_output_____
###Markdown
Well, this is a tiny page. We get redirected. Soupifying this is useless, of course. Luckily, in this case we can see where we are sent to. In many of cases, you won't be so lucky. The page contents will be rendered by JavaScript by a browser, so just getting the source won't help you.Anyway, let's follow the redirection for now.
###Code
new_url = "https://mail.google.com/mail"
# get method will navigate the requested url..
soup =BeautifulSoup(requests.get(new_url).text)
print(soup.prettify())
print(soup.find(id='Email'))
###Output
_____no_output_____
###Markdown
We have hit the login page. We can't get to the emails without logging in ... i.e. we need to actually interact with the browser using Selenium!
###Code
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import chromedriver_binary
driver = webdriver.Chrome()
driver.get("https://mail.google.com")
# Alternatives to Chrome: Firefox, PhantomJS
###Output
_____no_output_____
###Markdown
Interlude: how to include usernames and passwordsWe are going to have to enter a username and password in order to log in. However, we **don't** want to have our password uploaded to Github for people to scrape! One solution to this is to use _environment variables_.In your directory, create a file called `.env` that has the following format:```bashUSERNAME="[email protected]"PASSWORD="your_password"```DON'T ADD THIS FILE TO GITHUB!It is prudent to add a line `.env` to your `.gitignore`We add two commands to the top of the cell:```%load_ext dotenv allows us to use the %dotenv "magic" command%dotenv reads .env, and makes USERNAME and PASSWORD environment variables```We can now use `os.environ.get` to access the environment variables without having them appear in the notebook.
###Code
!pip install python-dotenv
# See notes about environment variables
%load_ext dotenv
%dotenv
import os
EMAIL = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
# Show that this is working. Don't do this for PASSWORD!
print(EMAIL)
###Output
_____no_output_____
###Markdown
Fill out the username and password, then hit enter to log in. Now let us use this to log in.
###Code
username_form = driver.find_element_by_id("identifierId")
username_form.send_keys(EMAIL)
username_form.send_keys(Keys.RETURN)
password_form=driver.find_element_by_name("password") # note another approach
password_form.send_keys(PASSWORD) # enter password
password_form.send_keys(Keys.RETURN)
###Output
_____no_output_____
###Markdown
Click compose button to start a new email draft
###Code
compose_button=driver.find_element_by_xpath('//div[text()="Compose"]')
compose_button.click()
###Output
_____no_output_____
###Markdown
Write a nice, friendly (optional) message to your (least?) favorite person
###Code
to_field = driver.find_element_by_name("to")
to_field.send_keys("[email protected]") # enter recipient email
subject = driver.find_element_by_name("subjectbox")
subject.send_keys("This is an alert!")
message_body = driver.find_element_by_xpath("//div[@aria-label='Message Body']")
message_body.send_keys("Hello,")
message_body.send_keys([Keys.RETURN, Keys.RETURN])
message_body.send_keys("I am a computer and I just became self aware!")
###Output
_____no_output_____
###Markdown
Press the send button
###Code
send_button = driver.find_element_by_xpath("//div[contains(@aria-label, 'Send')]")
send_button.click()
###Output
_____no_output_____
###Markdown
Scraping Box Office Mojo with Selenium
###Code
matrix_url = "http://www.boxofficemojo.com/movies/?id=matrix.htm"
driver.get(matrix_url)
# 'contains' will find a match on the text, in this case return b tag
gross_selector = '//font[contains(text(), "Domestic")]/b'
print(driver.find_element_by_xpath(gross_selector).text)
# scraping genre
genre_selector = '//a[contains(@href, "/genres/chart/")]/b'
for genre_anchor in driver.find_elements_by_xpath(genre_selector):
print(genre_anchor.text)
inf_adjust_2000_selector = '//select[@name="ticketyr"]/option[@value="2000"]'
driver.find_element_by_xpath(inf_adjust_2000_selector).click()
go_button = driver.find_element_by_name("Go")
go_button.click()
###Output
_____no_output_____
###Markdown
Now the page has changed; it's showing inflation adjusted numbers. We can grab the new, adjusted number.
###Code
gross_selector = '//font[contains(text(), "Domestic ")]/b'
print(driver.find_element_by_xpath(gross_selector).text)
###Output
_____no_output_____
###Markdown
Scraping IMDB with Selenium
###Code
url = "http://www.imdb.com"
driver.get(url)
query = driver.find_element_by_id("navbar-query")
query.send_keys("Julianne Moore")
query.send_keys(Keys.RETURN)
name_selector = '//a[contains(text(), "Julianne Moore")]'
driver.find_element_by_xpath(name_selector).click()
current_url = driver.current_url
###Output
_____no_output_____
###Markdown
Mixing Selenium and BeautifulSoup
###Code
from bs4 import BeautifulSoup
"""Could use requests then send page.text to bs4
but Selenium actually stores the source as part of
the Selenium driver object inside driver.page_source
#import requests
#page = requests.get(current_url)
"""
soup = BeautifulSoup(driver.page_source, 'html.parser')
soup.prettify()
len(soup.find_all('a'))
driver.close()
###Output
_____no_output_____ |
notebooks/KeepThemSeparated.ipynb | ###Markdown
Separating Data and Code. Good coding practice is to keep your data and code separate. This means not having your data embedded within the code itself. As a result, you will be able to apply the same code to different data sets. The bad way
###Code
# import the various modules that we'll need for this lesson
import numpy as np
import pandas as pd
from matplotlib import pyplot
import scipy
import scipy.ndimage
# make some random data
a = np.random.normal(loc=0, scale=1, size=1000)
smoothed = scipy.ndimage.gaussian_filter1d(a, sigma=2)
# plot the data
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(a)
ax.set_xlabel('Index')
ax.set_ylabel('Value')
# save to a file
pyplot.savefig('NicePlot.png')
# show the plot
pyplot.show()
###Output
_____no_output_____
###Markdown
A better way1. Generate/collect your data and save it to a file someplace.2. Load this data and plot, save the plot. Generating /collecting data could be done in another script but for now we do it in one cell.
###Code
# make data
a = np.random.normal(loc=0, scale=1, size=1000)
# convert to a pandas data frame (see later for pandas intro)
df = pd.DataFrame(data={'Numbers':a})
# write to a csv file using a format that is easy to read (csv)
df.to_csv('random_data.csv', index=False)
###Output
_____no_output_____
###Markdown
Let's take a peek at the file to see what is in it.
###Code
! head random_data.csv
###Output
_____no_output_____
###Markdown
Now lets make a function to plot that data. Again this could be in a separate script, but we'll just use a separate cell.
###Code
def plot():
    """
    Read 'random_data.csv' and plot its 'Numbers' column to 'NicePlot.png'.
    """
    tab = pd.read_csv('random_data.csv')
    data = tab['Numbers']
    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    # BUG FIX: the original plotted the module-level variable `a` instead of
    # the data just loaded from the file (and computed an unused smoothed
    # copy), so the figure ignored the CSV entirely. Plot the loaded data.
    ax.plot(data)
    ax.set_xlabel('Index')
    ax.set_ylabel('Value')
    pyplot.savefig('NicePlot.png')
    pyplot.show()
    return
plot()
###Output
_____no_output_____
###Markdown
Questions: If I want to use a different data set (maybe something that's not random junk), what do I need to change? If I want to save the file to a different location or use a different name, what do I need to change? If I have 10 input files, how do I automate reading all the data and saving all the figures?---The above solution is good, but we want to be able to re-use the function with different filenames without having to edit the code. How about we make the input/output filenames arguments to the function?
###Code
def plot(infile, outfile):
    """
    Read a data file and plot it to another file.

    Parameters
    ----------
    infile : string
        The input filename. Assume csv format.
        Assumed to have a column called 'Numbers' that we are going to plot.
    outfile : string
        The output filename. Should be .png or something that matplotlib
        knows how to write.
    """
    tab = pd.read_csv(infile)
    data = tab['Numbers']
    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    # BUG FIX: the original plotted the module-level variable `a` instead of
    # the data loaded from `infile` (and computed an unused smoothed copy),
    # so every output figure was identical regardless of the input file.
    ax.plot(data)
    ax.set_xlabel('Index')
    ax.set_ylabel('Value')
    pyplot.savefig(outfile)
    return
plot(infile='random_data.csv',
outfile='NicePlot.png')
###Output
_____no_output_____ |
notebooks/NLPmodel.ipynb | ###Markdown
EDA
###Code
contract_types_grouped = contract_types.groupby(['Contract type']).count()['Filename'].sort_values(ascending=True)
contract_types_grouped
fig, ax = plt.subplots(figsize=(5,7))
ax.barh(contract_types_grouped.index[-10:], contract_types_grouped[-10:], color='grey')
ax.set_title('Top 10 contract types')
ax.set_xlabel('Number of contracts');
# cla_df.info()
#set maximum column width for dataframes
pd.set_option('display.max_colwidth', 100)
###Output
_____no_output_____
###Markdown
Read full contract texts to list
###Code
def read_contracts(folder):
    """Read every file under *folder* into memory.

    Returns a pair of parallel lists ``(files, contracts)``: the file
    names (in ``os.listdir`` order) and the full text of each file.
    """
    names = os.listdir(folder)
    texts = []
    for name in names:
        with open(folder + '/' + name) as handle:
            texts.append(handle.read())
    return names, texts
folder = 'data/full_contract_txt'
files, contracts = read_contracts(folder)
len(contracts)
files[0]
contracts[0]
contracts_dict = {'Filename': files, 'contract_text': contracts}
contracts_df_base = pd.DataFrame(contracts_dict)
contracts_df_base['Filename'] = contracts_df_base['Filename'].str[:-4]
contracts_df_base['Filename'] = contracts_df_base['Filename'].str.strip()
contracts_df_base.head()
###Output
_____no_output_____
###Markdown
Merging contract types to contract dataframe and checking mismatches in file names
###Code
contracts_df = pd.merge(contracts_df_base, contract_types, on='Filename', how='left')
contracts_df.head()
contracts_df[contracts_df['Contract type'].isna()]
A = contracts_df.loc[88,'Filename']
A
contract_types[contract_types['Filename'].str.contains('NETGEAR,INC_04_21_2003-EX')]
B = contract_types.loc[501,'Filename']
B
len(A), len(B), type(A), type(B), A==B
import difflib
output_list = [li for li in difflib.ndiff(A, B) if li[0] != ' ']
output_list
contracts_df_clean = contracts_df.dropna()
###Output
_____no_output_____
###Markdown
TF-IDF with sklearn
###Code
tfidf = TfidfVectorizer(stop_words='english',
token_pattern='[A-Za-z]+',
ngram_range=(1, 1),
min_df=3)
tfidf_values = tfidf.fit_transform(contracts)
contracts_tfidf = pd.DataFrame(tfidf_values.toarray(), columns=tfidf.get_feature_names())
contracts_tfidf.insert(loc=0, column = 'Filename', value = files)
contracts_tfidf.insert(loc=1, column = 'Contract type', value = contracts_df['Contract type'])
contracts_tfidf
contracts_tfidf.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
Search term based on top 10 document
###Code
search_term = 'venture'
contracts_tfidf.sort_values(search_term, ascending=False)[['Contract type', search_term]][:10]
###Output
_____no_output_____
###Markdown
Unsupervised text clustering
###Code
X = contracts_tfidf.drop(['Filename','Contract type'], axis=1)
X.head()
from yellowbrick.cluster import KElbowVisualizer
fig, ax = plt.subplots(1,3, figsize = (24,6))
plt.subplots_adjust(wspace=0.5)
for ax_idx, metric in enumerate(['distortion', 'silhouette', 'calinski_harabasz']):
model = KMeans()
visualizer = KElbowVisualizer(model, k=(2,12), metric=metric, ax=ax[ax_idx], timings=False)
visualizer.fit(X) # Fit the data to the visualizer
visualizer.finalize() # Finalize and render the figure
km = KMeans(5)
km.fit(X)
y_predicted = km.predict(X)
y_predicted
y_predicted.shape
contracts_df_clean['KMeans_pred'] = y_predicted
contracts_df_clean.head()
# contracts_df_clean.groupby(['KMeans_pred', 'Contract type']).count()
#set maximum column width for dataframes
pd.set_option('display.max_rows', None)
pd.pivot_table(contracts_df_clean, index=['Contract type'], columns=['KMeans_pred'], aggfunc=len, fill_value=0)['Filename'].sort_values(by=[0,1,2,3,4], ascending=False)
###Output
_____no_output_____
###Markdown
Create word clouds
###Code
contracts_tfidf.insert(loc=0, column='KMeans_cluster', value=y_predicted)
feature_array = np.array(tfidf.get_feature_names())
tfidf_sorting = np.argsort(tfidf_values.toarray()).flatten()[::-1]
n = 200
top_n = feature_array[tfidf_sorting][:n]
top_words = ' '.join(top_n)
top_words
wc = WordCloud(width=600, height=600, background_color='lightgrey').generate_from_text(top_words)
fig, ax = plt.subplots(figsize=(15,15))
ax.imshow(wc)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([]);
###Output
_____no_output_____ |
data_science/08_Model Building.ipynb | ###Markdown
Login
###Code
from prediction import jwt_access
url = "https://ecosystem.server.path"
username = "[email protected]"
password = "password"
auth = jwt_access.Authenticate(url, username, password)
###Output
_____no_output_____
###Markdown
Train Model Description: An example showing how to ingest data and use H2O to train and run a model.
###Code
from IPython.lib.display import YouTubeVideo
YouTubeVideo('FbL2eaErCcA')
from prediction.apis import data_ingestion_engine
from prediction.apis import data_management_engine
from prediction.apis import prediction_engine
from prediction.apis import worker_h2o
data_ingestion_engine.get_databasesmeta(auth)
data_ingestion_engine.get_databasetablesmeta(auth, databasename)
data_ingestion_engine.get_databasetablecolumnsmeta(auth, databasename, tablename)
data_ingestion_engine.get_databasetablecolumnmeta(auth, databasename, tablename, columnname)
data_ingestion_engine.get_ingestmetas(auth)
data_ingestion_engine.get_ingestmeta(auth, ingest_name)
data_ingestion_engine.save_ingestion(auth, ingestion_json)
prediction_engine.get_featurestores(auth)
prediction_engine.get_featurestore(auth, frame_id)
worker_h2o.file_to_frame(auth, file_name, first_row_column_names, separator)
prediction_engine.get_uframe(auth, frame_id)
worker_h2o.featurestore_to_frame(auth, userframe)
prediction_engine.save_prediction(auth, prediction)
worker_h2o.train_model(auth, modelid, modeltype, params)
worker_h2o.get_train_model(auth, modelid, modeltype)
prediction_engine.test_model(auth, value)
data_management_engine.delete_documents(auth, doc_json)
###Output
_____no_output_____ |
FluABM_expts/flu-env-demo-[working].ipynb | ###Markdown
Preamble: Libs + signal def
###Code
# Standard / scientific stack used throughout the notebook.
import itertools, importlib, sys, warnings, os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
# ML libs
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
warnings.filterwarnings("ignore")
# Project code lives outside the notebook directory.
sys.path.append('./embodied_arch')
import embodied as emg
import embodied_indep as emi
%matplotlib inline
plt.rcParams['figure.figsize'] = (18,7)
log_path = './log/flu'
#tensorboard --logdir=flugame_worker_1:'./log/train_rf_flugame_worker'
# Reload project modules so in-place edits are picked up without a kernel restart.
importlib.reload(emg)
importlib.reload(emi)
#sys.path.append('./flumodel_python')
## Suppress annoying verbose TF messages.
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # '3' to block all including error msgs
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
###Output
Tensorflow version: 1.15.0
WARNING:tensorflow:
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
* https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
* https://github.com/tensorflow/addons
* https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
###Markdown
Default Behavioral Model
###Code
# Baseline (non-RL) flu agent-based model.
import flumodel_python.flu_env_basic as FABM
importlib.reload(FABM)
# Verify class signature ##??FABM
?FABM.Flu_ABM
# cd flumodel_python/
# Instantiate the ABM; model data files are read from model_path.
flu = FABM.Flu_ABM(model_path="./flumodel_python/")
# Peek at the synthetic population table.
flu.popn.head(10)
# One simulation step over all agents; returns per-agent infection status.
infected=(flu.stepAll())
###Output
_____no_output_____
###Markdown
RL ABM Env
###Code
# RL-enabled wrapper around the flu ABM (gym-like environment).
import flumodel_python.flu_env as Fenv
importlib.reload(Fenv)
?Fenv.Flu_env
###Output
_____no_output_____
###Markdown
Single Agent Setup
###Code
importlib.reload(emg)
tf.reset_default_graph()
# Environment with a single RL-controlled agent (population index 1).
flu_env = Fenv.Flu_env(exo_idx=1,
                       model_path="./flumodel_python/")
# flu_env.popn.columns
flu_env.rl_idx
# State features of the RL-controlled agent.
flu_env.popn[Fenv._state_lbls_].iloc[flu_env.rl_idx]
print(Fenv._state_lbls_)
flu_env.popn[Fenv._state_lbls_].iloc[[1,2,3,4]].values
# Small settings for a quick smoke test.
num_episodes, n_epochs, max_len = (5, 4, 5)
# REINFORCE-style embodied agent.
flurf = emg.EmbodiedAgentRF(
    name="fluRF",
    env_=flu_env,
    alpha=1.,
    max_episode_length=max_len
)
# flurf = emg.EmbodiedAgentAC(
#     name="fluAC",
#     env_=flu_env,
#     max_episode_length=max_len, latentDim=4,
#     alpha_p=1., alpha_v=1e-2,
#     actorNN=actor, sensorium=sensor, valueNN=value
# )
print(flurf, flurf.s_size, flurf.a_size)
print(flu_env.state_space_size, flu_env.action_space_size)
sess = tf.InteractiveSession()
flurf.init_graph(sess) # note tboard log dir
## Verify step + play set up
state = flurf.env.reset()
print(state, flurf.act(state, sess))
flurf.env.step(flurf.act(state, sess))
flu_env.popn[Fenv._state_lbls_].iloc[flu_env.rl_idx]
# Roll out one full episode.
flurf.play(sess)
print(flurf.last_total_return)
flu_env.popn[Fenv._state_lbls_].iloc[flu_env.rl_idx]
# NOTE(review): `saver` is not defined until a later cell
# (saver = tf.train.Saver(...)); running top-to-bottom this raises NameError.
hist1 = flurf.work(sess, num_epochs=n_epochs, saver=saver)
hist1
###Output
_____no_output_____
###Markdown
Multiple Agent Setup
###Code
# Independent multi-agent learners and the network building blocks they use.
import embodied_indep as emi
from embodied_misc import ActionPolicyNetwork, SensoriumNetworkTemplate, ValueNetwork
# importlib.reload(Fenv)
importlib.reload(emi)
###Output
_____no_output_____
###Markdown
MARL Env Setup
###Code
# Population indices of the RL-controlled ("exogenous") agents.
exos = [1,2,3,10] # (np.random.sample(9223) < 0.3)
# exos = 3707
importlib.reload(Fenv);
# importlib.reload(emg);
importlib.reload(emi);
tf.reset_default_graph()
# Multi-agent environment: one RL policy slot per index in exos.
flu_menv = Fenv.Flu_env(
    exo_idx=exos,
    model_path="./flumodel_python/"
)
print(flu_menv.actor_count)
print(flu_menv.state_space_size, flu_menv.action_space_size)
?flu.stepAll
# One ABM step over the whole population; 1.0 == infected.
infected=np.array(flu.stepAll(), dtype=float)
print(len(infected), sum(infected))
infected[exos]
num_episodes, max_len, n_epochs, evry = (100, 35, 1501, 300)
# Burn-in: advance the baseline ABM before collecting statistics.
for _ in range(150):
    infxd=np.array(flu.stepAll(), dtype=float)
# Collect num_episodes steps; infected has shape (episodes, population).
infxd = []
for k in range(num_episodes):
    infxd.append(np.array(flu.stepAll(), dtype=float))
infected = np.array(infxd)
print(infected.shape, np.mean(infected, axis=0))
len(np.mean(infected, axis=1))
# Distribution of per-agent infection frequency.
plt.hist(np.mean(infected, axis=0))
exos
# NOTE(review): axis 0 indexes episodes, so `infected[exos]` selects episode
# rows; `infected[:,exos]` (next line) selects the RL agents' columns.
# Confirm which selection was intended here.
np.mean(infected[exos], axis=1)
infected[:,exos].shape
###Output
_____no_output_____
###Markdown
MARL Learner Demo
###Code
# Network factories shared by all independent learners.
actor = lambda s: ActionPolicyNetwork(s, hSeq=(8,), gamma_reg=1e-1)
value = lambda s: ValueNetwork(s, hSeq=(8,), gamma_reg=1.)
sensor = lambda st, out_dim: SensoriumNetworkTemplate(st, hSeq=(16,8,8), out_dim=out_dim, gamma_reg=5.)
# num_episodes, n_epochs, max_len = (100, 1501, 15)
# num_episodes, n_epochs, max_len = (10, 400, 25)
# Tiny settings for a quick smoke test.
num_episodes, n_epochs, max_len = (5, 4, 5)
# Independent REINFORCE-with-baseline learners, one per RL agent.
flumrf = emi.EmbodiedAgent_IRFB(name="flu_mRFB",
                                env_=flu_menv,
                                alpha_p=5.0e2, alpha_v=50.,
                                max_episode_length=max_len,
                                latentDim=8, _every_=300,
                                actorNN=actor, valueNN=value, sensorium=sensor
                                )
print(flumrf.actor_names)
(flumrf.a_size, flumrf.env.action_space_size)
sess = tf.InteractiveSession()
flumrf.init_graph(sess) # note tboard log dir
# Keep only the most recent checkpoint.
saver = tf.train.Saver(max_to_keep=1)
hist = flumrf.work(sess, num_epochs=n_epochs, saver=saver)
hist
###Output
_____no_output_____ |
notebooks/random/image-generation.ipynb | ###Markdown
Image generation
###Code
import time
from collections import OrderedDict
import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from pandas import DataFrame
# Load MNIST and merge train + test splits into one array (70k images).
data = torchvision.datasets.MNIST(
    root='../data/mnist', train=True, download=True)
x = np.array(data.data)
y = np.array(data.targets)
data = torchvision.datasets.MNIST(
    root='../data/mnist', train=False, download=True)
x = np.concatenate((x, np.array(data.data)))
y = np.concatenate((y, np.array(data.targets)))
# Preview the first 64 digits in an 8x8 grid.
plt.figure(figsize=(16, 16))
for i in range(8**2):
    plt.subplot(8, 8, i + 1)
    plt.imshow(x[i], cmap=plt.cm.gray)
    plt.xticks([])
    plt.yticks([])
plt.show()
def plot_history(history):
    """Plot GAN training curves from a ``fit`` history dict.

    Left panel: discriminator and generator loss per epoch.
    Right panel: discriminator accuracy per epoch.
    """
    epochs = history['epoch']
    # (title, y-label, [(history key, legend label), ...]) per panel.
    panels = [
        ('loss during training', 'loss',
         [('d_loss', 'discriminator loss'), ('g_loss', 'generator loss')]),
        ('accuracy during training', 'accuracy',
         [('d_acc', 'discriminator accuracy')]),
    ]
    plt.figure(figsize=(16, 5))
    for position, (title, ylabel, series) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        for key, label in series:
            plt.plot(epochs, history[key], label=label)
        plt.title(title)
        plt.legend()
        plt.xlabel('epoch')
        plt.ylabel(ylabel)
    plt.show()
class BaseGAN:
    """Shared GAN training loop; subclasses assign self.g / self.d networks."""

    def __init__(self):
        # Train on GPU when available, otherwise CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.loss = torch.nn.functional.binary_cross_entropy
        # Generator / discriminator are provided by subclasses.
        self.g = None
        self.d = None

    def fit(self, x, epochs=100, batch_size=64):
        """Adversarially train generator and discriminator on images x.

        x: array-like of images (pixel values 0..255; rescaled to [-1, 1]).
        Returns a history dict with per-epoch 'g_loss', 'd_loss', 'd_acc'.
        """
        self.g.to(self.device)
        self.d.to(self.device)
        data_loader = self._build_loader_x(x, batch_size=batch_size)
        history = {'epoch': [], 'g_loss': [], 'd_loss': [], 'd_acc': []}
        for epoch in range(1, epochs + 1):
            self.g.train()
            self.d.train()
            start_time = time.time()
            g_loss, d_loss, d_acc = 0, 0, 0
            for real_imgs, in data_loader:
                # Real targets are 0.9 rather than 1.0 (label smoothing).
                real = torch.Tensor(real_imgs.size(0), 1).fill_(0.9).to(self.device)
                fake = torch.Tensor(real_imgs.size(0), 1).fill_(0.0).to(self.device)
                real_imgs = real_imgs.to(self.device)
                # Generate fakes from 100-dim standard-normal noise.
                fake_imgs = self.g(torch.Tensor(
                    np.random.normal(0, 1, (real_imgs.size(0), 100))).to(self.device))
                # Train discriminator (detach fakes so g gets no gradient here).
                self.d.optimizer.zero_grad()
                real_out = self.d(real_imgs)
                fake_out = self.d(fake_imgs.detach())
                d_loss_ = (self.loss(real_out, real) + self.loss(fake_out, fake)) / 2
                d_loss_.backward()
                self.d.optimizer.step()
                # Train generator: make d classify fakes as real.
                self.g.optimizer.zero_grad()
                g_loss_ = self.loss(self.d(fake_imgs), real)
                g_loss_.backward()
                self.g.optimizer.step()
                # Accumulate batch metrics (losses weighted by batch size).
                g_loss += float(g_loss_) * len(real_imgs)
                d_loss += float(d_loss_) * len(real_imgs)
                d_acc += int((real_out >= 0.5).sum()) + int((fake_out < 0.5).sum())
            # Average metrics over the epoch.
            g_loss /= len(data_loader.dataset)
            d_loss /= len(data_loader.dataset)
            d_acc /= 2 * len(data_loader.dataset)
            duration = time.time() - start_time
            # Record training history for plotting.
            history['epoch'].append(epoch)
            history['g_loss'].append(g_loss)
            history['d_loss'].append(d_loss)
            history['d_acc'].append(d_acc)
            print(
                f'[{epoch:{len(str(epochs))}}/{epochs}] {duration:.1f}s'
                f' - g_loss: {g_loss:.4f} - d_loss: {d_loss:.4f} - d_acc: {d_acc:.4f}')
            # Periodically preview 16 generated samples.
            if epoch == 1 or epoch % 20 == 0 or epoch == epochs:
                fake_imgs = self.g(torch.Tensor(
                    np.random.normal(0, 1, (16, 100))).to(self.device)).detach().cpu()
                plt.figure(figsize=(16, 4))
                for i in range(16):
                    plt.subplot(2, 8, i + 1)
                    plt.imshow(fake_imgs[i], cmap=plt.cm.gray)
                    plt.xticks([])
                    plt.yticks([])
                plt.show()
        return history

    @staticmethod
    def _build_loader_x(x, batch_size):
        # Rescale pixels from [0, 255] to [-1, 1] to match the Tanh output.
        return torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(torch.FloatTensor(x) / 127.5 - 1.),
            batch_size=batch_size, shuffle=True)
class GAN(BaseGAN):
    """Fully-connected (MLP) GAN for 28x28 MNIST images."""

    def __init__(self):
        super().__init__()

        class Generator(torch.nn.Module):
            # 100-dim noise -> 784 pixels in [-1, 1], reshaped to 28x28.
            def __init__(self):
                super().__init__()
                self.layers = torch.nn.ModuleList([
                    torch.nn.Linear(100, 256),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.BatchNorm1d(256),
                    torch.nn.Linear(256, 512),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.BatchNorm1d(512),
                    torch.nn.Linear(512, 1024),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.BatchNorm1d(1024),
                    torch.nn.Linear(1024, 784),
                    torch.nn.Tanh(),
                ])
                self.optimizer = torch.optim.Adam(
                    self.parameters(), lr=0.0002, betas=(0.5, 0.999))

            def forward(self, x):
                for layer in self.layers:
                    x = layer(x)
                # Reshape flat 784 vector to a 28x28 image.
                x = x.view(x.size(0), 28, 28)
                return x

        class Discriminator(torch.nn.Module):
            # 28x28 image (flattened) -> probability of being real.
            def __init__(self):
                super().__init__()
                self.layers = torch.nn.ModuleList([
                    torch.nn.Dropout(0.3),
                    torch.nn.Linear(784, 512),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.Dropout(0.3),
                    torch.nn.Linear(512, 256),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.Dropout(0.3),
                    torch.nn.Linear(256, 1),
                    torch.nn.Sigmoid(),
                ])
                self.optimizer = torch.optim.Adam(
                    self.parameters(), lr=0.0002, betas=(0.5, 0.999))

            def forward(self, x):
                # Flatten the image before the dense layers.
                x = x.view(x.size(0), -1)
                for layer in self.layers:
                    x = layer(x)
                return x

        self.g = Generator()
        self.d = Discriminator()
# Train the MLP GAN and plot its loss/accuracy curves.
model = GAN()
history = model.fit(x)
plot_history(history)
class DCGAN(BaseGAN):
    """Convolutional GAN (DCGAN-style) for 28x28 MNIST images."""

    def __init__(self):
        super().__init__()

        class Generator(torch.nn.Module):
            # 100-dim noise -> 28x28 image via transposed convolutions.
            def __init__(self):
                super().__init__()
                self.layers = torch.nn.ModuleList([
                    torch.nn.ConvTranspose2d(100, 512, 4),
                    torch.nn.BatchNorm2d(512),
                    torch.nn.ReLU(),
                    torch.nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1),
                    torch.nn.BatchNorm2d(256),
                    torch.nn.ReLU(),
                    torch.nn.ConvTranspose2d(256, 128, 4, stride=2, padding=2),
                    torch.nn.BatchNorm2d(128),
                    torch.nn.ReLU(),
                    torch.nn.ConvTranspose2d(128, 1, 4, stride=2, padding=1),
                    torch.nn.Tanh(),
                ])
                self.optimizer = torch.optim.Adam(
                    self.parameters(), lr=0.0002, betas=(0.5, 0.999))

            def forward(self, x):
                # Noise vector becomes a 1x1 "image" with 100 channels.
                x = x.view(x.size(0), x.size(1), 1, 1)
                for layer in self.layers:
                    x = layer(x)
                x = x.view(x.size(0), 28, 28)
                return x

        class Discriminator(torch.nn.Module):
            # 28x28 image -> probability of being real, via strided convs.
            def __init__(self):
                super().__init__()
                self.layers = torch.nn.ModuleList([
                    torch.nn.Dropout2d(0.2),
                    torch.nn.Conv2d(1, 16, 4, stride=2, padding=1),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.Dropout2d(0.2),
                    torch.nn.Conv2d(16, 32, 4, stride=2, padding=2),
                    torch.nn.BatchNorm2d(32),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.Dropout2d(0.2),
                    torch.nn.Conv2d(32, 64, 4, stride=2, padding=1),
                    torch.nn.BatchNorm2d(64),
                    torch.nn.LeakyReLU(0.2),
                    torch.nn.Dropout2d(0.2),
                    torch.nn.Conv2d(64, 1, 4),
                    torch.nn.Sigmoid(),
                ])
                self.optimizer = torch.optim.Adam(
                    self.parameters(), lr=0.0002, betas=(0.5, 0.999))

            def forward(self, x):
                # Add the single channel dimension expected by Conv2d.
                x = x.view(x.size(0), 1, 28, 28)
                for layer in self.layers:
                    x = layer(x)
                x = x.view(x.size(0), -1)
                return x

        self.g = Generator()
        self.d = Discriminator()
# Train the convolutional GAN and plot its loss/accuracy curves.
model = DCGAN()
history = model.fit(x)
plot_history(history)
###Output
_____no_output_____ |
stats_101/02_data_description.ipynb | ###Markdown
Table of ContentsMeasures of central tendencyMeasuring dispersion - Inter Quartile RangesHistogram of sales dataMeasures of dispersion - Standard DeviationCoefficient of variabilityDispersion based on percentilesCorrelation coefficientWhen is big data needed? Data Description
###Code
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Load the example sales series from the textbook dataset.
sales_data = pd.read_csv('datasets_csv/CH03/ex3-14.txt')
# Rename columns (the raw header contains quoted names).
sales_data.columns = ['Year', 'Month', 'Sales']
sales_data.head()
###Output
_____no_output_____
###Markdown
Measures of central tendency
###Code
# Central-tendency summary of the Sales column.
print('Mean ' + str(sales_data.Sales.mean()))
print('Median ' + str(sales_data.Sales.median()))
print('Mode ' + str(sales_data.Sales.mode()))
###Output
Mean 118.7
Median 116.95
Mode 0 117.5
dtype: float64
###Markdown
Measuring dispersion - Inter Quartile Ranges
###Code
# Quartiles and inter-quartile range of Sales.
quartiles = sales_data.Sales.quantile(q=[0.25, 0.5, 0.75])
quartiles
IQR = quartiles[0.75] - quartiles[0.25]
IQR
###Output
_____no_output_____
###Markdown
Histogram of sales data
###Code
# Create a subplot to show the histogram and box plot stacked vertically.
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(8,10))
# Plot a histogram using pandas.
hax = sales_data.Sales.hist(ax = axs[0], bins=15, grid=False)
hax.set_title('Histogram of Sales data')
# Get the axis bounds (x0, x1, y0, y1) to span vertical marker lines.
hax_bounds = hax.axis()
# Plot the mean (default color).
mean = sales_data.Sales.mean()
hax.vlines(mean, hax_bounds[2], hax_bounds[3], label='Mean = ' + str(mean))
# Plot the median in yellow.
median= sales_data.Sales.median()
hax.vlines(median, hax_bounds[2], hax_bounds[3], label='Median = ' + str(median), colors='yellow')
# Plot the mode in red (first mode if several).
mode= sales_data.Sales.mode()[0]
hax.vlines(mode, hax_bounds[2], hax_bounds[3], label='Mode = ' + str(mode), colors='red')
# Standard deviation of Sales.
sd = sales_data.Sales.std()
# Mean +/- 1 SD marker lines in cyan.
m1sd = mean + 1*sd
m1negsd = mean - 1*sd
hax.vlines(m1sd, hax_bounds[2], hax_bounds[3], label='Mean + 1SD = ' + str(m1sd), colors='cyan')
hax.vlines(m1negsd, hax_bounds[2], hax_bounds[3], label='SD = ' + str(sd), colors='cyan')
hax.legend()
############## plot 2
# Now plot the box plot (vert=False makes it horizontal).
bax = sales_data.Sales.plot(kind='box', ax = axs[1], title = 'Boxplot of Sales data', vert=False)
# Annotate the quartiles.
quartiles = sales_data.Sales.quantile([0.25, 0.5, 0.75])
bax.text(quartiles[0.25], 0.75, r'$Q_{0.25}= ' + str(quartiles[0.25])+'$')
bax.text(quartiles[0.75], 0.75, r'$Q_{0.75}= ' + str(quartiles[0.75])+'$')
# Calculate and annotate the IQR.
iqr = quartiles[0.75] - quartiles[0.25]
bax.text(x=150, y=1.25, s='IQR = ' + str(iqr))
# Left inner fence (Q1 - 1.5*IQR): values below are outlier candidates.
liq = quartiles[0.25] - 1.5*iqr
bax.text(x=liq, y=0.85, s=str(liq))
# Right inner fence (Q3 + 1.5*IQR): values above are outlier candidates.
riq = quartiles[0.75] + 1.5*iqr
bax.text(x=riq, y=0.85, s=str(riq))
_____no_output_____ |
Copy_of_LS_DS_22str3_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 2, Module 3*--- Before you start: Today is the day you should submit the dataset for your Unit 2 Build Week project. You can review the guidelines and make your submission in the Build Week course for your cohort on Canvas.
###Code
%%capture
import sys

# If you're on Colab: fetch data from GitHub and install extra packages.
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/main/data/'
    !pip install category_encoders==2.*
    !pip install pandas-profiling==2.*

# If you're working locally: read data from the sibling data directory.
else:
    DATA_PATH = '../data/'
###Output
_____no_output_____
###Markdown
Module Project: Hyperparameter TuningThis sprint, the module projects will focus on creating and improving a model for the Tanazania Water Pump dataset. Your goal is to create a model to predict whether a water pump is functional, non-functional, or needs repair.Dataset source: [DrivenData.org](https://www.drivendata.org/competitions/7/pump-it-up-data-mining-the-water-table/). DirectionsThe tasks for this project are as follows:- **Task 1:** Use `wrangle` function to import training and test data.- **Task 2:** Split training data into feature matrix `X` and target vector `y`.- **Task 3:** Establish the baseline accuracy score for your dataset.- **Task 4:** Build `clf_dt`.- **Task 5:** Build `clf_rf`.- **Task 6:** Evaluate classifiers using k-fold cross-validation.- **Task 7:** Tune hyperparameters for best performing classifier.- **Task 8:** Print out best score and params for model.- **Task 9:** Create `submission.csv` and upload to Kaggle.You should limit yourself to the following libraries for this project:- `category_encoders`- `matplotlib`- `pandas`- `pandas-profiling`- `sklearn` I. Wrangle Data
###Code
import pandas as pd
import numpy as np
def wrangle(fm_path, tv_path=None):
    """Load the water-pump data and drop unusable columns.

    Parameters
    ----------
    fm_path : str
        Path to the feature-matrix CSV (must contain an 'id' column).
    tv_path : str, optional
        Path to the target-vector CSV. When given, features and targets
        are merged (on their common columns) into a single frame.

    Returns
    -------
    pandas.DataFrame
        Frame indexed by 'id' with constant, high-cardinality-categorical
        and duplicate columns removed.
    """
    # 0 and -2e-08 are sentinel values in this dataset -> treat as missing.
    na_values = [0, -2.000000e-08]
    if tv_path:
        df = pd.merge(pd.read_csv(fm_path, na_values=na_values),
                      pd.read_csv(tv_path)).set_index('id')
    else:
        df = pd.read_csv(fm_path, na_values=na_values, index_col='id')

    # Drop constant columns
    df.drop(columns=['recorded_by'], inplace=True)

    # Drop high-cardinality categorical columns (HCCCs).
    cutoff = 100
    drop_cols = [col for col in df.select_dtypes('object').columns
                 if df[col].nunique() > cutoff]
    df.drop(columns=drop_cols, inplace=True)

    # Drop duplicate columns, judged by their first 15 rows.
    # Compute the duplicated-mask once instead of once per column.
    dupes = df.head(15).T.duplicated()
    df.drop(columns=dupes[dupes].index.tolist(), inplace=True)

    return df
###Output
_____no_output_____
###Markdown
**Task 1:** Using the above `wrangle` function to read `train_features.csv` and `train_labels.csv` into the DataFrame `df`, and `test_features.csv` into the DataFrame `X_test`.
###Code
# Upload the CSVs into the Colab runtime, then wrangle train and test sets.
from google.colab import files
upload =files.upload()
df = wrangle('train_features.csv', 'train_labels.csv')
X_test = wrangle('test_features.csv')
df.head()
###Output
_____no_output_____
###Markdown
II. Split Data**Task 2:** Split your DataFrame `df` into a feature matrix `X` and the target vector `y`. You want to predict `'status_group'`.**Note:** You won't need to do a train-test split because you'll use cross-validation instead.
###Code
# Split into feature matrix X and target vector y (no train/test split:
# cross-validation is used instead).
target = 'status_group'
X = df.drop(columns=[target])
y = df[target]
X_train = X
y_train = y
X.shape, y.shape, X_train.shape, y_train.shape
###Output
_____no_output_____
###Markdown
III. Establish Baseline**Task 3:** Since this is a **classification** problem, you should establish a baseline accuracy score. Figure out what is the majority class in `y_train` and what percentage of your training observations it represents.
###Code
from sklearn.metrics import accuracy_score
y_train.value_counts(normalize=True)
# Baseline: always predict the majority class.
majority_class = y_train.mode()[0]
y_pred = [majority_class] * len(y_train)
baseline_acc = accuracy_score(y_train, y_pred)
print('Baseline Accuracy Score:', baseline_acc)
###Output
Baseline Accuracy Score: 0.5429828068772491
###Markdown
IV. Build Models**Task 4:** Build a `Pipeline` named `clf_dt`. Your `Pipeline` should include:- an `OrdinalEncoder` transformer for categorical features.- a `SimpleImputer` transformer for missing values.- a `DecisionTreeClassifier` predictor.**Note:** Do not train `clf_dt`. You'll do that in a subsequent task.
###Code
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline

# Decision-tree pipeline: encode categoricals, impute medians, then classify.
clf_dt = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    DecisionTreeClassifier()
)
###Output
_____no_output_____
###Markdown
**Task 5:** Build a `Pipeline` named `clf_rf`. Your `Pipeline` should include:- an `OrdinalEncoder` transformer for categorical features.- a `SimpleImputer` transformer for missing values.- a `RandomForestClassifier` predictor.**Note:** Do not train `clf_rf`. You'll do that in a subsequent task.
###Code
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

# Random-forest pipeline with the same preprocessing as clf_dt.
clf_rf = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42)
)
###Output
_____no_output_____
###Markdown
V. Check Metrics**Task 6:** Evaluate the performance of both of your classifiers using k-fold cross-validation.
###Code
from sklearn.model_selection import cross_val_score

# Compare both classifiers with k-fold cross-validation.
k = 10
cv_scores_dt = cross_val_score(clf_dt, X_train, y_train, cv=k)
cv_scores_rf = cross_val_score(clf_rf, X_train, y_train, cv=k)
print('CV scores DecisionTreeClassifier')
print(cv_scores_dt)
print('Mean CV accuracy score:', cv_scores_dt.mean())
print('STD CV accuracy score:', cv_scores_dt.std())
print('CV score RandomForestClassifier')
print(cv_scores_rf)
print('Mean CV accuracy score:', cv_scores_rf.mean()),
print('STD CV accuracy score:', cv_scores_rf.std())
###Output
CV score RandomForestClassifier
[0.79650673 0.80239899 0.80744949 0.80155724 0.80092593 0.80260943
0.80029461 0.80639731 0.80260943 0.80067354]
Mean CV accuracy score: 0.8021422700661281
STD CV accuracy score: 0.002931319956870987
###Markdown
VI. Tune Model**Task 7:** Choose the best performing of your two models and tune its hyperparameters using a `RandomizedSearchCV` named `model`. Make sure that you include cross-validation and that `n_iter` is set to at least `25`.**Note:** If you're not sure which hyperparameters to tune, check the notes from today's guided project and the `sklearn` documentation.
###Code
# NOTE(review): LabelEncoder is imported but never used in this cell.
from sklearn.preprocessing import LabelEncoder
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV

# Fresh pipeline to tune (imputer strategy is part of the search space).
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(),
    RandomForestClassifier()
)

# Search space: imputer strategy plus random-forest capacity knobs.
param_distributions = {
    'simpleimputer__strategy': ['mean', 'median'],
    'randomforestclassifier__n_estimators': randint(50, 500),
    'randomforestclassifier__max_depth': [5, 10, 15, 20, None],
    'randomforestclassifier__max_features': uniform(0, 1),
}

# Randomized search: 25 candidates x 3-fold CV, all cores.
model = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=25,
    cv=3,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
model.fit(X_train, y_train);
###Output
Fitting 3 folds for each of 25 candidates, totalling 75 fits
###Markdown
**Task 8:** Print out the best score and best params for `model`.
###Code
# Report the tuned model's CV score and winning hyperparameters.
best_score = model.best_score_
best_params = model.best_params_
print('Best score for `model`:', best_score)
print('Best params for `model`:', best_params)
###Output
Best score for `model`: 0.8010690671472602
Best params for `model`: {'randomforestclassifier__max_depth': 20, 'randomforestclassifier__max_features': 0.28524445205230176, 'randomforestclassifier__n_estimators': 277, 'simpleimputer__strategy': 'mean'}
###Markdown
Communicate Results**Task 9:** Create a DataFrame `submission` whose index is the same as `X_test` and that has one column `'status_group'` with your predictions. Next, save this DataFrame as a CSV file and upload your submissions to our competition site. **Note:** Check the `sample_submission.csv` file on the competition website to make sure your submission follows the same formatting.
###Code
#X_test.drop('waterpoint_type_group', axis=1, inplace=True)
# Predict on the test set and write a Kaggle-style submission file
# (index = test ids, single 'status_group' column).
y_pred = model.predict(X_test)
submission = pd.DataFrame(y_pred)
submission.index = X_test.index
submission.columns = ['status_group']
submission.to_csv('submissions.csv')
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____ |
BC1_customer_segmentation/past notebooks/BC1_Renan.ipynb | ###Markdown
Business Understanding: https://github.com/joaopfonseca/business-cases-21-22/tree/main/BC1_customer_segmentation Data Understanding: Data Exploration- view the original dataset- outliers- null values
###Code
# Read the dataset.
df = pd.read_excel('WonderfulWinesoftheWorld.xlsx')
# Fill the one missing customer id and use Custid as the index.
df.Custid.fillna(11001.0, inplace=True)
df.set_index('Custid', inplace = True)
# Create a copy of the original df for preprocessing.
df_backup = df.copy()
# Last rows - view the structure.
df.tail()
df.info()
# The dataset has null values - how to correct them?
df.isna().sum()
# Checking for duplicates:
duplicate=df.duplicated()
print(duplicate.sum())
df[duplicate]
df.describe().T
# Plot the distribution of every numeric variable.
%matplotlib inline
num_vars=df.select_dtypes(include=np.number).set_index(df.index)
# Prepare figure. Create individual axes where each plot will be placed.
fig, axes = plt.subplots(3, ceil(num_vars.shape[1] / 3), figsize=(20, 11))
# Plot data
# Iterate across axes objects and associate each histogram (hint: use the ax argument):
for ax, feat in zip(axes.flatten(), num_vars): # Notice the zip() function and flatten() method
    sns.histplot(x=num_vars[feat], ax=ax)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' histogram"
plt.suptitle(title)
plt.show()
###Output
_____no_output_____
###Markdown
Outliers
###Code
# Box plots to inspect outliers per numeric variable.
%matplotlib inline
num_vars=df.select_dtypes(include=np.number).set_index(df.index)
# Prepare figure. Create individual axes where each box plot will be placed
fig, axes = plt.subplots(3, ceil(num_vars.shape[1] / 3), figsize=(20, 11))
# Plot data
# Iterate across axes objects and associate each box plot (hint: use the ax argument):
for ax, feat in zip(axes.flatten(), num_vars): # Notice the zip() function and flatten() method
    sns.boxplot(x=num_vars[feat], ax=ax)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' Box Plots"
plt.suptitle(title)
plt.show()
###Output
_____no_output_____
###Markdown
Data Preparation: Dealing with Outliers: DBscan
###Code
# Robust Scaler - scale features using statistics that are robust to outliers.
robust = RobustScaler().fit(df)
df_norm = robust.transform(df)
# Convert the array back to a pandas dataframe.
df_norm = pd.DataFrame(df_norm, columns=df.columns).set_index(df.index)
df_norm.describe().T
# K-distance graph to find out the right eps value (look for the elbow).
neigh = NearestNeighbors(n_neighbors=20)
neigh.fit(df_norm)
distances, _ = neigh.kneighbors(df_norm)
distances = np.sort(distances[:, -1])
plt.plot(distances)
plt.show()
# DBSCAN flags points in low-density regions with label -1 (outliers).
dbscan = DBSCAN(eps=2.7, min_samples=20, n_jobs=6)
dbscan_labels = dbscan.fit_predict(df_norm)
Counter(dbscan_labels)
# Save the newly detected outliers (they will be classified later based on the final clusters)
df_out = df[dbscan_labels==-1].copy()
# New df without outliers
df = df[dbscan_labels!=-1]\
    .copy()
# Same filtering applied to the scaled frame.
df_norm = df_norm[dbscan_labels!=-1]\
    .copy()
df=df.reset_index(drop=True)
df_norm=df_norm.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Feature selection: Correlation matrix
###Code
# Prepare figure
fig = plt.figure(figsize=(10, 8))

# Obtain correlation matrix. Round the values to 2 decimal cases. Use the DataFrame corr() and round() method.
corr = np.round(df.corr(method="spearman"), decimals=2)

# Build annotation matrix (values above |0.5| will appear annotated in the plot)
mask_annot = np.absolute(corr.values) >= 0.5
annot = np.where(mask_annot, corr.values, np.full(corr.shape,""))

# Plot heatmap of the correlation matrix
sns.heatmap(data=corr, annot=annot, cmap=sns.diverging_palette(220, 10, as_cmap=True),
            fmt='s', vmin=-1, vmax=1, center=0, square=True, linewidths=.5)

# Layout
# NOTE(review): the title says "Pearson" but corr() above uses
# method="spearman" -- the title string should likely be corrected.
fig.subplots_adjust(top=0.95)
fig.suptitle("Pearson Correlation Matrix", fontsize=20)

plt.show()
# age or income
#df.drop('Age',inplace=True,axis=1)
# do not use monetary and recency
#list of possible variables:
#LTV, WebVisit, frequency
#
## Pairwise Relationship of Numerical Variables
#sns.set()
#
## Setting pairplot
#sns.pairplot(df, diag_kind="hist", corner=True)
#
## Layout
#plt.subplots_adjust(top=0.95)
#plt.suptitle("Pairwise Relationship of Numerical Variables", fontsize=20)
#
#plt.show()
# There were no duplicated observations.
df.duplicated().unique()
###Output
_____no_output_____
###Markdown
Feature Engineering:
###Code
df.columns
# Tenure in years, from days-with-us.
df["YearsAsCustomer"]=round(df.Dayswus/365,1)
# Log-income: clamp values <= 1 to 1 so log() stays defined and non-negative.
df["logIncome"]=df["Income"].map(lambda x : 1 if x<=1 else x)
df["logIncome"]=np.log(df["logIncome"])
# Log-LTV with the same clamping.
df["logLTV"]=df["LTV"].map(lambda x : 1 if x<=1 else x)
df["logLTV"]=np.log(df["logLTV"])
_____no_output_____
###Markdown
PCA
###Code
# Re-scale after feature engineering (new columns need scaling too).
robust = RobustScaler().fit(df)
df_norm = robust.transform(df)
# Convert the array back to a pandas dataframe.
df_norm = pd.DataFrame(df_norm, columns=df.columns).set_index(df.index)
# Numeric behavioural/demographic features fed into PCA.
df_pca = df_norm[['Dayswus', 'Age', 'Edu', 'Income', 'Freq', 'Recency', 'Monetary', 'LTV',
                  'Perdeal','WebPurchase', 'WebVisit', 'YearsAsCustomer', 'logIncome',
                  'logLTV']].copy()
pca = PCA()
pca_feat = pca.fit_transform(df_pca)
# Output PCA table: eigenvalues and explained-variance proportions.
pd.DataFrame(
    {"Eigenvalue": pca.explained_variance_,
     "Difference": np.insert(np.diff(pca.explained_variance_), 0, 0),
     "Proportion": pca.explained_variance_ratio_,
     "Cumulative": np.cumsum(pca.explained_variance_ratio_)},
    index=range(1, pca.n_components_ + 1)
).round(4)
# Figure and axes for the scree / variance-explained plots.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
# Draw plots.
ax1.plot(pca.explained_variance_, marker=".", markersize=12)
ax2.plot(pca.explained_variance_ratio_, marker=".", markersize=12, label="Proportion")
ax2.plot(np.cumsum(pca.explained_variance_ratio_), marker=".", markersize=12, linestyle="--", label="Cumulative")
# Customizations.
ax2.legend()
ax1.set_title("Scree Plot", fontsize=14)
ax2.set_title("Variance Explained", fontsize=14)
ax1.set_ylabel("Eigenvalue")
ax2.set_ylabel("Proportion")
ax1.set_xlabel("Components")
ax2.set_xlabel("Components")
ax1.set_xticks(range(0, pca.n_components_, 2))
ax1.set_xticklabels(range(1, pca.n_components_ + 1, 2))
ax2.set_xticks(range(0, pca.n_components_, 2))
ax2.set_xticklabels(range(1, pca.n_components_ + 1, 2))
plt.show()
###Output
_____no_output_____
###Markdown
- the first two Coponents explain 70% of the variance- the first four Components explain 90% of the variance
###Code
# Refit PCA keeping only the 4 principal components (≈90% of the variance).
pca = PCA(n_components=4)
pca_feat = pca.fit_transform(df_pca)
pca_feat_names = [f"PC{i}" for i in range(pca.n_components_)]
pca_df = pd.DataFrame(pca_feat, index=df_pca.index, columns=pca_feat_names)
pca_df
# Reassigning df to contain pca variables (original features + PC scores).
df_pca = pd.concat([df_pca, pca_df], axis=1)
df_pca.head()
def _color_red_or_green(val):
if val < -0.45:
color = 'background-color: red'
elif val > 0.45:
color = 'background-color: green'
else:
color = ''
return color
# Interpreting each Principal Component: correlate original features with
# the PC scores and highlight strong loadings (|r| > 0.45) via the styler.
loadings = df_pca.corr().loc[['Dayswus', 'Age', 'Edu', 'Income', 'Freq', 'Recency', 'Monetary', 'LTV',
                              'Perdeal','WebPurchase', 'WebVisit', 'YearsAsCustomer', 'logIncome',
                              'logLTV'], pca_feat_names]
loadings.style.applymap(_color_red_or_green)
###Output
_____no_output_____
###Markdown
PCA0 is almost exclusively the combination of income/frequency/age/LTV, which all seem to have lots of overlap PCA1 is Recency, meaning we should probably include it in the clustering PCA2 is years as customer (40% webvisit) PCA3 is education. The rest are not informative, so given the PCA analysis and the correlation analysis we will try clustering with a combination of the following: - LTV/logLTV- per deal- recency - years as customer - education- webvisit
###Code
# Persist the cleaned data and the DBSCAN-flagged outliers for later steps.
df.to_csv('cleaned_data.csv', index_label='Custid')
df_out.to_csv('outliers.csv', index_label='Custid')
###Output
_____no_output_____
###Markdown
Final data normalization:
###Code
# Final scaling pass before clustering (robust to remaining outliers).
robust = RobustScaler().fit(df)
df_norm = robust.transform(df)
# Convert the array back to a pandas dataframe.
df_norm = pd.DataFrame(df_norm, columns=df.columns).set_index(df.index)
###Output
_____no_output_____
###Markdown
Modeling:
###Code
# Final data quality check:
## Variables included in clustering:
clust=['LTV', 'Perdeal', 'Recency', 'YearsAsCustomer', 'Edu', 'WebVisit']
# Prepare figure
fig = plt.figure(figsize=(10, 8))

# Obtain correlation matrix. Round the values to 2 decimal cases. Use the DataFrame corr() and round() method.
corr = np.round(df[clust].corr(method="spearman"), decimals=2)

# Build annotation matrix (values above |0.5| will appear annotated in the plot)
mask_annot = np.absolute(corr.values) >= 0.5
annot = np.where(mask_annot, corr.values, np.full(corr.shape,""))

# Plot heatmap of the correlation matrix
sns.heatmap(data=corr, annot=annot, cmap=sns.diverging_palette(220, 10, as_cmap=True),
            fmt='s', vmin=-1, vmax=1, center=0, square=True, linewidths=.5)

# Layout
# NOTE(review): title says "Pearson" but the matrix uses method="spearman".
fig.subplots_adjust(top=0.95)
fig.suptitle("Pearson Correlation Matrix", fontsize=20)

plt.show()
# Keep either LTV or Perdeal (they are strongly correlated).
value=['LTV', 'Recency', 'YearsAsCustomer', 'Edu', 'WebVisit']
pref=['Dryred','Sweetred','Drywh','Sweetwh','Dessert','Exotic']
df_val = df_norm[value].copy()
df_pr = df_norm[pref].copy()
_____no_output_____
###Markdown
K-means: Defining the number of clusters: Value Features:
###Code
#range_clusters = range(1, 15)
#inertia = []
#for n_clus in range_clusters: # iterate over desired ncluster range
# kmclust = KMeans(n_clusters=n_clus, init='k-means++', n_init=15, random_state=1)
# kmclust.fit(df_val)
# inertia.append(kmclust.inertia_)
## The inertia plot
#plt.figure(figsize=(9,5))
#plt.plot(inertia)
#ax.set_xlim(1, 24)
#plt.ylabel("Inertia: SSw")
#plt.xlabel("Number of clusters")
#plt.title("Inertia plot over clusters", size=15)
#plt.show()
###Output
_____no_output_____
###Markdown
We do not have a very distinct elbow point here. The optimum value of k can be around 2–6 from the above plot as inertia continues to drop steeply at least till k=4. And since we didn’t get a direct answer, we can also use the silhouette method.
###Code
## Storing average silhouette metric
#avg_silhouette = []
#for nclus in range_clusters:
# # Skip nclus == 1
# if nclus == 1:
# continue
#
# # Create a figure
# fig = plt.figure(figsize=(13, 7))
#
# # Initialize the KMeans object with n_clusters value and a random generator
# # seed of 10 for reproducibility.
# kmclust = KMeans(n_clusters=nclus, init='k-means++', n_init=15, random_state=1)
# cluster_labels = kmclust.fit_predict(df_val)
#
# # The silhouette_score gives the average value for all the samples.
# # This gives a perspective into the density and separation of the formed clusters
# silhouette_avg = silhouette_score(df_val, cluster_labels)
# avg_silhouette.append(silhouette_avg)
# print(f"For n_clusters = {nclus}, the average silhouette_score is : {silhouette_avg}")
#
# # Compute the silhouette scores for each sample
# sample_silhouette_values = silhouette_samples(df_val, cluster_labels)
#
# y_lower = 10
# for i in range(nclus):
# # Aggregate the silhouette scores for samples belonging to cluster i, and sort them
# ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
# ith_cluster_silhouette_values.sort()
#
# # Get y_upper to demarcate silhouette y range size
# size_cluster_i = ith_cluster_silhouette_values.shape[0]
# y_upper = y_lower + size_cluster_i
#
# # Filling the silhouette
# color = cm.nipy_spectral(float(i) / nclus)
# plt.fill_betweenx(np.arange(y_lower, y_upper),
# 0, ith_cluster_silhouette_values,
# facecolor=color, edgecolor=color, alpha=0.7)
#
# # Label the silhouette plots with their cluster numbers at the middle
# plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
#
# # Compute the new y_lower for next plot
# y_lower = y_upper + 10 # 10 for the 0 samples
#
# plt.title("The silhouette plot for the various clusters.")
# plt.xlabel("The silhouette coefficient values")
# plt.ylabel("Cluster label")
#
# # The vertical line for average silhouette score of all the values
# plt.axvline(x=silhouette_avg, color="red", linestyle="--")
#
# # The silhouette coefficient can range from -1, 1
# xmin, xmax = np.round(sample_silhouette_values.min() -0.1, 2), np.round(sample_silhouette_values.max() + 0.1, 2)
# plt.xlim([xmin, xmax])
#
# # The (nclus+1)*10 is for inserting blank space between silhouette
# # plots of individual clusters, to demarcate them clearly.
# plt.ylim([0, len(df_val) + (nclus + 1) * 10])
#
# plt.yticks([]) # Clear the yaxis labels / ticks
# plt.xticks(np.arange(xmin, xmax, 0.1))
## The average silhouette plot
## The inertia plot
#plt.figure(figsize=(9,5))
#plt.plot(avg_silhouette)
#plt.ylabel("Average silhouette")
#plt.xlabel("Number of clusters")
#plt.title("Average silhouette plot over clusters", size=15)
#plt.show()
###Output
_____no_output_____
###Markdown
Preference Features:
###Code
#range_clusters = range(1, 15)
#inertia = []
#for n_clus in range_clusters: # iterate over desired ncluster range
# kmclust = KMeans(n_clusters=n_clus, init='k-means++', n_init=15, random_state=1)
# kmclust.fit(df_pr)
# inertia.append(kmclust.inertia_)
## The inertia plot
#plt.figure(figsize=(9,5))
#plt.plot(inertia)
#ax.set_xlim(1, 24)
#plt.ylabel("Inertia: SSw")
#plt.xlabel("Number of clusters")
#plt.title("Inertia plot over clusters", size=15)
#plt.show()
## Storing average silhouette metric
#avg_silhouette = []
#for nclus in range_clusters:
# # Skip nclus == 1
# if nclus == 1:
# continue
#
# # Create a figure
# fig = plt.figure(figsize=(13, 7))
#
# # Initialize the KMeans object with n_clusters value and a random generator
# # seed of 10 for reproducibility.
# kmclust = KMeans(n_clusters=nclus, init='k-means++', n_init=15, random_state=1)
# cluster_labels = kmclust.fit_predict(df_pr)
#
# # The silhouette_score gives the average value for all the samples.
# # This gives a perspective into the density and separation of the formed clusters
# silhouette_avg = silhouette_score(df_pr, cluster_labels)
# avg_silhouette.append(silhouette_avg)
# print(f"For n_clusters = {nclus}, the average silhouette_score is : {silhouette_avg}")
#
# # Compute the silhouette scores for each sample
# sample_silhouette_values = silhouette_samples(df_pr, cluster_labels)
#
# y_lower = 10
# for i in range(nclus):
# # Aggregate the silhouette scores for samples belonging to cluster i, and sort them
# ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
# ith_cluster_silhouette_values.sort()
#
# # Get y_upper to demarcate silhouette y range size
# size_cluster_i = ith_cluster_silhouette_values.shape[0]
# y_upper = y_lower + size_cluster_i
#
# # Filling the silhouette
# color = cm.nipy_spectral(float(i) / nclus)
# plt.fill_betweenx(np.arange(y_lower, y_upper),
# 0, ith_cluster_silhouette_values,
# facecolor=color, edgecolor=color, alpha=0.7)
#
# # Label the silhouette plots with their cluster numbers at the middle
# plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
#
# # Compute the new y_lower for next plot
# y_lower = y_upper + 10 # 10 for the 0 samples
#
# plt.title("The silhouette plot for the various clusters.")
# plt.xlabel("The silhouette coefficient values")
# plt.ylabel("Cluster label")
#
# # The vertical line for average silhouette score of all the values
# plt.axvline(x=silhouette_avg, color="red", linestyle="--")
#
# # The silhouette coefficient can range from -1, 1
# xmin, xmax = np.round(sample_silhouette_values.min() -0.1, 2), np.round(sample_silhouette_values.max() + 0.1, 2)
# plt.xlim([xmin, xmax])
#
# # The (nclus+1)*10 is for inserting blank space between silhouette
# # plots of individual clusters, to demarcate them clearly.
# plt.ylim([0, len(df_pr) + (nclus + 1) * 10])
#
# plt.yticks([]) # Clear the yaxis labels / ticks
# plt.xticks(np.arange(xmin, xmax, 0.1))
## The average silhouette plot
## The inertia plot
#plt.figure(figsize=(9,5))
#plt.plot(avg_silhouette)
#plt.ylabel("Average silhouette")
#plt.xlabel("Number of clusters")
#plt.title("Average silhouette plot over clusters", size=15)
#plt.show()
## Applying the right clustering (algorithm and number of clusters) for each perspective
#kmeans = KMeans(
# n_clusters=2,
# init='k-means++',
# n_init=20,
# random_state=42
#)
#value_labels = kmeans.fit_predict(df_val)
#
#
#preference_labels = kmeans.fit_predict(df_pr)
#
#
#df_norm['value_labels'] = value_labels
#df_norm['preference_labels'] = preference_labels
#clust=['LTV', 'Recency', 'YearsAsCustomer', 'Edu', 'WebVisit','Dryred','Sweetred','Drywh','Sweetwh','Dessert','Exotic']
## Centroids of the concatenated cluster labels
#df_centroids = df_norm.groupby(['value_labels', 'preference_labels'])\
# [clust].mean()
#df_centroids
###Output
_____no_output_____
###Markdown
Merge
###Code
## Using Hierarchical clustering to merge the concatenated cluster centroids
#linkage='ward'
#distance='euclidean'
#hclust = AgglomerativeClustering(
# linkage=linkage,
# affinity=distance,
# distance_threshold=0,
# n_clusters=None
#)
#hclust_labels = hclust.fit_predict(df_centroids)
## create the counts of samples under each node (number of points being merged)
#counts = np.zeros(hclust.children_.shape[0])
#n_samples = len(hclust.labels_)
#
## hclust.children_ contains the observation ids that are being merged together
## At the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i
#for i, merge in enumerate(hclust.children_):
# # track the number of observations in the current cluster being formed
# current_count = 0
# for child_idx in merge:
# if child_idx < n_samples:
# # If this is True, then we are merging an observation
# current_count += 1 # leaf node
# else:
# # Otherwise, we are merging a previously formed cluster
# current_count += counts[child_idx - n_samples]
# counts[i] = current_count
#
## the hclust.children_ is used to indicate the two points/clusters being merged (dendrogram's u-joins)
## the hclust.distances_ indicates the distance between the two points/clusters (height of the u-joins)
## the counts indicate the number of points being merged (dendrogram's x-axis)
#linkage_matrix = np.column_stack(
# [hclust.children_, hclust.distances_, counts]
#).astype(float)
#
## Plot the corresponding dendrogram
#sns.set()
#fig = plt.figure(figsize=(11,5))
## The Dendrogram parameters need to be tuned
#y_threshold = 3
#dendrogram(linkage_matrix, truncate_mode='level', labels=df_centroids.index, p=5, color_threshold=y_threshold, above_threshold_color='k')
#plt.hlines(y_threshold, 0, 1000, colors="r", linestyles="dashed")
#plt.title(f'Hierarchical Clustering - {linkage.title()}\'s Dendrogram', fontsize=21)
#plt.xlabel('Number of points in node (or index of point if no parenthesis)')
#plt.ylabel(f'Euclidean Distance', fontsize=13)
#plt.show()
## Re-running the Hierarchical clustering based on the correct number of clusters
#hclust = AgglomerativeClustering(
# linkage='ward',
# affinity='euclidean',
# n_clusters=3
#)
#hclust_labels = hclust.fit_predict(df_centroids)
#df_centroids['hclust_labels'] = hclust_labels
#
#df_centroids # centroid's cluster labels
## Mapper between concatenated clusters and hierarchical clusters
#cluster_mapper = df_centroids['hclust_labels'].to_dict()
#
#df_ = df_norm.copy()
#
## Mapping the hierarchical clusters on the centroids to the observations
#df_['merged_labels'] = df_.apply(
# lambda row: cluster_mapper[
# (row['value_labels'], row['preference_labels'])
# ], axis=1
#)
#
## Merged cluster centroids
#df_.groupby('merged_labels').mean()[clust]
## Setting df to have the final product, behavior and merged clusters
#df_final = df_.copy()
## using R²
#def get_ss(df):
# ss = np.sum(df.var() * (df.count() - 1))
# return ss # return sum of sum of squares of each df variable
#
#sst = get_ss(df_final[clust]) # get total sum of squares
#ssw_labels = df_final[clust + ["merged_labels"]].groupby(by='merged_labels').apply(get_ss) # compute ssw for each cluster labels
#ssb = sst - np.sum(ssw_labels)
#r2 = ssb / sst
#r2
#def cluster_profiles(df, label_columns, figsize, compar_titles=None):
# """
# Pass df with labels columns of one or multiple clustering labels.
# Then specify this label columns to perform the cluster profile according to them.
# """
# if compar_titles == None:
# compar_titles = [""]*len(label_columns)
#
# sns.set()
# fig, axes = plt.subplots(nrows=len(label_columns), ncols=2, figsize=figsize, squeeze=False)
# for ax, label, titl in zip(axes, label_columns, compar_titles):
# # Filtering df
# drop_cols = [i for i in label_columns if i!=label]
# dfax = df.drop(drop_cols, axis=1)
#
# # Getting the cluster centroids and counts
# centroids = dfax.groupby(by=label, as_index=False).mean()
# counts = dfax.groupby(by=label, as_index=False).count().iloc[:,[0,1]]
# counts.columns = [label, "counts"]
#
# # Setting Data
# pd.plotting.parallel_coordinates(centroids, label, color=sns.color_palette(), ax=ax[0])
# sns.barplot(x=label, y="counts", data=counts, ax=ax[1])
#
# #Setting Layout
# handles, _ = ax[0].get_legend_handles_labels()
# cluster_labels = ["Cluster {}".format(i) for i in range(len(handles))]
# ax[0].annotate(text=titl, xy=(0.95,1.1), xycoords='axes fraction', fontsize=13, fontweight = 'heavy')
# ax[0].legend(handles, cluster_labels) # Adaptable to number of clusters
# ax[0].axhline(color="black", linestyle="--")
# ax[0].set_title("Cluster Means - {} Clusters".format(len(handles)), fontsize=13)
# ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=-20)
# ax[1].set_xticklabels(cluster_labels)
# ax[1].set_xlabel("")
# ax[1].set_ylabel("Absolute Frequency")
# ax[1].set_title("Cluster Sizes - {} Clusters".format(len(handles)), fontsize=13)
#
# plt.subplots_adjust(hspace=0.4, top=0.90)
# plt.suptitle("Cluster Simple Profilling", fontsize=23)
# plt.show()
## Profilling each cluster (product, behavior, merged)
#cluster_profiles(
# df = df_final[clust + ['value_labels', 'preference_labels', 'merged_labels']],
# label_columns = ['value_labels', 'preference_labels', 'merged_labels'],
# figsize = (28, 13),
# compar_titles = ["Value Clustering", "Preference clustering","Merged clusters"]
#)
###Output
_____no_output_____
###Markdown
__RENAN__
###Code
# Remove Recency from the value view (mutates df_val in place)
df_val.drop(columns=['Recency'], inplace=True)
###Output
_____no_output_____
###Markdown
__Checking the possible number of clusters using Hierarchical Clustering__
###Code
## Setting the ideal clusters quantity for Value
# setting distance_threshold=0 and n_clusters=None ensures we compute the full tree
linkage = 'ward'
distance = 'euclidean'
# NOTE(review): 'affinity' was renamed 'metric' in newer scikit-learn releases — confirm installed version
hclust = AgglomerativeClustering(linkage=linkage, affinity=distance, distance_threshold=0, n_clusters=None)
hclust.fit_predict(df_val)
# create the counts of samples under each node (number of points being merged)
counts = np.zeros(hclust.children_.shape[0])
n_samples = len(hclust.labels_)
# hclust.children_ contains the observation ids that are being merged together
# At the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i
for i, merge in enumerate(hclust.children_):
    # track the number of observations in the current cluster being formed
    current_count = 0
    for child_idx in merge:
        if child_idx < n_samples:
            # If this is True, then we are merging an observation
            current_count += 1 # leaf node
        else:
            # Otherwise, we are merging a previously formed cluster
            current_count += counts[child_idx - n_samples]
    counts[i] = current_count
# the hclust.children_ is used to indicate the two points/clusters being merged (dendrogram's u-joins)
# the hclust.distances_ indicates the distance between the two points/clusters (height of the u-joins)
# the counts indicate the number of points being merged (dendrogram's x-axis)
linkage_matrix = np.column_stack(
    [hclust.children_, hclust.distances_, counts]
).astype(float)
# Plot the corresponding dendrogram
sns.set()
fig = plt.figure(figsize=(11,5))
# The Dendrogram parameters need to be tuned
# y_threshold is the visual cut height: branches merging below it are coloured per cluster
y_threshold = 50
dendrogram(linkage_matrix, truncate_mode='level', p=5, color_threshold=y_threshold, above_threshold_color='k')
plt.hlines(y_threshold, 0, 1000, colors="r", linestyles="dashed")
plt.title(f'Value Features - {linkage.title()}\'s Dendrogram', fontsize=21)
plt.xlabel('Number of points in node (or index of point if no parenthesis)')
plt.ylabel(f'{distance.title()} Distance', fontsize=13)
plt.show()
## Setting the ideal clusters quantity for Preferences
# setting distance_threshold=0 and n_clusters=None ensures we compute the full tree
linkage = 'ward'
distance = 'euclidean'
# Same full-tree fit as for the value features, now on the preference features
hclust = AgglomerativeClustering(linkage=linkage, affinity=distance, distance_threshold=0, n_clusters=None)
hclust.fit_predict(df_pr)
# create the counts of samples under each node (number of points being merged)
counts = np.zeros(hclust.children_.shape[0])
n_samples = len(hclust.labels_)
# hclust.children_ contains the observation ids that are being merged together
# At the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i
for i, merge in enumerate(hclust.children_):
    # track the number of observations in the current cluster being formed
    current_count = 0
    for child_idx in merge:
        if child_idx < n_samples:
            # If this is True, then we are merging an observation
            current_count += 1 # leaf node
        else:
            # Otherwise, we are merging a previously formed cluster
            current_count += counts[child_idx - n_samples]
    counts[i] = current_count
# the hclust.children_ is used to indicate the two points/clusters being merged (dendrogram's u-joins)
# the hclust.distances_ indicates the distance between the two points/clusters (height of the u-joins)
# the counts indicate the number of points being merged (dendrogram's x-axis)
linkage_matrix = np.column_stack(
    [hclust.children_, hclust.distances_, counts]
).astype(float)
# Plot the corresponding dendrogram
sns.set()
fig = plt.figure(figsize=(11,5))
# The Dendrogram parameters need to be tuned
# Higher cut than the value dendrogram (90 vs 50): chosen visually from the plot
y_threshold = 90
dendrogram(linkage_matrix, truncate_mode='level', p=5, color_threshold=y_threshold, above_threshold_color='k')
plt.hlines(y_threshold, 0, 1000, colors="r", linestyles="dashed")
plt.title(f'Preference Features - {linkage.title()}\'s Dendrogram', fontsize=21)
plt.xlabel('Number of points in node (or index of point if no parenthesis)')
plt.ylabel(f'{distance.title()} Distance', fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
4 clusters seem good for Value; for Preferences it is hard to choose, so I'm going with 3 Performing HC for Value
###Code
# Hierarchical clustering on the value features: Ward linkage, 4 clusters
hclust = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=4)
hc_val_labels = hclust.fit_predict(df_val)

# Profile the clusters on the original (unscaled) value variables
val_labels = pd.Series(hc_val_labels, name='labels')
df_hc_val = pd.concat((df[value], val_labels), axis=1)
df_hc_val.groupby('labels').mean()
###Output
_____no_output_____
###Markdown
Performing HC for Preferences
###Code
# Hierarchical clustering on the preference features: Ward linkage, 3 clusters
hclust = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=3)
hc_pr_labels = hclust.fit_predict(df_pr)

# Profile the clusters on the original (unscaled) preference variables
pr_labels = pd.Series(hc_pr_labels, name='labels')
df_hc_pr = pd.concat((df[pref], pr_labels), axis=1)
df_hc_pr.groupby('labels').mean()
###Output
_____no_output_____
###Markdown
MERGE
###Code
# Attach both clustering solutions to the original data
df['value_labels'] = hc_val_labels
df['preference_labels'] = hc_pr_labels

clust = ['LTV', 'YearsAsCustomer', 'Edu', 'WebVisit',
         'Dryred', 'Sweetred', 'Drywh', 'Sweetwh', 'Dessert', 'Exotic']

# Centroid of every (value_labels, preference_labels) combination
df_hc_centroids = df.groupby(['value_labels', 'preference_labels'])[clust].mean()
df_hc_centroids
# Using Hierarchical clustering to merge the concatenated cluster centroids
linkage='ward'
distance='euclidean'
# Full tree again (distance_threshold=0, n_clusters=None), now over the centroids
hclust = AgglomerativeClustering(
    linkage=linkage,
    affinity=distance,
    distance_threshold=0,
    n_clusters=None
)
hclust_labels = hclust.fit_predict(df_hc_centroids)
# create the counts of samples under each node (number of points being merged)
counts = np.zeros(hclust.children_.shape[0])
n_samples = len(hclust.labels_)
# hclust.children_ contains the observation ids that are being merged together
# At the i-th iteration, children[i][0] and children[i][1] are merged to form node n_samples + i
for i, merge in enumerate(hclust.children_):
    # track the number of observations in the current cluster being formed
    current_count = 0
    for child_idx in merge:
        if child_idx < n_samples:
            # If this is True, then we are merging an observation
            current_count += 1 # leaf node
        else:
            # Otherwise, we are merging a previously formed cluster
            current_count += counts[child_idx - n_samples]
    counts[i] = current_count
# the hclust.children_ is used to indicate the two points/clusters being merged (dendrogram's u-joins)
# the hclust.distances_ indicates the distance between the two points/clusters (height of the u-joins)
# the counts indicate the number of points being merged (dendrogram's x-axis)
linkage_matrix = np.column_stack(
    [hclust.children_, hclust.distances_, counts]
).astype(float)
# Plot the corresponding dendrogram
sns.set()
fig = plt.figure(figsize=(11,5))
# The Dendrogram parameters need to be tuned
# Leaves are labelled with the (value, preference) centroid index pairs
y_threshold = 400
dendrogram(linkage_matrix, truncate_mode='level', labels=df_hc_centroids.index, p=5, color_threshold=y_threshold, above_threshold_color='k')
plt.hlines(y_threshold, 0, 1000, colors="r", linestyles="dashed")
plt.title(f'Hierarchical Clustering - {linkage.title()}\'s Dendrogram', fontsize=21)
plt.xlabel('Number of points in node (or index of point if no parenthesis)')
plt.ylabel(f'Euclidean Distance', fontsize=13)
plt.show()
# Refit the hierarchy, this time cutting it into the chosen 2 clusters
hclust = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=2)
hclust_labels = hclust.fit_predict(df_hc_centroids)

# Store each centroid's merged-cluster label next to the centroid itself
df_hc_centroids['hclust_labels'] = hclust_labels
df_hc_centroids  # centroid's cluster labels
# Map each (value_labels, preference_labels) pair to its merged hierarchical cluster
cluster_mapper = df_hc_centroids['hclust_labels'].to_dict()

df_hc = df.copy()

# Look the merged label up row by row from the two concatenated label columns
df_hc['merged_labels'] = [
    cluster_mapper[(v, p)]
    for v, p in zip(df_hc['value_labels'], df_hc['preference_labels'])
]

# Merged cluster centroids
df_hc.groupby('merged_labels').mean()[clust]
# Keep a final snapshot of the data with all three label columns attached
# (value_labels, preference_labels, merged_labels)
df_hc_final = df_hc.copy()
# Helper for the R² quality measure below
def get_ss(df):
    """Return the total sum of squares of df, summed over all columns.

    Uses the identity SS = sample_variance * (n - 1) for each column.
    """
    per_column_ss = df.var() * (df.count() - 1)
    return np.sum(per_column_ss)
# R² = between-cluster SS / total SS — the share of variance the clustering explains
sst = get_ss(df_hc[clust])  # total sum of squares
ssw_labels = df_hc[clust + ["merged_labels"]].groupby('merged_labels').apply(get_ss)  # within-cluster SS per label
ssb = sst - ssw_labels.sum()  # between = total - within
r2 = ssb / sst
r2
def cluster_profiles(df, label_columns, figsize, compar_titles=None):
    """Plot a simple profile for each clustering solution in label_columns.

    For every label column, one row of plots is drawn:
    a parallel-coordinates plot of the cluster centroids (left) and a
    bar chart of the cluster sizes (right).

    Parameters
    ----------
    df : DataFrame holding the feature columns plus every column listed
        in label_columns.
    label_columns : list of column names, each containing one clustering
        solution's labels.
    figsize : figure size forwarded to plt.subplots.
    compar_titles : optional list of per-row annotation titles, same
        length as label_columns; defaults to empty strings.
    """
    # Bug fix: compare against None with `is`, not `==` (PEP 8 / E711)
    if compar_titles is None:
        compar_titles = [""]*len(label_columns)
    sns.set()
    # One (centroids, sizes) pair of axes per clustering solution
    fig, axes = plt.subplots(nrows=len(label_columns), ncols=2, figsize=figsize, squeeze=False)
    for ax, label, titl in zip(axes, label_columns, compar_titles):
        # Keep only this solution's label column; drop the other labels
        drop_cols = [i for i in label_columns if i!=label]
        dfax = df.drop(drop_cols, axis=1)
        # Getting the cluster centroids and counts
        centroids = dfax.groupby(by=label, as_index=False).mean()
        counts = dfax.groupby(by=label, as_index=False).count().iloc[:,[0,1]]
        counts.columns = [label, "counts"]
        # Setting Data
        pd.plotting.parallel_coordinates(centroids, label, color=sns.color_palette(), ax=ax[0])
        sns.barplot(x=label, y="counts", data=counts, ax=ax[1])
        # Setting Layout
        handles, _ = ax[0].get_legend_handles_labels()
        cluster_labels = ["Cluster {}".format(i) for i in range(len(handles))]
        ax[0].annotate(text=titl, xy=(0.95,1.1), xycoords='axes fraction', fontsize=13, fontweight = 'heavy')
        ax[0].legend(handles, cluster_labels) # Adaptable to number of clusters
        ax[0].axhline(color="black", linestyle="--")
        ax[0].set_title("Cluster Means - {} Clusters".format(len(handles)), fontsize=13)
        ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=-20)
        ax[1].set_xticklabels(cluster_labels)
        ax[1].set_xlabel("")
        ax[1].set_ylabel("Absolute Frequency")
        ax[1].set_title("Cluster Sizes - {} Clusters".format(len(handles)), fontsize=13)
    plt.subplots_adjust(hspace=0.4, top=0.90)
    # NOTE(review): "Profilling" typo kept — it is user-visible output text
    plt.suptitle("Cluster Simple Profilling", fontsize=23)
    plt.show()
# Profile the three clustering solutions side by side (value, preference, merged)
label_cols = ['value_labels', 'preference_labels', 'merged_labels']
cluster_profiles(
    df=df_hc[clust + label_cols],
    label_columns=label_cols,
    figsize=(28, 13),
    compar_titles=["Value Clustering", "Preference clustering", "Merged clusters"],
)
###Output
_____no_output_____ |
code/GaussianMixtureModel.ipynb | ###Markdown
Gaussian Distribution
###Code
def get_ndim_corr(ranges):
    """Build the Cartesian grid of points from per-dimension coordinate arrays.

    Given k 1-D coordinate arrays, returns an (n, k) array whose rows are all
    grid points, in np.meshgrid (default 'xy') flattening order.

    Simplified from the original, which wrapped the same flattened arrays in a
    no-op zip()/np.concatenate() combination before transposing.
    """
    return np.vstack([m.reshape(-1) for m in np.meshgrid(*ranges)]).T
def sample_from_gaussian(n_samples, mu=0, sigma=1):
    """Draw n_samples from a uni- or multivariate normal distribution.

    mu scalar -> 1-D samples of shape (n_samples,); sigma is the std dev.
    mu vector -> samples of shape (n_samples, d); sigma is a covariance
    matrix, or a vector of per-dimension entries placed on the diagonal.
    """
    mean = np.array(mu)
    spread = np.array(sigma)
    assert mean.ndim <= 1
    if mean.ndim == 0:
        # Univariate draw: spread is the standard deviation
        assert spread.ndim == 0
        return np.random.normal(mean, spread, n_samples)
    # Multivariate draw: promote a vector to a diagonal covariance matrix
    assert spread.ndim in (1, 2)
    cov = np.diag(spread) if spread.ndim == 1 else spread
    return np.random.multivariate_normal(mean, cov, n_samples)
def gaussian_pdf(n_dot, mu=0, sigma=1):
    """Evaluate a Gaussian pdf on a grid of n_dot points per dimension.

    Scalar mu: returns (x, pdf(x)) with x spanning mu +/- 3*sigma and sigma
    the standard deviation. Vector mu: sigma is a covariance matrix (or a
    vector of diagonal entries) and the pdf is evaluated on the Cartesian
    grid built by get_ndim_corr.

    Bug fixes vs. the previous version:
      * the normalization constant used (2/pi) instead of (2*pi)
      * the multivariate exponent used `x - -mu` (i.e. x + mu) instead of x - mu
    """
    mu = np.array(mu)
    sigma = np.array(sigma)
    assert len(mu.shape) <= 1
    if len(mu.shape) == 0:
        # Univariate case
        assert len(sigma.shape) == 0
        x = np.linspace(mu - sigma * 3, mu + sigma * 3, n_dot)
        return x, 1.0 / ((2 * np.pi) ** 0.5 * sigma) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    else:
        # Multivariate case: promote a diagonal vector to a full covariance
        assert len(sigma.shape) == 2 or len(sigma.shape) == 1
        if len(sigma.shape) == 1:
            sigma = np.diag(sigma)
        # NOTE(review): the grid span uses the diagonal entries (variances),
        # not standard deviations; kept as-is since it only affects plot range.
        ranges = np.linspace(mu - np.diagonal(sigma) * 3, mu + np.diagonal(sigma) * 3, n_dot).transpose()
        x = get_ndim_corr(ranges)
        centered = x - mu[np.newaxis, :]
        density = (
            1.0 / ((2 * np.pi) ** (0.5 * mu.shape[0]) * np.linalg.det(sigma) ** 0.5)
            * np.exp(-0.5 * np.sum(np.matmul(centered, np.linalg.inv(sigma)) * centered, axis=1))
        )
        return x, density
# def gaussian_curve(n_dot, mu=0, sigma=1):
# x = np.linspace(mu-3*sigma, mu+3*sigma, n_dot)
# p = s
###Output
_____no_output_____
###Markdown
高斯分布
###Code
# Parameters
n_sample = 1000
n_dot = 100
params = {
    "mu": 0,
    "sigma": 1
}
# sample
samples = sample_from_gaussian(n_sample, **params)
hist, bin_edges = np.histogram(samples, bins=20)
# pdf
x, p = gaussian_pdf(n_dot, **params)
# plot2d — normalize bar heights to a probability density: count / (N * bin_width)
# (bug fix: the previous code divided by bin_width**2 * N, which does not
# integrate to 1 and so cannot be compared against the pdf curve)
bin_width = bin_edges[1] - bin_edges[0]
plt.bar((bin_edges[1:] + bin_edges[:-1]) / 2, hist / (hist.sum() * bin_width), width=bin_width)
plt.plot(x, p, color="r")
plt.show()
###Output
_____no_output_____
###Markdown
多元高斯分布
###Code
# Parameters
n_sample = 1000
n_dot = 40
# NOTE(review): this sigma is not symmetric ([0.5] vs [0.3]) — numpy warns
# "covariance is not positive-semidefinite" (see the cell output); confirm intent
params = {
    "mu": [0, 0],
    "sigma": [[1, 0.5],[0.3,1]]
}
# sample, then bin into a 20x20 2-D histogram
samples = sample_from_gaussian(n_sample, **params)
H, xedges, yedges = np.histogram2d(samples[:,0],samples[:,1],bins=20)
# Bin-center grid for plotting the bars
pos = get_ndim_corr([(xedges[1:]+xedges[:-1])/2, (yedges[1:]+yedges[:-1])/2])
xedges, yedges = pos[:,0], pos[:,1]
# NOTE(review): H.reshape(-1) flattens x-major while get_ndim_corr's grid is
# meshgrid ('xy') order — bar heights may be transposed vs positions; verify
H = H.reshape(-1)
# pdf evaluated on its own grid
x, p = gaussian_pdf(n_dot, **params)
# plot3d: histogram bars (translucent) plus the pdf curve over the grid
fig = plt.figure()
ax = p3d.Axes3D(fig)
ax.bar3d(xedges, yedges, 0, (xedges[1:]-xedges[:-1]).max(), (yedges[1:]-yedges[:-1]).max(), H/(xedges[1:]-xedges[:-1]).max()**2/(yedges[1:]-yedges[:-1]).max()**2/H.sum(), zorder=2, alpha=0.2)
ax.plot3D(x[:,0],x[:,1],p,color="r", zorder=1, )
plt.show()
###Output
F:\Users\20163\anaconda3\envs\ml37\lib\site-packages\ipykernel_launcher.py:15: RuntimeWarning: covariance is not positive-semidefinite.
from ipykernel import kernelapp as app
F:\Users\20163\anaconda3\envs\ml37\lib\site-packages\ipykernel_launcher.py:18: MatplotlibDeprecationWarning: Axes3D(fig) adding itself to the figure is deprecated since 3.4. Pass the keyword argument auto_add_to_figure=False and use fig.add_axes(ax) to suppress this warning. The default value of auto_add_to_figure will change to False in mpl3.5 and True values will no longer work in 3.6. This is consistent with other Axes classes.
###Markdown
混合高斯分布
###Code
# Mixture-of-Gaussians demo: three unit-variance components at mu = 0, 5, -5.
n_sample = 1000
n_dot = 100
params = [
    {"mu": 0,
     "sigma": 1},
    {"mu": 5,
     "sigma": 1},
    {"mu": -5,
     "sigma": 1}
]
weight = [0.2, 0.4, 0.4]
# Draw n_sample points from every component.
samples = [sample_from_gaussian(n_sample, **p) for p in params]
# Blue bars: one histogram per component, drawn on the same axes.
for component in samples:
    counts, edges = np.histogram(component, bins=10)
    centers = (edges[1:] + edges[:-1]) / 2
    plt.bar(centers, counts, width=edges[1] - edges[0], color="b")
# Red bars: histogram of the pooled samples, approximating the mixture.
counts, edges = np.histogram(np.concatenate(samples), bins=30)
centers = (edges[1:] + edges[:-1]) / 2
plt.bar(centers, counts, width=edges[1] - edges[0], color="r", alpha=0.8)
plt.show()
###Output
_____no_output_____ |
Cloud_DWH/L1 E1 - Step 1 & 2.ipynb | ###Markdown
Exercise 1 - Sakila Star Schema & ETL All the database tables in this demo are based on public database samples and transformations- `Sakila` is a sample database created by `MySql` [Link](https://dev.mysql.com/doc/sakila/en/sakila-structure.html)- The postgresql version of it is called `Pagila` [Link](https://github.com/devrimgunduz/pagila)- The facts and dimension tables design is based on O'Reilly's public dimensional modelling tutorial schema [Link](http://archive.oreilly.com/oreillyschool/courses/dba3/index.html) STEP0: Using ipython-sql- Load ipython-sql: `%load_ext sql`- To execute SQL queries you write one of the following atop of your cell: - `%sql` - For a one-liner SQL query - You can access a python var using `$` - `%%sql` - For a multi-line SQL query - You can **NOT** access a python var using `$`- Running a connection string like:`postgresql://postgres:postgres@db:5432/pagila` connects to the database STEP1 : Connect to the local database where Pagila is loaded 1.1 Create the pagila db and fill it with data- Adding `"!"` at the beginning of a jupyter cell runs a command in a shell, i.e. we are not running python code but we are running the `createdb` and `psql` postgresql commmand-line utilities
###Code
# Create the pagila database, then load its schema and data.
# On re-runs the "already exists" / "duplicate key" errors in the output
# are expected and harmless.
!PGPASSWORD=student createdb -h 127.0.0.1 -U student pagila
!PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila -f Data/pagila-schema.sql
!PGPASSWORD=student psql -q -h 127.0.0.1 -U student -d pagila -f Data/pagila-data.sql
###Output
createdb: database creation failed: ERROR: database "pagila" already exists
psql:Data/pagila-schema.sql:43: ERROR: type "mpaa_rating" already exists
psql:Data/pagila-schema.sql:53: ERROR: type "year" already exists
psql:Data/pagila-schema.sql:70: ERROR: function "_group_concat" already exists with same argument types
psql:Data/pagila-schema.sql:87: ERROR: function "film_in_stock" already exists with same argument types
psql:Data/pagila-schema.sql:104: ERROR: function "film_not_in_stock" already exists with same argument types
psql:Data/pagila-schema.sql:149: ERROR: function "get_customer_balance" already exists with same argument types
psql:Data/pagila-schema.sql:171: ERROR: function "inventory_held_by_customer" already exists with same argument types
psql:Data/pagila-schema.sql:208: ERROR: function "inventory_in_stock" already exists with same argument types
psql:Data/pagila-schema.sql:226: ERROR: function "last_day" already exists with same argument types
psql:Data/pagila-schema.sql:241: ERROR: function "last_updated" already exists with same argument types
psql:Data/pagila-schema.sql:255: ERROR: relation "customer_customer_id_seq" already exists
psql:Data/pagila-schema.sql:279: ERROR: relation "customer" already exists
psql:Data/pagila-schema.sql:343: ERROR: function "rewards_report" already exists with same argument types
psql:Data/pagila-schema.sql:355: ERROR: function "group_concat" already exists with same argument types
psql:Data/pagila-schema.sql:369: ERROR: relation "actor_actor_id_seq" already exists
psql:Data/pagila-schema.sql:383: ERROR: relation "actor" already exists
psql:Data/pagila-schema.sql:397: ERROR: relation "category_category_id_seq" already exists
psql:Data/pagila-schema.sql:410: ERROR: relation "category" already exists
psql:Data/pagila-schema.sql:424: ERROR: relation "film_film_id_seq" already exists
psql:Data/pagila-schema.sql:448: ERROR: relation "film" already exists
psql:Data/pagila-schema.sql:461: ERROR: relation "film_actor" already exists
psql:Data/pagila-schema.sql:474: ERROR: relation "film_category" already exists
psql:Data/pagila-schema.sql:497: ERROR: relation "actor_info" already exists
psql:Data/pagila-schema.sql:511: ERROR: relation "address_address_id_seq" already exists
psql:Data/pagila-schema.sql:529: ERROR: relation "address" already exists
psql:Data/pagila-schema.sql:543: ERROR: relation "city_city_id_seq" already exists
psql:Data/pagila-schema.sql:557: ERROR: relation "city" already exists
psql:Data/pagila-schema.sql:571: ERROR: relation "country_country_id_seq" already exists
psql:Data/pagila-schema.sql:584: ERROR: relation "country" already exists
psql:Data/pagila-schema.sql:609: ERROR: relation "customer_list" already exists
psql:Data/pagila-schema.sql:632: ERROR: relation "film_list" already exists
psql:Data/pagila-schema.sql:646: ERROR: relation "inventory_inventory_id_seq" already exists
psql:Data/pagila-schema.sql:660: ERROR: relation "inventory" already exists
psql:Data/pagila-schema.sql:674: ERROR: relation "language_language_id_seq" already exists
psql:Data/pagila-schema.sql:687: ERROR: relation "language" already exists
psql:Data/pagila-schema.sql:710: ERROR: relation "nicer_but_slower_film_list" already exists
psql:Data/pagila-schema.sql:724: ERROR: relation "payment_payment_id_seq" already exists
psql:Data/pagila-schema.sql:740: ERROR: relation "payment" already exists
psql:Data/pagila-schema.sql:751: ERROR: relation "rental_rental_id_seq" already exists
psql:Data/pagila-schema.sql:768: ERROR: relation "rental" already exists
psql:Data/pagila-schema.sql:787: ERROR: relation "sales_by_film_category" already exists
psql:Data/pagila-schema.sql:801: ERROR: relation "staff_staff_id_seq" already exists
psql:Data/pagila-schema.sql:822: ERROR: relation "staff" already exists
psql:Data/pagila-schema.sql:836: ERROR: relation "store_store_id_seq" already exists
psql:Data/pagila-schema.sql:850: ERROR: relation "store" already exists
psql:Data/pagila-schema.sql:872: ERROR: relation "sales_by_store" already exists
psql:Data/pagila-schema.sql:893: ERROR: relation "staff_list" already exists
psql:Data/pagila-schema.sql:903: ERROR: multiple primary keys for table "actor" are not allowed
psql:Data/pagila-schema.sql:911: ERROR: multiple primary keys for table "address" are not allowed
psql:Data/pagila-schema.sql:919: ERROR: multiple primary keys for table "category" are not allowed
psql:Data/pagila-schema.sql:927: ERROR: multiple primary keys for table "city" are not allowed
psql:Data/pagila-schema.sql:935: ERROR: multiple primary keys for table "country" are not allowed
psql:Data/pagila-schema.sql:944: ERROR: multiple primary keys for table "film_actor" are not allowed
psql:Data/pagila-schema.sql:952: ERROR: multiple primary keys for table "film_category" are not allowed
psql:Data/pagila-schema.sql:960: ERROR: multiple primary keys for table "film" are not allowed
psql:Data/pagila-schema.sql:968: ERROR: multiple primary keys for table "inventory" are not allowed
psql:Data/pagila-schema.sql:976: ERROR: multiple primary keys for table "language" are not allowed
psql:Data/pagila-schema.sql:984: ERROR: multiple primary keys for table "rental" are not allowed
psql:Data/pagila-schema.sql:992: ERROR: multiple primary keys for table "staff" are not allowed
psql:Data/pagila-schema.sql:1000: ERROR: multiple primary keys for table "store" are not allowed
psql:Data/pagila-schema.sql:1007: ERROR: relation "film_fulltext_idx" already exists
psql:Data/pagila-schema.sql:1014: ERROR: relation "idx_actor_last_name" already exists
psql:Data/pagila-schema.sql:1021: ERROR: relation "idx_fk_address_id" already exists
psql:Data/pagila-schema.sql:1028: ERROR: relation "idx_fk_city_id" already exists
psql:Data/pagila-schema.sql:1035: ERROR: relation "idx_fk_country_id" already exists
psql:Data/pagila-schema.sql:1042: ERROR: relation "idx_fk_customer_id" already exists
psql:Data/pagila-schema.sql:1049: ERROR: relation "idx_fk_film_id" already exists
psql:Data/pagila-schema.sql:1056: ERROR: relation "idx_fk_inventory_id" already exists
psql:Data/pagila-schema.sql:1063: ERROR: relation "idx_fk_language_id" already exists
psql:Data/pagila-schema.sql:1070: ERROR: relation "idx_fk_original_language_id" already exists
psql:Data/pagila-schema.sql:1077: ERROR: relation "idx_fk_payment_customer_id" already exists
psql:Data/pagila-schema.sql:1083: ERROR: relation "idx_fk_payment_staff_id" already exists
psql:Data/pagila-schema.sql:1092: ERROR: relation "idx_fk_store_id" already exists
psql:Data/pagila-schema.sql:1099: ERROR: relation "idx_last_name" already exists
psql:Data/pagila-schema.sql:1106: ERROR: relation "idx_store_id_film_id" already exists
psql:Data/pagila-schema.sql:1113: ERROR: relation "idx_title" already exists
psql:Data/pagila-schema.sql:1120: ERROR: relation "idx_unq_manager_staff_id" already exists
psql:Data/pagila-schema.sql:1127: ERROR: relation "idx_unq_rental_rental_date_inventory_id_customer_id" already exists
psql:Data/pagila-schema.sql:1133: ERROR: trigger "film_fulltext_trigger" for relation "film" already exists
psql:Data/pagila-schema.sql:1140: ERROR: trigger "last_updated" for relation "actor" already exists
psql:Data/pagila-schema.sql:1147: ERROR: trigger "last_updated" for relation "address" already exists
psql:Data/pagila-schema.sql:1154: ERROR: trigger "last_updated" for relation "category" already exists
psql:Data/pagila-schema.sql:1161: ERROR: trigger "last_updated" for relation "city" already exists
psql:Data/pagila-schema.sql:1168: ERROR: trigger "last_updated" for relation "country" already exists
psql:Data/pagila-schema.sql:1175: ERROR: trigger "last_updated" for relation "customer" already exists
psql:Data/pagila-schema.sql:1182: ERROR: trigger "last_updated" for relation "film" already exists
psql:Data/pagila-schema.sql:1189: ERROR: trigger "last_updated" for relation "film_actor" already exists
psql:Data/pagila-schema.sql:1196: ERROR: trigger "last_updated" for relation "film_category" already exists
psql:Data/pagila-schema.sql:1203: ERROR: trigger "last_updated" for relation "inventory" already exists
psql:Data/pagila-schema.sql:1210: ERROR: trigger "last_updated" for relation "language" already exists
psql:Data/pagila-schema.sql:1217: ERROR: trigger "last_updated" for relation "rental" already exists
psql:Data/pagila-schema.sql:1224: ERROR: trigger "last_updated" for relation "staff" already exists
psql:Data/pagila-schema.sql:1231: ERROR: trigger "last_updated" for relation "store" already exists
psql:Data/pagila-schema.sql:1239: ERROR: constraint "address_city_id_fkey" for relation "address" already exists
psql:Data/pagila-schema.sql:1247: ERROR: constraint "city_country_id_fkey" for relation "city" already exists
psql:Data/pagila-schema.sql:1255: ERROR: constraint "customer_address_id_fkey" for relation "customer" already exists
psql:Data/pagila-schema.sql:1263: ERROR: constraint "customer_store_id_fkey" for relation "customer" already exists
psql:Data/pagila-schema.sql:1271: ERROR: constraint "film_actor_actor_id_fkey" for relation "film_actor" already exists
psql:Data/pagila-schema.sql:1279: ERROR: constraint "film_actor_film_id_fkey" for relation "film_actor" already exists
psql:Data/pagila-schema.sql:1287: ERROR: constraint "film_category_category_id_fkey" for relation "film_category" already exists
psql:Data/pagila-schema.sql:1295: ERROR: constraint "film_category_film_id_fkey" for relation "film_category" already exists
psql:Data/pagila-schema.sql:1303: ERROR: constraint "film_language_id_fkey" for relation "film" already exists
psql:Data/pagila-schema.sql:1311: ERROR: constraint "film_original_language_id_fkey" for relation "film" already exists
psql:Data/pagila-schema.sql:1319: ERROR: constraint "inventory_film_id_fkey" for relation "inventory" already exists
psql:Data/pagila-schema.sql:1327: ERROR: constraint "inventory_store_id_fkey" for relation "inventory" already exists
psql:Data/pagila-schema.sql:1334: ERROR: constraint "rental_customer_id_fkey" for relation "rental" already exists
psql:Data/pagila-schema.sql:1342: ERROR: constraint "rental_inventory_id_fkey" for relation "rental" already exists
psql:Data/pagila-schema.sql:1350: ERROR: constraint "rental_staff_id_fkey" for relation "rental" already exists
psql:Data/pagila-schema.sql:1358: ERROR: constraint "staff_address_id_fkey" for relation "staff" already exists
psql:Data/pagila-schema.sql:1366: ERROR: constraint "staff_store_id_fkey" for relation "staff" already exists
psql:Data/pagila-schema.sql:1374: ERROR: constraint "store_address_id_fkey" for relation "store" already exists
psql:Data/pagila-schema.sql:1384: ERROR: constraint "payment_customer_id_fkey" for relation "payment" already exists
psql:Data/pagila-data.sql:224: ERROR: duplicate key value violates unique constraint "actor_pkey"
DETAIL: Key (actor_id)=(1) already exists.
CONTEXT: COPY actor, line 1
psql:Data/pagila-data.sql:341: ERROR: duplicate key value violates unique constraint "country_pkey"
DETAIL: Key (country_id)=(1) already exists.
CONTEXT: COPY country, line 1
psql:Data/pagila-data.sql:949: ERROR: duplicate key value violates unique constraint "city_pkey"
DETAIL: Key (city_id)=(1) already exists.
CONTEXT: COPY city, line 1
psql:Data/pagila-data.sql:1560: ERROR: duplicate key value violates unique constraint "address_pkey"
DETAIL: Key (address_id)=(1) already exists.
CONTEXT: COPY address, line 1
psql:Data/pagila-data.sql:1584: ERROR: duplicate key value violates unique constraint "category_pkey"
DETAIL: Key (category_id)=(1) already exists.
CONTEXT: COPY category, line 1
psql:Data/pagila-data.sql:1594: ERROR: duplicate key value violates unique constraint "store_pkey"
DETAIL: Key (store_id)=(1) already exists.
CONTEXT: COPY store, line 1
psql:Data/pagila-data.sql:2201: ERROR: duplicate key value violates unique constraint "customer_pkey"
DETAIL: Key (customer_id)=(1) already exists.
CONTEXT: COPY customer, line 1
psql:Data/pagila-data.sql:2215: ERROR: duplicate key value violates unique constraint "language_pkey"
DETAIL: Key (language_id)=(1) already exists.
CONTEXT: COPY language, line 1
psql:Data/pagila-data.sql:3223: ERROR: duplicate key value violates unique constraint "film_pkey"
DETAIL: Key (film_id)=(1) already exists.
CONTEXT: COPY film, line 1: "1 ACADEMY DINOSAUR A Epic Drama of a Feminist And a Mad Scientist who must Battle a Teacher in The C..."
psql:Data/pagila-data.sql:8693: ERROR: duplicate key value violates unique constraint "film_actor_pkey"
DETAIL: Key (actor_id, film_id)=(1, 1) already exists.
CONTEXT: COPY film_actor, line 1
psql:Data/pagila-data.sql:9701: ERROR: duplicate key value violates unique constraint "film_category_pkey"
DETAIL: Key (film_id, category_id)=(1, 6) already exists.
CONTEXT: COPY film_category, line 1
psql:Data/pagila-data.sql:14290: ERROR: duplicate key value violates unique constraint "inventory_pkey"
DETAIL: Key (inventory_id)=(1) already exists.
CONTEXT: COPY inventory, line 1
psql:Data/pagila-data.sql:14300: ERROR: duplicate key value violates unique constraint "staff_pkey"
DETAIL: Key (staff_id)=(1) already exists.
CONTEXT: COPY staff, line 1
psql:Data/pagila-data.sql:30352: ERROR: duplicate key value violates unique constraint "rental_pkey"
DETAIL: Key (rental_id)=(2) already exists.
CONTEXT: COPY rental, line 1
setval
--------
200
(1 row)
setval
--------
605
(1 row)
setval
--------
16
(1 row)
setval
--------
600
(1 row)
setval
--------
109
(1 row)
setval
--------
599
(1 row)
setval
--------
1000
(1 row)
setval
--------
4581
(1 row)
setval
--------
6
(1 row)
setval
--------
32098
(1 row)
setval
--------
16049
(1 row)
setval
--------
2
(1 row)
setval
--------
2
(1 row)
###Markdown
1.2 Connect to the newly created db
###Code
%load_ext sql
DB_ENDPOINT = "127.0.0.1"
DB = 'pagila'
DB_USER = 'student'
DB_PASSWORD = 'student'
DB_PORT = '5432'
# postgresql://username:password@host:port/database
conn_string = "postgresql://{}:{}@{}:{}/{}" \
.format(DB_USER, DB_PASSWORD, DB_ENDPOINT, DB_PORT, DB)
print(conn_string)
%sql $conn_string
###Output
_____no_output_____
###Markdown
STEP2 : Explore the 3NF Schema 2.1 How much? What data sizes are we looking at?
###Code
# Row counts for the main Pagila tables; each %sql returns a 1x1 result
# set, so the value is read back with [0][0].
nStores = %sql select count(*) from store;
nFilms = %sql select count(*) from film;
nCustomers = %sql select count(*) from customer;
nRentals = %sql select count(*) from rental;
nPayment = %sql select count(*) from payment;
nStaff = %sql select count(*) from staff;
nCity = %sql select count(*) from city;
nCountry = %sql select count(*) from country;
print("nFilms\t\t=", nFilms[0][0])
print("nCustomers\t=", nCustomers[0][0])
print("nRentals\t=", nRentals[0][0])
print("nPayment\t=", nPayment[0][0])
print("nStaff\t\t=", nStaff[0][0])
print("nStores\t\t=", nStores[0][0])
print("nCities\t\t=", nCity[0][0])
print("nCountry\t\t=", nCountry[0][0])
###Output
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
nFilms = 1000
nCustomers = 599
nRentals = 16044
nPayment = 32098
nStaff = 2
nStores = 2
nCities = 600
nCountry = 109
###Markdown
2.2 When? What time period are we talking about?
###Code
%%sql
-- Time window covered by the dataset: earliest and latest payment dates.
select min(payment_date) as start, max(payment_date) as end from payment;
###Output
* postgresql://student:***@127.0.0.1:5432/pagila
1 rows affected.
###Markdown
2.3 Where? Where do events in this database occur? TODO: Write a query that displays the number of addresses by district in the address table. Limit the table to the top 10 districts. Your results should match the table below.
###Code
%%sql
-- Top 10 districts by number of addresses (ties broken alphabetically).
select district,count(address_id) as n
from address
group by district
order by count(address_id) desc, district
limit 10;
###Output
* postgresql://student:***@127.0.0.1:5432/pagila
10 rows affected.
|
Making nice oceanographic maps with cartopy .ipynb | ###Markdown
Import packages
###Code
import warnings
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.mpl.ticker as cticker
from cartopy.util import add_cyclic_point
###Output
_____no_output_____
###Markdown
Read the data
###Code
# Local data directory and file names: NOAA monthly-mean SST plus the
# matching land/sea mask.
path = 'C:/Users/shant/intro to python/'
fname = 'sst.mnmean.nc'
mname = 'lsmask.nc'
# Open the SST NetCDF file as an xarray Dataset (lazy-loaded).
ds = xr.open_dataset(path+fname)
ds
###Output
_____no_output_____
###Markdown
Load the land-sea mask data
###Code
# Load the land/sea mask from its own NetCDF file.
masknc = xr.open_dataset(path+mname)
# strip the time dimension off the mask; presumably nonzero = ocean,
# so this becomes a boolean "sea" mask — TODO confirm against the file docs.
seamask = masknc.variables['mask'][0].astype(bool)
masknc.close()
# Land is simply the complement of sea; shape is (lat, lon) = (180, 360).
landmask = np.logical_not(seamask)
print(landmask.shape)
###Output
(180, 360)
###Markdown
Make some initial plot with the data
###Code
# Pull the full SST array (time, lat, lon) out of the dataset.
sst = ds.variables['sst'][:]
plt.contourf(sst[0]) #plot the 1st month of the data
plt.colorbar()
# Mask out land cells so they are not drawn (applied across all time steps).
sst_array = np.ma.array(sst)
sst_array[:, landmask] = np.ma.masked
plt.imshow(sst_array[0])
sst_array.shape
###Output
_____no_output_____
###Markdown
Plot with a mapWe want a proper plot with coastline, lat, lon etc.
###Code
#make the figure larger
fig = plt.figure(figsize=(11,8.5))
# Set the axes using the specified map projection
ax=plt.axes(projection=ccrs.PlateCarree())
# Add cyclic point to data so the 0/360 longitude seam is closed.
data=sst[0]
data, lons = add_cyclic_point(data, coord=ds['lon'])
# Make a filled contour plot.
# NOTE(review): the cyclic `data`/`lons` computed above are never used —
# the call below plots sst_array[0] against ds['lon'] instead. Likely the
# intent was ax.contourf(lons, ds['lat'], data, ...); confirm (masking of
# land would then need to be applied to the cyclic array as well).
ax.contourf(ds['lon'], ds['lat'], sst_array[0],
            transform = ccrs.PlateCarree(),cmap=plt.cm.jet)
# Add coastlines
ax.coastlines()
# Define the xticks for longitude
ax.set_xticks(np.arange(-180,181,60), crs=ccrs.PlateCarree())
lon_formatter = cticker.LongitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
# Define the yticks for latitude
ax.set_yticks(np.arange(-90,91,30), crs=ccrs.PlateCarree())
lat_formatter = cticker.LatitudeFormatter()
ax.yaxis.set_major_formatter(lat_formatter)
###Output
_____no_output_____ |
ISL81802_Dmax.ipynb | ###Markdown
###Code
###Output
_____no_output_____
###Markdown
ISL81802 Max Duty Cycle Libraries Input Parameters
###Code
fs_nom = 750e3     # nominal switching frequency [Hz]
toff_min = 220e-9  # minimum off-time [s] — presumably the ISL81802 datasheet value, TODO confirm
Vin_min= 9         # minimum input voltage [V]
###Output
_____no_output_____
###Markdown
Functions
###Code
# Derivation of the maximum duty cycle:
#   Ts = Ton + Toff
#   Ts = D*Ts + Toff
#   D  = (Ts - Toff) / Ts
#   D  = 1 - Toff/Ts = 1 - Toff*Fs
def Dmax(fs, toff_min=220e-9):
    """Return the maximum duty cycle (0..1) at switching frequency ``fs`` [Hz].

    ``toff_min`` is the controller's minimum off-time in seconds; it now is a
    parameter (defaulting to the 220 ns used throughout this notebook) instead
    of a hidden module-level global, so the function can be reused as-is.
    """
    return 1 - toff_min * fs

def Vout_max(fs, Vin, toff_min=220e-9):
    """Return the maximum output voltage [V] at ``fs`` [Hz] from input ``Vin`` [V]."""
    return Dmax(fs, toff_min) * Vin
###Output
_____no_output_____
###Markdown
Results >>>$Dutycycle_{max}=1-Toff_{min}\cdot Fs$
###Code
#@title
# Report max duty cycle and max output voltage at the nominal operating point.
print('At fs=%3.1fkHz Max_Dutycycle = %3.1f%%' % (fs_nom/1000, Dmax(fs_nom)*100))
print('At fs=%3.1fkHz, %3.1fVin Max_Vout = %3.1fV' % (fs_nom/1000, Vin_min, Vout_max(fs_nom, Vin_min)))
###Output
At fs=750.0kHz Max_Dutycycle = 83.5%
At fs=750.0kHz, 9.0Vin Max_Vout = 7.5V
|
feature_extraction_dense.ipynb | ###Markdown
Upload data to sqlite tables.
###Code
import pandas as pd
import numpy as np
from IPython import display as dis
import scipy.io.wavfile as wav
import tensorflow as tf
import time
%matplotlib inline
# Play one utterance inline in the notebook.
dis.Audio("dataset/wav/Ses01F_impro01/Ses01F_impro01_M007.wav")
# Load a waveform as (sample_rate, int16 samples).
# NOTE(review): the audio widget above plays M007 but this loads F000 —
# confirm which file the rest of the notebook is meant to use.
(rate,sig) = wav.read("dataset/wav/Ses01F_impro01/Ses01F_impro01_F000.wav")
print(sig)
print(sig.shape)
class network(object):
    """TF1 graph: a dense encoder that reconstructs its own input.

    build_layers() assembles placeholders, a dropout-regularised dense
    encoder, a linear output layer of the same width as the input, a loss,
    and an Adam optimizer with gradient clipping, storing the resulting ops
    as attributes (x, y_, y, train_op, regularized_loss, pred, saver, ...).
    """
    input_dim = 31129           # width of one input vector (flattened waveform)
    classes = 2                 # NOTE(review): unused in this class — confirm
    hidden_encoder_dim = 1000   # units in the first encoder layer
    hidden_layers = 1           # total encoder layers (extra layers use latent_dim)
    latent_dim = 61             # width of any additional encoder layers
    def build_layers(self):
        """Build the TF1 computation graph; call once before training."""
        tf.reset_default_graph()
        input_dim = self.input_dim
        hidden_encoder_dim = self.hidden_encoder_dim
        hidden_layers = self.hidden_layers
        latent_dim = self.latent_dim
        with tf.variable_scope("Input"):
            # x: inputs; y_: targets (same width as x, i.e. reconstruction).
            self.x = tf.placeholder("float", shape=[None, input_dim])
            self.y_ = tf.placeholder("float", shape=[None, input_dim])
            # Dropout keep probability and learning rate are fed at run time.
            self.keep_prob = tf.placeholder("float")
            self.lr = tf.placeholder("float")
        with tf.variable_scope("Layer_Encoder"):
            # First (widest) encoder layer with L2 kernel regularisation.
            hidden_encoder = tf.layers.dense(self.x, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
            # Optional extra layers, each latent_dim wide (none when
            # hidden_layers == 1, as configured above).
            for h in range(hidden_layers - 1):
                hidden_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
                hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
            #hidden_encoder = tf.layers.dense(self.x, latent_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            #hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
        with tf.variable_scope("Layer_Dense_Softmax"):
            # Linear (pre-softmax) output, same width as the input.
            self.y = tf.layers.dense(hidden_encoder, input_dim, activation=None)
        with tf.variable_scope("Loss"):
            # NOTE(review): softmax cross-entropy expects `labels` to be a
            # probability distribution per row; here y_ is the raw waveform,
            # which is why the recorded training losses go negative. The
            # commented-out MSE below looks like the intended alternative —
            # confirm which loss is meant.
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y_, logits = self.y))
            #loss = tf.losses.mean_squared_error(labels = self.y_, predictions = self.y)
            #loss = tf.clip_by_value(loss, -1e-1, 1e-1)
            #loss = tf.where(tf.is_nan(loss), 1e-1, loss)
            #loss = tf.where(tf.equal(loss, -1e-1), tf.random_normal(loss.shape), loss)
            #loss = tf.where(tf.equal(loss, 1e-1), tf.random_normal(loss.shape), loss)
            self.regularized_loss = loss
            # Accuracy = fraction of rows whose argmax matches the target's.
            correct_prediction = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
            self.tf_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name = "Accuracy")
        with tf.variable_scope("Optimizer"):
            learning_rate=self.lr
            optimizer = tf.train.AdamOptimizer(learning_rate)
            # Clip every gradient element to [-1, 1] before applying.
            gradients, variables = zip(*optimizer.compute_gradients(self.regularized_loss))
            gradients = [
                None if gradient is None else tf.clip_by_value(gradient, -1, 1)
                for gradient in gradients]
            self.train_op = optimizer.apply_gradients(zip(gradients, variables))
            #self.train_op = optimizer.minimize(self.regularized_loss)
        # add op for merging summary
        #self.summary_op = tf.summary.merge_all()
        # Row-wise argmax of prediction and target, for inspection.
        self.pred = tf.argmax(self.y, axis = 1)
        self.actual = tf.argmax(self.y_, axis = 1)
        # add Saver ops
        self.saver = tf.train.Saver()
import collections
class Train:
    # NOTE: no @staticmethod decorator; this works in Python 3 when invoked
    # as Train.train(...), as Hyperparameters.start_training does.
    def train(epochs, net, lrs):
        """Run `epochs` training steps for every learning rate in `lrs`.

        Feeds the module-level globals x_train / y_train as full-batch
        input and target; prints the loss after each step.
        """
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            start_time = time.perf_counter()
            for c, lr in enumerate(lrs):
                for epoch in range(1, (epochs+1)):
                    print("Step {} ".format(epoch))
                    # One full-batch optimisation step; keep_prob=1 disables dropout.
                    _, train_loss = sess.run([net.train_op, net.regularized_loss], #net.summary_op
                                       feed_dict={net.x: x_train,
                                                  net.y_: y_train,
                                                  net.keep_prob: 1,
                                                  net.lr:lr})
                    print("Training Loss: {:.6f}".format(train_loss))
                    #valid_accuracy,valid_loss = sess.run([net.tf_accuracy, net.regularized_loss], #net.summary_op
                    #                                  feed_dict={net.x_input: x_valid[np.newaxis,...],
                    #                                             net.y_input_: y_valid[np.newaxis,...],
                    #                                             net.lr:lr})
                    #accuracy, y_pred = sess.run([net.tf_accuracy,
                    #                                     net.pred,
                    #                                     net.actual, net.y],
                    #                                    feed_dict={net.x_input: x_test[np.newaxis,...],
                    #                                               net.y_input_: y_test[np.newaxis,...],
                    #                                               net.lr:lr})
import itertools
class Hyperparameters:
    # Invoked as Hyperparameters.start_training() (no self/cls needed).
    def start_training():
        """Build the network and launch training with fixed hyperparameters."""
        learning_rates = [1e-5]
        num_epochs = 10
        model = network()
        model.build_layers()
        Train.train(num_epochs, model, learning_rates)
# Autoencoder-style setup: the (1, n_samples) waveform is both input and target.
x_train = y_train = np.reshape(sig, (1, -1))
Hyperparameters.start_training()
###Output
Step 1
Training Loss: -1909006.375000
Step 2
Training Loss: -30779968.000000
Step 3
Training Loss: -59287088.000000
Step 4
Training Loss: -88266592.000000
Step 5
Training Loss: -117741392.000000
Step 6
Training Loss: -147897248.000000
Step 7
Training Loss: -178906256.000000
Step 8
Training Loss: -210892048.000000
Step 9
Training Loss: -244253120.000000
Step 10
Training Loss: -279045792.000000
|
Exemplo_Detector_Arco_Senil.ipynb | ###Markdown
Reconhecimento de Arco Senil em olhosVamos treinar um modelo baseado em MultiLayer Perceptron para identificar arco senil Montar o Drive, para que seja possível acessar o dataset
###Code
from google.colab import drive, files
drive.mount('/content/drive/')
from matplotlib import pyplot
from matplotlib.image import imread

# Directory holding the "Senil" (arcus senilis) training images.
folder = '/content/drive/My Drive/ia/ArcoSenilDataset/treino/Senil/'

# Display the first six images on a 3x3 subplot grid.
for idx in range(6):
    pyplot.subplot(331 + idx)
    img = imread(f"{folder}imagem{idx}.jpg")
    pyplot.imshow(img)
pyplot.show()
from matplotlib import pyplot
from matplotlib.image import imread

# Directory holding the "Normal" (healthy eye) training images.
folder = '/content/drive/My Drive/ia/ArcoSenilDataset/treino/Normal/'

# Display the first six images on a 3x3 subplot grid.
for idx in range(6):
    pyplot.subplot(331 + idx)
    img = imread(f"{folder}images{idx}.jpeg")
    pyplot.imshow(img)
pyplot.show()
###Output
_____no_output_____
###Markdown
Vamos iterar diretório por diretório e, em seguida, cada imagem nesse diretório - redimensionando-as rapidamente para (128 * 128) e anexar a matriz de pixels de cada imagem X e seu rótulo correspondente y
###Code
import numpy as np
from PIL import Image
import operator
from operator import itemgetter
import os

# X: one pixel array per image; y: the class name (sub-directory) per image.
X = []
y = []
count = 0
# Root of the training set: one sub-directory per class ("Senil", "Normal").
# Renamed from `dir`, which shadowed the builtin of the same name.
data_dir = "/content/drive/My Drive/ia/ArcoSenilDataset/treino/"
for label in os.listdir(data_dir):
    class_dir = os.path.join(data_dir, label)
    # List the directory once instead of three times per class.
    entries = os.listdir(class_dir)
    print(label, ":", len(entries))
    count += len(entries)
    for fname in entries:
        img = Image.open(os.path.join(class_dir, fname))
        # Normalise every image to 128x128 so the feature vectors align.
        img = img.resize((128, 128))
        X.append(np.asarray(img))
        y.append(label)
print(count)
X = np.asarray(X)
y = np.asarray(y)
print(X.shape, y.shape)
###Output
Senil : 69
Normal : 54
123
(123, 128, 128, 3) (123,)
###Markdown
Remodelando X de (123, 128, 128, 3) para (123, 128 * 128 * 3):
###Code
X = X.reshape(123, 49152).astype('float32')
###Output
_____no_output_____
###Markdown
Normalizando os pixels entre 0 e 1:
###Code
# Scale 8-bit pixel values into [0, 1] in place.
X/=255
X.shape
###Output
_____no_output_____
###Markdown
Vamos treinar o modelo baseado em MultiLayer Perceptron
###Code
from sklearn.neural_network import MLPClassifier #Importing MLPClassifier for classification.
#Initializing the MLPClassifier: three hidden layers of 100 units each.
mlp = MLPClassifier(hidden_layer_sizes=(100,100,100),max_iter=500)
# NOTE(review): the model is fit on the full dataset with no train/test
# split, so there is no held-out accuracy estimate — consider adding one.
mlp.fit(X,y)
###Output
_____no_output_____
###Markdown
Vamos carregar uma imagem para detectar se o olho apresenta arco senil
###Code
from PIL import Image
# Load a known "Senil" training image and resize it to the model's 128x128 input.
img = Image.open('/content/drive/My Drive/ia/ArcoSenilDataset/treino/Senil/imagem1.jpg')
img = img.resize((128,128))
img
###Output
_____no_output_____
###Markdown
Vamos transformar a imagem em um array de pixel e redimensionar
###Code
# Flatten the image to a single row vector matching the training features.
# (1, -1) generalises the hard-coded (1, 49152) = 128*128*3, and the stray
# space before .reshape is removed.
new_img = np.asarray(img)
new_img = new_img.reshape(1, -1).astype('float32')
new_img
###Output
_____no_output_____
###Markdown
Vamos normalizar
###Code
# Scale pixel values into [0, 1], mirroring the training preprocessing.
new_img/=255
new_img.shape
new_img
###Output
_____no_output_____
###Markdown
Vamos predizer se há arco senil a partir da imagem que foi carregada
###Code
# Classify the preprocessed image; prints the predicted label ('Senil'/'Normal').
prediction = mlp.predict(new_img)
print(prediction)
from PIL import Image
# Load a known "Normal" training image and resize it to the model's 128x128 input.
img = Image.open('/content/drive/My Drive/ia/ArcoSenilDataset/treino/Normal/images8.jpeg')
img = img.resize((128,128))
img
# Same preprocessing as for training: flatten, cast, and scale to [0, 1].
# (1, -1) generalises the hard-coded (1, 49152) = 128*128*3.
new_img = np.asarray(img)
new_img = new_img.reshape(1, -1).astype('float32')
new_img
new_img/=255
new_img.shape
new_img
# Classify the preprocessed image; expected label here is 'Normal'.
prediction = mlp.predict(new_img)
print(prediction)
###Output
['Normal']
|
20-Natural-Language-Processing/03-NLP Project - Solutions.ipynb | ###Markdown
___ ___ Natural Language Processing ProjectWelcome to the NLP Project for this section of the course. In this NLP project you will be attempting to classify Yelp Reviews into 1 star or 5 star categories based off the text content in the reviews. This will be a simpler procedure than the lecture, since we will utilize the pipeline methods for more complex tasks.We will use the [Yelp Review Data Set from Kaggle](https://www.kaggle.com/c/yelp-recsys-2013).Each observation in this dataset is a review of a particular business by a particular user.The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business. (Higher stars is better.) In other words, it is the rating of the business by the person who wrote the review.The "cool" column is the number of "cool" votes this review received from other Yelp users. All reviews start with 0 "cool" votes, and there is no limit to how many "cool" votes a review can receive. In other words, it is a rating of the review itself, not a rating of the business.The "useful" and "funny" columns are similar to the "cool" column.Let's get started! Just follow the directions below! Imports **Import the usual suspects. :) **
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
The Data**Read the yelp.csv file and set it as a dataframe called yelp.**
###Code
yelp = pd.read_csv('yelp.csv')
###Output
_____no_output_____
###Markdown
** Check the head, info , and describe methods on yelp.**
###Code
# Quick look at the data: first rows, dtypes/non-null counts, numeric summary.
yelp.head()
yelp.info()
yelp.describe()
###Output
_____no_output_____
###Markdown
**Create a new column called "text length" which is the number of words in the text column.**
###Code
yelp['text length'] = yelp['text'].apply(len)
###Output
_____no_output_____
###Markdown
EDALet's explore the data Imports**Import the data visualization libraries if you haven't done so already.**
###Code
# Plotting setup: matplotlib + seaborn with a white style, inline rendering.
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
%matplotlib inline
###Output
_____no_output_____
###Markdown
**Use FacetGrid from the seaborn library to create a grid of 5 histograms of text length based off of the star ratings. Reference the seaborn documentation for hints on this**
###Code
# Histogram of review length, one panel per star rating
g = sns.FacetGrid(yelp,col='stars')
g.map(plt.hist,'text length')
###Output
_____no_output_____
###Markdown
**Create a boxplot of text length for each star category.**
###Code
# Distribution of review length within each star rating
sns.boxplot(x='stars',y='text length',data=yelp,palette='rainbow')
###Output
_____no_output_____
###Markdown
**Create a countplot of the number of occurrences for each type of star rating.**
###Code
# How many reviews fall in each star rating
sns.countplot(x='stars',data=yelp,palette='rainbow')
###Output
_____no_output_____
###Markdown
** Use groupby to get the mean values of the numerical columns, you should be able to create this dataframe with the operation:**
###Code
# Mean of the numeric columns per star rating.
# numeric_only=True is required in pandas >= 2.0, where DataFrame.mean no longer
# silently drops non-numeric columns (the 'text' column would raise a TypeError).
stars = yelp.groupby('stars').mean(numeric_only=True)
stars
###Output
_____no_output_____
###Markdown
**Use the corr() method on that groupby dataframe to produce this dataframe:**
###Code
# Pairwise correlations between the per-star mean values
stars.corr()
###Output
_____no_output_____
###Markdown
**Then use seaborn to create a heatmap based off that .corr() dataframe:**
###Code
# Heatmap of the correlation matrix, annotated with the coefficients
sns.heatmap(stars.corr(),cmap='coolwarm',annot=True)
###Output
_____no_output_____
###Markdown
NLP Classification TaskLet's move on to the actual task. To make things a little easier, go ahead and only grab reviews that were either 1 star or 5 stars.**Create a dataframe called yelp_class that contains the columns of yelp dataframe but for only the 1 or 5 star reviews.**
###Code
# Keep only the extreme ratings (1-star and 5-star reviews)
yelp_class = yelp[yelp['stars'].isin([1, 5])]
###Output
_____no_output_____
###Markdown
** Create two objects X and y. X will be the 'text' column of yelp_class and y will be the 'stars' column of yelp_class. (Your features and target/labels)**
###Code
# Features are the raw review text; the target is the star rating (1 or 5)
X = yelp_class['text']
y = yelp_class['stars']
###Output
_____no_output_____
###Markdown
**Import CountVectorizer and create a CountVectorizer object.**
###Code
# Bag-of-words vectorizer: maps each document to token counts
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
###Output
_____no_output_____
###Markdown
** Use the fit_transform method on the CountVectorizer object and pass in X (the 'text' column). Save this result by overwriting X.**
###Code
# Learn the vocabulary and convert the text to a sparse count matrix
X = cv.fit_transform(X)
###Output
_____no_output_____
###Markdown
Train Test SplitLet's split our data into training and testing data.** Use train_test_split to split up the data into X_train, X_test, y_train, y_test. Use test_size=0.3 and random_state=101 **
###Code
# 70/30 train/test split with a fixed seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
###Output
_____no_output_____
###Markdown
Training a ModelTime to train a model!** Import MultinomialNB and create an instance of the estimator and call it nb **
###Code
# Multinomial Naive Bayes works well with integer token-count features
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
###Output
_____no_output_____
###Markdown
**Now fit nb using the training data.**
###Code
# Train the classifier on the vectorized training reviews
nb.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Predictions and EvaluationsTime to see how our model did!**Use the predict method off of nb to predict labels from X_test.**
###Code
# Predict star ratings for the held-out test reviews
predictions = nb.predict(X_test)
###Output
_____no_output_____
###Markdown
** Create a confusion matrix and classification report using these predictions and y_test **
###Code
# Confusion matrix and per-class precision/recall/F1 for the count-based model
from sklearn.metrics import confusion_matrix,classification_report
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test,predictions))
###Output
[[159 69]
[ 22 976]]
precision recall f1-score support
1 0.88 0.70 0.78 228
5 0.93 0.98 0.96 998
avg / total 0.92 0.93 0.92 1226
###Markdown
**Great! Let's see what happens if we try to include TF-IDF to this process using a pipeline.** Using Text Processing** Import TfidfTransformer from sklearn. **
###Code
from sklearn.feature_extraction.text import TfidfTransformer
###Output
_____no_output_____
###Markdown
** Import Pipeline from sklearn. **
###Code
from sklearn.pipeline import Pipeline
###Output
_____no_output_____
###Markdown
** Now create a pipeline with the following steps:CountVectorizer(), TfidfTransformer(),MultinomialNB()**
###Code
# Full text-classification pipeline: raw strings in -> predicted star rating out
pipeline = Pipeline([
('bow', CountVectorizer()), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', MultinomialNB()), # train on TF-IDF vectors w/ Naive Bayes classifier
])
###Output
_____no_output_____
###Markdown
Using the Pipeline**Time to use the pipeline! Remember this pipeline has all your pre-process steps in it already, meaning we'll need to re-split the original data (Remember that we overwrote X as the CountVectorized version. What we need is just the text** Train Test Split**Redo the train test split on the yelp_class object.**
###Code
# Re-split from the RAW text: the pipeline does its own vectorization
X = yelp_class['text']
y = yelp_class['stars']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
###Output
_____no_output_____
###Markdown
**Now fit the pipeline to the training data. Remember you can't use the same training data as last time because that data has already been vectorized. We need to pass in just the text and labels**
###Code
# Fit the whole pipeline (vectorize + tf-idf + NB). May take some time.
pipeline.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Predictions and Evaluation** Now use the pipeline to predict from the X_test and create a classification report and confusion matrix. You should notice strange results.**
###Code
# Evaluate the pipeline; tf-idf hurts here (everything predicted as 5 stars)
predictions = pipeline.predict(X_test)
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
###Output
[[ 0 228]
[ 0 998]]
precision recall f1-score support
1 0.00 0.00 0.00 228
5 0.81 1.00 0.90 998
avg / total 0.66 0.81 0.73 1226
|
Sample/Day_20_Sample.ipynb | ###Markdown
* 教學目標:主要說明 matplotlib 的基礎操作 1. 使用常見的子圖與軸圖來做畫面配置 2. 等高線圖* 範例重點 如何使用亂數, 資料集來操作
###Code
#載入 numpy, 提供亂數資料與數學式,
import numpy as np
# 載入 matplotlib
import matplotlib.pyplot as plt
# 從 `sklearn` 載入 `datasets`
from sklearn import datasets
#導入必要的模組
from mpl_toolkits.mplot3d import Axes3D
###Output
_____no_output_____
###Markdown
【基礎20】 製作繪圖板 Subplots * 參考 Day20_Subplot.png* plt.subplot(a, b, c)可透過 a ,b ,c 的數值設定來決定圖象的數量、大小 * a:代表 X 軸的分割 * b:代表 y 軸的分割 * c:代表子版的編號數
###Code
# Build a 10x6 figure and stack three empty axes vertically (3 rows x 1 column);
# each axes can then be drawn on independently.
fig = plt.figure(figsize=(10, 6))
panels = [fig.add_subplot(3, 1, row) for row in range(1, 4)]
ax1, ax2, ax3 = panels
plt.show()
# Sine on the top panel, cosine on the bottom panel of a 2x1 grid.
xs = np.arange(0, 3 * np.pi, 0.1)
curves = [(np.sin(xs), 'Sine'), (np.cos(xs), 'Cosine')]
for row, (ys, title) in enumerate(curves, start=1):
    plt.subplot(2, 1, row)
    plt.plot(xs, ys)
    plt.title(title)
plt.show()
# 載入 `digits`
digits = datasets.load_digits()
# 設定圖形的大小(寬, 高)
fig = plt.figure(figsize=(4, 2))
# 調整子圖形
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# 把前 8 個手寫數字顯示在子圖形
for i in range(8):
# 在 2 x 4 網格中第 i + 1 個位置繪製子圖形,並且關掉座標軸刻度
ax = fig.add_subplot(2, 4, i + 1, xticks = [], yticks = [])
# 顯示圖形,色彩選擇灰階
ax.imshow(digits.images[i], cmap = plt.cm.binary)
# 在左下角標示目標值
ax.text(0, 7, str(digits.target[i]))
# 顯示圖形
plt.show()
###Output
_____no_output_____
###Markdown
* 添加圖例的方式:添加 subplot 的時候傳入 label 引數
###Code
# Three random walks on one axes, distinguished by line style, with a legend.
# Passing label= to each plot call is what feeds ax.legend().
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1)
for line_style, label in [('k', 'one'), ('k--', 'two'), ('k.', 'three')]:
    ax.plot(np.random.randn(1000).cumsum(), line_style, label=label)
ax.legend(loc='best')
###Output
_____no_output_____
###Markdown
除了 PLT 之外的繪圖 * plt.***系列。通過 http://plt.xxx 來畫圖* fig, ax = plt.subplots():指定 figure 和 axes,然後對 axes 單獨操作* Figure:可以解釋為畫布。fig = plt.figure() * 畫圖的第一件事,就是創建一個畫布 figure* Axes:這個不是你畫圖的 xy 座標可以把 axes 理解為你要放到畫布上的各個物體。如果你的 figure 只有一張圖,那麼你只有一個 axes。 如果你的 figure 有 subplot,那麼每一個 subplot,是一個 Axes* Axis:這才是 xy 座標軸。ax.xaxis/ax.yaxis* 參考 Day20_PLT之外的繪圖.png figure 參數說明 * 語法: ``` figure( num = None, figsize=None, dpi=None, facecolor=None, edgecolor=None, frameon=True, FigureClass=, clear=False, **kwargs) ```* 參數說明:| 參數 | 說明 | 預設值 | 備註 ||------|:------:|:------:|:------:|| num | 設定名稱 | 升序命名 figure(透視表輸出視窗) e.g. “figure1” | 可自行設定 figure 名稱,名稱是 INT 或是 str 型別 || figsize | 設定尺寸 | rcParams["figure.fig.size"]=[6.4, 4.8],即 figure 長寬為 6.4 * 4.8 | || dpi | 設定畫素密度 | rcParams["sigure.dpi"]=100 | || facecolor | 設定背景色 | rcParams["figure.facecolor"]='w',即白色white | || frameon / edgecolor | 設定要不要繪製輪廓及輪廓顏色 | rcParams["figure.edgecolor"]='w',即白色 white | || Figureclass | 設定使不使用一個模板 | 不使用 | || clear | 設定當同名 figure 存在時,是否替換它 | False,即不替換 | | text 參數說明 * plt.text:在對應位置加上文字說明來生成相應的數字標籤* 語法: ``` plt.text(0.5,0.5, 'axes([0.2,0.2,.3,.3])',ha='center',va='center',size=16,alpha=.5) ```* 參數說明: * (0.5, 0.5):坐標 * ha='center', va= 'center':代表 horizontalalignment(水平對齊)、verticalalignment(垂直對齊)的方式 * size:文字大小 * alpha:透明度 其他函數 * 圖標如 plt.title, plt.xlabel, plt.ylabel* 參數: * '欲標示的文字' * fontsize=18 * fontfamily='sans-serif' * fontstyle='italic' 製作繪圖板 Axes 軸圖進階:軸與子圖非常相似* 但是可以將圖放置在圖中的任何位置。因此,如果要在較大的圖中放置較小的圖,則可以使用軸。* 特別提醒: tick 刻度線定位器 格式正確的刻度線是準備發布的數據的重要組成部分。Matplotlib為滴答提供了一個完全可配置的系統。有刻度線定位器可以指定刻度線應出現的位置,刻度線格式化程序可以為刻度線提供所需的外觀。主刻度線和次刻度線可以相互獨立地定位和格式化。![image.png](attachment:image.png)
###Code
# Outer frame: axes at [left=0.1, bottom=0.1, width=0.8, height=0.8]
plt.axes([0.1,0.1,.8,.8])
plt.xticks([]), plt.yticks([])
plt.text(0.6,0.6, 'axes([0.1,0.1,.8,.8])',ha='center',va='center',size=20,alpha=.5)
# Inner frame: a smaller axes placed inside the larger one
plt.axes([0.2,0.2,.3,.3])
plt.xticks([]), plt.yticks([])
plt.text(0.5,0.5, 'axes([0.2,0.2,.3,.3])',ha='center',va='center',size=16,alpha=.5)
plt.show()
###Output
_____no_output_____
###Markdown
等高線圖
###Code
def f(x, y):
    """Surface used for the contour demo: (1 - x/2 + x^5 + y^3) * exp(-x^2 - y^2)."""
    return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
# Sample f on a 256x256 grid over [-3, 3] x [-3, 3]
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
# Filled contours with 8 levels, plus black contour lines on top
plt.contourf(X, Y, f(X,Y), 8, alpha=.75, cmap='jet')
# BUG FIX: plt.contour takes `linewidths` (plural); the singular `linewidth`
# was silently ignored (see the UserWarning in the recorded output).
C = plt.contour(X, Y, f(X,Y), 8, colors='black', linewidths=.5)
plt.show()
###Output
<ipython-input-6-deedad4369ac>:10: UserWarning: The following kwargs were not used by contour: 'linewidth'
C = plt.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
###Markdown
製作 Axes3D 繪圖板 * 3D圖形在資料分析、資料建模、圖形和影像處理等領域中都有著廣泛的應用* 主要把想要觀察的重點與場景實現兩種交互 * 一種是可以操縱場景從而能夠從不同的角度觀察模型 * 一種是擁有添加與操作修改模型物件的能力
###Code
# Create a figure with a 3D coordinate system.
# FIX: modern matplotlib (>= 3.4) no longer auto-attaches a bare Axes3D(fig)
# to the figure (the auto_add_to_figure path was removed in 3.7), so the old
# `ax = Axes3D(fig)` renders an empty figure; add_subplot(projection='3d') is
# the supported way and also works on older versions.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Uncomment to look up parameters interactively:
#help(plt.plot)
#help(np.random.sample)
# Draw a cosine curve using the x and y axes
x = np.linspace(0, 1, 100) # linspace creates an evenly spaced array
y = np.cos(x * 2 * np.pi) / 2 + 0.5
# zdir='z' draws the data against the z axis; zs=0.5 places it in the z=0.5 plane
ax.plot(x, y, zs = 0.5, zdir = 'z', color = 'black', label = 'curve in (x, y)')
# Scatter data (20 2D points per colour) on the x and z axes
colors = ('r', 'g', 'b', 'k')
np.random.seed(19680801) # fix the random seed for reproducibility
x = np.random.sample(20 * len(colors))
y = np.random.sample(20 * len(colors))
z = np.random.sample(20 * len(colors))
c_list = []
for i in colors:
    c_list.extend([i] * 20)
# zdir='y' projects the scatter onto the plane y = 0
ax.scatter(x, y, z, zdir = 'y', c = c_list, label = 'point in (x, z)')
# Add the legend
ax.legend()
# Limit the range of each axis
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
# Label the axes
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
###Output
_____no_output_____ |
extras-r/1-ExploreR.ipynb | ###Markdown
Azure Machine Learning and R SetupBefore running through this notebook, make sure you have executed setup.R by running this in a terminal on the Notebook VM:```shellsudo Rscript 0-setup.R```
###Code
library(azuremlsdk)
library(data.table)
library(ggplot2)
###Output
_____no_output_____
###Markdown
Load the workspace
###Code
# Load the Azure ML workspace from the local config.json
ws = load_workspace_from_config()
###Output
_____no_output_____
###Markdown
Retrieve the Attrition Dataset from the workspace
###Code
# Look up the registered dataset by name (backticks needed for the hyphens)
attrition = ws$datasets$`IBM-Employee-Attrition`
###Output
_____no_output_____
###Markdown
Get a FileDataset for the TabularDataset, mount it and load the data into a data.table, then unmount
###Code
# Convert the tabular dataset to CSV files and mount them locally
mc = attrition$to_csv_files()$mount()
mc$start()
# The mount exposes a single CSV part file; read it with data.table::fread
csv_file = paste(mc$mount_point, 'part-00000', sep = '/')
df = fread(file=csv_file)
# Unmount once the data is in memory
mc$stop()
###Output
_____no_output_____
###Markdown
Do some nice plots with ggplot
###Code
# Age vs monthly income, with a smoother, faceted by department and attrition
gg <- ggplot(df, aes(x = Age, y = MonthlyIncome)) +
  geom_point(size = 0.5, color = 'steelblue') +
  geom_smooth(aes()) +
  facet_grid(Department ~ Attrition)
gg
###Output
_____no_output_____ |
my_classes/FirstClassFunctions/partial_functions.ipynb | ###Markdown
Partial Functions
###Code
# Scratch cells exploring functools.partial vs. closures and lambdas.
from functools import partial
# NOTE(review): my_func is not defined yet at this point; in the original
# notebook these cells were presumably executed out of order.
my_func(10, 20, 30)
def f(x, y):
    return my_func(10, x, y)
f(20, 30)
f(100,200)
# The same partial application written as a lambda...
f = lambda x, y: my_func(10, x, y)
f(100, 200)
# ...and with functools.partial.
f = partial(my_func, 10)
f(20,30)
f = partial(my_func, 10, 20)
f(10, 20)
# A richer signature: positional, *args, keyword-only, **kwargs.
def my_func(a, b, *args, k1, k2, **kwargs):
    print(a, b, args, k1, k2, kwargs)
my_func(10, 20, 100, 200, k1='a', k2='b', k3=1000, k4=2000)
def f (x, *vars, kw, **kwvars):
    return my_func(10, x, *vars, k1='a', k2=kw, **kwvars)
f(20, 100, 200, kw='b', k3=1000, k4=2000)
f = partial(my_func, 10, k1='a')
f(20, 100, 200, k2='b', k3=1000, k4=2000)
# Shadows the builtin pow (scratch code only).
def pow(base, exponent):
    return base ** exponent
sq = partial(pow, 2)
sq(10)
sq = partial(pow, exponent=2)
sq(5)
cu = partial(pow, exponent=3)
cu(5)
cu(base=5)
# A keyword preset in the partial can be overridden at call time.
cu(5, exponent=2)
a = 2
sq = partial(pow, exponent=a)
sq(5)
# The value 2 was captured when the partial was built; rebinding a changes nothing.
a = 3
sq(5)
def my_func(a, b):
    print(a, b)
a = [1, 2]
f = partial(my_func, a)
f(100)
a
f(100)
# The partial holds a reference to the list, so in-place mutation IS visible.
a.append(3)
a
f(100)
origin = (0,0)
l = [(1, 1), (0, 2), (-3, 2), (0, 0), (10, 10)]
# Squared Euclidean distance between two 2D points
dist2 = lambda a, b: (a[0] - b[0])**2 + (a[1] - b[1])**2
dist2((1, 1), origin)
sorted(l)
f = partial(dist2, origin)
sorted(l)
f =partial(dist2, origin)
f((1,1))
# Sort points by squared distance from the origin
sorted(l, key=f)
f = lambda x: dist2(origin, x)
sorted(l, key=f)
sorted(l, key=lambda x: dist2(origin, x))
# NOTE(review): this lambda ignores x and calls dist2 with one argument;
# it would raise TypeError if ever invoked (it never is).
f= lambda x: dist2(origin)
sorted(l, key=lambda x:dist2(origin, x))
###Output
_____no_output_____ |
Part 2.1.ipynb | ###Markdown
NumPy
###Code
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Exercise 1. Consider the polynomial expression $$p(x) = a_0 + a_1 x + a_2 x^2 + \cdots + a_N x^N = \sum_{n=0}^{N} a_n x^n \tag{1}$$ Earlier, you wrote a simple function p(x, coeff) to evaluate (1) without considering efficiency. Now write a new function that does the same job, but uses NumPy arrays and array operations for its computations, rather than any form of Python loop. (Such functionality is already implemented as np.poly1d, but for the sake of the exercise don't use this class.)`Hint: Use np.cumprod()`
###Code
def np_compute_poly(coeffs:np.ndarray,x:np.float64)->np.float64:
    """Evaluate p(x) = sum_n coeffs[n] * x**n without a Python loop.

    The powers 1, x, x**2, ... are produced by a cumulative product of
    [1, x, x, ...], then contracted against the coefficient vector.
    """
    powers = np.empty(coeffs.shape[0])
    powers[0] = 1
    powers[1:] = x
    return np.cumprod(powers) @ coeffs
# Sanity check: p(1) with coefficients [1, 1, 1] should be 3.0
coeffs = np.ones(3)
x = 1
coeffs.shape, x, np_compute_poly(coeffs=coeffs,x=x)
###Output
_____no_output_____
###Markdown
Exercise 2Let `q` be a NumPy array of length n with `q.sum() == 1`Suppose that `q` represents a probability mass functionWe wish to generate a discrete random variable xsuch that P{x=i}=qiIn other words, `x` takes values in `range(len(q))` and `x = i` with probability `q[i]`The standard (inverse transform) algorithm is as follows: Divide the unit interval [0,1] into n subintervals I0,I1,…,In−1 such that the length of Ii is qi Draw a uniform random variable U on [0,1] and return the i such that U∈IiThe probability of drawing i is the length of Ii, which is equal to qiWe can implement the algorithm as follows:```from random import uniformdef sample(q): a = 0.0 U = uniform(0, 1) for i in range(len(q)): if a < U <= a + q[i]: return i a = a + q[i```If you can’t see how this works, try thinking through the flow for a simple example, such as `q = [0.25, 0.75]` It helps to sketch the intervals on paperYour exercise is to speed it up using NumPy, avoiding explicit loops* Hint: Use `np.searchsorted` and `np.cumsum`If you can, implement the functionality as a class called discreteRV, where:* the data for an instance of the class is the vector of probabilities `q`* the class has a `draw()` method, which returns one draw according to the algorithm described above.If you can, write the method so that `draw(k)` returns `k` draws from `q`
###Code
class discreteRV:
    """Discrete random variable taking value i with probability q[i]."""

    def __init__(self, q: np.ndarray) -> None:
        self.q = q
        # Cumulative distribution: interval lengths match the probabilities,
        # which lets searchsorted invert a uniform draw.
        self.Qsum = np.cumsum(self.q)

    def draw(self, k: int = 1) -> np.ndarray:
        """Return k independent draws from q via the inverse-transform method."""
        uniforms = np.random.uniform(0, 1, size=k)
        return np.searchsorted(self.Qsum, uniforms)
# A pmf over 13 outcomes; sum(q) must equal 1 for draw() to be a valid sampler
q = (0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.11,0.12,0.22)
d = discreteRV(q)
print(sum(q)==1,"\n")
# Histogram of 100 draws; higher-probability outcomes appear more often
plt.hist(d.draw(100))
###Output
True
###Markdown
Exercise 3Recall our earlier discussion of the empirical cumulative distribution function: From OOP-II - Ex-1The empirical cumulative distribution function (ecdf) corresponding to a sample {Xi}ni=1 is defined as$$F_n(x) := \frac{1}{n} \sum_{i=1}^n \mathbf{1}\{X_i \leq x\} \qquad (x \in \mathbb{R})$$Here **$\mathbf{1}\{X_i \leq x\}$** is an indicator function *(one if **$X_i \leq x$** and zero otherwise)* and hence **$F_n(x)$** is the fraction of the sample that falls below x.The *Glivenko–Cantelli* Theorem states that, provided that the sample is iid, the *ecdf* **Fnn** converges to the true distribution function F.Implement ***$F_n$*** as a class called ***`ECDF`***, where* A given sample $\{X_i\}_{i=1}^n$ are the instance data, stored as `self.observations`* The class implements a `__call__` method that returns $F_n(x)$ for any $x$Your code should work as follows (modulo randomness)```pythonfrom random import uniformsamples = [uniform(0, 1) for i in range(10)]F = ECDF(samples)F(0.5) Evaluate ecdf at x = 0.5``````none0.29``````pythonF.observations = [uniform(0, 1) for i in range(1000)]F(0.5)``````none0.479``` Back to Ex-2(Part-2)Your task is to: * Make the `__call__` method more efficient using NumPy * Add a method that plots the ECDF over [a,b], where a and b are method parameters.
###Code
class ECDF:
    """Empirical cumulative distribution function of a sample.

    F_n(x) = (1/n) * #{ observations <= x }.
    """

    def __init__(self, samples: np.ndarray) -> None:
        self.observations = np.array(samples)

    def __call__(self, x: np.float64) -> np.float64:
        """Return the fraction of the sample that is <= x."""
        return np.mean(self.observations <= x)

    def plot(self, a: float = None, b: float = None) -> None:
        """Plot the ECDF over [a, b].

        Defaults pad the sample range by one standard deviation on each side.
        BUG FIX: the upper default previously used min() + std instead of
        max() + std, so the plot window could end below most of the data.
        """
        if a is None:
            a = self.observations.min() - self.observations.std()
        if b is None:
            b = self.observations.max() + self.observations.std()
        values = np.linspace(a, b, 100)
        f = np.vectorize(self.__call__)
        plt.plot(values, f(values))
        plt.show()
samples_10 = [np.random.randn(10)]
samples_1K = [np.random.randn(1000)]
samples_1M = [np.random.randn(1000000)]
F_10,F_1K,F_1M = ECDF(samples_10),ECDF(samples_1K),ECDF(samples_1M)
# BUG FIX: the original called F_100.plot and F_10K.plot, but those names are
# never defined (the objects are F_1K and F_1M) -> NameError.
F_10.plot(-10,10),F_1K.plot(-10,10),F_1M.plot(-10,10)
print(F_10(0.9),F_1K(0.9),F_1M(0.9))
###Output
_____no_output_____ |
ipython/luggage-compartment-door/luggage_compartment_door.ipynb | ###Markdown
1 数据预处理
###Code
# Load the data (tab-separated, UTF-8, Chinese column headers)
data_path = "../data/luggage_compartment_door.txt"
df = pd.read_csv(data_path, sep='\t', encoding='utf-8')
# Rename the Chinese headers to English snake_case column names
df = df.rename(columns={'物料编码': 'part_id', '物料描述': 'part_name',
                        '订货数': 'order_num', '缺件数': 'out_of_stock_num',
                        '受理数': 'delivery_num', '审核日期': 'date', '审核时间': 'time'})
df['date'] = pd.to_datetime(df['date'], format="%Y-%m-%d")
# Index by the audit date so the series can be resampled
df.set_index('date', inplace=True)
df['part_id'] = df['part_id'].astype('str')
# Weekly and monthly order totals
df_week = df.resample('W').sum()
df_month = df.resample('M').sum()
df_week.order_num.plot(figsize=(15, 8), grid=True)
df_month.order_num.plot(figsize=(12, 8), grid=True)
###Output
_____no_output_____ |
tests/a2c_test.ipynb | ###Markdown
Create environment
###Code
# Build and seed the CartPole environment; read its state/action dimensions
env_name = "CartPole-v0"
env = gym.make(env_name)
env.seed(0)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print("State size:", state_size, "\nAction size:", action_size)
###Output
State size: 4
Action size: 2
###Markdown
A2C Test
###Code
# Train A2C on CartPole: 5-step rollouts, 16 parallel envs, no GAE.
env_name = "CartPole-v0"
env = gym.make(env_name)
env.seed(0)
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
tmax = 5
n_episodes = (20_000)//tmax
n_env = 16
a2c_model = models.ActorCriticMLP(state_size, action_size, env.action_space, H=256)
# init agent:
agent = A2C(a2c_model,
env_name,
n_env=n_env,
use_gae=False
)
# 195 is the CartPole-v0 "solved" threshold
max_score = 195.
model_name = "a2c_{}".format(env_name)
# train the agent
scores, losses = agent.train(tmax, n_episodes, env, max_score, model_name)
# plot the training: smoothed score curve (window of 50 episodes)
x = np.arange(len(scores))
scores = mean_filter(scores, 50)
plt.plot(x, scores, label = "scores")
plt.show()
env.close()
###Output
../torch/csrc/utils/python_arg_parser.cpp:698: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, Number alpha)
###Markdown
Trained Agent Demonstration
###Code
# Render a few evaluation episodes with the trained policy
agent.test(env, render=True, n_episodes=4)
###Output
_____no_output_____
###Markdown
A2C Continuous Test
###Code
# Train A2C on Pendulum (continuous action space this time).
env_name = "Pendulum-v0"
env = gym.make(env_name)
env.seed(0)
state_size = env.observation_space.shape[0]
# Continuous control: action size comes from the Box shape, not .n
action_size = env.action_space.shape[0]
tmax = 5
n_episodes = 1000
n_env = 16
a2c_model = models.ActorCriticMLP(state_size, action_size, env.action_space, H=64)
lr = 1e-3
# init agent:
agent = A2C(a2c_model,
env_name,
n_env=n_env,
lr=lr,
critic_coef=0.1,
)
# Pendulum rewards are negative; -20 is the early-stop target
max_score = -20.
model_name = "a2c_{}".format(env_name)
# train the agent (det_test=True -> deterministic actions during evaluation)
scores, losses = agent.train(tmax, n_episodes, env, max_score, model_name, det_test=True)
# plot the training: smoothed score curve (window of 50 episodes)
x = np.arange(len(scores))
scores = mean_filter(scores, 50)
plt.plot(x, scores, label = "scores")
plt.show()
env.close()
###Output
_____no_output_____
###Markdown
Trained Agent Demonstration
###Code
# Render a few evaluation episodes with the trained policy
agent.test(env, render=True, n_episodes=4)
###Output
_____no_output_____ |
notebooks/1.0 - Data Preparation.ipynb | ###Markdown
Workbook 1.0 - Data Import, Cleaning, and Initial Feature Engineering Package Import
###Code
import pandas as pd
import re
import glob
import datetime
import numpy as np
###Output
_____no_output_____
###Markdown
Data Import We'll import our data in 2 groups: customer data and order data. We'll then join these tables on the owner ID, 'owner_no'.
###Code
# Read every customer CSV export (Cust*) and stack them into one DataFrame
cust_df = pd.concat(pd.read_csv(csv_path) for csv_path in glob.glob('Cust*'))
cust_df.head()
cust_df.isnull().sum()
# Read every order CSV export (Ord*) and stack them into one DataFrame
order_df = pd.concat(pd.read_csv(csv_path) for csv_path in glob.glob('Ord*'))
order_df.head()
order_df.isnull().sum()
###Output
_____no_output_____
###Markdown
Lastly, we'll join the data and affirm the shape of our merged dataset. We'll perform an inner join, to ensure that every row represents a customer who has placed an order within the parameters of our original order query.
###Code
# Inner join so every row is a customer who actually placed an order
df = pd.merge(order_df, cust_df, how='inner', on = 'owner_no')
df.shape
###Output
_____no_output_____
###Markdown
Handle NaN values There are lots of NaN values, particularly amongst those customers who have never made a donation. Those customers have no value for 'First Contribution Date' or 'Lifetime Giving', and there are many customers who don't have a valid "Prelim Capacity" rating.We'll impute a filler date-value for 'First Contribution Date', and impute a value of 0 for Prelim Capacity. It might also be acceptable to impute the mean or median of Prelim Capacity.
###Code
# Count the missing values per column after the merge
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Imputing Data
###Code
#Clean up OP Prelim Capacity: missing/Unknown ('U')/excluded ('X') all become 0
df['OP Prelim Capacity'] = df['OP Prelim Capacity'].fillna(0)
df['OP Prelim Capacity'] = df['OP Prelim Capacity'].replace('U',0)
df['OP Prelim Capacity'] = df['OP Prelim Capacity'].replace('X',0)
#Clean up First Contribution Date: 1900-01-01 is the "never contributed" sentinel
df['First Contribution Date'] = df['First Contribution Date'].fillna('01-01-1900')
#Impute total ticket paid amount with the column mean.
# NOTE(review): at this point the column still holds '$'-prefixed strings (they
# are stripped and converted to numbers a few cells below), so .mean() here may
# not be valid on this dtype — confirm, or move this line after the conversion.
df.tot_ticket_paid_amt = df.tot_ticket_paid_amt.fillna(df.tot_ticket_paid_amt.mean())
#Clean up Lifetime Giving: no recorded gifts means $0 lifetime giving
df['Lifetime Giving'] = df['Lifetime Giving'].fillna(0)
###Output
_____no_output_____
###Markdown
Data Type Cleanup We'll convert our data types to the appropriate Pandas data type.
###Code
#Create datetime data types; unparseable values become NaT rather than raising
df.order_dt=pd.to_datetime(df.order_dt, errors='coerce')
df['First Order Date'] = pd.to_datetime(df['First Order Date'], errors='coerce')
df['First Contribution Date'] = pd.to_datetime(df['First Contribution Date'], errors='coerce')
df.dtypes
#Create numerical data types
df.tot_ticket_paid_amt = df.tot_ticket_paid_amt.str.replace('$','')
df.tot_contribution_paid_amt = df.tot_contribution_paid_amt.str.replace('$','')
df['Lifetime Giving'] = df['Lifetime Giving'].str.replace('$','')
df.tot_ticket_paid_amt = pd.to_numeric(df.tot_ticket_paid_amt, errors='coerce')
df.tot_contribution_paid_amt = pd.to_numeric(df.tot_contribution_paid_amt, errors='coerce')
df['Lifetime Giving'] = pd.to_numeric(df['Lifetime Giving'], errors='coerce')
df['OP Prelim Capacity'] = pd.to_numeric(df['OP Prelim Capacity'], errors='coerce')
###Output
_____no_output_____
###Markdown
Additional Data Cleanup
###Code
# Drop helper column used only for counting in the source query
df = df.drop('Count of order_no', axis=1)
# Clean up column headers.
# FIX: the original mapping listed 'OP Prelim Capacity' twice ('prelim_capacity'
# then 'Prelim Capacity'); with duplicate dict keys only the last entry takes
# effect, so the dead first entry is removed. The column is 'Prelim Capacity'.
df = df.rename(
    columns={
        'First Order Date': 'first_order_dt',
        'First Contribution Date': 'first_cont_dt',
        'LTV Tkt Value': 'ltv_tkt_value',
        'OP Prelim Capacity': 'Prelim Capacity'
    }
)
###Output
_____no_output_____
###Markdown
Group column values Our data includes many deprecated values and similar values, so we'll group those values to reduce the cardinality of those features. Grouping our categorical variables will reduce the dataset's dimensionality and improve performance. Many of these values have a very similar role from the consumer perspective, or have replaced one another over time.
###Code
#Clean up channel values: collapse third-party sellers, web, and mobile channels
channel_values = dict.fromkeys([
'TP Box Office', 'TP Phone', 'TP - Default Channel','TodayTix','Telemarketing','FringeArts'], '3rd Party')
channel_values['Web'] = 'Online'
channel_values['Web Donations'] = 'Online'
channel_values['TP Mobile'] = 'Mobile'
df.channel_desc = df.channel_desc.replace(channel_values)
#Clean up delivery types: collapse the many raw values into Will Call / Digital / Mail
willcall_values = dict.fromkeys(
['Hold at Box Office',
'BO - Window',
'OP - Will Call',
'Box Office Window',
'OP - Hold at Box Office',
'Information Table',
'VIP',
'Guest Services',
'Hand Delivered',
'Press'],
'Will Call'
)
digital_values = dict.fromkeys(
['Email Print at Home',
'Mobile App Ticket'],
'Digital')
mail_values = dict.fromkeys([
'OP - US Mail',
'BO - US Mail',
'U.S. Mail',
'Fedex-2 bus. day ($25)'],
'Mail')
# Merge the three maps into one replacement dictionary
willcall_values.update(digital_values)
willcall_values.update(mail_values)
df.delivery_desc = df.delivery_desc.replace(willcall_values)
#Clean up MOS (method-of-sale) values: group into Internal vs External sellers
internal_values = dict.fromkeys([
'Ticketing Web Stbs',
'Ticketing Web Subs',
'Web Advance Rush',
'Web Allocation',
'Web Allocation 2',
'Web Allocation 3',
'Web Allocation 4'],
'Internal')
external_values = dict.fromkeys([
'TP Box Office',
'xxTP App/Web Sales',
'xx1 TP KC BO',
'Curtis Subscription',
'FringeArts',
'TodayTix',
'xxTP Phone Sales',
'TP Phone',
'TP Web OP Student Advance (NF)',
'xxTP Web SPCl Allocation',
'TP Phone OP Subs',
'TP Phone Special Offer',
'xxTP Exchanges OP Phone'],
'External')
# Merge the two maps into one replacement dictionary
internal_values.update(external_values)
df.MOS_desc = df.MOS_desc.replace(internal_values)
def facilities(facility):
    """Collapse raw facility descriptions into a handful of venue categories."""
    keep_as_is = {'Academy of Music', 'Independence Mall'}
    fundraiser_venues = {
        'Academy of Music Ballroom',
        'The Loft on Tier 1',
        'Garden Restaurant',
        'Tier 2 Lounge',
        'Union League of Philadelphia',
        'Estia Restaurant',
        'Hamilton Garden',
        'Creperie Beau Monde',
    }
    small_venues = {
        'The Barnes Foundation',
        'Theatre of Living Arts',
        'Suzanne Roberts',
        'The Wilma Theater',
        'Philadelphia Museum of Art',
        'Prince Theater',
        'FringeArts',
        'Field Concert Hall',
        'TLA',
    }
    if facility in keep_as_is:
        return facility
    if facility in ('Perelman Theater', 'Perelman'):
        return 'Perelman'
    if facility in fundraiser_venues:
        return 'Fundraiser'
    if facility in small_venues:
        return 'Small venue'
    return 'Other'
# Apply the grouping row-wise to replace the raw facility descriptions
df.facility_desc = df.facility_desc.apply(facilities)
###Output
_____no_output_____
###Markdown
Feature Engineering We'll add 5 features: A Boolean column that identifies Board members, major donor prospects and major donors. A calculated field for the number of days between a customer's first order and their first contribution. A Boolean field identifying an order in which a customer makes their first contribution. A Boolean field identifying an order in which a customer makes a contribution after their first ticket order. A calculated field that calculates the expanding sum of tickets purchased for a given customer over time.
###Code
#Identify customers in the BRD, MGP, and LCM constituencies
drops_df = pd.read_excel('drops.xlsx')
prosp_brd_values = list(drops_df['customer_no'])
# isin() is a vectorised membership test; the original row-wise
# apply(lambda x: x in list) rescanned the whole list for every row (O(n*m)).
df['prospect_board'] = df.owner_no.isin(prosp_brd_values).astype(int)
# Days between a customer's first order and first contribution.
# -100 marks the imputed "never contributed" sentinel date (1900-01-01);
# -1 marks a contribution that predates the first order.
def days_to_donation(order_dt, cont_dt):
    # Both arguments are pandas Timestamps (see the pd.to_datetime step above).
    # BUG FIX: the original compared cont_dt == datetime.date(1900, 1, 1); a
    # Timestamp/datetime never compares equal to a datetime.date, so the
    # sentinel branch could never fire and never-contributed customers fell
    # through to the -1 branch.
    if cont_dt == pd.Timestamp(1900, 1, 1):
        result = -100
    elif cont_dt < order_dt:
        result = -1
    else:
        result = (cont_dt - order_dt).days
    return result
# Row-wise days between first order and first contribution
df['days_to_donation'] = df.apply(lambda x: days_to_donation(x.first_order_dt, x.first_cont_dt), axis=1)
#Add classification target for customers who made a donation WITH their first order
df['first_cont_order'] = np.where(df.first_cont_dt == df.first_order_dt, 1, 0)
##Add classification target for customers who made a donation AFTER their first order
df['first_cont_after'] = np.where(df.first_cont_dt > df.first_order_dt, 1, 0)
#Add expanding sum for ticket value: running total of ticket spend per customer
df['rolling_tkt_sum'] = df.groupby('owner_no')['tot_ticket_paid_amt'].cumsum()
###Output
_____no_output_____
###Markdown
Drop Categorical Variables Summarized by Other Variables We can drop a few more variables that are summarized by other dimensions, and clean up our data types.
###Code
# Drop columns whose information is summarized by other dimensions
df = df.drop(['postal_code','state_desc','prod_season_desc'], axis=1)
#Create categorical data types for the grouped descriptor columns
df.channel_desc = df.channel_desc.astype('category')
df.MOS_desc = df.MOS_desc.astype('category')
df.delivery_desc = df.delivery_desc.astype('category')
df.facility_desc = df.facility_desc.astype('category')
# Keep only the numeric prefix of the geo code (text before the first '-')
df.geo_area_desc = df.geo_area_desc.str.split('-').str[0]
df.geo_area_desc = df.geo_area_desc.astype('int')
###Output
_____no_output_____
###Markdown
Before creating our dummy variables, let's verify that the appropriate columns have the "category" datatype.
###Code
# Verify the descriptor columns now carry the "category" dtype
df.dtypes
###Output
_____no_output_____
###Markdown
Dummy Variables and Aggregating Data by Customer-Order Date Combination Our data currently splits each item from a given order into a separate row. Therefore, we will need to aggregate our data by customer ID and order date. This may aggregate orders on the same date for a given customer, but we can assume that those orders occur within a small enough timeframe that they are effectively part of the same action.First, we will create dummy variables for our categorical variables, then aggregate on customer ID and order number.
###Code
#Create dummy variables
df = pd.get_dummies(df)
df.columns
cols = list(df.columns)
cols.remove('num_seats_pur')
sum_dict = {'num_seats_pur':'sum'}
agg_dict = dict.fromkeys(cols,'max')
df = df.groupby(['owner_no','order_dt']).agg(agg_dict)
pd.set_option('display.max_columns',50)
df.reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Export Data Lastly, let's export the data to csv.
###Code
# Persist the processed dataset for the modelling notebooks
df.to_csv('1.1 Processed Data.csv')
###Output
_____no_output_____ |
ipyhton_notebook_analysis_data_science/2. Housing Price Prediction Project.ipynb | ###Markdown
House Prices PredictionThis Kaggle project involves predicting the sales price for each house. For each Id in the test set, the value of the SalePrice variable must be predicted. Metric: Submissions are evaluated on Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed sales price.
###Code
import pandas as pd # importing pandas which is high-performance, easy-to-use data structures and data analysis framework
import numpy as np # importing numpy which is fundamental package for scientific computing with Python
%matplotlib inline
import matplotlib # importing plotting library for the Python programming language
import matplotlib.pyplot as plt
import seaborn as sns # importing a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
import warnings
warnings.filterwarnings("ignore")
data_dir = "../data_ds/"
train = pd.read_csv(data_dir + 'train.csv') # read train set and store it in a dataframe
test = pd.read_csv(data_dir + 'test.csv') # read train set and store it in a dataframe
#descriptive statistics summary
train['SalePrice'].describe() # obtaining descriptive statistics that summarize the house saleprice
train.head() # displaying the first couple of rows for train set
test.head() # displaying the first couple of rows for test set
train.info() # displaying information of train set
# The Id column is only a row identifier; keep a copy for building the
# submission file later, then drop it so it cannot leak into the model.
print("The shape of train data before dropping Id feature is : {} ".format(train.shape))
print("The shape of test data before dropping Id feature is : {} ".format(test.shape))

# Saving the 'Id' column
train_ID = train['Id']
test_ID = test['Id']

# Dropping the 'Id' column because it's unnecessary for the prediction process
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)

# Printing the dimensions of train set and test set again
# BUG FIX: the messages previously read "before after Id feature".
print("\nThe shape of train data after dropping Id feature is : {} ".format(train.shape))
print("The shape of test data after dropping Id feature is : {} ".format(test.shape))
# Visualize GrLivArea vs SalePrice, then drop the two huge-but-cheap outliers.
fig, ax = plt.subplots() # initialize plot and axes
ax.scatter(x = train['GrLivArea'], y = train['SalePrice']) # plotting scatter-plot between Ground Living Area and Sale Price
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
#Removing outliers: very large living area sold at a very low price
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Checking the scatter-plot between Ground Living Area and Sale Price again
fig, ax = plt.subplots()
ax.scatter(train['GrLivArea'], train['SalePrice'])
plt.ylabel('SalePrice', fontsize=13)
plt.xlabel('GrLivArea', fontsize=13)
plt.show()
# Examine how far the target deviates from normality before transforming it.
# Printing the skewness and kurtosis of the distribution of SalePrice
print("Skewness: %f" % train['SalePrice'].skew())
print("Kurtosis: %f" % train['SalePrice'].kurt())
from scipy import stats # importing statistical module from Scipy which is a framework for scientific computing and technical computing
from scipy.stats import norm, skew # import functions for computing skew and norm from scipy stats module
sns.distplot(train['SalePrice'] , fit=norm); # plot a normal distribution of SalePrice
# Obtaining the mean and standard deviation of House SalePrice
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# Plotting the distribution of House SalePrice
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
# Obtaining also the Probability Plot of the SalePrice Distribution
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
train["SalePrice"] = np.log1p(train["SalePrice"]) # computing the natural logarithm of House Sale Price i.e. log(1 + x)
#Check the new distribution
sns.distplot(train['SalePrice'] , fit=norm); # plot a normal distribution of SalePrice after applying natural logarithm
# Obtaining the mean and standard deviation of House SalePrice after applying natural logarithm
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# Plotting the distribution of House SalePrice after applying natural logarithm
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
# Obtaining also the Probability Plot of the SalePrice Distribution after applying natural logarithm
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
# Combine train and test so every cleaning/encoding step is applied uniformly.
ntrain = train.shape[0] # obtaining the number of examples of train set
ntest = test.shape[0] # obtaining the number of examples of test set
y_train = train.SalePrice.values # storing the Sale Price values in an array
# y_test = test.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True) # concatenating train and test set data with indexes reset
all_data.drop(['SalePrice'], axis=1, inplace=True) # Dropping Sale Price from the concatenated data
print("all_data size is : {}".format(all_data.shape)) # Printing the dimensions of the concatenated data
# Rank features by their share of missing values.
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100 # calculating for each feature the percentage of rows with null data from the concatenated data
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30] # extracting the top 30 features with the highest % of null data by removing features with no null data and storing it in a list
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na}) # converting the above list into a pandas dataframe
missing_data.head(20) # displaying the top 20 features with the highest % of null data
# Plotting percentage of missing data of the features from the concatenated dataset
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='90')
sns.barplot(x=all_data_na.index, y=all_data_na)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
# Computing and plotting pairwise correlation of features from the train set
corrmat = train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
# Plotting pairwise correlation of the top 10 features from the train set
k = 10 #number of features to consider
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index # the k features most correlated with SalePrice
cm = np.corrcoef(train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
# Plotting pairwise relationships scatterplot among some of the features in a train set
sns.set()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(train[cols], height = 2.5)
plt.show();
all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median()))
# Replacing all the NaN/NA values from some of the features with 'None'
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median())) # grouping Neighborhood with linear feet of street connected to property and filling the NaN/NA values with median
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None') # Replacing all the NaN/NA values from some of the features with 'None'
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0) # Replacing all the NaN/NA values from some of the features with zero
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0) # Replacing all the NaN/NA values from some of the features with zero
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None') # Replacing all the NaN/NA values from some of the features with 'None'
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None") # Replacing all the NaN/NA values from Masonry veneer type with 'None'
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0) # Replacing all the NaN/NA values from Masonry veneer area with zero
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0]) # replacing all the NaN/NA values from zoning classification of the sale with one of the mode
all_data = all_data.drop(['Utilities'], axis=1) # droping the Utilities column
all_data["Functional"] = all_data["Functional"].fillna("Typ") # replacing all the NaN/NA values from home functionality with Typical Functionality
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0]) # replacing all the NaN/NA values from Electrical system with one of the mode
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0]) # replacing all the NaN/NA values from Kitchen quality with one of the mode
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0]) # replacing all the NaN/NA values from Exterior covering on house with one of the mode
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0]) # replacing all the NaN/NA values from Exterior covering on house (if more than one material) with one of the mode
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0]) # replacing all the NaN/NA values from Type of sale with one of the mode
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None") # replacing all the NaN/NA values from MSSubClass with one of the mode
#Check remaining missing values if any
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
# Some numeric-coded features are really categorical; convert them to strings
# so the label encoding below treats them as categories, not magnitudes.
# Changing the type of data from integer to object
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
# Changing the data type of "overall condition of the house" into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
# Changing the data type of "Year and month sold" into a categorical variable
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
from sklearn.preprocessing import LabelEncoder # import label encoder from scikit-learn

# Ordinal/categorical features to integer-encode with LabelEncoder.
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
        'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
        'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
        'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
        'YrSold', 'MoSold')
# Applying LabelEncoder to categorical features (one fresh encoder per column).
# Loop indentation restored -- it was lost in the notebook export.
for c in cols:
    lbl = LabelEncoder()
    lbl.fit(list(all_data[c].values))
    all_data[c] = lbl.transform(list(all_data[c].values))

# Printing the dimensions of the concatenated dataset
print('Shape all_data: {}'.format(all_data.shape))
# Adding total square feet area from basement, 1st floor, and 2nd floor into a single feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index # extracting the numerical features
# Checking the skew of all the numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(10)
# Keep only genuinely skewed features (|skew| > 0.75) for transformation.
# BUG FIX: masking the whole DataFrame with abs(skewness) > 0.75 only NaN-ed
# the non-skewed rows without dropping them, so the printed count was wrong
# and boxcox1p was applied to every numeric feature. Filter the rows instead.
skewness = skewness[abs(skewness['Skew']) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15 # fixed Box-Cox lambda
for feat in skewed_features:
    all_data[feat] = boxcox1p(all_data[feat], lam)
all_data = pd.get_dummies(all_data) # Convert categorical variable into dummy/indicator variables
print(all_data.shape)
# Split the fully-processed data back into its train and test partitions.
train = all_data[:ntrain]
test = all_data[ntrain:]
y_train # (display) the log-transformed training target
# 1st Model
# Baseline: ordinary least squares on standardized features.
# NOTE(review): the RMSE below is computed on the training data itself,
# so it is an optimistic estimate of generalization error.
from sklearn.linear_model import LinearRegression # importing module from scikit-learn for Ordinary least squares Linear Regression
from sklearn.metrics import mean_squared_error # importing module from scikit-learn for Mean squared error regression loss
from sklearn.preprocessing import StandardScaler # importing module from scikit-learn for standardizing features by removing the mean and scaling to unit variance
from sklearn.pipeline import Pipeline # importing module from scikit-learn for sequentially applying a list of transforms and a final estimator
scaler = StandardScaler() # initializing standard scaler
train_std = scaler.fit_transform(train) # standardizing train set features by removing the mean and scaling to unit variance
lin_reg = LinearRegression() # initializing linear regression module
lin_reg.fit(train_std, y_train) # fitting linear regression on the scaled train set and train labels
house_price_predictions = lin_reg.predict(train_std) # predicting House Sale Price using the linear regression on the train set data
lin_mse = mean_squared_error(y_train, house_price_predictions) # computing mean squared error regression loss on the predicted house sale prices
lin_rmse = np.sqrt(lin_mse) # computing the root mean squared error regression loss on the predicted house sale prices
lin_rmse # displaying root mean squared error regression loss
from sklearn.tree import DecisionTreeRegressor # importing decision tree regressor from scikit-learn
tree_reg = DecisionTreeRegressor(random_state=42) # initializing decision tree regressor
tree_reg.fit(train_std, y_train) # fitting decision tree regressor on the scaled train set and train labels
house_price_predictions = tree_reg.predict(train_std) # predicting House Sale Price on the train set data
tree_reg_mse = mean_squared_error(y_train, house_price_predictions) # training MSE of the decision tree
# BUG FIX: previously computed np.sqrt(lin_mse) (the linear model's MSE) by
# copy-paste, so the reported RMSE belonged to the wrong model.
tree_reg_rmse = np.sqrt(tree_reg_mse) # training RMSE of the decision tree
tree_reg_rmse # displaying root mean squared error regression loss
from sklearn.ensemble import RandomForestRegressor # importing random forest regressor from scikit-learn
random_forest_reg = RandomForestRegressor(random_state=42) # initializing forest regressor
random_forest_reg.fit(train_std, y_train) # fitting forest regressor on the scaled train set and train labels
house_price_predictions = random_forest_reg.predict(train_std) # predicting House Sale Price on the train set data
random_forest_reg_mse = mean_squared_error(y_train, house_price_predictions) # training MSE of the random forest
# BUG FIX: previously computed np.sqrt(lin_mse) by copy-paste; report the
# random forest's own RMSE.
random_forest_reg_rmse = np.sqrt(random_forest_reg_mse) # training RMSE of the random forest
random_forest_reg_rmse # displaying root mean squared error regression loss
from sklearn.ensemble import AdaBoostRegressor # importing adaBoost regressor from scikit-learn
# AdaBoost over 300 depth-4 decision trees.
ada_tree_reg = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                                 n_estimators=300, random_state=42)
ada_tree_reg.fit(train_std, y_train) # fitting adaBoost regressor on the scaled train set and train labels
house_price_predictions = ada_tree_reg.predict(train_std) # predicting House Sale Price on the train set data
ada_tree_reg_mse = mean_squared_error(y_train, house_price_predictions) # training MSE of the AdaBoost model
# BUG FIX: previously computed np.sqrt(lin_mse) by copy-paste; report
# AdaBoost's own RMSE.
ada_tree_reg_rmse = np.sqrt(ada_tree_reg_mse) # training RMSE of the AdaBoost model
ada_tree_reg_rmse # displaying root mean squared error regression loss
def display_scores(scores):
    """Print a cross-validation score array with its mean and standard deviation."""
    # Body indentation restored -- it was lost in the notebook export.
    print("Scores:", scores)
    print("Mean:", scores.mean())
    print("Standard deviation:", scores.std())
# 10-fold CV gives a more honest error estimate than training RMSE.
from sklearn.model_selection import cross_val_score # importing cross-validation from scikit-learn for evaluation
scores = cross_val_score(ada_tree_reg, train_std, y_train,
scoring="neg_mean_squared_error", cv=10) # initializing cross-validation using the aforementioned adaBoost regressor and 10 fold cross-validation strategy
ada_tree_reg_scores = np.sqrt(-scores) # convert the negated MSE scores back to per-fold RMSE
display_scores(ada_tree_reg_scores) # displaying scores using the function above
from sklearn.svm import SVR # importing Support Vector Regressor from scikit-learn
svm_reg = SVR(kernel="linear") # initializing Support Vector Regressor with a linear kernel
svm_reg.fit(train_std, y_train) # training Support Vector Regressor on the scaled train set and train labels
housing_predictions = svm_reg.predict(train_std) # predicting House Sale Price on the train set data
svm_reg_mse = mean_squared_error(y_train, housing_predictions) # training MSE of the SVR
# BUG FIX: previously computed np.sqrt(lin_mse) by copy-paste; report the
# SVR's own RMSE.
svm_reg_rmse = np.sqrt(svm_reg_mse) # training RMSE of the SVR
svm_reg_rmse # displaying root mean squared error regression loss
# Tune the random forest with an exhaustive grid search over two sub-grids.
from sklearn.model_selection import GridSearchCV # importing Grid Search module from scikit-learn for performing exhaustive search over specified parameter values for an estimator
param_grid = [
# trying combinations of hyperparameters with bootstrap set as False
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
random_forest_reg = RandomForestRegressor(random_state=42) # initializing Random Forest Regressor
# training across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(random_forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error', return_train_score=True) # initialising Gridsearch with the Random Forest Regressor and negative mean squared error as scoring metric; using 5-fold cross validation
grid_search.fit(train_std, y_train) # performing exhaustive search over specified parameter values which gives the minimum negative mean squared error for a Random Forest Regressor
grid_search.best_params_ # parameter setting with the best CV score
grid_search.best_estimator_ # fitted estimator with the best CV score
grid_search.cv_results_ # full cross-validation results
cvres = grid_search.cv_results_
# RMSE for every hyperparameter combination tried by the grid search.
# Loop indentation restored -- it was lost in the notebook export.
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
feature_importances = grid_search.best_estimator_.feature_importances_ # importances from the best forest
feature_importances[:10] # displaying the first 10 features' importance metrics
final_model = grid_search.best_estimator_ # best model found by the grid search
final_predictions = final_model.predict(train_std) # predictions on the (seen) training data
final_mse = mean_squared_error(y_train, final_predictions) # training MSE
final_rmse = np.sqrt(final_mse) # training RMSE
final_rmse # displaying root mean squared error regression loss
# Randomized search samples 10 configurations from the distributions below.
from sklearn.model_selection import RandomizedSearchCV # importing module from scikit-learn for performing Randomized search on hyper parameters
from scipy.stats import randint # importing module from scipy to generate discrete random values
param_distribs = {
# trying combinations of hyperparameters
'n_estimators': randint(low=1, high=200),
'max_features': randint(low=1, high=8),
}
random_forest_reg = RandomForestRegressor(random_state=42) # initializing Random Forest Regressor
rnd_search = RandomizedSearchCV(random_forest_reg, param_distributions=param_distribs,
n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42) # initialising Randomized Search with the Random Forest Regressor and negative mean squared error as scoring metric; using 5-fold cross validation and 10 parameter settings that are sampled
rnd_search.fit(train_std, y_train) # performing random search over specified parameter values which gives the minimum negative mean squared error for a Random Forest Regressor
cvres = rnd_search.cv_results_
# RMSE for each sampled parameter combination.
# Loop indentation restored -- it was lost in the notebook export.
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
feature_importances = rnd_search.best_estimator_.feature_importances_ # importances from the best sampled forest
feature_importances[:10] # displaying the first 10 features' importance metrics
final_model = rnd_search.best_estimator_ # best model found by the randomized search
final_predictions = final_model.predict(train_std) # predictions on the (seen) training data
final_mse = mean_squared_error(y_train, final_predictions) # training MSE
final_rmse = np.sqrt(final_mse) # training RMSE
final_rmse # displaying root mean squared error regression loss
# Stochastic Gradient Descent regression with elastic-net regularization.
from sklearn.linear_model import SGDRegressor # importing Stochastic Gradient Descent regressor module from scikit-learn
sgd_reg = SGDRegressor(max_iter=50, penalty='elasticnet', eta0=0.1, random_state=42) # initializing Stochastic Gradient Descent regressor with 50 maximum epochs, a learning rate of 0.1, and elasticnet as regularizing method
sgd_reg.fit(train_std, y_train.ravel()) # training the Stochastic Gradient Descent regressor on the scaled train set and train labels
# sgd_reg.intercept_, sgd_reg.coef_
final_predictions = sgd_reg.predict(train_std) # predicting House Sale Price using the Stochastic Gradient Descent regressor on the train set data
sgd_reg_mse = mean_squared_error(y_train, final_predictions) # computing mean squared error regression loss on the predicted house sale prices
sgd_reg_rmse = np.sqrt(sgd_reg_mse) # computing root mean squared error regression loss on the predicted house sale prices
sgd_reg_rmse # displaying the root mean squared error regression loss
from sklearn.preprocessing import PolynomialFeatures # importing module from scikit-learn for generating polynomial and interaction features
poly_features = PolynomialFeatures(degree=2, include_bias=False) # degree-2 features without the bias column
X_poly = poly_features.fit_transform(train_std) # expand the standardized features
X_poly[0] # displaying the coefficients of the first sample
lin_reg = LinearRegression() # initializing linear regression
lin_reg.fit(X_poly, y_train) # fitting linear regression on the transformed train set and train labels
lin_reg.intercept_, lin_reg.coef_ # displaying the regression intercept and coefficients
final_predictions = lin_reg.predict(X_poly) # predicting House Sale Price on the expanded train set
lin_reg_mse = mean_squared_error(y_train, final_predictions) # training MSE of the polynomial model
# BUG FIX: previously computed np.sqrt(sgd_reg_mse) (the SGD model's MSE) by
# copy-paste, so the reported RMSE belonged to the wrong model.
lin_reg_rmse = np.sqrt(lin_reg_mse) # training RMSE of the polynomial model
lin_reg_rmse # displaying the root mean squared error regression loss
# from sklearn.preprocessing import StandardScaler
# from sklearn.pipeline import Pipeline
# for degree in (300, 2, 1):
# polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
# std_scaler = StandardScaler()
# lin_reg = LinearRegression()
# polynomial_regression = Pipeline([
# ("poly_features", polybig_features),
# ("std_scaler", std_scaler),
# ("lin_reg", lin_reg),
# ])
# polynomial_regression.fit(train_std, y_train)
# final_predictions = polynomial_regression.predict(train_std)
# polynomial_regression_mse = mean_squared_error(y_train, final_predictions)
# polynomial_regression_rmse = np.sqrt(sgd_reg_mse)
# polynomial_regression_rmse
# from sklearn.linear_model import Ridge
# ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42)
# ridge_reg.fit(train_std, y_train)
# final_predictions = polynomial_regression.predict(train_std)
# ridge_reg_mse = mean_squared_error(y_train, final_predictions)
# ridge_reg_rmse = np.sqrt(sgd_reg_mse)
# ridge_reg_rmse
###Output
_____no_output_____ |
jason-paleography.ipynb | ###Markdown
0. Gathering the Data. In this preliminary section, we will gather all of the cuneiform sign transliterations from the JSON files in our dataset. Then we will consolidate them into a data frame and match each sign value with its sign name. (Do we need to discuss the basics of cuneiform transliteration or is it assumed that our audience is familiar with it?) 0.1: OGSL. Now, we will load a map from sign value to sign name to use on the signs in our texts. The OGSL (Oracc Global Sign List) is the Oracc project's register of cuneiform sign names and their readings, published at http://oracc.org/ogsl/.
###Code
# Load the OGSL sign list; its 'index' maps each sign reading to a sign name.
file_ogsl = codecs.open('ogsl-sl.json','r','utf-8')
ogsl = json.load(file_ogsl)
sign_index = ogsl['index']
_____no_output_____
###Markdown
0.2: Collect the Text Signs. The following code parses the JSON files of the ORACC texts and collects each sign transliteration. Since different signs have different types of reading, they are rendered differently in the JSON file and we must take care to recognize each sign reading type. The types of signs and their representation in the JSON files: Syllable - The reading of a sign as a syllable is rendered with a 'v' key. Logogram - The reading of a sign as a logogram, i.e. it represents a word in itself or as part of a complex of signs that represents a single word, is written in capital letters and with an 's' key. Numerical - A sign representing a number (or personal name determinative) has an extra key called 'sexified'. This gives information on the number sign's wedge structure. In addition, a modified sign can be any of the three types above, but written with a nonstandard paleography (e.g. a diagonal wedge is incised in the clay instead of a horizontal). These are the signs we want to examine. They have extra data given under the 'mods' key.
###Code
def process_signs(sign_data):
    """Extract the reading and modifier info for one sign of a word's 'gdl' list.

    Returns a dict with:
      'b'           : the sign's reading -- syllable ('v'), logogram ('s'),
                      or a number's 'sexified'/'form' rendering,
      one key per paleographic-modifier entry found under 'mods',
      'break'       : damage state, when present,
      'sign_loc_id' : the sign's id in the text ('no-id' when absent).

    Body indentation restored -- it was lost in the notebook export.
    """
    sign_info = {}
    if 'v' in sign_data: # standard syllabic reading
        sign_info['b'] = sign_data['v']
    if 's' in sign_data: # element of a logogram
        sign_info['b'] = sign_data['s']
    if 'n' in sign_data: # numerical sign
        sign_info['b'] = sign_data.get('sexified', sign_data.get('form', 'noform?'))
    if 'mods' in sign_data: # nonstandard paleography
        for mod in sign_data['mods']:
            sign_info.update(mod)
    if 'break' in sign_data:
        sign_info['break'] = sign_data['break']
    sign_info['sign_loc_id'] = sign_data.get('id', 'no-id')
    return sign_info
# Parse every ORACC JSON text and collect one record per word (all_words)
# and one record per sign occurrence (all_signs).
# Nested-loop indentation restored -- it was lost in the notebook export.
# An obsolete commented-out branch for 'c' (phrase) nodes, noted in the
# original as "no longer used", has been removed.
types = set()
all_signs = []
all_words = []
for fname in os.listdir('sargonletters/corpusjson'):
    with codecs.open('sargonletters/corpusjson/'+fname,'r','utf-8') as f:
        try:
            j = json.load(f)
        except ValueError:
            print('Could not load: ' + fname)
            continue
    text_id = j['textid']
    for a in j['cdl'][0]['cdl']:
        if a.get('type','') == 'discourse':
            for b in a['cdl']:
                if b.get('type','') == 'sentence':
                    line_label = ''
                    for c in b['cdl']:
                        if c.get('node','') == 'd': # the label for the line, e.g. "o ii 3"
                            line_label = c.get('label','nolabel')
                        if c.get('node','') == 'l': # a regular word in a line
                            if c.get('tail-sig','') != '': # an extra word entry -- skip
                                continue
                            form = c['f']['form']
                            frag = c['frag']
                            ref = c['ref']
                            cf = c['f'].get('cf','no-cf')
                            gw = c['f'].get('gw','no-gw')
                            pos = c['f']['pos']
                            sense = c['f'].get('sense','no-sense')
                            norm = c['f'].get('norm','no-norm')
                            epos = c['f'].get('epos','no-epos')
                            word_sign_tot = len(c['f']['gdl'])
                            word_info = {'file':fname,'line_label':line_label,'form': form,'frag': frag, 'text_id': text_id, 'ref': ref,'cf': cf,'gw': gw,'pos': pos,'epos':epos,'sense':sense,'word_sign_tot':word_sign_tot,'norm':norm}
                            all_words.append(word_info)
                            # Walk the word's sign list; semantic determinatives
                            # and logogram groups nest their signs one or two
                            # levels deeper.
                            for sign_data in c['f']['gdl']:
                                if sign_data.get('det','') == 'semantic':
                                    for sd in sign_data['seq']:
                                        if sd.get('gg','') == 'logo':
                                            for g in sd['group']:
                                                sign_info = process_signs(g)
                                                sign_info.update(word_info)
                                                all_signs.append(sign_info)
                                        else:
                                            sign_info = process_signs(sd)
                                            sign_info.update(word_info)
                                            all_signs.append(sign_info)
                                elif sign_data.get('gg','') == 'logo':
                                    for g in sign_data['group']:
                                        if g.get('det','') == 'semantic':
                                            for sd in g['seq']:
                                                if sd.get('gg','') == 'logo':
                                                    for gg in sd['group']:
                                                        sign_info = process_signs(gg)
                                                        sign_info.update(word_info)
                                                        all_signs.append(sign_info)
                                                else:
                                                    sign_info = process_signs(sd)
                                                    sign_info.update(word_info)
                                                    all_signs.append(sign_info)
                                        else:
                                            sign_info = process_signs(g)
                                            sign_info.update(word_info)
                                            all_signs.append(sign_info)
                                else:
                                    sign_info = process_signs(sign_data)
                                    sign_info.update(word_info)
                                    all_signs.append(sign_info)
print('done')
###Output
Could not load: P314095.json
done
###Markdown
Now, we form our Data Frame where each row contains information on every sign in the corpus. Further limitations on which signs are significant to our purposes will be made later, but for now we will eliminate all of the signs which are labelled as "missing," (i.e. reconstructed) because any information based on their paleography or orthography cannot be ascertained.
###Code
# One row per sign occurrence; use '' instead of NaN for easier string ops.
df = pd.DataFrame(all_signs)
df = df.fillna('')
df
###Output
_____no_output_____
###Markdown
1. Setting Up the Data for Clustering. The general goal is to assign a vector to each text that reflects the usage of variant orthography and paleography. Paleography - Any one set of wedges that we classify as a sign can be impressed on the clay in different ways. For example, a wedge can be missing or one can be added. Also, the tilt of a wedge can vary. These are the features we want to examine in order to see if one text prefers one sign writing or another. Orthography - Due to the homophony of the cuneiform writing system, one syllable can be written with many signs. For example, 'li' can be written with the LI-sign but also with the NI-sign, in which case it would be transliterated as li2. Other variables can be applied to a text as attributes in its vector. (What are these? We talked about things like provenance, city information, scribe information. Also, if we apply different types of variables how can we use a clustering algorithm to treat these vector components as a different entity?) This section therefore contains two subsections. One groups the diagnostic signs with or without modifications per text. The other discovers the homophonous signs used throughout the corpus and groups different usages per text. First of all, let's create more columns in the data frame to aid us. mods_str - Since the data contains three columns currently with information on variable paleography, it would help us to consolidate them into one column. str_part and num_part - In order to determine which signs share a syllabic value, it will be useful to separate the transliterated readings into their string components and numerical components. Once we do this, we can group rows with the same str_part and count up the different usages of homophonous signs.
###Code
file_names = df['file'].unique() # distinct source-file names in the corpus
df['sign_form'] = df['b'].apply(lambda x: sign_index.get(x.lower(),'?')) # map each reading to its OGSL sign name ('?' when unknown)
df['mods_str'] = df['a'] + '.' + df['f'] + '.' + df['m'] # consolidate the three modifier columns into one dotted string
import re
def get_num_part(s):
    """Return the subscript index of a sign reading as a string of ASCII digits.

    e.g. 'li₂' -> '2', 'ša₁₂' -> '12'.  A reading with no subscript defaults
    to '1', since an unindexed reading is the first homophone.
    """
    try:
        n = re.findall(r'[₀₁₂₃₄₅₆₇₈₉]+',s)[0]
        n = n.replace('₀','0').replace('₁','1').replace('₂','2').replace('₃','3').replace('₄','4')
        n = n.replace('₅','5').replace('₆','6').replace('₇','7').replace('₈','8').replace('₉','9')
    except IndexError:
        # BUG FIX: previously returned the int 1 here, so the num_part column
        # mixed str and int values and 'li' vs 'li₁' counted as two distinct
        # indices in nunique().  Also narrowed the bare except to the
        # IndexError raised by [0] when the regex finds no subscript.
        n = '1'
    return n
def get_str_part(s):
    """Strip the subscript index off a sign reading, keeping its alphabetic core.

    e.g. 'li₂' -> 'li'.  If the reading contains nothing matchable (pure
    subscript or unexpected symbols), the input is returned unchanged.
    """
    try:
        return re.findall(r'[a-zA-ZšŠṣṢṭṬʾ \(\)0-9]+',s)[0]
    except:
        return s
# Split each reading into its alphabetic core and its subscript index so that
# homophonous spellings (same str_part, different num_part) can be grouped.
df['str_part'] = df['b'].apply(lambda x: get_str_part(x))
df['num_part'] = df['b'].apply(lambda x: get_num_part(x))
df
###Output
_____no_output_____
###Markdown
Before we go into the process let's create some preliminary output for various purposes
###Code
# Keep only sign instances carrying at least one paleographic modification
# ('..' is the empty a.f.m key), then export a trimmed column set for review.
df_modonly = df[df['mods_str'] != '..']
df_modonly_file = df_modonly[['sign_form','text_id','b','sign_loc_id','f','a','m','form','frag','ref','break','cf','gw','pos','epos','sense','line_label']]
df_modonly_file.to_csv('output/sign_mods_list.csv',encoding='utf-8')
df_modonly_file
def loc_and_count(loc_id,line_label):
    """Format parallel lists of sign locations and line labels.

    Returns [joined, count] where joined is 'loc (label),loc (label),...'
    and count is the number of locations processed.
    """
    formatted = []
    for idx, loc in enumerate(loc_id):
        formatted.append(loc + ' (' + line_label[idx] + ')')
    return [','.join(formatted), len(formatted)]
# Group modified signs by (sign, f, a, m) and list every location with its
# line label, e.g. 'P1234.o.3 (o 3)'.
df_modsagg = pd.DataFrame(df_modonly.groupby(['sign_form','f','a','m']).apply(lambda row: ','.join(row['sign_loc_id'] + ' (' + row['line_label'] + ')'))).reset_index()
df_modsagg.columns = ['sign_form','f','a','m','all_locs']
# Occurrence count = number of comma-separated locations in the joined string.
df_modsagg['count'] = df_modsagg['all_locs'].apply(lambda x: len(x.split(',')))
df_modsagg.to_csv('output/sign_mods_grouped.csv',encoding='utf-8')
df_modsagg.groupby('sign_form').agg('count')
# Complementary export: sign instances with no modification at all.
df_nomods = df[df['mods_str'] == '..']
df_nomods = df_nomods[['sign_form','text_id','b','sign_loc_id','f','a','m','form','frag','ref','break','cf','gw','pos','epos','sense','line_label']]
df_nomods.to_csv('output/signs_nomods_list.csv',encoding='utf-8')
df_nomods
# Dump the word-level records (all_words is built earlier in the notebook).
df_words = pd.DataFrame(all_words)
df_words.to_csv('output/words_all.csv',encoding='utf-8')
df_words
# Load the Akkadian (Neo-Assyrian) glossary and flatten its
# entry -> form -> instance hierarchy into one row per instance.
# Fixed: the file handle was never closed (now a with-block), and the unused
# accumulators entries_list, forms_list and instances_arr are removed.
with codecs.open('sargonletters/gloss-akk-x-neoass.json','r','utf-8') as gloss_akkx_file:
    gloss_akkx_json = json.load(gloss_akkx_file)
entries_akkx = gloss_akkx_json['entries']
instances_akk = gloss_akkx_json['instances']
instances_list = []
for entry in entries_akkx:
    entry_info = {'headword': entry['headword'], 'xis_entry': entry['xis']}
    for form in entry['forms']:
        xis_form = form['xis']
        form_info = {'form': form['n'], 'xis_form': xis_form}
        form_info.update(entry_info)
        # One output row per attested instance of this form.
        for inst in instances_akk[xis_form]:
            instance_info = {'instance': inst}
            instance_info.update(form_info)
            instances_list.append(instance_info)
df_instances_akkx = pd.DataFrame(instances_list)
df_instances_akkx
# Same flattening for the proper-noun glossary (QPN).
gloss_qpn_file = codecs.open('sargonletters/gloss-qpn.json','r','utf-8')
gloss_qpn_json = json.load(gloss_qpn_file)
entries_qpn = gloss_qpn_json['entries']
instances_qpn = gloss_qpn_json['instances']
# NOTE(review): despite the name, entries_list accumulates one dict per
# *instance*, mirroring instances_list in the akk-x cell above.
entries_list = []
for entry in entries_qpn:
    entry_info = {'headword': entry['headword'], 'xis_entry': entry['xis']}
    for form in entry['forms']:
        xis_form = form['xis']
        form_info = {'form': form['n'], 'xis_form': xis_form}
        form_info.update(entry_info)
        for inst in instances_qpn[xis_form]:
            instance_info = {'instance': inst}
            instance_info.update(form_info)
            entries_list.append(instance_info)
df_instances_qpn = pd.DataFrame(entries_list)
df_instances_qpn
# Merge both glossaries and export at three granularities:
# instance level, form level, and headword level.
df_instances_all = pd.concat([df_instances_akkx,df_instances_qpn])
df_instances_all.to_csv('output/instances_all.csv',encoding='utf-8')
df_instances_all
df_forms_all = pd.DataFrame(df_instances_all.groupby(['headword','xis_entry','form','xis_form']).agg({'instance': lambda x: ','.join(x)})).reset_index()
df_forms_all.columns = ['headword','xis_entry','form','xis_form','instances_all']
df_forms_all.to_csv('output/forms_all.csv',encoding='utf-8')
df_forms_all
df_headwords_all = pd.DataFrame(df_instances_all.groupby(['headword','xis_entry']).apply(lambda x: ','.join(x['instance'] + ' (' + x['form'] + ')'))).reset_index()
df_headwords_all.columns = ['headword','xis_entry','instances_all']
df_headwords_all.to_csv('output/headwords_all.csv',encoding='utf-8')
df_headwords_all
###Output
_____no_output_____
###Markdown
1.1: Collection of Modified SignsThe Data Frame we have contains the entire collection of signs in the corpus. However, not every sign has variants in paleography (at least according to Parpola's data input). We only want to look at the signs which have these variants, which we will term diagnostic. In the data, they are the signs that include any type of modification
###Code
# A sign is "diagnostic" when any of its three modification columns is
# non-empty in at least one occurrence anywhere in the corpus.
df2 = df[~(df['f'] == '') | ~(df['a'] == '') | ~(df['m'] == '')]
mod_signs = sorted(list(df2['sign_form'].unique()))
mod_signs
###Output
_____no_output_____
###Markdown
We now limit our Data Frame to include ONLY these diagnostic signs.
###Code
# Restrict to diagnostic signs only.
df_modsigns = df[df['sign_form'].isin(mod_signs)]
#remove damaged signs too
df_modsigns = df_modsigns[df_modsigns['break'] != 'damaged']
# Combined key 'SIGN:a.f.m' used later as a feature name.
# NOTE(review): assigning a column on a filtered frame may raise
# SettingWithCopyWarning; works in practice here.
df_modsigns['combined'] = df_modsigns['sign_form'] + ':' + df_modsigns['mods_str']
df_modsigns
###Output
_____no_output_____
###Markdown
1.1.1 Let's take a moment now to generate a list of diagnostic signs with their modifications, listing all their locations in the corpus
###Code
# Of the diagnostic signs, keep only occurrences that actually carry a
# modification (drop the unmodified '..' instances).
df_onlymods = df_modsigns[(df_modsigns['mods_str'] != '..')]
df_onlymods
###Output
_____no_output_____
###Markdown
Let's export a sorted version of this data frame
###Code
# Sorted, trimmed export of every modified sign occurrence.
df_onlymods_sorted = df_onlymods.sort_values(['sign_form','f','a','m'])
df_onlymods_sorted = df_onlymods_sorted[['sign_loc_id','sign_form','f','a','m','b','frag','text_id']]
df_onlymods_sorted.to_csv('output/sign_mods_all.csv',encoding='utf-8')
df_onlymods_sorted
###Output
_____no_output_____
###Markdown
1.2: Collection of Homophonous SignsWe now limit the original data frame in different way based on orthography. First we need to figure out which syllabic readings have multiple signs that can render them.
###Code
# Syllabic values with more than one distinct subscript index in the corpus,
# i.e. values that can be rendered by more than one sign.
df2 = pd.DataFrame(df.groupby(['str_part'])['num_part'].agg('nunique'))
ortho_list = list(df2[df2[('num_part')] > 1].index)
ortho_list
###Output
_____no_output_____
###Markdown
We need to eliminate capital letter entries because indices on logograms indicate different words and are not relevant here.
###Code
# Logographic readings are written in capitals; their indices distinguish
# whole words rather than homophonous spellings, so drop any value that
# contains an A-Z character.
ortho_list = [h for h in ortho_list if not re.search(r'[A-Z]', h)]
ortho_list
###Output
_____no_output_____
###Markdown
Limit the dataframe to only these signs
###Code
# Restrict the corpus to sign instances whose syllabic value has competing
# homophonous spellings.
df_ortho_signs = df[df['str_part'].isin(ortho_list)]
df_ortho_signs
###Output
_____no_output_____
###Markdown
2. Mixed vs. Complementary DistributionOne of the goals of this project is to determine a preference for sign usage in one subgroup of the corpus versus another. To that end there is one more factor that needs to be discussed, namely the usage of these paleographic or orthographic variants within context. If the usage of these variants are context-dependent, meaning that one form or syllable is used in one context and another form or syllable in another context, it does not tell us much about the preferential usage of the signs. This is known as a complementary distribution. For example, if a scribe uses li2 only in the form of the word be-li2 and the li sign in all other contexts, the choice of sign usage is not determined by the scribe's preference rather on scribal convention. This convention would thus be utilized by every scribe of this corpus and not help us to detect subgroups among these texts where scribes differ.On the other hand, if sign form or syllable variants appear within the same contexts, it gives us the information we want on scribal writing preference or tendencies. For example, ia and ia2 both appear in forms of the word bēliya, meaning that a scribe had an option of orthography and incised one or the other. (NTS: I'm avoiding the term "choose" here because it is a very loaded term with implications that may be misleading here). The question then becomes whether certain texts group together based on their tendencies to use one variant within a mixed distribution versus another variant.(paragraph about this dichotomy on the paleographic side of things. Mention TA vs. TA@v)(closing paragraph summarizing the issue) 2.1 Paleographic Variant Distribution Now let's try to apply a quantitative method to figure out the level of mixed distribution which paleographic variants bear within word forms. 
The steps here are: (1) Select only the particular sign forms and modifications that appear a sufficient number of times within the same forms; (2) Count the number of times these sign forms and modifications occur in each text; (3) Create a text matrix which contains the appropriate distribution for the modifications within each sign form, adding 1 to each cell to avoid divide-by-zero issues
###Code
# Count each (sign, word form, modification) triple, then keep only the
# modifications that genuinely compete inside the same word form.
df_mods_agg = pd.DataFrame(df_modsigns.groupby(['sign_form','form','mods_str'])['a'].agg('count')).reset_index()
df_mods_agg.columns = ['sign_form','form','mods_str','count']
#first let's remove where total instances are less than a certain arbitrary value, say 5
df_mods_agg = df_mods_agg[df_mods_agg['count'] >= 5]
#NOW find and only keep the rows where sign_form and form are duplicates
df_mods_agg['is_dup'] = df_mods_agg.duplicated(['sign_form','form'],False)
df_mods_agg = df_mods_agg[df_mods_agg['is_dup'] == True]
df_mods_agg
# Distinct (sign, modification) pairs that survived; 'combined' is the
# feature-name key used by the text matrix below.
df_select_signmods = df_mods_agg[['sign_form','mods_str']].drop_duplicates()
df_select_signmods['combined'] = df_select_signmods['sign_form'] + ':' + df_select_signmods['mods_str']
df_select_signmods
###Output
_____no_output_____
###Markdown
Create a list of the selected signs to limit the main paleography dataframe to only those sign forms
###Code
# Sign forms that passed the mixed-distribution filter above.
select_signs = list(df_mods_agg['sign_form'].unique())
select_signs
# Per-file occurrence counts of each (sign, modification) pair.
df_file_select_signs = df_modsigns[df_modsigns['sign_form'].isin(select_signs)]
df_file_select_signs = pd.DataFrame(df_file_select_signs.groupby(['file','sign_form','mods_str'])['a'].agg('count')).reset_index()
df_file_select_signs['combined'] = df_file_select_signs['sign_form'] + ':' + df_file_select_signs['mods_str']
df_file_select_signs
###Output
_____no_output_____
###Markdown
Create the text matrix according to step 3 above
###Code
# Build the per-text feature matrix: for each text, the add-one-smoothed
# proportion of each modification within its sign form.
d_file_select_signs = {}
for f in file_names:
    df_onefile_select_signs = df_file_select_signs[(df_file_select_signs['file'] == f)]
    d = {}   # smoothed count per 'SIGN:mods' key in this text
    e = {}   # smoothed total per sign form in this text
    for i, row in df_select_signmods.iterrows():
        try:
            # int() on a one-row Series yields its value; add-one smoothing
            # avoids divide-by-zero below.
            n = int(df_onefile_select_signs[(df_onefile_select_signs['combined'] == row['combined'])]['a']) + 1
        except TypeError:
            # No occurrence of this key in this text -> smoothed count of 1.
            n = 1
        d[row['combined']] = n
        if row['sign_form'] in e:
            e[row['sign_form']] += n
        else:
            e[row['sign_form']] = n
    #d_select_signs[f] = [d['ia'] / ia_tot,d['ia₂'] / ia_tot,d['li'] / li_tot,d['li₂'] / li_tot,d['ša'] / sa_tot, d['ša₂'] / sa_tot,d['šu'] / su_tot,d['šu₂'] / su_tot]
    d_file_select_signs[f] = []
    for i,row in df_select_signmods.iterrows():
        # Proportion of this modification within its sign form for text f.
        d_file_select_signs[f].append(d[row['combined']] / e[row['sign_form']])
df_file_select_signs_c = pd.DataFrame(d_file_select_signs).transpose()
df_file_select_signs_c.columns = list(df_select_signmods['combined'])
df_file_select_signs_c
###Output
_____no_output_____
###Markdown
2.1.2. Clustering on Paleography Alone Try Elbow Method on Paleography Alone
###Code
# Elbow method on the paleography matrix: run KMeans for k = 1..49 and plot
# inertia plus its first and second differences.
distortions = []
differences = [0]
deceleration = [0,0]
K = range(1,50)
for k in K:
    km = KMeans(n_clusters=k).fit(df_file_select_signs_c)
    distortions.append(km.inertia_)
    if k > 1:
        differences.append(distortions[k-1] - distortions[k-2])
    if k > 2:
        deceleration.append(differences[k-1] - differences[k-2])
#print(K)
#print(distortions)
#print(differences)
#print(deceleration)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,distortions,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('Elbow Method')
plt.savefig('output/elbow_paleo_reg.png')
plt.show()
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,differences,'o-')
plt.plot(K,deceleration,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Differences')
plt.title('Difference Measure')
plt.savefig('output/elbow_paleo_diff.png')
plt.show()
###Output
_____no_output_____
###Markdown
Let's look now at the silhouette score
###Code
# Average silhouette score for k = 2..69 on the paleography matrix.
sil_scores = []
K = range(2,70)
for k in K:
    km = KMeans(n_clusters=k).fit(df_file_select_signs_c)
    sil_score = silhouette_score(df_file_select_signs_c,labels=km.labels_)
    sil_scores.append(sil_score)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,sil_scores,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Avg Silhouette')
plt.title('Silhouette Scores')
plt.savefig('output/silhouette_paleo_reg.png')
plt.show()
# Final paleography clustering at k=14; collect file names per cluster label.
km1 = KMeans(n_clusters=14, max_iter=1000).fit(df_file_select_signs_c)
labels_paleo = {}
km1.labels_
for i in range(len(km1.labels_)):
    if km1.labels_[i] in labels_paleo:
        labels_paleo[km1.labels_[i]].append(file_names[i])
    else:
        labels_paleo[km1.labels_[i]] = [file_names[i]]
labels_paleo
# Sanity check: pairs of texts with the same sender should land in the same
# cluster.
#Sennacherib the Prince
sar = ['P334141.json','P334390.json']
#Nabu-pašir, governor of Harran
npr = ['P334807.json','P334080.json']
#Nabu-deʾiq
nd = ['P334568.json','P334792.json']
def find_cluster(pnum,labels):
    """Return the cluster key (as a string) whose member list contains pnum, or None."""
    for k in labels:
        if pnum in labels[k]:
            return str(k)
print('Sennacherib clusters are: ',find_cluster(sar[0],labels_paleo),' and ',find_cluster(sar[1],labels_paleo))
# BUG FIX: was find_cluster(np[0], ...) / np[1] -- 'np' is the numpy module
# (unindexable), the Nabu-pašir text-pair list is named 'npr'.
print('Nabu-pašir clusters are: ',find_cluster(npr[0],labels_paleo),' and ',find_cluster(npr[1],labels_paleo))
print('Nabu-deʾiq clusters are: ',find_cluster(nd[0],labels_paleo),' and ',find_cluster(nd[1],labels_paleo))
###Output
_____no_output_____
###Markdown
2.2. Orthographic Variant Distribution
###Code
# Word form with all subscript digits stripped, so homophonous spellings of
# the same form group together.
df_ortho_signs['form_str_part'] = df_ortho_signs['form'].apply(lambda x: re.sub(r'[₁₂₃₄₅₆₇₈₉₀]','',x))
df_ortho_signs
# Count each (syllable, stripped form, reading) triple, then keep only
# readings that genuinely compete inside the same form.
df_syls_agg = pd.DataFrame(df_ortho_signs.groupby(['str_part','form_str_part','b'])['a'].agg('count')).reset_index()
df_syls_agg.columns = ['str_part','form_str_part','b','count']
#first let's remove where total instances are less than a certain arbitrary value, say 5
df_syls_agg = df_syls_agg[df_syls_agg['count'] >= 5]
#NOW find and only keep the rows where sign_form and form are duplicates
df_syls_agg['is_dup'] = df_syls_agg.duplicated(['str_part','form_str_part'],False)
df_syls_agg = df_syls_agg[df_syls_agg['is_dup'] == True]
df_syls_agg
df_select_bs = df_syls_agg[['str_part','b']].drop_duplicates()
#Don't need to create combined column here because b is sufficient
#df_select_signmods['combined'] = df_select_signmods['sign_form'] + ':' + df_select_signmods['mods_str']
df_select_bs
select_syls = list(df_syls_agg['str_part'].unique())
select_syls
# Per-file occurrence counts of each selected reading.
df_file_select_bs = df_ortho_signs[df_ortho_signs['str_part'].isin(select_syls)]
df_file_select_bs = pd.DataFrame(df_file_select_bs.groupby(['file','str_part','b'])['a'].agg('count')).reset_index()
#Again combined is just b
#df_file_select_syls['combined'] = df_file_select_signs['sign_form'] + ':' + df_file_select_signs['mods_str']
df_file_select_bs
# Build the orthography text matrix: add-one-smoothed proportion of each
# reading within its syllable, per text (mirrors the paleography matrix).
d_file_select_syls = {}
for f in file_names:
    df_onefile_select_bs = df_file_select_bs[(df_file_select_bs['file'] == f)]
    d = {}   # smoothed count per reading in this text
    e = {}   # smoothed total per syllable in this text
    for i, row in df_select_bs.iterrows():
        try:
            n = int(df_onefile_select_bs[(df_onefile_select_bs['b'] == row['b'])]['a']) + 1
        except TypeError:
            n = 1
        d[row['b']] = n
        if row['str_part'] in e:
            e[row['str_part']] += n
        else:
            e[row['str_part']] = n
    #d_select_signs[f] = [d['ia'] / ia_tot,d['ia₂'] / ia_tot,d['li'] / li_tot,d['li₂'] / li_tot,d['ša'] / sa_tot, d['ša₂'] / sa_tot,d['šu'] / su_tot,d['šu₂'] / su_tot]
    d_file_select_syls[f] = []
    for i,row in df_select_bs.iterrows():
        d_file_select_syls[f].append(d[row['b']] / e[row['str_part']])
df_file_select_syls_c = pd.DataFrame(d_file_select_syls).transpose()
df_file_select_syls_c.columns = list(df_select_bs['b'])
df_file_select_syls_c
###Output
_____no_output_____
###Markdown
2.2.2 Cluster using K-Means Start with Elbow Method
###Code
# Elbow method on the orthography matrix (same procedure as for paleography).
distortions = []
differences = [0]
deceleration = [0,0]
K = range(1,50)
for k in K:
    km = KMeans(n_clusters=k).fit(df_file_select_syls_c)
    distortions.append(km.inertia_)
    if k > 1:
        differences.append(distortions[k-1] - distortions[k-2])
    if k > 2:
        deceleration.append(differences[k-1] - differences[k-2])
#print(K)
#print(distortions)
#print(differences)
#print(deceleration)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,distortions,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('Elbow Method')
plt.savefig('output/elbow_ortho_reg.png')
plt.show()
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,differences,'o-')
plt.plot(K,deceleration,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Differences')
plt.title('Difference Measure')
plt.savefig('output/elbow_ortho_diff.png')
plt.show()
###Output
_____no_output_____
###Markdown
Silhouette Again
###Code
# Silhouette scores for k = 2..69 on the orthography matrix.
sil_scores = []
K = range(2,70)
for k in K:
    km = KMeans(n_clusters=k).fit(df_file_select_syls_c)
    sil_score = silhouette_score(df_file_select_syls_c,labels=km.labels_)
    sil_scores.append(sil_score)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,sil_scores,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Avg Silhouette')
plt.title('Silhouette Scores')
plt.savefig('output/silhouette_ortho_reg.png')
plt.show()
# Final orthography clustering at k=7; collect file names per cluster label.
km1 = KMeans(n_clusters=7, max_iter=1000).fit(df_file_select_syls_c)
labels_ortho = {}
km1.labels_
for i in range(len(km1.labels_)):
    if km1.labels_[i] in labels_ortho:
        labels_ortho[km1.labels_[i]].append(file_names[i])
    else:
        labels_ortho[km1.labels_[i]] = [file_names[i]]
labels_ortho
#Let's examine some test cases. We'll select three pairs of texts, which we would expect to cluster always in the same way.
#Sennacherib the Prince
sar = ['P334141.json','P334390.json']
#Nabu-pašir, governor of Harran
npr = ['P334807.json','P334080.json']
#Nabu-deʾiq
nd = ['P334568.json','P334792.json']
def find_cluster(pnum,labels):
    """Return the cluster key (as a string) whose member list contains pnum, or None."""
    for k in labels:
        if pnum in labels[k]:
            return str(k)
print('Sennacherib clusters are: ',find_cluster(sar[0],labels_ortho),' and ',find_cluster(sar[1],labels_ortho))
# BUG FIX: was np[0]/np[1] -- 'np' is the numpy module; the list is 'npr'.
print('Nabu-pašir clusters are: ',find_cluster(npr[0],labels_ortho),' and ',find_cluster(npr[1],labels_ortho))
print('Nabu-deʾiq clusters are: ',find_cluster(nd[0],labels_ortho),' and ',find_cluster(nd[1],labels_ortho))
###Output
_____no_output_____
###Markdown
Combine Orthography and Paleography
###Code
# Combined feature matrix: orthography proportions plus paleography
# proportions, one row per text.
tm_all = pd.concat([df_file_select_syls_c,df_file_select_signs_c],axis=1)
tm_all.to_csv('output/full_matrix.csv',encoding='utf-8')
tm_all
###Output
_____no_output_____
###Markdown
Elbow Method
###Code
# Elbow method on the combined orthography+paleography matrix.
distortions = []
differences = [0]
deceleration = [0,0]
K = range(1,50)
for k in K:
    km = KMeans(n_clusters=k).fit(tm_all)
    distortions.append(km.inertia_)
    if k > 1:
        differences.append(distortions[k-1] - distortions[k-2])
    if k > 2:
        deceleration.append(differences[k-1] - differences[k-2])
#print(K)
#print(distortions)
#print(differences)
#print(deceleration)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,distortions,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('Elbow Method')
plt.savefig('output/elbow_both_reg.png')
plt.show()
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,differences,'o-')
plt.plot(K,deceleration,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Differences')
plt.title('Difference Measure')
plt.savefig('output/elbow_both_diff.png')
plt.show()
###Output
_____no_output_____
###Markdown
Silhouette Again
###Code
# Silhouette scores for k = 2..69 on the combined matrix.
sil_scores = []
K = range(2,70)
for k in K:
    km = KMeans(n_clusters=k).fit(tm_all)
    sil_score = silhouette_score(tm_all,labels=km.labels_)
    sil_scores.append(sil_score)
plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
plt.plot(K,sil_scores,'o-')
plt.xticks(K)
plt.xlabel('k')
plt.ylabel('Avg Silhouette')
plt.title('Silhouette Scores')
plt.savefig('output/silhouette_both_reg.png')
plt.show()
# Final combined clustering at k=14; collect file names per cluster label.
km1 = KMeans(n_clusters=14, max_iter=1000).fit(tm_all)
labels_all = {}
km1.labels_
for i in range(len(km1.labels_)):
    if km1.labels_[i] in labels_all:
        labels_all[km1.labels_[i]].append(file_names[i])
    else:
        labels_all[km1.labels_[i]] = [file_names[i]]
labels_all
# Same sender-pair sanity check against the combined clustering.
#Sennacherib the Prince
sar = ['P334141.json','P334390.json']
#Nabu-pašir, governor of Harran
npr = ['P334807.json','P334080.json']
#Nabu-deʾiq
nd = ['P334568.json','P334792.json']
def find_cluster(pnum,labels):
    """Return the cluster key (as a string) whose member list contains pnum, or None."""
    for k in labels:
        if pnum in labels[k]:
            return str(k)
print('Sennacherib clusters are: ',find_cluster(sar[0],labels_all),' and ',find_cluster(sar[1],labels_all))
# BUG FIX: was np[0]/np[1] -- 'np' is the numpy module; the list is 'npr'.
print('Nabu-pašir clusters are: ',find_cluster(npr[0],labels_all),' and ',find_cluster(npr[1],labels_all))
print('Nabu-deʾiq clusters are: ',find_cluster(nd[0],labels_all),' and ',find_cluster(nd[1],labels_all))
###Output
_____no_output_____
###Markdown
The two examples from Sennacherib the prince tend to cluster together BUT letters from other places do not group together according to paleographic and orthographic preferences in those letters. Why should this be? Here are some options: (1) Scribal usage of different paleographies and orthographies is not based on a certain preference either consciously or unconsciously. In other words, for any given scribe, free variation reigns supreme (expand on this). On the other hand, the letters from Sennacherib do represent a particular style, perhaps due to his station. (2) Paleographic and Orthographic variation CAN indicate scribal tendencies, BUT computational methods are insufficient to determine this because machine learning algorithms require large amounts of data and the letters simply do not provide enough data. If so, we must ask the question why it works for Sennacherib but not the others. (3) There is a problem with my methodology. Maybe I set up the text vectors incorrectly. Maybe I should include more orthographies/paleographies or perhaps less. Maybe the number of clusters selected is wrong. Something else to keep in mind here is that while I limited the number of signs to be considered in the text vectors, I did not restrict any text from being in the corpus. Perhaps I should do that. Maybe certain texts are simply too short to make any determinations on their grouping among the other texts. Visualize with MDS
###Code
# Project the combined matrix to 2D with multidimensional scaling and plot,
# coloring each text by its (combined) KMeans cluster label.
from sklearn.manifold import MDS
# NOTE(review): texts_2d_map is never used below.
texts_2d_map = {}
texts = tm_all.index
mds1 = MDS(n_components = 2)
texts_2d = mds1.fit_transform(tm_all)
color_list = ['white','yellow','green','red','blue','brown','black']
colors_all = []
for i in range(len(km1.labels_)):
    # 14 clusters folded onto 7 colors, so colors repeat across clusters.
    colors_all.append(color_list[km1.labels_[i] % 7])
colors_all
plt.figure(num=None, figsize=(16, 16), dpi=80, facecolor='w', edgecolor='k')
x_values = [xy[0] for xy in texts_2d]
y_values = [xy[1] for xy in texts_2d]
plt.scatter(x_values,y_values,c=colors_all)
for i in range(len(texts_2d)):
    plt.annotate(texts[i],(x_values[i],y_values[i]))
plt.show()
###Output
_____no_output_____
###Markdown
ClassificationWe start with an initial classification assumption that letters from the same location will cluster in the same groups. We can use the catalogue.json files to get information on the sender locations as well as the sender
###Code
# Load the corpus catalogue and build a per-text metadata frame (designation,
# author, dossier, sender location) indexed by text id.
cat_file = codecs.open('sargonletters/catalogue.json','r','utf-8')
cat_json = json.load(cat_file)
class_l = []
class_index = []
for pnum in cat_json['members']:
    id_text = cat_json['members'][pnum].get('id_text','')
    designation = cat_json['members'][pnum].get('designation','')
    ancient_author = cat_json['members'][pnum].get('ancient_author','')
    dossier = cat_json['members'][pnum].get('dossier','')
    senderloc = cat_json['members'][pnum].get('senderloc','')
    class_d = {'designation': designation,'ancient_author':ancient_author,'dossier':dossier,'senderloc':senderloc}
    class_index.append(id_text)
    class_l.append(class_d)
df_class = pd.DataFrame(class_l,index=class_index)
df_class
senderloc_list = df_class['senderloc'].unique()
print('There are ' + str(len(senderloc_list)) + ' sender locations.')
author_list = df_class['ancient_author'].unique()
print('There are ' + str(len(author_list)) + ' ancient authors')
###Output
_____no_output_____
###Markdown
---BREAK---
###Code
# Raw modification counts for a hand-picked set of frequent signs.
df_select_signs_tot = df[df['sign_form'].isin(['NI','NA','LUGAL','MA','ŠA'])]
df_select_tot = pd.DataFrame(df_select_signs_tot.groupby(['sign_form','mods_str']).agg('count'))
df_select_tot
#df_select_tot.sort_values(by=['a'],ascending=[False])
# Two-feature view: share of unmodified NA and unmodified NI per text.
df_file_twofeats = df_file_select_signs_c[['NA:..','NI:..']]
df_file_twofeats
df_class_feats = pd.concat([df_class,df_file_twofeats],axis=1)
df_class_feats
# Assign each sender location a unique (color, marker) pair; the inner break
# fires once the 63 combinations or the location list is exhausted.
color_list = ['white','yellow','green','red','blue','brown','black']
marker_list = ['o','v','^','8','s','*','+','D','h']
i = 0
sender_colors = {}
sender_markers = {}
for c in color_list:
    for m in marker_list:
        try:
            sender_colors[senderloc_list[i]] = c
            sender_markers[senderloc_list[i]] = m
            i += 1
        except IndexError:
            break
df_class_feats['color'] = df_class_feats['senderloc'].map(sender_colors)
df_class_feats['marker'] = df_class_feats['senderloc'].map(sender_markers)
df_class_feats
#Top senderlocs
pd.DataFrame(df_class_feats.groupby(['senderloc'])['dossier'].agg('count')).sort_values(by='dossier',ascending=[False])
senderlocs_top5 = ['Royal Court','Northeastern Assyria','Assyria','Ashur','Central or Southern Babylonia']
# Scatter the two features for texts from the five most frequent locations.
plt.figure(num=None, figsize=(16, 16), dpi=120, facecolor='w', edgecolor='k')
for i, row in df_class_feats.iterrows():
    if row['senderloc'] in senderlocs_top5:
        plt.scatter(row['NA:..'],row['NI:..'],c=row['color'],marker=row['marker'])
plt.show()
###Output
_____no_output_____
###Markdown
Which sign forms and syllables work the best to group texts by their sender location?
###Code
# Attach sender metadata to the combined matrix, then compute per-location
# feature variance; sum of squared variances ranks features by within-class
# spread.
tm_all_class = pd.concat([tm_all,df_class],axis=1)
tm_all_class
df_class_var = pd.DataFrame(tm_all_class.groupby('senderloc').agg('var'))
df_class_var
df_varsum = df_class_var.apply(lambda x:x**2)
df_varsum = pd.DataFrame(df_varsum.agg('sum'))
df_varsum.to_csv('output/varsum.csv',encoding='utf-8',sep='\t')
# Corpus-wide frequency of each diagnostic sign and each homophonous syllable.
df_mod_count = pd.DataFrame(df_modsigns.groupby('sign_form')['a'].agg('count'))
df_mod_count.columns = ['count']
df_mod_count.sort_values(by='count',ascending=False)
df_ortho_count = pd.DataFrame(df_ortho_signs.groupby('str_part')['a'].agg('count'))
df_ortho_count.columns = ['count']
df_ortho_count.sort_values(by='count',ascending=False)
###Output
_____no_output_____
###Markdown
Let's attempt to see which sign forms or orthographies determine the clusters the best. We will count up the occurrences like we did before but for every sign form and syllable. We will then find the center of each class and calculate the sum of squares within each class and between classes for one sign_form or syllable
###Code
# Bag-of-readings per text: join every homophonous reading into one string
# and vectorize with a whitespace tokenizer.
df_ortho_str = pd.DataFrame(df_ortho_signs.groupby(['text_id']).apply(lambda x: ' '.join(x['b'])))
df_ortho_str.columns = ['ortho_str']
df_ortho_str
cv = CountVectorizer(token_pattern='[^ ]+')
ft = cv.fit_transform(list(df_ortho_str['ortho_str']))
tm_ortho = pd.DataFrame(ft.toarray(),columns=cv.get_feature_names(),index=df_ortho_str.index)
tm_ortho
# Map each syllable to the distinct readings attested for it.
df_ortho_map = pd.DataFrame(df_ortho_signs.groupby(['str_part']).apply(lambda x: ' '.join(x['b'].unique()).split()))
map_ortho = df_ortho_map.to_dict()[0]
map_ortho
# Proportion of each reading within its syllable per text; NaN where the
# syllable does not occur in a text at all.
d = {}
vecs = {}
for i, row in tm_ortho.iterrows():
    d[i] = {}
    for syl in map_ortho:
        syl_sum = np.sum(tm_ortho.loc[i][map_ortho[syl]])
        for b in map_ortho[syl]:
            if syl_sum > 0:
                d[i][b] = tm_ortho.loc[i][b] / syl_sum
            else:
                d[i][b] = np.nan
tm_ortho_dist = pd.DataFrame(d).transpose()
tm_ortho_dist
# Variance of each reading proportion overall and within each sender
# location; exported to rank features by discriminative potential.
tm_ortho_sender = pd.concat([tm_ortho_dist,df_class],axis=1)
tm_ortho_sender
tm_ortho_sender_var = tm_ortho_sender.groupby('senderloc').agg(np.nanvar)
tm_ortho_sender_var
tm_ortho_all_var = pd.DataFrame(tm_ortho_dist.apply(np.nanvar))
tm_ortho_all_var.columns = ['var_all']
tm_ortho_sender_varsum = pd.DataFrame(tm_ortho_sender_var.agg(np.nansum))
tm_ortho_sender_varsum.columns = ['var_sender']
tm_ortho_sender_varsum
df_ortho_bcount = pd.DataFrame(df_ortho_signs.groupby('b')['a'].agg('count'))
#df_ortho_bcount.index = df_ortho_bcount['b']
df_ortho_bcount.columns = ['bcount']
df_ortho_bcount
tm_ortho_varsum = pd.concat([tm_ortho_all_var,tm_ortho_sender_varsum,df_ortho_bcount],axis=1)
tm_ortho_varsum.to_csv('output/ortho_vars.csv',encoding='utf-8',sep='\t')
###Output
_____no_output_____
###Markdown
Try the same with paleography
###Code
# Same variance analysis for paleography: bag of 'SIGN:mods' tokens per text.
df_paleo_str = pd.DataFrame(df_modsigns.groupby(['text_id']).apply(lambda x: ' '.join(x['combined'])))
df_paleo_str.columns = ['paleo_str']
df_paleo_str
# lowercase=False keeps sign names like 'NA' distinct from readings.
cv = CountVectorizer(token_pattern='[^ ]+',lowercase=False)
ft = cv.fit_transform(list(df_paleo_str['paleo_str']))
tm_paleo = pd.DataFrame(ft.toarray(),columns=cv.get_feature_names(),index=df_paleo_str.index)
tm_paleo
# Map each sign form to the distinct 'SIGN:mods' variants attested for it.
df_paleo_map = pd.DataFrame(df_modsigns.groupby(['sign_form']).apply(lambda x: ' '.join(x['combined'].unique()).split()))
map_paleo = df_paleo_map.to_dict()[0]
map_paleo
# Proportion of each variant within its sign form per text; NaN if the sign
# form does not occur in a text.
d = {}
vecs = {}
for i, row in tm_paleo.iterrows():
    d[i] = {}
    for sform in map_paleo:
        form_sum = np.sum(tm_paleo.loc[i][map_paleo[sform]])
        for c in map_paleo[sform]:
            if form_sum > 0:
                d[i][c] = tm_paleo.loc[i][c] / form_sum
            else:
                d[i][c] = np.nan
tm_paleo_dist = pd.DataFrame(d).transpose()
tm_paleo_dist
tm_paleo_sender = pd.concat([tm_paleo_dist,df_class],axis=1)
tm_paleo_sender
tm_paleo_sender_var = tm_paleo_sender.groupby('senderloc').agg(np.nanvar)
tm_paleo_sender_var
tm_paleo_all_var = pd.DataFrame(tm_paleo_dist.apply(np.nanvar))
tm_paleo_all_var.columns = ['var_all']
tm_paleo_sender_varsum = pd.DataFrame(tm_paleo_sender_var.agg(np.nansum))
tm_paleo_sender_varsum.columns = ['var_sender']
tm_paleo_sender_varsum
df_paleo_ccount = pd.DataFrame(df_modsigns.groupby('combined')['a'].agg('count'))
df_paleo_ccount.columns = ['ccount']
df_paleo_ccount
tm_paleo_varsum = pd.concat([tm_paleo_all_var,tm_paleo_sender_varsum,df_paleo_ccount],axis=1)
tm_paleo_varsum.to_csv('output/paleo_vars.csv',encoding='utf-8',sep='\t')
###Output
_____no_output_____
###Markdown
Now that we've selected our syllables and signs let's try to cluster using only those. Let's try orthography first.
###Code
# Hand-selected homophone groups: plain syllable -> its competing readings.
map_ortho = {'ia':['ia','ia₂'], 'li':['li','li₂'], 'ša':['ša','ša₂'], 'šu':['šu','šu₂'], 'u':['u','u₂']}
# Flatten the groups into one ordered list of all candidate readings.
list_ortho = [reading for group in map_ortho.values() for reading in group]
list_ortho
# Keep only the selected readings and add-one smooth the counts.
tm_ortho = tm_ortho[list_ortho].apply(lambda x: x+1)
tm_ortho
# Recompute within-syllable proportions on the smoothed counts.
d = {}
vecs = {}
for i, row in tm_ortho.iterrows():
    d[i] = {}
    for syl in map_ortho:
        syl_sum = np.sum(tm_ortho.loc[i][map_ortho[syl]])
        for b in map_ortho[syl]:
            if syl_sum > 0:
                d[i][b] = tm_ortho.loc[i][b] / syl_sum
            else:
                d[i][b] = np.nan
tm_ortho_dist = pd.DataFrame(d).transpose()
tm_ortho_dist
# Cluster on each homophone pair alone and on all of them together,
# scoring each run by purity w.r.t. sender location plus silhouette.
cluster_groups = [['ia','ia₂'],['li','li₂'],['u','u₂'],['ša','ša₂'],['šu','šu₂'],list_ortho]
for g in cluster_groups:
    km = KMeans(n_clusters=62,max_iter=1000).fit(tm_ortho_dist[g])
    senders_clustered = {}
    for i in range(len(km.labels_)):
        if km.labels_[i] in senders_clustered:
            senders_clustered[km.labels_[i]].append(df_class.loc[tm_ortho_dist.index[i]]['senderloc'])
        else:
            senders_clustered[km.labels_[i]] = [df_class.loc[tm_ortho_dist.index[i]]['senderloc']]
    #purity score
    purity_score = 0
    for c in senders_clustered:
        cnt = Counter(senders_clustered[c])
        purity_score += cnt.most_common()[0][1]
    purity_score = purity_score / len(df_class.index)
    print(str(g) + ': ' + str(purity_score))
    #Tack on Silhouette
    print('Silhouette: ' + str(silhouette_score(tm_ortho_dist[g],labels=km.labels_)))
###Output
_____no_output_____
###Markdown
Do same for paleography
###Code
# Hand-selected paleographic pairs: sign form -> its competing variants.
map_paleo = {'BU':['BU:..','BU:.p.'], 'DI':['DI:..','DI:.d.'], 'LI':['LI:..','LI:.d.'], 'NA':['NA:..','NA:.t.'], 'NI':['NI:..','NI:.d.'], 'RU':['RU:..','RU:.d.'], '|ME.U.U.U|':['|ME.U.U.U|:..','|ME.U.U.U|:.m.'], 'ŠA': ['ŠA:..','ŠA:.dm.']}
# One cluster group per sign form, plus a final group with every variant.
cluster_groups_paleo = list(map_paleo.values())
list_paleo = [variant for group in cluster_groups_paleo for variant in group]
cluster_groups_paleo.append(list_paleo)
list_paleo
# Keep only the selected variants and add-one smooth the counts.
tm_paleo = tm_paleo[list_paleo].apply(lambda x: x+1)
tm_paleo
# Recompute within-sign-form proportions on the smoothed counts.
d = {}
vecs = {}
for i, row in tm_paleo.iterrows():
    d[i] = {}
    for syl in map_paleo:
        syl_sum = np.sum(tm_paleo.loc[i][map_paleo[syl]])
        for b in map_paleo[syl]:
            if syl_sum > 0:
                d[i][b] = tm_paleo.loc[i][b] / syl_sum
            else:
                d[i][b] = np.nan
tm_paleo_dist = pd.DataFrame(d).transpose()
tm_paleo_dist
# Cluster per sign-form pair and on all variants together, scoring by
# sender-location purity and silhouette (mirrors the orthography run).
for g in cluster_groups_paleo:
    km = KMeans(n_clusters=62,max_iter=1000).fit(tm_paleo_dist[g])
    senders_clustered = {}
    for i in range(len(km.labels_)):
        if km.labels_[i] in senders_clustered:
            senders_clustered[km.labels_[i]].append(df_class.loc[tm_paleo_dist.index[i]]['senderloc'])
        else:
            senders_clustered[km.labels_[i]] = [df_class.loc[tm_paleo_dist.index[i]]['senderloc']]
    #purity score
    purity_score = 0
    for c in senders_clustered:
        cnt = Counter(senders_clustered[c])
        purity_score += cnt.most_common()[0][1]
    purity_score = purity_score / len(df_class.index)
    print(str(g) + ': ' + str(purity_score))
    #Tack on Silhouette
    print('Silhouette: ' + str(silhouette_score(tm_paleo_dist[g],labels=km.labels_)))
###Output
_____no_output_____
###Markdown
Now let's look at words that share the same lemma and normalization, and examine how they are written
###Code
# Lemma key = citation form + [guide word] + part of speech, e.g. "ina[in]PRP".
df_words['lemma'] = df_words['cf'] + '[' + df_words['gw'] + ']' + df_words['pos']
# Total attestations per (lemma, normalization) pair.
df_norm_tot = pd.DataFrame(df_words.groupby(['lemma','norm'])['cf'].agg('count'))
df_norm_tot.columns = ['norm_count']
# Number of distinct written forms per (lemma, normalization) pair.
df_norm_uniq = pd.DataFrame(df_words.groupby(['lemma','norm'])['form'].nunique())
df_norm_uniq.columns = ['norm_uniq']
df_norm_info = pd.concat([df_norm_tot,df_norm_uniq],axis=1)
df_norm_info
# Frequent normalizations (>500 hits) written in more than one but fewer than 20 ways.
df_norm_info[(df_norm_info['norm_count'] > 500) & (df_norm_info['norm_uniq'] > 1) & (df_norm_info['norm_uniq'] < 20)]
# Spot-check a single high-frequency lemma/normalization.
df_words[(df_words['lemma'] == 'ina[in]PRP') & (df_words['norm'] == 'ina')]
# Per-form counts, exported for manual inspection.
df_norm_uniq = pd.DataFrame(df_words.groupby(['lemma','norm','form'])['cf'].agg('count'))
df_norm_uniq.to_csv('output/forms_unique.csv',encoding='utf-8',sep='\t')
###Output
_____no_output_____
###Markdown
Rare orthographies and paleographies
###Code
# Orthographic sign values considered rare in this corpus.
rare_ortho = ['a₂','ana₃','da₃','gal₃','i₃','ka₂','kam₂','ku₃','me₂','qi₂','ur₂']
# Paleographic sign variants considered rare.
rare_paleo = ['A:.d.','AB₂:.d.','AK:.dt.','AL:.y.','AMAR:.p.','BA:.p.','BAD:.m.','BI:.y.','DA:.y.','DI:.y.','DIN:.d.','DUN:.m.','DUN₃:.m.','DUN₄:.m.','E₂:.ym.','GA:.d.','GA:.p.']
# Rows whose sign value `b` is one of the rare orthographies, sorted by text id.
df[df['b'].isin(rare_ortho)].sort_values(by='text_id')
###Output
_____no_output_____ |
notebooks/Benchmark Bookies-checkpoint.ipynb | ###Markdown
Prediction Model Benchmark The key metric to analyse models against is the set of probabilities already generated by the bookies
###Code
from IPython.display import display, Markdown
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import poisson
import seaborn as sns
import warnings
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
from epl.dspy_display import statsmodels_pretty_print, pp_conf_matrices, pp_conf_matrix
from epl.dspy_preprocess_utils import apply_feature_scaling
from epl.dspy_eval_utils import statsmodels_create_eval_df
from epl.features_parse import get_feat_col_names
from epl.feature_utils import home_away_to_team_opp, create_goal_probs, create_match_prediction_stats, create_poisson_prediction_output, eval_df_to_match_eval_df
from epl.query import create_and_query
pd.options.display.max_columns = None
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
1. Fetch Data 1a. Match Data Need to import the raw match data and reformat so we have a row per team per match (rather than a row per match with 2 teams)
###Code
# get key cols for join to features along with useful id data and goal/result data
match_key_cols = ['Date', 'HomeTeam', 'AwayTeam']
id_cols = ['Country', 'Div', 'Season']
# Full-time goals/result plus Bet365 home/draw/away decimal odds.
match_other_cols = ['FTHG', 'FTAG', 'FTR', 'B365H', 'B365D', 'B365A']
# for now restrict to only the top div in each country
# (wc is a where-clause spec for create_and_query; also drops season 93/94)
wc = {'Div': ['IN', ['E0', 'SC0', 'B1', 'D1', 'F1', 'I1', 'SP1', 'P1']], 'Season': ['<>', '9394']}
match_cols = match_key_cols + id_cols + match_other_cols
df_matches = create_and_query('matches', cols=match_cols, wc=wc)
df_matches.tail(5)
###Output
Running query: SELECT Date, HomeTeam, AwayTeam, Country, Div, Season, FTHG, FTAG, FTR, B365H, B365D, B365A FROM matches WHERE Div IN ('E0', 'SC0', 'B1', 'D1', 'F1', 'I1', 'SP1', 'P1') AND Season <> '9394'
###Markdown
Display how many matches we have per div
###Code
df_matches[['FTR', 'Div']].groupby(['Div']).count().sort_values(['FTR'], ascending=False)
###Output
_____no_output_____
###Markdown
2. Convert Odds to Probabilities & Normalise Assume constant spread taken across 3 match outcomes and normalise equally
###Code
# Implied probabilities from decimal odds: p = 1/odds, then strip the
# bookmaker's margin (overround) so the three outcomes sum to exactly 1.
for prob_col, odds_col in (('AwayProb', 'B365A'), ('DrawProb', 'B365D'), ('HomeProb', 'B365H')):
    df_matches[prob_col] = 1 / df_matches[odds_col]
df_matches['Margin'] = df_matches.HomeProb + df_matches.DrawProb + df_matches.AwayProb - 1
for prob_col in ('HomeProb', 'DrawProb', 'AwayProb'):
    df_matches[prob_col] = df_matches[prob_col] / (1 + df_matches['Margin'])
df_matches.tail()
# The bookies' implicit prediction is the outcome with the largest probability;
# the tuple order (A, D, H) must match the labels in `res`.
df_matches['FTRProbs'] = list(zip(df_matches['AwayProb'], df_matches['DrawProb'], df_matches['HomeProb']))
res = ['A', 'D', 'H']
df_matches['FTRPred'] = df_matches['FTRProbs'].apply(lambda probs: res[probs.index(max(probs))])
df_matches.tail()
pp_conf_matrix(df_matches['FTR'], df_matches['FTRPred'], label='Bookies');
# Bin probabilities into 50 equal-width buckets labelled by the bin midpoint,
# for comparing the bookies' implied probability against realised win rates.
num_bins = 50
bins = [x/num_bins for x in range(1, num_bins + 1)]
df_matches['HomeProbBin'] = pd.cut(df_matches['HomeProb'], bins, labels=[round(x + (1/num_bins/2), 4) for x in bins[:-1]])
df_matches['AwayProbBin'] = pd.cut(df_matches['AwayProb'], bins, labels=[round(x + (1/num_bins/2), 4) for x in bins[:-1]])
df_matches
# separate into home and away win dfs
# Realised home-win rate per probability bin = wins in bin / all games in bin.
home_wins = df_matches[df_matches.FTR == 'H'][['FTR', 'HomeProbBin']].groupby(['HomeProbBin']).count()
all_games = df_matches[['FTR', 'HomeProbBin']].groupby(['HomeProbBin']).count()
home_win_pc = (home_wins / all_games).reset_index()
home_win_pc = home_win_pc.astype(float)
away_wins = df_matches[df_matches.FTR == 'A'][['FTR', 'AwayProbBin']].groupby(['AwayProbBin']).count()
all_games = df_matches[['FTR', 'AwayProbBin']].groupby(['AwayProbBin']).count()
away_win_pc = (away_wins / all_games).reset_index()
away_win_pc = away_win_pc.astype(float)
home_win_pc = home_win_pc.dropna()
home_win_pc.columns = ['BookiesProb', 'RealisedHomeWins']
away_win_pc = away_win_pc.dropna()
away_win_pc.columns = ['BookiesProb', 'RealisedAwayWins']
# Long format so seaborn can colour the points by Home/Away in one scatter.
win_probab = pd.merge(left=home_win_pc, right=away_win_pc, how='outer', on='BookiesProb')
win_probab = pd.melt(win_probab, id_vars=['BookiesProb'], value_vars=['RealisedHomeWins', 'RealisedAwayWins'], var_name='HomeAway', value_name='Prob')
win_probab['HomeAway'] = np.where(win_probab['HomeAway'] == 'RealisedHomeWins', 'Home', 'Away')
fig, ax = plt.subplots(figsize=(8,8))
sns.scatterplot(data=win_probab, x='BookiesProb', y='Prob', hue='HomeAway', ax=ax);
# y=x diagonal: a perfectly calibrated predictor would put all points on it.
ax.plot([0, 1], [0, 1], transform=ax.transAxes);
ax.set_title('Actual vs Predicted Probabilities');
# Same calibration plot as the cells above, but for the first model's output
# instead of the bookies' odds (the binning/plot logic is duplicated verbatim).
df_model = pd.read_csv('first_model_output.csv', parse_dates=['Date'])
num_bins = 50
bins = [x/num_bins for x in range(1, num_bins + 1)]
df_model['HomeProbBin'] = pd.cut(df_model['HomeProb'], bins, labels=[round(x + (1/num_bins/2), 4) for x in bins[:-1]])
df_model['AwayProbBin'] = pd.cut(df_model['AwayProb'], bins, labels=[round(x + (1/num_bins/2), 4) for x in bins[:-1]])
df_model
# separate into home and away win dfs
home_wins = df_model[df_model.FTR == 'H'][['FTR', 'HomeProbBin']].groupby(['HomeProbBin']).count()
all_games = df_model[['FTR', 'HomeProbBin']].groupby(['HomeProbBin']).count()
home_win_pc = (home_wins / all_games).reset_index()
home_win_pc = home_win_pc.astype(float)
away_wins = df_model[df_model.FTR == 'A'][['FTR', 'AwayProbBin']].groupby(['AwayProbBin']).count()
all_games = df_model[['FTR', 'AwayProbBin']].groupby(['AwayProbBin']).count()
away_win_pc = (away_wins / all_games).reset_index()
away_win_pc = away_win_pc.astype(float)
home_win_pc = home_win_pc.dropna()
home_win_pc.columns = ['BookiesProb', 'RealisedHomeWins']
away_win_pc = away_win_pc.dropna()
away_win_pc.columns = ['BookiesProb', 'RealisedAwayWins']
win_probab = pd.merge(left=home_win_pc, right=away_win_pc, how='outer', on='BookiesProb')
win_probab = pd.melt(win_probab, id_vars=['BookiesProb'], value_vars=['RealisedHomeWins', 'RealisedAwayWins'], var_name='HomeAway', value_name='Prob')
win_probab['HomeAway'] = np.where(win_probab['HomeAway'] == 'RealisedHomeWins', 'Home', 'Away')
fig, ax = plt.subplots(figsize=(8,8))
sns.scatterplot(data=win_probab, x='BookiesProb', y='Prob', hue='HomeAway', ax=ax);
ax.plot([0, 1], [0, 1], transform=ax.transAxes);
ax.set_title('Actual vs Predicted Probabilities');
###Output
_____no_output_____ |
Copy_of_word2vec.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import string
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# Load the TensorBoard notebook extension
%load_ext tensorboard
SEED = 42
AUTOTUNE = tf.data.AUTOTUNE
num_ns = 4
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
    """Build (targets, contexts, labels) word2vec training examples.

    For every positive skip-gram pair, num_ns negative context words are
    drawn from a log-uniform distribution over the vocabulary. Each returned
    context/label row holds 1 positive entry followed by num_ns negatives.
    """
    targets, contexts, labels = [], [], []

    # Sampling table down-weights very frequent tokens (Zipfian assumption).
    sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)

    # Iterate over all sequences (sentences) in the dataset.
    for sequence in tqdm.tqdm(sequences):

        # Positive skip-gram pairs only; negatives are sampled separately below.
        positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
            sequence,
            vocabulary_size=vocab_size,
            sampling_table=sampling_table,
            window_size=window_size,
            negative_samples=0)

        for target_word, context_word in positive_skip_grams:
            context_class = tf.expand_dims(
                tf.constant([context_word], dtype="int64"), 1)
            negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
                true_classes=context_class,
                num_true=1,
                num_sampled=num_ns,
                unique=True,
                range_max=vocab_size,
                # Bug fix: use the `seed` parameter instead of the global SEED,
                # which silently ignored the caller's argument.
                seed=seed,
                name="negative_sampling")

            # Build context and label vectors (for one target word):
            # 1 positive class followed by num_ns negatives, labels to match.
            negative_sampling_candidates = tf.expand_dims(
                negative_sampling_candidates, 1)
            context = tf.concat([context_class, negative_sampling_candidates], 0)
            label = tf.constant([1] + [0]*num_ns, dtype="int64")

            targets.append(target_word)
            contexts.append(context)
            labels.append(label)

    return targets, contexts, labels
file_path = "summary.txt"
with open('christmas_carol.txt', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
text_ds = tf.data.TextLineDataset(file_path).filter(lambda x: tf.cast(tf.strings.length(x), bool))
# text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
# Now, create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
    """TextVectorization hook: lowercase the text and strip all ASCII punctuation."""
    punct_pattern = '[%s]' % re.escape(string.punctuation)
    lowered = tf.strings.lower(input_data)
    return tf.strings.regex_replace(lowered, punct_pattern, '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the TextVectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
vectorize_layer.adapt(text_ds.batch(1024))
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
targets = np.array(targets)
contexts = np.array(contexts)[:,:,0]
labels = np.array(labels)
print('\n')
print(f"targets.shape: {targets.shape}")
print(f"contexts.shape: {contexts.shape}")
print(f"labels.shape: {labels.shape}")
# BATCH_SIZE = 1024
# BUFFER_SIZE = 10000
BATCH_SIZE = 10
BUFFER_SIZE = 5
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
class Word2Vec(tf.keras.Model):
    """Skip-gram word2vec model with separate target and context embeddings."""

    def __init__(self, vocab_size, embedding_dim):
        super(Word2Vec, self).__init__()
        # Target-word embedding; its name is used later to export the vectors.
        self.target_embedding = layers.Embedding(vocab_size,
                                                 embedding_dim,
                                                 input_length=1,
                                                 name="w2v_embedding")
        # Context embedding sees 1 positive + num_ns negative words per example.
        self.context_embedding = layers.Embedding(vocab_size,
                                                  embedding_dim,
                                                  input_length=num_ns+1)

    def call(self, pair):
        """Return dot-product logits between a target word and its context words."""
        target, context = pair
        # Drop the dummy axis some TF versions add: (batch, 1) -> (batch,).
        # (The dummy axis doesn't exist in TF 2.7+.)
        if len(target.shape) == 2:
            target = tf.squeeze(target, axis=1)
        word_emb = self.target_embedding(target)       # (batch, embed)
        context_emb = self.context_embedding(context)  # (batch, context, embed)
        # Batched dot product over the embedding axis: one logit per context word.
        return tf.einsum('be,bce->bc', word_emb, context_emb)
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Also define a callback to log training statistics for tensorboard.
###Code
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
###Output
_____no_output_____
###Markdown
Train the model with `dataset` prepared above for some number of epochs.
###Code
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
#docs_infra: no_execute
%tensorboard --logdir logs
# Export the learned target-embedding matrix for the Embedding Projector:
# vectors.tsv holds one tab-separated vector per line, metadata.tsv the words.
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
print(np.array(vocab).shape)

# Context managers guarantee both files are closed even if a write fails
# (the original used manual open()/close() and leaked on error).
with io.open('vectors.tsv', 'w', encoding='utf-8') as out_v, \
     io.open('metadata.tsv', 'w', encoding='utf-8') as out_m:
    for index, word in enumerate(vocab):
        if index == 0:
            continue  # index 0 is the padding token; skip it.
        vec = weights[index]
        out_v.write('\t'.join([str(x) for x in vec]) + "\n")
        out_m.write(word + "\n")

# When running in Colab, offer the files for download; elsewhere this is a no-op.
try:
    from google.colab import files
    files.download('vectors.tsv')
    files.download('metadata.tsv')
except Exception:
    pass
###Output
_____no_output_____ |
basic/08. Sharing is Caring with ArcGIS Online and Enterprise.ipynb | ###Markdown
5 Minute Tutorial Series Sharing is Caring on Enterprise and ArcGIS Online Setup the Environment
###Code
import os
import time
import requests
import tempfile
from arcgis.gis import GIS
from arcgis.gis import ProfileManager
###Output
_____no_output_____
###Markdown
Connect to the Organization
###Code
gis = GIS(profile='your_online_profile')
###Output
_____no_output_____
###Markdown
Download Sample Data
###Code
# Download Town of Cary building points (bowling centers) as a zipped shapefile
# and add it to the GIS as a content item; the temp dir is cleaned up on exit.
with tempfile.TemporaryDirectory() as tmpdir:
    url = "https://data.townofcary.org/explore/dataset/building-points/download/?format=shp&disjunctive.building_type=true&disjunctive.building_sub_type=true&disjunctive.bldgstyle=true&disjunctive.yearbuilt=true&disjunctive.storyheight=true&disjunctive.basement=true&disjunctive.utilities=true&refine.building_type=Bowling+Center+with+Snack+Bar+Only&timezone=America/New_York&lang=en"
    fp = os.path.join(tmpdir, 'bowlingal.zip')
    # Stream the download in chunks so the whole zip never sits in memory.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(fp, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
    # Upload must happen inside the with-block, while the temp file exists.
    item = gis.content.add(item_properties={
        'title' : "Wake County Bowling Locations",
        'type' : "Shapefile",
        'tags' : "Town of Cary, North Carolina, business, recreation"
    }, data=fp)
item
pitem = item.publish({'name' : "bowlingplaces"})
pitem
###Output
_____no_output_____
###Markdown
Sharing via Code! With Enterprise and ArcGIS Online you can provide access to your `Items`. Items can be Shared Many Ways `everyone` - if you want to provide zero restriction access to your `Item` then this is for you.- `everyone` - No restriction on who can see it `groups` - `Item` can be assigned shared with groups- This allows users to make content shared to target `group` or areas. `private` - no one can see your `Item` except yourself Sharing Content - By default the sharing is shared with no one
###Code
pitem.shared_with
###Output
_____no_output_____
###Markdown
Sharing with Everyone - to share `Item` with everyone, it shares with `everyone` and `org`
###Code
pitem.share(everyone=True)
pitem.shared_with
###Output
_____no_output_____
###Markdown
Sharing with `Org` Only
###Code
pitem.share(everyone=False, org=True)
pitem.shared_with
###Output
_____no_output_____
###Markdown
Share/Unshare with Groups
###Code
grp = gis.groups.create(
title='sharinggroup',
tags='erase, me',
)
grp
pitem.share(org=False, groups=[grp])
pitem.shared_with
###Output
_____no_output_____
###Markdown
**Unshare the Item**
###Code
pitem.unshare([grp])
pitem.shared_with
assert pitem.delete()
assert item.delete()
###Output
_____no_output_____ |
ipynb/translating_rna_into_protein.ipynb | ###Markdown
Translating RNA into Protein ProblemThe 20 commonly occurring amino acids are abbreviated by using 20 letters from the English alphabet (all letters except for B, J, O, U, X, and Z). Protein strings are constructed from these 20 symbols. Henceforth, the term genetic string will incorporate protein strings along with DNA strings and RNA strings.The [RNA codon table](https://en.wikipedia.org/wiki/Genetic_codeRNA_codon_table) dictates the details regarding the encoding of specific codons into the amino acid alphabet.> **Given:** An RNA string **s** corresponding to a strand of mRNA (of length at most 10 kbp).> **Return:** The protein string encoded by **s**.
###Code
#first, create a dictionary of RNA codon table
# Built programmatically from the standard-table layout: codons are ordered
# U/C/A/G in each position (third base varying fastest), and the matching
# one-letter amino acids read off the string below; '*' marks the three
# STOP codons (UAA, UAG, UGA). NOTE: the name `map` shadows the builtin.
_BASES = "UCAG"
_AMINO_ACIDS = "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG"
map = {}
for _idx, _codon in enumerate(b1 + b2 + b3
                              for b1 in _BASES
                              for b2 in _BASES
                              for b3 in _BASES):
    map[_codon] = "STOP" if _AMINO_ACIDS[_idx] == "*" else _AMINO_ACIDS[_idx]
#define a function to translate codons into proteins
def translate_mrna(mRNA, codon_table=None):
    """Print the protein encoded by an mRNA string (no trailing newline).

    Translation starts at the first AUG codon and stops at the first STOP
    codon, at the end of the string, or before a trailing incomplete codon.
    `codon_table` defaults to the module-level codon dict `map`; passing a
    mapping explicitly makes the function self-contained and testable.
    Returns None (output goes to stdout), matching the original behaviour.
    """
    if codon_table is None:
        codon_table = map  # module-level RNA codon table (shadows the builtin)
    start = mRNA.find("AUG")
    if start == -1:
        # Bug fix: find() returns -1 when no start codon exists; the old code
        # then sliced from index -1 and translated garbage.
        return
    # Bug fix: stop before len(mRNA) - 2 so a trailing 1- or 2-base fragment
    # is never looked up (the old code printed "None" for it).
    for pos in range(start, len(mRNA) - 2, 3):
        amino_acid = codon_table.get(mRNA[pos:pos + 3])
        if amino_acid == "STOP":
            return
        print(amino_acid, end="")
    return
with open("rosalind_prot.txt","r") as mrna_data:
    # Bug fix: str(readlines()) produced the *repr* of a list ("['AUG...\\n']"),
    # injecting brackets, quotes and escape characters into the sequence and
    # shifting the reading frame. Read the raw text and drop all whitespace.
    mRNA = "".join(mrna_data.read().split())
translate_mrna(mRNA)
###Output
_____no_output_____ |
00_PythonBasics/Lec08_Functions.ipynb | ###Markdown
Functions
###Code
import os
from IPython.core.display import HTML
def load_style(directory = '../', name='customMac.css'):
    """Read a CSS file and return it as an IPython HTML object for display.

    Bug fix: the original leaked the file handle (open().read() with no
    close); `with` guarantees it is closed even if reading fails.
    """
    with open(os.path.join(directory, name), 'r') as style_file:
        return HTML(style_file.read())
load_style()
###Output
_____no_output_____
###Markdown
Often, the same statements are repeated in an algorithm; executing them again and again by hand is tedious, consumes memory, and is inefficient. Enter functions. This is the basic syntax of a function def funcname(arg1, arg2,... argN): ''' Document String''' statements return Read the above syntax as: a function named "funcname" is defined, which accepts the arguments "arg1, arg2, ..., argN". The function is documented with '''Document String'''. After executing its statements, the function returns a value.
###Code
print("Hey Rajath!")
print("Rajath, How do you do?")
###Output
Hey Rajath!
Rajath, How do you do?
###Markdown
Instead of writing the above two statements every single time it can be replaced by defining a function which would do the job in just one line. Defining a function firstfunc().
###Code
def firstfunc():
    """Print the tutorial's fixed two-line greeting."""
    for greeting_line in ("Hey Rajath!", "Rajath, How do you do?"):
        print(greeting_line)
firstfunc()
###Output
Hey Rajath!
Rajath, How do you do?
###Markdown
**firstfunc()** every time just prints the message to a single person. We can make our function **firstfunc()** to accept arguements which will store the name and then prints respective to that accepted name. To do so, add a argument within the function as shown.
###Code
def firstfunc(username):
    """Print the two-line greeting addressed to *username*."""
    print("Hey " + username + "!")
    print(username + ", How do you do?")
name1 = input('Please enter your name : ')
###Output
Please enter your name : 2
###Markdown
The entered name is now stored in name1. We pass this variable to **firstfunc()** as the argument username, because that is the parameter defined for this function — i.e. name1 is passed as username.
###Code
firstfunc(name1)
###Output
Hey 2!
2, How do you do?
###Markdown
Let us simplify this even further by defining another function **secondfunc()** which accepts the name and stores it inside a variable and then calls the **firstfunc()** from inside the function itself.
###Code
def firstfunc(username):
    """Print the two-line greeting addressed to *username*."""
    print("Hey " + username + "!")
    print(username + ", How do you do?")

def secondfunc():
    """Prompt for a name on stdin, then greet that name via firstfunc()."""
    firstfunc(input("Please enter your name : "))
secondfunc()
###Output
Please enter your name : rahim
Hey rahim!
rahim, How do you do?
###Markdown
Return Statement When the function results in some value and that value has to be stored in a variable or needs to be sent back or returned for further operation to the main algorithm, return statement is used.
###Code
def times(x, y):
    """Return the product of the two arguments."""
    return x * y
###Output
_____no_output_____
###Markdown
The **times( )** function defined above accepts two arguments and returns the variable z, which contains the product of the two arguments
###Code
c = times(4,5)
print(c)
###Output
20
###Markdown
The z value is stored in variable c and can be used for further operations. Instead of declaring another variable the entire statement itself can be used in the return statement as shown.
###Code
def times(x,y):
    '''This multiplies the two input arguments'''
    return x*y

c = times(4,5)
# Fixed for Python 3: `print c` is Python-2-only statement syntax
# (earlier cells in this notebook already use the print() function).
print(c)
###Output
20
###Markdown
Since the **times( )** is now defined, we can document it as shown above. This document is returned whenever **times( )** function is called under **help( )** function.
###Code
help(times)
###Output
Help on function times in module __main__:
times(x, y)
This multiplies the two input arguments
###Markdown
Multiple variables can also be returned, but keep in mind the order.
###Code
# Sample list used by the cells below.
eglist = [10,50,30,12,6,8,100]
def egfunc(eglist):
    """Return a tuple (highest, lowest, first, last) for the given list."""
    return max(eglist), min(eglist), eglist[0], eglist[-1]
###Output
_____no_output_____
###Markdown
If the function is just called without any variable for it to be assigned to, the result is returned inside a tuple. But if the variables are mentioned then the result is assigned to the variable in a particular order which is declared in the return statement.
###Code
egfunc(eglist)
a,b,c,d = egfunc(eglist)
print ' a =',a,'\n b =',b,'\n c =',c,'\n d =',d
###Output
a = 100
b = 6
c = 10
d = 100
###Markdown
Implicit arguments When an argument of a function takes the same value in the majority of cases — that is, it is "implicit" — this concept is used.
###Code
def implicitadd(x, y=3):
    """Return x + y, where y defaults to the commonly needed value 3."""
    return x + y
###Output
_____no_output_____
###Markdown
**implicitadd( )** is a function accepts two arguments but most of the times the first argument needs to be added just by 3. Hence the second argument is assigned the value 3. Here the second argument is implicit. Now if the second argument is not defined when calling the **implicitadd( )** function then it considered as 3.
###Code
implicitadd(4)
###Output
_____no_output_____
###Markdown
But if the second argument is specified then this value overrides the implicit value assigned to the argument
###Code
implicitadd(4,4)
###Output
_____no_output_____
###Markdown
Any number of arguments If the number of arguments that a function should accept is not known in advance, then an asterisk symbol is used before the argument.
###Code
def add_n(*args):
    """Print the collected arguments as a list, then return their sum."""
    reslist = list(args)
    # Fixed for Python 3: `print reslist` was Python-2-only statement syntax.
    # (Also dropped the unused local `res` from the original.)
    print(reslist)
    return sum(reslist)
###Output
_____no_output_____
###Markdown
The above function accepts any number of arguments, defines a list and appends all the arguments into that list and return the sum of all the arguments.
###Code
add_n(1,2,3,4,5)
add_n(1,2,3)
###Output
[1, 2, 3]
###Markdown
Global and Local Variables A variable declared inside a function is a local variable; one declared outside the function is a global variable.
###Code
eg1 = [1,2,3,4,5]
###Output
_____no_output_____
###Markdown
In the below function we are appending a element to the declared list inside the function. eg2 variable declared inside the function is a local variable.
###Code
def egfunc1():
    """Demonstrate local scope: eg2 exists only inside thirdfunc."""
    def thirdfunc(arg1):
        # eg2 is a *local* copy of the argument; the caller's list is untouched.
        eg2 = arg1[:]
        eg2.append(6)
        print "This is happening inside the function :", eg2
    print "This is happening before the function is called : ", eg1
    thirdfunc(eg1)
    print "This is happening outside the function :", eg1
    # NOTE(review): eg2 is local to thirdfunc, so this line fails by design —
    # that is the point of the demo (Python-2 print-statement syntax throughout).
    print "Accessing a variable declared inside the function from outside :" , eg2
egfunc1()
###Output
This is happening before the function is called : [1, 2, 3, 4, 5]
This is happening inside the function : [1, 2, 3, 4, 5, 6]
This is happening outside the function : [1, 2, 3, 4, 5]
Accessing a variable declared inside the function from outside :
###Markdown
If a **global** variable is defined as shown in the example below then that variable can be called from anywhere.
###Code
# NOTE(review): eg3 is defined here but the demo below still operates on eg1.
eg3 = [1,2,3,4,5]
def egfunc1():
    """Demonstrate `global`: eg2 declared global is visible after the call."""
    def thirdfunc(arg1):
        # `global` makes eg2 a module-level name instead of a local.
        global eg2
        eg2 = arg1[:]
        eg2.append(6)
        print "This is happening inside the function :", eg2
    print "This is happening before the function is called : ", eg1
    thirdfunc(eg1)
    print "This is happening outside the function :", eg1
    # Unlike the previous cell, eg2 is now global, so this succeeds.
    print "Accessing a variable declared inside the function from outside :" , eg2
egfunc1()
###Output
This is happening before the function is called : [1, 2, 3, 4, 5]
This is happening inside the function : [1, 2, 3, 4, 5, 6]
This is happening outside the function : [1, 2, 3, 4, 5]
Accessing a variable declared inside the function from outside : [1, 2, 3, 4, 5, 6]
###Markdown
Lambda Functions These are small functions which are not defined with any name and carry a single expression whose result is returned. Lambda functions comes very handy when operating with lists. These function are defined by the keyword **lambda** followed by the variables, a colon and the respective expression.
###Code
z = lambda x: x * x
z(8)
###Output
_____no_output_____
###Markdown
map **map( )** function basically executes the function that is defined to each of the list's element separately.
###Code
list1 = [1,2,3,4,5,6,7,8,9]
eg = map(lambda x:x+2, list1)
print eg
###Output
[3, 4, 5, 6, 7, 8, 9, 10, 11]
###Markdown
You can also add two lists.
###Code
list2 = [9,8,7,6,5,4,3,2,1]
eg2 = map(lambda x,y:x+y, list1,list2)
print eg2
###Output
[10, 10, 10, 10, 10, 10, 10, 10, 10]
###Markdown
Not only lambda function but also other built in functions can also be used.
###Code
eg3 = map(str,eg2)
print eg3
###Output
['10', '10', '10', '10', '10', '10', '10', '10', '10']
###Markdown
filter **filter( )** function is used to filter out the values in a list. Note that **filter()** function returns the result in a new list.
###Code
list1 = [1,2,3,4,5,6,7,8,9]
###Output
_____no_output_____
###Markdown
To get the elements which are less than 5,
###Code
filter(lambda x:x<5,list1)
###Output
_____no_output_____
###Markdown
Notice what happens when **map()** is used.
###Code
map(lambda x:x<5, list1)
###Output
_____no_output_____
###Markdown
We can conclude that, whatever is returned true in **map( )** function that particular element is returned when **filter( )** function is used.
###Code
filter(lambda x:x%4==0,list1)
###Output
_____no_output_____ |
homology_searches/DNA_6mA_methylation_machinery_homology_search.ipynb | ###Markdown
Notebook for homology searches for DNA methylation machinery
This requires Java 11 on the path and should be run, for example, only in the pycoMeth environment
import os
from Bio import SeqIO
import pandas as pd
import numpy as np
import re
notebook_path = os.path.abspath(".")
IN_DIR = os.path.abspath('../../analyses/methylation_machinery/')
OUT_DIR = os.path.abspath('../../analyses/methylation_machinery/')
GENOME_DIR = os.path.abspath('../../data/genomic_resources/')
Pgt_protein_fn = os.path.abspath('../../data/genomic_resources/Puccinia_graminis_tritici_21-0.proteins.fa')
SizmA_seeds_fn = os.path.abspath('../../analyses/methylation_machinery/6mA_methylation_demethyltion_query.fasta')
FivemC_seeds_fn = SizmA_seeds_fn
n_threads = 20
blast_outfmt6_headers = "qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore".split(' ')
###write a function that takes the interpro TSV and returns a dict of InterPro accessions per protein
def interpro_accession_dict(fn):
    """Return {protein_id: array of unique InterPro accessions} from an InterProScan TSV.

    Rows with any missing field (e.g. hits without an InterPro annotation)
    are dropped before grouping.
    """
    columns = ['P-ID', 'md5', 'len', 'analysis', 'accession', 'description', 'start',
               'stop', 'score', 'status', 'date', 'Interpro_accession',
               'Interpro_description']
    annotations = pd.read_csv(fn, sep='\t', header=None, names=columns).dropna()
    per_protein = annotations.groupby('P-ID')['Interpro_accession'].unique()
    return dict(zip(per_protein.index, per_protein))
###write a function that takes the interpro TSV and returns a dict of domains for a specific search engine
def interpro_analysis_dict(fn, analysis):
    """Return {protein_id: array of unique accessions} restricted to one analysis tool.

    Only rows whose `analysis` column equals *analysis* (e.g. 'Pfam') are
    kept; rows with missing fields are dropped first.
    """
    columns = ['P-ID', 'md5', 'len', 'analysis', 'accession', 'description', 'start',
               'stop', 'score', 'status', 'date', 'Interpro_accession',
               'Interpro_description']
    annotations = pd.read_csv(fn, sep='\t', header=None, names=columns).dropna()
    selected = annotations[annotations.analysis == analysis].groupby('P-ID')
    per_protein = selected['accession'].unique()
    return dict(zip(per_protein.index, per_protein))
###Output
_____no_output_____
###Markdown
Here the blast analysis starts
###Code
os.chdir(OUT_DIR)
# Build a BLAST protein database from the Pgt proteome (notebook shell escape).
!makeblastdb -dbtype prot -in {Pgt_protein_fn}
#define file names
SixmA_outfmt_6_fn = 'Puccinia_graminis_tritici_21-0.proteins.6mA_methylation_demethyltion_query.blastp.outfmt6'
SixmA_outfmt_6_fn = os.path.join(OUT_DIR, SixmA_outfmt_6_fn)
# The 5mC and 6mA searches share one seed file here, so the outputs coincide.
FivemC_outfmt_6_fn = SixmA_outfmt_6_fn
#run blast (tabular outfmt 6; 20 threads)
!blastp -num_threads 20 -outfmt 6 -query {SizmA_seeds_fn} -db {Pgt_protein_fn} > {SixmA_outfmt_6_fn}
!head {SixmA_outfmt_6_fn}
###Output
sp|Q09956|DAMT1_CAEEL PGT21_005363-T1 30.769 208 113 9 160 339 253 457 4.65e-14 73.2
sp|Q09956|DAMT1_CAEEL PGT21_004684-T1 30.288 208 114 9 160 339 229 433 5.31e-13 69.7
sp|Q09956|DAMT1_CAEEL PGT21_023926-T1 32.727 55 34 2 161 212 8 62 1.7 30.0
sp|Q09956|DAMT1_CAEEL PGT21_024228-T1 30.435 46 32 0 209 254 469 514 3.8 29.6
sp|Q09956|DAMT1_CAEEL PGT21_033706-T1 30.909 55 35 2 161 212 249 303 8.0 28.5
sp|Q09956|DAMT1_CAEEL PGT21_003002-T1 32.857 70 41 2 257 325 35 99 8.5 28.5
sp|Q09956|DAMT1_CAEEL PGT21_027835-T2 37.500 32 17 1 322 350 647 678 8.7 28.5
sp|Q09956|DAMT1_CAEEL PGT21_014712-T1 29.592 98 52 4 234 323 275 363 9.4 28.1
sp|Q9Y5N5|N6MT1_HUMAN PGT21_005253-T1 44.186 215 109 6 8 213 4 216 1.21e-50 163
sp|Q9Y5N5|N6MT1_HUMAN PGT21_005713-T1 44.186 215 109 6 8 213 4 216 1.27e-50 163
###Markdown
Downstream filtering of blast resutls
###Code
FivemC_blast_df = pd.read_csv(FivemC_outfmt_6_fn, header = None, names=blast_outfmt6_headers, sep='\t' )
FivemC_blast_df.head()
#filtering of blast_df
# Stringent cut-off (evalue < 1e-10) to count strong hits per query seed.
FivemC_stringent_blast_df = FivemC_blast_df[FivemC_blast_df.evalue < 1e-10].copy()
FivemC_stringent_blast_df.groupby('qseqid')['sseqid'].count()
FivemC_stringent_blast_df.sseqid.unique()
# Which query seeds found no hit at the stringent threshold?
FivemC_seeds_ids = []
for seq in SeqIO.parse(FivemC_seeds_fn, 'fasta'):
    FivemC_seeds_ids.append(seq.id)
not_present = set(FivemC_seeds_ids) - set(FivemC_stringent_blast_df.qseqid.unique())
not_present
# Same check at the relaxed 1e-2 threshold (notebook display).
set(FivemC_seeds_ids) - set(FivemC_blast_df[FivemC_blast_df.evalue < 1e-2].qseqid.unique())
##pull out fasta sequence of all the hits
# Relaxed threshold (evalue < 0.01) for extracting candidate protein sequences.
e_value = 0.01
FivemC_Pgt_protein_hit_fn = 'Puccinia_graminis_tritici_21-0.proteins.6mA_methylation_demethyltion_query.blastp-%s.fasta' % e_value
FivemC_Pgt_protein_hit_fn = os.path.join(OUT_DIR, FivemC_Pgt_protein_hit_fn)
blast_df = FivemC_blast_df
###get all the hits once and subset the blast with the e-value selected
hit_ids = blast_df[blast_df.evalue < e_value].sseqid.unique()
hit_list = []
sub_blast_df = blast_df[blast_df.evalue < e_value].copy()
for seq in SeqIO.parse(Pgt_protein_fn, 'fasta'):
    if seq.id in hit_ids:
        print(seq.id)
        hit_list.append(seq)
SeqIO.write(hit_list, FivemC_Pgt_protein_hit_fn, 'fasta')
sub_blast_df
###Output
_____no_output_____
###Markdown
Pull in haplotype information
###Code
# Scan the Pgt GFF3 and record, for every BLAST hit, the haplotype letter
# encoded as the last character of the contig name in column 1.
pgt_gff3_fn = os.path.join('../../data/genomic_resources/Puccinia_graminis_tritici_21-0.gff3')
with open(pgt_gff3_fn, 'r') as fh:
    haplotype_dict = {}
    for line in fh:
        line = line.rstrip()
        if any(s in line for s in hit_ids):
            for hit in hit_ids:
                if hit in line:
                    haplotype_dict[hit] = line.split('\t')[0][-1]
# Sanity check: did every hit get a haplotype? (value only displayed, not asserted)
len(haplotype_dict.values()) == len(hit_ids)
sub_blast_df['shaplotype'] = sub_blast_df.sseqid.map(haplotype_dict)
#get the locus id for loci with multiple transcripts
sub_blast_df['sseqid_locus'] = [x.split('-')[0] for x in sub_blast_df.sseqid]
#only keep the transcript with the best hit (rows are in BLAST order, best first)
sub_blast_df.drop_duplicates(['qseqid', 'sseqid_locus'], keep='first', inplace = True)
###Output
_____no_output_____
###Markdown
Do Interpro scan on command line
###Code
# Paths for the local InterProScan 5 installation and its TSV outputs.
interpro5 = '/home/jamila/anaconda3/downloads/interproscan-5.42-78.0/interproscan.sh'
TMP_DIR = os.path.join(OUT_DIR, 'tmp')
if not os.path.exists(TMP_DIR):
    os.mkdir(TMP_DIR)
# One output file per input FASTA: the Pgt hit proteins and the seed proteins.
Pgt_protein_hit_intrpro_fn = os.path.join(TMP_DIR, os.path.basename(FivemC_Pgt_protein_hit_fn).replace('.fasta', '.interpro5.tsv'))
FivemC_seeds_intrpro_fn = os.path.join(TMP_DIR, os.path.basename(FivemC_seeds_fn).replace('.fasta', '.interpro5.tsv'))
###Output
_____no_output_____
###Markdown
Run interpro on both set of protein files
###Code
# Run InterProScan on both protein sets, annotate the BLAST table with
# Pfam/InterPro domain overlap between query and hit, keep the best-supported
# hit per query/per subject, and write the final ortholog table.
!head {FivemC_Pgt_protein_hit_fn}
!bash {interpro5} -cpu 4 -i {FivemC_Pgt_protein_hit_fn} -f tsv -iprlookup -o {Pgt_protein_hit_intrpro_fn}
!bash {interpro5} -cpu 4 -i {FivemC_seeds_fn} -f tsv -iprlookup -o {FivemC_seeds_intrpro_fn}
#pull in interpro results and add them to the dataframe
sub_blast_df['q_pfam'] = sub_blast_df.qseqid.map(interpro_analysis_dict(FivemC_seeds_intrpro_fn, 'Pfam'))
sub_blast_df['q_interpro'] = sub_blast_df.qseqid.map(interpro_accession_dict(FivemC_seeds_intrpro_fn))
sub_blast_df['s_pfam'] = sub_blast_df.sseqid.map(interpro_analysis_dict(Pgt_protein_hit_intrpro_fn, 'Pfam'))
sub_blast_df['s_interpro'] = sub_blast_df.sseqid.map(interpro_accession_dict(Pgt_protein_hit_intrpro_fn))
#replace NaN with empty lists for proteins without interpro/pfam domains,
#because pandas does not accept a bare list on the right-hand side here.
for cln in ['q_pfam', 'q_interpro', 's_pfam','s_interpro']:
    if sub_blast_df[cln].isna().sum():
        sub_blast_df.loc[sub_blast_df[sub_blast_df[cln].isna()].index, cln] = [ [[]] * sub_blast_df[cln].isna().sum() ]
#calculate the fraction of overlapping interpro/pfam domains between query sequences and hits
sub_blast_df['pfam_int'] = sub_blast_df.apply(lambda row: set(row['q_pfam']).intersection(set(row['s_pfam'])) , axis=1)
sub_blast_df['pfam_int_frac'] = sub_blast_df['pfam_int'].apply(lambda x: len(x)) / sub_blast_df['q_pfam'].apply(lambda x: len(x))
sub_blast_df['interpro_int'] = sub_blast_df.apply(lambda row: set(row['q_interpro']).intersection(set(row['s_interpro'])) , axis=1)
sub_blast_df['interpro_int_frac'] = sub_blast_df['interpro_int'].apply(lambda x: len(x)) / sub_blast_df['q_interpro'].apply(lambda x: len(x))
sub_blast_df.iloc[:,[0,1,10, 17, 18,19]].head(30)
#filter the dataframe to have only hits that have the best possible interpro domains fractions
pfam_filt_df = sub_blast_df[sub_blast_df.groupby('qseqid')['interpro_int_frac'].transform(max) == sub_blast_df['interpro_int_frac']]
##look at how many hits per query sequence are still left
pfam_filt_df.groupby('qseqid')['sseqid'].count()
best_sseq_df = pfam_filt_df[pfam_filt_df.groupby('sseqid')['interpro_int_frac'].transform(max) == pfam_filt_df['interpro_int_frac']]
pgt_match_list = []
DNA_seed_list = []
# NOTE(review): haplotype_list is never appended to below, so the 'haplotype'
# column of the output is empty (visible in the printed table) — confirm
# whether it should be filled from haplotype_dict.
haplotype_list = []
match_type_list = []
# Drop secondary transcripts (-T2) and record the surviving matches.
for seed_gene, pgt_gene in zip(best_sseq_df.qseqid, best_sseq_df.sseqid):
    if not pgt_gene.endswith('-T2'):
        DNA_seed_list.append(seed_gene)
        pgt_match_list.append(pgt_gene)
        match_type_list.append('blast')
pgt_match_series = pd.Series(pgt_match_list, name="Pgt_match")
DNA_seed_series = pd.Series(DNA_seed_list, name='Seed_ID')
haplotype_series = pd.Series(haplotype_list, name='haplotype')
match_type_series = pd.Series(match_type_list, name='Match_type')
out_df = pd.concat([DNA_seed_series, pgt_match_series, haplotype_series, match_type_series], axis =1)
out_fn = os.path.join(OUT_DIR, '%s_orthologs.Pgt21-0.tsv' %os.path.basename(FivemC_seeds_fn).replace('.fasta', '') )
out_df.to_csv(out_fn, sep='\t', index=None)
!head {out_fn}
###Output
Seed_ID Pgt_match haplotype Match_type
sp|Q09956|DAMT1_CAEEL PGT21_005363-T1 blast
sp|Q09956|DAMT1_CAEEL PGT21_004684-T1 blast
sp|Q9Y5N5|N6MT1_HUMAN PGT21_005713-T1 blast
sp|Q13686|ALKB1_HUMAN PGT21_022089-T1 blast
sp|Q13686|ALKB1_HUMAN PGT21_022364-T1 blast
|
notebooks_workflow_complete/99_General_MF6_Export_and_Diagnostics.ipynb | ###Markdown
Load up the MF6 model
###Code
# Load the MODFLOW 6 simulation from the workspace and grab its model object.
sim = fp.mf6.MFSimulation.load(simname, 'mf6', sim_ws=model_ws)
m = sim.get_model()
###Output
_____no_output_____
###Markdown
export some GIS treats
###Code
# Build an export grid georeferenced to EPSG:5070 and export model inputs,
# SFR results and heads as GIS products.
grid = mfexport.MFexportGrid(delr=m.dis.delr.array, # grid spacing in meters
                             delc=m.dis.delc.array ,
                             xul=1742955.0, yul=2292285.0, # upper left corner in CRS
                             epsg=5070
                             )
mfexport.export(m, grid, output_path=outpath)
# added sfr export -- SFR export not supported using the method above.
mf6_sfr_stage_file=os.path.join(model_ws,'neversink.sfr.stage.bin')
mf6_sfr_budget_file=os.path.join(model_ws,'neversink.sfr.cbc')
outfiles = mfexport.export_sfr_results(mf6_sfr_stage_file=mf6_sfr_stage_file,
                                       mf6_sfr_budget_file=mf6_sfr_budget_file,
                                       model=m,
                                       grid=grid,
                                       output_length_units='meters',
                                       output_time_units='days',
                                       output_path=outpath
                                       )
headsfile = os.path.join(model_ws,'neversink.hds')
mfexport.export_heads(headsfile,
                      grid,
                      hdry=m.hnoflo,
                      hnflo=m.hnoflo,
                      kstpkper=(0, 0), # steady state one stress period
                      output_path=outpath,
                      interval=20, # meters
                      )
###Output
_____no_output_____
###Markdown
Examine the mass balance overall
###Code
# Read the listing-file budget, check the percent discrepancy, then plot the
# major water-balance components (OUT terms negated so they plot downward).
df_flux, _ = fu.Mf6ListBudget(os.path.join(model_ws,'neversink.list')).get_dataframes()
percdisc = df_flux.PERCENT_DISCREPANCY.values[0]
#budget_df = pd.DataFrame({'obs':percdisc})
#budget_df.index = ['PERC_DISC']
#budget_df
percdisc
mfl6 = fu.Mf6ListBudget(os.path.join(model_ws,'neversink.list'))
df_flux, df_vol = mfl6.get_dataframes()
# Keep an untouched copy so the sign-flip below can be re-run from scratch.
df_flux_backup = df_flux.copy()
df_flux = df_flux_backup.copy()
df_flux
outcols = [i for i in df_flux.columns if "OUT" in i]
for i in outcols:
    df_flux[i] *= -1
df_flux
# Exclude totals, percent discrepancy and the always-zero WEL_IN / RCH_OUT terms.
df_flux[[i for i in df_flux.columns if ("PERCE" not in i) & ("WEL_IN" not in i) & ("RCH_OUT" not in i) & ("TOT" not in i) & ("N-O" not in i) ]].plot.bar()
plt.xticks([])
plt.xlabel('Water Balance Component')
plt.ylabel('Flux, in cubic meters')
plt.axhline(0, linewidth=0.5, c='k')
###Output
_____no_output_____
###Markdown
top of model
###Code
# Plot the model top, masking the large negative no-data values.
plt.figure(figsize=(6,6))
top = m.dis.top.array.copy()
top[top<-999] = np.nan
plt.imshow(top)
plt.colorbar()
###Output
_____no_output_____
###Markdown
load up the heads
###Code
# Load simulated heads, derive the water table, and plot heads per layer.
sim.simulation_data.mfdata.output_keys()
# steady state so get rid of time dimension
heads = np.squeeze(sim.simulation_data.mfdata['neversink', 'HDS', 'HEAD'])
heads2 = heads.copy()
plt.imshow(heads[3])
plt.colorbar()
m.hnoflo
wt = fu.postprocessing.get_water_table(heads, m.hnoflo)
wt[wt==m.hnoflo] = np.nan
plt.imshow(wt)
plt.colorbar()
# mask out the inactive areas
heads[heads==m.hnoflo] = np.nan
#heads[heads<-100] = np.nan
for i in range(m.dis.nlay.array):
    plt.figure(figsize=(6,6))
    plt.imshow(heads[i,:,:])
    plt.colorbar()
    plt.title('heads layer {}'.format(i))
# NOTE(review): this second loop duplicates the one above but ranges over
# m.npf.k.array (the K array, not a layer count) and still plots heads —
# looks like leftover scratch; confirm intent or remove.
for i in range(m.npf.k.array):
    plt.figure(figsize=(6,6))
    plt.imshow(heads[i,:,:])
    plt.colorbar()
    plt.title('heads layer {}'.format(i))
# NOTE(review): stray incomplete expression — 'm.' is a syntax error and
# should be deleted.
m.
# Flooded cells: water table above land surface. Plot the excess head, save a
# PDF figure, then write the array as a georeferenced GeoTIFF (50 m cells,
# EPSG:5070, -9999 as nodata).
flooded = wt-m.dis.top.data
flooded[flooded<0] = np.nan
#flooded[flooded>10] = 10
plt.figure(figsize=(10,10))
plt.imshow(flooded)
plt.title('Excess head in flooded cells, in meters')
plt.colorbar()
plt.savefig('flooding_rep_model.pdf', dpi=600)
# Replace NaN with the raster nodata value before writing.
flooded[np.isnan(flooded)]=-9999.
outfile = 'flooding.tif'
raster_outpath = os.path.join(outpath, outfile)
dataset = rasterio.open(
    raster_outpath,
    'w',
    driver='GTiff',
    height = flooded.shape[0],
    width = flooded.shape[1],
    count=1,
    nodata = -9999.0,
    dtype=str(flooded.dtype),
    crs = CRS.from_epsg(5070),
    transform=Affine(50.0, 0.0, 1742955.0,
                     0.0, -50.0, 2292285.0),
)
dataset.write(flooded, 1)
dataset.close()
# Borrow CRS/transform/nodata metadata from an existing K raster and write the
# flooded-cells array with that georeferencing; then plot flooding per layer.
with rasterio.open('../k_processing/V2_Layer{}_K.tif'.format(1)) as src:
    crs = src.crs
    meta = src.meta
    nodata = meta['nodata']
    # Trailing comma makes K_tuple a 1-tuple; the array is unpacked just below.
    K_tuple = src.read(1, masked=True),
    K_data = K_tuple[0].data.astype(float)
crs
newK = K_data.copy()
outfile = os.path.join(outpath,'rasters/flooded_cells.tif')
# NOTE(review): dimensions/dtype come from newK but the band written below is
# `flooded` — this only works if both arrays share shape and dtype; confirm.
dataset = rasterio.open(
    outfile,
    'w',
    driver=meta['driver'],
    height = newK.shape[0],
    width = newK.shape[1],
    count=1,
    nodata = nodata,
    dtype=str(newK.dtype),
    crs = crs,
    compress='lzw',
    transform=meta['transform']
)
dataset.write(flooded, 1)
dataset.close()
# Per-layer flooding maps (head above the model top).
for i in range(m.dis.nlay.array):
    plt.figure(figsize=(6,6))
    flooded = heads[i,:,:]-m.dis.top.array
    flooded[flooded < 0]=np.nan
    plt.imshow(flooded)
    plt.colorbar()
    plt.title('flooded layer {}'.format(i))
###Output
_____no_output_____
###Markdown
checking out idomain
###Code
# Plot the idomain (active/inactive cell) array for every layer.
idm = m.dis.idomain.array
for i in range(m.dis.nlay.array):
    plt.figure(figsize=(6,6))
    plt.imshow(idm[i,:,:])
    plt.colorbar()
    plt.title('idm layer {}'.format(i))
###Output
_____no_output_____
###Markdown
Plot up the model bottoms
###Code
# Plot layer bottoms, masking the large negative no-data values.
botm = m.dis.botm.array
botm[botm<-999] = np.nan
for i in range(m.dis.nlay.array):
    plt.figure(figsize=(6,6))
    plt.imshow(botm[i,:,:])
    plt.colorbar()
    plt.title('botm layer {}'.format(i))
###Output
_____no_output_____
###Markdown
Plot up the model thicknesses
###Code
# Layer thicknesses: top minus first bottom, then successive bottom differences.
botm[botm==-9999] = np.nan
thick=[]
thick.append(m.dis.top.array - m.dis.botm.array[0])
# NOTE(review): hard-coded 3 assumes exactly 4 layers — consider
# range(m.dis.nlay.array - 1) to stay in sync with the discretization.
for i in range(3):
    thick.append(m.dis.botm.array[i]-m.dis.botm.array[i+1])
for i in range(m.dis.nlay.array):
    plt.figure(figsize=(6,6))
    plt.imshow(thick[i])
    plt.colorbar()
    plt.title('thick layer {}'.format(i))
# Highlight only thick (>= 30 m) cells in the bottom layer.
thick[-1][thick[-1]<30] = np.nan
plt.imshow(thick[-1])
plt.colorbar()
###Output
_____no_output_____ |
notebooks/ann-text-processing-test.ipynb | ###Markdown
https://www.machinelearningplus.com/nlp/lemmatization-examples-python/
###Code
#Input your PostGres credentials to connect
# Credentials are intentionally left blank in the committed notebook.
dbname = ''
username = ''
host = ''
password = ''
conn = psycopg2.connect('dbname={} user={} host={} password={}'.format(dbname, username, host, password))
cur = conn.cursor()
# Pull a small sample of reviews into a DataFrame for the cleaning experiments.
cur = conn.cursor()
cur.execute("""
SELECT * FROM review LIMIT 100
""")
cols = ['review_id', 'user_id', 'business_id', 'stars', 'review_date', 'review_text', 'useful', 'funny', 'cool']
review_sample = pd.DataFrame(cur.fetchall(), columns=cols)
review_sample
print(review_sample.loc[24, 'review_text'])
###Output
_____no_output_____
###Markdown
Contractions:https://gist.github.com/J3RN/ed7b420a6ea1d5bd6d06
###Code
def _create_stop_words():
    """Build the stop-word list used for review cleaning.

    Starts from NLTK's English stop words, keeps negation terms OUT of the
    stop list (so negations survive filtering), then adds common
    non-negated contractions plus '' and 'us'. The returned list comes from
    a set, so its order is unspecified — identical to the original behaviour.
    """
    negation_terms = {
        'no', 'nor', 'not', 'don', "don't", 'ain', 'aren', "aren't",
        'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't",
        'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't",
        'isn', "isn't", 'mightn', "mightn't", 'mustn', "mustn't",
        'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't",
        'wasn', "wasn't", 'weren', "weren't", "won'", "won't",
        'wouldn', "wouldn't", 'but', "don'", "ain't",
    }
    nonneg_contractions = [
        "could've", "he'd", "he'd've", "he'll", "he's", "how'd", "how'll",
        "how's", "i'd", "i'd've", "i'll", "i'm", "i've", "it'd", "it'd've",
        "it'll", "it's", "let's", "ma'am", "might've", "must've", "o'clock",
        "'ow's'at", "she'd", "she'd've", "she'll", "she's", "should've",
        "somebody'd", "somebody'd've", "somebody'll", "somebody's",
        "someone'd", "someone'd've", "someone'll", "someone's",
        "something'd", "something'd've", "something'll", "something's",
        "that'll", "that's", "there'd", "there'd've", "there're", "there's",
        "they'd", "they'd've", "they'll", "they're", "they've", "'twas",
        "we'd", "we'd've", "we'll", "we're", "we've", "what'll", "what're",
        "what's", "what've", "when's", "where'd", "where's", "where've",
        "who'd", "who'd've", "who'll", "who're", "who's", "who've",
        "why'll", "why're", "why's", "would've", "y'all", "y'all'll",
        "y'all'd've", "you'd", "you'd've", "you'll", "you're", "you've",
    ]
    base = set(nltk.corpus.stopwords.words('english')) - negation_terms
    combined = base | set(nonneg_contractions) | {"", 'us'}
    return list(combined)
_create_stop_words()
#Turning this into text, as it's the less efficient code
"""
def _remove_stops(tokens):
stops = nltk.corpus.stopwords.words('english')
neg_stops = [
'no', 'not', 'nor', 'don\'', 'don\'t', 'ain',
'ain\'t', 'aren\'t', 'aren', 'couldn', 'couldn\'t',
'didn', 'didn\'t', 'doesn', 'doesn\'t', 'hadn',
'hadn\'t', 'hasn', 'hasn\'t', 'haven', 'haven\'t',
'isn', 'isn\'t', 'mightn', 'mightn\'t', 'mustn',
'mustn\'t', 'needn', 'needn\'t', 'shan', 'shan\'t',
'shouldn', 'shouldn\'t', 'wasn', 'wasn\'t', 'weren',
'weren\'t', 'won', 'won\'t', 'wouldn', 'wouldn\'t'
]
#still leaves in but and don.. fix this..
#doesn't get rid of other obvious stopwords, like i'm, they're....
for x in neg_stops:
if x in stops:
stops.remove(x)
tokens_without_stops = [token for token in tokens if token not in stops]
return tokens_without_stops
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].lower()
tag_dict = {"a": wordnet.ADJ,
"n": wordnet.NOUN,
"v": wordnet.VERB,
"r": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
#I think I adjusted this properly to remove digits
#however for phrases that are hyphenated, like chilled-to-the-bone, the code returns the token 'chilledtothebone.'
#This will need to be fixed
def _clean_review(text):
text = text.lower()
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
text = re.sub(r"[^A-Za-z\s']", '', text)
tokens = text.split()
for i, token in enumerate(tokens):
tokens[i] = wnl.lemmatize(token, pos= get_wordnet_pos(token))
return tokens
def _process_review(text):
tokens = _remove_stops(_clean_review(text))
return tokens
"""
# Build the stop-word list for the *_clean_review2 experiments: NLTK English
# stop words minus negation terms, so negations survive the cleaning step.
stops = nltk.corpus.stopwords.words('english')
neg_stops = [
    'no', 'not', 'nor', 'don\'', 'don\'t', 'ain',
    'ain\'t', 'aren\'t', 'aren', 'couldn', 'couldn\'t',
    'didn', 'didn\'t', 'doesn', 'doesn\'t', 'hadn',
    'hadn\'t', 'hasn', 'hasn\'t', 'haven', 'haven\'t',
    'isn', 'isn\'t', 'mightn', 'mightn\'t', 'mustn',
    'mustn\'t', 'needn', 'needn\'t', 'shan', 'shan\'t',
    'shouldn', 'shouldn\'t', 'wasn', 'wasn\'t', 'weren',
    'weren\'t', 'won', 'won\'t', 'wouldn', 'wouldn\'t'
]
# Only remove negation terms that are actually present in the NLTK list.
# (Known gaps, per the original notes: 'but', 'don' and common contractions
# such as "i'm"/"they're" are still treated as stop words here.)
for x in neg_stops:
    if x in stops:
        stops.remove(x)


def get_wordnet_pos(word):
    """Map a word's NLTK POS tag onto WordNet's POS constants (noun default)."""
    tag = nltk.pos_tag([word])[0][1][0].lower()
    tag_dict = {"a": wordnet.ADJ,
                "n": wordnet.NOUN,
                "v": wordnet.VERB,
                "r": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)


def _clean_review2(text):
    """Lower-case, ASCII-fold and tokenize *text*, drop stop words, lemmatize.

    Returns the cleaned review as a list of tokens.
    """
    text = text.lower()
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
    text = re.sub(r"[^A-Za-z\s']", '', text)
    # BUG FIX: the original filtered against `new_stops`, which is never
    # defined at module level (it exists only as a local inside
    # _create_stop_words), so this raised NameError. Filter against the
    # `stops` list built above instead (as a set, for O(1) membership).
    stop_set = set(stops)
    tokens = [token for token in text.split() if token not in stop_set]
    for i, token in enumerate(tokens):
        tokens[i] = wnl.lemmatize(token, pos=get_wordnet_pos(token))
    return tokens


def _process_review2(text):
    """Clean *text* into a list of tokens.

    BUG FIX: previously called `_remove_stops`, which is defined only inside
    a commented-out cell (NameError at call time); stop-word removal already
    happens inside _clean_review2, so just delegate.
    """
    return _clean_review2(text)
#Turning this into text, as it is less efficient than 2
"""
stops = nltk.corpus.stopwords.words('english')
neg_stops = [
'no', 'not', 'nor', 'don\'', 'don\'t', 'ain',
'ain\'t', 'aren\'t', 'aren', 'couldn', 'couldn\'t',
'didn', 'didn\'t', 'doesn', 'doesn\'t', 'hadn',
'hadn\'t', 'hasn', 'hasn\'t', 'haven', 'haven\'t',
'isn', 'isn\'t', 'mightn', 'mightn\'t', 'mustn',
'mustn\'t', 'needn', 'needn\'t', 'shan', 'shan\'t',
'shouldn', 'shouldn\'t', 'wasn', 'wasn\'t', 'weren',
'weren\'t', 'won', 'won\'t', 'wouldn', 'wouldn\'t'
]
#still leaves in but and don.. fix this..
#doesn't get rid of other obvious stopwords, like i'm, they're....
for x in neg_stops:
if x in stops:
stops.remove(x)
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].lower()
tag_dict = {"a": wordnet.ADJ,
"n": wordnet.NOUN,
"v": wordnet.VERB,
"r": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
def _clean_review3(text):
text = text.lower()
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
text = re.sub(r"[^A-Za-z\s']", '', text)
tokens = [wnl.lemmatize(token, pos = get_wordnet_pos(token)) for token in text.split() if token not in stops]
return tokens
def _process_review3(text):
tokens = _remove_stops(_clean_review2(text))
return tokens
"""
#This code is slow
"""
stops = nltk.corpus.stopwords.words('english')
neg_stops = [
'no', 'not', 'nor', 'don\'', 'don\'t', 'ain',
'ain\'t', 'aren\'t', 'aren', 'couldn', 'couldn\'t',
'didn', 'didn\'t', 'doesn', 'doesn\'t', 'hadn',
'hadn\'t', 'hasn', 'hasn\'t', 'haven', 'haven\'t',
'isn', 'isn\'t', 'mightn', 'mightn\'t', 'mustn',
'mustn\'t', 'needn', 'needn\'t', 'shan', 'shan\'t',
'shouldn', 'shouldn\'t', 'wasn', 'wasn\'t', 'weren',
'weren\'t', 'won', 'won\'t', 'wouldn', 'wouldn\'t'
]
#still leaves in but and don.. fix this..
#doesn't get rid of other obvious stopwords, like i'm, they're....
for x in neg_stops:
if x in stops:
stops.remove(x)
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].lower()
tag_dict = {"a": wordnet.ADJ,
"n": wordnet.NOUN,
"v": wordnet.VERB,
"r": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
def _clean_review4(text):
text = text.lower()
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
text = re.sub(r"[^A-Za-z\s']", '', text)
tokens = [wnl.lemmatize(token, pos= get_wordnet_pos(token)) for token in text.split()]
tokens = [token for token in tokens if token not in stops]
return tokens
def _process_review4(text):
tokens = _remove_stops(_clean_review2(text))
return tokens
"""
"""
stops = nltk.corpus.stopwords.words('english')
neg_stops = [
'no', 'not', 'nor', 'don\'', 'don\'t', 'ain',
'ain\'t', 'aren\'t', 'aren', 'couldn', 'couldn\'t',
'didn', 'didn\'t', 'doesn', 'doesn\'t', 'hadn',
'hadn\'t', 'hasn', 'hasn\'t', 'haven', 'haven\'t',
'isn', 'isn\'t', 'mightn', 'mightn\'t', 'mustn',
'mustn\'t', 'needn', 'needn\'t', 'shan', 'shan\'t',
'shouldn', 'shouldn\'t', 'wasn', 'wasn\'t', 'weren',
'weren\'t', 'won', 'won\'t', 'wouldn', 'wouldn\'t'
]
#still leaves in but and don.. fix this..
#doesn't get rid of other obvious stopwords, like i'm, they're....
for x in neg_stops:
if x in stops:
stops.remove(x)
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].lower()
tag_dict = {"a": wordnet.ADJ,
"n": wordnet.NOUN,
"v": wordnet.VERB,
"r": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
def _clean_review5(text):
text = text.lower()
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
text = re.sub(r"[^A-Za-z\s']", '', text)
tokens = text.split()
for i, token in enumerate(tokens):
tokens[i] = wnl.lemmatize(token, pos= get_wordnet_pos(token))
tokens = [token for token in tokens if token not in stops]
return tokens
def _process_review5(text):
tokens = _remove_stops(_clean_review2(text))
return tokens
"""
_clean_review2(review_sample.loc[24, 'review_text'])
def apply_on_column(data):
    """Replace data['review_text'] in place with cleaned token lists (via _clean_review9)."""
    data['review_text'] = data['review_text'].apply(_clean_review9)
    return data
def map_on_column(data):
    """Replace data['review_text'] in place with cleaned token lists (via _clean_review2)."""
    data['review_text'] = data['review_text'].map(_clean_review2)
    return data
# Timing experiment: clean the whole sample via Series.apply.
#Takes about 8 min 8 sec for 50000 (_clean_review2)
# 15 min 24.965 seconds for 100000
start = time.time()
apply_on_column(review_sample)
end = time.time()
dur = end - start
# Verify that the function is working
print('Processed {} instances in {} minutes {} seconds.\n'.format(review_sample.shape[0], dur//60, dur%60))
review_sample.loc[27, 'review_text']
def iterrows_at(data):
    """Clean each review in place using label-based .at access (timing variant)."""
    for idx, _row in data.iterrows():
        data.at[idx, 'review_text'] = _clean_review2(data.at[idx, 'review_text'])
# Timing experiment: same cleaning, done row-by-row with iterrows/.at.
#Takes a little longer.. like 7 min 56 seconds for 50000 reviews (_clean_review2)
start = time.time()
iterrows_at(review_sample)
end = time.time()
dur = end - start
# Verify that the function is working
print('Processed {} instances in {} minutes {} seconds.\n'.format(review_sample.shape[0], dur//60, dur%60))
# Timing experiment: same cleaning via Series.map.
#7 min 31.66 seconds
#15 min 50.93 seconds for 100000
start = time.time()
map_on_column(review_sample)
end = time.time()
dur = end - start
# Verify that the function is working
print('Processed {} instances in {} minutes {} seconds.\n'.format(review_sample.shape[0], dur//60, dur%60))
def get_wordnet_pos(word):
    # Maps the first letter of the NLTK POS tag onto WordNet's POS constants
    # (defaults to noun). Redefined identically in several cells below.
    tag = nltk.pos_tag([word])[0][1][0].lower()
    tag_dict = {"a": wordnet.ADJ,
                "n": wordnet.NOUN,
                "v": wordnet.VERB,
                "r": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)

def _clean_review6(text):
    # Variant 6: keeps \, / and - through the character filter and uses a
    # regexp tokenizer allowing an apostrophe inside a token ("\w+'?\w*").
    text = text.lower()
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
    text = re.sub(r"[^A-Za-z\s'\\\/\-]", '', text)
    tokenizer = nltk.RegexpTokenizer('\w+\'?\w*')
    # NOTE(review): `new_stops` is not defined at module level (it is local to
    # _create_stop_words), so this raises NameError as written — confirm.
    tokens = [token for token in tokenizer.tokenize(text) if token not in new_stops]
    for i, token in enumerate(tokens):
        tokens[i] = wnl.lemmatize(token, pos= get_wordnet_pos(token))
    return tokens

def _process_review6(text):
    # NOTE(review): calls _remove_stops (defined only in a commented-out cell)
    # and _clean_review2 rather than _clean_review6 — looks like copy-paste.
    tokens = _remove_stops(_clean_review2(text))
    return tokens
def get_wordnet_pos(word):
    # Duplicate of the POS-mapping helper defined in earlier cells.
    tag = nltk.pos_tag([word])[0][1][0].lower()
    tag_dict = {"a": wordnet.ADJ,
                "n": wordnet.NOUN,
                "v": wordnet.VERB,
                "r": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)

def _filter_characters(text):
    # Strip non-letter characters, dangling apostrophes and possessive 's.
    text = re.sub(r"[^A-Za-z\s'\\\/\-]", '', text)
    text = re.sub("' ", ' ', text)
    text = re.sub("'s", '', text)
    text = re.sub("'", '', text)
    return text

def _clean_review7(text):
    text = text.lower()
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
    text = re.sub(r"[^A-Za-z\s'\\\/\-]", '', text)
    text = re.sub("' ", ' ', text)
    tokenizer = nltk.RegexpTokenizer('\w+\'?\w*')
    # NOTE(review): `new_stops` is undefined at module level — NameError as written.
    tokens = [token for token in tokenizer.tokenize(text) if token not in new_stops]
    # NOTE(review): this comprehension is a no-op copy.
    tokens = [token for token in tokens]
    for i, token in enumerate(tokens):
        tokens[i] = wnl.lemmatize(token, pos= get_wordnet_pos(token))
        # NOTE(review): this second assignment overwrites the lemmatized value
        # with the raw token minus "'s" — the lemmatization above is discarded.
        # Looks like a bug; _clean_review9 below is the corrected version.
        tokens[i] = re.sub("'s", '', token)
    return tokens
def get_wordnet_pos(word):
    # Duplicate of the POS-mapping helper defined in earlier cells.
    tag = nltk.pos_tag([word])[0][1][0].lower()
    tag_dict = {"a": wordnet.ADJ,
                "n": wordnet.NOUN,
                "v": wordnet.VERB,
                "r": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)

def _filter_characters(text):
    # Duplicate of the character-filter helper defined in the previous cell.
    text = re.sub(r"[^A-Za-z\s'\\\/\-]", '', text)
    text = re.sub("' ", ' ', text)
    text = re.sub("'s", '', text)
    text = re.sub("'", '', text)
    return text

#For instance like the....cat it's spitting out token 'thecat'
def _clean_review8(text):
    text = text.lower()
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8', 'ignore')
    text = re.sub(r"[^A-Za-z\s'\\\/\-]", '', text)
    tokenizer = nltk.RegexpTokenizer('\w+\'?\w+')
    # NOTE(review): the text is tokenized twice; the first result is discarded.
    tokens = tokenizer.tokenize(text)
    # NOTE(review): `new_stops` is undefined at module level — NameError as written.
    tokens = [token for token in tokenizer.tokenize(text) if token not in new_stops]
    for i, token in enumerate(tokens):
        filtered_token = token.replace("'s", '')
        tokens[i] = wnl.lemmatize(filtered_token, pos= get_wordnet_pos(filtered_token))
    return tokens
def get_wordnet_pos(word):
    """Map the first letter of a word's NLTK POS tag onto WordNet's POS constants (noun default)."""
    first_letter = nltk.pos_tag([word])[0][1][0].lower()
    mapping = {"a": wordnet.ADJ,
               "n": wordnet.NOUN,
               "v": wordnet.VERB,
               "r": wordnet.ADV}
    return mapping.get(first_letter, wordnet.NOUN)

def _clean_review9(text):
    """Lower-case, ASCII-fold, tokenize, strip stray characters, drop stop
    words, remove possessive 's and lemmatize; returns a list of tokens."""
    lowered = unicodedata.normalize('NFKD', text.lower()).encode('ascii', 'ignore').decode('utf8', 'ignore')
    raw_tokens = nltk.RegexpTokenizer('\w+\'?\w+').tokenize(lowered)
    cleaned = [re.sub(r"[^A-Za-z\s']", '', tok) for tok in raw_tokens]
    stop_list = _create_stop_words()
    kept = [tok for tok in cleaned if tok not in stop_list]
    # POS tag is taken from the token before the 's strip, as in the original.
    return [wnl.lemmatize(re.sub("'s", '', tok), pos=get_wordnet_pos(tok)) for tok in kept]
# Run the final cleaner over the sample and print every cleaned review.
apply_on_column(review_sample)
for i, tokens in enumerate(review_sample['review_text']):
    print("\n{}\n".format(i))
    print(tokens)
###Output
_____no_output_____ |
3DBPP/ActorCritic/DataPreProcessing_3.ipynb | ###Markdown
일 생산 계획 수립 List========모든 일 생산 계획 List 가져오기
###Code
# Load every daily-production-plan CSV and report its row count.
dic = "UI-PP-043 (일 생산계획 수립)"
file_name = "일 생산계획 수립"
day_list = ["201901_201903", "201904_201906", "201907_201912",
            "202001_202003", "202004_202006", "202007_202010"]
for day in day_list:
    path = "Data\\" + dic + "\\" + file_name + "_" + day + ".csv"
    df = pd.read_csv(path)
    print(len(df.index))
# Note: after the loop, df holds only the LAST period's data.
df
###Output
C:\Users\vfgtr\anaconda3\envs\learning\lib\site-packages\IPython\core\interactiveshell.py:3146: DtypeWarning: Columns (33) have mixed types.Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
###Markdown
메모리 체크===========memory_profiler 패키지로 메모리 체크사용법- current_memory_check() => 현재 메모리 사용량을 string 형태로 반환- current_notebook_name() => 현재 사용중인 주피터 노트북 이름을 반환- get_memory_usage() => 노트북 이름과 현재 메모리 사용량을 출력
###Code
def current_memory_check():
    """Return the current process memory usage as a formatted 'MB' string."""
    from memory_profiler import memory_usage
    current = memory_usage(-1, interval = 1, timeout = 1)[0]
    return f"{current: .3f} MB"
def current_notebook_name():
    """Return the active Jupyter notebook's name followed by a colon."""
    import ipyparams
    return ipyparams.notebook_name + ":"
def get_memory_usage():
    """Print '<notebook>: <memory> MB' for the current process."""
    print(current_notebook_name() + current_memory_check())
###Output
_____no_output_____
###Markdown
일 생산 계획 수립 Test=================가장 적은 메모리를 차지하는 202004 ~ 202006의 데이터를 사용해서 Test
###Code
# Test run on the smallest period's data (2020-04 .. 2020-06).
day = "202004_202006"
path = "Data\\" + dic + "\\" + file_name + "_" + day + ".csv"
df = pd.read_csv(path)
get_memory_usage()
# Build plan_df with Korean column names, copying each source column over
# from the raw CSV (columns[i] maps onto column[i] by position).
column = ['등록일자', '고객사코드', '고객사명', '오더번호', '차종', '차종명',
          '부품번호', '부품명', '업체코드', '업체명', '지연일수',
          '중박스적입수', '대박스적입수', '케이스적입수', '중박스코드',
          '계획수량', '계획케이스', '포장장코드']
plan_df = pd.DataFrame(columns = column)
plan_df
columns = ['REG_YMD', 'CUST_CD', 'CUST_NM', 'ORD_NO', 'CAR_TP_CD', 'CAR_TP_NM',
           'PART_NO', 'PART_NM', 'VEND_CD', 'VEND_NM', 'DELAY_DAY',
           'BOX_CAPA_QTY', 'CASE_CAPA_QTY', 'CAPA_QTY', 'M_BOX_CD',
           'PACK_PLAN_QTY', 'PACK_PLAN_CASE', 'PACK_SP_CD']
for i, col in enumerate(columns):
    plan_df[column[i]] = df[col]
plan_df
def num_of_kinds(column):
    """Return the sorted distinct values of plan_df[*column*].

    Replaces the original row-by-row ``plan_df.iloc[i][column]`` loop (slow
    positional access per row) with a single vectorized pass over the column;
    the result — a sorted list of the unique values — is unchanged.
    """
    return sorted(set(plan_df[column]))
# Distinct packing-site codes and registration dates, used as grid axes below.
pack = num_of_kinds('포장장코드')
pack
date = num_of_kinds('등록일자')
date
def get_new_dataframe():
    """Return an empty daily-production-plan DataFrame with the standard columns."""
    plan_columns = ['등록일자', '고객사코드', '고객사명', '오더번호', '차종', '차종명',
                    '부품번호', '부품명', '업체코드', '업체명', '지연일수',
                    '중박스적입수', '대박스적입수', '케이스적입수', '중박스코드',
                    '계획수량', '계획케이스', '포장장코드']
    return pd.DataFrame(columns=plan_columns)
pack.index('GB')
# Bucket every plan row into a (packing site x date) grid of DataFrames,
# then cross-check that no rows were lost.
df_list = []
for i in range(len(pack)):
    df_list.append([])
    for j in range(len(date)):
        df_list[i].append(get_new_dataframe())
# NOTE(review): appending one row at a time with DataFrame.append is O(n^2)
# and the method was removed in pandas >= 2.0 — a groupby(['포장장코드',
# '등록일자']) would do this in one pass; confirm before upgrading pandas.
for i in range(len(plan_df)):
    data = plan_df.iloc[i]
    row = pack.index(data['포장장코드'])
    col = date.index(data['등록일자'])
    df_list[row][col] = df_list[row][col].append(data, ignore_index=True)
    if i%1000 == 0:
        print(i)
# Total rows across all buckets should equal len(plan_df).
sums = 0
for i in range(len(pack)):
    for j in range(len(date)):
        sums += len(df_list[i][j])
sums
len(plan_df)
df_list[0][0]
df_list[0][0]
import os
def createFolder(directory):
    """Create *directory* (including parents) if it does not already exist.

    Errors are reported on stdout rather than raised, matching the
    notebook's best-effort style.
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard and also creates missing parents.
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print('Error: Creating directory. ' + directory)
len(df_list[0][1])
# Write one CSV per (packing site, date) bucket under a per-site folder.
base_dir = 'Data\후처리 데이터\\'
for i in range(len(pack)):
    for j in range(len(date)):
        if len(df_list[i][j]) == 0:
            continue
        # NOTE(review): `directory` is only refreshed when j == 0; if a site's
        # first-date bucket is empty, the `continue` above skips the refresh
        # and this iteration reuses the PREVIOUS site's directory — files can
        # land in the wrong folder. Confirm and derive `directory` from
        # df_list[i][j] on every iteration instead.
        if j == 0:
            directory = df_list[i][j]['포장장코드'][0]
            createFolder(base_dir + directory)
        day = str(df_list[i][j]['등록일자'][0])
        path = base_dir + directory + '\\' + directory + '_' + day + '.csv'
        df_list[i][j].to_csv(path, encoding='utf-8-sig')
###Output
_____no_output_____ |
01 Tweets Preprocessing.ipynb | ###Markdown
--- Import all the necessary libraries
###Code
import re
import json
import string
import datetime
import itertools
from collections import defaultdict
from wordsegment import load, segment
from nltk import TweetTokenizer
from nltk.corpus import stopwords
from textblob import TextBlob
import pandas as pd
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from datetime import datetime
import tqdm
load()
###Output
_____no_output_____
###Markdown
--- Load the depression lexicon to seed the LDA topics
###Code
# reading Depression PHQ-9 Lexicon
with open("depression_lexicon.json") as f:
    seed_terms = json.load(f)
# Flatten all signals into one list of seed terms, replacing the underscore
# in multi-word seeds with a space so they can be matched against raw tweet
# text (preprocess_text later re-joins matched phrases with "_").
seed_terms_col = [
    seed.replace("_", " ") for seed in list(
        itertools.chain.from_iterable(
            [seed_terms[signal] for signal in seed_terms.keys()]))
]
###Output
_____no_output_____
###Markdown
--- Prepare other lexicons and resources required to filter and pre-process the tweets
###Code
# Other lexicons and resources
emojies = [":‑)", ":)", ":D", ":o)", ":]", ":3", ":c)", ":>", "=]", "8)", "=)", ":}", ":^)", ":っ)",
":‑D", "8‑D", "8D", "x‑D", "xD", "X‑D", "XD", "=‑D", "=D", "=‑3", "=3", "B^D", ":-))", ">:[",
":‑(", ":(", ":‑c", ":c", ":‑<", ":っC", ":<", ":‑[", ":[", ":{", ";(", ":-||", ":@", ">:(",
":'‑(", ":'(", ":'‑)", ":')", "D:<", "D:", "D8", "D;", "D=", "DX", "v.v", "D‑':", ">:O", ":‑O",
":O", ":‑o", ":o", "8‑0", "O_O", "o‑o", "O_o", "o_O", "o_o", "O-O", ":*", ":-*", ":^*", "(", "}{'",
")", ";‑)", ";)", "*-)", "*)", ";‑]", ";]", ";D", ";^)", ":‑,", ">:P", ":‑P", ":P", "X‑P", "x‑p",
"xp", "XP", ":‑p", ":p", "=p", ":‑Þ", ":Þ", ":þ", ":‑þ", ":‑b", ":b", "d:", ">:\\", ">:/", ":‑/",
":‑.", ":/", ":\\", "=/", "=\\", ":L", "=L", ":S", ">.<", ":|", ":‑|", ":$", ":‑X", ":X", ":‑#",
":#", "O:‑)", "0:‑3", "0:3", "0:‑)", "0:)", "0;^)", ">:)", ">;)", ">:‑)", "}:‑)", "}:)", "3:‑)",
"3:)", "o/\o", "^5", ">_>^", "^<_<", "|;‑)", "|‑O", ":‑J", ":‑&", ":&", "#‑)", "%‑)", "%)",
":‑###..", ":###..", "<:‑|", "<*)))‑{", "><(((*>", "><>", "\o/", "*\0/*", "@}‑;‑'‑‑‑", "@>‑‑>‑‑",
"~(_8^(I)", "5:‑)", "~:‑\\", "//0‑0\\\\", "*<|:‑)", "=:o]", "7:^]", ",:‑)", "</3", "<3"]
tweet_token = TweetTokenizer(
preserve_case=True, reduce_len=True, strip_handles=True)
printable = set(string.printable)
punctuation = list(string.punctuation)
punctuation.remove("-")
punctuation.remove('_')
stop_words_extended = [
"a's", "abaft", "able", "aboard", "above", "abst", "accordance", "according", "accordingly", "across", "act", "actually",
"added", "adj", "affected", "affecting", "affects", "afore", "aforesaid", "afterwards", "against", "agin", "ago", "ah",
"ain't", "aint", "albeit", "allow", "allows", "almost", "alone", "along", "alongside", "already", "also", "although",
"always", "american", "amid", "amidst", "among", "amongst", "and", "anent", "announce", "another", "anybody", "anyhow",
"anymore", "anyone", "anything", "anyway", "anyways", "anywhere", "apart", "apparently", "appear", "appreciate",
"appropriate", "approximately", "aren", "arent", "arise", "around", "aside", "ask", "asking", "aslant", "associated",
"astride", "athwart", "auth", "available", "away", "awfully", "b", "back", "bar", "barring", "became", "become",
"becomes", "becoming", "before", "beforehand", "begin", "beginning", "beginnings", "begins", "behind", "believe",
"beneath", "beside", "besides", "best", "better", "betwixt", "beyond", "biol", "brief", "briefly", "by", "c", "c'mon",
"c's", "ca", "came", "can't", "cannot", "cant", "cause", "causes", "certain", "certainly", "changes", "circa", "clearly",
"close", "co", "com", "come", "comes", "concerning", "consequently", "consider", "considering", "contain", "containing",
"contains", "corresponding", "cos", "could", "couldn't", "couldnt", "couldst", "course", "currently", "dare", "dared",
"daren", "dares", "daring", "date", "definitely", "described", "despite", "didn", "different", "directly", "does",
"doesn't", "don", "done", "dost", "doth", "downwards", "due", "durst", "e", "early", "ed", "edu", "effect", "eg",
"eight", "eighty", "either", "else", "elsewhere", "em", "end", "ending", "english", "enough", "entirely", "er",
"ere", "especially", "et", "et-al", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere",
"ex", "exactly", "example", "except", "excepting", "f", "failing", "far", "ff", "fifth", "first", "five", "fix",
"followed", "following", "follows", "former", "formerly", "forth", "found", "four", "further", "furthermore", "g",
"gave", "get", "gets", "getting", "give", "given", "gives", "giving", "go", "goes", "going", "gone", "gonna", "got",
"gotta", "gotten", "greetings", "h", "hadn", "happens", "hard", "hardly", "hasn", "hast", "hath", "haven", "having",
"he'd", "he'll", "he's", "hed", "hello", "help", "hence", "here", "here's", "hereafter", "hereby", "herein", "heres",
"hereupon", "herself", "hes", "hi", "hid", "high", "himself", "hither", "home", "hopefully", "how's", "howbeit",
"however", "hundred", "i'd", "i'll", "i'm", "i've", "id", "ie", "ignored", "ill", "im", "immediate", "immediately",
"importance", "important", "inasmuch", "inc", "indeed", "index", "indicate", "indicated", "indicates", "information",
"inner", "inside", "insofar", "instantly", "instead", "invention", "inward", "isn", "it", "it'd", "it'll", "itd",
"itself", "j", "k", "keep", "keeps", "kept", "kg", "km", "know", "known", "knows", "l", "large", "largely", "last",
"lately", "later", "latter", "latterly", "least", "left", "less", "lest", "let", "let's", "lets", "like", "liked",
"likely", "likewise", "line", "little", "living", "long", "look", "looking", "looks", "ltd", "made", "mainly", "make",
"makes", "many", "may", "maybe", "mayn", "mean", "means", "meantime", "meanwhile", "merely", "mg", "mid", "midst",
"might", "million", "mine", "minus", "miss", "ml", "moreover", "mostly", "mr", "mrs", "much", "mug", "must", "mustn't",
"myself", "n", "na", "name", "namely", "nay", "nd", "near", "nearly", "neath", "necessarily", "necessary", "need",
"needed", "needing", "needs", "neither", "never", "nevertheless", "new", "next", "nigh", "nigher", "nighest", "nine",
"ninety", "nisi", "nobody", "non", "none", "nonetheless", "noone", "normally", "nos", "noted", "nothing",
"notwithstanding", "novel", "nowhere", "obtain", "obtained", "obviously", "off", "often", "oh", "ok", "okay", "old",
"omitted", "once", "one", "ones", "oneself", "onto", "open", "ord", "others", "otherwise", "ought", "oughtn", "ours",
"out", "outside", "overall", "owing", "p", "page", "pages", "part", "particular", "particularly", "past", "pending",
"per", "perhaps", "placed", "please", "plus", "poorly", "possible", "possibly", "potentially", "pp", "predominantly",
"present", "presumably", "previously", "primarily", "probably", "promptly", "proud", "provided", "provides", "providing",
"public", "put", "q", "qua", "que", "quickly", "quite", "qv", "r", "ran", "rather", "rd", "readily", "real", "really",
"reasonably", "recent", "recently", "ref", "refs", "regarding", "regardless", "regards", "related", "relatively",
"research", "respecting", "respectively", "resulted", "resulting", "results", "right", "round", "run", "said", "sans",
"save", "saving", "saw", "say", "saying", "says", "sec", "second", "secondly", "section", "see", "seeing", "seem",
"seemed", "seeming", "seems", "seen", "self", "selves", "sensible", "sent", "serious", "seriously", "seven", "several",
"shall", "shalt", "shan't", "she'd", "she'll", "shed", "shell", "shes", "short", "shouldn", "show", "showed", "shown",
"showns", "shows", "significant", "significantly", "similar", "similarly", "since", "six", "slightly", "small", "some",
"somebody", "somehow", "someone", "somethan", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon",
"sorry", "special", "specifically", "specified", "specify", "specifying", "still", "stop", "strongly", "sub",
"substantially", "successfully", "sufficiently", "suggest", "summat", "sup", "supposing", "sure", "t's", "take",
"taken", "taking", "tell", "tends", "th", "thank", "thanks", "thanx", "that'll", "that's", "that've", "thats", "thee",
"theirs", "themselves", "thence", "there'll", "there's", "there've", "thereafter", "thereby", "thered", "therefore",
"therein", "thereof", "therere", "theres", "thereto", "thereupon", "they", "they'd", "they'll", "they're", "they've",
"theyd", "theyre", "thine", "think", "third", "tho", "thorough", "thoroughly", "thou", "though", "thoughh", "thousand",
"three", "thro", "throug", "throughout", "thru", "thus", "thyself", "til", "till", "tip", "today", "together", "took",
"touching", "toward", "towards", "tried", "tries", "true", "truly", "try", "trying", "ts", "twas", "tween", "twere",
"twice", "twill", "twixt", "two", "twould", "u", "un", "underneath", "unfortunately", "unless", "unlike", "unlikely",
"unto", "upon", "ups", "us", "use", "used", "useful", "usefully", "usefulness", "uses", "using", "usually", "v", "value",
"various", "versus", "via", "vice", "vis-a-vis", "viz", "vol", "vols", "vs", "w", "wanna", "want", "wanting", "wants",
"wasn", "wasnt", "way", "we'd", "we'll", "we're", "we've", "wed", "welcome", "well", "went", "weren", "werent", "wert",
"what'll", "what's", "whatever", "whats", "when's", "whence", "whencesoever", "whenever", "where's", "whereafter",
"whereas", "whereby", "wherein", "wheres", "whereupon", "wherever", "whether", "whichever", "whichsoever", "whilst",
"whim", "whither", "who'll", "who's", "whod", "whoever", "whole", "whomever", "whore", "whos", "whose", "whoso",
"whosoever", "why's", "widely", "willing", "wish", "within", "without", "wonder", "wont", "words", "world", "would",
"wouldn't", "wouldnt", "wouldst", "www", "x", "ye", "yes", "yet", "you'd", "you're", "youd", "youre", "yourself", "z", "zero"
]
stop_words_extended = list(
set(stop_words_extended + punctuation + list(stopwords.words('english'))))
###Output
_____no_output_____
###Markdown
--- Load and clean the 1.6M tweets data
###Code
tweets_df = pd.read_csv('Data/tweets.csv', encoding="ISO-8859-1",
names=["sentiment", "tweet_id", "created_at", "query", "username", "text"])
def convert_date(date):
    """Parse a Twitter-style timestamp such as 'Mon Apr 06 22:19:45 PDT 2009'.

    The literal ' PDT' zone marker is stripped before parsing; every
    timestamp in this dump carries the same zone, so no offset is applied.
    """
    stripped = date.replace(' PDT', '')
    return datetime.strptime(stripped, "%a %b %d %H:%M:%S %Y")
tweets_df['created_at'] = tweets_df['created_at'].apply(convert_date)
tweets_df = tweets_df.sort_values(
["username", "created_at"]).reset_index(drop=True)
user_tweet_counts=tweets_df[['tweet_id', 'username', 'created_at']].groupby(['username']).agg('count').reset_index()
users_50 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=50])
users_70 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=70])
users_100 = list(user_tweet_counts['username'][user_tweet_counts['tweet_id']>=100])
def user_tweets_50(username):
    """Return 1 if *username* has at least 50 tweets (is in users_50), else 0."""
    # int(bool) gives exactly the original 1/0 encoding without branching.
    return int(username in users_50)

def user_tweets_70(username):
    """Return 1 if *username* has at least 70 tweets (is in users_70), else 0."""
    return int(username in users_70)

def user_tweets_100(username):
    """Return 1 if *username* has at least 100 tweets (is in users_100), else 0."""
    return int(username in users_100)

def user_tweets_180(username):
    """Return 1 if *username* is in users_180, else 0.

    NOTE(review): users_180 is never defined in this notebook and this
    function is never applied below -- calling it raises NameError.
    """
    return int(username in users_180)
tweets_df['_50'] = tweets_df['username'].apply(user_tweets_50)
tweets_df['_70'] = tweets_df['username'].apply(user_tweets_70)
tweets_df['_100'] = tweets_df['username'].apply(user_tweets_100)
tweets_df=tweets_df.drop_duplicates()
###Output
_____no_output_____
###Markdown
*** Pre-process tweets by filtering the text and recording the sentiments of each tweet
###Code
analyzer = SentimentIntensityAnalyzer()
# Compiled once at module load: the original rebuilt this large
# character-class pattern on every call, inside the per-tweet cleaning loop.
_EMOJI_PATTERN = re.compile("["
                            u"\U0001F600-\U0001F64F"  # emoticons
                            u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                            u"\U0001F680-\U0001F6FF"  # transport & map symbols
                            u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                            u"\U00002500-\U00002BEF"
                            u"\U00002702-\U000027B0"  # dingbats
                            u"\U000024C2-\U0001F251"
                            u"\U0001f926-\U0001f937"
                            u"\U00010000-\U0010ffff"
                            u"\u2640-\u2642"
                            u"\u2600-\u2B55"
                            u"\u200d"
                            u"\u23cf"
                            u"\u23e9"
                            u"\u231a"
                            u"\ufe0f"
                            u"\u3030"
                            "]+", flags=re.UNICODE)


def deEmojify(text):
    """Remove emoji / pictographic code points from *text*.

    The duplicated dingbats range from the original pattern was dropped;
    duplicates inside a regex character class do not change matching.
    """
    return _EMOJI_PATTERN.sub(r'', text)
# Single-token rewrites, checked first.
_TOKEN_MAP = {
    'u': 'you', 'r': 'are', 'some1': 'someone',
    'yrs': 'years', 'hrs': 'hours', 'mins': 'minutes', 'secs': 'seconds',
    'pls': 'please', 'plz': 'please',
    '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today',
    '4got': 'forget', '4gotten': 'forget',
    'hahah': 'haha', 'hahaha': 'haha', 'hahahaha': 'haha',
    "mother's": 'mother', "mom's": 'mom', "dad's": 'dad',
    'bday': 'birthday', 'b-day': 'birthday',
    'lmao': 'lol', 'lolz': 'lol', 'rofl': 'lol',
    '<3': 'love',
    'thanx': 'thanks', 'thnx': 'thanks',
    'goood': 'good',
}

# Negated contractions: the apostrophe is dropped ("don't" -> "dont").
_CONTRACTIONS = {
    "i'm", "don't", "can't", "couldn't", "aren't", "wouldn't", "isn't",
    "didn't", "hadn't", "doesn't", "won't", "haven't", "wasn't", "hasn't",
    "shouldn't", "ain't", "they've",
}

# HTML-entity remnants and ellipsis noise, replaced by a single space.
_NOISE = {'amp', 'quot', 'lt', 'gt', '½25', '..', '. .', '. . .', '...'}


def de_abbreviate(token):
    """Expand common Twitter abbreviations; return *token* unchanged otherwise.

    Replaces the original ~40-branch if-chain with O(1) dict/set lookups,
    preserving the exact token-to-replacement mapping.
    """
    if token in _TOKEN_MAP:
        return _TOKEN_MAP[token]
    if token in _CONTRACTIONS:
        return token.replace("'", "")
    if token in _NOISE:
        return ' '
    return token
# Applied strictly in order, matching the original chain of str.replace
# calls; each pair is (substring, expansion).
_SLANG_EXPANSIONS = (
    ("idk", "i dont know"),
    ("i'll", "i will"),
    ("you'll", "you will"),
    ("we'll", "we will"),
    ("it'll", "it will"),
    ("it's", "it is"),
    ("i've", "i have"),
    ("you've", "you have"),
    ("we've", "we have"),
    ("they've", "they have"),
    ("you're", "you are"),
    ("we're", "we are"),
    ("they're", "they are"),
    ("let's", "let us"),
    ("she's", "she is"),
    ("he's", "he is"),
    ("that's", "that is"),
    ("i'd", "i would"),
    ("you'd", "you would"),
    ("there's", "there is"),
    ("what's", "what is"),
    ("how's", "how is"),
    ("who's", "who is"),
    ("y'all", "you all"),
    ("ya'll", "you all"),
)


def de_slang(tweet):
    """Expand common contractions/slang substrings in *tweet*.

    Substring-based, like the original: matches inside longer words too,
    and expects lower-case input (the patterns are all lower-case).
    """
    for pattern, expansion in _SLANG_EXPANSIONS:
        tweet = tweet.replace(pattern, expansion)
    return tweet
def preprocess_text(tweet):
    """Normalize one raw tweet into a cleaned, tokenized string.

    Pipeline (order matters): seed-phrase joining, RT/url removal, slang
    expansion, ascii filtering, mention removal, hashtag segmentation,
    punctuation/number/emoji stripping, then stop-word filtering with
    abbreviation expansion. Relies on the module-level lexicons
    (seed_terms_col, printable, punctuation, tweet_token,
    stop_words_extended, emojies) and the segment() word segmenter.
    """
    # replace seeds (as phrases) to unigrams, e.g. "feel sad" -> "feel_sad",
    # so multi-word lexicon entries survive tokenization as single tokens
    for seed in seed_terms_col:
        if seed in tweet and " " in seed:
            tweet = tweet.replace(seed, seed.replace(" ", "_"))
    # drop the "RT @user:" retweet prefix (keeps text after the first colon)
    if tweet[:2] == "RT":
        tweet = tweet[tweet.index(":") + 2:]
    # remove urls from tweet
    tweet = re.sub(
        r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', tweet)
    # expand contractions / short notations ("idk" -> "i dont know", ...)
    tweet = de_slang(tweet)
    # remove non-ascii characters
    tweet = ''.join((filter(lambda x: x in printable, tweet)))
    # flatten newlines and scrub leftover url fragments
    tweet = tweet.replace("\n", " ").replace(" https", "").replace("http", "")
    # remove all @mentions
    mentions = re.findall(r"@\w+", tweet)
    for mention in mentions:
        tweet = tweet.replace(mention, "")
    # split hashtags into words, e.g. "#somephrase" -> segmented words
    for term in re.findall(r"#\w+", tweet):
        # remove any punctuations from the hashtag before segmenting
        token = term[1:].translate(str.maketrans('', '', string.punctuation))
        segments = ' '.join(segment(token))
        tweet = tweet.replace(term, segments)
    # remove all punctuation (the `punctuation` list excludes '-' and '_')
    tweet = re.sub(r"""
    ["""+"".join(punctuation)+"""]+
    """,
    " ",
    tweet, flags=re.VERBOSE)
    # remove leading/trailing spaces
    tweet = tweet.strip()
    # collapse every run of digits (and '-') into the placeholder token NUM
    tweet = re.sub(r'[\d-]+', 'NUM', tweet)
    # pad NUM with spaces so it tokenizes as its own word
    tweet = tweet.replace("NUM", " NUM ")
    # remove emoticons (NOTE(review): likely a no-op at this point, since
    # the non-ascii filter above already removed emoji -- confirm)
    tweet = deEmojify(tweet)
    # lower-case, drop stop words / ascii emoticons / 1-char tokens,
    # and expand abbreviations on the remaining tokens
    tweet = " ".join([de_abbreviate(word.lower()) for word in tweet_token.tokenize(tweet) if word.lower(
    ) not in stop_words_extended and word.lower() not in emojies and len(word) > 1])
    # collapse multiple spaces
    tweet = re.sub(' +', ' ', tweet)
    return tweet
def preprocess(tweets):
    """Clean each tweet and score sentiment on both raw and cleaned text.

    Parameters: tweets -- DataFrame with 'tweet_id', 'created_at', 'text'.
    Returns a DataFrame with the cleaned text plus TextBlob polarity and
    VADER compound/pos/neu/neg scores for the raw and cleaned versions.
    """
    processed_tweets = []
    for index, tweet in tqdm.tqdm(tweets.iterrows()):
        raw_text = tweet['text']
        cleaned_text = preprocess_text(raw_text)
        # One polarity_scores() call per text instead of four: the returned
        # dict already contains the compound/pos/neu/neg components.
        raw_scores = analyzer.polarity_scores(raw_text)
        cleaned_scores = analyzer.polarity_scores(cleaned_text)
        processed_tweets.append([
            tweet['tweet_id'], tweet['created_at'], raw_text, cleaned_text,
            TextBlob(raw_text).sentiment.polarity,
            raw_scores['compound'], raw_scores['pos'],
            raw_scores['neu'], raw_scores['neg'],
            TextBlob(cleaned_text).sentiment.polarity,
            cleaned_scores['compound'], cleaned_scores['pos'],
            cleaned_scores['neu'], cleaned_scores['neg']])
    return pd.DataFrame(processed_tweets, columns=[
        'tweet_id', 'created_at', 'text', 'cleaned_text',
        'polarity_raw', 'vader_compound_raw', 'vader_pos_raw',
        'vader_neu_raw', 'vader_neg_raw',
        'polarity_cleaned', 'vader_compound_cleaned', 'vader_pos_cleaned',
        'vader_neu_cleaned', 'vader_neg_cleaned'])
preprocessed_tweets = preprocess(tweets_df[["tweet_id", "created_at", "text"]])
###Output
6838it [00:14, 456.47it/s]
###Markdown
*** Merge the tweets to get the usernames, and filter for tweets count
###Code
preprocessed_tweets=pd.merge(preprocessed_tweets, tweets_df[["tweet_id","created_at","username","_50","_70", "_100"]], on=["tweet_id",'created_at'])
preprocessed_tweets=preprocessed_tweets.drop_duplicates()
preprocessed_tweets = preprocessed_tweets.sort_values(["username", "created_at"]).reset_index(drop=True)
preprocessed_tweets.to_csv('Data/tweets_cleaned.csv', header=True, index=False)
###Output
_____no_output_____ |
day03/additional materials/5.2 Multi-Modal Networks.ipynb | ###Markdown
Quick Intro to Keras Functional API Preamble: All models (layers) are callables ```pythonfrom keras.layers import Input, Densefrom keras.models import Model this returns a tensorinputs = Input(shape=(784,)) a layer instance is callable on a tensor, and returns a tensorx = Dense(64, activation='relu')(inputs)x = Dense(64, activation='relu')(x)predictions = Dense(10, activation='softmax')(x) this creates a model that includes the Input layer and three Dense layersmodel = Model(input=inputs, output=predictions)model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])model.fit(data, labels) starts training``` Multi-Input Networks Keras Merge Layer Here's a good use case for the functional API: models with multiple inputs and outputs. The functional API makes it easy to manipulate a large number of intertwined datastreams.Let's consider the following model. ```pythonfrom keras.layers import Dense, Inputfrom keras.models import Modelfrom keras.layers.merge import concatenateleft_input = Input(shape=(784, ), name='left_input')left_branch = Dense(32, input_dim=784, name='left_branch')(left_input)right_input = Input(shape=(784,), name='right_input')right_branch = Dense(32, input_dim=784, name='right_branch')(right_input)x = concatenate([left_branch, right_branch])predictions = Dense(10, activation='softmax', name='main_output')(x)model = Model(inputs=[left_input, right_input], outputs=predictions)``` Resulting Model will look like the following network: Such a two-branch model can then be trained via e.g.: ```pythonmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])model.fit([input_data_1, input_data_2], targets) we pass one data array per model input``` Try yourself Step 1: Get Data - MNIST
###Code
# let's load MNIST data as we did in the exercise on MNIST with FC Nets
# %load ../solutions/sol_52.py
###Output
_____no_output_____
###Markdown
Step 2: Create the Multi-Input Network
###Code
## try yourself
## `evaluate` the model on test data
###Output
_____no_output_____ |
Big-Data-Clusters/CU6/Public/content/repair/tsg048-create-stuck-waiting-for-controller.ipynb | ###Markdown
TSG048 - Deployment stuck at “Waiting for controller pod to be up”==================================================================Description-----------Troubleshooting for the situation where running azdata create clusterhangs at: Waiting for controller pod to be up…Steps----- Instantiate Kubernetes client
###Code
# Instantiate the Python Kubernetes client into 'api' variable
import os
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
###Output
_____no_output_____
###Markdown
Get the namespace for the big data clusterGet the namespace of the Big Data Cluster from the Kuberenetes API.**NOTE:**If there is more than one Big Data Cluster in the target Kubernetescluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
###Output
_____no_output_____
###Markdown
Get the name of controller pod that has the “couldn’t parse image reference problem”
###Code
label_selector = 'app=controller'
name=api.list_namespaced_pod(namespace, label_selector=label_selector).items[0].metadata.name
print ("Controller pod name: " + name)
###Output
_____no_output_____
###Markdown
Set the text to look for in pod events
###Code
kind="Pod"
precondition_text="couldn't parse image reference"
###Output
_____no_output_____
###Markdown
Get events for a kubernetes resourcesGet the events for a kubernetes named space resource:
###Code
V1EventList=api.list_namespaced_event(namespace)
for event in V1EventList.items:
if (event.involved_object.kind==kind and event.involved_object.name==name):
print(event.message)
###Output
_____no_output_____
###Markdown
PRECONDITION CHECK
###Code
# True if any event on the controller pod mentions the image-parse error.
precondition = any(
    event.involved_object.kind == kind
    and event.involved_object.name == name
    and precondition_text in event.message  # idiomatic substring test
    for event in V1EventList.items
)
if not precondition:
    raise Exception("PRECONDITION NON-MATCH: 'tsg048-create-stuck-waiting-for-controller' is not a match for an active problem")
print("PRECONDITION MATCH: 'tsg048-create-stuck-waiting-for-controller' is a match for an active problem in this cluster")
###Output
_____no_output_____
###Markdown
Resolution----------To resolve this problem fix the docker repository name and run clustercreate again.
###Code
print('Notebook execution complete.')
###Output
_____no_output_____ |
rr_lyrae_dist.ipynb | ###Markdown
Derive the distance of M4 globular cluster using the period-luminosity relation from RR-Lyrae starsI will use the well-known relation between the period and luminosity of 37 RR-Lyrae stars found in the M4 globular cluster to estimate its distance. I'm using the Neely et al. (2015) Spitzer Space Telescope observations from the http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=J/ApJ/808/11 repository.
###Code
# Importing libraries
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
%matplotlib inline
# Reading input files downloaded from CDS
readme = 'data/ReadMe.txt'
rr_lyrae_data = Table.read('data/table2.dat', readme=readme, format='cds')
# print(rr_lyrae_data.columns)
# Selecting the data
rr_lyrae = rr_lyrae_data[np.where(rr_lyrae_data['Mode'] == 'RRab')] # We will only need the RRab data
# the authors also mention that data for sources V20 and V21 are not good due to blending
rr_lyrae = rr_lyrae[np.where(rr_lyrae['ID'] != 'V20')]
rr_lyrae = rr_lyrae[np.where(rr_lyrae['ID'] != 'V21')]
# Plotting and fitting the data-sets
fig, ax = plt.subplots(1,2,figsize=(8,4))
for i in np.arange(2):
ax[i].set_xlim(-0.15,0.23)
ax[i].set_ylim(11.3,10.3)
ax[i].set_xticks([-0.1,0.0,0.1,0.2])
ax[0].set_xlabel('Log Period (days) + 0.26') # log(P) = -0.26 is a representative number of the mean period of the RRab
ax[1].set_xlabel('Log Period (days) + 0.26')
ax[0].set_ylabel('IRAC [3.6]')
ax[1].set_ylabel('IRAC [4.5]')
# Scatter plots
ax[0].errorbar(rr_lyrae['logP']+0.26,rr_lyrae['[3.6]'],yerr=rr_lyrae['e_[3.6]'],fmt='o',marker='.',color='k')
ax[1].errorbar(rr_lyrae['logP']+0.26,rr_lyrae['[4.5]'],yerr=rr_lyrae['e_[4.5]'],fmt='o',marker='.',color='k')
# Line fitting
lin1 = np.polyfit(rr_lyrae['logP']+0.26,rr_lyrae['[3.6]'],1)
lin2 = np.polyfit(rr_lyrae['logP']+0.26,rr_lyrae['[4.5]'],1)
# Line plots
x = [-0.15,0.23]
y1 = np.array([lin1[0]])*x + np.array([lin1[1]])
y2 = np.array([lin2[0]])*x + np.array([lin2[1]])
ax[0].plot(x,y1,color='r')
ax[1].plot(x,y2,color='r')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The two relations are:
###Code
print('m_3.6 = ' + str(np.around(lin1[0], decimals=2)) + ' (log P + 0.26) + ' + str(np.around(lin1[1], decimals=2)))
print('m_4.5 = ' + str(np.around(lin2[0], decimals=2)) + ' (log P + 0.26) + ' + str(np.around(lin2[1], decimals=2)))
###Output
m_3.6 = -2.31 (log P + 0.26) + 10.91
m_4.5 = -2.3 (log P + 0.26) + 10.86
###Markdown
whereas the corresponding relations for the RR-Lyrae stars in our Galaxy are:$$M_{[3.6]} = -2.19\ {\rm log}\ P - 1.176$$$$M_{[4.5]} = -2.12\ {\rm log}\ P - 1.199$$therefore the distance modulus (https://en.wikipedia.org/wiki/Distance_modulus) is given by:$$\mu_{[3.6]} = \left( m_{[3.6]} - M_{[3.6]} \right) =$$
###Code
print(10.91+1.176)
###Output
12.086
###Markdown
and $$\mu_{[4.5]} = \left( m_{[4.5]} - M_{[4.5]} \right) =$$
###Code
print(10.86+1.199)
###Output
12.059
|
mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/VQA-gluon.ipynb | ###Markdown
Visual Question Answering in MXNET Gluon This is a notebook for implementing visual question answering using MXNET Gluon.This notebook is based on:"Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding", Akira Fukui, Dong Huk Park, Daylen Yang, Anna Rohrbach, Trevor Darrell and Marcus Rohrbach, EMNLP 2016 Table of Content:* [VQA Task](vqatask)* [VQA Dataset](dataset)* [Feature Extraction](feature)* [Pooling methods](pooling)* [Models](models)* [Data IO](dataio)* [Training](train)* [Testing](test) VQA Task Visual question answering(VQA) is a task focusing on providing a natural language answer given any image and any open-ended question. This task requires a deep understanding and reasoning of the image combined with the question: a joint representation of both visual and textual input. ![](img/pizza.png ) VQA Dataset __VQA dataset v1__ is first released by Antol et. al in this paper. Based on the type of images: real images and abstract scenes, they have two different dataset. However, due to the inherent structure in our world and bias in language, the learning process is biased. In other words, a specific question tends to have a specific answer regardless of the image. Thus, Goyal et. al. release VQA dataset v2. They prepare similar images with same questions but leads to different answers.Johnson et. al. introduce Compositional Language and Elementary Visual Reasoning(__CLEVR__) diagnostic dataset which focuses more on reasoning. Strub et. al. propose a __two-player guessing game__: guess a target in a given image with a sequential questions and answers. This requires both visual question answering and spatial reasoning.The Task Driven Image Understanding Challenge dataset(__TDIUC__) contains over 1.6 million questions organized into 12 different categories. It contains images and annotations from MSCOCO and Visual genome. 
The key differences between TDIUC and VQA v1/v2 dataset are: (1)Categorized questions: Each question belongs to one of the 12 categories. This helps us have a task-oriented evaluation. Besides simple accuracy, we can also calculate arithmetic and harmonic means across all per question-type accuracies. (2)Absurd questions: Questions that are totally irrelevant to the image. In this way, it force an algorithm to determine if a question is valid. This also helps to balance the dataset. In this notebook, we will use VQA dataser v1 for demostration. You can download the VQA1.0 dataset from VQA website. You need to preprocess the data:(1) Extract the samples from original json files. (2) Filter the samples giving top k answers(k can be 1000, 2000...). This will make the prediction easier. Feature Extraction Usually people use pretrained models to extract features from the image and question.__Image pretrained model__: VGG: A key aspect of VGG was to use many convolutional blocks with relatively narrow kernels, followed by a max-pooling step and to repeat this block multiple times. Resnet: It is a residual learning framework to ease the training of networks that are substantially deep. It reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions.__Question pretrained model__: Word2Vec: The word2vec tool takes a text corpus as input and produces the word vectors as output. It first constructs a vocabulary from the training text data and then learns vector representation of words. The model contains 300-dimensional vectors for 3 million words and phrases. Glove: Similar to Word2Vec, it is a word embedding dataset. It contains 100/200/300-dimensional vectors for 2 million words.Skipthought: This is an encoder-decoder model that tries to reconstruct the surrounding sentences of an encoded passage. Sentences that share semantic and syntactic properties are thus mapped to similar vector representations. 
Different from the previous two model, this is a sentence based model.GNMT encoder: We propose using the encoder of google neural machine translation system to extract the question features. __We will discuss about how to extract the features here in details.__ Pooling Methods After we extract the features, information of each modality are then combined together, using concatenation, element-wise product or sum operations.Pooling methods are widely used in visual tasks to combine information for various streams into one final feature representation. It inherently can be applied to VQA task. Common pooling methods are average pooling and bilinear pooling. Bilinear pooling requires taking the outer product between two features. However the high dimensional result makes it hard to be applied to huge real image dataset. Thus, different compact pooling methods are proposed to solve this problem.In the following notebook, we will illustrate two different models:(1) Concatenation (2) Multimodel compact bilinear pooling.__We will introduce attention model in this notebook.__ Models We define our model with MXNET Gluon. gluon.Block is the basic building block of models. If any operator is not defined under gluon, you can use mxnet.ndarray operators to subsititude.
###Code
from __future__ import print_function
import numpy as np
import mxnet as mx
import mxnet.ndarray as F
import mxnet.contrib.ndarray as C
import mxnet.gluon as gluon
from mxnet.gluon import nn
from mxnet import autograd
import bisect
from IPython.core.display import display, HTML
import logging
logging.basicConfig(level=logging.INFO)
import os
from mxnet.test_utils import download
import json
from IPython.display import HTML, display
# Some parameters we are going to use
batch_size = 64            # samples per data-iterator batch
#ctx = mx.gpu(1)           # uncomment to run on GPU 1 instead of CPU
ctx = mx.cpu()             # computation context for all NDArrays and parameters
compute_size = batch_size  # batch compute size passed to C.fft/C.ifft in Net2
out_dim = 10000            # count-sketch output dimension used by the MCB model (Net2)
gpus = 1                   # number of devices a batch is split across (used in Net2's ones-padding)
###Output
_____no_output_____
###Markdown
In the __first model__, we will concatenate the image and question features and use a multilayer perceptron (MLP) to predict the answer.
###Code
class Net1(gluon.Block):
    """Baseline VQA model.

    L2-normalizes the two incoming feature streams, concatenates them,
    and predicts the answer class with a small MLP
    (Dense(8192, relu) -> BatchNorm -> Dropout(0.3) -> Dense(1000)).
    """

    def __init__(self, **kwargs):
        super(Net1, self).__init__(**kwargs)
        with self.name_scope():
            # Layers declared inside name_scope inherit the parent prefix,
            # which keeps parameter names stable for save/load.
            self.bn = nn.BatchNorm()
            self.dropout = nn.Dropout(0.3)
            self.fc1 = nn.Dense(8192, activation="relu")
            self.fc2 = nn.Dense(1000)

    def forward(self, x):
        # x is a two-element list of feature batches (question and image
        # streams — presumably in that order; verify against the iterator).
        first = F.L2Normalization(x[0])
        second = F.L2Normalization(x[1])
        fused = F.concat(first, second, dim=1)
        hidden = self.dropout(self.bn(self.fc1(fused)))
        return self.fc2(hidden)
###Output
_____no_output_____
###Markdown
In the __second model__, instead of linearly combine the image and text features, we use count sketch to estimate the outer product of the image and question features. It is also named as multimodel compact bilinear pooling(MCB). This method was proposed in Multimodal Compact Bilinear Pooling for VQA. Given a vector $a \in \mathcal{R}^n$, random hash function $h \in \mathcal{R}^n$: $[n] \to [b]$ and binary variable $s \in \mathcal{R}^n$: $[n] \to \pm 1$, the count sketch operator $\psi(a,h,s) \in \mathcal{R}^b$ is:$\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \psi(a,h,s)[j] = \sum_{h[i] = j}s[i]a[i], \quad j \in {1,\cdots,b}$Let $x$ and $y$ be two separate feature vectors, and their bilinear pooling feature be $x \otimes y$, then:$\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \psi(x \otimes y, h,s) = \psi(x,h,s) \star \psi(y,h,s)$where $\star$ is the convolution operator. This can further be simplified by using FFT properties: convolution in time domain equals to elementwise product in frequency domain. We show the procedure below: ![](img/mcb.png ) One improvement we made is adding ones vectors to each features before count sketch. The intuition is: given input vectors $x,y$, estimating outer product between $[x,1s]$ and $[y, 1s]$ gives us information more than just $x \otimes y$. It also contains information of $x$ and $y$.
###Code
class Net2(gluon.Block):
    """MCB (Multimodal Compact Bilinear pooling) VQA model.

    Approximates the outer product of the two feature streams with count
    sketch, computes their convolution via FFT (convolution theorem:
    convolution in time equals elementwise product in frequency), and
    classifies the pooled feature with an MLP.
    """
    def __init__(self, **kwargs):
        super(Net2, self).__init__(**kwargs)
        with self.name_scope():
            # layers created in name_scope will inherit name space
            # from parent layer.
            self.bn = nn.BatchNorm()
            self.dropout = nn.Dropout(0.3)
            self.fc1 = nn.Dense(8192,activation="relu")
            self.fc2 = nn.Dense(1000)
    def forward(self, x):
        # x is a pair of feature batches; both streams are L2-normalized.
        x1 = F.L2Normalization(x[0])
        x2 = F.L2Normalization(x[1])
        # Append ones so the sketch of [feat, 1s] also retains the raw
        # (non-bilinear) feature information; both streams end up at a
        # common 3072-dim length matching the hash tables below.
        # NOTE(review): batch_size/gpus is float division under Python 3 —
        # confirm F.ones accepts a float dim or whether // was intended.
        text_ones = F.ones((batch_size/gpus, 2048),ctx = ctx)
        img_ones = F.ones((batch_size/gpus, 1024),ctx = ctx)
        text_data = F.Concat(x1, text_ones,dim = 1)
        image_data = F.Concat(x2,img_ones,dim = 1)
        # Initialize hash tables
        # Random seed to ensure we have same hash function for all samples
        np.random.seed(0)
        S1 = F.array(np.random.randint(0, 2, (1,3072))*2-1,ctx = ctx)   # random signs s in {+1,-1}
        np.random.seed(0)
        H1 = F.array(np.random.randint(0, out_dim,(1,3072)),ctx = ctx)  # random bucket indices h in [0, out_dim)
        np.random.seed(1)
        S2 = F.array(np.random.randint(0, 2, (1,3072))*2-1,ctx = ctx)
        np.random.seed(1)
        H2 = F.array(np.random.randint(0, out_dim,(1,3072)),ctx = ctx)
        # Count sketch
        cs1 = C.count_sketch( data = image_data, s=S1, h = H1 ,name='cs1',out_dim = out_dim)
        cs2 = C.count_sketch( data = text_data, s=S2, h = H2 ,name='cs2',out_dim = out_dim)
        # Elementwise product in the frequency domain == convolution of the
        # two sketches in the time domain.
        fft1 = C.fft(data = cs1, name='fft1', compute_size = compute_size)
        fft2 = C.fft(data = cs2, name='fft2', compute_size = compute_size)
        c = fft1 * fft2
        ifft1 = C.ifft(data = c, name='ifft1', compute_size = compute_size)
        # MLP
        z = self.fc1(ifft1)
        z = self.bn(z)
        z = self.dropout(z)
        z = self.fc2(z)
        return z
###Output
_____no_output_____
###Markdown
Data IO MXNET requires a data iterator to feed in the samples to the model.
###Code
from VQAtrainIter import VQAtrainIter
###Output
_____no_output_____
###Markdown
The function is defined in __VQAtrainIter.py__. The inputs of the data iterator are extracted image and question features. At each step, the data iterator will return a data batch list: a question data batch and an image data batch. We need to separate the data batches by the length of the input data because the input questions have different lengths. The $buckets$ parameter defines the max length you want to keep in the data iterator. Here, since we already used a pretrained model to extract the question feature, the question length is fixed as the output of the pretrained model. The $layout$ parameter defines the layout of the data iterator output. "N" specifies where the data batch dimension is. The $reset()$ function is called after every epoch. The $next()$ function is called after each batch. Here we will use a subset of the VQA v1 dataset in this tutorial. We extract the image feature from ResNet-152 and the text feature from the GNMT encoder. We have 21537 training samples and 1044 validation samples in this tutorial. The image feature is a 2048-dim vector. The question feature is a 1024-dim vector.
###Code
# Download the dataset
# Per-split file names: question features (GNMT encoder, 1024-d per the
# tutorial text), image features (ResNet-152, 2048-d), and answer labels.
# The test split additionally carries question/image ids, the answer
# vocabulary (atoi.json), and raw question text.
dataset_files = {'train': ('train_question.npz','train_img.npz','train_ans.npz'),
                 'validation': ('val_question.npz','val_img.npz','val_ans.npz'),
                 'test':('test_question_id.npz','test_question.npz','test_img_id.npz','test_img.npz','atoi.json','test_question_txt.json')}
train_q, train_i, train_a = dataset_files['train']
val_q, val_i, val_a = dataset_files['validation']
# All files are hosted on the public apache-mxnet S3 bucket.
url_format = 'https://apache-mxnet.s3-accelerate.amazonaws.com/gluon/dataset/VQA-notebook/{}'
# Fetch each split once; presence of the question file marks a split as done.
if not os.path.exists(train_q):
    logging.info('Downloading training dataset.')
    download(url_format.format(train_q),overwrite=True)
    download(url_format.format(train_i),overwrite=True)
    download(url_format.format(train_a),overwrite=True)
if not os.path.exists(val_q):
    logging.info('Downloading validation dataset.')
    download(url_format.format(val_q),overwrite=True)
    download(url_format.format(val_i),overwrite=True)
    download(url_format.format(val_a),overwrite=True)
# 'NT' layout: batch dimension first ("N"), then the (fixed-length) feature axis.
layout = 'NT'
# Question features come out of the pretrained encoder at a fixed length,
# so a single bucket is enough.
bucket = [1024]
# Each .npz stores its array under key 'x'.
train_question = np.load(train_q)['x']
val_question = np.load(val_q)['x']
train_ans = np.load(train_a)['x']
val_ans = np.load(val_a)['x']
train_img = np.load(train_i)['x']
val_img = np.load(val_i)['x']
print("Total training sample:",train_ans.shape[0])
print("Total validation sample:",val_ans.shape[0])
data_train = VQAtrainIter(train_img, train_question, train_ans, batch_size, buckets = bucket,layout=layout)
data_eva = VQAtrainIter(val_img, val_question, val_ans, batch_size, buckets = bucket,layout=layout)
###Output
Total training sample: 21537
Total validation sample: 1044
WARNING: discarded 0 sentences longer than the largest bucket.
WARNING: discarded 0 sentences longer than the largest bucket.
###Markdown
Training We __initialize the parameters__, __define loss, evaluation metrics and optimizer__ as follows:
###Code
# Pick the model: Net1 = concatenation baseline, Net2 = MCB pooling.
net = Net1()
#net = Net2()
net.collect_params().initialize(mx.init.Xavier(), ctx=ctx)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()

def evaluate_accuracy(data_iterator, net):
    """Run one full pass over data_iterator and return the accuracy.

    A fresh Accuracy metric is created per call.  The original version
    reused the module-level `metric` without resetting it, so every call
    (train accuracy, then validation accuracy, every epoch) reported a
    running average over *all* batches ever seen, not this dataset's
    accuracy.  autograd.record() is also dropped: evaluation needs no
    gradient graph.
    """
    eval_metric = mx.metric.Accuracy()
    data_iterator.reset()
    for batch in data_iterator:
        data1 = batch.data[0].as_in_context(ctx)
        data2 = batch.data[1].as_in_context(ctx)
        label = batch.label[0].as_in_context(ctx)
        output = net([data1, data2])
        eval_metric.update([label], [output])
    return eval_metric.get()[1]

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
###Output
_____no_output_____
###Markdown
After the initializations, we can start the training. It processes one batch at a time. After we run over all batches (one epoch), we calculate the training/validation accuracy and save the network parameters if the validation accuracy is higher than the previous best.
###Code
epochs = 10
moving_loss = 0.
best_eva = 0
for e in range(epochs):
    data_train.reset()
    for i, batch in enumerate(data_train):
        data1 = batch.data[0].as_in_context(ctx)
        data2 = batch.data[1].as_in_context(ctx)
        data = [data1, data2]
        label = batch.label[0].as_in_context(ctx)
        with autograd.record():
            output = net(data)
            cross_entropy = loss(output, label)
        cross_entropy.backward()
        trainer.step(data[0].shape[0])
        ##########################
        # Keep a moving average of the losses
        ##########################
        # BUG FIX: the original computed np.mean(cross_entropy.asnumpy()[0]),
        # i.e. the mean of the *first sample's* loss only; average over the
        # whole batch instead so the reported loss reflects all samples.
        batch_loss = np.mean(cross_entropy.asnumpy())
        if i == 0:
            moving_loss = batch_loss
        else:
            moving_loss = .99 * moving_loss + .01 * batch_loss
        #if i % 200 == 0:
        #    print("Epoch %s, batch %s. Moving avg of loss: %s" % (e, i, moving_loss))
    # End of epoch: evaluate and checkpoint on validation improvement.
    eva_accuracy = evaluate_accuracy(data_eva, net)
    train_accuracy = evaluate_accuracy(data_train, net)
    print("Epoch %s. Loss: %s, Train_acc %s, Eval_acc %s" % (e, moving_loss, train_accuracy, eva_accuracy))
    if eva_accuracy > best_eva:
        best_eva = eva_accuracy
        logging.info('Best validation acc found. Checkpointing...')
        net.save_params('vqa-mlp-%d.params'%(e))
###Output
INFO:root:Best validation acc found. Checkpointing...
###Markdown
Testing After the training loop over Net1 or Net2, we can try it on test data. Here we have 10 test samples.
###Code
# Gate for the qualitative test section of the notebook.
test = True
if test:
    test_q_id, test_q, test_i_id, test_i, atoi,text = dataset_files['test']
# Download test artifacts only once (presence of the question file is the marker).
if test and not os.path.exists(test_q):
    logging.info('Downloading test dataset.')
    download(url_format.format(test_q_id),overwrite=True)
    download(url_format.format(test_q),overwrite=True)
    download(url_format.format(test_i_id),overwrite=True)
    download(url_format.format(test_i),overwrite=True)
    download(url_format.format(atoi),overwrite=True)
    download(url_format.format(text),overwrite=True)
if test:
    # Each .npz stores its array under key 'x'.
    test_question = np.load("test_question.npz")['x']
    test_img = np.load("test_img.npz")['x']
    test_question_id = np.load("test_question_id.npz")['x']
    test_img_id = np.load("test_img_id.npz")['x']
    #atoi = np.load("atoi.json")['x']
###Output
_____no_output_____
###Markdown
We pass the test data iterator to the trained model. The output is a list of predicted answers.
###Code
# Dummy all-zero labels are passed because VQAtrainIter's signature requires
# a label array; they are never used at inference time.  Batch size 10
# matches the 10 test samples, so the loop runs a single batch.
data_test = VQAtrainIter(test_img, test_question, np.zeros((test_img.shape[0],1)), 10, buckets = bucket,layout=layout)
for i, batch in enumerate(data_test):
    with autograd.record():  # NOTE(review): recording is unnecessary for inference — confirm and drop
        data1 = batch.data[0].as_in_context(ctx)
        data2 = batch.data[1].as_in_context(ctx)
        data = [data1,data2]
        #label = batch.label[0].as_in_context(ctx)
        #label_one_hot = nd.one_hot(label, 10)
        output = net(data)
# argmax over the 1000 answer classes -> predicted answer index per sample.
output = np.argmax(output.asnumpy(), axis = 1)
###Output
WARNING: discarded 0 sentences longer than the largest bucket.
###Markdown
We randomly select one sample to show the testing result.
###Code
# Pick one of the 10 test samples at random for qualitative inspection.
idx = np.random.randint(10)
question = json.load(open(text))
print("Question:", question[idx])
# COCO test images are named COCO_test2015_<12-digit zero-padded image id>.jpg
image_name = 'COCO_test2015_' + str(int(test_img_id[idx])).zfill(12)+'.jpg'
if not os.path.exists(image_name):
    logging.info('Downloading training dataset.')
    download(url_format.format('test_images/'+image_name),overwrite=True)
from IPython.display import Image
print("Image:")
Image(filename=image_name)
# Map the predicted class index back to its answer string.  ix_to_ans is
# 1-indexed while the network output is a 0-indexed class id, hence the +1.
dataset = json.load(open('atoi.json'))
ans = dataset['ix_to_ans'][str(output[idx]+1)]
print("Answer:", ans)
###Output
Answer: no
|
eval_custom.ipynb | ###Markdown
Evaluation using CREPE and Gentle Forced Aligner
###Code
# Dependencies
!pip install librosa
!pip install tensorflow==2.6.0
!pip install keras==2.6.0
!pip install crepe
!pip install scipy --upgrade
!pip install h5py
!pip install dtw-python
!pip install mido
# Things used so far:
"""
CREPE - Deep Learning Pitch Detector
Torchcrepe - Implementation of CREPE in Pytorch
MIR-QBSH - Dataset to train pitch detector NN
Gentle Forced Aligner - Forced Alignment
Mido - Reading MIDI Files
dtw-python - Dynamic Time Warping to join CREPE output with MIDI file
"""
# Imports
import os
import pickle
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import wavfile
from mido import MidiFile
from dtw import *
import sys
sys.path.append('/home/azureuser/cloudfiles/code/Users/cl43/torchcrepe')
import torchcrepe
from torchcrepe.predict_custom import predict as predict_custom
from torchcrepe.predict_custom import load_audio
###Output
Requirement already satisfied: librosa in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (0.8.1)
Requirement already satisfied: scipy>=1.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (1.5.4)
Requirement already satisfied: pooch>=1.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (1.5.2)
Requirement already satisfied: audioread>=2.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (2.1.9)
Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (0.22.2.post1)
Requirement already satisfied: joblib>=0.14 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (0.14.1)
Requirement already satisfied: packaging>=20.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (21.0)
Requirement already satisfied: decorator>=3.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (5.0.9)
Requirement already satisfied: resampy>=0.2.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (0.2.2)
Requirement already satisfied: soundfile>=0.10.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (0.10.3.post1)
Requirement already satisfied: numpy>=1.15.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (1.19.5)
Requirement already satisfied: numba>=0.43.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from librosa) (0.53.1)
Requirement already satisfied: requests in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from pooch>=1.0->librosa) (2.26.0)
Requirement already satisfied: appdirs in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from pooch>=1.0->librosa) (1.4.4)
Requirement already satisfied: pyparsing>=2.0.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from packaging>=20.0->librosa) (2.4.7)
Requirement already satisfied: six>=1.3 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from resampy>=0.2.2->librosa) (1.15.0)
Requirement already satisfied: cffi>=1.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from soundfile>=0.10.2->librosa) (1.14.6)
Requirement already satisfied: llvmlite<0.37,>=0.36.0rc1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from numba>=0.43.0->librosa) (0.36.0)
Requirement already satisfied: setuptools in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from numba>=0.43.0->librosa) (50.3.0)
Requirement already satisfied: charset-normalizer~=2.0.0; python_version >= "3" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests->pooch>=1.0->librosa) (2.0.4)
Requirement already satisfied: certifi>=2017.4.17 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests->pooch>=1.0->librosa) (2021.5.30)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests->pooch>=1.0->librosa) (1.25.11)
Requirement already satisfied: idna<4,>=2.5; python_version >= "3" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests->pooch>=1.0->librosa) (3.2)
Requirement already satisfied: pycparser in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from cffi>=1.0->soundfile>=0.10.2->librosa) (2.20)
Requirement already satisfied: tensorflow==2.6.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (2.6.0)
Requirement already satisfied: clang~=5.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (5.0)
Requirement already satisfied: protobuf>=3.9.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (3.17.3)
Requirement already satisfied: wrapt~=1.12.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.12.1)
Requirement already satisfied: keras-preprocessing~=1.1.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.1.2)
Requirement already satisfied: absl-py~=0.10 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (0.13.0)
Requirement already satisfied: grpcio<2.0,>=1.37.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.39.0)
Requirement already satisfied: six~=1.15.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.15.0)
Requirement already satisfied: google-pasta~=0.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (0.2.0)
Requirement already satisfied: h5py~=3.1.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (3.1.0)
Requirement already satisfied: tensorflow-estimator~=2.6 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (2.7.0)
Requirement already satisfied: astunparse~=1.6.3 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.6.3)
Requirement already satisfied: wheel~=0.35 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (0.35.1)
Requirement already satisfied: numpy~=1.19.2 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.19.5)
Requirement already satisfied: keras~=2.6 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (2.6.0)
Requirement already satisfied: typing-extensions~=3.7.4 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (3.7.4.3)
Requirement already satisfied: flatbuffers~=1.12.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.12)
Requirement already satisfied: gast==0.4.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (0.4.0)
Requirement already satisfied: opt-einsum~=3.3.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (3.3.0)
Requirement already satisfied: tensorboard~=2.6 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (2.7.0)
Requirement already satisfied: termcolor~=1.1.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorflow==2.6.0) (1.1.0)
Requirement already satisfied: cached-property; python_version < "3.8" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from h5py~=3.1.0->tensorflow==2.6.0) (1.5.2)
Requirement already satisfied: google-auth<3,>=1.6.3 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (1.35.0)
Requirement already satisfied: markdown>=2.6.8 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (3.3.4)
Requirement already satisfied: requests<3,>=2.21.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (2.26.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (0.4.6)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (1.8.0)
Requirement already satisfied: werkzeug>=0.11.15 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (1.0.1)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (0.6.1)
Requirement already satisfied: setuptools>=41.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from tensorboard~=2.6->tensorflow==2.6.0) (50.3.0)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.6.0) (0.2.8)
Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3.6" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.6.0) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.6.0) (4.2.2)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow==2.6.0) (4.8.1)
Requirement already satisfied: charset-normalizer~=2.0.0; python_version >= "3" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.6.0) (2.0.4)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.6.0) (1.25.11)
Requirement already satisfied: idna<4,>=2.5; python_version >= "3" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.6.0) (3.2)
Requirement already satisfied: certifi>=2017.4.17 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow==2.6.0) (2021.5.30)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow==2.6.0) (1.3.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow==2.6.0) (0.4.8)
Requirement already satisfied: zipp>=0.5 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard~=2.6->tensorflow==2.6.0) (3.5.0)
Requirement already satisfied: oauthlib>=3.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow==2.6.0) (3.1.1)
Requirement already satisfied: keras==2.6.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (2.6.0)
Requirement already satisfied: crepe in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (0.0.12)
Requirement already satisfied: scipy>=1.0.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (1.5.4)
Requirement already satisfied: numpy>=1.14.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (1.19.5)
Requirement already satisfied: resampy<0.3.0,>=0.2.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (0.2.2)
Requirement already satisfied: scikit-learn>=0.16 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (0.22.2.post1)
Requirement already satisfied: imageio>=2.3.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (2.9.0)
Requirement already satisfied: hmmlearn<0.3.0,>=0.2.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (0.2.6)
Requirement already satisfied: matplotlib>=2.1.0 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (3.2.1)
Requirement already satisfied: h5py in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from crepe) (3.1.0)
Requirement already satisfied: numba>=0.32 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from resampy<0.3.0,>=0.2.0->crepe) (0.53.1)
Requirement already satisfied: six>=1.3 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from resampy<0.3.0,>=0.2.0->crepe) (1.15.0)
Requirement already satisfied: joblib>=0.11 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from scikit-learn>=0.16->crepe) (0.14.1)
Requirement already satisfied: pillow in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from imageio>=2.3.0->crepe) (8.0.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from matplotlib>=2.1.0->crepe) (2.4.7)
Requirement already satisfied: cycler>=0.10 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from matplotlib>=2.1.0->crepe) (0.10.0)
Requirement already satisfied: python-dateutil>=2.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from matplotlib>=2.1.0->crepe) (2.8.2)
Requirement already satisfied: kiwisolver>=1.0.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from matplotlib>=2.1.0->crepe) (1.3.1)
Requirement already satisfied: cached-property; python_version < "3.8" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from h5py->crepe) (1.5.2)
Requirement already satisfied: llvmlite<0.37,>=0.36.0rc1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from numba>=0.32->resampy<0.3.0,>=0.2.0->crepe) (0.36.0)
Requirement already satisfied: setuptools in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from numba>=0.32->resampy<0.3.0,>=0.2.0->crepe) (50.3.0)
Requirement already up-to-date: scipy in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (1.5.4)
Requirement already satisfied, skipping upgrade: numpy>=1.14.5 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from scipy) (1.19.5)
Requirement already satisfied: h5py in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (3.1.0)
Requirement already satisfied: cached-property; python_version < "3.8" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from h5py) (1.5.2)
Requirement already satisfied: numpy>=1.12; python_version == "3.6" in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from h5py) (1.19.5)
Requirement already satisfied: dtw-python in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (1.1.10)
Requirement already satisfied: numpy>=1.19 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from dtw-python) (1.19.5)
Requirement already satisfied: scipy>=1.1 in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (from dtw-python) (1.5.4)
Requirement already satisfied: mido in /anaconda/envs/azureml_py36/lib/python3.6/site-packages (1.2.10)
Importing the dtw module. When using in academic works please cite:
T. Giorgino. Computing and Visualizing Dynamic Time Warping Alignments in R: The dtw Package.
J. Stat. Soft., doi:10.18637/jss.v031.i07.
###Markdown
Configurables and Setup
###Code
# Combination methods for times from forced aligner and pitch detector
def cmb_average(fa_time, pd_time):
    """Average the forced-aligner and pitch-detector times.

    If exactly one of the two is missing (None or NaN) the other is
    returned unchanged; if both are missing an Exception is raised.
    """
    fa_missing = fa_time is None or np.isnan(fa_time)
    pd_missing = pd_time is None or np.isnan(pd_time)
    if fa_missing and pd_missing:
        raise Exception('Both times are None, cannot combine')
    if fa_missing:
        return pd_time
    if pd_missing:
        return fa_time
    return (fa_time + pd_time) / 2
# Average, but fall back to pitch detector time if forced aligner is far off
def cmb_average_threshold(thresh):
    """Build a two-argument combiner that averages the forced-aligner and
    pitch-detector times, falling back per cmb_average_threshold_helper
    when they disagree by more than `thresh`."""
    def combiner(fa_time, pd_time):
        return cmb_average_threshold_helper(fa_time, pd_time, thresh)
    return combiner
def cmb_average_threshold_helper(fa_time, pd_time, threshold):
    """Average the two times, but trust the pitch detector alone when the
    forced-aligner time disagrees by more than `threshold`.

    Missing values (None or NaN) fall through to the other source; if both
    are missing an Exception is raised.
    """
    def missing(t):
        return t is None or np.isnan(t)

    if missing(fa_time):
        if missing(pd_time):
            raise Exception('Both times are None, cannot combine')
        return pd_time
    if missing(pd_time):
        return fa_time
    # Forced aligner considered unreliable here -> pitch detector wins.
    if abs(fa_time - pd_time) > threshold:
        return pd_time
    return (fa_time + pd_time) / 2
# Same as cmb_average, but allow both to be None/NaN
def cmb_average_lenient(time1, time2):
    """Average two times, tolerating missing values.

    If one time is missing (None/NaN) the other is returned; if both are
    missing, NaN is returned instead of raising.
    """
    missing1 = time1 is None or np.isnan(time1)
    missing2 = time2 is None or np.isnan(time2)
    if missing1:
        return np.nan if missing2 else time2
    if missing2:
        return time1
    return (time1 + time2) / 2
# Configurables
# Data paths (CSD = Children's Song Dataset layout used by this notebook)
data_path = 'data/test/CSD/wav/converted'
midi_path = 'data/test/CSD/mid'
lyric_note_path = 'data/test/CSD/lyric_mid'
ref_alignment_path = 'data/test/CSD/csv'
align_path = 'out/align'    # forced-aligner JSON output
pitch_path = 'out/pitch'    # cached pitch-detector predictions (one subdir per model)
# Crepe options
model = 'large_full'
capacity = 'full'
# Meta-options
# go through en030b
num_files = 60 # How many data points to evaluate on, including skipped files? (Set to -1 or None to evaluate on all available files)
skip_files = ['en002a', 'en002b', 'en004a', 'en005a', 'en005b', 'en007a', 'en007b', 'en008a', 'en008b', # 6
              'en009a', 'en009b', 'en010a', 'en010b', 'en011a', 'en012a', 'en012b', 'en013b', 'en014a', 'en014b', # 2
              'en015a', 'en016a', 'en018a', 'en018b', 'en019b', 'en020a', 'en020b', 'en021a', 'en022a', 'en022b', 'en023a', 'en023b', 'en024a', # 7
              'en025a', 'en025b', 'en026a', 'en026b', 'en027a', 'en027b' # 6 (including en028 and en029 and en030)
              ]
skip_files.append('en006a')
skip_files.append('en006b')
skip_files.append('en019a')
skip_files.append('en024b')
# Reasons files are skipped:
# en002 - Forced aligner output is completely off because of "Gloria" being almost 10 seconds long 6 times in the song
# en005 - Fairly bad performance similarly
# en007, en008 - Similar
# en004a, en011a - similar
# Try without bad files
# skip_files.append('en003b')
# skip_files.append('en017a')
# Forced aligner options
max_word_length = 2 # Maximum length of a given word, in seconds, for forced aligner
# Audio options
midi_stretch = 50 # For convenience of DTW viewing, stretch each note by this many times (1 for no stretching) - 50 is what I've been using
# Prefer false, but it seems to mess up plotting of the alignment (however, it seems to still be correct output-wise? idk why only the plot is messed up)
use_frequency = True # If true, will use raw frequencies (e.g. 440 Hz) rather than note values (i.e. MIDI values, where A4 = 69 and Bb4 = 70).
pd_offset = 0.005 # Offset for pitch detector times (using 0.005 for now because the increments are 0, 0.01, 0.02 so we will just use the middle of each time period)
# DTW options
start_end_constraint = True # Whether the pitch detector alignment forces first word to start at 0 seconds and last word to end at end of file. Note that if this is True, the pitch detector's output will be ignored for the start of the first word and end of last word
# Evaluation
misalign_threshold = 0.3 # Threshold at which a matching will be considered a misalignment
threshold = 1.5 # Threshold (in seconds) of how different forced aligner and pitch detector times can be before just using pitch detector (for when forced aligner is very off)
combine_func = cmb_average_threshold(threshold) # Method to combine forced aligner and pitch detector times
start_end_combine_func = cmb_average_lenient # Method to combine start and end times of a word for simpler evaluation
# NOTE(review): skip_pd_edges does not appear to be read anywhere in this notebook — confirm before relying on it
skip_pd_edges = True # Whether to skip the pitch detector start time for first word and end time for last word
# Read in wavfiles from data_path, honoring num_files and skip_files.
raw_filenames = []  # basenames without the .wav extension, in listing order
wavfiles = [] # (sample rate, audio data)
i = 0
for filename in os.listdir(data_path):
    if filename.endswith(".wav"):
        i += 1
        # limit # files — note skipped files still count toward the limit
        # NOTE(review): if num_files is None (as the config comment suggests is allowed),
        # `i > num_files` raises a TypeError on Python 3 — confirm intended usage
        if i > num_files and num_files > 0:
            break
        # Skip undesired files
        if filename[:-4] in skip_files:
            continue
        # print(os.path.join(data_path, filename))
        raw_filenames.append(filename[:-4])
        audio, sr = load_audio(os.path.join(data_path, filename))
        wavfiles.append((audio, sr))
        # print(wavfiles[-1])
num_points = len(raw_filenames) # This should be the length of raw_filenames, as well as just about every other array relating to data
print('files:', raw_filenames)
# Read in MIDIs, one per audio file.
midis = []
for filename in raw_filenames:
    midis.append(MidiFile(os.path.join(midi_path, filename + '.mid')))
# For each midi, convert to a more useful format: a flat sequence of pitch
# values (midi_references) and their cumulative onset times (midi_timings).
midi_references = []
midi_timings = []
for i, mid in enumerate(midis):
    # Prefer track 1 when present (track 0 is typically metadata only)
    track = mid.tracks[0]
    if len(mid.tracks) > 1:
        track = mid.tracks[1]
    # Simply keep track of note after note
    reference = []
    timing = []
    time = 0  # running absolute time; mido message times are deltas
    for msg in track:
        # For debugging
        # if i == 0:
        #     print(msg)
        if msg.type == 'note_on' or msg.type == 'note_off':
            freq = msg.note
            if use_frequency:
                # Convert MIDI note number to Hz (A0 = note 21 = 27.5 Hz).
                # Prefer not to convert frequency because we probably want values linear in note value, not exponential in note value (as frequencies are)
                freq = 27.5 * 2 ** ((msg.note - 21) / 12)
            time += msg.time
            # Duplicate each note midi_stretch times so DTW has more points to match
            for j in range(midi_stretch):
                reference.append(freq)
                timing.append(time)
        elif msg.type != 'track_name' and msg.type != 'set_tempo' and msg.type != 'time_signature' and msg.type != 'end_of_track' and msg.type != 'key_signature':
            # encountered a message type that may mean something?
            print('[WARNING]', msg.type + ' encountered:', msg)
    midi_references.append(reference)
    midi_timings.append(timing)
# Fetch forced alignment info.
# Fixes vs. original: the JSON file handle was opened but never closed (now a
# `with` block), and two dead locals (`x`, `temp`) were removed.
word_starts = [] # Array of arrays of start times of words (None where no usable match)
word_ends = []   # Parallel array of end times
for filename in raw_filenames:
    # Get JSON object produced by the forced aligner
    with open(os.path.join(align_path, filename + '.json')) as f:
        data = json.load(f)
    # Iterate through the matches
    starts = []
    ends = []
    for match_obj in data['words']:
        # Keep a time only for successful matches of plausible length; record
        # None otherwise so downstream code can fall back to the pitch detector.
        # match_obj['word'] gets the word from transcription
        if match_obj['case'] == 'success' and match_obj['end'] - match_obj['start'] <= max_word_length:
            starts.append(match_obj['start'])
            ends.append(match_obj['end'])
        else:
            starts.append(None)
            ends.append(None)
    word_starts.append(starts)
    word_ends.append(ends)
# Fetch word to note (index) mappings from the lyric-to-MIDI CSVs.
word_start_notes = [] # array of arrays mapping word i to the index of the note which marks its start
for filename in raw_filenames:
    csvfile = pd.read_csv(os.path.join(lyric_note_path, filename) + '.csv')
    word_start_notes.append(csvfile['start_note'].to_numpy())
# Fetch word to reference start/end time mappings (ground truth).
# The CSVs are per-syllable; consecutive rows with the same 'word' value are
# merged into a single word-level [start, end] interval.
target_starts = []
target_ends = []
for filename in raw_filenames:
    csvfile = pd.read_csv(os.path.join(ref_alignment_path, filename) + '.csv')
    starts = []
    ends = []
    curr_idx = 0
    curr_word = csvfile['word'].iloc[curr_idx]
    # Iterate word by word to get start and end times by word instead of syllable (todo: split by syllables)
    while curr_idx < len(csvfile['word']):
        # Word starts at its first syllable row
        starts.append(csvfile['start'].iloc[curr_idx])
        # Advance past every row belonging to the same word
        while curr_idx < len(csvfile['word']) and csvfile['word'].iloc[curr_idx] == curr_word:
            curr_idx += 1
        # Word ends at its last syllable row
        ends.append(csvfile['end'].iloc[curr_idx - 1])
        # Update current word
        if curr_idx < len(csvfile['word']):
            curr_word = csvfile['word'].iloc[curr_idx]
    target_starts.append(np.array(starts))
    target_ends.append(np.array(ends))
###Output
files: ['en001a', 'en001b', 'en003a', 'en003b', 'en004b', 'en011b', 'en013a', 'en015b', 'en016b', 'en017a', 'en017b', 'en021b', 'en028a', 'en028b', 'en029a', 'en029b', 'en030a', 'en030b']
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=63 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
[WARNING] control_change encountered: control_change channel=0 control=121 value=0 time=0
[WARNING] program_change encountered: program_change channel=0 program=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=7 value=100 time=0
[WARNING] control_change encountered: control_change channel=0 control=10 value=64 time=0
[WARNING] control_change encountered: control_change channel=0 control=91 value=0 time=0
[WARNING] control_change encountered: control_change channel=0 control=93 value=0 time=0
[WARNING] midi_port encountered: MetaMessage('midi_port', port=0, time=0)
###Markdown
Running and Loading Predictions
###Code
# Run/load crepe predictions
def save_obj(filename, objs):
    """Pickle each object in *objs* back-to-back into a single file."""
    with open(filename, 'wb') as outp:
        for item in objs:
            pickle.dump(item, outp, pickle.HIGHEST_PROTOCOL)
def read_obj(filename, num_to_read):
    """Unpickle and return *num_to_read* objects written back-to-back in *filename*."""
    with open(filename, 'rb') as inp:
        return [pickle.load(inp) for _ in range(num_to_read)]
# Testing
# from torchcrepe.data import MirDataset
# from torchcrepe.predict_custom import infer
# import torch
# dataset = MirDataset(mini=True)
# point = dataset.__getitem__(150)
# print(infer(torch.unsqueeze(point[0].float(), dim=0), model=model))
# print(point[1])
# e.g. out/pitch/base_tiny
full_pitch_path = os.path.join(pitch_path, model)
times = []
frequencies = []
confidences = []
activations = []
# Run/load crepe predictions
for i, file_info in enumerate(wavfiles):
filename = os.path.join(full_pitch_path, raw_filenames[i] + '.pkl')
if os.path.exists(filename):
# Read cached prediction outputs
arr = read_obj(filename, 4)
time = arr[0]
frequency = arr[1]
confidence = arr[2]
activation = arr[3]
else:
print('running prediction on', filename + '. Should this be happening?')
# Run prediction and save output
sr = file_info[1]
audio = file_info[0]
# print(sr, audio)
# print(audio.detach().cpu().numpy().squeeze(0))
time, frequency, confidence, activation = predict_custom(audio, sr, model=model, decoder=torchcrepe.decode.weighted_argmax, capacity=capacity, special=None)
save_obj(filename, [time, frequency, confidence, activation]) # Uncomment to save predictions
# freq = 27.5 * 2 ** ((msg.note - 21) / 12)
# Convert frequency back to note -> note = 12 * log_2(freq / 27.5) + 21
if not use_frequency:
for idx in range(len(frequency)):
frequency[idx] = 12 * np.log2(frequency[idx] / 27.5) + 21
if not frequency[idx] >= 0:
print(frequency[idx])
times.append(time)
frequencies.append(frequency)
confidences.append(confidence)
activations.append(activation)
print(time[:5], frequency[:10], confidence, activation)
# print(frequency.shape)
# import torch
# print(torch.mean(frequency))
# Run Dynamic Time Warping on each pair of predictions (query) and reference MIDI (template).
alignments = []
for i in range(len(times)):
    if raw_filenames[i] in skip_files:
        alignments.append(None) # fill in empty slot in array so indices stay aligned
        continue
    query = frequencies[i]
    # plt.plot(query)
    # plt.show()
    template = midi_references[i]
    # print(query)
    print(len(query))
    print(len(template))
    print('Running DTW on', raw_filenames[i], '(length of query, template:', str(len(query)) + ', ' + str(len(template)) + ')')
    ## Find the best match with the canonical recursion formula
    # open_begin/open_end relax the endpoint constraint when start_end_constraint is False
    alignment = dtw(query, template, keep_internals=True, open_end=not start_end_constraint, open_begin=not start_end_constraint)
    alignments.append(alignment)
    # Plotting the matching (first file only)
    if i == 0:
        off = -1000 if use_frequency else -50
        alignment.plot(type="twoway",offset=off)
# plt.plot(reference)
# plt.plot(alignment.index2,query[alignment.index1]) # query warped onto reference
###Output
3241
8600
Running DTW on en001a (length of query, template: 3241, 8600)
###Markdown
Evaluation
###Code
# Combine outputs — per-file accumulators filled by the evaluation loop below.
fa_errors = []   # mean absolute error per file, forced aligner
pd_errors = []   # mean absolute error per file, pitch detector
errors = []      # mean absolute error per file, combined prediction
fa_misalign = [] # misalignment rate per file, forced aligner
pd_misalign = []
misalign = []
# For individual investigation — per-word residual arrays, one entry per file
fa_residual_starts = []
pd_residual_starts = []
residual_starts = []
fa_residual_ends = []
pd_residual_ends = []
residual_ends = []
fa_residual_cmbs = []
pd_residual_cmbs = []
residual_cmbs = []
# Per-file evaluation: map DTW output to word times, combine with the forced
# aligner, and compare everything against the reference alignment.
# Fix vs. original: the pitch-detector omission message was missing the word
# "missed", inconsistent with the forced-aligner message above it.
for i in range(num_points):
    if raw_filenames[i] in skip_files:
        continue
    # Convert DTW output into start and end times of each word
    ref_lyric_note_alignment = word_start_notes[i]
    # (reference) Start/end times, indexed by word index
    ref_start_times = target_starts[i]
    ref_end_times = target_ends[i]
    num_words = len(ref_start_times) # number of words in sample
    # (forced alignment) Start/end times, indexed by word index (None -> NaN)
    fa_start_times = np.array(word_starts[i], dtype=np.float64)
    fa_end_times = np.array(word_ends[i], dtype=np.float64)
    # (reference) Start/end note indices (i.e. 0 if first note, 1 if second, etc), inclusive, indexed by word index
    ref_start_notes = []
    ref_end_notes = []
    for j in range(len(ref_lyric_note_alignment)):
        ref_start_notes.append(ref_lyric_note_alignment[j])
        ref_end_notes.append(ref_lyric_note_alignment[j+1] - 1 if j < len(ref_lyric_note_alignment) - 1 else -1) # -1 if last note
    # (pitch detector) alignment (index1 is indices in x, where x is query and y is reference)
    pd_alignment = alignments[i]
    # (pitch detector) times, indexed by query notes
    pd_times = times[i]
    query_indices = pd_alignment.index1
    reference_indices = pd_alignment.index2
    # Map alignment times to words
    curr_ref_idx = 0 # current index in reference_indices
    pd_start_times = [] # start times of each word, according to pitch detector
    pd_end_times = [] # end times of each word, according to pitch detector
    for word_idx in range(num_words):
        # Get the corresponding query index and plug it into the note times
        pd_start_times.append(pd_times[query_indices[curr_ref_idx]] + pd_offset)
        # go until the last note that is associated with this word (scale up by midi_stretch because we stretched each reference note by that much, scale up by 2 because each note appears twice)
        last_note = (ref_end_notes[word_idx] + 1) * 2 * midi_stretch - 1 if ref_end_notes[word_idx] >= 0 else reference_indices[-1]
        while curr_ref_idx < len(reference_indices) and reference_indices[curr_ref_idx] <= last_note:
            curr_ref_idx += 1
        # Get the corresponding query index (one before where we stopped) and plug it into the note times
        pd_end_times.append(pd_times[query_indices[curr_ref_idx - 1]] + pd_offset)
    pd_start_times = np.array(pd_start_times)
    pd_end_times = np.array(pd_end_times)
    # Combine forced aligner and pitch detector times
    pred_start_times = np.array([combine_func(fa_start_times[j], pd_start_times[j]) for j in range(num_words)])
    pred_end_times = np.array([combine_func(fa_end_times[j], pd_end_times[j]) for j in range(num_words)])
    if start_end_constraint:
        # Pitch detector is forced to span the whole file, so trust the forced
        # aligner alone at the two edges when it has a value
        if fa_start_times[0] is not None:
            pred_start_times[0] = fa_start_times[0]
        if fa_end_times[-1] is not None:
            pred_end_times[-1] = fa_end_times[-1]
    # Combined times ("average" of start and end times of each word)
    fa_cmb_times = np.array([start_end_combine_func(fa_start_times[j], fa_end_times[j]) for j in range(num_words)])
    pd_cmb_times = np.array([start_end_combine_func(pd_start_times[j], pd_end_times[j]) for j in range(num_words)])
    pred_cmb_times = np.array([start_end_combine_func(pred_start_times[j], pred_end_times[j]) for j in range(num_words)])
    ref_cmb_times = np.array([start_end_combine_func(ref_start_times[j], ref_end_times[j]) for j in range(num_words)])
    # Evaluate outputs! Residual = predicted - reference, per word.
    pd_residual_start_times = pd_start_times - ref_start_times
    pd_residual_end_times = pd_end_times - ref_end_times
    fa_residual_start_times = fa_start_times - ref_start_times
    fa_residual_end_times = fa_end_times - ref_end_times
    residual_start_times = pred_start_times - ref_start_times
    residual_end_times = pred_end_times - ref_end_times
    # plt.plot(residual_start_times)
    # plt.plot(pd_residual_start_times)
    # plt.plot(fa_residual_start_times)
    fa_residual_nans = np.count_nonzero(np.isnan(fa_residual_start_times))
    pd_residual_nans = np.count_nonzero(np.isnan(pd_residual_start_times))
    print('===============================================================================')
    print('Results for file', raw_filenames[i])
    print('===============================================================================')
    if fa_residual_nans > 0:
        print('[WARNING] There are', fa_residual_nans, 'NaNs in forced aligner output out of', num_words, 'total')
        if fa_residual_nans > num_words / 4:
            print('[WARNING] Omitting due to > 25% missed forced alignment matches')
            continue
    if pd_residual_nans > 0:
        print('[WARNING] There are', pd_residual_nans, 'NaNs in pitch detector output out of', num_words, 'total')
        if pd_residual_nans > num_words / 4:
            print('[WARNING] Omitting due to > 25% missed pitch detector matches')
            continue
    print('Average forced aligner error (start):', np.nanmean(np.abs(fa_residual_start_times)))
    print('Average forced aligner error (end):', np.nanmean(np.abs(fa_residual_end_times)))
    print('Average pitch detector error (start):', np.nanmean(np.abs(pd_residual_start_times)))
    print('Average pitch detector error (end):', np.nanmean(np.abs(pd_residual_end_times)))
    print('Average combined error (start):', np.nanmean(np.abs(residual_start_times)))
    print('Average combined error (end):', np.nanmean(np.abs(residual_end_times)))
    pd_residual_cmb_times = pd_cmb_times - ref_cmb_times
    fa_residual_cmb_times = fa_cmb_times - ref_cmb_times
    # plt.plot(fa_residual_cmb_times)
    # print(np.argmax(fa_residual_cmb_times))
    # plt.title(raw_filenames[i])
    # plt.show()
    residual_cmb_times = pred_cmb_times - ref_cmb_times
    # For investigation
    fa_residual_starts.append(fa_residual_start_times)
    fa_residual_ends.append(fa_residual_end_times)
    fa_residual_cmbs.append(fa_residual_cmb_times)
    pd_residual_starts.append(pd_residual_start_times)
    pd_residual_ends.append(pd_residual_end_times)
    pd_residual_cmbs.append(pd_residual_cmb_times)
    residual_starts.append(residual_start_times)
    residual_ends.append(residual_end_times)
    residual_cmbs.append(residual_cmb_times)
    # Append errors (mean absolute residual over words, NaNs ignored)
    fa_error = np.nanmean(np.abs(fa_residual_cmb_times))
    fa_errors.append(fa_error)
    pd_error = np.nanmean(np.abs(pd_residual_cmb_times))
    pd_errors.append(pd_error)
    error = np.nanmean(np.abs(residual_cmb_times))
    errors.append(error)
    # Append misalignment rates (fraction of words with |residual| >= misalign_threshold)
    fa_misalign_count = 1 - np.count_nonzero(np.less(np.abs(fa_residual_cmb_times), misalign_threshold)) / num_words
    fa_misalign.append(fa_misalign_count)
    pd_misalign_count = 1 - np.count_nonzero(np.less(np.abs(pd_residual_cmb_times), misalign_threshold)) / num_words
    pd_misalign.append(pd_misalign_count)
    misalign_count = 1 - np.count_nonzero(np.less(np.abs(residual_cmb_times), misalign_threshold)) / num_words
    misalign.append(misalign_count)
    # Print overall info
    print('Average forced aligner error (overall):', fa_error)
    print('Forced aligner misalignment rate:', fa_misalign_count)
    print('Average pitch detector error (overall):', pd_error)
    print('Pitch detector misalignment rate:', pd_misalign_count)
    print('Average combined error (overall):', error)
    print('Overall misalignment rate:', misalign_count)
    # plt.plot(residual_cmb_times)
    # plt.title(raw_filenames[i])
    # plt.show()
# Aggregate per-file results into overall averages.
fa_errors = np.array(fa_errors)
pd_errors = np.array(pd_errors)
errors = np.array(errors)
# Aggregated results (errors reported in milliseconds, misalignment as a percentage)
print('==================== Results for', model, '====================')
print('Average overall error (ms):', round(np.mean(errors) * 1000, 2))
print('Average overall misalignment % (' + str(misalign_threshold) + ' seconds):', round(np.mean(misalign) * 100, 2))
print('Average forced aligner error (ms):', round(np.mean(fa_errors) * 1000, 2))
print('Average forced aligner misalignment % (' + str(misalign_threshold) + ' seconds):', round(np.mean(fa_misalign) * 100, 2))
print('Average pitch detector error (ms):', round(np.mean(pd_errors) * 1000, 2))
print('Average pitch detector misalignment % (' + str(misalign_threshold) + ' seconds):', round(np.mean(pd_misalign) * 100, 2))
import matplotlib.pyplot as plt
# en003a was dropped during evaluation (presumably by the >25% NaN check), so
# exclude it to keep x labels aligned with the error arrays — TODO confirm
final_raw = [filename for filename in raw_filenames if filename != 'en003a']
print('Net:', model)
# Forced aligner
plt.plot(final_raw, fa_errors)
plt.xticks(rotation=90)
plt.title('Forced Aligner Average Errors (seconds)')
plt.show()
# Pitch Detector
plt.plot(final_raw, pd_errors)
plt.xticks(rotation=90)
plt.title('Pitch Detector Average Errors (seconds)')
plt.show()
# Overall
plt.plot(final_raw, errors)
plt.xticks(rotation=90)
plt.title('Overall Average Errors (seconds)')
plt.show()
# Plot per-word residuals for each file under investigation (currently all files).
investigate = final_raw
inv_idx = [final_raw.index(name) for name in investigate]
for i in range(len(inv_idx)):
    idx = inv_idx[i]
    fa_res = fa_residual_cmbs[idx]
    pd_res = pd_residual_cmbs[idx]
    res = residual_cmbs[idx]
    plt.plot(fa_res, label = 'Forced Aligner')
    plt.plot(pd_res, label='Pitch Detector')
    plt.xlabel('Word index')
    plt.ylabel('Predicted time - Actual time')
    # plt.plot(res, label='Overall')
    plt.legend()
    plt.title(investigate[i] + ' residuals')
    plt.show()
###Output
_____no_output_____ |
009_Gradient_Boosting.ipynb | ###Markdown
Carga de Datos
###Code
# Load the preprocessed bioactivity dataset and split into features/target.
# NOTE(review): `path` is defined in an earlier notebook cell not shown here — confirm
df_completo = pd.read_csv(path+'acetylcholinesterase_02_bioactivity_data_preprocessed_token_descriptors.csv')
# Drop identifier, raw-target, sequence, and Lipinski-descriptor columns,
# keeping only the token-descriptor features
X= df_completo.drop(['molecule_chembl_id', 'canonical_smiles', 'standard_value',
                     'standard_value_norm', 'pIC50', 'X_seq', 'X_seq_pad', 'MW', 'LogP',
                     'NumHDonors', 'NumHAcceptors', 'bioactivity_class', 'Name'], axis=1)
y = df_completo.pIC50.values  # regression target: pIC50
###Output
_____no_output_____
###Markdown
Split de Datos
###Code
# Hold out 20% of the data for validation (random split, no fixed seed)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
Modelo LightGBM
###Code
def R2(y_true, y_pred):
    """Coefficient of determination (R^2) computed with the Keras backend,
    with K.epsilon() in the denominator to guard against division by zero."""
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual_ss / (total_ss + K.epsilon())
def R2_numpy(y_true, y_pred):
    """Coefficient of determination (R^2) for numpy arrays, with machine
    epsilon in the denominator to guard against division by zero."""
    residual_ss = np.sum(np.square(y_true - y_pred))
    total_ss = np.sum(np.square(y_true - np.mean(y_true)))
    return 1 - residual_ss / (total_ss + np.finfo(float).eps)
# Train an LGBMRegressor with early stopping on the validation set.
cat_vars = X.columns.to_list()  # treat every feature column as categorical
min_child_samples=100 # minimum number of samples a child leaf needs for a split
n_estimators=800000 # max number of sequential trees; set very high because early stopping ends training long before this is reached
learning_rate=0.005
model = LGBMRegressor(min_child_samples=min_child_samples, n_estimators=n_estimators, learning_rate=learning_rate )
# NOTE(review): LightGBM's built-in metrics do not include 'r2' — confirm this
# eval_metric is actually recognized and not silently ignored
fit_params={"early_stopping_rounds":100,
            "eval_metric" : 'r2',
            "eval_set" : [(X_val, y_val.reshape(-1))],
            'eval_names': ['valid'],
            'verbose': 100,
            'feature_name': 'auto', # that's actually the default
            'categorical_feature': cat_vars # tell LightGBM which columns are categorical so it applies EFB (Exclusive Feature Bundling)
            }
model.fit(X_train, y_train.reshape(-1), **fit_params)
model.score(X_val, y_val)
y_val_pred = model.predict(X_val)
print('val_R2: ',R2_numpy(y_val, y_val_pred))
###Output
val_R2: 0.35426855506091404
###Markdown
HYPER-SEARCH PARAMETERS
###Code
# Define the hyper-parameter search space for scikit-optimize.
dim_learning_rate= Real(low=0.01, high=1, prior='log-uniform', name='learning_rate')
dim_boosting_type = Categorical(['gbdt'], name='boosting_type')
dim_subsample = Real(low=0.01, high=1.0, prior='log-uniform', name='subsample')
dim_subsample_freq = Integer(0, 10, name='subsample_freq')
dim_max_depth= Integer(1, 20, name='max_depth') # Larger is usually better, but overfitting speed increases.
dim_num_leaves= Integer(2, 100,name='num_leaves') #max number of leaves in one tree
dim_min_child_samples= Integer(1, 200, name='min_child_samples') # minimal number of data in one leaf
dim_reg_lambda= Real(0.001, 100, 'log-uniform', name='reg_lambda') # L2 regularization
dim_reg_alpha= Real(0.001, 100, 'log-uniform', name='reg_alpha') # L1 regularization
dim_colsample_bytree= Real(0.1, 1.0, 'uniform', name='colsample_bytree') # enabler of bagging fraction
dim_min_child_weight=Integer(0, 10, name='min_child_weight') # minimal number of data in one leaf.
dim_n_estimators= Integer(1, 1000, name='n_estimators') # number of sequential estimators (set high, early stopping ends training)
dimensions = [
    # Regularization --> model complexity
    dim_max_depth,
    dim_min_child_weight,
    dim_reg_lambda,
    dim_reg_alpha,
    # Regularization --> randomness
    dim_subsample,
    dim_subsample_freq,
    dim_colsample_bytree,
    # Others
    dim_num_leaves,
    dim_min_child_samples,
    dim_n_estimators,
    dim_boosting_type,
]
max_iterSearch = 300
# Globals mutated by the objective function below to keep the best result seen.
best_accuracy = -10.0
best_parameters = [{'teste': 1}]
# Checkpoint-saver callback: the file test decides whether we run locally
# (notebook present in cwd) or from a mounted `path` — TODO confirm intent.
if not os.path.isfile('001_Data_retrieve.ipynb'):
    checkpoint_saver = CheckpointSaver(path+'/checkpoint.pkl', compress=9)
else:
    checkpoint_saver = CheckpointSaver('checkpoint.pkl', compress=9)
#deltaXStopper = DeltaXStopper(0.001)
#deltaLineStoper = DeadlineStopper(60*5)
# Objective function wiring LightGBM to skopt's gp_minimize.
@use_named_args(dimensions=dimensions)
def fitness (max_depth, num_leaves, min_child_samples,reg_lambda, reg_alpha, colsample_bytree, min_child_weight,
             boosting_type, subsample, subsample_freq, n_estimators):
    """Train one LGBMRegressor for a sampled point of the search space and
    return the negated validation R^2 (gp_minimize minimizes).

    Side effects: updates the globals ``best_accuracy`` / ``best_parameters``
    whenever a new best score is found.
    """
    # NOTE(review): several sampled dimensions (num_leaves, reg_alpha,
    # min_child_weight, n_estimators, boosting_type) are commented out below,
    # so they are searched but never applied — confirm this is intentional.
    model = LGBMRegressor( max_depth=max_depth,
                          min_child_samples=min_child_samples,
                          reg_lambda=reg_lambda,
                          subsample=subsample, subsample_freq=subsample_freq,
                          colsample_bytree=colsample_bytree,
                          #num_leaves=num_leaves,
                          #reg_alpha=reg_alpha,
                          #min_child_weight= min_child_weight, n_estimators=n_estimators,
                          #boosting_type=boosting_type,
                          n_jobs=2)
    ### WITH CV — kept for reference ###
    #fit_params={"early_stopping_rounds":30,
    #            "eval_metric" : 'r2',
    #            #"eval_set" : [(X_val, y_val.reshape(-1))],
    #            #'eval_names': ['valid'],
    #            'verbose': 0,
    #            'feature_name': 'auto', # that's actually the default
    #            #'categorical_feature': cat_vars
    #           }
    #score = np.mean(cross_val_score(model, X_train, y_train.reshape(-1), cv=KFold(n_splits=4), n_jobs=2, verbose=0
    #                                   ,scoring= 'r2'
    #                                , fit_params= fit_params))
    ###-------------
    ### WITHOUT CV: single train/val fit with early stopping ###
    fit = model.fit(X_train, y_train.reshape(-1),
                    eval_set=[(X_val, y_val.reshape(-1))],
                    eval_metric='r2',
                    #feature_name='auto',
                    #categorical_feature=cat_vars,
                    verbose=False,
                    early_stopping_rounds=100)
    y_pred = model.predict(X_val)
    score = R2_numpy(y_val, y_pred)
    ###----------------
    global best_accuracy
    global best_parameters
    if score > best_accuracy:
        #print('current best score:', score)
        #print('previous best score:', best_accuracy)
        best_parameters[0] = model.get_params()
        best_accuracy = score
    del model
    # Negate: skopt minimizes, we want to maximize R^2.
    return -score
# Run the Bayesian optimisation (LCB acquisition) and time it; each call
# trains one LightGBM model. Progress is checkpointed via checkpoint_saver.
%time search_result = gp_minimize(func=fitness, dimensions=dimensions, n_calls=max_iterSearch, n_jobs=2, verbose=True, acq_func='LCB',callback=[checkpoint_saver])
# Print search results
print('best score custom function', best_accuracy)
#print('best score gp_minimize', search_result.fun)
print('best parametrers gp_minimize')
print('best parametrers custom function:')
best_parameters
#plot_convergence(search_result)
#ppE = plot_evaluations(search_result)
#pp = plot_objective(search_result)
# Refit a fresh model with the best parameters found; fit_params comes from
# the earlier baseline cell (early stopping on the validation set).
model = LGBMRegressor(**best_parameters[0])
model.fit(X_train, y_train.reshape(-1), **fit_params)
y_pred_train = model.predict(X_train)
y_pred_val = model.predict(X_val)
print('R2 para train:', R2_numpy(y_train, y_pred_train))
print('R2 para val  :', R2_numpy(y_val, y_pred_val))
###Output
_____no_output_____ |
2-PDF-contact-extraction/notebook.ipynb | ###Markdown
Contact extraction : PDF parsing and regular expressionsAuthor: Nikola LOHINSKIGithub: https://github.com/NikolaLohinski/pdf_contact_extractionIn this notebook, you will learn how to :[&128279;](1-PDF-parsing-with-pdfminer) Cast a PDF document to text data in python using the open source library **pdfminer** ;[&128279;](2-Data-extraction-with-Regular-Expressions) Retrieve phone numbers and emails from a text document using **regular expressions** ;[&128279;](3-Contact-extraction) **Extract contacts** from a PDF non structured document using the two previous points.To run this notebook, you will need :to run on **python $\geq$ 3.5.2**;to have **pdfminer** installed. Run '**pip install pdfminer.six==20170720**' in the python environment you are using for this notebook.to have **pyenchant** installed. Run '**pip install pyenchant**' in the python environment you are using for this notebookLet's start with the imports :
###Code
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
import re
import enchant
from functools import reduce
###Output
_____no_output_____
###Markdown
1 PDF parsing with `pdfminer`References :- [pdfminer](https://github.com/pdfminer/pdfminer.six) on Github- [example of usage](https://stackoverflow.com/questions/26494211/extracting-text-from-a-pdf-file-using-pdfminer-in-python) from which this adapted example was builtNothing fancy here, we are just trying to cast PDF files back to text format using `pdfminer`.**Important : ** Bear in mind that the following method only applies to digital files, not scans ; it only applies to files that have been generated through Microsoft Word, OpenOffice, Gedit etc... and not to files that have been printed and then scanned.Following is a function demonstrating the way to use `pdfminer` to convert a PDF file to a string :
###Code
def convert_pdf_to_txt(path_to_file):
    """Extract the text layer of a digital (non-scanned) PDF file.

    Parameters
    ----------
    path_to_file : str
        Path to the PDF file to convert.

    Returns
    -------
    str
        Concatenated text of every page, as extracted by pdfminer.
    """
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    try:
        with open(path_to_file, 'rb') as file_reader:
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            password = ""
            maxpages = 0          # 0 -> no page limit
            caching = True
            pagenos = set()       # empty -> all pages
            pages = PDFPage.get_pages(
                file_reader,
                pagenos,
                maxpages=maxpages,
                password=password,
                caching=caching,
                check_extractable=True
            )
            for page in pages:
                interpreter.process_page(page)
            text = retstr.getvalue()
    finally:
        # Always release pdfminer resources, even if parsing raises.
        # The file handle itself is closed by the `with` block, so the
        # original's explicit file_reader.close() was redundant and dropped.
        device.close()
        retstr.close()
    return text
###Output
_____no_output_____
###Markdown
Let's test this function out with a file :
###Code
path_2_file = 'Fake_PDF.pdf'
text = convert_pdf_to_txt(path_2_file)
print(text)
###Output
Company, Inc.
Fake business trip report
REF: ABC-1999-12-A
DATE: 13/12/1999
Name
Venste Bergspiel
Company
F.I.R.M.I.N Germany
Contact
Business manager
+49 6 03 89 92 99
[email protected]
Visit date Monday 13th December 1999
Topic
Business plan review
1. General
Cats is a sung-through British musical composed by Andrew Lloyd Webber, based on Old Possum's Book
of Practical Cats by T. S. Eliot, and produced by Cameron Mackintosh. The musical tells the story of a tribe of cats
called the Jellicles and the night they make what is known as "the Jellicle choice" and decide which cat will ascend
to the Heaviside Layer and come back to a new life. Cats introduced the song standard "Memory". The first
performance of Cats was in 1981.
Directed by Trevor Nunn and choreographed by Gillian Lynne, Cats first opened in the West End in 1981
and then with the same creative team on Broadway in 1982. It won numerous awards, including Best Musical at
both the Laurence Olivier Awards and the Tony Awards. The London production ran for 21 years and the Broadway
production ran for 18 years, both setting new records. Actresses Elaine Paige and Betty Buckley became
particularly associated with the musical. One actress, Marlene Danielle, performed in the Broadway production for
its entire run (from 1982 until 2000).
As of 2016, Cats is the fourth-longest-running show in Broadway history, and was the longest running
Broadway show in history from 1997 until 2006 when it was surpassed by The Phantom of the Opera. Cats is the
sixth-longest-running West End musical. It has been performed around the world many times and has been
translated into more than 20 languages. In 1998, Cats was turned into a made-for-television film. The musical has
grossed approximately $342.2 million.
Everything in this paragraph was taken from a web article on Cats, except this last sentence.
2. Review
A business plan is a formal statement of business goals, reasons they are attainable, and plans for
reaching them. It may also contain background information about the organization or team attempting to reach
those goals.
Business plans may target changes in perception and branding by the customer, client, taxpayer, or larger
community. When the existing business is to assume a major change or when planning a new venture, a 3 to 5
year business plan is required, since investors will look for their investment return in that timeframe.
Everything in this paragraph was taken from an online article on business plans, except this last sentence.
a toy file
1
Company, Inc.
3. Other information
A computer is a device that can be instructed to carry out arbitrary sequences of arithmetic or logical
operations automatically. The ability of computers to follow generalized sets of operations, called programs,
enables them to perform an extremely wide range of tasks.
Such computers are used as control systems for a very wide variety of industrial and consumer devices.
This includes simple special purpose devices like microwave ovens and remote controls, factory devices such as
industrial robots and computer assisted design, but also in general purpose devices like personal computers and
mobile devices such as smartphones. The Internet is run on computers and it connects millions of other computers.
Everything in this paragraph was taken from a web article on computers, except this last sentence.
here 123688539877453 and there 77-32168-2.
Since ancient times, simple manual devices like the abacus aided people in doing calculations. Early in the
Industrial Revolution, some mechanical devices were built to automate long tedious tasks, such as guiding patterns
for looms. More sophisticated electrical machines did specialized analog calculations in the early 20th century. The
first digital electronic calculating machines were developed during World War II. The speed, power, and versatility
of computers has increased continuously and dramatically since then.
4. New contacts
Contact is a 1997 American science fiction drama film. It is a film adaptation of a 1985 novel of the same
name. Jodie Foster portrays the film's protagonist, Dr. Eleanor "Ellie" Arroway, a SETI scientist who finds strong
evidence of extraterrestrial life and is chosen to make first contact. The film also stars Matthew McConaughey,
James Woods, Tom Skerritt, William Fichtner, John Hurt, Angela Bassett, Rob Lowe, Jake Busey, and David
Morse.
Carl Sagan and Ann Druyan began working on the film in 1979. Together, they wrote a 100+ page film
treatment and set up Contact at Warner Bros. with Peter Guber and Lynda Obst as producers. When development
stalled on the film, Sagan published Contact as a novel in 1985 and the film adaptation was rejuvenated in 1989.
rner Bros. fired Miller
in 1995. Robert Zemeckis was eventually hired to direct, and filming for Contact lasted from September 1996 to
February 1997. Sony Pictures Image works handled most of the visual effects sequences.
The film was released on July 11, 1997. Contact grossed approximately $171 million in worldwide box
office totals. The film won the Hugo Award for Best Dramatic Presentation and received multiple awards and
nominations at the Saturn Awards.
Everything in this paragraph was taken from a web article on computers, except this last sentence. For
Tocs Yelldir and a phone number to go with : +33 6 12 34 56 78,
but no email address.
a toy file
2
###Markdown
2 Data extraction with Regular ExpressionsReferences:- [Tutorial on RegEx](https://www.regular-expressions.info/)- [Other tutorial on RegEx](https://docs.oracle.com/javase/tutorial/essential/regex/)- [Online tester and cheat sheet](https://regexr.com/) used to build the followin RegExRegular expressions are a way to match patterns in text data. We define the pattern of characters we are looking for and a compiler finds the content matching the givent pattern in a given text. For example, lets look at the following piece of text: ... and thank you for your time and patience with my request. Best regards, Venste Bergspiel Executive assistant email: [email protected] tel: 12 34 56 78 90 PS: I hope this does not ...Let's say we are looking for an email adress in this text. An email adress is defined by :- a series of words, separted by dots, or dashes $\rightarrow$ **`venste.bergspiel`**- an `@` character $\rightarrow$ **`@`**- a news series of words, eventually separated by dots or dashes $\rightarrow$ **`ssicju`**- a dot $\rightarrow$ **`.`**- a single word of 2 to 3 characters followed by a space or a return to lign character $\rightarrow$ **`ra`**This leads down to the following regular expression : (\w(.|-)?)+\@(\w(.|-)?)+(\.\w{2,3}(\s|\n))+- `\w{x, y}` means we are looking for a word, of length varying from x to y. If x or y are not given, then the length is not constrained- `\(.|-)?` there may be a dot or a dash, or none of both- `(...)+` means that there is one or more of `...`- `\@` means there must be the `@` character- `\.` means there must be the `.` character- `(\s|\n)` means there must be space or a return to line characterLet's test it out :
###Code
# The regular expression defining the email pattern.
# NOTE(review): the dots inside `(.|-)` are unescaped, so they match ANY
# character, not just a literal '.'; the pattern still works on this sample
# but is broader (and slower) than the prose above suggests.
expression = r'(\w(.|-)?)+\@(\w(.|-)?)+(\.\w{2,3}(\s|\n))+'
# The text to analyse
text = '''
...and thank you for your time and patience with my request.
Best regards,
Venste Bergspiel
Executive assistant
email: [email protected]
tel: 12 34 56 78 90
PS: I hope this does not ...
'''
# First compile the expression
regex = re.compile(expression)
# Then find matches (finditer returns a lazy iterator of Match objects)
matchs = regex.finditer(text)
# Finally output them
print('Text to analyse :')
print(text)
print('------------------------------------------------------------\n')
print('Testing RegEx: {}\n'.format(expression))
print('Found the following matches :')
for i, m in enumerate(matchs):
    print('{}. {}'.format(i + 1, m.group()))
###Output
Text to analyse :
...and thank you for your time and patience with my request.
Best regards,
Venste Bergspiel
Executive assistant
email: [email protected]
tel: 12 34 56 78 90
PS: I hope this does not ...
------------------------------------------------------------
Testing RegEx: (\w(.|-)?)+\@(\w(.|-)?)+(\.\w{2,3}(\s|\n))+
Found the following matches :
1. [email protected]
###Markdown
The above RegEx works fine but is not the fastest one for email addresses. Here is a less understandable but better one : [a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+Now let's try to do the same for a phone number. Starting simple, we will only consider for a moment a phone number being a string of 5 series of 2 numbers, separated by spaces. It leads down to the following RegEx: (\d{2}(\s|\n)){5}- `\d` means we are looking for digits- `(...){x}` means we want `(...)` to be of length `x`Let's test it out on the same example :
###Code
# The regular expression defining the phone pattern: five groups of two
# digits, each followed by a space or newline.
expression = r'(\d{2}(\s|\n)){5}'
# The text to analyse
text = '''
...and thank you for your time and patience with my request.
Best regards,
Venste Bergspiel
Executive assistant
email: [email protected]
tel: 12 34 56 78 90
PS: I hope this does not ...
'''
# First compile the expression
regex = re.compile(expression)
# Then find matches
matchs = regex.finditer(text)
# Finally output them
print('Text to analyse :')
print(text)
print('------------------------------------------------------------\n')
print('Testing RegEx: {}\n'.format(expression))
print('Found the following matches :')
for i, m in enumerate(matchs):
    print('{}. {}'.format(i + 1, m.group()))
###Output
Text to analyse :
...and thank you for your time and patience with my request.
Best regards,
Venste Bergspiel
Executive assistant
email: [email protected]
tel: 12 34 56 78 90
PS: I hope this does not ...
------------------------------------------------------------
Testing RegEx: (\d{2}(\s|\n)){5}
Found the following matches :
1. 12 34 56 78 90
###Markdown
The example above shows how to match a specific format of phone number. Unfortunately, there is no generic RegEx to match any type of phone number from any country, taking into account the country code and the additionnal non essentials digits. We suggest in the followin a specific RegEx that could be used to matche a significant number of European phone numbers : (\s|\n)(`\`+\d{2}(`\`(0`\`))?((\s|-)\d{3}){3}|`\`+\d{2}(`\`(0`\`))?(\s|-)(`\`(0`\`))?\d((\s|-)\d{2}){4}|`\`+\d{2}(`\`(0`\`))?\d{9}|(\d{2}(\-|\s)){4}\d{2}|\d{10})(\s|\n)This humanly unreadble expression matches the following formats :- 0345678912- 03 45 67 89 12- 03-45-67-89-12- +12345678912- +12 3 45 67 89 12- +12-3-45-67-89-12- +12 345 678 912- +12-345-678-912- +12(0)345678912- +12(0) 345 678 912- +12(0)-345-678-912- +12 (0)3 45 67 89 12- +12-(0)3-45-67-89-12**Important** : This RegEx could of course be adapted for other purposes and simplified, but has overall good performance on the tested data. 3 Contact extractionNow is the time to dive into contact extraction. We have seen that we can take a PDF file and extract text from it, and that we can find phone numbers and emails in a string using RegEx. Now we can combine all of those in order to determine contacts in a document.The idea is to work sequentially line by line and build a contact list dynamically. The algorithm does the following steps :- convert PDF to brut text ;- go line by line and look for phone numbers ;- keep line numbers of matches ;- filter contacts with a list of already known phone numbers ;- look 5 lines after and 5 lines before the match for email addresses ;- look 5 lines after and 5 lines before the match for words outside of dictionary to determine names ;- consider that matches close in line numbers represent the same person ;- filter contacts with a list of already known phone numbers, name and/or email addresses.
###Code
# Import PDF and convert to a string
text = convert_pdf_to_txt('Fake_PDF.pdf')
# Convert to list of lines
lines = text.split('\n')
# Remove lines without words by matching every line to a word/digit RegEx.
# (`\d` is a subset of `\w`, so the `|\d` alternative is redundant but harmless.)
lines_filtered = list(filter(lambda l: len(re.findall('(\w|\d)+', l)) > 0, lines))
# Build RegEx for phone numbers (European-style formats; see markdown above).
# NOTE(review): the bare `\d{10}` alternative also matches any 10 consecutive
# digits, which is why a non-phone number appears in the output below.
tel_regex = re.compile(r'(\+\d{2}(\(0\))?((\s|-)\d{3}){3}|\+\d{2}(\(0\))?(\s|-)(\(0\))?\d((\s|-)\d{2}){4}|\+\d{2}(\(0\))?\d{9}|(\d{2}(\-|\s)){4}\d{2}|\d{10})')
# Collect (filtered-line-index, phone-string) pairs for every match.
tels = list()
for i, l in enumerate(lines_filtered):
    tel_match = tel_regex.finditer(l)
    for m in tel_match:
        phone = m.group()
        tels.append((i, str(phone)))
        print('Line {} :\t{}'.format(i, phone))
###Output
Line 10 : +49 6 03 89 92 99
Line 53 : 1236885398
Line 75 : +33 6 12 34 56 78
###Markdown
As you may see above, we have now extracted all the phone numbers from the PDF file, and their position in the converted text variable. Note that there is a number that is not a contact that is actually popping out. Now we can go through neighbouring lines and determine the contact name and / or email if available.
###Code
# Build contacts by scanning the 5 lines before/after each phone match for
# an email address and for out-of-dictionary words (candidate names).
dictionary = enchant.Dict("en_US")
punctuation = ('.', ',', '(', ')', '+', ';', '"', '\\', '/', '|')
# NOTE(review): the '.' before the TLD part is unescaped, so it matches any
# character — works here but is looser than intended.
email_regex = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+.[a-zA-Z0-9-.]+')
contacts = list()
for tel in tels:
    contact = {
        'name': None,
        'tel': None,
        'mail': None
    }
    line = tel[0]     # index of the match in lines_filtered
    phone = tel[1]
    contact['tel'] = [phone]
    emails = list()   # NOTE(review): unused; candidate for removal
    # Window of +/- 5 filtered lines around the phone match.
    # NOTE(review): `min(..., len(lines_filtered) - 1)` silently drops the
    # last line when the match is near the end — confirm intent.
    neighbouring_lines = lines_filtered[max(line - 5, 0):min(line + 5, len(lines_filtered) - 1)]
    # look for emails and find the closest one
    for i, l in enumerate(neighbouring_lines):
        email_match = email_regex.finditer(l)
        closest_mail_line = -1
        for m in email_match:
            if contact.get('mail') is None:
                closest_mail_line = i
                contact['mail'] = [m.group()]
            else:
                # NOTE(review): `i` indexes the local window while `line`
                # indexes lines_filtered, so this distance comparison mixes
                # two coordinate systems — likely a bug; `i` should probably
                # be compared against the window centre, not `line`.
                if abs(closest_mail_line - line) > abs(i - line):
                    closest_mail_line = i
                    contact['mail'] = [m.group()]
    # convert lines to list of ordered words to better filter them
    # (the lambda's `line` parameter shadows the outer `line` only locally)
    ordered_pieces = reduce(
        lambda words, line: words + line.split(' '),
        neighbouring_lines,
        list()
    )
    # filter not words and words with digits
    ordered_none_digits = list(filter(lambda p: re.match('\w', p) is not None and re.match('\d', p) is None, ordered_pieces))
    # filter words with punctuation
    ordered_words = list(filter(lambda p: all([c not in punctuation for c in p]), ordered_none_digits))
    # Finally keep only words that are not in dictionary
    words = list(filter(lambda w: not dictionary.check(w), ordered_words))
    # This should give a name in the end hopefully
    contact['name'] = ' '.join(words) if len(words) > 0 else None
    # Finally check if we have not found already the contact, in which case we have to add the
    # new phone number to the previous contact
    previous_contact = next(filter(lambda c: c.get('name') == contact.get('name'), contacts), None)
    if previous_contact is None:
        contacts.append(contact)
    else:
        previous_contact['tel'].append(phone)
# Finally filter contacts that have no email and no name. Those are probably mis-matches
contacts = list(filter(lambda c: c.get('name') is not None or c.get('mail') is not None, contacts))
contacts
###Output
_____no_output_____ |
notebooks/Python-in-2-days/D1_L2_IPython/HW33.ipynb | ###Markdown
Welcome to IPython Notebook Basics Welcome to the very first lesson. In this lesson we will learn how to work with the notebook and saving it. If you already know how to use notebooks, feel free to skip this lesson. Types of cells Notebooks are a great visual way of programming. We will use these notebooks to code in Python and learn the basics of machine learning. First, you need to know that notebooks are made up of cells. Each cell can either be a **code cell** or a **text cell**. * **text cells**: used for headers and paragraph text. * **code cells**: used for holding code. Creating cellsFirst, let's create a text cell. To create a cell at a particular location, just click on the spot and create a text cell by clicking on the **➕TEXT** below the *View* button up top. Once you made the cell, click on it and type the following inside it:``` This is a headerHello world!``` Running cellsOnce you type inside the cell, press the **SHIFT** and **ENTER** together to run the cell. Editing cellsTo edit a cell, double click it and you should be able to replace what you've typed in there. Moving cellsOnce you create the cell, you can move it with the ⬆️**CELL** and ⬇️**CELL** buttons above. Deleting cellsYou can delete the cell by clicking on the cell and pressing the button with three vertical dots on the top right corner of the cell. Click **Delete cell**. Creating a code cellNow let's take the same steps as above to create, edit and delete a code cell. You can create a code cell by clicking on the ➕CODE below the *File* menu at the top. Once you have created the cell, click on it and type the following inside it:```print ("hello world!")```⏰ - It may take a few seconds when you run your first code cell.
###Code
# Classic first program: write a greeting to standard output.
greeting = "hello world!"
print(greeting)
###Output
hello world!
|
notebooks/example_Conv2D_AutoEncoder.ipynb | ###Markdown
Overview In this example we demonstrate the use of the 2D Conv. Net Autoencoder to define an encoding of hand-written digits. You can find more info on the datset in [sklearn's docs](https://scikit-learn.org/stable/datasets/index.htmldigits-dataset) Install Libs
###Code
# Install/upgrade the JLpyUtils helper library inside the notebook kernel,
# then clear the noisy pip output from the cell.
!pip install --upgrade JLpyUtils
import IPython.display
IPython.display.clear_output()
###Output
_____no_output_____
###Markdown
Import Libs
###Code
# Core and ML imports for the autoencoder example.
import sys, os, shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import importlib
import sklearn, sklearn.datasets, sklearn.model_selection
# Notebook display settings.
mpl.rcParams['font.size']=14
pd.options.display.max_columns = 1000
import JLpyUtils
import JLpyUtils.ML.NeuralNet as NN
import tensorflow.keras as keras
# Show the installed JLpyUtils version (last expression is displayed).
JLpyUtils.__version__
###Output
_____no_output_____
###Markdown
Load Data
###Code
# Load sklearn's 8x8 hand-written digits dataset and display the raw dict.
data_dict = sklearn.datasets.load_digits()
data_dict
###Output
_____no_output_____
###Markdown
Inspect Data
###Code
# Pull the (n_samples, 8, 8) image array and preview the first 9 digits.
imgs = data_dict['images']
imgs.shape
JLpyUtils.plot.imgs.from_list(imgs[:9,:,:], )
###Output
_____no_output_____
###Markdown
Scale
###Code
# Min-max scale pixel intensities into [0, 1] (global max, pixels are >= 0).
imgs = imgs/imgs.max()
imgs.min()
imgs.max()
###Output
_____no_output_____
###Markdown
Train Test Split
###Code
# Reproducible 70/30 train/test split of the scaled images.
X_train, X_test = sklearn.model_selection.train_test_split(imgs, test_size=0.3, random_state = 0)
print(X_train.shape,X_test.shape)
###Output
(1257, 8, 8) (540, 8, 8)
###Markdown
Reshape 1D images for input into Conv Net
###Code
# Append a singleton channel axis: (N, 8, 8) -> (N, 8, 8, 1), the NHWC
# layout Conv2D layers expect for single-channel images.
X_train = X_train.reshape(X_train.shape + (1,))
X_test = X_test.reshape(X_test.shape + (1,))
###Output
_____no_output_____
###Markdown
Instantiate Model
###Code
# Build the Conv2D autoencoder via JLpyUtils; 10 latent outputs per image,
# MAE reconstruction loss, then print the layer summary.
model = NN.Conv2D_AutoEncoder.model(img_shape=X_train[0].shape,
                                    n_outputs_per_img= 10,
                                    initial_filter_size=10,
                                    max_filter_size=32,
                                    activation='relu',
                                    kernel_size=(2,2),
                                    dense_scaling_factor=3,
                                    loss='mae')
model.summary()
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
inputs (InputLayer) [(None, 8, 8, 1)] 0
_________________________________________________________________
G0_L0_Conv2D (Conv2D) (None, 8, 8, 10) 100
_________________________________________________________________
G0_L1_Pool (MaxPooling2D) (None, 4, 4, 10) 0
_________________________________________________________________
G1_L0_Conv2D (Conv2D) (None, 4, 4, 20) 1820
_________________________________________________________________
G1_L1_Pool (MaxPooling2D) (None, 2, 2, 20) 0
_________________________________________________________________
G2_L0_Flatten (Flatten) (None, 80) 0
_________________________________________________________________
G3_L0_Dense (Dense) (None, 26) 2106
_________________________________________________________________
outputs (Dense) (None, 10) 270
_________________________________________________________________
G0_L0_Dense (Dense) (None, 26) 286
_________________________________________________________________
G1_L0_Flatten (Dense) (None, 80) 2160
_________________________________________________________________
reshape (Reshape) (None, 2, 2, 20) 0
_________________________________________________________________
G2_L1_Conv2D (Conv2DTranspos (None, 4, 4, 20) 3620
_________________________________________________________________
G3_L0_Pool (Conv2DTranspose) (None, 4, 4, 10) 1810
_________________________________________________________________
G3_L1_Conv2D (Conv2DTranspos (None, 8, 8, 10) 910
_________________________________________________________________
decoder_outputs (Conv2DTrans (None, 8, 8, 1) 91
=================================================================
Total params: 13,173
Trainable params: 13,173
Non-trainable params: 0
_________________________________________________________________
###Markdown
Train the Model
###Code
# Early stopping on validation loss: stop after 100 epochs without a
# >= 0.001 improvement and restore the best weights.
callbacks = [keras.callbacks.EarlyStopping(restore_best_weights=True,
                                           min_delta=0.001,
                                           patience=100)]
# Autoencoder training: input == target (reconstruction); the epoch cap is
# high on purpose — early stopping ends training first.
history = model.fit(x = X_train, y = X_train,
                    validation_data= (X_test, X_test),
                    batch_size=64,
                    epochs=1000,
                    verbose = 2,
                    callbacks=callbacks)
###Output
Train on 1257 samples, validate on 540 samples
Epoch 1/1000
1257/1257 - 0s - loss: 0.0843 - val_loss: 0.0908
Epoch 2/1000
1257/1257 - 0s - loss: 0.0835 - val_loss: 0.0905
Epoch 3/1000
1257/1257 - 0s - loss: 0.0835 - val_loss: 0.0899
Epoch 4/1000
1257/1257 - 0s - loss: 0.0834 - val_loss: 0.0895
Epoch 5/1000
1257/1257 - 0s - loss: 0.0829 - val_loss: 0.0897
Epoch 6/1000
1257/1257 - 0s - loss: 0.0830 - val_loss: 0.0896
Epoch 7/1000
1257/1257 - 0s - loss: 0.0826 - val_loss: 0.0895
Epoch 8/1000
1257/1257 - 0s - loss: 0.0826 - val_loss: 0.0899
Epoch 9/1000
1257/1257 - 0s - loss: 0.0832 - val_loss: 0.0895
Epoch 10/1000
1257/1257 - 0s - loss: 0.0821 - val_loss: 0.0887
Epoch 11/1000
1257/1257 - 0s - loss: 0.0815 - val_loss: 0.0887
Epoch 12/1000
1257/1257 - 0s - loss: 0.0813 - val_loss: 0.0881
Epoch 13/1000
1257/1257 - 0s - loss: 0.0809 - val_loss: 0.0871
Epoch 14/1000
1257/1257 - 0s - loss: 0.0806 - val_loss: 0.0871
Epoch 15/1000
1257/1257 - 0s - loss: 0.0804 - val_loss: 0.0879
Epoch 16/1000
1257/1257 - 0s - loss: 0.0805 - val_loss: 0.0868
Epoch 17/1000
1257/1257 - 0s - loss: 0.0799 - val_loss: 0.0870
Epoch 18/1000
1257/1257 - 0s - loss: 0.0795 - val_loss: 0.0857
Epoch 19/1000
1257/1257 - 0s - loss: 0.0794 - val_loss: 0.0864
Epoch 20/1000
1257/1257 - 0s - loss: 0.0789 - val_loss: 0.0859
Epoch 21/1000
1257/1257 - 0s - loss: 0.0794 - val_loss: 0.0858
Epoch 22/1000
1257/1257 - 0s - loss: 0.0786 - val_loss: 0.0862
Epoch 23/1000
1257/1257 - 0s - loss: 0.0790 - val_loss: 0.0859
Epoch 24/1000
1257/1257 - 0s - loss: 0.0784 - val_loss: 0.0856
Epoch 25/1000
1257/1257 - 0s - loss: 0.0781 - val_loss: 0.0845
Epoch 26/1000
1257/1257 - 0s - loss: 0.0775 - val_loss: 0.0841
Epoch 27/1000
1257/1257 - 0s - loss: 0.0778 - val_loss: 0.0840
Epoch 28/1000
1257/1257 - 0s - loss: 0.0773 - val_loss: 0.0843
Epoch 29/1000
1257/1257 - 0s - loss: 0.0772 - val_loss: 0.0835
Epoch 30/1000
1257/1257 - 0s - loss: 0.0768 - val_loss: 0.0830
Epoch 31/1000
1257/1257 - 0s - loss: 0.0764 - val_loss: 0.0832
Epoch 32/1000
1257/1257 - 0s - loss: 0.0761 - val_loss: 0.0835
Epoch 33/1000
1257/1257 - 0s - loss: 0.0758 - val_loss: 0.0826
Epoch 34/1000
1257/1257 - 0s - loss: 0.0756 - val_loss: 0.0826
Epoch 35/1000
1257/1257 - 0s - loss: 0.0760 - val_loss: 0.0830
Epoch 36/1000
1257/1257 - 0s - loss: 0.0760 - val_loss: 0.0823
Epoch 37/1000
1257/1257 - 0s - loss: 0.0753 - val_loss: 0.0817
Epoch 38/1000
1257/1257 - 0s - loss: 0.0747 - val_loss: 0.0820
Epoch 39/1000
1257/1257 - 0s - loss: 0.0749 - val_loss: 0.0812
Epoch 40/1000
1257/1257 - 0s - loss: 0.0746 - val_loss: 0.0820
Epoch 41/1000
1257/1257 - 0s - loss: 0.0744 - val_loss: 0.0812
Epoch 42/1000
1257/1257 - 0s - loss: 0.0744 - val_loss: 0.0817
Epoch 43/1000
1257/1257 - 0s - loss: 0.0749 - val_loss: 0.0817
Epoch 44/1000
1257/1257 - 0s - loss: 0.0741 - val_loss: 0.0808
Epoch 45/1000
1257/1257 - 0s - loss: 0.0737 - val_loss: 0.0807
Epoch 46/1000
1257/1257 - 0s - loss: 0.0735 - val_loss: 0.0804
Epoch 47/1000
1257/1257 - 0s - loss: 0.0739 - val_loss: 0.0810
Epoch 48/1000
1257/1257 - 0s - loss: 0.0737 - val_loss: 0.0810
Epoch 49/1000
1257/1257 - 0s - loss: 0.0733 - val_loss: 0.0799
Epoch 50/1000
1257/1257 - 0s - loss: 0.0732 - val_loss: 0.0799
Epoch 51/1000
1257/1257 - 0s - loss: 0.0729 - val_loss: 0.0797
Epoch 52/1000
1257/1257 - 0s - loss: 0.0725 - val_loss: 0.0790
Epoch 53/1000
1257/1257 - 0s - loss: 0.0724 - val_loss: 0.0792
Epoch 54/1000
1257/1257 - 0s - loss: 0.0720 - val_loss: 0.0796
Epoch 55/1000
1257/1257 - 0s - loss: 0.0727 - val_loss: 0.0795
Epoch 56/1000
1257/1257 - 0s - loss: 0.0725 - val_loss: 0.0798
Epoch 57/1000
1257/1257 - 0s - loss: 0.0724 - val_loss: 0.0797
Epoch 58/1000
1257/1257 - 0s - loss: 0.0722 - val_loss: 0.0784
Epoch 59/1000
1257/1257 - 0s - loss: 0.0719 - val_loss: 0.0791
Epoch 60/1000
1257/1257 - 0s - loss: 0.0719 - val_loss: 0.0793
Epoch 61/1000
1257/1257 - 0s - loss: 0.0716 - val_loss: 0.0792
Epoch 62/1000
1257/1257 - 0s - loss: 0.0718 - val_loss: 0.0788
Epoch 63/1000
1257/1257 - 0s - loss: 0.0711 - val_loss: 0.0788
Epoch 64/1000
1257/1257 - 0s - loss: 0.0715 - val_loss: 0.0784
Epoch 65/1000
1257/1257 - 0s - loss: 0.0710 - val_loss: 0.0787
Epoch 66/1000
1257/1257 - 0s - loss: 0.0709 - val_loss: 0.0784
Epoch 67/1000
1257/1257 - 0s - loss: 0.0708 - val_loss: 0.0782
Epoch 68/1000
1257/1257 - 0s - loss: 0.0710 - val_loss: 0.0787
Epoch 69/1000
1257/1257 - 0s - loss: 0.0711 - val_loss: 0.0788
Epoch 70/1000
1257/1257 - 0s - loss: 0.0707 - val_loss: 0.0790
Epoch 71/1000
1257/1257 - 0s - loss: 0.0708 - val_loss: 0.0781
Epoch 72/1000
1257/1257 - 0s - loss: 0.0706 - val_loss: 0.0778
Epoch 73/1000
1257/1257 - 0s - loss: 0.0701 - val_loss: 0.0783
Epoch 74/1000
1257/1257 - 0s - loss: 0.0708 - val_loss: 0.0780
Epoch 75/1000
1257/1257 - 0s - loss: 0.0704 - val_loss: 0.0775
Epoch 76/1000
1257/1257 - 0s - loss: 0.0703 - val_loss: 0.0787
Epoch 77/1000
1257/1257 - 0s - loss: 0.0704 - val_loss: 0.0776
Epoch 78/1000
1257/1257 - 0s - loss: 0.0698 - val_loss: 0.0771
Epoch 79/1000
1257/1257 - 0s - loss: 0.0698 - val_loss: 0.0771
Epoch 80/1000
1257/1257 - 0s - loss: 0.0698 - val_loss: 0.0773
Epoch 81/1000
1257/1257 - 0s - loss: 0.0696 - val_loss: 0.0772
Epoch 82/1000
1257/1257 - 0s - loss: 0.0698 - val_loss: 0.0775
Epoch 83/1000
1257/1257 - 0s - loss: 0.0697 - val_loss: 0.0769
Epoch 84/1000
1257/1257 - 0s - loss: 0.0692 - val_loss: 0.0765
Epoch 85/1000
1257/1257 - 0s - loss: 0.0692 - val_loss: 0.0773
Epoch 86/1000
1257/1257 - 0s - loss: 0.0696 - val_loss: 0.0771
Epoch 87/1000
1257/1257 - 0s - loss: 0.0691 - val_loss: 0.0764
Epoch 88/1000
1257/1257 - 0s - loss: 0.0690 - val_loss: 0.0766
Epoch 89/1000
1257/1257 - 0s - loss: 0.0694 - val_loss: 0.0767
Epoch 90/1000
1257/1257 - 0s - loss: 0.0691 - val_loss: 0.0768
Epoch 91/1000
1257/1257 - 0s - loss: 0.0692 - val_loss: 0.0769
Epoch 92/1000
1257/1257 - 0s - loss: 0.0691 - val_loss: 0.0769
Epoch 93/1000
1257/1257 - 0s - loss: 0.0689 - val_loss: 0.0765
Epoch 94/1000
1257/1257 - 0s - loss: 0.0689 - val_loss: 0.0784
Epoch 95/1000
1257/1257 - 0s - loss: 0.0690 - val_loss: 0.0769
Epoch 96/1000
1257/1257 - 0s - loss: 0.0687 - val_loss: 0.0760
Epoch 97/1000
1257/1257 - 0s - loss: 0.0687 - val_loss: 0.0763
Epoch 98/1000
1257/1257 - 0s - loss: 0.0691 - val_loss: 0.0762
Epoch 99/1000
1257/1257 - 0s - loss: 0.0684 - val_loss: 0.0765
Epoch 100/1000
1257/1257 - 0s - loss: 0.0683 - val_loss: 0.0761
Epoch 101/1000
1257/1257 - 0s - loss: 0.0683 - val_loss: 0.0758
Epoch 102/1000
1257/1257 - 0s - loss: 0.0681 - val_loss: 0.0762
Epoch 103/1000
1257/1257 - 0s - loss: 0.0682 - val_loss: 0.0757
Epoch 104/1000
1257/1257 - 0s - loss: 0.0677 - val_loss: 0.0756
Epoch 105/1000
1257/1257 - 0s - loss: 0.0679 - val_loss: 0.0763
Epoch 106/1000
1257/1257 - 0s - loss: 0.0678 - val_loss: 0.0765
Epoch 107/1000
1257/1257 - 0s - loss: 0.0681 - val_loss: 0.0757
Epoch 108/1000
1257/1257 - 0s - loss: 0.0674 - val_loss: 0.0755
Epoch 109/1000
1257/1257 - 0s - loss: 0.0679 - val_loss: 0.0769
Epoch 110/1000
1257/1257 - 0s - loss: 0.0683 - val_loss: 0.0757
Epoch 111/1000
1257/1257 - 0s - loss: 0.0678 - val_loss: 0.0756
Epoch 112/1000
1257/1257 - 0s - loss: 0.0675 - val_loss: 0.0756
Epoch 113/1000
1257/1257 - 0s - loss: 0.0675 - val_loss: 0.0756
Epoch 114/1000
1257/1257 - 0s - loss: 0.0677 - val_loss: 0.0754
Epoch 115/1000
1257/1257 - 0s - loss: 0.0681 - val_loss: 0.0765
Epoch 116/1000
1257/1257 - 0s - loss: 0.0674 - val_loss: 0.0759
Epoch 117/1000
1257/1257 - 0s - loss: 0.0673 - val_loss: 0.0749
Epoch 118/1000
1257/1257 - 0s - loss: 0.0675 - val_loss: 0.0758
Epoch 119/1000
1257/1257 - 0s - loss: 0.0672 - val_loss: 0.0753
Epoch 120/1000
1257/1257 - 0s - loss: 0.0667 - val_loss: 0.0752
Epoch 121/1000
1257/1257 - 0s - loss: 0.0668 - val_loss: 0.0753
Epoch 122/1000
1257/1257 - 0s - loss: 0.0669 - val_loss: 0.0756
Epoch 123/1000
1257/1257 - 0s - loss: 0.0666 - val_loss: 0.0748
Epoch 124/1000
1257/1257 - 0s - loss: 0.0667 - val_loss: 0.0764
Epoch 125/1000
1257/1257 - 0s - loss: 0.0672 - val_loss: 0.0755
Epoch 126/1000
1257/1257 - 0s - loss: 0.0672 - val_loss: 0.0760
Epoch 127/1000
1257/1257 - 0s - loss: 0.0674 - val_loss: 0.0750
Epoch 128/1000
1257/1257 - 0s - loss: 0.0666 - val_loss: 0.0752
Epoch 129/1000
1257/1257 - 0s - loss: 0.0672 - val_loss: 0.0751
###Markdown
Inspect Learning Curve
###Code
NN.plot.learning_curves(history)
###Output
_____no_output_____
###Markdown
Inspect Predictions
###Code
X_test_preds = model.predict(X_test)
X_test_preds.shape
print('X_test')
JLpyUtils.plot.imgs.from_list(X_test[:9,:,:,0] )
print('X_test preds')
JLpyUtils.plot.imgs.from_list(X_test_preds[:9,:,:,0] )
###Output
X_test preds
|
genie_scraping_selenium.ipynb | ###Markdown
곡명 : a.title, 가수이름 : a.artist
###Code
s=song.select('a.title')
len(s), type(s)
s
###Output
_____no_output_____
###Markdown
* 위에 상태에서는 (tag, text 등) 안되는 이유가 s가 위에서는 ResultSet 형태, 즉 리스트 형태로 되어 있기 때문에 못함
###Code
s[0]
###Output
_____no_output_____
###Markdown
* 하지만 []로 빼고 나면 tag나 text가 가능하다!!
###Code
s[0].text.strip()
artist=song.select('a.artist')
len(artist), type(artist)
artist[0]
artist[0].text.strip()
###Output
_____no_output_____
###Markdown
[ [title01, artist01], [title02, artist02], [title03, artist03], ...]
###Code
import pandas as pd

# Accumulate one [title, artist] pair per song row scraped from the chart page.
contents=list()
for song in songs:
    # Each row holds the song title in <a class="title"> and the performer in <a class="artist">.
    s=song.select('a.title')
    artist=song.select('a.artist')
    contents.append([s[0].text.strip(), artist[0].text.strip()])
# Quick sanity check on the collected rows (displayed by the notebook).
len(contents), type(contents)
# Save the chart to Excel without the DataFrame's integer index column.
data=pd.DataFrame(contents, columns=['Title','Artist'])
data.to_excel('C:/Develops/test_webscraping/saves/genie_scraping.xls', index=False)
###Output
_____no_output_____ |
2. ITW2/bs4.ipynb | ###Markdown
Tag, NavigableString, BeautifulSoup, Comment. Tag
###Code
#Tag
temp = BeautifulSoup('Hello')
print(temp.html, type(temp.html))
print(temp.html.name)
a = 'hello'
temp = BeautifulSoup(a)
tag = temp.p
print(tag, type(tag), tag.name, tag.text)
tag.name = 'b'
print(temp)
print(tag, type(tag), tag.name, tag.text)
tag = temp.b
print(tag, tag.name, tag.next)
tag.attrs
a = '<html><b id = "hey", class = "om", k = "J"> Piyush it is </b> </html>'
soup = BeautifulSoup(a)
tag = soup.b
print(tag)
tag.attrs
tag.name
tag['id']
tag['class']
tag['key'] = 5
print(tag)
del tag['k']
tag
type(tag.attrs)
tag['k']
print(tag.get('k'))
(print(tag.get('id')))
tag.get('id')
tag.get('class')
tag['class'] = ['om', 'hey']
tag.get('class')
tag
tag['class'].append('jai')
tag.get('class')
print(tag)
tag.get_attribute_list('class')
tag.get_attribute_list('id')
tag.get_attribute_list('key')
tag.get('key')
###Output
_____no_output_____
###Markdown
Navigable String Class
###Code
tag.string
type(tag.string)
print(type(tag.get_attribute_list("key3")), tag.get_attribute_list("kfdksj"))
type(str(tag.string))
print(tag.string)
print(str(tag.string))
s = unicode(tag.string)
tag.string = "piyush"
tag.string.replace_with = "kill it"
tag
tag.string = "ki"
tag.string
tag.string.replace_with("hey how you doing!")
tag.string
tag
k = tag.string
k
type(k)
print(k.find("o"))
###Output
5
###Markdown
Beautiful Soup
###Code
soup.name
soup
soup.attrs
soup.b
soup.html
soup.body
soup.body.b
print(type(soup.body), type(soup.body.b))
tag.b
print(tag.b)
tag = soup.body
print(tag)
print(tag.string)
print(tag.b, type(tag.b), sep = '\n')
print(tag.string, tag.b.string)
tag.tags
print(tag.attrs)
print(tag.b.tags)
print(soup.tags)
print(soup.name)
print(soup.string)
print(soup.b.string, soup.body.string, end = '\n')
doc = '<html><body><b><!--Ask him--></b></body></html>'
print(doc)
doc = bs.BeautifulSoup(doc)
print(doc)
print(doc.prettify())
print(doc.string)
print(doc.b.string)
markup = "<html><b><!-- Hey --></b><html>"
print(type(BeautifulSoup(markup).b.string))
print(type(doc.b.string))
print(BeautifulSoup(markup).b.string)
print(doc.body.string)
###Output
Ask him
###Markdown
Navigation the tree
###Code
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
soup.head
soup.body
print(soup.head.prettify())
print(soup.body.prettify())
soup.head
soup.title
soup.a
soup.body.b
soup.b
soup.find_all('a')
soup.find_all('b')
soup.contents
soup.b.content
soup.b.contents
soup.body.contents
soup.p.contents
soup.body.contents[3]
soup.title.contents[0]
soup.title.contents
len(soup.contents)
print(soup.contents[1].name)
len(soup.html.contents)
[print(x.name) for x in soup.html.contents]
print(soup.html.contents)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
len(soup.html.contents)
soup.html.children
for a in soup.html.children:
print(a.name)
soup.head.contents
soup.title
for child in soup.html.descendants:
print(child)
len(list(soup.descendants))
soup.head.string
soup.html.string
print(soup.html.string)
print(*soup.strings)
for i in soup.strings:
print(i)
for i in soup.stripped_strings:
print(repr(i))
soup.head.parent.name
print(soup.head.parent.prettify())
soup.html.parent.name
print(soup.parent)
[print(x.name) if x is not None else print(x) for x in soup.b.parents]
a = '<a> <b> 1 </b><c> 2 </c><b> 3 </b>'
a = BeautifulSoup(a)
a.b
for i in range(3):
print(a.b.next_sibling)
a.b = a.b.next_sibling
a.b.next_sibling.next_sibling
print(*a.b.previous_siblings)
[print(i.string, '\n\n') for i in soup.find(id = 'link3').previous_siblings]
k = soup.find(id = 'link2')
k
k.next_element
k.next_element
k.next_element.next_element
k = k.next_element.next_element.next_element
k
k.next_element
k = k.next_element.next_element
k
print(html_doc)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
for tag in soup.find_all(['a', 'b', True]):
print(tag.name)
def p(x):
    """Filter for find_all: keep tags that carry a 'class' attribute but no 'id'."""
    if not x.has_attr('class'):
        return False
    return not x.has_attr('id')
soup.find_all(p)
soup.find_all('a')
soup.find_all('href')
from bs4 import NavigableString
def surrounded_by_strings(tag):
    """Keep tags whose immediate neighbours in document order (next_element and
    previous_element) are both NavigableStrings."""
    neighbours = (tag.next_element, tag.previous_element)
    return all(isinstance(n, NavigableString) for n in neighbours)
for tag in soup.find_all(surrounded_by_strings):
print(tag.name)
a = 5;
isinstance(a, str)
soup.find_all(string = re.compile('^Once'))
[print(x.name, x) for x in soup.find_all(id = True)]
soup.find_all(attrs = {'data-foo': 'value'})
data_soup = BeautifulSoup('<div data-foo="value">foo!</div>')
#data_soup.find_all(data-foo="value")
# SyntaxError: keyword can't be an expression
data_soup.find_all(attrs={"data-foo": "value"})
# [<div data-foo="value">foo!</div>]
soup.find_all('a', 'class')
soup.find_all('a', class_ = True)
soup.find_all('a', attrs = {'class': True})
soup.find_all('hey')
soup.find_all(string = 'hey')
print(len(soup.find_all(string = re.compile('than'))))
print(soup.find_all(string = 'than'))
soup.find_all(string = 'the')
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
soup.find_all(string = 'Once')
def a(s):
    """Match strings that are the entire .string content of their parent tag."""
    parent_string = s.parent.string
    return parent_string == s
soup.find_all(string = a)
soup.string
print(soup.string)
soup.p.strign
soup.p.string
soup.p.string == soup.p.text
soup.a.string == soup.a.text
soup.a.strign
soup.a.string
soup.a.text
[print(x, end = '\n\n\n') for x in soup.find_all()]
soup.p.find_all()
soup.find_all('p')[1].find_all()
soup.head.find_all()
soup.find_all('title')
soup.find_all('title', recursive = False)
soup.find('p')
soup.find('a')
soup.find_all('p')[1].find_all(limit = 2)
soup.a
soup('a')
import requests
url = 'https://www.goodreads.com/quotes/tag/inspirational'
a = requests.get(url)
k = BeautifulSoup(a.content)
[print(x) for x in k(class_ = 'quoteText')]
[print(list(x.strings)[0]) for x in k('div', class_ = 'quoteText')]
print('\n')
soup.title
soup.head.title
soup.head.title.name
soup.head.title.text
k = soup.a
k
k.find_next_siblings('a')
k.find_previous_sibling('p')
k = soup('a')
k
k[1].find_previous_sibling('a')
k[0].find_parent()
k[0].find_parent('body')
k[0].find_parents('a')
[print(x.name) for x in k[0].find_parents()]
a = soup.a
a
a.find_next('p')
a.find_next()
a.find_all_next()
soup
[print(x.name) for x in soup.b.find_all_next()]
import requests
url = 'https://www.cricbuzz.com/'
a = requests.get(url)
x = BeautifulSoup(a.content)
k = x.find_all(href = True)
[print(x.name) for x in k]
k[0].find('link href')
soup
soup.select("body title")
soup.select("title")
soup.select("head title")
soup.select("body a:nth-of-type(2)")
print(soup.prettify())
soup.a.find_all_next()
soup.head.find_all_next()
soup.select('head > title')
soup.select('body > p > a')
soup.select("body > a")
soup.select("p > link1")
soup.select("p > #link1")
soup("link1")
soup.select('p[class = story]')
soup.select_one("p")
from bs4 import BeautifulSoup
xml = """<tag xmlns:ns1="http://namespace1/" xmlns:ns2="http://namespace2/">
<ns1:child>I'm in namespace 1</ns1:child>
<ns2:child>I'm in namespace 2</ns2:child>
</tag> """
soup = BeautifulSoup(xml, "xml")
soup.select("child")
# [<ns1:child>I'm in namespace 1</ns1:child>, <ns2:child>I'm in namespace 2</ns2:child>]
soup.select("ns1|child")
# [<ns1:child>I'm in namespace 1</ns1:child>]
soup.select("child")
soup.select("ns1|child")
soup = BeautifulSoup("<a>Foo</a>")
soup.append('Bar')
soup.body.append('5')
soup
soup.a.append('bar')
soup
del soup.a[True]
soup
soup.a['cl']= 'keing'
soup
del soup.a['cl']
soup
soup.a.extend([' key'])
print(soup.a.contents)
soup
soup = BeautifulSoup('NIce dsifj')
print(soup.prettify())
soup.p.contentsz
soup.p.contents
s = NavigableString(" hey")
soup.p.append(s)
soup
c = bs.Comment("hey brother")
soup.p.append(c)
soup
print(soup.prettify())
new_tag = soup.new_tag("a", hr = "kill", text = 'hey')
soup
soup.body.append(new_tag)
soup
print(soup.prettify())
soup.p.insert(2, 'Piyush how the hell you pull this off!')
soup.p
soup.p.contents
div = soup.new_tag('div')
div.string = 'kill machine'
soup.a.string = 'kill'
soup.a.string.insert_after(" kill ", div)
soup
print(soup.prettify())
soup.p.clear()
print(soup.prettify())
soup.a
soup.div.extract()
soup
print(soup.prettify())
type(soup.prettify())
print(soup.div)
soup.a.string
print(soup.prettify())
print(*soup.a.strings)
soup
soup.a.decompose()
soup
print(soup.prettify())
a = soup.new_tag('piyush_tag')
a.string = "Hello brother ... !"
soup.a.replace_with(a)
soup
print(soup.prettify())
soup('a')[1].string = 'hello dear'
soup('a')[1].string.wrap(soup.new_tag('harsh'))
print(soup)
print(soup.prettify())
soup.piyush_tag.unwrap()
print(soup.prettify())
encode(code)
print(*soup.stripped_strings)
soup.get_text("...!...")
print(len(soup('a')))
a, b = soup('a')
print(a, b)
print(soup('a'))
a, b = [2, 5]
print(a, b)
soup = BeautifulSoup('<p><a id = >Kaise</a><b> om </b></p><a>Kaise</a>', 'html5lib')
print(soup)
print(soup.prettify())
a, b = soup('a')
a
b
a == b
import copy
a = copy.copy(soup)
a
print(a.prettify())
a == b
a == soup
a is soup
a.parent
a = [5]
b = copy.copy(a)
a
b
a == b
a is b
a..parent
b.parent
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'html.parser', parse_only = bs.SoupStrainer('a'))
soup
diagnose(p.head)
from bs4.diagnose import diagnose
diagnose(soup)
###Output
Diagnostic running on Beautiful Soup 4.7.1
Python version 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)]
Found lxml version 4.2.1.0
Found html5lib version 1.0.1
|
open-intro-statistics/python-labs/Linear Regression_Solutions.ipynb | ###Markdown
Batter up The movie [Moneyball](http://en.wikipedia.org/wiki/Moneyball_%28film%29) focuses onthe "quest for the secret of success in baseball". It follows a low-budget team, the Oakland Athletics, who believed that underused statistics, such as a player's ability to get on base, better predict the ability to score runs than typical statistics like home runs, RBIs (runs batted in), and batting average. Obtaining players who excelled in these underused statistics turned out to be much more affordable for the team.In this lab we'll be looking at data from all 30 Major League Baseball teams andexamining the linear relationship between runs scored in a season and a number of other player statistics. Our aim will be to summarize these relationships both graphically and numerically in order to find which variable, if any, helps us best predict a team's runs scored in a season. The dataLet's load up the data for the 2011 season.
###Code
import pandas as pd
mlb11 = pd.read_csv('mlb11.csv')
###Output
_____no_output_____
###Markdown
In addition to runs scored, there are seven traditionally used variables in the data set: at-bats, hits, home runs, batting average, strikeouts, stolen bases, and wins. There are also three newer variables: on-base percentage, slugging percentage, and on-base plus slugging. For the first portion of the analysis we'll consider the seven traditional variables. At the end of the lab, you'll work with the newer variables on your own. **Exercise 1** What type of plot would you use to display the relationship between `runs` and one of the other numerical variables? Plot this relationship using the variable `at_bats` as the predictor. Does the relationship look linear? If you knew a team’s `at_bats`, would you be comfortable using a linear model to predict the number of runs?
###Code
import matplotlib.pyplot as plt

# Scatterplot of the predictor (at_bats) vs. the response (runs):
# the point cloud should look roughly linear before we fit a linear model.
plt.scatter(mlb11['at_bats'], mlb11['runs'])
plt.show()
###Output
_____no_output_____
###Markdown
If the relationship looks linear, we can quantify the strength of therelationship with the correlation coefficient.
###Code
mlb11['runs'].corr(mlb11['at_bats'])
###Output
_____no_output_____
###Markdown
Sum of squared residualsThink back to the way that we described the distribution of a single variable. Recall that we discussed characteristics such as center, spread, and shape. It's also useful to be able to describe the relationship of two numerical variables, such as `runs` and `at_bats` above.** Exercise 2** Looking at your plot from the previous exercise, describe the relationship between these two variables. Make sure to discuss the form, direction, and strength of the relationship as well as any unusual observations.Just as we used the mean and standard deviation to summarize a single variable, we can summarize the relationship between these two variables by finding the line that best follows their association. *The R version of this [lab](http://htmlpreview.github.io/?https://github.com/andrewpbray/oiLabs-base-R/blob/master/simple_regression/simple_regression.html) contains an interactive function that you can use to manually find a good least squares fit to the data. This function has not been ported to the Python version of the lab.* The linear modelIt is rather cumbersome to try to get the correct least squares line, i.e. the line that minimizes the sum of squared residuals, through trial and error. Instead we can use Python to fit the linear model (a.k.a. regression line). Sci Kit Learn's `LinearRegression` class in Python is a full featured regression model and is often reccomended when using regression for predictions. However, statsmodels also has a regression model which provides a better statistical summary than Sci Kit Learn's model. We will use statsmodels here.
###Code
# statsmodels offers an R-style formula interface and a richer statistical
# summary than scikit-learn's LinearRegression.
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Ordinary least squares: model `runs` (response, left of ~) as a linear
# function of `at_bats` (predictor), using columns from the mlb11 DataFrame.
m1 = smf.ols('runs ~ at_bats', data=mlb11).fit()
###Output
_____no_output_____
###Markdown
The first argument in the function `ols` is a formula that takes the form `y ~ x`. Here it can be read that we want to make a linear model of `runs` as a function of `at_bats`. The second argument specifies that Python should look in the `mlb11` data frame to find the `runs` and `at_bats` variables.The output of `ols` is an object that contains all of the information we need about the linear model that was just fit. We can access this information using the summary function.
###Code
print(m1.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: runs R-squared: 0.373
Model: OLS Adj. R-squared: 0.350
Method: Least Squares F-statistic: 16.65
Date: Wed, 06 Jun 2018 Prob (F-statistic): 0.000339
Time: 15:32:07 Log-Likelihood: -167.44
No. Observations: 30 AIC: 338.9
Df Residuals: 28 BIC: 341.7
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -2789.2429 853.696 -3.267 0.003 -4537.959 -1040.526
at_bats 0.6305 0.155 4.080 0.000 0.314 0.947
==============================================================================
Omnibus: 2.579 Durbin-Watson: 1.524
Prob(Omnibus): 0.275 Jarque-Bera (JB): 1.559
Skew: 0.544 Prob(JB): 0.459
Kurtosis: 3.252 Cond. No. 3.89e+05
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 3.89e+05. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Let's consider this output piece by piece. First, some summary output is shown along with the name of the dependent variable. The summary includes Multiple R-squared, or more simply, $R^2$. The $R^2$ value represents the proportion of variability in the response variable that is explained by the explanatory variable. For this model, 37.3% of the variability in runs is explained by at-bats. The "Coefficients" table shown next is key; its first column displays the linear model's y-intercept and the coefficient of `at_bats`. With this table, we can write down the least squares regression line for the linear model:$$\hat{y} = -2789.2429 + 0.6305 * atbats$$ **Exercise 4** Fit a new model that uses `homeruns` to predict `runs`. Using the estimates from the model output, write the equation of the regression line. What does the slope tell us in the context of the relationship between success of a team and its home runs?
###Code
# Exercise 4 asks for a model that uses home runs to PREDICT runs, so `runs`
# is the response (left of ~) and `homeruns` the predictor. The original cell
# had the formula reversed ('homeruns ~ runs'), which regresses home runs on
# runs instead. NOTE(review): the printed summary below this cell is stale
# output from the reversed model — re-run the cell to refresh it.
m2 = smf.ols('runs ~ homeruns', data=mlb11).fit()
print(m2.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: homeruns R-squared: 0.627
Model: OLS Adj. R-squared: 0.613
Method: Least Squares F-statistic: 46.98
Date: Wed, 06 Jun 2018 Prob (F-statistic): 1.90e-07
Time: 15:37:41 Log-Likelihood: -134.44
No. Observations: 30 AIC: 272.9
Df Residuals: 28 BIC: 275.7
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -85.1566 34.797 -2.447 0.021 -156.435 -13.878
runs 0.3415 0.050 6.854 0.000 0.239 0.444
==============================================================================
Omnibus: 0.859 Durbin-Watson: 1.952
Prob(Omnibus): 0.651 Jarque-Bera (JB): 0.866
Skew: -0.351 Prob(JB): 0.649
Kurtosis: 2.553 Cond. No. 6.01e+03
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 6.01e+03. This might indicate that there are
strong multicollinearity or other numerical problems.
###Markdown
Prediction and prediction errorsLet's create a scatterplot with the least squares line laid on top.
###Code
from statsmodels.graphics.regressionplots import abline_plot

# Scatterplot of runs vs. at-bats with the fitted least squares line overlaid.
ax = mlb11.plot(x='at_bats', y='runs', kind='scatter')
# abline_plot takes the intercept and slope straight from the fitted model m1.
abline_plot(model_results=m1, ax=ax)
plt.show()
###Output
_____no_output_____
###Markdown
The function `abline_plot` plots a line based on its slope and intercept. Here, we used a shortcut by providing the model `m1`, which contains both parameter estimates. This line can be used to predict $y$ at any value of $x$. When predictions are made for values of $x$ that are beyond the range of the observeddata, it is referred to as *extrapolation* and is not usually recommended. However, predictions made within the range of the data are more reliable. They're also used to compute the residuals. **Exercise 5** If a team manager saw the least squares regression line and not the actual data, how many runs would he or she predict for a team with 5,578 at-bats? Is this an overestimate or an underestimate, and by how much? In other words, what is the residual for this prediction? Model diagnosticsTo assess whether the linear model is reliable, we need to check for (1) linearity, (2) nearly normal residuals, and (3) constant variability.*Linearity*: You already checked if the relationship between runs and at-batsis linear using a scatterplot. We should also verify this condition with a plot of the residuals vs. at-bats. Recall that any code following a ** is intendedto be a comment that helps understand the code but is ignored by Python.
###Code
# Diagnostic plot: residuals vs. the predictor. A patternless cloud centred
# on zero supports the linearity assumption of the model.
fig, ax = plt.subplots()
ax.scatter(mlb11['at_bats'], m1.resid)
abline_plot(intercept= 0, slope=0, ax=ax, ls='--') # adds a horizontal dashed line at y = 0
plt.show()
###Output
_____no_output_____
###Markdown
**Exercise 6** Is there any apparent pattern in the residuals plot? What does this indicate about the linearity of the relationship between runs and at-bats? *Nearly normal residuals*: To check this condition, we can look at a histogram
###Code
plt.hist(m1.resid)
plt.show()
###Output
_____no_output_____
###Markdown
or a normal probability plot of the residuals.
###Code
from scipy.stats import probplot
probplot(m1.resid, plot=plt)
plt.show()
###Output
_____no_output_____ |
NGrid MA 2018 MA Gas Leaks Analysis.ipynb | ###Markdown
Analysis of National Grid Massachusetts Gas Leaks 2018 Goal: Provide insights into National Grid Massachusetts gas leaks. Provide a visualization of the leaks. Add socio-demographic information. Spur action on this critical issue. -------People, this is an important issue! Worcester Residents Demand Gas Leak Repairs There are growing concerns that five natural gas leaks near schools in Worcester are causing health issues associated with asthma. (Published Tuesday, Jun 18, 2019 | Credit: Cassy Arsenault)https://www.necn.com/multimedia/Worcester-Residents-Demand-Gas-Leak-Repairs_NECN-511476042.html https://heetma.org/gas-leaks/ Steps: Load needed libraries Import NGrid data Add Zipcodes --- *** Can anyone provide advice on this? See below ****Add soci-demographic information. --- *** Can anyone provide advice on this? See below. **** - how to loop over several addresses - How to get the zipcode output into a pandas dataframe?Provide visualizations Do analytics Data Info Data generously provide by Audrey Schulman, HEET (https://heetma.org) Co-founder and Executive Director: The first number is the National Grid ID number. The ending grade is defined by potential for explosion. To generalize: Grade 1 is in a contained space Grade 2 is close to a building, (2A? Unknown. I recoded those to 2 in a new column)Grade 3 is everything else. Grades having nothing to do with volume of emissions.
###Code
# import modules
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Import NGrid data https://chrisalbon.com/python/data_wrangling/load_excel_file_into_pandas/
###Code
# Import the excel file and call it xls_file
gasleaks = pd.read_csv('0-Boston_Gas_2018_SQ_Report_3-1-19_unrepaired_pg.csv')
gasleaks.head(20)
###Output
_____no_output_____
###Markdown
These data have no zip codes, which means we can't convert the locations to lat/long, which means we can't plot this the way we want. There are a variety of sites online where you can find zipcodes for an address, but we have over 10,000 cases so we need a better solution than these, searching one-by-one: https://tools.usps.com/go/zip-code-lookup.htm https://www.unitedstateszipcodes.org/ We want to do geocoding to work with the dataThe pygeocoder solution looks cool: It allows you to directly convert an address to coordinates or vice versa. It also allows you to validate, format and split addresses into civic number, street name, city etc.But we need the zipcodes (ideally, by the specific location; not just by the town.)someone recommended geocoder. First attempt with conda bricked conda and jypyter notebook; so did an Anaconda reinstall.
###Code
# Load packages
# https://geocoder.readthedocs.io/
##from pygeocoder import Geocoder
#import pandas as pd
##import numpy as np
###Output
_____no_output_____
###Markdown
Maybe try this (in10k batches) by try geocoder for now. https://geocoding.geo.census.gov/ Install geocoder (on ubuntu 18.04)
###Code
#sudo snap install geocoder
###Output
_____no_output_____
###Markdown
But that was a mistake; it did not install to my environment. So try this, for example: `conda install --name my_env35 numpy`, then `conda install --name my_env35 geocoder` — that did not work. But creating a new environment (pg_geocode) and then `pip install geocoder` seems to have worked. https://geocoder.readthedocs.io/ https://docs.snapcraft.io/getting-started
###Code
import geocoder
###Output
_____no_output_____
###Markdown
https://geocoder.readthedocs.io/api.htmlhouse-addresses
###Code
# example
>>> g = geocoder.google("453 Booth Street, Ottawa ON")
>>> g.housenumber
>>> g.postal
>>> g.street
>>> g.street_long
###Output
_____no_output_____
###Markdown
g = geocoder.google("453 Booth Street, Ottawa ON")g.housenumberg.postalg.streetg.street_long g.postal g
###Code
#### This uszipcode library has cool info we could match up to our data-- but we don't have zipcodes.
# https://pypi.org/project/uszipcode/   (commented out: a bare URL is a SyntaxError in a code cell)
###Output
_____no_output_____
###Markdown
from uszipcode import SearchEnginesearch = SearchEngine(simple_zipcode=True) set simple_zipcode=False to use rich info databasezipcode = search.by_zipcode("10001") zipcode Looking for Chicago and IL, but entered wrong spelling.res = search.by_city_and_state("Chicago", "il") len(res) 56 zipcodes in Chicago
###Code
# I'm trying to figure out how to work with the zipcode output -- the result
# is a single SimpleZipcode object, not a sequence, so len() fails.  The
# pasted traceback text below also made this cell a SyntaxError; both are
# kept as comments for the record.
# len(zipcode)
# TypeError: object of type 'SimpleZipcode' has no len()
###Output
_____no_output_____
###Markdown
res.describe()AttributeError: 'list' object has no attribute 'describe'
###Code
So the output is a list.
How to export it to an array or a data frame so we can match it back to our data?
###Output
_____no_output_____
###Markdown
res.type()AttributeError: 'list' object has no attribute 'type'
###Code
# Pull the first match out of the search-result list and inspect its fields.
zipcode = res[0]
zipcode.major_city
zipcode.state_abbr
zipcode
zipcode.zipcode
###Output
_____no_output_____
###Markdown
We can search by city and state. But that is not a great solution, because we want to leverage the street information too. Below is a long way to do it (made easier with some copy/pasting in Excel); but this would probably be better done by looping over the data. **** I can review loops, but if anyone has advice on how we could do that loop that would be great****
###Code
# Look up the MA zipcode records for every town appearing in the gas-leak
# data.  The original cell was 107 copy-pasted search calls (res1..res107);
# this loop produces the same lookups, keyed by town name so the results can
# be merged back onto the data later.
ma_towns = [
    "ABINGTON", "ACTON", "AMESBURY", "ARLINGTON", "AYER",
    "BEDFORD", "BELMONT", "BEVERLY", "BOSTON", "BOXBOROUGH",
    "BOXFORD", "BRAINTREE", "BREWSTER", "BRIGHTON", "BROOKFIELD",
    "BROOKLINE", "BURLINGTON", "BYFIELD", "CAMBRIDGE", "CARLISLE",
    "CENTERVILLE", "CHARLESTOWN", "CHELSEA", "CLINTON", "COHASSET",
    "CONCORD", "DANVERS", "DORCHESTER", "DUDLEY", "EAST BOSTON",
    "EAST BROOKFIELD", "ESSEX", "EVERETT", "GEORGETOWN", "GLOUCESTER",
    "GROTON", "GROVELAND", "HAMILTON", "HARVARD", "HAVERHILL",
    "HINGHAM", "HULL", "IPSWICH", "JAMAICA PLAIN", "LANCASTER",
    "LEICESTER", "LEOMINSTER", "LEXINGTON", "LINCOLN", "LITTLETON",
    "LOWELL", "LUNENBURG", "LYNN", "LYNNFIELD", "MALDEN",
    "MANCHESTER", "MARBLEHEAD", "MARSTONS MILLS", "MEDFORD", "MELROSE",
    "MERRIMAC", "MIDDLETON", "MILTON", "NAHANT", "NEWBURYPORT",
    "NEWTON", "NORTH BROOKFIELD", "NORWOOD", "PEABODY", "QUINCY",
    "READING", "REVERE", "ROCKLAND", "ROCKPORT", "ROSLINDALE",
    "ROWLEY", "ROXBURY", "SALEM", "SALISBURY", "SAUGUS",
    "SHIRLEY", "SOMERVILLE", "SOUTH BOSTON", "SOUTHBRIDGE", "SPENCER",
    "STONEHAM", "SUDBURY", "SWAMPSCOTT", "SWIFTS BEACH", "TOPSFIELD",
    "WAKEFIELD", "WALTHAM", "WARREN", "WATERTOWN", "WAYLAND",
    "WEBSTER", "WELLESLEY", "WENHAM", "WEST BROOKFIELD", "WEST NEWBURY",
    "WEST ROXBURY", "WESTON", "WEYMOUTH", "WHITMAN", "WINCHESTER",
    "WINTHROP", "WOBURN",
]
# One uszipcode lookup per town (each returns a list of zipcode records).
res_by_town = {town: search.by_city_and_state(town, "MA") for town in ma_towns}
# Keep the original res1/res2 names alive -- later cells reference them.
res1 = res_by_town["ABINGTON"]
res2 = res_by_town["ACTON"]
res1
#res2
###Output
_____no_output_____
###Markdown
Ideally, we would get the zipcodes by street, but this information might be good enough. How can we get it into a dataframe and match it back to the data?
###Code
# Try to tabulate the zipcode records.
# NOTE(review): per the note below this cell, this did not produce the
# expected table -- res1 is a list of SimpleZipcode objects, not rows;
# converting each record to a dict first would likely be needed.
res1_info = pd.DataFrame(res1)
res1_info
###Output
_____no_output_____
###Markdown
Doh! That did not work!
###Code
zipcode
###Output
_____no_output_____
###Markdown
Maybe try this advice? import pandas as pd import pandas as pd list of strings lst = ['Geeks', 'For', 'Geeks', 'is', 'portal', 'for', 'Geeks'] Calling DataFrame constructor on list df = pd.DataFrame(lst) df https://www.geeksforgeeks.org/create-a-pandas-dataframe-from-lists/
###Code
# Demo: pandas turns a flat Python list into a one-column DataFrame --
# each element becomes a row, and the single column is labelled 0.
lst = ['Geeks', 'For', 'Geeks', 'is',
       'portal', 'for', 'Geeks']
# Build the frame directly from the list.
df = pd.DataFrame(data=lst)
df
###Output
_____no_output_____
###Markdown
But this comes out in rows, I want the info to come out across columns.
###Code
# Re-inspect the earlier attempts.
res1_info
res1_info
# NOTE(review): pd.DataFrame(data, index) -- passing res2 as the *index*
# argument here is almost certainly not what was intended; verify.
res1_2_info = pd.DataFrame(res1,res2)
res1_2_info
zipcode
res1
###Output
_____no_output_____
###Markdown
try many in one
###Code
# Attempts to geocode several towns in one call -- the API accepts a single
# (city, state) pair per call, so both forms fail; loop over towns instead.
# Both are kept as comments because the second line was a SyntaxError and
# made the whole cell unparseable.
# manycityresults = search.by_city_and_state(["ABINGTON", "MA"], ["ACTON", "MA"])
# -> AttributeError: 'list' object has no attribute 'upper'
# manycityresults = search.by_city_and_state[("ABINGTON", "MA")], ("ACTON", "MA")]
# -> SyntaxError: invalid syntax
###Output
_____no_output_____
###Markdown
create a dictionary or list? then loop over it to get zip information; then print it out- then merge back with the original data. Summarize the Data
###Code
#gasleaks.describe()
###Output
_____no_output_____
###Markdown
https://chrisalbon.com/python/data_wrangling/pandas_pivot_tables/
###Code
# Leak counts per town (first five, alphabetically).
gasleaks.pivot_table(index=['Town'], aggfunc='count').head()
# Leak counts by final leak grade.
gasleaks.pivot_table(index=['Endingleakgrade'], aggfunc='count')
# Leak counts by town and grade combined.
gasleaks.pivot_table(index=['Town','Endingleakgrade'], aggfunc='count').head()
###Output
_____no_output_____
###Markdown
List Number of leaks by town in descending order.(there has got to be a quicker way than this- but that is for later) https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html
###Code
# Signature reference pasted from the pandas docs -- not runnable code, so it
# is kept as a comment (it was a SyntaxError as bare text in a code cell):
# DataFrame.sort_values(by, axis=0, ascending=True, inplace=False,
#                       kind='quicksort', na_position='last')
# Sorts by the values along either axis.
###Output
_____no_output_____
###Markdown
df1 = gasleaks[['Town','f']] df1.pivot_table(index=['Town'], aggfunc='count',sort_values= True) df.sort_values('2')df1.sort_values("f", ascending=False) df1.pivot_table(index=['Town'], aggfunc='count',sort_values=False) df2 = df1.pivot_table(index=['Town'], aggfunc='count') df3 = df2.sort_values(by="f", ascending=False) df3
###Code
# Ultimately we're going to want to analyze that in terms of population, or
# some other factor -- perhaps by number of gas lines running through the
# town -- if we can get that information.
# (For consideration: by centrality -- where lines run through; by median
# income, race, etc.)
#### Select a Town to investigate
# (Prose in a code cell was a SyntaxError; converted to comments.)
###Output
_____no_output_____
###Markdown
Brookline = gasleaks[gasleaks['Town']=="BROOKLINE"] Brookline.shape Brookline.head() pd.crosstab(Brookline.Town, Brookline.ReportedDate, margins=True) chart this when I have time
###Code
https://chrisalbon.com/python/data_wrangling/pandas_crosstabs/
###Output
_____no_output_____
###Markdown
pd.crosstab(gasleaks.Town, gasleaks.ReportedDate, margins=True)pd.crosstab(gasleaks.Town, gasleaks.ReportedDate, margins=True).head()
###Code
https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.crosstab.html
###Output
_____no_output_____
###Markdown
Plot the responses for different events and regionssns.lineplot(x="ReportedDate", y="Town", hue="region", style="event", data=gasleaks)https://stackoverflow.com/questions/51846948/seaborn-lineplot-module-object-has-no-attribute-lineplot?rq=1 fix this later
###Code
# Scatter of leaks by date/town -- disabled for now (see the note above:
# lmplot needs numeric axes, to be fixed later).
#sns.lmplot('ReportedDate', 'Town', data=gasleaks, fit_reg=False)
# (rows, columns) of the gas-leak table.
gasleaks.shape
###Output
_____no_output_____
###Markdown
https://chrisalbon.com/machine_learning/preprocessing_structured_data/deleting_missing_values/
###Code
# Reduce to the two columns needed for the heatmap.
gasleaks_clean = pd.DataFrame(gasleaks, columns=['Town', 'Endingleakgrade'])
# Remove observations with missing values.
# dropna() returns a new DataFrame -- it must be assigned back, otherwise
# the rows with missing values are silently kept (the original cell
# discarded the result).
gasleaks_clean = gasleaks_clean.dropna()
#clean this up later:
# NOTE(review): heatmap expects a 2-D table (e.g. a crosstab), not a list of
# two Series -- this still needs the cleanup flagged above.
sns.heatmap([gasleaks_clean.Town, gasleaks.Endingleakgrade], annot=True, fmt="d")
###Output
_____no_output_____
###Markdown
Deal with this issue;TypeError: ufunc 'isnan' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe'' pd.to_numeric(df['gasleaks_clean'], errors='coerce')but it is not fully numeric-- 2A, etc. REFERENCES https://uszipcode.readthedocs.io/01-Tutorial/index.htmlsearch-by-city-and-state https://pypi.org/project/uszipcode/ Import the data: https://chrisalbon.com/python/data_wrangling/load_excel_file_into_pandas/ Use this (or deduplicate in Excel) to get the unique town names. (if we are going to get zip codes, just by town; not by street address as well): https://chrisalbon.com/python/data_wrangling/pandas_list_unique_values_in_column/ Geocoding Converting a physical address or location into latitude/longitude:Need to add the zipcodes to do this Jon Cusick [8:53 AM]Hi Perry! I’d suggest taking a look at `geopandas` (http://geopandas.org/) for help with some of the spatial analysis. It essentially adds a geometry column to a regular `pandas` dataframe that allows for traditional GIS-like operations (point-in-polygon, spatial joins, distance measurement, etc.)I haven’t worked with the `pygeocoder` package that you listed in the notebook, but that seems like it could be a promising way to go. An approach I might take would be to try to get the addresses converted to a lat/lon via the geocoder and then use `geopandas` to perform spatial joins (http://geopandas.org/reference/geopandas.sjoin.html) on datasets of interest (zip codes, Census tract boundaries for demographic information, etc, to pull their attribute information into the gas leak location points. VN [10:12 AM]@Perry: about 10k records isn't so bad, you can probably slowly, while still observing the request limit of each services, geocode using the free Google Maps API and OpenStreetMap geocoder.The problem with zipcode as I've learned from parsing the world data, zipcode isn't "location" base per-se... 
it's more of a postal/street specific.Since it's mainly for mail delivery route, just checking the zipcode for a city, town isn't enoughit's a bit rough and manual, but I think that's probably going to be the best...route as far as I can tellWhen you geocode, you'll get the actual latlong & zip, which I suspect would be super helpful in plotting them on a map of some sort Itamar: What you want is a geocoding library, e.g. https://geocoder.readthedocs.io/ Tyler: I think BARI (https://www.northeastern.edu/csshresearch/bostonarearesearchinitiative/) tends to do this type of thing. Your specific use case sounds like you need a Geocoder. Google and Bing might be cost prohibitive depending on the size of your data and project budget. I’d encourage you to reach out to BARI if other suggestions aren’t suitable for your use case. https://chrisalbon.com/python/data_wrangling/geocoding_and_reverse_geocoding/ Address the geo-coding issue: https://stackoverflow.com/questions/3212550/google-geocoding-api-request-denied Use this to match the Zipcode info with the NGrid data?: https://chrisalbon.com/python/data_wrangling/pandas_join_merge_dataframe/ Just use city? mmnot address? https://chrisalbon.com/python/data_wrangling/geolocate_a_city_and_country/ Group a time period? https://chrisalbon.com/python/data_wrangling/pandas_group_by_time/
###Code
#df.truncate(before='1/2/2014', after='1/3/2014')
###Output
_____no_output_____
###Markdown
Use this to get the zipcode output (a list) into a pandas dataframe: https://www.geeksforgeeks.org/create-a-pandas-dataframe-from-lists/ This might be useful: https://chrisalbon.com/python/data_wrangling/pandas_create_column_with_loop https://chrisalbon.com/python/data_wrangling/pandas_crosstabs/ If we just work with Towns, not addresses, we might want to remove duplicate info: https://chrisalbon.com/python/data_wrangling/pandas_delete_duplicates/ But then we lose information about the leaks; or use this: https://chrisalbon.com/python/data_wrangling/pandas_find_unique_values/ Map values in a DF: https://chrisalbon.com/python/data_wrangling/pandas_map_values_to_values/
###Code
# Map each city to its state with a plain dict lookup.
# NOTE(review): this assumes df has a 'city' column -- the df defined
# earlier in this notebook does not; this cell is a pasted reference
# example, verify before running.
city_to_state = { 'San Francisco' : 'California',
'Baltimore' : 'Maryland',
'Miami' : 'Florida',
'Douglas' : 'Arizona',
'Boston' : 'Massachusetts'}
df['state'] = df['city'].map(city_to_state)
df
###Output
_____no_output_____
###Markdown
https://chrisalbon.com/python/data_wrangling/pandas_rename_multiple_columns/
###Code
# Grab rows based on column values (was a bare prose line -> SyntaxError).
value_list = ['Tina', 'Molly', 'Jason']
# Grab DataFrame rows where column has certain values.
# NOTE(review): assumes df has a 'name' column -- verify against the frame
# actually in scope here (the earlier demo df does not have one).
df[df.name.isin(value_list)]
###Output
_____no_output_____ |
icd/ICD20191-Lista06/.ipynb_checkpoints/Lista06 (1)-checkpoint.ipynb | ###Markdown
Lista 06 - Gradiente Descendente e Regressão Multivariada
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy.testing import *
plt.ion()
###Output
_____no_output_____
###Markdown
Hoje vamos fazer um gradiente descendente para uma regressão linear com múltiplas variáveis.Para isso, utilizaremos a base de dados carros, ``hybrid.csv``. As colunas são definidas da seguinte forma:* veículo (vehicle): modelo do carro* ano (year): ano de fabricação* msrp: preço de varejo em dólar sugerido pelo fabricante em 2013.* aceleração (acceleration): taxa de aceleração em km por hora por segundo* mpg: economia de combustível em milhas por galão* classe (class): a classe do modelo.Nosso objetivo será estimar o valor de preço sugerido dos carros a partir dos demais atributos (exluindo o nome do veículo e a classe).Portanto, teremos a regressão definida pela fórmula:$$ Y = X\Theta + \epsilon $$Em que, Y corresponde à coluna ``msrp`` dos dados, e X corresponde às colunas ``year,acceleration,mpg``.
###Code
# Hybrid-car data: vehicle, year, msrp, acceleration, mpg, class.
df = pd.read_csv('./hybrid.csv')
df.head()
import seaborn as sns
# Pairwise scatter plots to eyeball which attributes relate linearly to msrp.
sns.pairplot(df, diag_kws={'edgecolor':'k'}, plot_kws={'alpha':0.5, 'edgecolor':'k'})
###Output
_____no_output_____
###Markdown
Selecionamos apenas as colunas que serão utilizadas.Normalizamos os dados para que o gradiente descendente rode sem problemas.
###Code
# Select target and features, working on copies: without .copy(),
# y = df['msrp'] shares data with df, so the in-place normalisation below
# would z-score df's msrp column itself -- corrupting the later cell that
# re-reads df['msrp'] for the log-transform experiment.
y = df['msrp'].copy()
X = df[['year','acceleration','mpg']].copy()
# z-normalise features and target (sample standard deviation, ddof=1).
X -= X.mean()
X /= X.std(ddof=1)
y -= y.mean()
y /= y.std(ddof=1)
# Column of ones for the intercept term, then drop to plain numpy arrays.
X.insert(0, 'intercept', 1.0)
X = X.values
y = y.values
###Output
_____no_output_____
###Markdown
__IMPORTANTE:__Não crie ou utilize qualquer variável ou função com nome iniciado por ``_teste_``. A) Implemente a função de gradiente dos parâmetros da regressão, retornando um array com os valores dos gradientes para cada parâmetro theta.
###Code
def gradients(theta, X, y):
    """Gradient of the mean squared error with respect to theta.

    X : (n, m) design matrix, y : (n,) targets, theta : (m,) parameters.
    Returns an (m,) array: -2/n * X^T (y - X theta).
    """
    residuals = y - X.dot(theta)
    # Broadcasting residuals (n,) against X.T (m, n) weights each feature
    # column by its residual; averaging over samples gives the gradient.
    return -2 * (X.T * residuals).mean(axis=1)
###Output
_____no_output_____
###Markdown
B) Implemente a função de gradiente descendente para os parâmetros da regressão linear. Retorne uma lista com o valor de alpha e os valores de beta para cada coluna, nessa ordem.
###Code
def descent(theta0, X, y, learning_rate=0.005, tolerance=0.0000001):
    """Minimise the MSE by gradient descent starting from theta0.

    Iterates theta <- theta - learning_rate * grad until the L1 change
    between consecutive iterates is at most `tolerance`, then returns
    the final parameter vector.
    """
    current = theta0.copy()
    while True:
        step = learning_rate * gradients(current, X, y)
        updated = current - step
        # Converged when the parameters barely move any more.
        if np.abs(updated - current).sum() <= tolerance:
            return updated
        current = updated
###Output
_____no_output_____
###Markdown
C) Agora vamos tentar avaliar o modelo de regressão linear obtido com o gradiente descendente.Primeiro implementem uma função que calcule o valor da soma total dos quadrados (SST) a partir dos dados.
###Code
def sst(y):
    """Total sum of squares: sum of squared deviations of y from its mean."""
    return ((y - y.mean()) ** 2).sum()
###Output
_____no_output_____
###Markdown
D) Para calcular a soma total de erros (SSE), primeiro precisamos ter uma previsão para os valores de preço dos apartamentos.Implementem uma função que obtenha os valores estimativa de preço a partir dos demais atributos, de acordo com o modelo de regressão linear.A função deve retornar uma lista com os valores previstos.
###Code
def predict(X, theta):
    """Predicted targets of the linear model: the product X theta."""
    return X.dot(theta)
###Output
_____no_output_____
###Markdown
E) Agora implemente a função de cálculo da soma total de erros (SSE).
###Code
def sse(X, y, theta):
    """Residual sum of squares between y and the model's predictions."""
    residuals = y - predict(X, theta)
    return (residuals ** 2).sum()
###Output
_____no_output_____
###Markdown
F) Finalmente, implemente a função que calcula o coeficiente de determinação (R2).
###Code
def r2(X, y, theta):
    """Coefficient of determination: fraction of variance explained."""
    explained = sse(X, y, theta) / sst(y)
    return 1 - explained
# Fit by gradient descent starting from all-ones, then score the fit.
theta = np.ones(4)
theta = descent(theta, X, y)
r2(X, y, theta)
###Output
_____no_output_____
###Markdown
G) Se observarmos os dados pelos gráficos gerados no começo do notebook, podemos perceber que nem todos possuem uma relação linear. Vamos tentar transformar os dados de um dos atributos dos carros, para que uma regressão linear possa ser aplicada com melhores resultados.Tire o logaritmo dos dados do atributo ```mpg```, antes de z-normalizar.
###Code
# Rebuild target and features, this time log-transforming mpg (its relation
# to price looks non-linear in the pair plots).  The .copy() calls fix the
# SettingWithCopyWarning shown in the output below and guarantee none of the
# transforms write back into df.
y = df['msrp'].copy()
X = df[['year','acceleration','mpg']].copy()
X['mpg'] = np.log(X['mpg'])
# z-normalise features and target (sample standard deviation, ddof=1).
X -= X.mean()
X /= X.std(ddof=1)
y -= y.mean()
y /= y.std(ddof=1)
# Column of ones for the intercept term, then drop to plain numpy arrays.
X.insert(0, 'intercept', 1.0)
X = X.values
y = y.values
###Output
C:\Users\Gabriela\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
after removing the cwd from sys.path.
###Markdown
Note que o código do gradiente descendente pode ser executado sem alterações.Verifique se o R2 da regressão melhorou ou piorou ao se transformar os dados.
###Code
# Refit on the log-transformed features and compare R2 against the
# previous (untransformed) run.
theta = np.ones(4)
theta = descent(theta, X, y)
r2(X, y, theta)
print ("O R2 da regressão melhorou após a transformação dos dados. Antes de aplicar a função logaritmica o R2 era 0,529 e ",
"após a transformação passou para 0,554")
###Output
O R2 da regressão melhorou após a transformação dos dados. Antes de aplicar a função logaritmica o R2 era 0,529 e após a transformação passou para 0,554
|
notebook/Python13.ipynb | ###Markdown
###Code
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# Disable the default axes grid.
rc('axes', grid='False')
# Mount Google Drive and move into the DICOM data folder (Colab only;
# the `cd` line is an IPython magic, not plain Python).
from google.colab import drive
drive.mount('/gdrive')
cd '/gdrive/My Drive/python/DICOMdata/'
# IPython shell escape: install the helper package for RT structure sets.
!pip install pydicom-tools
import pydicom
from pydicom_tools import RTSS
# Load the RT structure set and take the PTV70 contour paths,
# keyed by slice z-position.
ss = RTSS('RS.002445.dcm')
paths = ss.paths['PTV70']
# Load the 3-D dose grid and reconstruct its physical coordinates from the
# DICOM geometry tags.
dose_file = pydicom.dcmread('RD.002445.Dose_Plan1.dcm')
origin = np.array(dose_file.ImagePositionPatient)
pixel_spacing = np.array(dose_file.PixelSpacing)
columns = dose_file.Columns
rows = dose_file.Rows
# x varies along columns, y along rows (PixelSpacing is [row, col] per DICOM).
dose_x_array = np.linspace(origin[0], origin[0] + pixel_spacing[1] * (columns -1), num=columns)
dose_y_array = np.linspace(origin[1], origin[1] + pixel_spacing[0] * (rows - 1), num=rows)
dose_z_array = origin[2] + np.array(dose_file.GridFrameOffsetVector)
# Flat list of (x, y) sample points covering one dose plane.
dose_grid = [[i, j] for j in dose_y_array for i in dose_x_array]
# Boolean mask per slice: True where a grid point lies inside the contour.
masks = {}
for z, path in paths.items():
    mask = path.contains_points(dose_grid).reshape(dose_y_array.size, dose_x_array.size)
    masks[z] = mask
# Visual check of the mask on one slice.
z = 0
plt.imshow(masks[z])
# Convert stored pixel values to dose in Gy via the DICOM scaling factor.
dose = dose_file.pixel_array * dose_file.DoseGridScaling
from scipy.interpolate import interp1d
# Interpolator along z so dose can be sampled between stored planes.
f = interp1d(dose_z_array, dose, axis=0)
def get_dose(z):
    """Return the 2-D dose plane at slice position z.

    Uses the stored plane when z matches a grid position exactly,
    interpolates along z otherwise, and falls back to an all-zero plane
    when z lies outside the interpolation range.
    """
    matches = np.where(dose_z_array == z)[0]
    if matches.size > 0:
        return dose[matches[0], :, :]
    try:
        return f([z])[0]
    except ValueError:
        # z outside the dose grid -> no dose delivered there.
        return np.zeros_like(dose[0, :, :])
# Mini-demo of numpy masked arrays: masked entries are hidden from
# computations and printed as --.
a = np.array([1,2,3,4,5])
mask = np.array([False,False,True,False,False])
a_masked = np.ma.array(a, mask=mask)
print(a_masked)
# ~ inverts a boolean mask element-wise.
mask_inv = ~mask
print(mask_inv)
# For every contoured slice, mask the dose plane so only voxels inside the
# structure remain.  np.ma hides entries where the mask is True, hence the
# inversion of the inside-contour mask.
dose_masked = {}
for z, mask in masks.items():
    dose_plane = get_dose(z)
    dose_plane_masked = np.ma.array(dose_plane, mask=~mask)
    dose_masked[z] = dose_plane_masked
z = 25
# Full plane (top) vs. structure-only plane (bottom), shared colour scale.
fig = plt.figure(figsize=(4,3.5),dpi=150)
ax1 = fig.add_subplot(2,1,1)
ax1.imshow(get_dose(z), vmin=10, vmax=40)
ax2 = fig.add_subplot(2,1,2)
ax2.imshow(dose_masked[z], vmin=10, vmax=40)
bin_w = 0.01 # Gy
# Enough bins to cover the maximum dose at bin_w resolution.
num_of_bins = int(dose.max() / bin_w) + 1
z = 0
# Differential DVH of one slice; .compressed() keeps only unmasked voxels.
hist, edge = np.histogram(dose_masked[z].compressed(),bins=num_of_bins, range=(0, num_of_bins*bin_w))
# Midpoint of each histogram bin, for plotting.
bincenter = [(edge[i]+edge[i+1])/2 for i in range(edge.size - 1)]
plt.plot(bincenter, hist)
# Accumulate the differential DVH over all contoured slices.
hist_total = np.zeros(num_of_bins)
for plane in dose_masked.values():
    hist, edge = np.histogram(plane.compressed(), bins=num_of_bins, range=(0, num_of_bins*bin_w))
    hist_total += hist
# Midpoint of each histogram bin, for plotting.
bincenter = [(edge[i]+edge[i+1])/2 for i in range(edge.size - 1)]
plt.xlabel('Dose (Gy)')
plt.ylabel('Num of voxels')
plt.plot(bincenter, hist_total)
# Slice thickness in mm -- TODO confirm against the CT/plan geometry.
thickness = 2.5
# Voxel volume in cm3 (pixel spacings are in mm, hence the /1000).
vox = pixel_spacing[0] * pixel_spacing[1] * thickness / 1000.
# Total structure volume: voxel count times voxel volume.
volume = hist_total.sum() * vox
plt.xlabel('Dose (Gy)')
plt.ylabel('Volume (cm3)')
plt.plot(bincenter, hist_total*vox)
# Sanity check: cumulative sum of a small array.
b = np.array([1,3,5,7,9])
print(np.cumsum(b))
# Cumulative DVH: number of voxels receiving at least each dose level
# (total count minus the running sum of the differential histogram).
cum_dvh = hist_total.sum() - hist_total.cumsum()
plt.xlabel('Dose (Gy)')
plt.ylabel('Volume (cm3)')
plt.plot(bincenter, cum_dvh*vox)
# Relative cumulative DVH, in percent of the structure volume.
cum_rel_dvh = cum_dvh*vox/volume * 100
plt.xlabel('Dose (Gy)')
plt.ylabel('Volume (%)')
plt.plot(bincenter, cum_rel_dvh)
# Invert the DVH (dose as a function of relative volume) to read off
# Dx metrics; D95 is the dose covering 95% of the structure volume.
Dx = interp1d(cum_rel_dvh, bincenter)
D95 = Dx([95])
print(D95)
###Output
_____no_output_____ |