hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (list) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (list) | max_issues_count (int64 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (list) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
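The rows that follow flatten each notebook into the columns above. Judging only from the sample rows shown here (an assumption, not official documentation of this dump), `cells` stores groups of consecutive same-type cells, `cell_types` gives one type per group, `cell_type_groups` repeats the type for every cell in a group, and each cell is a `[source, output]` pair with the sentinel `_____no_output_____` standing in for missing output. A minimal sketch of turning one parsed row back into an `.ipynb`-style JSON document, under those assumptions:

```python
import json

NO_OUTPUT = "_____no_output_____"  # sentinel used in the rows below for cells without captured output


def row_to_notebook(row: dict) -> dict:
    """Rebuild a minimal .ipynb-style dict from one table row.

    Assumes the layout visible in the sample rows: `cells` holds groups of
    consecutive cells, `cell_types[i]` is the type of group i, and every cell
    inside a group is a [source, output] pair.
    """
    nb_cells = []
    for group, cell_type in zip(row["cells"], row["cell_types"]):
        for source, output in group:
            cell = {"cell_type": cell_type, "metadata": {}, "source": source}
            if cell_type == "code":
                cell["execution_count"] = None
                # Represent any captured text output as a single stream output.
                cell["outputs"] = [] if output == NO_OUTPUT else [
                    {"output_type": "stream", "name": "stdout", "text": output}
                ]
            nb_cells.append(cell)
    return {"nbformat": 4, "nbformat_minor": 5, "metadata": {}, "cells": nb_cells}


# Example usage (hypothetical `row` dict holding the columns listed above):
# with open(row["hexsha"] + ".ipynb", "w") as f:
#     json.dump(row_to_notebook(row), f, indent=1)
```

The stream-output shape above is only an approximation; real notebook outputs can be rich display data, so treat this as a quick inspection aid rather than a faithful round-trip of the original notebooks.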
cb720862a5788f08a1159a99faf22f1686208769 | 95,708 | ipynb | Jupyter Notebook | sandbox-deeprank.ipynb | pl8787/DeepRank_PyTorch | ec24f83168aeadad7f89fd9fae5992abd0da89b1 | [
"MIT"
]
| 31 | 2019-07-03T15:26:02.000Z | 2022-03-06T14:38:25.000Z | sandbox-deeprank.ipynb | pl8787/DeepRank_PyTorch | ec24f83168aeadad7f89fd9fae5992abd0da89b1 | [
"MIT"
]
| 1 | 2020-08-13T21:56:15.000Z | 2021-08-10T02:52:52.000Z | sandbox-deeprank.ipynb | pl8787/DeepRank_PyTorch | ec24f83168aeadad7f89fd9fae5992abd0da89b1 | [
"MIT"
]
| 8 | 2019-09-03T01:28:54.000Z | 2020-10-30T14:36:37.000Z | 62.431833 | 25,892 | 0.726742 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom importlib import reload ",
"_____no_output_____"
],
[
"from deeprank.dataset import DataLoader, PairGenerator, ListGenerator\nfrom deeprank import utils",
"_____no_output_____"
],
[
"seed = 1234\ntorch.manual_seed(seed)",
"_____no_output_____"
],
[
"loader = DataLoader('./config/letor07_mp_fold1.model')",
"[./data/letor/r5w/word_dict.txt]\n\tWord dict size: 193367\n[./data/letor/r5w/qid_query.txt]\n\tData size: 1692\n[./data/letor/r5w/docid_doc.txt]\n\tData size: 65323\n[./data/letor/r5w/embed_wiki-pdc_d50_norm]\n\tEmbedding size: 109282\n[./data/letor/r5w/embed.idf]\n\tEmbedding size: 193367\nGenerate numpy embed: (193368, 50)\nGenerate numpy embed: (193368, 1)\n"
],
[
"import json\nletor_config = json.loads(open('./config/letor07_mp_fold1.model').read())\n#device = torch.device(\"cuda\")\n#device = torch.device(\"cpu\")\nselect_device = torch.device(\"cpu\")\nrank_device = torch.device(\"cuda\")",
"_____no_output_____"
],
[
"Letor07Path = letor_config['data_dir']\n\nletor_config['fill_word'] = loader._PAD_\nletor_config['embedding'] = loader.embedding\nletor_config['feat_size'] = loader.feat_size\nletor_config['vocab_size'] = loader.embedding.shape[0]\nletor_config['embed_dim'] = loader.embedding.shape[1]\nletor_config['pad_value'] = loader._PAD_\n\npair_gen = PairGenerator(rel_file=Letor07Path + '/relation.train.fold%d.txt'%(letor_config['fold']), \n config=letor_config)",
"[./data/letor/r5w/relation.train.fold1.txt]\n\tInstance size: 47828\nPair Instance Count: 325439\n"
],
[
"from deeprank import select_module\nfrom deeprank import rank_module",
"_____no_output_____"
],
[
"letor_config['max_match'] = 20\nletor_config['win_size'] = 5\nselect_net = select_module.QueryCentricNet(config=letor_config, out_device=rank_device)\nselect_net = select_net.to(select_device)\nselect_net.train()",
"_____no_output_____"
],
[
"'''\nletor_config['q_limit'] = 20\nletor_config['d_limit'] = 2000\nletor_config['max_match'] = 20\nletor_config['win_size'] = 5\nletor_config['finetune_embed'] = True\nletor_config['lr'] = 0.0001\nselect_net = select_module.PointerNet(config=letor_config)\nselect_net = select_net.to(device)\nselect_net.embedding.weight.data.copy_(torch.from_numpy(loader.embedding))\nselect_net.train()\nselect_optimizer = optim.RMSprop(select_net.parameters(), lr=letor_config['lr'])\n'''",
"_____no_output_____"
],
[
"letor_config[\"dim_q\"] = 1\nletor_config[\"dim_d\"] = 1\nletor_config[\"dim_weight\"] = 1\nletor_config[\"c_reduce\"] = [1, 1]\nletor_config[\"k_reduce\"] = [1, 50]\nletor_config[\"s_reduce\"] = 1\nletor_config[\"p_reduce\"] = [0, 0]\n\nletor_config[\"c_en_conv_out\"] = 4\nletor_config[\"k_en_conv\"] = 3\nletor_config[\"s_en_conv\"] = 1\nletor_config[\"p_en_conv\"] = 1\n\nletor_config[\"en_pool_out\"] = [1, 1]\nletor_config[\"en_leaky\"] = 0.2\n\nletor_config[\"dim_gru_hidden\"] = 3\n\nletor_config['lr'] = 0.005\nletor_config['finetune_embed'] = False\n\nrank_net = rank_module.DeepRankNet(config=letor_config)\nrank_net = rank_net.to(rank_device)\nrank_net.embedding.weight.data.copy_(torch.from_numpy(loader.embedding))\nrank_net.qw_embedding.weight.data.copy_(torch.from_numpy(loader.idf_embedding))\nrank_net.train()\nrank_optimizer = optim.Adam(rank_net.parameters(), lr=letor_config['lr'])",
"_____no_output_____"
],
[
"def to_device(*variables, device):\n return (torch.from_numpy(variable).to(device) for variable in variables)",
"_____no_output_____"
],
[
"def show_text(x):\n print(' '.join([loader.word_dict[w.item()] for w in x]))",
"_____no_output_____"
],
[
"X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F = \\\n pair_gen.get_batch(data1=loader.query_data, data2=loader.doc_data)\nX1, X1_len, X2, X2_len, Y, F = \\\n to_device(X1, X1_len, X2, X2_len, Y, F, device=rank_device)\n\nshow_text(X2[0])\n\nX1, X2_new, X1_len, X2_len_new, X2_pos = select_net(X1, X2, X1_len, X2_len, X1_id, X2_id)\n\nshow_text(X1[0])\nfor i in range(5):\n print(i, end=' ')\n show_text(X2_new[0][i])",
"britain west indy consular information sheet britain west indy consular information sheet september country description anguilla britain virgin island cayman island montserrat turk caico britain overseas territory comprise britain west indy three embassy caribbean area share responsibility citizen visiting island britain west indy area name refer specific embassy consular jurisdiction appropriate consular information sheet necessary additional information anguilla britain virgin island montserrat consular district embassy located bridgetown barbados anguilla britain virgin island moderately develop economy tourist facilities widely available montserrat economy recover series eruption soufriere hill volcano access southern part island restricted resident concentrated northern designate safe zone accommodations limited airport remains closed most visitor island travel daily ferry antigua traveler check carib world travel antigua telephone sailing times cayman island consular district embassy located kingston jamaica turk caico consular district embassy located nassau bahamas turk caico archipelago eight major island numerous uninhabited cay most tourist facilities located providenciale provo grand turk island dollar unit currency large hotel shop accept credit cards entry requirement tourist stays three month anguilla britain virgin island montserrat stays days turk caico citizen passport naturalize certificate original certify birth certificate photo identification onward return ticket sufficient funds stay cayman island turk caico require payment departure tax person years age old cayman island tax airfare arrange long stays arrange work permit cayman island turk caico traveler contact department immigrate cayman island turk caico immigrate department information regarding entry customs requirement britain west indy citizen contact britain embassy massachusetts avenue nw washington dc telephone nearest britain consulate atlanta boston chicago dallas los angeles new york san francisco internet http nsi org travel britain west indy txt http www britain info org customs regulation anguilla britain virgin island montserrat customs authority enforce strict regulation concerning temporary importation export territory item firearm agriculture product currency advisable contact turk caico customs department specific information regarding customs requirement territory importation firearm turk caico strict forbidden prior approval writing commissioner police crime information petty street crime occur visitor leave valuable unattended hotel rooms beach turk caico visitor dial emergency police fire medical assistance loss theft abroad passport report immediately local police nearest embassy consulate citizen refer department states pamphlet safe trip abroad ways promote trouble free journey publication tip traveler caribbean available mail superintendent document government printing office washington dc internet http www gpoaccess gov index html bureau consular affair home page http travel state gov criminal penalty foreign country citizen subject country law regulation differ significant united states afford protection available individual law penalty break law severe united states similar offense person violate law anguilla britain virgin island montserrat cayman island turk caico unknowing expel arrest imprison penalty possession use traffick illegal drug territory strict convict offender expect jail sentence heavy fine medical facilities medical facilities available limited anguilla britain 
virgin island turk caico small public hospital grand turk private clinic provo clinic hyperbaric chamber most serious case require medical evacuate air turk caico united states expensive traveler ensure medical insurance valid cover expense air evacuate medical facilities limited montserrat quality medical care cayman island comparable procedure critical care require medical evacuate united states cayman island highly develop dive sector hyperbaric chamber available doctor hospital expect immediate cash payment health service medical insurance medical insurance valid united states medicare medicaid program payment medical service united states uninsured traveler require medical care overseas face extreme difficulty check insurance company confirm policy apply overseas provision medical evacuate ascertain payment made overseas hospital doctor reimburse later expenses incur insurance policy coverage psychiatric treatment disposition remains event death useful information medical emergency abroad overseas insurance program provided department state bureau consular affair brochure medical information america travel abroad available bureau consular affair home page autofax health information information vaccination health precaution obtain center disease control prevention hotline international traveler fyi trip fax cdc faxx internet site http www cdc gov traffic safety road conditions foreign country citizen encounter road conditions differ significant united states information concerning anguilla britain virgin island montserrat turk caico provided general reference total accurate particular location circumstance four territory driving left hand side road anguilla britain virgin island montserrat safety public transportation fair urban road conditions maintenance good rural road conditions maintenance fair available roadside assistance fair night driving mountain driving britain virgin island montserrat done great caution steep narrow winding nature road turk caico safety public transportation good urban road conditions maintenance fair rural road conditions maintenance fair available roadside assistance poor specific information concerning turk caico driver permit vehicle inspection road tax mandatory insurance contact turk caico tourist board cayman island safety public transportation good urban road conditions maintenance good rural road conditions maintenance good available roadside assistance good specific information concerning britain driver permit vehicle inspection road tax mandatory insurance contact britain national tourist organization offices new york telephone internet http www com aviation safety oversight federal aviation administration faa assess anguilla montserrat civil aviation authority category compliance international aviation safety standard oversight anguilla montserrat air carrier operation consultation correct deficiency ongoing air carrier territory permit conduct limited operation subject heighten faa surveillance federal aviation administration faa assess turk caico britain virgin island cayman island civil aviation authority category compliance international aviation safety standard oversight great britain air carrier operation information traveler contact department transportation visit faa internet website http www faa gov avr iasa department defense dod separate assess foreign air carrier suitability official provider air service information regarding dod policy specific carrier traveler contact dod disaster preparedness montserrat volcano prone territory 
volcanic eruption abate volcano dangerous caribbean country affected hurricane hurricane season runs june november general information natural disaster preparedness available internet federal emergency management agency fema http www fema gov children issue information international adoption children international parental child abduct international child support enforce issue please refer internet site http travel state gov children issue html telephone registration embassy consulate location citizen living visiting anguilla britain virgin island montserrat encourage register consular section embassy bridgetown barbados jurisdiction territory obtain update information travel security region consular section located america life insurance company alico building cheapside bridgetown barbados telephone fax hour operation monday friday local holiday citizen register consular agent antigua address bluff house pigeon point english harbour telephone fax e mail address ag consular agent hour operation monday friday local holiday please call appointments embassy consulate turk caico citizen living visiting turk caico encourage register consular section embassy nassau bahamas jurisdiction territory embassy physically located mcdonald restaurant queen street downtown nassau reach monday friday bahamian holiday noon telephone hour fax additional information travel security territory citizen contact embassy turk caico tourism office embassy consulate cayman island citizen living visiting cayman island encourage register consular section embassy kingston jamaica jurisdiction territory obtain update information travel security consular section located floor life jamaica building oxford road kingston telephone fax office hour monday friday jamaica holiday window service hour duty officer contact embassy main switchboard telephone chancery located three block away consular section jamaica mutual life center oxford road kingston citizen register consular agency george town grand cayman largest three cayman island consular agency located office adventure travel seven mile beach telephone fax internet office hour noon monday friday jamaica holiday replace consular information sheet dated march update section country description entry requirement customs regulation crime information criminal penalty medical information traffic safety road conditions aviation safety oversight disaster preparedness children issue registration embassy consulate location consular information sheet travel warning travel state gov britain windy html $ [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] 
[PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] 
[PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]\n"
],
[
"print(X2_pos[20].shape)\nprint(len(X2_pos))\nprint(len(X2))\nprint(X2_pos[0])\nprint(X2_pos[1])",
"torch.Size([26])\n200\n200\ntensor([ 967., 1043., 1091., 19., 119., 193., 205., 215., 223., 428.,\n 502., 513., 750., 845., 1088., 1093., 1158., 1161., 18., 20.,\n 39., 60., 71., 90., 105., 120., 140., 152., 171., 194.,\n 206., 216., 224., 279., 426., 429., 460., 503.],\n device='cuda:0')\ntensor([ 0., 10., 20., 37., 1., 11., 21., 30., 38.], device='cuda:0')\n"
],
[
"# X1 = X1[:1]\n# X1_len = X1_len[:1]\n# X2 = X2[:1]\n# X2_len = X2_len[:1]\n# X1_id = X1_id[:1]\n# X2_id = X2_id[:1]",
"_____no_output_____"
],
[
"# show_text(X2[0])\n# X1, X2_new, X1_len, X2_len_new = select_net(X1, X2, X1_len, X2_len, X1_id, X2_id)\n# show_text(X1[0])\n# for i in range(5):\n# print(i, end=' ')\n# show_text(X2_new[0][i])",
"_____no_output_____"
],
[
"import time\n\nrank_loss_list = []\n\nstart_t = time.time()\nfor i in range(1000):\n # One Step Forward\n X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F = \\\n pair_gen.get_batch(data1=loader.query_data, data2=loader.doc_data)\n X1, X1_len, X2, X2_len, Y, F = \\\n to_device(X1, X1_len, X2, X2_len, Y, F, device=select_device)\n X1, X2, X1_len, X2_len, X2_pos = select_net(X1, X2, X1_len, X2_len, X1_id, X2_id)\n X2, X2_len = utils.data_adaptor(X2, X2_len, select_net, rank_net, letor_config)\n output = rank_net(X1, X2, X1_len, X2_len, X2_pos)\n \n # Update Rank Net\n rank_loss = rank_net.pair_loss(output, Y)\n print('rank loss:', rank_loss.item())\n rank_loss_list.append(rank_loss.item())\n rank_optimizer.zero_grad()\n rank_loss.backward()\n rank_optimizer.step()\n \nend_t = time.time()\nprint('Time Cost: %s s' % (end_t-start_t))",
"rank loss: 0.8253192901611328\nrank loss: 0.9061598777770996\nrank loss: 0.7371701002120972\nrank loss: 0.6861473917961121\nrank loss: 0.8359648585319519\nrank loss: 0.9824948906898499\nrank loss: 1.086227297782898\nrank loss: 0.9257540702819824\nrank loss: 0.8410568237304688\nrank loss: 0.8319268822669983\nrank loss: 0.8617979288101196\nrank loss: 0.8329433798789978\nrank loss: 0.6752440333366394\nrank loss: 0.9370722770690918\nrank loss: 0.8927839398384094\nrank loss: 0.8497340083122253\nrank loss: 0.7453449964523315\nrank loss: 0.7862136960029602\nrank loss: 0.7250550389289856\nrank loss: 0.7466641068458557\nrank loss: 0.8688445091247559\nrank loss: 0.8354814052581787\nrank loss: 0.7522762417793274\nrank loss: 0.6215545535087585\nrank loss: 0.862816572189331\nrank loss: 0.7009199857711792\nrank loss: 0.6947179436683655\nrank loss: 0.7155901193618774\nrank loss: 0.739418625831604\nrank loss: 0.7291609048843384\nrank loss: 0.7221815586090088\nrank loss: 0.7942367196083069\nrank loss: 0.645375669002533\nrank loss: 0.8599945902824402\nrank loss: 0.7585619688034058\nrank loss: 0.9197361469268799\nrank loss: 0.808783769607544\nrank loss: 0.7190381288528442\nrank loss: 0.7260241508483887\nrank loss: 0.6644251346588135\nrank loss: 0.7117756009101868\nrank loss: 0.7056992650032043\nrank loss: 0.6694395542144775\nrank loss: 0.6533949375152588\nrank loss: 0.8180677890777588\nrank loss: 0.6237444281578064\nrank loss: 0.6857447028160095\nrank loss: 0.5818533301353455\nrank loss: 0.6223163604736328\nrank loss: 0.5993679165840149\nrank loss: 0.6882268190383911\nrank loss: 0.7574049830436707\nrank loss: 0.7883985638618469\nrank loss: 0.6909044981002808\nrank loss: 0.5793841481208801\nrank loss: 0.6546987295150757\nrank loss: 0.8344674706459045\nrank loss: 0.8723913431167603\nrank loss: 0.7280072569847107\nrank loss: 0.7079762816429138\nrank loss: 0.6346868276596069\nrank loss: 0.5469557642936707\nrank loss: 0.6577250361442566\nrank loss: 0.7035403251647949\nrank loss: 0.6854127645492554\nrank loss: 0.8054794073104858\nrank loss: 0.7555392980575562\nrank loss: 0.8174128532409668\nrank loss: 0.7170220613479614\nrank loss: 0.6334143877029419\nrank loss: 0.7377594113349915\nrank loss: 0.727661669254303\nrank loss: 0.654536247253418\nrank loss: 0.6697186231613159\nrank loss: 0.6948603391647339\nrank loss: 0.6571195125579834\nrank loss: 0.6675547361373901\nrank loss: 0.6355634927749634\nrank loss: 0.6263956427574158\nrank loss: 0.7449415326118469\nrank loss: 0.5782480239868164\nrank loss: 0.7611068487167358\nrank loss: 0.6447526216506958\nrank loss: 0.6705676317214966\nrank loss: 0.6753314137458801\nrank loss: 0.7123861312866211\nrank loss: 0.750924825668335\nrank loss: 0.7021487355232239\nrank loss: 0.5994362235069275\nrank loss: 0.49022066593170166\nrank loss: 0.6561969518661499\nrank loss: 0.6952511668205261\nrank loss: 0.6888825297355652\nrank loss: 0.5608211159706116\nrank loss: 0.654406726360321\nrank loss: 0.8109426498413086\nrank loss: 0.7869074940681458\nrank loss: 0.6555036306381226\nrank loss: 0.5492518544197083\nrank loss: 0.6668916344642639\nrank loss: 0.7097367644309998\nrank loss: 0.8545863032341003\nrank loss: 0.5428604483604431\nrank loss: 0.7902082800865173\nrank loss: 0.5704790949821472\nrank loss: 0.6370575428009033\nrank loss: 0.5423408150672913\nrank loss: 0.867685079574585\nrank loss: 0.6105930805206299\nrank loss: 0.475509911775589\nrank loss: 0.7611523270606995\nrank loss: 0.7158710956573486\nrank loss: 0.6647457480430603\nrank loss: 0.6767051815986633\nrank loss: 
0.5616432428359985\nrank loss: 0.7292777299880981\nrank loss: 0.588063657283783\nrank loss: 0.751769483089447\nrank loss: 0.8119333982467651\nrank loss: 0.6637603640556335\nrank loss: 0.5659765005111694\nrank loss: 0.6056384444236755\nrank loss: 0.7444398999214172\nrank loss: 0.7454335689544678\nrank loss: 0.7483544945716858\nrank loss: 0.5820091366767883\nrank loss: 0.5047313570976257\nrank loss: 0.6184120178222656\nrank loss: 0.7028547525405884\nrank loss: 0.4972821772098541\nrank loss: 0.7774192094802856\nrank loss: 0.767616868019104\nrank loss: 0.5514692068099976\nrank loss: 0.643605649471283\nrank loss: 0.6625069379806519\nrank loss: 0.7233222723007202\nrank loss: 0.5310849547386169\nrank loss: 0.6904588341712952\nrank loss: 0.7665449380874634\nrank loss: 0.7415623068809509\nrank loss: 0.6009477972984314\nrank loss: 0.5934821367263794\nrank loss: 0.4787769913673401\nrank loss: 0.8038679361343384\nrank loss: 0.7596362233161926\nrank loss: 0.6418794989585876\nrank loss: 0.7452442646026611\nrank loss: 0.7411455512046814\nrank loss: 0.7134140729904175\nrank loss: 0.7329197525978088\nrank loss: 0.6332678198814392\nrank loss: 0.6853761672973633\nrank loss: 0.6416361927986145\nrank loss: 0.6676276922225952\nrank loss: 0.6256608963012695\nrank loss: 0.6293230056762695\nrank loss: 0.6936831474304199\nrank loss: 0.7669994235038757\nrank loss: 0.621979832649231\nrank loss: 0.6652216911315918\nrank loss: 0.5823826193809509\nrank loss: 0.5484753847122192\nrank loss: 0.6745011806488037\nrank loss: 0.7030272483825684\nrank loss: 0.589518666267395\nrank loss: 0.7250195145606995\nrank loss: 0.643869161605835\nrank loss: 0.5818212032318115\nrank loss: 0.49639928340911865\nrank loss: 0.6948899030685425\nrank loss: 0.5694196820259094\nrank loss: 0.6521367430686951\nrank loss: 0.655613362789154\nrank loss: 0.6057036519050598\nrank loss: 0.5698220133781433\nrank loss: 0.7007342576980591\nrank loss: 0.7440113425254822\nrank loss: 0.6381808519363403\nrank loss: 0.7143141031265259\nrank loss: 0.5456219911575317\nrank loss: 0.563964307308197\nrank loss: 0.6509751081466675\nrank loss: 0.6574314832687378\nrank loss: 0.6345028281211853\nrank loss: 0.7750428318977356\nrank loss: 0.7941579818725586\nrank loss: 0.6345378756523132\nrank loss: 0.7104957699775696\nrank loss: 0.7166115641593933\nrank loss: 0.5856930613517761\nrank loss: 0.5825032591819763\nrank loss: 0.6563040018081665\nrank loss: 0.7580392360687256\nrank loss: 0.6157196164131165\nrank loss: 0.7710130214691162\nrank loss: 0.6196264624595642\nrank loss: 0.6800066828727722\nrank loss: 0.5978471040725708\nrank loss: 0.5798062682151794\nrank loss: 0.647723913192749\nrank loss: 0.6615773439407349\nrank loss: 0.6547691226005554\nrank loss: 0.6185985803604126\nrank loss: 0.6838380098342896\nrank loss: 0.6108643412590027\nrank loss: 0.4659000337123871\nrank loss: 0.5920618772506714\nrank loss: 0.61629319190979\nrank loss: 0.7056947946548462\nrank loss: 0.6112760305404663\nrank loss: 0.6107545495033264\nrank loss: 0.5221754312515259\nrank loss: 0.7269941568374634\nrank loss: 0.7442633509635925\nrank loss: 0.675088107585907\nrank loss: 0.7163035273551941\nrank loss: 0.5927048921585083\nrank loss: 0.7673153281211853\nrank loss: 0.621222734451294\nrank loss: 0.7173669338226318\nrank loss: 0.7313451766967773\nrank loss: 0.5910603404045105\nrank loss: 0.640478253364563\nrank loss: 0.5678544640541077\nrank loss: 0.5833525061607361\nrank loss: 0.6422555446624756\nrank loss: 0.6571154594421387\nrank loss: 0.5806067585945129\nrank loss: 0.7988702058792114\nrank loss: 
0.7815283536911011\nrank loss: 0.6340240240097046\nrank loss: 0.7146464586257935\nrank loss: 0.7401726245880127\nrank loss: 0.623992383480072\nrank loss: 0.6378418803215027\nrank loss: 0.6037620306015015\nrank loss: 0.6515266299247742\nrank loss: 0.658591628074646\nrank loss: 0.5866996049880981\nrank loss: 0.6697197556495667\nrank loss: 0.5930948257446289\nrank loss: 0.6309228539466858\nrank loss: 0.5535991787910461\nrank loss: 0.6649977564811707\nrank loss: 0.6433483958244324\nrank loss: 0.49392613768577576\nrank loss: 0.4813811779022217\nrank loss: 0.5638495087623596\nrank loss: 0.557433009147644\nrank loss: 0.5753852128982544\nrank loss: 0.5896557569503784\nrank loss: 0.6928886771202087\nrank loss: 0.4539565145969391\nrank loss: 0.5155171751976013\nrank loss: 0.6018991470336914\nrank loss: 0.6476000547409058\nrank loss: 0.6208788156509399\nrank loss: 0.6157430410385132\nrank loss: 0.6680580973625183\nrank loss: 0.6645251512527466\nrank loss: 0.7243916392326355\nrank loss: 0.7339990735054016\nrank loss: 0.53104168176651\nrank loss: 0.6530911326408386\nrank loss: 0.6900074481964111\nrank loss: 0.6268577575683594\nrank loss: 0.6640608906745911\nrank loss: 0.6300787925720215\nrank loss: 0.633104681968689\nrank loss: 0.6400140523910522\nrank loss: 0.5904444456100464\nrank loss: 0.5831116437911987\nrank loss: 0.6452039480209351\nrank loss: 0.6984691619873047\nrank loss: 0.7015999555587769\n"
],
[
"import matplotlib.pyplot as plt \n%matplotlib inline\n\nplt.figure()\nplt.plot(rank_loss_list)\nplt.show()",
"_____no_output_____"
],
[
"torch.save(select_net, \"qcentric.model\")\ntorch.save(rank_net, \"deeprank.model\")",
"_____no_output_____"
],
[
"select_net_e = torch.load(f='qcentric.model')\nrank_net_e = torch.load(f='deeprank.model')\n\nlist_gen = ListGenerator(rel_file=Letor07Path+'/relation.test.fold%d.txt'%(letor_config['fold']),\n config=letor_config)\nmap_v = 0.0\nmap_c = 0.0\n\nwith torch.no_grad():\n for X1, X1_len, X1_id, X2, X2_len, X2_id, Y, F in \\\n list_gen.get_batch(data1=loader.query_data, data2=loader.doc_data):\n #print(X1.shape, X2.shape, Y.shape)\n X1, X1_len, X2, X2_len, Y, F = to_device(X1, X1_len, X2, X2_len, Y, F, device=select_device)\n X1, X2, X1_len, X2_len, X2_pos = select_net_e(X1, X2, X1_len, X2_len, X1_id, X2_id)\n X2, X2_len = utils.data_adaptor(X2, X2_len, select_net, rank_net, letor_config)\n #print(X1.shape, X2.shape, Y.shape)\n pred = rank_net_e(X1, X2, X1_len, X2_len, X2_pos)\n map_o = utils.eval_MAP(pred.tolist(), Y.tolist())\n #print(pred.shape, Y.shape)\n map_v += map_o\n map_c += 1.0\n map_v /= map_c\n\nprint('[Test]', map_v)",
"[./data/letor/r5w/relation.test.fold1.txt]\n\tInstance size: 13652\nList Instance Count: 336\n[Test] 0.5042477865174272\n"
]
],
[
[
"Reward v0: 0.4359386539000405\nReward v1: 0.42572616969349864\nReward v2: 0.4245778777643799",
"_____no_output_____"
]
]
]
| [
"code",
"raw"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
]
]
|
cb72093b16c972311c44b8bbc406041e08b2fc7a | 9,382 | ipynb | Jupyter Notebook | examples/Vision APIs.ipynb | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
]
| null | null | null | examples/Vision APIs.ipynb | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
]
| null | null | null | examples/Vision APIs.ipynb | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
]
| null | null | null | 28.259036 | 386 | 0.465466 | [
[
[
"Comparing Vision APIs\n============\nThis notebook features the various computer vision APIs that pliers interfaces with. These include the Google Vision, Clarifai, and Indico APIs. To compare their perfomance, image recognition features are extracted from an image of an apple.",
"_____no_output_____"
]
],
[
[
"from pliers.tests.utils import get_test_data_path\nfrom os.path import join\nfrom pliers.extractors import (ClarifaiAPIExtractor, IndicoAPIImageExtractor, GoogleVisionAPILabelExtractor)\nfrom pliers.stimuli.image import ImageStim\nfrom pliers.graph import Graph",
"_____no_output_____"
],
[
"# Load the stimulus\nstim_path = join(get_test_data_path(), 'image', 'apple.jpg')\nstim = ImageStim(stim_path)",
"_____no_output_____"
],
[
"# Configure extractions\nclarifai_ext = ClarifaiAPIExtractor()\nindico_ext = IndicoAPIImageExtractor(models=['image_recognition'])\ngoogle_ext = GoogleVisionAPILabelExtractor()",
"WARNING:googleapiclient.discovery_cache:No module named locked_file\nTraceback (most recent call last):\n File \"/Library/Python/2.7/site-packages/googleapiclient/discovery_cache/__init__.py\", line 41, in autodetect\n from . import file_cache\n File \"/Library/Python/2.7/site-packages/googleapiclient/discovery_cache/file_cache.py\", line 36, in <module>\n from oauth2client.locked_file import LockedFile\nImportError: No module named locked_file\n"
],
[
"# Run extractions\nclarifai_res = clarifai_ext.transform(stim)\nindico_res = indico_ext.transform(stim)\ngoogle_res = google_ext.transform(stim)",
"_____no_output_____"
],
[
"clarifai_res.to_df()",
"_____no_output_____"
],
[
"df = indico_res.to_df()\ndf.loc[:, df.sum() > 0.5]",
"_____no_output_____"
],
[
"google_res.to_df()",
"_____no_output_____"
]
],
[
[
"Summary\n--------\nFor the apple image, it is clear that the Google and Clarifai APIs perform best, as both have \"apple\", \"food\", and \"fruit\" in the top features. On the other hand, the only Indico API feature with a probability over 0.5 is \"pomegranate\". Furthermore, the Google API seems to also be less noisy than the Clarifai API, where several object labels have probabilities over 0.9.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb720a310047a26229ceb6d507d458764e8c80ff | 258,242 | ipynb | Jupyter Notebook | 2022/Python-for-finance/Section 17/MC - predict gross profit - ex1.ipynb | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
]
| 1 | 2021-09-10T23:39:55.000Z | 2021-09-10T23:39:55.000Z | 2022/Python-for-finance/Section 17/MC - predict gross profit - ex1.ipynb | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
]
| null | null | null | 2022/Python-for-finance/Section 17/MC - predict gross profit - ex1.ipynb | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
]
| null | null | null | 589.593607 | 118,642 | 0.930921 | [
[
[
"# Predict the firm's future gross profit\n\n- The prediction will start by performing 1000 simulation of the company's expected revenues",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Assumption \nrev_m = 170 #this value rapresent the total revenues of the last year\nrev_stdev = 20 #idea of growth rate (rapresent the sigma of our mdoel > std dev)\niterations = 1000 #n of iteration for simulation applied in the model for our normal distribution",
"_____no_output_____"
],
[
"# creating 1000 values of the normal distribution\nrev = np.random.normal(rev_m, rev_stdev, iterations) # generating random number)\nrev",
"_____no_output_____"
],
[
"plt.figure(figsize=(15, 6))\nplt.plot(rev)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Results of the chart above\nIt's obvius that the result show up the avg revenues as 170 milion. Also, most of all data are between 150 and 190 million.\nThe 2 value (150 and 190) rapresent the standard deviation from our avg of the normal distribution.\n- 190 - 170 = 20\n- 170 - 150 = 20\n\nso the std dev is 20m",
"_____no_output_____"
]
],
[
[
"#calculating the inpact on GODS, assumig it is 60% of revenues.\n\nCOGS = - (rev * np.random.normal(0.6,0.1))\n \nplt.figure(figsize=(15, 6))\nplt.plot(COGS)\nplt.show()",
"_____no_output_____"
],
[
"COGS.mean()",
"_____no_output_____"
],
[
"COGS.std()\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
cb722456da88c2bdfd2ac648de40d1d830c2b048 | 42,441 | ipynb | Jupyter Notebook | ivp_odes/Stability regions.ipynb | JiaheXu/MATH | 9cb2b412ba019794702cacf213471742745d17a6 | [
"MIT"
]
| null | null | null | ivp_odes/Stability regions.ipynb | JiaheXu/MATH | 9cb2b412ba019794702cacf213471742745d17a6 | [
"MIT"
]
| null | null | null | ivp_odes/Stability regions.ipynb | JiaheXu/MATH | 9cb2b412ba019794702cacf213471742745d17a6 | [
"MIT"
]
| null | null | null | 206.024272 | 13,076 | 0.878278 | [
[
[
"# Plotting Approximate Stability Regions",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as pt\n\nfrom cmath import exp, pi",
"_____no_output_____"
],
[
"def approximate_stability_region_1d(step_function, make_k, prec=1e-5):\n def is_stable(k):\n y = 1\n for i in range(20):\n if abs(y) > 2:\n return False\n y = step_function(y, i, 1, lambda t, y: k*y)\n return True\n \n def refine(stable, unstable):\n assert is_stable(make_k(stable))\n assert not is_stable(make_k(unstable))\n while abs(stable-unstable) > prec:\n mid = (stable+unstable)/2\n if is_stable(make_k(mid)):\n stable = mid\n else:\n unstable = mid\n else:\n return stable\n\n mag = 1\n if is_stable(make_k(mag)):\n mag *= 2\n while is_stable(make_k(mag)):\n mag *= 2\n\n if mag > 2**8:\n return mag\n return refine(mag/2, mag)\n else:\n mag /= 2\n while not is_stable(make_k(mag)):\n mag /= 2\n\n if mag < prec:\n return mag\n return refine(mag, mag*2)",
"_____no_output_____"
],
[
"def plot_stability_region(center, stepper):\n def make_k(mag):\n return center+mag*exp(1j*angle)\n\n stab_boundary = []\n for angle in np.linspace(0, 2*np.pi, 100):\n stable_mag = approximate_stability_region_1d(stepper, make_k)\n stab_boundary.append(make_k(stable_mag))\n \n stab_boundary = np.array(stab_boundary)\n pt.grid()\n pt.axis(\"equal\")\n pt.plot(stab_boundary.real, stab_boundary.imag)",
"_____no_output_____"
],
[
"def fw_euler_step(y, t, h, f):\n return y + h * f(t, y)\n\nplot_stability_region(-1, fw_euler_step)",
"_____no_output_____"
],
[
"def heun_step(y, t, h, f):\n yp1_fw_euler = y + h * f(t, y)\n return y + 0.5*h*(f(t, y) + f(t+h, yp1_fw_euler))\n\nplot_stability_region(-1, heun_step)",
"_____no_output_____"
],
[
"def rk4_step(y, t, h, f):\n k1 = f(t, y)\n k2 = f(t+h/2, y + h/2*k1)\n k3 = f(t+h/2, y + h/2*k2)\n k4 = f(t+h, y + h*k3)\n return y + h/6*(k1 + 2*k2 + 2*k3 + k4)\n\nplot_stability_region(-1, rk4_step)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7224fed9b2c48088af8b3aa04157b9b709395c | 53,061 | ipynb | Jupyter Notebook | climate_starter.ipynb | VicHug27/sqlalchemy-challenge | ca317fb5190d88e3f3bf9b600a1baf7a446446a5 | [
"ADSL"
]
| null | null | null | climate_starter.ipynb | VicHug27/sqlalchemy-challenge | ca317fb5190d88e3f3bf9b600a1baf7a446446a5 | [
"ADSL"
]
| null | null | null | climate_starter.ipynb | VicHug27/sqlalchemy-challenge | ca317fb5190d88e3f3bf9b600a1baf7a446446a5 | [
"ADSL"
]
| null | null | null | 107.628803 | 26,680 | 0.86491 | [
[
[
"%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport datetime as dt",
"_____no_output_____"
]
],
[
[
"# Reflect Tables into SQLAlchemy ORM",
"_____no_output_____"
]
],
[
[
"# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect",
"_____no_output_____"
],
[
"# create engine to hawaii.sqlite\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")",
"_____no_output_____"
],
[
"# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)",
"_____no_output_____"
],
[
"# View all of the classes that automap found\nBase.classes.keys()",
"_____no_output_____"
],
[
"# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station",
"_____no_output_____"
],
[
"# Create our session (link) from Python to the DB\nsession = Session(engine)\n\ninspector = inspect(engine)\ninspector.get_table_names()",
"_____no_output_____"
]
],
[
[
"# Exploratory Precipitation Analysis",
"_____no_output_____"
]
],
[
[
"# Find the most recent date in the data set.\ndate1 = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\ndate1",
"_____no_output_____"
],
[
"# Design a query to retrieve the last 12 months of precipitation data and plot the results. \n# Starting from the most recent data point in the database. \n# Calculate the date one year from the last date in data set.\nyearago =dt.date(2017, 8, 23) - dt.timedelta(days=365)\n\n\n# Perform a query to retrieve the data and precipitation scores\nqryresults = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= yearago).all()\n\n# Save the query results as a Pandas DataFrame and set the index to the date column\ndf = pd.DataFrame(qryresults, columns=['date', 'precipitation'])\n\n# Sort the dataframe by date\ndf = df.sort_values(\"date\")\n\n# Use Pandas Plotting with Matplotlib to plot the data\ndf.plot(x='date', y='precipitation', rot=90)\nplt.xlabel(\"Date\")\nplt.ylabel(\"Inches\")\n\nyearago",
"_____no_output_____"
],
[
"# Use Pandas to calcualte the summary statistics for the precipitation data\ndf.describe()",
"_____no_output_____"
]
],
[
[
"# Exploratory Station Analysis",
"_____no_output_____"
]
],
[
[
"# Design a query to calculate the total number stations in the dataset\nsession.query(func.count(Station.station)).all()",
"_____no_output_____"
],
[
"# Design a query to find the most active stations (i.e. what stations have the most rows?)\n# List the stations and the counts in descending order.\nsession.query(Measurement.station, func.count(Measurement.station)).\\\ngroup_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()",
"_____no_output_____"
],
[
"# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.\nsession.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\\\nfilter(Measurement.station == 'USC00519281').all()",
"_____no_output_____"
],
[
"# Using the most active station id \"USC00519281\"\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\nhistogramdata = session.query(Measurement.tobs).\\\n filter(Measurement.station == 'USC00519281').\\\n filter(Measurement.date >= yearago).all()\ndf = pd.DataFrame(histogramdata, columns=['tobs'])\ndf.plot.hist(bins=12)\nplt.tight_layout()\nplt.xlabel(\"Temperature\")",
"_____no_output_____"
]
],
[
[
"# Close session",
"_____no_output_____"
]
],
[
[
"# Close Session\nsession.close()",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb72271cfb561bda51689e0cff3c99b272811780 | 19,132 | ipynb | Jupyter Notebook | 04_lenguaje_de plantillas_de_jinja.ipynb | davidrodord56/py221 | 6790b03a43416bb32ff0c114ed278f8c1220e871 | [
"MIT"
]
| null | null | null | 04_lenguaje_de plantillas_de_jinja.ipynb | davidrodord56/py221 | 6790b03a43416bb32ff0c114ed278f8c1220e871 | [
"MIT"
]
| null | null | null | 04_lenguaje_de plantillas_de_jinja.ipynb | davidrodord56/py221 | 6790b03a43416bb32ff0c114ed278f8c1220e871 | [
"MIT"
]
| null | null | null | 24.528205 | 405 | 0.520437 | [
[
[
"[](https://www.pythonista.io)",
"_____no_output_____"
],
[
"# El lenguaje de plantillas de *Jinja*.",
"_____no_output_____"
],
[
"## Etiquetas de *Jinja*.",
"_____no_output_____"
],
[
"*Jinja* ejecuta las expresiones y declaraciones que se encuentran encerrados entre signos de llaves \"```{```\" \"```}```\".",
"_____no_output_____"
],
[
"### Declaraciones.\n\nLas declaraciones deben estar encerradas entre signos de porcentajes \"```%```\".\n\n**Sintaxis:**\n```\n{% <declaración> %} ```",
"_____no_output_____"
],
[
"### Expresiones.\n\nLas declaraciones deben estar encerradas entre llaves nuevamente \"```{```\" \"```}```\".\n\n**Sintaxis:**\n\n```\n{{ <expresión> }} \n```",
"_____no_output_____"
],
[
"### Comentarios.\n\nLas declaraciones deben estar encerradas entre signos de gato \"```#```\".\n\n**Sintaxis:**\n```\n{# <comentario> #} \n```",
"_____no_output_____"
],
[
"## Expresiones.",
"_____no_output_____"
],
[
"### Nombres, índices y atributos.\n\nEn vista de que *Jinja* está basado en *Python*, es posible utilizar su sintaxis para acceder a los elementos y/o atributos de un objeto que se utiliza como parámetro.",
"_____no_output_____"
],
[
"**Ejemplos:**",
"_____no_output_____"
]
],
[
[
"texto = \"Hola, {{persona['nombre'].upper()}}.\"",
"_____no_output_____"
],
[
"template = jinja2.Template(texto)",
"_____no_output_____"
],
[
"template.render(persona={'nombre':'Jose', 'apellido': 'Pérez'})",
"_____no_output_____"
]
],
[
[
"### Filtros.\nUn filtro en *Jinja* es una especie de función que modifica al objeto resultante de una expresión.\n\nEs posible consultar lo diversos filtros que ofrece Jinja en esta liga:\n\nhttps://jinja.palletsprojects.com/en/3.0.x/templates/#filters\n\nEs posible \"encadenar\" varios filtros al texto que se ingresa mediante *pipes* usando la siguiente sintaxis:\n\n```\n{{<expresión> | <filtro 1> | <filtro 2> |... | <filtro n>}}\n```\nDe este modo, la salida de un filtro es la entrada del siguiente.",
"_____no_output_____"
],
[
"**Ejemplos:**",
"_____no_output_____"
],
[
"En las siguientes celdas se utilizarán los filtros ```center``` y ```reverse``` de forma separada y posteriormente combinada.",
"_____no_output_____"
]
],
[
[
"texto = \"Hola, {{persona['nombre'].upper() | center(40)}}.\"\nplantilla = jinja2.Template(texto)\nplantilla.render(persona={'nombre':'Jose', 'apellido': 'Pérez'})",
"_____no_output_____"
],
[
"texto = \"Hola, {{persona['nombre'].upper() | reverse}}.\"\nplantilla = jinja2.Template(texto)\nplantilla.render(persona={'nombre':'Jose', 'apellido': 'Pérez'})",
"_____no_output_____"
],
[
"texto = \"Hola, {{persona['nombre'].upper()| center(40)| reverse}}.\"\nplantilla = jinja2.Template(texto)\nplantilla.render(persona={'nombre':'Jose', 'apellido': 'Pérez'})",
"_____no_output_____"
]
],
[
[
"## Declaraciones.\n\nUna declaración corresponde a un bloque de código que se ejecuta y que incluye varias expresiones con la siguiente sintaxis.\n\n```\n{% <declaración> %}\n...\n<texto y expresiones>\n...\n<% end<expresión correspondiente> %>\n\n```\n*Jinja* puede utilizar expresiones de *Python* como:\n\n* ```if```, ```elif``` y ```else```.\n* ```for```.\n* ```with```.",
"_____no_output_____"
],
[
"### Limitación del ámbito de las declaraciones.\n\nLos nombres y objetos definidos dentro de una declaración pertenecen exclusivamente al ámbito de dicha declaración. Sólo los pares ```<identificador>=<objeto>``` ingresados en el contexto del método ```render()``` pertenecen al ámbito global.",
"_____no_output_____"
],
[
"### Condicionales con ```if``` .\n\nJinja 2 permite el uso del condicionales ```if``` con la siguiente sintaxis:\n\n```\n{% if <expresión lógica>%}\n<Texto y código>\n{% endif %}\n```\n\nCabe hacer notar que los operadores lógicos de Python son los mismos que se utilizan para las expresiones lógicas de este condicional.\n ",
"_____no_output_____"
],
[
"**Ejemplo:**",
"_____no_output_____"
]
],
[
[
"texto = \"Hola {{persona['nombre']}}.\\\n{% if persona['socio'] %}\\\n\\nUsted es socio distinguido.\\\n{% endif %}\"",
"_____no_output_____"
],
[
"print(texto)",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(persona={'nombre':'Jose', 'socio': True})\nprint(resultado)",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(persona={'nombre':'Juan', 'socio': False})\nprint(resultado)",
"_____no_output_____"
]
],
[
[
"### Uso de ```if``` ```else``` y ```elif```.\n\nTambién es posible evaluar más de una expresión con la siguiente sintaxis:\n\n```\n{% if <expresión lógica 1>%}\n<Texto y código>\n{% elif <expresión lógica 2>%}\n<Texto y código>\n...\n...\n{% elif <expresión lógica n>%}\n<Texto y código>\n{% else %}\n<Texto y código>\n\n{% endif %}\n```",
"_____no_output_____"
],
[
"**Ejemplo:**",
"_____no_output_____"
]
],
[
[
"texto = \"Hola {{persona['nombre']}}.\\n\\\n{% if persona['status'] == 'socio' %}\\\nUsted es socio distinguido.\\\n{% elif persona['status'] == 'cliente' %}\\\nUsted tiene una cuenta de cliente.\\\n{% else %}\\\nPor favor indique si es socio o cliente.\\\n{% endif %}\"",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(persona={'nombre':'Jose', 'status': 'socio'})\nprint(resultado)",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(persona={'nombre':'Juan', 'status': 'cliente'})\nprint(resultado)",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(persona={'nombre':'Juan'})\nprint(resultado)",
"_____no_output_____"
]
],
[
[
"### Validaciones adicionales.\n\n*Jinja* cuenta con algunas validaciones que pueden ser consultadas en esta liga: \n\nhttp://jinja.pocoo.org/docs/latest/templates/#builtin-tests",
"_____no_output_____"
],
[
"**Ejemplo:**",
"_____no_output_____"
],
[
"Para este caso se utilizarán los validadores ```even``` y ```odd```.",
"_____no_output_____"
]
],
[
[
"texto = \"El número es {{numero}}.\\n\\\n{% if numero is even %}\\\nEste número es par.\\\n{% elif numero is odd %}\\\nEste número es non.\\\n{% endif %}\"",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(numero=6)\nprint(resultado)",
"_____no_output_____"
]
],
[
[
"### Ciclos con ```for```.\n\nLa evaluación de ciclos con ```for``` se comportan de forma idéntica a *Python*, pero con la siguiente sintaxis:\n\n```\n{% for <elemento> in <iterable> %}\n {{ <elemento> }}\n{% endfor %}\n```",
"_____no_output_____"
],
[
"**Ejemplo:**",
"_____no_output_____"
],
[
"Se utilizará el ciclo ```for``` para una lista que a su vez contiene listas de dos elementos.",
"_____no_output_____"
]
],
[
[
"texto = \"Enlaces recomendados:\\n\\\n{%for nombre, liga in dato %}\\\n \\n{{ nombre }}: {{ liga }} \\\n{% endfor %}\"",
"_____no_output_____"
],
[
"ligas = [['slashdot', 'https://slashdot.org'], \n ['pythonista', 'https://pythonista.mx'], \n ['cloudevel', 'https://cloudevel.com']]",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render(dato=ligas)\nprint(resultado)",
"_____no_output_____"
]
],
[
[
"## Macros.\n\nLo macros se comportan de forma similar a una función de Python y se definen con la siguiente sintaxis:\n\n```\n{% macro <nombre> (<argumentos>) %}\n<texto y código>\n{% endmacro %}\n```\n\nPara invocar un macro se hace de la siguiente manera:\n```\n{{ <nombre>(<parámetros>) }}\n```\n\n**Ejemplo:**",
"_____no_output_____"
]
],
[
[
"texto = '{% macro suma (a, b=2) %}\\\nLa suma es {{a + b}}.\\n\\\n{% endmacro %}\\\n{{ suma(2)}}\\\n{{ suma(2, 3) }}'",
"_____no_output_____"
],
[
"plantilla = jinja2.Template(texto)\nresultado = plantilla.render()\nprint(resultado)",
"_____no_output_____"
]
],
[
[
"## Importación de macros.\n\nEs posible importar un macro desde una plantilla mediante la siguiente sintaxis:\n\n```\n{% from <ruta del archivo en fornato str> import <nombre del macro>\n\n``` ",
"_____no_output_____"
],
[
"**Ejemplo:**\n\nEl archivo [*plantillas/sumadora.txt*](plantillas/sumadora.txt) contiene la siguiente plantilla:\n\n```\n{% macro suma (a, b=2) %}\nLa suma es {{a + b}}.\n{% endmacro %}\n```\n\nl archivo [*plantillas/importadora.txt*](plantillas/importadora.txt) contiene la siguiente plantilla:\n```\n{% from \"sumadora.txt\" import suma %}\\\n{{ suma(3, 4) }}\n```\n",
"_____no_output_____"
]
],
[
[
"plantilla = entorno.get_template(\"importadora.txt\")",
"_____no_output_____"
],
[
"print(plantilla.render())",
"_____no_output_____"
]
],
[
[
"## Herencia de plantillas.\n\nJinja 2 tiene la capacidad de aprovechar plantillas que pueden ser modificadas utilizando el concepto de bloques.\n\n### Bloques.\nLos bloques son etiquetas que lleva un nombre y que se definen con la siguiente sintaxis:\n\n```\n{% block <nombre> %}\n...\n...\n{% endblock% }\n```\nLos bloques pueden ser anidados.\n\n### Herencia con _extends_.\n\nEs posible crear una nueva plantilla partir de mediante la siguiente sintaxis:\n\n```{% extends '<ruta de la platilla de origen>' %}\n```\n\nEsto traerá consigo el contenido completo de la plantilla de origen y es posible sobrescribir los bloques simpremente redefiniéndolos.",
"_____no_output_____"
],
[
"**Ejemplo:**\n\nEl archivo [*plantillas/plantilla_base.html*](plantillas/plantilla_base.html) contiene el siguiente código.\n\n``` html\n<!DOCTYPE html>\n<html>\n<head>\n {% block head %}\n <link rel=\"stylesheet\" href=\"style.css\" />\n <title>Bienvenidos a {% block title%}Pythonista{% endblock %}</title>\n {% endblock %}\n</head>\n<body>\n <div id=\"content\">{% block content %}Hola, Bienvenidos.{% endblock %}</div>\n <div id=\"footer\">\n {% block footer %}\n © Copyright 2018 <a href=\"https://pythonista.io/\">Pythonista®.</a>.\n {% endblock %}\n </div>\n</body>\n```",
"_____no_output_____"
]
],
[
[
"plantilla = entorno.get_template(\"plantilla_base.html\")",
"_____no_output_____"
],
[
"print(plantilla.render())",
"_____no_output_____"
]
],
[
[
"El archivo [*plantillas/plantilla_hija.html*](plantillas/plantilla_hija.html) contiene el siguiente código, el cual hereda el código del archivo *plantilla_base.html*.\n\n``` html\n{% extends \"plantilla_base.html\" %}\n{% block title %} Cloudevel {%endblock %}\n{% block footer %}\n © Copyright 2018 <a href=\"https://cloudevel.com/\">Cloudevel.</a>.\n{% endblock %}\n```",
"_____no_output_____"
]
],
[
[
"plantilla = entorno.get_template(\"plantilla_hija.html\")",
"_____no_output_____"
],
[
"print(plantilla.render())",
"_____no_output_____"
]
],
[
[
"### La función *super()*.\n\nEsta función de Jinja 2 es similar a la super() de Python, y permite traer el contenido del bloque original para ser reutilizado en el nuevo bloque.",
"_____no_output_____"
],
[
"**Ejemplo:**\n\nEl archivo [*plantillas/plantilla_superpuesta.html*](plantillas/plantilla_superpuesta.html) contiene el siguiente código, el cual hereda el código del archivo *plantillas/plantilla_base.html*, pero usa la función *super()* para traer el bloque de texto que sobreescribió.\n\n```\n{% extends \"plantilla_base.html\" %}\n{% block title %} \nCloudevel, empresa hermana de \n{{ super() }}\n{%endblock %}\n{% block footer %}\n © Copyright 2018 <a href=\"https://cloudevel.com/\">Cloudevel.</a>.\n{{ super() }}\n{% endblock %}\n```\n\n",
"_____no_output_____"
],
[
"**Nota:** Asegúrese que la ruta de la celda de abajo coresponda a la de la celda superior.",
"_____no_output_____"
]
],
[
[
"plantilla = entorno.get_template(\"plantilla_superpuesta.html\")",
"_____no_output_____"
],
[
"print(plantilla.render())",
"_____no_output_____"
]
],
[
[
"<p style=\"text-align: center\"><a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Licencia Creative Commons\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by/4.0/80x15.png\" /></a><br />Esta obra está bajo una <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>\n<p style=\"text-align: center\">© José Luis Chiquete Valdivieso. 2021.</p>",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
]
|
cb72276e59af401dbebfb5bfd85f85a09634f7a7 | 531,477 | ipynb | Jupyter Notebook | notebooks/3-train-imagenette.ipynb | Algovera-ai/onshore | a01e36070fd9d100c1f032a2182c338e934ba4b9 | [
"MIT"
]
| 2 | 2021-11-12T17:18:07.000Z | 2022-03-21T07:54:20.000Z | notebooks/3-train-imagenette.ipynb | Algovera-ai/onshore | a01e36070fd9d100c1f032a2182c338e934ba4b9 | [
"MIT"
]
| null | null | null | notebooks/3-train-imagenette.ipynb | Algovera-ai/onshore | a01e36070fd9d100c1f032a2182c338e934ba4b9 | [
"MIT"
]
| null | null | null | 827.845794 | 135,796 | 0.953648 | [
[
[
"from IPython.display import Image ",
"_____no_output_____"
]
],
[
[
"This is a follow-on from Tutorial 1, where we browsed the Ocean marketplace and downloaded the imagenette dataset. In this tutorial, we will create a model that trains (and overfits) on the small amount of sample data. Once we know that the data interface of the input is compatible with our model (and that the model can successfully overfit on the sample data), we can be confident enough to send the model to train on the complete dataset.",
"_____no_output_____"
],
[
"Now let's inspect the sample data. The data provider should supply it in the same format as the whole dataset. This helps us as data scientists to write scripts that run on both the sample data and the whole dataset. We call this the **interface** of the data.",
"_____no_output_____"
]
],
[
[
"from pathlib import Path\nimagenette_dir = Path('imagenette2-sample')\nprint(f\"Sub-directories: {sorted(list(imagenette_dir.glob('*')))}\")",
"Sub-directories: [PosixPath('imagenette2-sample/train'), PosixPath('imagenette2-sample/val')]\n"
],
[
"sorted(list(imagenette_dir.glob('*')))",
"_____no_output_____"
],
[
"train_dir, val_dir = sorted(list(imagenette_dir.glob('*')))",
"_____no_output_____"
],
[
"print(f\"Sub-directories in train: {sorted(list(train_dir.glob('*/*')))}\")\nprint(f\"Sub-directories in val: {sorted(list(val_dir.glob('*/*')))}\")",
"Sub-directories in train: [PosixPath('imagenette2-sample/train/n01440764/ILSVRC2012_val_00000293.JPEG'), PosixPath('imagenette2-sample/train/n02102040/ILSVRC2012_val_00000665.JPEG')]\nSub-directories in val: [PosixPath('imagenette2-sample/val/n01440764/ILSVRC2012_val_00009111.JPEG'), PosixPath('imagenette2-sample/val/n02102040/ILSVRC2012_val_00004650.JPEG')]\n"
]
],
[
[
"It seems like both the training and validation directories have folders for each category of image that contain the image files. Of course, we could read the dataset docs if this wasn't immediately clear.",
"_____no_output_____"
]
],
[
[
"train_images = sorted(list(train_dir.glob('*/*')))\nval_images = sorted(list(val_dir.glob('*/*')))\nprint(f\"Number of train images:\", len(train_images))\nprint(f\"Number of val images:\", len(val_images))",
"Number of train images: 2\nNumber of val images: 2\n"
]
],
[
[
"We will use the fast.ai library to train a simple image classifier. ",
"_____no_output_____"
]
],
[
[
"from fastai.vision.all import *",
"_____no_output_____"
]
],
[
[
"First, we will attempt to train as normal (using both the training and validation sets) to ensure that all of the images load without any errors. We start by creating the dataloaders:",
"_____no_output_____"
]
],
[
[
"path = Path('imagenette2-sample.tgz')",
"_____no_output_____"
],
[
"import xtarfile as tarfile\ntar = tarfile.open(path, \"r:gz\")",
"_____no_output_____"
],
[
"from PIL import Image\nimport io\n\nimages = []\nfor member in tar.getmembers():\n f = tar.extractfile(member)\n if f is not None:\n image_data = f.read()\n image = Image.open(io.BytesIO(image_data)) \n images.append(image)",
"_____no_output_____"
],
[
"path = Path(\"imagenette2-sample\")",
"_____no_output_____"
],
[
"dls = ImageDataLoaders.from_folder(path, train='train', valid='val', \n item_tfms=RandomResizedCrop(128, min_scale=0.35), batch_tfms=Normalize.from_stats(*imagenet_stats), bs=2)",
"_____no_output_____"
]
],
[
[
"We can visualise the images in the training set as follows:",
"_____no_output_____"
]
],
[
[
"dls.show_batch()",
"_____no_output_____"
]
],
[
[
"We choose a simple ResNet-34 architecture.",
"_____no_output_____"
]
],
[
[
"learn = cnn_learner(dls, resnet34, metrics=accuracy, pretrained=False)",
"_____no_output_____"
]
],
[
[
"And run training for 8 epochs with a learning rate of 1e-4.",
"_____no_output_____"
]
],
[
[
"learn.fit_one_cycle(8, 1e-4)",
"_____no_output_____"
]
],
[
[
"As you can see, the accuracy is 50%, which is no better than random guessing for two classes. We can visualise the results as follows. Note that the results are on the validation images.",
"_____no_output_____"
]
],
[
[
"learn.show_results()",
"_____no_output_____"
]
],
[
[
"The reason for the poor accuracy is that the training set is not large enough for the model to generalize to the validation set. Thus, while we have confirmed that both the training and validation images load correctly, we have not confirmed that our selected model trains properly. To check this, we will instead use the training set for validation. This is a very simple case for the model, since it does not have to learn to generalise and can simply memorise the input data. If the model cannot achieve this, there must be some bug in the code. Let's create new dataloaders for this scenario:",
"_____no_output_____"
]
],
[
[
"dls_overfit = ImageDataLoaders.from_folder(imagenette_dir, train='train', valid='train', \n item_tfms=RandomResizedCrop(128, min_scale=0.35), batch_tfms=Normalize.from_stats(*imagenet_stats), bs=2)",
"_____no_output_____"
],
[
"dls_overfit.show_batch()",
"_____no_output_____"
],
[
"learn_overfit = cnn_learner(dls_overfit, resnet34, metrics=accuracy, pretrained=False)",
"_____no_output_____"
],
[
"learn_overfit.fit_one_cycle(8, 1e-4)",
"_____no_output_____"
]
],
[
[
"Note that the results are now on the training images.",
"_____no_output_____"
]
],
[
[
"learn_overfit.show_results()",
"_____no_output_____"
],
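[
"# Added sketch (not part of the original run): because the 'validation' set here is the\n# training set itself, get_preds() scores the training images, so we can check directly\n# that the model has memorised them. Assumes learn_overfit from the cells above.\npreds_chk, targs_chk = learn_overfit.get_preds()\nprint('training accuracy:', (preds_chk.argmax(dim=1) == targs_chk).float().mean().item())",
"_____no_output_____"
],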
[
"preds, targs = learn_overfit.get_preds()",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb72403c02cc3b7d67985845c05f900c27013727 | 709,396 | ipynb | Jupyter Notebook | machine_translation_demo.ipynb | mvdwerve/transformer | d36d983570a667de7f024f3b340b5e67a4c54f39 | [
"MIT"
]
| 52 | 2019-03-10T08:22:29.000Z | 2022-03-31T06:24:41.000Z | machine_translation_demo.ipynb | mvdwerve/transformer | d36d983570a667de7f024f3b340b5e67a4c54f39 | [
"MIT"
]
| null | null | null | machine_translation_demo.ipynb | mvdwerve/transformer | d36d983570a667de7f024f3b340b5e67a4c54f39 | [
"MIT"
]
| 13 | 2019-03-10T12:44:33.000Z | 2022-01-11T09:18:14.000Z | 494.008357 | 22,636 | 0.945008 | [
[
[
"from matplotlib import pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds",
"_____no_output_____"
],
[
"tf.__version__",
"_____no_output_____"
],
[
"model = tf.keras.models.load_model(\"runs/machine_translation/2\")",
"_____no_output_____"
]
],
[
[
"The greedy-decoding steps below follow the evaluation section of the TensorFlow transformer tutorial: https://www.tensorflow.org/beta/tutorials/text/transformer#evaluate",
"_____no_output_____"
]
],
[
[
"tokenizer_pt = tfds.features.text.SubwordTextEncoder.load_from_file(\n \"subwords/ted_hrlr_translate/pt_to_en/subwords_pt\")\ntokenizer_en = tfds.features.text.SubwordTextEncoder.load_from_file(\n \"subwords/ted_hrlr_translate/pt_to_en/subwords_en\")",
"_____no_output_____"
],
[
"inp_sentence = \"este é um problema que temos que resolver.\"",
"_____no_output_____"
]
],
[
[
"real translation: \"this is a problem we have to solve .\"",
"_____no_output_____"
]
],
[
[
"inp = tf.expand_dims([tokenizer_pt.vocab_size] + tokenizer_pt.encode(inp_sentence) + [tokenizer_pt.vocab_size + 1], 0)\ntar = tf.expand_dims([tokenizer_en.vocab_size], 0)",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
],
[
"tar = tf.concat([tar, preds], axis=-1)\ntokenizer_en.decode(tar[0].numpy()[1:])",
"_____no_output_____"
],
[
"preds, enc_enc_attention, dec_dec_attention, enc_dec_attention = model([inp, tar])\npreds = tf.argmax(preds[:, -1:, :], axis=-1, output_type=tf.int32)\npreds",
"_____no_output_____"
]
],
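[
[
"# A compact greedy-decoding loop equivalent to the repeated predict/argmax/concat cells\n# above (added as an illustrative sketch, not part of the original run). It stops once the\n# English end token (tokenizer_en.vocab_size + 1, i.e. 8088) is produced or after 40 steps.\ntar_loop = tf.expand_dims([tokenizer_en.vocab_size], 0)\nfor _ in range(40):\n    preds_loop, _, _, _ = model([inp, tar_loop])\n    next_id = tf.argmax(preds_loop[:, -1:, :], axis=-1, output_type=tf.int32)\n    if int(next_id[0, 0]) == tokenizer_en.vocab_size + 1:\n        break\n    tar_loop = tf.concat([tar_loop, next_id], axis=-1)\nprint(tokenizer_en.decode(tar_loop[0].numpy()[1:]))",
"_____no_output_____"
]
],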
[
[
"`8088` (i.e. `tokenizer_en.vocab_size + 1`) is the end token, so decoding stops here.",
"_____no_output_____"
],
[
"Visualizing only encoder-decoder attention heads for the final prediction",
"_____no_output_____"
]
],
[
[
"enc_dec_attention_0, enc_dec_attention_1, enc_dec_attention_2, enc_dec_attention_3 = \\\n enc_dec_attention[\"layer_0\"][0], enc_dec_attention[\"layer_1\"][0], enc_dec_attention[\"layer_2\"][0], enc_dec_attention[\"layer_3\"][0]",
"_____no_output_____"
],
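[
"# Added check (illustrative): for this single example, each layer's encoder-decoder\n# attention is expected to have shape (num_heads, target_length, input_length).\nprint(enc_dec_attention_0.shape)",
"_____no_output_____"
],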
[
"xticklabels = [\"##START##\"] + [tokenizer_pt.decode([v]) for v in inp.numpy()[0][1:-1]] + [\"##END##\"]\nyticklabels = [\"##START##\"] + [tokenizer_en.decode([v]) for v in tar.numpy()[0][1:]]",
"_____no_output_____"
],
[
"# https://matplotlib.org/users/colormaps.html\nfor i, cmap in enumerate([\"Reds\", \"spring\", \"summer\", \"autumn\", \"winter\", \"cool\", \"Wistia\", \"Oranges\"]):\n fig = plt.figure()\n fig, ax = plt.subplots(1,1, figsize=(12,12))\n heatplot = ax.imshow(enc_dec_attention_0[i].numpy(), cmap=cmap)\n ax.set_xticks(np.arange(11))\n ax.set_yticks(np.arange(13))\n ax.set_xticklabels(xticklabels)\n ax.set_yticklabels(yticklabels)\n plt.colorbar(heatplot)\n plt.title(\"Layer 0, Attention Head %d\" % (i + 1))",
"_____no_output_____"
],
[
"# https://matplotlib.org/users/colormaps.html\nfor i, cmap in enumerate([\"Reds\", \"spring\", \"summer\", \"autumn\", \"winter\", \"cool\", \"Wistia\", \"Oranges\"]):\n fig = plt.figure()\n fig, ax = plt.subplots(1,1, figsize=(12,12))\n heatplot = ax.imshow(enc_dec_attention_1[i].numpy(), cmap=cmap)\n ax.set_xticks(np.arange(11))\n ax.set_yticks(np.arange(13))\n ax.set_xticklabels(xticklabels)\n ax.set_yticklabels(yticklabels)\n plt.colorbar(heatplot)\n plt.title(\"Layer 1, Attention Head %d\" % (i + 1))",
"_____no_output_____"
],
[
"# https://matplotlib.org/users/colormaps.html\nfor i, cmap in enumerate([\"Reds\", \"spring\", \"summer\", \"autumn\", \"winter\", \"cool\", \"Wistia\", \"Oranges\"]):\n fig = plt.figure()\n fig, ax = plt.subplots(1,1, figsize=(12,12))\n heatplot = ax.imshow(enc_dec_attention_2[i].numpy(), cmap=cmap)\n ax.set_xticks(np.arange(11))\n ax.set_yticks(np.arange(13))\n ax.set_xticklabels(xticklabels)\n ax.set_yticklabels(yticklabels)\n plt.colorbar(heatplot)\n plt.title(\"Layer 2, Attention Head %d\" % (i + 1))",
"_____no_output_____"
],
[
"# https://matplotlib.org/users/colormaps.html\nfor i, cmap in enumerate([\"Reds\", \"spring\", \"summer\", \"autumn\", \"winter\", \"cool\", \"Wistia\", \"Oranges\"]):\n fig = plt.figure()\n fig, ax = plt.subplots(1,1, figsize=(12,12))\n heatplot = ax.imshow(enc_dec_attention_3[i].numpy(), cmap=cmap)\n ax.set_xticks(np.arange(11))\n ax.set_yticks(np.arange(13))\n ax.set_xticklabels(xticklabels)\n ax.set_yticklabels(yticklabels)\n plt.colorbar(heatplot)\n plt.title(\"Layer 3, Attention Head %d\" % (i + 1))",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb725765b2ec3386c11be1dc1c88f072cc10f305 | 8,251 | ipynb | Jupyter Notebook | 2017_09_12_TextMiningVol2/Text Mining.ipynb | jiricejchan/AnonymniAnalytici | e4e96f943d0b2232d9099c6e7bb690a3d25ea422 | [
"MIT"
]
| 10 | 2017-03-28T06:52:22.000Z | 2017-11-21T17:41:11.000Z | 2017_09_12_TextMiningVol2/Text Mining.ipynb | jiricejchan/AnonymniAnalytici | e4e96f943d0b2232d9099c6e7bb690a3d25ea422 | [
"MIT"
]
| 1 | 2017-07-21T08:27:01.000Z | 2017-07-21T08:27:01.000Z | 2017_09_12_TextMiningVol2/Text Mining.ipynb | jiricejchan/AnonymniAnalytici | e4e96f943d0b2232d9099c6e7bb690a3d25ea422 | [
"MIT"
]
| 8 | 2017-03-05T17:21:40.000Z | 2019-12-01T18:46:39.000Z | 27.875 | 131 | 0.543692 | [
[
[
"import os\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nps = PorterStemmer()",
"_____no_output_____"
],
[
"print os.getcwd();",
"_____no_output_____"
],
[
"# if necessary change the directory\n#os.chdir('c:\\\\Users\\..')",
"_____no_output_____"
],
[
"data = pd.read_csv(\"nightlife_sanfrancisco_en.csv\", header=0, delimiter=\",\")",
"_____no_output_____"
],
[
"# explore the data set\ndata.shape",
"_____no_output_____"
],
[
"data.columns.values",
"_____no_output_____"
],
[
"print data[\"text\"][0]",
"_____no_output_____"
],
[
"# Download NLTK resources (including the stop word corpus used below)\nimport nltk\nnltk.download('popular') # Download text data sets, including stop words",
"_____no_output_____"
],
[
"from nltk.corpus import stopwords # Import the stop word list\nprint stopwords.words(\"english\")\n#words = [w for w in words if not w in stopwords.words(\"english\")]\n#print words # \"u\" before each word indicates that Python is internally representing each word as a unicode string",
"_____no_output_____"
],
[
"# Clean all records\ndef text_to_words( raw_text ):\n # 1. Remove end of line\n without_end_line = re.sub('\\n', ' ', raw_text)\n # 2. Remove start of line\n without_start_line = re.sub('\\r', ' ', without_end_line)\n # 3. Remove punctuation\n without_punctual = re.sub(ur'[\\W_]+',' ',without_start_line )\n # 4. Replace number by XnumberX\n without_number = re.sub('(\\d+\\s*)+', ' XnumberX ', without_punctual)\n # 5. Remove non-letters \n letters_only = re.sub(\"[^a-zA-Z]\", \" \", without_number) \n # 6. Convert to lower case\n lower_case = letters_only.lower()\n # 7. Split into individual words\n words = lower_case.split()\n # 8. stemming - algorithms Porter stemmer\n meaningful_words = [ps.stem(word) for word in words]\n # 9.Remove stop words \n # Redundant step, removing later in Creating the bag of words step\n #stops = set(stopwords.words(\"english\")) \n #meaningful_words = [w for w in words if not w in stops] \n # 10. Join the words back into one string separated by space and return the result.\n return( \" \".join( meaningful_words ))\n #return (meaningful_words)",
"_____no_output_____"
],
[
"clean_text = text_to_words( data[\"text\"][0] )\nprint clean_text",
"_____no_output_____"
],
[
"# Get the number of text based on the dataframe column size\nnum_text = data[\"text\"].size\n# Initialize an empty list to hold the clean text\nclean_data = []\n# Loop over each text; create an index i that goes from 0 to the length\nprint \"Cleaning and parsing the data set text...\\n\"\nclean_data = []\nfor i in xrange( 0, num_text ):\n # If the index is evenly divisible by 1000, print a message\n if( (i+1)%1000 == 0 ):\n print \"Text %d of %d\\n\" % ( i+1, num_text ) \n clean_data.append( text_to_words( data[\"text\"][i] )) # in case of error run \"pip install -U nltk\"",
"_____no_output_____"
],
[
"# Compare original and edited text\ndata['text'][0]",
"_____no_output_____"
],
[
"clean_data[0]",
"_____no_output_____"
],
[
"print \"Creating the bag of words...\\n\"\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# Initialize the \"CountVectorizer\" object, which is scikit-learn's\n# bag of words tool. \nvectorizer = CountVectorizer(analyzer = \"word\", \\\n tokenizer = None, \\\n preprocessor = None, \\\n stop_words = 'english', \\\n max_features = 5000) \n\n# fit_transform() does two functions: First, it fits the model\n# and learns the vocabulary; second, it transforms our training data\n# into feature vectors. The input to fit_transform should be a list of \n# strings.\ntrain_data_features = vectorizer.fit_transform(clean_data)\n\n# Numpy arrays are easy to work with, so convert the result to an \n# array\ntrain_data_features = train_data_features.toarray()",
"_____no_output_____"
],
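[
"# Added sketch: to featurize new, unseen text with the same vocabulary, call transform()\n# on the already-fitted vectorizer (not fit_transform, which would rebuild the vocabulary).\n# The example sentence is illustrative.\nnew_docs = [text_to_words('The bar was loud but the cocktails were great')]\nnew_features = vectorizer.transform(new_docs).toarray()\nprint new_features.shape",
"_____no_output_____"
],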
[
"print train_data_features.shape",
"_____no_output_____"
],
[
"# Take a look at the words in the vocabulary\nvocab = vectorizer.get_feature_names()\nprint vocab",
"_____no_output_____"
],
[
"import numpy as np\n\n# Sum up the counts of each vocabulary word\ndist = np.sum(train_data_features, axis=0)\n\n# For each, print the vocabulary word and the number of times it \n# appears in the training set\nfor tag, count in zip(vocab, dist):\n print count, tag",
"_____no_output_____"
],
[
"# Using in model, random forest example\nprint \"Training the random forest...\"\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Initialize a Random Forest classifier with 100 trees\nforest = RandomForestClassifier(n_estimators = 100) \n\n# Fit the forest to the training set, using the bag of words as \n# features and the sentiment labels as the response variable\n#\n# This may take a few minutes to run\nforest = forest.fit( train_data_features, data[\"stars\"] )",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7266cae7dccb97421eb17b11df782453b0b9ee | 17,360 | ipynb | Jupyter Notebook | GroversAlgorithm/GroversAlgorithm.ipynb | afgbloch/QuantumKatas | 2bd53efdaf4716ac0873a8e3919b57797cddcf95 | [
"MIT"
]
| 1 | 2020-05-20T14:02:15.000Z | 2020-05-20T14:02:15.000Z | GroversAlgorithm/GroversAlgorithm.ipynb | afgbloch/QuantumKatas | 2bd53efdaf4716ac0873a8e3919b57797cddcf95 | [
"MIT"
]
| null | null | null | GroversAlgorithm/GroversAlgorithm.ipynb | afgbloch/QuantumKatas | 2bd53efdaf4716ac0873a8e3919b57797cddcf95 | [
"MIT"
]
| null | null | null | 36.547368 | 325 | 0.587442 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb726daabe7d78751d1d1bb11b28812901e0671b | 39,080 | ipynb | Jupyter Notebook | nba_seer-0.1/0_grab_game_stats_logs.ipynb | wonderui/hoop_fantasy | 642157c1e6babd5bef7953b7607dc062f8c8deff | [
"MIT"
]
| null | null | null | nba_seer-0.1/0_grab_game_stats_logs.ipynb | wonderui/hoop_fantasy | 642157c1e6babd5bef7953b7607dc062f8c8deff | [
"MIT"
]
| null | null | null | nba_seer-0.1/0_grab_game_stats_logs.ipynb | wonderui/hoop_fantasy | 642157c1e6babd5bef7953b7607dc062f8c8deff | [
"MIT"
]
| null | null | null | 39.755849 | 128 | 0.552738 | [
[
[
"import datetime\nimport time\nimport functools\n\nimport pandas as pd\nimport numpy as np\nimport pytz\n\nimport nba_py\nimport nba_py.game\nimport nba_py.player\nimport nba_py.team\n\nimport pymysql\nfrom sqlalchemy import create_engine\n\nfrom password import hoop_pwd\npwd = hoop_pwd.password",
"_____no_output_____"
],
[
"conn = create_engine('mysql+pymysql://root:%[email protected]:3306/nba_stats' % pwd)\n\nustz = pytz.timezone('America/New_York')\nus_time = datetime.datetime.now(ustz)\nprint('New York time: ' + str(us_time.date()) + ' ' + str(us_time.time())[:8])\n\ntry:\n # read sql table of game header\n game_header = pd.read_sql_table('game_header', conn)\n length_1 = len(game_header)\n print(str(length_1) + ' games loaded.')\n # set begin date to the newest date in sql table\n begin = datetime.datetime.strptime(game_header.iloc[-1]['GAME_DATE_EST'][:10], \n \"%Y-%m-%d\").date() + datetime.timedelta(days=-2)\nexcept ValueError:\n print('no table yet!')\n length_1 = 0\n # if no table yet, set begin date to 2012-10-29\n begin = datetime.date(2012, 10, 29)\n # grab game headers of begining date\n game_header = nba_py.Scoreboard(month = begin.month, \n day = begin.day, \n year = begin.year, league_id = '00', offset = 0).game_header()\n\n# set end date to us yesterday\nend = us_time.date() + datetime.timedelta(days=-1)\n\nfor i in range((end - begin).days + 1):\n # grab game headers from begin date to end date\n day = begin + datetime.timedelta(days = i)\n game_header = game_header.append(nba_py.Scoreboard(month = day.month, \n day = day.day, \n year = day.year, \n league_id = '00', \n offset = 0).game_header())\n print(str(day) + ' finished! ' + str(datetime.datetime.now().time())[:8])\n\ngame_header = game_header[game_header['GAME_STATUS_ID'] == 3]\n \nlength_2 = len(game_header)\n# drop the duplicate by game id\ngame_header = game_header.drop_duplicates('GAME_ID')\nlength_3 = len(game_header)\nprint(str(length_2 - length_3) + ' duplicates droped.')\nprint(str(length_3 - length_1) + ' games added.')\n\n# sort game headers by game id ascending\n# game_header = game_header.sort_values('GAME_ID')\n\n# commit new game headers to sql table\ngame_header.to_sql('game_header', conn, index = False, if_exists = 'replace')\nprint(str(length_3) + ' game headers commit complete!')",
"New York time: 2018-01-12 22:10:11\n7181 games loaded.\n2017-10-26 finished! 21:10:26\n2017-10-27 finished! 21:10:27\n2017-10-28 finished! 21:10:29\n2017-10-29 finished! 21:10:30\n2017-10-30 finished! 21:10:31\n2017-10-31 finished! 21:10:32\n2017-11-01 finished! 21:10:33\n2017-11-02 finished! 21:10:34\n2017-11-03 finished! 21:10:36\n2017-11-04 finished! 21:10:37\n2017-11-05 finished! 21:10:38\n2017-11-06 finished! 21:10:39\n2017-11-07 finished! 21:10:40\n2017-11-08 finished! 21:10:41\n2017-11-09 finished! 21:10:42\n2017-11-10 finished! 21:10:43\n2017-11-11 finished! 21:10:44\n2017-11-12 finished! 21:10:46\n2017-11-13 finished! 21:10:47\n2017-11-14 finished! 21:10:48\n2017-11-15 finished! 21:10:49\n2017-11-16 finished! 21:10:50\n2017-11-17 finished! 21:10:51\n2017-11-18 finished! 21:10:52\n2017-11-19 finished! 21:10:53\n2017-11-20 finished! 21:10:54\n2017-11-21 finished! 21:10:55\n2017-11-22 finished! 21:10:56\n2017-11-23 finished! 21:10:58\n2017-11-24 finished! 21:10:59\n2017-11-25 finished! 21:10:59\n2017-11-26 finished! 21:11:01\n2017-11-27 finished! 21:11:02\n2017-11-28 finished! 21:11:03\n2017-11-29 finished! 21:11:04\n2017-11-30 finished! 21:11:05\n2017-12-01 finished! 21:11:06\n2017-12-02 finished! 21:11:07\n2017-12-03 finished! 21:11:09\n2017-12-04 finished! 21:11:10\n2017-12-05 finished! 21:11:10\n2017-12-06 finished! 21:11:11\n2017-12-07 finished! 21:11:13\n2017-12-08 finished! 21:11:14\n2017-12-09 finished! 21:11:14\n2017-12-10 finished! 21:11:15\n2017-12-11 finished! 21:11:17\n2017-12-12 finished! 21:11:18\n2017-12-13 finished! 21:11:19\n2017-12-14 finished! 21:11:20\n2017-12-15 finished! 21:11:21\n2017-12-16 finished! 21:11:22\n2017-12-17 finished! 21:11:23\n2017-12-18 finished! 21:11:24\n2017-12-19 finished! 21:11:25\n2017-12-20 finished! 21:11:26\n2017-12-21 finished! 21:11:27\n2017-12-22 finished! 21:11:28\n2017-12-23 finished! 21:11:30\n2017-12-24 finished! 21:11:31\n2017-12-25 finished! 21:11:32\n2017-12-26 finished! 21:11:33\n2017-12-27 finished! 21:11:34\n2017-12-28 finished! 21:11:35\n2017-12-29 finished! 21:11:36\n2017-12-30 finished! 21:11:37\n2017-12-31 finished! 21:11:39\n2018-01-01 finished! 21:11:40\n2018-01-02 finished! 21:11:41\n2018-01-03 finished! 21:11:42\n2018-01-04 finished! 21:11:43\n2018-01-05 finished! 21:11:44\n2018-01-06 finished! 21:11:45\n2018-01-07 finished! 21:11:46\n2018-01-08 finished! 21:11:47\n2018-01-09 finished! 21:11:47\n2018-01-10 finished! 21:11:48\n2018-01-11 finished! 21:11:49\n20 duplicates droped.\n533 games added.\n7714 game headers commit complete!\n"
],
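[
"# Added check (illustrative): quick sanity look at the committed game headers.\nprint('date range:', game_header['GAME_DATE_EST'].min()[:10], '->', game_header['GAME_DATE_EST'].max()[:10])\nprint('total games:', len(game_header))",
"_____no_output_____"
],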
[
"conn = create_engine('mysql+pymysql://root:%[email protected]:3306/nba_stats' % pwd)\n\ngame_stats_logs = pd.DataFrame()\n\ntry:\n # read sql table of game stats logs id\n game_stats_logs_id = pd.read_sql_table('game_stats_logs', conn, columns = ['GAME_ID'])\n length_1 = len(game_stats_logs_id)\n print(str(length_1) + ' player stats loaded.')\nexcept ValueError:\n print('no table yet!')\n length_1 = 0\n # create table and commit it to sql\n game_stats_logs.to_sql('game_stats_logs', conn, index = False, if_exists = 'append')\n print('game stats logs initialized!')\n\n# define game types by the head of game id\ngame_type = {'001': 'pre_season', '002': 'regular_season', '003': 'all_star', '004': 'play_offs'}\n\n# ------method 1------for game id in game headers from the max one in sql table\n# for i in game_header[game_header['GAME_ID'] >= game_stats_logs['GAME_ID'].max()]['GAME_ID']:\n\n# ------method 2------for game id in game header but not in game stats logs \nfor i in game_header['GAME_ID'][game_header['GAME_ID'].isin(game_stats_logs_id['GAME_ID'].drop_duplicates()) == False]:\n # get game player stats of i\n game_stats = nba_py.game.Boxscore(i).player_stats()\n # create home team player stats\n home_team_id = int(game_header[game_header['GAME_ID'] == i]['HOME_TEAM_ID'])\n home_stats_logs = game_stats[game_stats['TEAM_ID'] == int(home_team_id)].copy()\n home_stats_logs['LOCATION'] = 'HOME'\n home_stats_logs['AGAINST_TEAM_ID'] = int(game_header[game_header['GAME_ID'] == i]['VISITOR_TEAM_ID'])\n # create away team player stats\n away_team_id = int(game_header[game_header['GAME_ID'] == i]['VISITOR_TEAM_ID'])\n away_stats_logs = game_stats[game_stats['TEAM_ID'] == int(away_team_id)].copy()\n away_stats_logs['LOCATION'] = 'AWAY'\n away_stats_logs['AGAINST_TEAM_ID'] = int(game_header[game_header['GAME_ID'] == i]['HOME_TEAM_ID'])\n # combine home and away team player stats and append to game stats logs\n game_stats_logs = game_stats_logs.append(home_stats_logs)\n game_stats_logs = game_stats_logs.append(away_stats_logs)\n print('game ' + i + ' added! ' + str(datetime.datetime.now().time())[:8])\n\ndef min_convert(m):\n '''\n convert mm:ss to float\n '''\n try:\n if ':' in m:\n return float(m[:-3]) + round(float(m[-2:])/60, 2)\n else:\n return float(m)\n except TypeError:\n return None\n\n# create float time\ngame_stats_logs['MINS'] = game_stats_logs['MIN'].apply(min_convert)\n# set 0 time player to None\ngame_stats_logs['MINS'] = game_stats_logs['MINS'].apply(lambda x: None if x == 0 else x)\n# add game type\ngame_stats_logs['GAME_TYPE'] = game_stats_logs['GAME_ID'].apply(lambda x: x[:3]).map(game_type)\n# add game date and game sequence\ngame_stats_logs = game_stats_logs.merge(game_header[['GAME_DATE_EST', 'GAME_SEQUENCE', 'GAME_ID']], \n how = 'left', on = 'GAME_ID')\n# add new ordered game_id\ngame_stats_logs['GAME_ID_O'] = game_stats_logs['GAME_ID'].apply(lambda x: x[3:5] + x[:3] + x[-5:])\n\nlength_2 = len(game_stats_logs)\n# drop duplicate game stats by game id and player id\ngame_stats_logs = game_stats_logs.drop_duplicates(['GAME_ID', 'PLAYER_ID'])\nlength_3 = len(game_stats_logs)\nprint(str(length_2 - length_3) + ' duplicates droped.')\nprint(str(length_3) + ' player stats added.')\n\n# commit new game stats logs to sql table\ngame_stats_logs.to_sql('game_stats_logs', conn, index = False, if_exists = 'append')\nprint(str(length_3) + ' player stats commit complete!')",
"187481 player stats loaded.\ngame 0011300114 added! 21:12:35\ngame 0021700085 added! 21:12:36\ngame 0021700086 added! 21:12:37\ngame 0021700087 added! 21:12:37\ngame 0021700088 added! 21:12:38\ngame 0021700089 added! 21:12:39\ngame 0021700090 added! 21:12:40\ngame 0021700091 added! 21:12:41\ngame 0021700092 added! 21:12:42\ngame 0021700093 added! 21:12:43\ngame 0021700094 added! 21:12:44\ngame 0021700095 added! 21:12:45\ngame 0021700096 added! 21:12:46\ngame 0021700097 added! 21:12:46\ngame 0021700098 added! 21:12:47\ngame 0021700099 added! 21:12:48\ngame 0021700100 added! 21:12:50\ngame 0021700101 added! 21:12:51\ngame 0021700102 added! 21:12:52\ngame 0021700103 added! 21:12:53\ngame 0021700104 added! 21:12:54\ngame 0021700105 added! 21:12:54\ngame 0021700106 added! 21:12:55\ngame 0021700107 added! 21:12:56\ngame 0021700108 added! 21:12:57\ngame 0021700109 added! 21:12:58\ngame 0021700110 added! 21:12:59\ngame 0021700111 added! 21:13:00\ngame 0021700112 added! 21:13:01\ngame 0021700113 added! 21:13:02\ngame 0021700114 added! 21:13:03\ngame 0021700115 added! 21:13:03\ngame 0021700116 added! 21:13:04\ngame 0021700117 added! 21:13:05\ngame 0021700118 added! 21:13:06\ngame 0021700119 added! 21:13:07\ngame 0021700120 added! 21:13:08\ngame 0021700121 added! 21:13:09\ngame 0021700122 added! 21:13:10\ngame 0021700123 added! 21:13:11\ngame 0021700124 added! 21:13:12\ngame 0021700125 added! 21:13:13\ngame 0021700126 added! 21:13:13\ngame 0021700127 added! 21:13:14\ngame 0021700128 added! 21:13:15\ngame 0021700129 added! 21:13:16\ngame 0021700130 added! 21:13:17\ngame 0021700131 added! 21:13:18\ngame 0021700132 added! 21:13:18\ngame 0021700133 added! 21:13:19\ngame 0021700134 added! 21:13:20\ngame 0021700135 added! 21:13:21\ngame 0021700136 added! 21:13:22\ngame 0021700137 added! 21:13:24\ngame 0021700138 added! 21:13:24\ngame 0021700139 added! 21:13:25\ngame 0021700140 added! 21:13:26\ngame 0021700141 added! 21:13:26\ngame 0021700142 added! 21:13:27\ngame 0021700143 added! 21:13:28\ngame 0021700144 added! 21:13:29\ngame 0021700145 added! 21:13:30\ngame 0021700146 added! 21:13:31\ngame 0021700147 added! 21:13:32\ngame 0021700148 added! 21:13:33\ngame 0021700149 added! 21:13:33\ngame 0021700150 added! 21:13:34\ngame 0021700151 added! 21:13:35\ngame 0021700152 added! 21:13:36\ngame 0021700153 added! 21:13:37\ngame 0021700154 added! 21:13:38\ngame 0021700155 added! 21:13:39\ngame 0021700156 added! 21:13:40\ngame 0021700157 added! 21:13:41\ngame 0021700158 added! 21:13:42\ngame 0021700159 added! 21:13:43\ngame 0021700160 added! 21:13:44\ngame 0021700161 added! 21:13:44\ngame 0021700162 added! 21:13:45\ngame 0021700163 added! 21:13:46\ngame 0021700164 added! 21:13:47\ngame 0021700165 added! 21:13:47\ngame 0021700166 added! 21:13:48\ngame 0021700167 added! 21:13:49\ngame 0021700168 added! 21:13:50\ngame 0021700169 added! 21:13:51\ngame 0021700170 added! 21:13:52\ngame 0021700171 added! 21:13:53\ngame 0021700172 added! 21:13:54\ngame 0021700173 added! 21:13:55\ngame 0021700174 added! 21:13:56\ngame 0021700175 added! 21:13:57\ngame 0021700176 added! 21:13:57\ngame 0021700177 added! 21:13:58\ngame 0021700178 added! 21:13:59\ngame 0021700179 added! 21:14:00\ngame 0021700180 added! 21:14:01\ngame 0021700181 added! 21:14:02\ngame 0021700182 added! 21:14:03\ngame 0021700183 added! 21:14:03\ngame 0021700184 added! 21:14:04\ngame 0021700185 added! 21:14:05\ngame 0021700186 added! 21:14:06\ngame 0021700187 added! 21:14:07\ngame 0021700188 added! 21:14:08\ngame 0021700189 added! 21:14:09\ngame 0021700190 added! 
21:14:11\ngame 0021700191 added! 21:14:12\ngame 0021700192 added! 21:14:12\ngame 0021700193 added! 21:14:13\ngame 0021700194 added! 21:14:14\ngame 0021700195 added! 21:14:15\ngame 0021700196 added! 21:14:16\ngame 0021700197 added! 21:14:17\ngame 0021700198 added! 21:14:18\ngame 0021700199 added! 21:14:19\ngame 0021700200 added! 21:14:19\ngame 0021700201 added! 21:14:20\ngame 0021700202 added! 21:14:21\ngame 0021700203 added! 21:14:22\ngame 0021700204 added! 21:14:23\ngame 0021700205 added! 21:14:24\ngame 0021700206 added! 21:14:25\ngame 0021700207 added! 21:14:26\ngame 0021700208 added! 21:14:27\ngame 0021700209 added! 21:14:28\ngame 0021700210 added! 21:14:29\ngame 0021700211 added! 21:14:30\ngame 0021700212 added! 21:14:32\ngame 0021700213 added! 21:14:33\ngame 0021700214 added! 21:14:34\ngame 0021700215 added! 21:14:34\ngame 0021700216 added! 21:14:35\ngame 0021700217 added! 21:14:36\ngame 0021700218 added! 21:14:37\ngame 0021700219 added! 21:14:38\ngame 0021700220 added! 21:14:39\ngame 0021700221 added! 21:14:39\ngame 0021700222 added! 21:14:40\ngame 0021700223 added! 21:14:41\ngame 0021700224 added! 21:14:42\ngame 0021700225 added! 21:14:43\ngame 0021700226 added! 21:14:44\ngame 0021700227 added! 21:14:45\ngame 0021700228 added! 21:14:46\ngame 0021700229 added! 21:14:46\ngame 0021700230 added! 21:14:47\ngame 0021700231 added! 21:14:48\ngame 0021700232 added! 21:14:49\ngame 0021700233 added! 21:14:50\ngame 0021700234 added! 21:14:52\ngame 0021700237 added! 21:14:53\ngame 0021700235 added! 21:14:54\ngame 0021700236 added! 21:14:55\ngame 0021700238 added! 21:14:56\ngame 0021700239 added! 21:14:56\ngame 0021700240 added! 21:14:57\ngame 0021700241 added! 21:14:58\ngame 0021700242 added! 21:14:59\ngame 0021700243 added! 21:15:01\ngame 0021700244 added! 21:15:02\ngame 0021700245 added! 21:15:03\ngame 0021700246 added! 21:15:03\ngame 0021700247 added! 21:15:04\ngame 0021700248 added! 21:15:05\ngame 0021700249 added! 21:15:06\ngame 0021700250 added! 21:15:07\ngame 0021700251 added! 21:15:08\ngame 0021700252 added! 21:15:09\ngame 0021700253 added! 21:15:10\ngame 0021700254 added! 21:15:11\ngame 0021700255 added! 21:15:11\ngame 0021700256 added! 21:15:12\ngame 0021700257 added! 21:15:13\ngame 0021700258 added! 21:15:14\ngame 0021700259 added! 21:15:15\ngame 0021700260 added! 21:15:16\ngame 0021700261 added! 21:15:16\ngame 0021700262 added! 21:15:17\ngame 0021700263 added! 21:15:18\ngame 0021700264 added! 21:15:19\ngame 0021700265 added! 21:15:20\ngame 0021700266 added! 21:15:21\ngame 0021700267 added! 21:15:22\ngame 0021700268 added! 21:15:22\ngame 0021700269 added! 21:15:23\ngame 0021700270 added! 21:15:24\ngame 0021700271 added! 21:15:25\ngame 0021700272 added! 21:15:26\ngame 0021700273 added! 21:15:27\ngame 0021700274 added! 21:15:28\ngame 0021700275 added! 21:15:29\ngame 0021700276 added! 21:15:30\ngame 0021700277 added! 21:15:31\ngame 0021700278 added! 21:15:32\ngame 0021700279 added! 21:15:33\ngame 0021700280 added! 21:15:33\ngame 0021700281 added! 21:15:34\ngame 0021700282 added! 21:15:35\ngame 0021700283 added! 21:15:36\ngame 0021700284 added! 21:15:37\ngame 0021700285 added! 21:15:38\ngame 0021700286 added! 21:15:39\ngame 0021700287 added! 21:15:40\ngame 0021700288 added! 21:15:41\ngame 0021700289 added! 21:15:42\ngame 0021700290 added! 21:15:43\ngame 0021700291 added! 21:15:44\ngame 0021700292 added! 21:15:44\ngame 0021700293 added! 21:15:45\ngame 0021700294 added! 21:15:46\ngame 0021700295 added! 21:15:47\ngame 0021700296 added! 21:15:48\ngame 0021700297 added! 
21:15:48\ngame 0021700298 added! 21:15:49\ngame 0021700299 added! 21:15:51\ngame 0021700300 added! 21:15:53\ngame 0021700301 added! 21:15:54\ngame 0021700302 added! 21:15:55\ngame 0021700303 added! 21:15:56\ngame 0021700304 added! 21:15:57\ngame 0021700305 added! 21:15:58\ngame 0021700306 added! 21:15:59\ngame 0021700307 added! 21:16:00\ngame 0021700308 added! 21:16:01\ngame 0021700309 added! 21:16:02\ngame 0021700310 added! 21:16:02\ngame 0021700311 added! 21:16:03\ngame 0021700312 added! 21:16:04\ngame 0021700313 added! 21:16:05\ngame 0021700314 added! 21:16:06\ngame 0021700315 added! 21:16:07\ngame 0021700316 added! 21:16:08\ngame 0021700317 added! 21:16:09\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code"
]
]
|
cb728acef669de56dd1a6a7361754d6186d55277 | 709,378 | ipynb | Jupyter Notebook | Main.ipynb | Tylerastro/Folium_Astronomy_Institutes | 874e3b64103261c5a6a82560c329503aad18681b | [
"MIT"
]
| null | null | null | Main.ipynb | Tylerastro/Folium_Astronomy_Institutes | 874e3b64103261c5a6a82560c329503aad18681b | [
"MIT"
]
| null | null | null | Main.ipynb | Tylerastro/Folium_Astronomy_Institutes | 874e3b64103261c5a6a82560c329503aad18681b | [
"MIT"
]
| null | null | null | 1,901.817694 | 613,199 | 0.826076 | [
[
[
"import pandas as pd\ndata = pd.read_csv('Astronomy_institutes_list - Institute_with_location.csv')\n# file is/will be included in github.",
"_____no_output_____"
],
[
"data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 115 entries, 0 to 114\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Name 115 non-null object \n 1 URL 115 non-null object \n 2 Building_Location 67 non-null object \n 3 Location 103 non-null object \n 4 Region 97 non-null object \n 5 Independency 106 non-null object \n 6 Job_opportunities 47 non-null object \n 7 Colloquiums 13 non-null object \n 8 OCW 1 non-null object \n 9 latitude 115 non-null float64\n 10 longitude 115 non-null float64\ndtypes: float64(2), object(9)\nmemory usage: 10.0+ KB\n"
],
[
"# Auto-fill longitude and latitude (Not accurate due to language and map source)\nfrom geopy.geocoders import Nominatim\nimport time\n\nlatitude = []\nlongitude = []\n\ngeolocator = Nominatim(user_agent=\"\") #use your agent here. e.g. your e-mail address\n\nfor item in data.iterrows():\n time.sleep(1) # Avoiding too frequent request\n try:\n location = geolocator.geocode(item[1][2])\n print(item[1][0],(location.latitude, location.longitude))\n latitude.append(location.latitude)\n longitude.append(location.longitude)\n \n except AttributeError:\n print(item[1][0]+' Cannot found'+'\\n'+'Using campus location...')\n try: \n location = geolocator.geocode(item[1][3])\n print(item[1][0],(location.latitude, location.longitude))\n latitude.append(location.latitude)\n longitude.append(location.longitude)\n except AttributeError:\n print(item[1][0]+' Cannot found'+'\\n'+'Using Name location...')\n location = geolocator.geocode(item[1][0])\n print(item[1][0],(location.latitude, location.longitude))\n latitude.append(location.latitude)\n longitude.append(location.longitude)\n",
"_____no_output_____"
],
[
"# Append auto-fill coordinates\ndata['latitude'] = latitude\ndata['longitude'] = longitude",
"_____no_output_____"
],
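[
"# Added sketch: cache the geocoded coordinates so the slow Nominatim loop above does not\n# need to be re-run next time (the output file name is illustrative).\ndata.to_csv('Astronomy_institutes_list_with_coordinates.csv', index=False)",
"_____no_output_____"
],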
[
"# Check frequent key words in research area\nimport re\nimport math\n\n# function check if the research area is not empty\ndef isNaN(string):\n return string != string\n\nkeywords = []\nfor index,item in data.iterrows():\n if isNaN(item['Region']):\n continue\n for region in re.findall(r\"[\\w']+\", item['Region']):\n if region.title() in ['The','And','Of']: # Don't count these words\n continue\n else:\n keywords.append(region.title())\n\n\nwordcount = {}\n\nimport collections\nfor word in keywords:\n if word not in wordcount:\n wordcount[word] = 1\n else:\n wordcount[word] += 1\n\n# Print most common word\nn_print = int(input(\"How many most common words to print: \"))\nprint(\"\\nThe {} most common words are as follows\\n\".format(n_print))\nword_counter = collections.Counter(wordcount)\nfor word, count in word_counter.most_common(n_print):\n print(word, \": \", count)\n\n# Create a data frame of the most common words \n# Draw a bar chart\n#set up plot configuration\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nplt.rc('text', usetex=True) # Latex support\nplt.rc('font', family='serif')\nplt.rc('lines', linewidth=0.5) # Linewidth of data\nplt.rc('savefig', dpi=300)\nfig = plt.figure()\nfig.set_size_inches(23.2,11)\n\n#plot\nlst = word_counter.most_common(n_print)\ndf = pd.DataFrame(lst, columns = ['Word', 'Count'])\nplt.bar(df.Word,df.Count)\nplt.xticks(rotation=30)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.show()",
"How many most common words to print: 10\n\nThe 10 most common words are as follows\n\nAstrophysics : 73\nCosmology : 66\nAstronomy : 43\nStars : 37\nGalaxies : 37\nEnergy : 34\nFormation : 31\nHigh : 30\nPhysics : 29\nStellar : 28\n"
],
[
"# Creating institute map\nimport folium\nfrom folium import plugins\nfrom folium.plugins import MarkerCluster\nimport re\n\ncenter = [23, 121] # Initial center coordinates\nmap_institute = folium.Map(location=center, zoom_start=7 , tiles=None) # zoom_start is the initial zoom factor\n\n# Base map list from http://leaflet-extras.github.io/leaflet-providers/preview/\nfolium.TileLayer(tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}',\n attr = 'Tiles © Esri — Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community'\n , name='Import Tiles').add_to(map_institute)\n\n\n# Create map with group marker\nmcg = folium.plugins.MarkerCluster(control=False)\nmap_institute.add_child(mcg)\n\n# Show all intitutes at first\ntouts = folium.plugins.FeatureGroupSubGroup(mcg, \"All\",show=True)\nmap_institute.add_child(touts)\n\n# Creating individual marker(institutes)\nfor index, institute in data.iterrows():\n location = [institute['latitude'], institute['longitude']]\n html = ('<a href='+str(institute['URL'])+ ' target=\"_blank\">'+str(institute['Name'])+'</a>'+'<br>'+\n '<a href='+str(institute['Job_opportunities'])+ ' target=\"_blank\">'+'Job Opportunity '+'</a>'+'<br>'\n +'<p>'+'Research area: '+'<br>'+str(institute['Region']).title().replace(',','<br>')+'</p>')\n # Set up the window size\n iframe = folium.IFrame(html,\n width=2200,\n height=120)\n popup = folium.Popup(iframe,\n max_width=300)\n if str(institute['Independency']) == 'Yes':\n folium.Marker(location,tooltip=str(institute['Name'])+'<br>'+'Independency: '+'✅'\n ,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(touts) #icon list from https://fontawesome.com/icons?d=gallery\n elif str(institute['Independency']) == 'No':\n folium.Marker(location,tooltip=str(institute['Name'])+'<br>'+'Independency: '+'❎'\n ,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(touts)\n else:\n folium.Marker(location,tooltip=str(institute['Name'])+'<br>'+'Independency: '+'▢'\n ,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(touts)\n\n# Check if research area is empty in the list\ndef isNaN(string):\n return string != string \n\n# Creating different research area groups in the map\ndef catalogs(keyword,catalogue): # key word for boolean institutes ['keyword'], catalogue is the research area name\n for index, item in data.iterrows():\n if isNaN(item['Region']):\n continue\n else:\n keys = re.findall(r\"[\\w']+\", item['Region'])\n keys = [word.title() for word in keys]\n location = [item['latitude'], item['longitude']]\n html = ('<a href='+str(item['URL'])+ ' target=\"_blank\">' +str(item['Name'])+'</a>'+'<br>'+\n '<a href='+str(item['Job_opportunities'])+ ' target=\"_blank\">'+'Job Opportunity '+'</a>'+'<br>'\n +'<p>'+'Research area: '+'<br>'+str(item['Region'].title()).replace(',','<br>')+'</p>')\n iframe = folium.IFrame(html,\n width=2200,\n height=170)\n popup = folium.Popup(iframe,\n max_width=300)\n if any(set(keyword)&set(keys)):\n if str(item['Independency']) == 'Yes':\n folium.Marker(location,tooltip=str(item['Name'])+'<br>'+'Independency: '+'✅'\n ,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(catalogue) #icon list from https://fontawesome.com/icons?d=gallery\n elif str(item['Independency']) == 'No':\n folium.Marker(location,tooltip=str(item['Name'])+'<br>'+'Independency: '+'❎'\n 
,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(catalogue)\n else:\n folium.Marker(location,tooltip=str(item['Name'])+'<br>'+'Independency: '+'▢'\n ,popup = popup, icon=folium.Icon(icon='university', prefix='fa')\n ).add_to(catalogue)\n\n# Adding/deleting groups here\ninstr = folium.plugins.FeatureGroupSubGroup(mcg, \"Instrumentation\",show=False)\nmap_institute.add_child(instr)\ncatalogs(['Instrumentation'],instr)\n\ntransient = folium.plugins.FeatureGroupSubGroup(mcg, \"Transients\",show=False)\nmap_institute.add_child(transient)\ncatalogs(['Transient','Supernova','Supernovae','Flare'],transient)\n\nGW = folium.plugins.FeatureGroupSubGroup(mcg, \"Gravitational waves\",show=False)\nmap_institute.add_child(GW)\ncatalogs(['Gravitational','Gw'],GW)\n\nplanetary = folium.plugins.FeatureGroupSubGroup(mcg, \"Planetary science\",show=False)\nmap_institute.add_child(planetary)\ncatalogs(['Planetary','Planet','Exoplanet'],planetary)\n\nsolar = folium.plugins.FeatureGroupSubGroup(mcg, \"Solar system\",show=False)\nmap_institute.add_child(solar)\ncatalogs(['Solar','Planet','Jupiter','Sun','Earth'],solar)\n\nfolium.LayerControl(collapsed=False).add_to(map_institute) # collapsed controls the option is folded or unfolded\n\n#display the map\nmap_institute",
"_____no_output_____"
],
[
"# Save the html file\nmap_institute.save(\"map.html\")\nmap_institute.save(\"index.html\")",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb728ebea24a239b2f459b9dc1596e3912a530b6 | 16,269 | ipynb | Jupyter Notebook | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | zealseeker/deepchem | a44decc033c727e2da681b1461c3d57fdd53aca0 | [
"MIT"
]
| 1 | 2020-05-17T10:26:52.000Z | 2020-05-17T10:26:52.000Z | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | zealseeker/deepchem | a44decc033c727e2da681b1461c3d57fdd53aca0 | [
"MIT"
]
| null | null | null | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | zealseeker/deepchem | a44decc033c727e2da681b1461c3d57fdd53aca0 | [
"MIT"
]
| null | null | null | 45.571429 | 593 | 0.594628 | [
[
[
"# Tutorial Part 2: Learning MNIST Digit Classifiers\n\nIn the previous tutorial, we learned some basics of how to load data into DeepChem and how to use the basic DeepChem objects to load and manipulate this data. In this tutorial, you'll put the parts together and learn how to train a basic image classification model in DeepChem. You might ask, why are we bothering to learn this material in DeepChem? Part of the reason is that image processing is an increasingly important part of AI for the life sciences. So learning how to train image processing models will be very useful for using some of the more advanced DeepChem features.\n\nThe MNIST dataset contains handwritten digits along with their human annotated labels. The learning challenge for this dataset is to train a model that maps the digit image to its true label. MNIST has been a standard benchmark for machine learning for decades at this point. \n\n\n\n## Colab\n\nThis tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.\n\n[](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb)\n\n## Setup\n\nWe recommend running this tutorial on Google colab. You'll need to run the following cell of installation commands on Colab to get your environment set up. If you'd rather run the tutorial locally, make sure you don't run these commands (since they'll download and install a new Anaconda python setup)",
"_____no_output_____"
]
],
[
[
"%%capture\n%tensorflow_version 1.x\n!wget -c https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n!chmod +x Miniconda3-latest-Linux-x86_64.sh\n!bash ./Miniconda3-latest-Linux-x86_64.sh -b -f -p /usr/local\n!conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0\nimport sys\nsys.path.append('/usr/local/lib/python3.7/site-packages/')",
"_____no_output_____"
],
[
"from tensorflow.examples.tutorials.mnist import input_data",
"_____no_output_____"
],
[
"# TODO: This is deprecated. Let's replace with a DeepChem native loader for maintainability.\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)",
"WARNING:tensorflow:From <ipython-input-3-a839aeb82f4b>:1: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use urllib or similar directly.\nSuccessfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
],
[
"import deepchem as dc\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Reshape, Conv2D, Flatten, Dense, Softmax",
"/usr/local/lib/python3.6/dist-packages/sklearn/externals/joblib/__init__.py:15: FutureWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)\nvalid = dc.data.NumpyDataset(mnist.validation.images, mnist.validation.labels)",
"_____no_output_____"
],
[
"keras_model = tf.keras.Sequential([\n Reshape((28, 28, 1)),\n Conv2D(filters=32, kernel_size=5, activation=tf.nn.relu),\n Conv2D(filters=64, kernel_size=5, activation=tf.nn.relu),\n Flatten(),\n Dense(1024, activation=tf.nn.relu),\n Dense(10),\n Softmax()\n])\nmodel = dc.models.KerasModel(keras_model, dc.models.losses.CategoricalCrossEntropy())",
"_____no_output_____"
],
[
"model.fit(train, nb_epoch=2)",
"WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/deepchem/models/keras_model.py:169: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/deepchem/models/optimizers.py:76: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/deepchem/models/keras_model.py:258: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/deepchem/models/keras_model.py:260: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/deepchem/models/keras_model.py:200: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\n"
],
[
"from sklearn.metrics import roc_curve, auc\nimport numpy as np\n\nprint(\"Validation\")\nprediction = np.squeeze(model.predict_on_batch(valid.X))\n\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(10):\n fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n print(\"class %s:auc=%s\" % (i, roc_auc[i]))",
"Validation\nclass 0:auc=0.9999057979948827\nclass 1:auc=0.9999335476621387\nclass 2:auc=0.9998705637425881\nclass 3:auc=0.999911789233876\nclass 4:auc=0.9999623237852037\nclass 5:auc=0.9998804023326087\nclass 6:auc=0.9998620230088834\nclass 7:auc=0.9995460674157303\nclass 8:auc=0.9998530924048773\nclass 9:auc=0.9996017892577271\n"
]
],
[
[
"# Congratulations! Time to join the Community!\n\nCongratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:\n\n## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)\nThis helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.\n\n## Join the DeepChem Gitter\nThe DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb72950e536e26ed5c27780ceea4a0817732fcbb | 2,564 | ipynb | Jupyter Notebook | from-Google/Snippets_Importing_libraries.ipynb | je-r/jupyter-snippets-and-examples | 5919c8238dfbb08f50bbf1c2bb1737f692d0a656 | [
"CC0-1.0"
]
| null | null | null | from-Google/Snippets_Importing_libraries.ipynb | je-r/jupyter-snippets-and-examples | 5919c8238dfbb08f50bbf1c2bb1737f692d0a656 | [
"CC0-1.0"
]
| null | null | null | from-Google/Snippets_Importing_libraries.ipynb | je-r/jupyter-snippets-and-examples | 5919c8238dfbb08f50bbf1c2bb1737f692d0a656 | [
"CC0-1.0"
]
| null | null | null | 22.295652 | 118 | 0.466069 | [
[
[
"# Importing a library that is not in Colaboratory\n\nTo import a library that's not in Colaboratory by default, you can use `!pip install` or `!apt-get install`.",
"_____no_output_____"
]
],
[
[
"!pip install matplotlib-venn",
"_____no_output_____"
],
[
"!apt-get -qq install -y libfluidsynth1",
"_____no_output_____"
]
],
[
[
"# Install 7zip reader [libarchive](https://pypi.python.org/pypi/libarchive) ",
"_____no_output_____"
]
],
[
[
"# https://pypi.python.org/pypi/libarchive\n!apt-get -qq install -y libarchive-dev && pip install -U libarchive\nimport libarchive",
"_____no_output_____"
]
],
[
[
"# Install GraphViz & [PyDot](https://pypi.python.org/pypi/pydot)",
"_____no_output_____"
]
],
[
[
"# https://pypi.python.org/pypi/pydot\n!apt-get -qq install -y graphviz && pip install pydot\nimport pydot",
"_____no_output_____"
]
],
[
[
"# Install [cartopy](http://scitools.org.uk/cartopy/docs/latest/)",
"_____no_output_____"
]
],
[
[
"!pip install cartopy\nimport cartopy",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb72952698882a4dd457c85e2c4b998096b9e16a | 180,476 | ipynb | Jupyter Notebook | paper_figures/fig-15-reconstruction-gaussian-conv-noise.ipynb | quantshah/qst-nn | e4dc252fea7c98d0fedc7069502b01e30b47d422 | [
"MIT"
]
| 5 | 2020-12-20T04:08:41.000Z | 2022-01-08T13:19:20.000Z | paper_figures/fig-15-reconstruction-gaussian-conv-noise.ipynb | quantshah/qst-nn | e4dc252fea7c98d0fedc7069502b01e30b47d422 | [
"MIT"
]
| null | null | null | paper_figures/fig-15-reconstruction-gaussian-conv-noise.ipynb | quantshah/qst-nn | e4dc252fea7c98d0fedc7069502b01e30b47d422 | [
"MIT"
]
| 3 | 2021-02-23T06:59:43.000Z | 2022-02-26T02:46:06.000Z | 221.171569 | 32,070 | 0.882234 | [
[
[
"# QST CGAN with thermal noise in the channel (convolution)",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nfrom qutip import Qobj, fidelity\nfrom qutip.wigner import qfunc\nfrom qutip.states import thermal_dm\nfrom qutip import coherent_dm\nfrom qutip.visualization import plot_wigner_fock_distribution\n\n\n\nimport tensorflow_addons as tfa\nimport tensorflow as tf\n\n\nfrom qst_nn.ops import (cat, binomial, num, gkp, GaussianConv, husimi_ops, convert_to_real_ops, dm_to_tf, batched_expect)\nfrom qst_cgan.gan import DensityMatrix, Expectation, Discriminator, generator_loss, discriminator_loss\nfrom qst_cgan.ops import convert_to_complex_ops, tf_fidelity\n\n\nfrom tqdm.auto import tqdm\n\nfrom dataclasses import dataclass\n\n\nimport matplotlib.pyplot as plt\ntf.keras.backend.set_floatx('float64') # Set float64 as the default",
"_____no_output_____"
],
[
"# https://scipy-cookbook.readthedocs.io/items/Matplotlib_LaTeX_Examples.html\nfig_width_pt = 246.0 # Get this from LaTeX using \\showthe\\columnwidth\ninches_per_pt = 1.0/72.27 # Convert pt to inch\ngolden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio\nfig_width = fig_width_pt*inches_per_pt # width in inches\nfig_height = fig_width*golden_mean # height in inches\nfig_size = [fig_width,fig_height]\nparams = {\n 'axes.labelsize': 9,\n 'font.size': 9,\n 'legend.fontsize': 9,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'text.usetex': True,\n 'figure.figsize': fig_size,\n 'axes.labelpad':1,\n 'legend.handlelength':0.8,\n 'axes.titlesize': 9,\n \"text.usetex\" : False\n }\nplt.rcParams.update(params)\n\n# mpl.use('pdf')",
"_____no_output_____"
]
],
[
[
"# We create the state and the data using QuTiP",
"_____no_output_____"
]
],
[
[
"hilbert_size = 32\n\n# Betas can be selected in a grid or randomly in a circle\nnum_grid = 64\nnum_points = num_grid*num_grid\n\nbeta_max_x = 5\nbeta_max_y = 5\n\nxvec = np.linspace(-beta_max_x, beta_max_x, num_grid)\nyvec = np.linspace(-beta_max_y, beta_max_y, num_grid)\n\nX, Y = np.meshgrid(xvec, yvec)\nbetas = (X + 1j*Y).ravel()",
"_____no_output_____"
]
],
[
[
"# Measurement ops are simple projectors $\\frac{1}{\\pi}|\\beta \\rangle \\langle \\beta|$",
"_____no_output_____"
]
],
[
[
"m_ops = [(1/np.pi)*coherent_dm(hilbert_size, beta) for beta in betas]\nops_numpy = [op.data.toarray() for op in m_ops] # convert the QuTiP Qobj to numpy arrays\nops_tf = tf.convert_to_tensor([ops_numpy]) # convert the numpy arrays to complex TensorFlow tensors\n\nA = convert_to_real_ops(ops_tf) # convert the complex-valued numpy matrices to real-valued TensorFlow tensors\nprint(A.shape, A.dtype)",
"(1, 32, 32, 8192) <dtype: 'float64'>\n"
]
],
[
[
"# Convolution noise\n\nThe presence of thermal photons in the amplification channel lead to the data being\ncorrupted as a convolution over the Q function data (see [https://arxiv.org/abs/1206.3405](https://arxiv.org/abs/1206.3405))\n\nThe kernel for this convolution is a Gaussian determined by the average photon number in the thermal state. We corrupt our data assuming a thermal state with mean photon number 5. ",
"_____no_output_____"
]
],
[
[
"# define normalized 2D gaussian\ndef gaus2d(x=0, y=0, n0=1):\n return 1. / (np.pi * n0) * np.exp(-((x**2 + y**2.0)/n0))\n\nnth = 5\nX, Y = np.meshgrid(xvec, yvec) # get 2D variables instead of 1D\ngauss_kernel = gaus2d(X, Y, n0=nth)",
"_____no_output_____"
]
],
[
[
"# State to reconstruct\n\nLet us now create a state on which we will run QST",
"_____no_output_____"
]
],
[
[
"rho, _ = cat(hilbert_size, 2, 0, 0)\nplot_wigner_fock_distribution(rho)\nplt.show()",
"_____no_output_____"
],
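    [
    "# Hypothetical sanity check, not part of the original notebook: the generated\n# cat state should be a valid density matrix, i.e. have unit trace and, for a\n# pure cat state, a purity close to one.\nprint(\"trace =\", rho.tr())\nprint(\"purity =\", (rho * rho).tr())",
    "_____no_output_____"
    ],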
[
"\nrho_tf = dm_to_tf([rho])\ndata = batched_expect(ops_tf, rho_tf)",
"_____no_output_____"
]
],
[
[
"# Q function plots using QuTiP and a custom TensorFlow expectation function",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1, 2, figsize=(7, 3))\nax[0].imshow(qfunc(rho, xvec, yvec, g=2))\nax[1].imshow(data.numpy().reshape(num_grid, num_grid))\nax[0].set_title(\"QuTiP Q func\")\nax[1].set_title(\"TensorFlow computed Q func\")\nplt.show()",
"_____no_output_____"
],
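    [
    "# Hypothetical numerical check, not part of the original notebook: the two\n# panels above should agree, since batched_expect with the (1/pi)|beta><beta|\n# projectors is just the Husimi Q function sampled on the same grid.\nq_qutip = qfunc(rho, xvec, yvec, g=2)\nq_tf = data.numpy().reshape(num_grid, num_grid)\nprint(\"max abs difference:\", np.max(np.abs(q_qutip - q_tf)))",
    "_____no_output_____"
    ],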
[
"# The thermal state distribution\nplot_wigner_fock_distribution(thermal_dm(hilbert_size, nth))",
"_____no_output_____"
]
],
[
[
"# Apply the convolution and show the simulated data that we can obtain experimentally",
"_____no_output_____"
]
],
[
[
"x = tf.reshape(tf.cast(data, tf.float64), (1, num_grid, num_grid, 1))\n\nconved = GaussianConv(gauss_kernel)(x)\n\nkernel = gauss_kernel/tf.reduce_max(gauss_kernel)\ndiff = conved.numpy().reshape(num_grid, num_grid)/tf.reduce_max(conved) - kernel.numpy().reshape(num_grid, num_grid)\ndiff = tf.convert_to_tensor(diff)\n\n\n# Collect all the data in an array for plotting\nmatrices = [gauss_kernel.reshape((num_grid, num_grid)), x.numpy().reshape((num_grid, num_grid)), \n conved.numpy().reshape((num_grid, num_grid)), diff.numpy().reshape((num_grid, num_grid))]",
"_____no_output_____"
],
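    [
    "# Hypothetical sanity check, not part of the original notebook: gaus2d is\n# normalized to unit area, so the discrete sum of the kernel times the grid\n# cell area should be close to 1, and the convolution keeps the grid shape.\ndx = xvec[1] - xvec[0]\ndy = yvec[1] - yvec[0]\nprint(\"kernel area ~\", gauss_kernel.sum() * dx * dy)\nprint(\"convolved shape:\", conved.shape)",
    "_____no_output_____"
    ],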
[
"fig, ax = plt.subplots(1, 4, figsize=(fig_width, 0.35*2.5*fig_height), dpi=80, facecolor=\"white\",\n sharey=False, sharex=True)\n\naxes = [ax[0], ax[1], ax[2], ax[3]]\n\naspect = 'equal'\n\nfor i in range(4):\n im = axes[i].pcolor(xvec, yvec,\n matrices[i]/np.max(matrices[i]), cmap=\"hot\", vmin=0, vmax=1)\n axes[i].set_aspect(\"equal\")\n axes[i].set_xticklabels([\"\", \"\", \"\"])\n axes[i].set_yticklabels([\"\", \"\", \"\"], fontsize=6)\n # axes[i].set_xlabel(r\"$Re(\\beta)$\", fontsize=6) \naxes[0].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n\nlabels = [\"Background\\n(Gaussian)\", \"State\", \"Data\\n(Convolution)\", \"Subtracted\"]\n\nfor i in range(len(labels)):\n axes[i].set_title(labels[i], fontsize=6)\n\n# plt.subplots_adjust(wspace=-.4)\n# cbar = fig.colorbar(im, ax=axes, pad=0.026, fraction = 0.046)\n# cbar.ax.set_yticklabels([\"0\", \"0.5\", \"1\"])\n\naxes[0].set_ylabel(r\"Im$(\\beta)$\", labelpad=-8, fontsize=6)\n\n######################################################################################################",
"/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_17691/3526584348.py:9: MatplotlibDeprecationWarning: shading='flat' when X and Y have the same dimensions as C is deprecated since 3.3. Either specify the corners of the quadrilaterals with X and Y, or pass shading='auto', 'nearest' or 'gouraud', or set rcParams['pcolor.shading']. This will become an error two minor releases later.\n im = axes[i].pcolor(xvec, yvec,\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_17691/3526584348.py:12: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_xticklabels([\"\", \"\", \"\"])\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_17691/3526584348.py:13: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_yticklabels([\"\", \"\", \"\"], fontsize=6)\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_17691/3526584348.py:15: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[0].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n"
]
],
[
[
"# QST CGAN with a Gaussian convolution layer",
"_____no_output_____"
]
],
[
[
"def GeneratorConvQST(hilbert_size, num_points, noise=0.02, kernel=None):\n \"\"\"\n A tensorflow generative model which can be called as \n >> generator([A, x])\n where A is the set of all measurement operators\n transformed into the shape (batch_size, hilbert_size, hilbert_size, num_points*2)\n This can be done using the function `convert_to_real_ops` which\n takes a set of complex operators shaped as (batch_size, num_points, hilbert_size, hilbert_size)\n and converts it to this format which is easier to run convolution operations on.\n\n x is the measurement statistics (frequencies) represented by a vector of shape\n [batch_size, num_points] where we consider num_points different operators and their\n expectation values.\n\n Args:\n hilbert_size (int): Hilbert size of the output matrix\n This needs to be 32 now. We can adjust \n the network architecture to allow it to\n automatically change its outputs according\n to the hilbert size in future\n num_points (int): Number of different measurement operators\n \n Returns:\n generator: A TensorFlow model callable as\n >> generator([A, x])\n \n \"\"\"\n initializer = tf.random_normal_initializer(0., 0.02)\n \n n = int(hilbert_size/2)\n \n ops = tf.keras.layers.Input(shape=[hilbert_size, hilbert_size, num_points*2],\n name='operators')\n inputs = tf.keras.Input(shape=(num_points), name = \"inputs\")\n \n\n x = tf.keras.layers.Dense(16*16*2, use_bias=False,\n kernel_initializer = tf.random_normal_initializer(0., 0.02),\n )(inputs)\n x = tf.keras.layers.LeakyReLU()(x)\n x = tf.keras.layers.Reshape((16, 16, 2))(x)\n\n x = tf.keras.layers.Conv2DTranspose(64, 4, use_bias=False,\n strides=2,\n padding='same',\n kernel_initializer=initializer)(x)\n x = tfa.layers.InstanceNormalization(axis=3)(x)\n x = tf.keras.layers.LeakyReLU()(x)\n x = tf.keras.layers.Conv2DTranspose(64, 4, use_bias=False,\n strides=1,\n padding='same',\n kernel_initializer=initializer)(x)\n x = tfa.layers.InstanceNormalization(axis=3)(x)\n x = tf.keras.layers.LeakyReLU()(x)\n x = tf.keras.layers.Conv2DTranspose(32, 4, use_bias=False,\n strides=1,\n padding='same',\n kernel_initializer=initializer)(x)\n # x = tfa.layers.InstanceNormalization(axis=3)(x)\n # x = tf.keras.layers.LeakyReLU()(x)\n # y = tf.keras.layers.Conv2D(8, 5, padding='same')(ops)\n # out = x\n # x = tf.keras.layers.concatenate([x, y])\n x = tf.keras.layers.Conv2DTranspose(2, 4, use_bias=False,\n strides=1,\n padding='same',\n kernel_initializer=initializer)(x)\n x = DensityMatrix()(x)\n complex_ops = convert_to_complex_ops(ops)\n # prefactor = (0.25*g**2/np.pi)\n prefactor = 1.\n x = Expectation()(complex_ops, x, prefactor)\n \n x = tf.keras.layers.Reshape((num_grid, num_grid, 1))(x)\n\n x = GaussianConv(kernel, trainable=False)(x)\n # x = x/tf.reduce_max(x)\n\n x = tf.keras.layers.Reshape((num_points,))(x)\n \n # y = kernel/tf.reduce_max(kernel)\n # y = tf.reshape(y, (1, num_points))\n # x = x - y\n\n return tf.keras.Model(inputs=[ops, inputs], outputs=x)",
"_____no_output_____"
],
[
"tf.keras.backend.clear_session()\ngenerator = GeneratorConvQST(hilbert_size, num_points, kernel=gauss_kernel)\ndiscriminator = Discriminator(hilbert_size, num_points)\n",
"_____no_output_____"
],
[
"density_layer_idx = None\n\nfor i, layer in enumerate(generator.layers):\n if \"density_matrix\" in layer._name:\n density_layer_idx = i\n break\n\nprint(density_layer_idx)\nmodel_dm = tf.keras.Model(inputs=generator.input, outputs=generator.layers[density_layer_idx].output)\n\n@dataclass\nclass LossHistory:\n \"\"\"Class for keeping track of loss\"\"\"\n generator: list\n discriminator: list\n l1: list\n\nloss = LossHistory([], [], [])\nfidelities = []\n\n\ninitial_learning_rate = 0.0002\n\nlr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(initial_learning_rate,\n decay_steps=10000,\n decay_rate=.96,\n staircase=False)\n\nlam = 10.\n\ngenerator_optimizer = tf.keras.optimizers.Adam(lr_schedule, 0.5, 0.5)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(lr_schedule, 0.5, 0.5)",
"17\n"
],
[
"def train_step(A, x):\n \"\"\"Takes one step of training for the full A matrix representing the\n measurement operators and data x.\n\n Note that the `generator`, `discriminator`, `generator_optimizer` and the\n `discriminator_optimizer` has to be defined before calling this function.\n\n Args:\n A (tf.Tensor): A tensor of shape (m, hilbert_size, hilbert_size, n x 2)\n where m=1 for a single reconstruction, and n represents\n the number of measured operators. We split the complex\n operators as real and imaginary in the last axis. The \n helper function `convert_to_real_ops` can be used to\n generate the matrix A with a set of complex operators\n given by `ops` with shape (1, n, hilbert_size, hilbert_size)\n by calling `A = convert_to_real_ops(ops)`.\n\n x (tf.Tensor): A tensor of shape (m, n) with m=1 for a single\n reconstruction and `n` representing the number of\n measurements. \n \"\"\"\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n gen_output = generator([A, x], training=True)\n\n disc_real_output = discriminator([A, x, x], training=True)\n disc_generated_output = discriminator([A, x, gen_output], training=True)\n\n gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(\n disc_generated_output, gen_output, x, lam=lam\n )\n disc_loss = discriminator_loss(disc_real_output, disc_generated_output)\n\n generator_gradients = gen_tape.gradient(\n gen_total_loss, generator.trainable_variables\n )\n discriminator_gradients = disc_tape.gradient(\n disc_loss, discriminator.trainable_variables\n )\n\n generator_optimizer.apply_gradients(\n zip(generator_gradients, generator.trainable_variables)\n )\n discriminator_optimizer.apply_gradients(\n zip(discriminator_gradients, discriminator.trainable_variables)\n )\n\n loss.generator.append(gen_gan_loss)\n loss.l1.append(gen_l1_loss)\n loss.discriminator.append(disc_loss)\n\nmax_iterations = 300\npbar = tqdm(range(max_iterations))\n\nfor i in pbar:\n train_step(A, conved.numpy().reshape(-1, num_points))\n density_matrix = model_dm([A, conved.numpy().reshape(-1, num_points)])\n rho_reconstructed = Qobj(density_matrix.numpy().reshape(rho.shape))\n f = fidelity(rho_reconstructed, rho)\n fidelities.append(f)\n pbar.set_description(\"Fidelity {} | Gen loss {} | L1 loss {} | Disc loss {}\".format(f, loss.generator[-1], loss.l1[-1], loss.discriminator[-1]))",
"Fidelity 0.6311828335073768 | Gen loss 0.692855954170227 | L1 loss 0.0022305016119748343 | Disc loss 1.3862997889518738: 100%|██████████| 300/300 [00:58<00:00, 5.15it/s]\n"
],
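    [
    "# Hypothetical diagnostic cell, not part of the original notebook: plot the\n# reconstruction fidelity and the GAN losses collected during the loop above.\nfig, ax = plt.subplots(1, 2, figsize=(9, 3))\nax[0].plot(fidelities)\nax[0].set_xlabel(\"Iteration\")\nax[0].set_ylabel(\"Fidelity\")\nax[1].plot(loss.generator, label=\"generator\")\nax[1].plot(loss.discriminator, label=\"discriminator\")\nax[1].plot(loss.l1, label=\"L1\")\nax[1].set_xlabel(\"Iteration\")\nax[1].legend()\nplt.show()",
    "_____no_output_____"
    ],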
[
"rho_reconstructed = Qobj(density_matrix.numpy().reshape(rho.shape))",
"_____no_output_____"
],
[
"fig, ax = plot_wigner_fock_distribution(rho_reconstructed, alpha_max=beta_max_x, colorbar=True, figsize=(9, 3.5))\nplt.title(\"Fidelity {:.4}\".format(fidelity(rho_reconstructed, rho)))\nplt.suptitle(\"QST CGAN reconstruction\")\nplt.show()",
"_____no_output_____"
],
[
"rho_tf_reconstructed = dm_to_tf([rho_reconstructed])\ndata_reconstructed = batched_expect(ops_tf, rho_tf_reconstructed)\nreconstructed_x = tf.reshape(tf.cast(data_reconstructed, tf.float64), (1, num_grid, num_grid, 1))\nreconstructed_conved = GaussianConv(gauss_kernel)(reconstructed_x)\ndiff2 = reconstructed_conved.numpy().reshape(num_grid, num_grid)/tf.reduce_max(reconstructed_conved) - kernel.numpy().reshape(num_grid, num_grid)\n\nmatrices2 = [gauss_kernel.reshape((num_grid, num_grid)), reconstructed_x.numpy().reshape((num_grid, num_grid)), \n reconstructed_conved.numpy().reshape((num_grid, num_grid)), diff2.numpy().reshape((num_grid, num_grid))]\n",
"_____no_output_____"
],
[
"figpath = \"figures/\"\n\nfig, ax = plt.subplots(2, 4, figsize=(fig_width, 0.35*2.5*fig_height), dpi=80, facecolor=\"white\",\n sharey=False, sharex=True)\n\naxes = [ax[0, 0], ax[0, 1], ax[0, 2], ax[0, 3]]\n\naspect = 'equal'\n\nfor i in range(4):\n im = axes[i].pcolor(xvec, yvec,\n matrices[i]/np.max(matrices[i]), cmap=\"hot\", vmin=0, vmax=1)\n axes[i].set_aspect(\"equal\")\n axes[i].set_xticklabels([\"\", \"\", \"\"])\n axes[i].set_yticklabels([\"\", \"\", \"\"], fontsize=6)\n # axes[i].set_xlabel(r\"$Re(\\beta)$\", fontsize=6) \naxes[0].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n\nlabels = [\"Background\\n(Gaussian)\", \"State\", \"Data\\n(Convolution)\", \"Subtracted\"]\n\nfor i in range(len(labels)):\n axes[i].set_title(labels[i], fontsize=6)\n\n# plt.subplots_adjust(wspace=-.4)\n# cbar = fig.colorbar(im, ax=axes, pad=0.026, fraction = 0.046)\n# cbar.ax.set_yticklabels([\"0\", \"0.5\", \"1\"])\n\naxes[0].set_ylabel(r\"Im$(\\beta)$\", labelpad=-8, fontsize=6)\n\nplt.text(x = -24.5, y=30, s=\"cat state\", fontsize=8)\n\n######################################################################################################\n\naxes = [ax[1, 0], ax[1, 1], ax[1, 2], ax[1, 3]]\n\nfor i in range(1, 4):\n axes[i].pcolor(xvec, yvec,\n matrices2[i]/np.max(matrices2[i]), cmap=\"hot\", vmin=0, vmax=1)\n axes[i].set_aspect(\"equal\")\n axes[i].set_xticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n axes[i].set_yticklabels([\"\", \"\", \"\"])\n axes[i].set_xlabel(r\"Re$(\\beta)$\", fontsize=6, labelpad=-4) \n\n\nlabels = [\"Background\\n(Gaussian)\", \"Reconstructed\\nState\", r\"$Convoluted\\noutput$\"+\"\\noutput\", \"Subtracted\"]\n\n# for i in range(1, len(labels)):\n# axes[i].set_title(labels[i], fontsize=6)\n\nplt.subplots_adjust(hspace=0.7)\n# cbar = fig.colorbar(im, ax=axes, pad=0.026, fraction = 0.046)\n# cbar.ax.set_yticklabels([\"0\", \"0.5\", \"1\"])\nplt.suptitle(\"QST-CGAN reconstruction\", x=.45, y=.52, fontsize=8)\n\naxes[1].set_ylabel(r\"$Im(\\beta)$\", labelpad=-8, fontsize=6)\naxes[1].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\naxes[1].set_yticklabels([\"-5\", \"\", \"5\"])\naxes[0].set_visible(False)\n\ncbar = plt.colorbar(im, ax=ax.ravel().tolist(), aspect=40, ticks=[0, 0.5, 1], pad=0.02)\ncbar.set_ticklabels([\"0\", \"0.5\", \"1\"])\ncbar.ax.tick_params(labelsize=6) \n# plt.text(x = -44.5, y=30, s=\"(a)\", fontsize=8)\n# plt.savefig(figpath+\"fig-15a-fock-reconstruction.pdf\", bbox_inches=\"tight\", pad_inches=0)",
"/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:11: MatplotlibDeprecationWarning: shading='flat' when X and Y have the same dimensions as C is deprecated since 3.3. Either specify the corners of the quadrilaterals with X and Y, or pass shading='auto', 'nearest' or 'gouraud', or set rcParams['pcolor.shading']. This will become an error two minor releases later.\n im = axes[i].pcolor(xvec, yvec,\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:14: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_xticklabels([\"\", \"\", \"\"])\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:15: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_yticklabels([\"\", \"\", \"\"], fontsize=6)\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:17: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[0].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:37: MatplotlibDeprecationWarning: shading='flat' when X and Y have the same dimensions as C is deprecated since 3.3. Either specify the corners of the quadrilaterals with X and Y, or pass shading='auto', 'nearest' or 'gouraud', or set rcParams['pcolor.shading']. This will become an error two minor releases later.\n axes[i].pcolor(xvec, yvec,\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:40: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_xticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:41: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[i].set_yticklabels([\"\", \"\", \"\"])\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:56: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[1].set_yticklabels([\"-5\", \"\", \"5\"], fontsize=6)\n/var/folders/8s/tfpsk_fx609f8w7z__yzz9vh0000gn/T/ipykernel_5494/1574640556.py:57: UserWarning: FixedFormatter should only be used together with FixedLocator\n axes[1].set_yticklabels([\"-5\", \"\", \"5\"])\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb72a0d9dad626860f53fb2fd1d9b3010a6b87e2 | 426,639 | ipynb | Jupyter Notebook | teachopencadd/talktorials/T008_query_pdb/talktorial.ipynb | PabloSoto1995/teachopencadd | cf56892830b7c150f77d1e82f52e2c80cc6555ac | [
"CC-BY-4.0"
]
| 2 | 2021-06-09T12:09:19.000Z | 2021-09-25T08:35:06.000Z | teachopencadd/talktorials/T008_query_pdb/talktorial.ipynb | PabloSoto1995/teachopencadd | cf56892830b7c150f77d1e82f52e2c80cc6555ac | [
"CC-BY-4.0"
]
| null | null | null | teachopencadd/talktorials/T008_query_pdb/talktorial.ipynb | PabloSoto1995/teachopencadd | cf56892830b7c150f77d1e82f52e2c80cc6555ac | [
"CC-BY-4.0"
]
| 1 | 2021-01-18T01:45:28.000Z | 2021-01-18T01:45:28.000Z | 275.428664 | 246,356 | 0.914937 | [
[
[
"# T008 · Protein data acquisition: Protein Data Bank (PDB) \n\nAuthors:\n\n- Anja Georgi, CADD seminar, 2017, Charité/FU Berlin\n- Majid Vafadar, CADD seminar, 2018, Charité/FU Berlin\n- Jaime Rodríguez-Guerra, Volkamer lab, Charité\n- Dominique Sydow, Volkamer lab, Charité",
"_____no_output_____"
],
[
"__Talktorial T008__: This talktorial is part of the TeachOpenCADD pipeline described in the first TeachOpenCADD publication ([_J. Cheminform._ (2019), **11**, 1-7](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x)), comprising of talktorials T001-T010.",
"_____no_output_____"
],
[
"## Aim of this talktorial\n\nIn this talktorial, we conduct the groundwork for the next talktorial where we will generate a ligand-based ensemble pharmacophore for EGFR. Therefore, we \n(i) fetch all PDB IDs for EGFR from the PDB database, \n(ii) retrieve five protein-ligand structures, which have the best structural quality and are derived from X-ray crystallography, and \n(iii) align all structures to each in 3D as well as extract and save the ligands to be used in the next talktorial.",
"_____no_output_____"
],
[
"### Contents in Theory\n\n* Protein Data Bank (PDB)\n* Python package `pypdb`",
"_____no_output_____"
],
[
"### Contents in Practical\n\n* Select query protein\n* Get all PDB IDs for query protein\n* Get statistic on PDB entries for query protein\n* Get meta information on PDB entries\n* Filter and sort meta information on PDB entries\n* Get meta information of ligands from top structures\n* Draw top ligand molecules\n* Create protein-ligand ID pairs\n* Get the PDB structure files\n* Align PDB structures",
"_____no_output_____"
],
[
"### References\n\n* Protein Data Bank \n([PDB website](http://www.rcsb.org/))\n* `pypdb` python package \n([_Bioinformatics_ (2016), **1**, 159-60](https://academic.oup.com/bioinformatics/article-lookup/doi/10.1093/bioinformatics/btv543), [documentation](http://www.wgilpin.com/pypdb_docs/html/))\n* Molecular superposition with the python package `opencadd` ([repository](https://github.com/volkamerlab/opencadd))",
"_____no_output_____"
],
[
"## Theory",
"_____no_output_____"
],
[
"### Protein Data Bank (PDB)\n\nThe Protein Data Bank (PDB) is one of the most comprehensive structural biology information database and a key resource in areas of structural biology, such as structural genomics and drug design ([PDB website](http://www.rcsb.org/)).\n\nStructural data is generated from structural determination methods such as X-ray crystallography (most common method), nuclear magnetic resonance (NMR), and cryo electron microscopy (cryo-EM). \nFor each entry, the database contains (i) the 3D coordinates of the atoms and the bonds connecting these atoms for proteins, ligand, cofactors, water molecules, and ions, as well as (ii) meta information on the structural data such as the PDB ID, the authors, the deposition date, the structural determination method used and the structural resolution.\n\nThe structural resolution is a measure of the quality of the data that has been collected and has the unit Å (Angstrom). The lower the value, the higher the quality of the structure. \n\nThe PDB website offers a 3D visualization of the protein structures (with ligand interactions if available) and a structure quality metrics, as can be seen for the PDB entry of an example epidermal growth factor receptor (EGFR) with the PDB ID [3UG5](https://www.rcsb.org/structure/3UG5).\n\n\n\nFigure 1: The protein structure (in gray) with an interacting ligand (in green) is shown for an example epidermal growth factor receptor (EGFR) with the PDB ID 3UG5 (figure by Dominique Sydow).",
"_____no_output_____"
],
[
"### Python package `pypdb`\n\n`pypdb` is a python programming interface for the PDB and works exclusively in Python 3 ([_Bioinformatics_ (2016), **1**, 159-60](https://academic.oup.com/bioinformatics/article-lookup/doi/10.1093/bioinformatics/btv543), [documentation](http://www.wgilpin.com/pypdb_docs/html/)). \nThis package facilitates the integration of automatic PDB searches within bioinformatics workflows and simplifies the process of performing multiple searches based on the results of existing searches. \nIt also allows an advanced querying of information on PDB entries. \nThe PDB currently uses a RESTful API that allows for the retrieval of information via standard HTML vocabulary. `pypdb` converts these objects into XML strings. ",
"_____no_output_____"
],
[
"## Practical",
"_____no_output_____"
]
],
[
[
"import collections\nimport logging\nimport pathlib\nimport time\nimport warnings\n\nimport pandas as pd\nfrom tqdm.auto import tqdm\nimport redo\nimport requests_cache\nimport nglview\nimport pypdb\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem import PandasTools\n\nfrom opencadd.structure.superposition.api import align, METHODS\nfrom opencadd.structure.core import Structure\n\n# Disable some unneeded warnings\nlogger = logging.getLogger(\"opencadd\")\nlogger.setLevel(logging.ERROR)\nwarnings.filterwarnings(\"ignore\")\n\n# cache requests -- this will speed up repeated queries to PDB\nrequests_cache.install_cache(\"rcsb_pdb\", backend=\"memory\")",
"_____no_output_____"
],
[
"# define paths\nHERE = pathlib.Path(_dh[-1])\nDATA = HERE / \"data\"",
"_____no_output_____"
]
],
[
[
"### Select query protein\n\nWe use EGFR as query protein for this talktorial. The UniProt ID of EGFR is `P00533`, which will be used in the following to query the PDB database.",
"_____no_output_____"
],
[
"### Get all PDB IDs for query protein\n\nFirst, we get all PDB structures for our query protein EGFR, using the `pypdb` functions `make_query` and `do_search`.",
"_____no_output_____"
]
],
[
[
"search_dict = pypdb.make_query(\"P00533\")\nfound_pdb_ids = pypdb.do_search(search_dict)\n\nprint(\"Sample PDB IDs found for query:\", *found_pdb_ids[:3], \"...\")\nprint(\"Number of EGFR structures found:\", len(found_pdb_ids))",
"Sample PDB IDs found for query: 1IVO 1M14 1M17 ...\nNumber of EGFR structures found: 214\n"
]
],
[
[
"### Get statistics on PDB entries for query protein \n\nNext, we ask the question: How many PDB entries are deposited in the PDB for EGFR per year and how many in total?\n\nUsing `pypdb`, we can find all deposition dates of EGFR structures from the PDB database. The number of deposited structures was already determined and is needed to set the parameter `max_results` of the function `find_dates`.",
"_____no_output_____"
]
],
[
[
"# Query database\ndates = pypdb.find_dates(\"P00533\", max_results=len(found_pdb_ids))",
"_____no_output_____"
],
[
"# Example of the first three deposition dates\ndates[:3]",
"_____no_output_____"
]
],
[
[
"We extract the year from the deposition dates and calculate a depositions-per-year histogram.",
"_____no_output_____"
]
],
[
[
"# Extract year\nyears = pd.Series([int(date[:4]) for date in dates])\nbins = years.max() - years.min() + 1\naxes = years.hist(bins=bins)\naxes.set_ylabel(\"New entries per year\")\naxes.set_xlabel(\"Year\")\naxes.set_title(\"PDB entries for EGFR\");",
"_____no_output_____"
]
],
[
[
"### Get meta information for PDB entries\n\nWe use `describe_pdb` to get meta information about the structures, which is stored per structure as a dictionary.\n\nNote: we only fetch meta information on PDB structures here, we do not fetch the structures (3D coordinates), yet.\n\n> The `redo.retriable` line is a _decorator_. This wraps the function and provides extra functionality. In this case, it will retry failed queries automatically (10 times maximum).",
"_____no_output_____"
]
],
[
[
"@redo.retriable(attempts=10, sleeptime=2)\ndef describe_one_pdb_id(pdb_id):\n \"\"\"Fetch meta information from PDB.\"\"\"\n described = pypdb.describe_pdb(pdb_id)\n if described is None:\n print(f\"! Error while fetching {pdb_id}, retrying ...\")\n raise ValueError(f\"Could not fetch PDB id {pdb_id}\")\n return described",
"_____no_output_____"
],
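    [
    "# Hypothetical toy example, not part of the original talktorial: it only\n# illustrates how the redo.retriable decorator retries a failing function.\n# The flaky() function and its call counter are made up for this demonstration.\n_calls = {\"n\": 0}\n\n\[email protected](attempts=3, sleeptime=2)\ndef flaky():\n    _calls[\"n\"] += 1\n    if _calls[\"n\"] < 3:\n        raise ValueError(\"transient failure\")\n    return \"succeeded\"\n\n\nprint(flaky(), \"after\", _calls[\"n\"], \"calls\")",
    "_____no_output_____"
    ],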
[
"pdbs = [describe_one_pdb_id(pdb_id) for pdb_id in found_pdb_ids]\npdbs[0]",
"_____no_output_____"
]
],
[
[
"### Filter and sort meta information on PDB entries\n\nSince we want to use the information to filter for relevant PDB structures, we convert the data set from dictionary to DataFrame for easier handling.",
"_____no_output_____"
]
],
[
[
"pdbs = pd.DataFrame(pdbs)\npdbs.head()",
"_____no_output_____"
],
[
"print(f\"Number of PDB structures for EGFR: {len(pdbs)}\")",
"Number of PDB structures for EGFR: 214\n"
]
],
[
[
"We start filtering our dataset based on the following criteria:",
"_____no_output_____"
],
[
"#### 1. Experimental method: X-ray diffraction\n\nWe only keep structures resolved by `X-RAY DIFFRACTION`, the most commonly used structure determination method. ",
"_____no_output_____"
]
],
[
[
"pdbs = pdbs[pdbs.expMethod == \"X-RAY DIFFRACTION\"]\nprint(f\"Number of PDB structures for EGFR from X-ray: {len(pdbs)}\")",
"Number of PDB structures for EGFR from X-ray: 208\n"
]
],
[
[
"#### 2. Structural resolution\n\nWe only keep structures with a resolution equal or lower than 3 Å. The lower the resolution value, the higher is the quality of the structure (-> the higher is the certainty that the assigned 3D coordinates of the atoms are correct). Below 3 Å, atomic orientations can be determined and therefore is often used as threshold for structures relevant for structure-based drug design.",
"_____no_output_____"
]
],
[
[
"pdbs.resolution = pdbs.resolution.astype(float) # convert to floats\npdbs = pdbs[pdbs.resolution <= 3.0]\nprint(f\"Number of PDB entries for EGFR from X-ray with resolution <= 3.0 Angstrom: {len(pdbs)}\")",
"Number of PDB entries for EGFR from X-ray with resolution <= 3.0 Angstrom: 173\n"
]
],
[
[
"We sort the data set by the structural resolution. ",
"_____no_output_____"
]
],
[
[
"pdbs = pdbs.sort_values([\"resolution\"], ascending=True, na_position=\"last\")",
"_____no_output_____"
]
],
[
[
"We check the top PDB structures (sorted by resolution): ",
"_____no_output_____"
]
],
[
[
"pdbs.head()[[\"structureId\", \"resolution\"]]",
"_____no_output_____"
]
],
[
[
"#### 3. Ligand-bound structures\n\nSince we will create ensemble ligand-based pharmacophores in the next talktorial, we remove all PDB structures from our DataFrame, which do not contain a bound ligand: we use the `pypdb` function `get_ligands` to check/retrieve the ligand(s) from a PDB structure. PDB-annotated ligands can be ligands, cofactors, but also solvents and ions. In order to filter only ligand-bound structures, we (i) remove all structures without any annotated ligand and (ii) remove all structures that do not contain any ligands with a molecular weight (MW) greater than 100 Da (Dalton), since many solvents and ions weight less. Note: this is a simple, but not comprehensive exclusion of solvents and ions. ",
"_____no_output_____"
]
],
[
[
"# Get all PDB IDs from DataFrame\npdb_ids = pdbs[\"structureId\"].tolist()",
"_____no_output_____"
],
[
"# Remove structures\n# (i) without ligand and\n# (ii) without any ligands with molecular weight (MW) greater than 100 Da (Dalton)\n\n\[email protected](attempts=10, sleeptime=2)\ndef get_ligands(pdb_id):\n \"\"\"Decorate pypdb.get_ligands so it retries after a failure.\"\"\"\n return pypdb.get_ligands(pdb_id)\n\n\nmw_cutoff = 100.0 # Molecular weight cutoff in Da\n\n# This database query may take a moment\npassed_pdb_ids = []\nremoved_pdb_ids = []\nprogressbar = tqdm(pdb_ids)\nfor pdb_id in progressbar:\n progressbar.set_description(f\"Processing {pdb_id}...\")\n ligand_dict = get_ligands(pdb_id)\n\n # (i) Remove structure if no ligand present\n if ligand_dict[\"ligandInfo\"] is None:\n removed_pdb_ids.append(pdb_id) # Store ligand-free PDB IDs\n\n # (ii) Remove structure if not a single annotated ligand has a MW above mw_cutoff\n else:\n # Get ligand information\n ligands = ligand_dict[\"ligandInfo\"][\"ligand\"]\n # Technicality: if only one ligand, cast dict to list (for the subsequent list comprehension)\n if type(ligands) == dict:\n ligands = [ligands]\n # Get MW per annotated ligand\n mw_list = [float(ligand[\"@molecularWeight\"]) for ligand in ligands]\n # Remove structure if not a single annotated ligand has a MW above mw_cutoff\n if sum([mw > mw_cutoff for mw in mw_list]) == 0:\n removed_pdb_ids.append(pdb_id) # Store ligand-free PDB IDs\n else:\n passed_pdb_ids.append(pdb_id) # Remove ligand-free PDB IDs from list",
"_____no_output_____"
],
[
"print(\n \"PDB structures without a ligand (removed from our data set):\",\n *removed_pdb_ids,\n)\nprint(\"Number of structures with ligand:\", len(passed_pdb_ids))",
"PDB structures without a ligand (removed from our data set): 3P0Y 2EB2 1M14 2GS2 3GOP 5EDP 2RFE 5WB8 4I1Z\nNumber of structures with ligand: 164\n"
]
],
[
[
"### Get meta information of ligands from top structures\n\nIn the next talktorial, we will build ligand-based ensemble pharmacophores from the top `top_num` structures with the highest resolution.",
"_____no_output_____"
]
],
[
[
"top_num = 8 # Number of top structures\nselected_pdb_ids = passed_pdb_ids[:top_num]\nselected_pdb_ids",
"_____no_output_____"
]
],
[
[
"The selected highest resolution PDB entries can contain ligands targeting different binding sites, e.g. allosteric and orthosteric ligands, which would hamper ligand-based pharmacophore generation. Thus, we will focus on the following 4 structures, which contain ligands in the orthosteric binding pocket. The code provided later in the notebook can be used to verify this.",
"_____no_output_____"
]
],
[
[
"selected_pdb_ids = [\"5UG9\", \"5HG8\", \"5UG8\", \"3POZ\"]",
"_____no_output_____"
]
],
[
[
"We fetch the PDB information about the top `top_num` ligands using `get_ligands`, to be stored as *csv* file (as dictionary per ligand).\n\nIf a structure contains several ligands, we select the largest ligand. Note: this is a simple, but not comprehensive method to select ligand binding the binding site of a protein. This approach may also select a cofactor bound to the protein. Therefore, please check the automatically selected top ligands visually before further usage.",
"_____no_output_____"
]
],
[
[
"ligands_list = []\n\nfor pdb_id in selected_pdb_ids:\n ligands = get_ligands(pdb_id)[\"ligandInfo\"][\"ligand\"]\n\n # Technicality: if only one ligand, cast dict to list (for the subsequent list comprehension)\n if isinstance(ligands, dict):\n ligands = [ligands]\n\n weight = 0\n this_lig = {}\n\n # If several ligands contained, take largest\n for ligand in ligands:\n if float(ligand[\"@molecularWeight\"]) > weight:\n this_ligand = ligand\n weight = float(ligand[\"@molecularWeight\"])\n\n ligands_list.append(this_ligand)",
"_____no_output_____"
],
[
"# NBVAL_CHECK_OUTPUT\n# Change the format to DataFrame\nligands = pd.DataFrame(ligands_list)\nligands",
"_____no_output_____"
],
[
"ligands.to_csv(DATA / \"PDB_top_ligands.csv\", header=True, index=False)",
"_____no_output_____"
]
],
[
[
"### Draw top ligand molecules",
"_____no_output_____"
]
],
[
[
"PandasTools.AddMoleculeColumnToFrame(ligands, \"smiles\")\nDraw.MolsToGridImage(\n mols=list(ligands.ROMol),\n legends=list(ligands[\"@chemicalID\"] + \", \" + ligands[\"@structureId\"]),\n molsPerRow=top_num,\n)",
"_____no_output_____"
]
],
[
[
"### Create protein-ligand ID pairs",
"_____no_output_____"
]
],
[
[
"# NBVAL_CHECK_OUTPUT\npairs = collections.OrderedDict(zip(ligands[\"@structureId\"], ligands[\"@chemicalID\"]))\npairs",
"_____no_output_____"
]
],
[
[
"### Align PDB structures\n\nSince we want to build ligand-based ensemble pharmacophores in the next talktorial, it is necessary to align all structures to each other in 3D. \n\nWe will use one the python package `opencadd` ([repository](https://github.com/volkamerlab/opencadd)), which includes a 3D superposition subpackage to guide the structural alignment of the proteins. The approach is based on superposition guided by sequence alignment provided matched residues. There are other methods in the package, but this simple one will be enough for the task at hand.",
"_____no_output_____"
],
[
"#### Get the PDB structure files\n\nWe now fetch the PDB structure files, i.e. 3D coordinates of the protein, ligand (and if available other atomic or molecular entities such as cofactors, water molecules, and ions) from the PDB using `opencadd.structure.superposition`. \n\nAvailable file formats are *pdb* and *cif*, which store the 3D coordinations of atoms of the protein (and ligand, cofactors, water molecules, and ions) as well as information on bonds between atoms. Here, we work with *pdb* files.",
"_____no_output_____"
]
],
[
[
"# Download PDB structures\nstructures = [Structure.from_pdbid(pdb_id) for pdb_id in pairs]\nstructures",
"_____no_output_____"
]
],
[
[
"#### Extract protein and ligand\n\nExtract protein and ligand from the structure in order to remove solvent and other artifacts of crystallography.",
"_____no_output_____"
]
],
[
[
"complexes = [\n Structure.from_atomgroup(structure.select_atoms(f\"protein or resname {ligand}\"))\n for structure, ligand in zip(structures, pairs.values())\n]\ncomplexes",
"_____no_output_____"
],
[
"# Write complex to file\nfor complex_, pdb_id in zip(complexes, pairs.keys()):\n complex_.write(DATA / f\"{pdb_id}.pdb\")",
"_____no_output_____"
]
],
[
[
"#### Align proteins\n\nAlign complexes (based on protein atoms).",
"_____no_output_____"
]
],
[
[
"results = align(complexes, method=METHODS[\"mda\"])",
"_____no_output_____"
]
],
[
[
"`nglview` can be used to visualize molecular data within Jupyter notebooks. With the next cell we will visualize out aligned protein-ligand complexes.",
"_____no_output_____"
]
],
[
[
"view = nglview.NGLWidget()\nfor complex_ in complexes:\n view.add_component(complex_.atoms)\nview",
"_____no_output_____"
],
[
"view.render_image(trim=True, factor=2, transparent=True);",
"_____no_output_____"
],
[
"view._display_image()",
"_____no_output_____"
]
],
[
[
"#### Extract ligands ",
"_____no_output_____"
]
],
[
[
"ligands = [\n Structure.from_atomgroup(complex_.select_atoms(f\"resname {ligand}\"))\n for complex_, ligand in zip(complexes, pairs.values())\n]\nligands",
"_____no_output_____"
],
[
"for ligand, pdb_id in zip(ligands, pairs.keys()):\n ligand.write(DATA / f\"{pdb_id}_lig.pdb\")",
"_____no_output_____"
]
],
[
[
"We check the existence of all ligand *pdb* files.",
"_____no_output_____"
]
],
[
[
"ligand_files = []\nfor file in DATA.glob(\"*_lig.pdb\"):\n ligand_files.append(file.name)\nligand_files",
"_____no_output_____"
]
],
[
[
"We can also use `nglview` to depict the co-crystallized ligands alone. As we can see, the selected complexes contain ligands populating the same binding pocket and can thus be used in the next talktorial for ligand-based pharmacophore generation.",
"_____no_output_____"
]
],
[
[
"view = nglview.NGLWidget()\nfor component_id, ligand in enumerate(ligands):\n view.add_component(ligand.atoms)\n view.remove_ball_and_stick(component=component_id)\n view.add_licorice(component=component_id)\nview",
"_____no_output_____"
],
[
"view.render_image(trim=True, factor=2, transparent=True);",
"_____no_output_____"
],
[
"view._display_image()",
"_____no_output_____"
]
],
[
[
"## Discussion\n\nIn this talktorial, we learned how to retrieve protein and ligand meta information and structural information from the PDB. We retained only X-ray structures and filtered our data by resolution and ligand availability. Ultimately, we aimed for an aligned set of ligands to be used in the next talktorial for the generation of ligand-based ensemble pharmacophores. \n\nIn order to enrich information about ligands for pharmacophore modeling, it is advisable to not only filter by PDB structure resolution, but also to check for ligand diversity (see **Talktorial 005** on molecule clustering by similarity) and to check for ligand activity (i.e. to include only potent ligands). ",
"_____no_output_____"
],
[
"## Quiz\n\n1. Summarize the kind of data that the Protein Data Bank contains.\n2. Explain what the resolution of a structure stands for and how and why we filter for it in this talktorial.\n3. Explain what an alignment of structures means and discuss the alignment performed in this talktorial.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
cb72abc0e6ce1c9e77df317001264725c34c7224 | 15,281 | ipynb | Jupyter Notebook | Materials/Interfaces.ipynb | FMakhnach/csharp-materials | 6b936c34656170270ca496d5c77225351688955e | [
"MIT"
]
| null | null | null | Materials/Interfaces.ipynb | FMakhnach/csharp-materials | 6b936c34656170270ca496d5c77225351688955e | [
"MIT"
]
| null | null | null | Materials/Interfaces.ipynb | FMakhnach/csharp-materials | 6b936c34656170270ca496d5c77225351688955e | [
"MIT"
]
| null | null | null | 24.4496 | 223 | 0.538708 | [
[
[
"# Интерфейсы",
"_____no_output_____"
],
[
"Интерфейс - контракт, по которому класс, его реализующий, предоставляет какие-то методы.\n\nНаписание кода с опорой на интерфейсы, а не на конкретные типы позволяет:\n- **Переиспользовать код, абстрагируясь от реализации.** Один раз написанный алгоритм сортировки элементов, опирающийся только на интерфейс IComparable, одинаково работает как со встроенными типами, так и с вашими.\n- **Подменять реализацию, в том числе во время исполнения.**\n- **Сделать код более безопасным.** Объект, передаваемый по интерфейсной ссылке предоставляет только ограниченную информацию о своих возможностях.\n- **Не опасаться за последствия (по сравнению с наследованием).** Так как мы не тянем за собой реализацию, не возникает проблем, как с множественным наследованием.",
"_____no_output_____"
],
[
"## 1. Правила определения интерфейсов",
"_____no_output_____"
],
[
"В интерфейсе определяются сигнатуры *экземплярных функциональных* членов класса, кроме конструкторов. \n\nТ.е. недопустимы\n- Поля\n- Конструкторы\n\nВсё остальное - можно:\n- Методы\n- Свойства\n- События\n- Индексаторы\n\nНачиная с C# 8.0 (кажется) можно определять в интерфейса *статические и экземплярные методы с реализацией*. \n\nМодификатор доступа не указывается - он априори public.",
"_____no_output_____"
]
],
[
[
"public interface ISomethingMessy\n{\n // Метод\n void Execute();\n \n // Свойство\n string Message { get; }\n \n // Индексатор\n object this[int index] { get; set; }\n \n // Событие\n event Action MyEvent;\n \n // Лучше не переходить эту черту...\n // --------------------------------\n \n // Статический метод - обязательна реализация\n static void StaticMethod() \n { \n Console.WriteLine(\"interface static method\");\n }\n \n // Дефолтная реализация интерфейса: ДОСТУПНА ТОЛЬКО ПО ИНТЕРФЕЙСНОЙ ССЫЛКЕ \n void SecretMethod()\n {\n Console.WriteLine(\"Your password is 123456\");\n }\n}",
"_____no_output_____"
]
],
[
[
"Пример из стандартной библиотеки - System.IDisposable\n```csharp\npublic interface IDisposable\n{\n void Dispose();\n}\n```",
"_____no_output_____"
],
[
"## 2. Реализация интерфейсов. Наследование",
"_____no_output_____"
]
],
[
[
"using System.IO;\n\nclass Base : IDisposable\n{\n private FileStream fileStream;\n \n // ...\n \n // public void Dispose() { fileStream.Dispose(); }\n}",
"_____no_output_____"
],
[
"using System.IO;\n\nclass Base : IDisposable\n{\n private FileStream fileStream;\n \n // ...\n \n public void Dispose() { fileStream.Dispose(); }\n}\n\nclass Derived : Base\n{\n // ...\n}\n\n// Все наследники класса автоматически реализуют интерфейсы родителя.\nDerived derived = new Derived();\nderived is IDisposable",
"_____no_output_____"
]
],
[
[
"## 3. Также доступны методы класса object",
"_____no_output_____"
]
],
[
[
"IComparable<int> val = 3;\nval.ToString()",
"_____no_output_____"
],
[
"val.GetType()",
"_____no_output_____"
]
],
[
[
"## 4. ~~Реализация~~ Наследование интерфейсов интерфейсами",
"_____no_output_____"
],
[
"Можно расширить интерфейс, отнаследовав от него другой интерфейс. Типы, реализующие интерфейс-ребёнок будут обязаны реализовать функционал обоих интерфейсов.\n\n**Однако это оправдано тогда и только тогда, когда жёсткая связь допустима.**\n\nИначе лучше использовать несколько маленьких интерфейсов согласно **Interface Segregation Principle**.",
"_____no_output_____"
]
],
[
[
"public interface IVehicle\n{\n void MoveTo(float x, float y, float z);\n}\n\npublic interface IWheeledVehicle : IVehicle\n{\n int NumOfWheels { get; }\n}\n\npublic class Car : IWheeledVehicle { }\n",
"_____no_output_____"
]
],
[
[
"Пример наследования интерфейсов из стандартной библиотеки - IEnumerable",
"_____no_output_____"
],
[
"```csharp\npublic interface IEnumerable<out T> : IEnumerable\n{\n IEnumerator<T> GetEnumerator();\n}\n\npublic interface IEnumerable\n{\n IEnumerator GetEnumerator();\n}\n```",
"_____no_output_____"
],
[
"## 5. Явная (explicit) и неявная (implicit) реализации интерфейса",
"_____no_output_____"
],
[
"Однако можно реализовать интерфейс, не предоставив публичную реализацию методов.\n\nЭтого можно добиться, реализовав интерфейс **явно** (explicit). Такая реализация будет доступна **только по соответствующей интерфейсной ссылке**.",
"_____no_output_____"
]
],
[
[
"public class MyClass : IDisposable\n{\n // Неявная реализация интерфейса\n // public void Dispose() { Console.WriteLine(\"Implicit\"); }\n\n // Явная реализация интерфейса\n void IDisposable.Dispose() { Console.WriteLine(\"Explicit\"); }\n}",
"_____no_output_____"
],
[
"MyClass myClass = new MyClass();\nmyClass.Dispose();",
"_____no_output_____"
],
[
"IDisposable disposable = new MyClass();\ndisposable.Dispose();",
"_____no_output_____"
]
],
[
[
"**В чём смысл?**\n\nМожно реализовать несколько интерфейсов, содержащих несколько одинаковых по сигнатуре методов. Если они представляют одинаковый смысл то проблем не возникает - а если они в сущности разные?\n\nС помощью явных реализаций интерфейса можно определить **разное поведение** экземпляра в зависимости от того, по какой ссылке мы вызываем интерфейсный метод.\n\nP.S. Пример супер надуманный, как обычно",
"_____no_output_____"
]
],
[
[
"// \"Исполнитель\"\npublic interface IExecutor\n{\n void Execute();\n}",
"_____no_output_____"
],
[
"// \"Палач\"\npublic interface IExecutioner\n{\n void Execute();\n}",
"_____no_output_____"
],
[
"public class Officer : IExecutor, IExecutioner\n{\n public void Execute() { /* some boring actions */ Console.WriteLine(\"Job executed.\"); }\n \n void IExecutioner.Execute() { /* some murderous actions */ Console.WriteLine(\"Intruder executed.\"); }\n}",
"_____no_output_____"
],
[
"Officer officer = new Officer();\nofficer.Execute();",
"_____no_output_____"
],
[
"IExecutor executor = officer;\nexecutor.Execute();",
"_____no_output_____"
],
[
"IExecutioner executioner = officer;\nexecutioner.Execute();",
"_____no_output_____"
]
],
[
[
"## 6. Обобщённые интерфейсы",
"_____no_output_____"
],
[
"Интерфейсы могут быть обобщёнными, таким образом получив все преимущества обобщений.\n\nИз приятного: можно реализовать один и тот же интерфейс с различными параметрами типа, т.к. *как вы знаете*, обобщённые типы с разными параметрами конструируются в разные типы.",
"_____no_output_____"
]
],
[
[
"public class Number : IComparable<int>, IComparable<double>, IComparable<string>\n{\n private int Value { get; }\n \n public Number(int number)\n {\n Value = number;\n }\n\n public int CompareTo(int other) \n {\n Console.WriteLine(\"Hello from int\");\n return Value.CompareTo(other);\n }\n \n \n public int CompareTo(double other)\n {\n Console.WriteLine(\"Hello from double\");\n return ((double)Value).CompareTo(other);\n }\n \n public int CompareTo(string other)\n {\n Console.WriteLine(\"Hello from string\");\n return ((double)Value).CompareTo(double.Parse(other));\n }\n}",
"_____no_output_____"
],
[
"Number number = new Number(42);",
"_____no_output_____"
],
[
"number.CompareTo(13)",
"_____no_output_____"
],
[
"number.CompareTo(42.5)",
"_____no_output_____"
],
[
"number.CompareTo(\"42\")",
"_____no_output_____"
]
],
[
[
"Можно использовать интерфейсы в ограничениях на аргумент-тип. Если использовать несколько, то аргумент-тип должен реализовать все.",
"_____no_output_____"
]
],
[
[
"public void SayHello<T>(T value) where T : IComparable<int>, IDisposable\n{\n Console.WriteLine(\"Hello!\");\n}",
"_____no_output_____"
],
[
"public class MyClass : IComparable<int> //, IDisposable\n{\n public int CompareTo(int other) => throw new NotImplementedException();\n \n public void Dispose() => throw new NotImplementedException();\n}",
"_____no_output_____"
],
[
"MyClass obj = new MyClass();\nSayHello(obj)",
"_____no_output_____"
]
],
[
[
"## 7. Реализация метода интерфейса по умолчанию",
"_____no_output_____"
],
[
"Начиная с C# 8.0 можно определять реализацию методов интерфейса по умолчанию.\n\nТакая реализация доступна только по интерфейсной ссылке",
"_____no_output_____"
]
],
[
[
"public interface ISummator\n{\n int Sum(IEnumerable<int> values) \n {\n int result = 0;\n foreach(var value in values)\n {\n result += value;\n }\n return result;\n }\n}",
"_____no_output_____"
],
[
"public class MySummator : ISummator\n{\n // Можно переопределить, тогда конкретная реализация полностью перекроет дефолтную\n //public int Sum(IEnumerable<int> values) => values.Count();\n}",
"_____no_output_____"
],
[
"MySummator mySummator = new MySummator();\n\nmySummator.Sum(new int[]{1, 2, 3, 4, 5})",
"_____no_output_____"
],
[
"ISummator summator = new MySummator();\n\nsummator.Sum(new int[] { 1, 2, 3, 4, 5 })",
"_____no_output_____"
]
],
[
[
"## 8. Абстрактный класс или интерфейс?",
"_____no_output_____"
],
[
"**Абстрактный класс:**\n- Является классом, а значит наследуясь от него нельзя наследоваться от других классов;\n- Может определять часть состояния и поведения;\n- Наследование - очень сильная связь;\n\nАбстрактный определяет каркас для нескольких различных реализаций сущности.\n\n**Интерфейс:**\n- Класс может реализовывать сколько угодно интерфейсов;\n- Определяет (в общем случае) только *что* должен делать класс, но не *как* (в общем случае);\n- Реализация интерфейс - слабая связь;\n\nИнтерфейс определяет набор свойств, которыми должна обладать сущность, её некоторый обособленный функционал.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
cb72acba01de0a4002703777628fce2bbdc268f6 | 129,198 | ipynb | Jupyter Notebook | P5.t5_fine-tunning.ipynb | vajp02/AutomaticSummarization | 7bbb970082fb16487eaef6d41e4fbcdf08f02817 | [
"MIT"
]
| null | null | null | P5.t5_fine-tunning.ipynb | vajp02/AutomaticSummarization | 7bbb970082fb16487eaef6d41e4fbcdf08f02817 | [
"MIT"
]
| null | null | null | P5.t5_fine-tunning.ipynb | vajp02/AutomaticSummarization | 7bbb970082fb16487eaef6d41e4fbcdf08f02817 | [
"MIT"
]
| null | null | null | 26.551171 | 401 | 0.46493 | [
[
[
"from utils.t5 import *",
"_____no_output_____"
],
[
"input_data_name = \"claim_LOF_base_0.11_data_explanation_prep_4.pickle\" #\"LOF_base_0.45_0.53_removed_inlier_outlier_23.782_full.pickle\" # \"LOF_base_0.46_0.54_removed_inlier_outlier_0.51_full.pickle\"\ndata_inpit_dir = \"./Data/Selection/\" #\"./Data/Selection/\" \"./Data/Preprocessed/\" \noutput_dir = \"./Data/Models/\"\nsource_column = \"source_text\" #source_text_shorter \" statement_explanation_prep\" #\"explanation_prep\" \"statement_explanation_prep\" #\"source_text_shorter\" # \"source_text_shorter\" source_text\ntarget_column = \"target_text\" #\"shortExplanation_prep\" #\"target_text\"\n\nno_workers = 1\n\nimput_data_path = data_inpit_dir + input_data_name\nnew_model_name = \"d-t5-{}_{}\".format(source_column, input_data_name)",
"_____no_output_____"
],
[
"torch.cuda.get_device_name(0)",
"_____no_output_____"
],
[
"new_model_name",
"_____no_output_____"
],
[
"data = pd.read_pickle(imput_data_path)",
"_____no_output_____"
],
[
"train, dev_test = train_test_split(data, test_size = 0.2, random_state = 42)\ndev, test = train_test_split(dev_test, test_size = 0.5, random_state = 42)",
"_____no_output_____"
],
[
"train['target_text'] = train[target_column]\ntrain['source_text'] = \"summarize: \" + train[source_column]\n\ntrain['target_len'] = train[\"target_text\"].str.split().str.len()\ntrain['source_len'] = train[\"source_text\"].str.split().str.len()\ntrain[['target_len','source_len']].describe()",
"_____no_output_____"
],
[
"sum(train.source_len.to_list())",
"_____no_output_____"
],
[
"len(train[train.target_len>150])",
"_____no_output_____"
],
[
"len(train[train.source_len>1200])",
"_____no_output_____"
],
[
"len(train)",
"_____no_output_____"
],
[
"dev['target_text'] = dev[target_column]\ndev['source_text'] = \"summarize: \" + dev[source_column]\n\ndev['target_len'] = dev[\"target_text\"].str.split().str.len()\ndev['source_len'] = dev[\"source_text\"].str.split().str.len()\ndev[['target_len','source_len']].describe()",
"_____no_output_____"
],
[
"sum(dev.source_len.to_list())",
"_____no_output_____"
],
[
"test['target_text'] = test[target_column]\ntest['source_text'] = \"summarize: \" + test[source_column]\n\ntest['target_len'] = test[\"target_text\"].str.split().str.len()\ntest['source_len'] = test[\"source_text\"].str.split().str.len()\ntest[['target_len','source_len']].describe()",
"_____no_output_____"
],
[
"sum(test.source_len.to_list())",
"_____no_output_____"
],
[
"train = train[[\"target_text\", \"source_text\"]]\ndev = dev[[\"target_text\",\"source_text\"]]",
"_____no_output_____"
],
[
"model = SimpleT5()",
"_____no_output_____"
],
[
"model.from_pretrained(model_type=\"t5\",model_name = \"t5-base\") # large \"google/mt5-base\"",
"_____no_output_____"
],
[
"import gc\n#del data, model\ngc.collect()\nimport torch\ntorch.cuda.empty_cache()",
"_____no_output_____"
],
[
"print(torch.cuda.memory_summary(device=None, abbreviated=False))",
"|===========================================================================|\n| PyTorch CUDA memory summary, device ID 0 |\n|---------------------------------------------------------------------------|\n| CUDA OOMs: 0 | cudaMalloc retries: 0 |\n|===========================================================================|\n| Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed |\n|---------------------------------------------------------------------------|\n| Allocated memory | 0 B | 0 B | 0 B | 0 B |\n| from large pool | 0 B | 0 B | 0 B | 0 B |\n| from small pool | 0 B | 0 B | 0 B | 0 B |\n|---------------------------------------------------------------------------|\n| Active memory | 0 B | 0 B | 0 B | 0 B |\n| from large pool | 0 B | 0 B | 0 B | 0 B |\n| from small pool | 0 B | 0 B | 0 B | 0 B |\n|---------------------------------------------------------------------------|\n| GPU reserved memory | 0 B | 0 B | 0 B | 0 B |\n| from large pool | 0 B | 0 B | 0 B | 0 B |\n| from small pool | 0 B | 0 B | 0 B | 0 B |\n|---------------------------------------------------------------------------|\n| Non-releasable memory | 0 B | 0 B | 0 B | 0 B |\n| from large pool | 0 B | 0 B | 0 B | 0 B |\n| from small pool | 0 B | 0 B | 0 B | 0 B |\n|---------------------------------------------------------------------------|\n| Allocations | 0 | 0 | 0 | 0 |\n| from large pool | 0 | 0 | 0 | 0 |\n| from small pool | 0 | 0 | 0 | 0 |\n|---------------------------------------------------------------------------|\n| Active allocs | 0 | 0 | 0 | 0 |\n| from large pool | 0 | 0 | 0 | 0 |\n| from small pool | 0 | 0 | 0 | 0 |\n|---------------------------------------------------------------------------|\n| GPU reserved segments | 0 | 0 | 0 | 0 |\n| from large pool | 0 | 0 | 0 | 0 |\n| from small pool | 0 | 0 | 0 | 0 |\n|---------------------------------------------------------------------------|\n| Non-releasable allocs | 0 | 0 | 0 | 0 |\n| from large pool | 0 | 0 | 0 | 0 |\n| from small pool | 0 | 0 | 0 | 0 |\n|---------------------------------------------------------------------------|\n| Oversize allocations | 0 | 0 | 0 | 0 |\n|---------------------------------------------------------------------------|\n| Oversize GPU segments | 0 | 0 | 0 | 0 |\n|===========================================================================|\n\n"
],
[
"model.train(train_df = train, #LOF + Bert 11\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-69415620-de6c-329d-ef8d-9c9c937f4850]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert 11\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-69415620-de6c-329d-ef8d-9c9c937f4850]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert tunned 11\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-69415620-de6c-329d-ef8d-9c9c937f4850]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #LOF + Bert tunned 13\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #LOF + Bert 13\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert 13\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert tunned 13\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert tunned 15\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #claim + LOF + Bert\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, # LOF + Bert tunned\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, # LOF + Bert\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, # claim base\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-b1be761c-039f-c170-b0c1-c82ee601e195]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #base \n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-cd2a3fdb-d296-06a1-ff6a-5b6d54df0acc]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-69415620-de6c-329d-ef8d-9c9c937f4850]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-1cad794e-e425-8f4b-3a02-a512bba3497a]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-bceb1af3-48fa-7240-a710-a81ba0d3ebc5]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_57\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-c0eb6ae4-7a40-b947-996f-5811490f6f65]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_56\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_55\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, #45_54\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train, # 57\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-129c91a9-fac6-0c27-6de4-14ecc17b6501]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-c85b1185-eb3f-7f99-412a-2486fe49bf7c]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 150, #100 max_shortexplanation_tokens\n batch_size = 8, max_epochs = 3, # 9 for base\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-c85b1185-eb3f-7f99-412a-2486fe49bf7c]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 100, #100 max_shortexplanation_tokens\n batch_size = 9, max_epochs = 3, # 18\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-bceb1af3-48fa-7240-a710-a81ba0d3ebc5]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
],
[
"model.train(train_df = train,\n eval_df = dev, \n source_max_token_len = 1200, #2000 max_explanation_tokens\n target_max_token_len = 100, #100 max_shortexplanation_tokens\n batch_size = 9, max_epochs = 6, # 18\n use_gpu = True,\n outputdir = output_dir,\n early_stopping_patience_epochs = 0) # 3",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [GPU-a527da35-0f6b-a3bf-de28-5bb4f9ba4001]\n\n | Name | Type | Params\n-----------------------------------------------------\n0 | model | T5ForConditionalGeneration | 222 M \n-----------------------------------------------------\n222 M Trainable params\n0 Non-trainable params\n222 M Total params\n891.614 Total estimated model params size (MB)\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb72b7cb09dc350b3230b64396507b53801558bf | 16,980 | ipynb | Jupyter Notebook | tutorials/Tutorial_dataset_with_DEBIAI/Image_dataset_with_DEBIAI_dataset_generator.ipynb | debiai/py-debiai | fd784fd1ca7a59c38714275b6fad53ba9f09eaa7 | [
"Apache-2.0"
]
| 1 | 2022-03-07T17:52:32.000Z | 2022-03-07T17:52:32.000Z | tutorials/Tutorial_dataset_with_DEBIAI/Image_dataset_with_DEBIAI_dataset_generator.ipynb | DebiAI/py-debiai | fd784fd1ca7a59c38714275b6fad53ba9f09eaa7 | [
"Apache-2.0"
]
| null | null | null | tutorials/Tutorial_dataset_with_DEBIAI/Image_dataset_with_DEBIAI_dataset_generator.ipynb | DebiAI/py-debiai | fd784fd1ca7a59c38714275b6fad53ba9f09eaa7 | [
"Apache-2.0"
]
| null | null | null | 34.86653 | 2,129 | 0.550177 | [
[
[
"# Image classification training on a DEBIAI project with a dataset generator\n\nThis tutorial shows how to classify images of flowers after inserting the project contextual into DEBIAI.\n\nBased on the tensorflow tutorial : https://www.tensorflow.org/tutorials/images/classification",
"_____no_output_____"
]
],
[
[
"# Import TensorFlow and other libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport PIL\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\n# The pythonModule folder need to be in the same folder \nfrom debiai import debiai",
"_____no_output_____"
]
],
[
[
"## Download and explore the dataset\n\nThis tutorial uses a dataset of about 3,700 photos of flowers. The dataset contains 5 sub-directories, one per class:\n\ndaisy, dandelion, roses, sunflowers and tulips\n",
"_____no_output_____"
]
],
[
[
"import pathlib\ndataset_url = \"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz\"\ndata_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)\ndata_dir = pathlib.Path(data_dir)\n",
"_____no_output_____"
]
],
[
[
"## Create a dataset",
"_____no_output_____"
]
],
[
[
"# Define some parameters for the loader:\n\nbatch_size = 32\nimg_height = 180\nimg_width = 180",
"_____no_output_____"
],
[
"train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)",
"Found 3670 files belonging to 5 classes.\nUsing 2936 files for training.\n"
],
[
"val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n",
"Found 3670 files belonging to 5 classes.\nUsing 734 files for validation.\n"
],
[
"class_names = train_ds.class_names\nprint(class_names)",
"['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']\n"
],
[
"AUTOTUNE = tf.data.AUTOTUNE\n\ntrain_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\nval_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"## Insert the project contextual data in DEBIAI",
"_____no_output_____"
]
],
[
[
"# Creation of the DEBIAI project block structure\nDEBIAI_block_structure = [\n {\n \"name\": \"image_id\",\n \"groundTruth\": [\n { \"name\": \"class\", \"type\": \"text\"},\n ],\n \"contexts\": [\n { \"name\": \"img_path\", \"type\": \"text\"},\n ]\n }\n]",
"_____no_output_____"
]
],
[
[
"#### Converting some of the project data in a dataframe\n\nIn this exemple, it is done with the creation of a dataframe\n\nmore details here : \nhttps://git.irt-systemx.fr/ML/DEBIAI/pythonModule#adding-samples",
"_____no_output_____"
]
],
[
[
"# Creation of a dataframe with the same columns as the block structure\ndata = {\"image_id\": [], \"class\": [], \"img_path\": []}\ni = 0\nfor class_name in class_names:\n images = list(data_dir.glob(class_name + '/*'))\n\n for image in images:\n data[\"image_id\"].append(i)\n data[\"class\"].append(class_name)\n data[\"img_path\"].append(str(image))\n i += 1\n\ndf = pd.DataFrame(data=data)\ndf",
"_____no_output_____"
],
[
"# Creation of a DEBIAI instance\nDEBIAI_BACKEND_URL = 'http://localhost:3000/'\nDEBIAI_PROJECT_NAME = 'Image classification demo'\nmy_debiai = debiai.Debiai(DEBIAI_BACKEND_URL)",
"_____no_output_____"
],
[
"# Creation of a DEBIAI project if it doesn't exist\ndebiai_project = my_debiai.get_project(DEBIAI_PROJECT_NAME)\n\nif not debiai_project :\n debiai_project = my_debiai.create_project(DEBIAI_PROJECT_NAME)\n\ndebiai_project",
"_____no_output_____"
],
[
"# Set the project block_structure if not already done\nif not debiai_project.block_structure_defined():\n debiai_project.set_blockstructure(DEBIAI_block_structure)\ndebiai_project.get_block_structure()",
"_____no_output_____"
],
[
"# Adding the dataframe\ndebiai_project.add_samples_pd(df, get_hash=False)",
"_____no_output_____"
]
],
[
[
"## Create the model",
"_____no_output_____"
]
],
[
[
"num_classes = len(class_names)\n\nmodel = Sequential([\n layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes)\n])\n",
"_____no_output_____"
],
[
"# Compile the model\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nrescaling (Rescaling) (None, 180, 180, 3) 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 180, 180, 16) 448 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 90, 90, 16) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 90, 90, 32) 4640 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 45, 45, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 45, 45, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 22, 22, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 30976) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 3965056 \n_________________________________________________________________\ndense_1 (Dense) (None, 5) 645 \n=================================================================\nTotal params: 3,989,285\nTrainable params: 3,989,285\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## Train the model with the DEBIAI Dataset generator",
"_____no_output_____"
]
],
[
[
"# Because DEBIAI doesn't have the images to train the models, we will provide them with a function that take a sample information based on the given block_structure\n\ndef model_input_from_debiai_sample(debiai_sample: dict):\n # \"image_id\", \"class\", \"img_path\"\n img = keras.preprocessing.image.load_img(\n debiai_sample['img_path'], target_size=(img_height, img_width))\n img_array = keras.preprocessing.image.img_to_array(img)\n return tf.expand_dims(img_array, 0) # Create a batch",
"_____no_output_____"
],
[
"# TF generated dataset \ntrain_dataset_imported = debiai_project.get_tf_dataset_with_provided_inputs(\n model_input_from_debiai_sample,\n output_types=(tf.float32, tf.int32),\n output_shapes=([None, img_height, img_width, 3], [1, ]),\n classes=class_names\n)\nAUTOTUNE = tf.data.AUTOTUNE\ntrain_dataset_imported = train_dataset_imported.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\n\n# get_tf_dataset_with_provided_inputs Also work with a selection",
"_____no_output_____"
],
[
"# Train the model\nepochs = 3\nmodel.fit(train_dataset_imported, epochs=epochs)",
"Epoch 1/3\n3670/3670 [==============================] - 229s 57ms/step - loss: 1.0292 - accuracy: 0.5483\nEpoch 2/3\n3670/3670 [==============================] - 147s 40ms/step - loss: 0.9190 - accuracy: 0.5619\nEpoch 3/3\n3670/3670 [==============================] - 147s 40ms/step - loss: 0.7756 - accuracy: 0.6721\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
cb72b89500ccce400782708327efe585ec65a98d | 269,916 | ipynb | Jupyter Notebook | anomaly_detection.ipynb | najeebuddinm98/enron_anomaly_detection | 0cde319af84de99951ec4e866119811640d803a7 | [
"MIT"
]
| null | null | null | anomaly_detection.ipynb | najeebuddinm98/enron_anomaly_detection | 0cde319af84de99951ec4e866119811640d803a7 | [
"MIT"
]
| null | null | null | anomaly_detection.ipynb | najeebuddinm98/enron_anomaly_detection | 0cde319af84de99951ec4e866119811640d803a7 | [
"MIT"
]
| 1 | 2021-12-28T16:34:56.000Z | 2021-12-28T16:34:56.000Z | 83.929104 | 99,276 | 0.723721 | [
[
[
"# Anomaly Detection on Enron Dataset\nIn this notebook, we aim to build and train models based on machine learning algorithms commonly used for unsupervised anomaly detection; namely one-class Support Vector Machine (SVM), Isolation Forest and Local Outlier Factor (LOF). The dataset used is a modified version of the Enron financial + email dataset that contains information about Enron Corporation, an energy, commodities, and services company that infamously went bankrupt in December 2001 as a result of fraudulent business practices. \nThe Enron dataset is widely used to try and develop models that can identify the persons of interests (POIs), i.e. individuals who were eventually tried for fraud or criminal activity in the Enron investigation, from the features within the data. The email + financial data contains the emails themselves, metadata about the emails such as number received by and sent from each individual, and financial information including salary and stock options. \nThe dataset we have obtained is from the [Udacity Data Analyst Nanodegree](https://www.udacity.com/course/data-analyst-nanodegree--nd002) and their [GitHub](https://github.com/udacity/ud120-projects) page. Inspiration for loading and preprocessing the dataset was taken from Will Koehrsen's [Medium article](https://williamkoehrsen.medium.com/machine-learning-with-python-on-the-enron-dataset-8d71015be26d). The data is stored in a pickled form and can be downloaded as a `.pkl` file that can be easily converted to a Python dictionary. \n#### NOTE:\nAll references are presented in the form of appropriate hyperlinks within the paragraphs rather than in a separate section.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Loading the Dataset\nAs mentioned above, the data is presented in the form of a `.pkl.` file. We need to open the file as a Python object and then load it into a dictionary using Python's inbuilt `pickle` module.",
"_____no_output_____"
]
],
[
[
"import pickle\n\nwith open(\"enron_data.pkl\", \"rb\") as data_file:\n data_dict = pickle.load(data_file)\n \nlen(data_dict)",
"_____no_output_____"
]
],
[
[
"We have 146 personnel in the dataset. Let us view what data each person holds.",
"_____no_output_____"
]
],
[
[
"data_dict[\"ALLEN PHILLIP K\"]",
"_____no_output_____"
]
],
[
[
"Including the names of the people, we have 146 samples with 22 features. We will now convert this to a Pandas dataframe.",
"_____no_output_____"
]
],
[
[
"temp_dict = {'name':[], 'salary': [], 'to_messages': [], 'deferral_payments': [], 'total_payments': [], 'loan_advances': [],\n 'bonus': [], 'email_address': [], 'restricted_stock_deferred': [], 'deferred_income': [], 'total_stock_value': [], \n 'expenses': [], 'from_poi_to_this_person': [], 'exercised_stock_options': [], 'from_messages': [], 'other': [], \n 'from_this_person_to_poi': [], 'long_term_incentive': [], 'shared_receipt_with_poi': [], 'restricted_stock': [], \n 'director_fees': [], 'poi': []}\n\nfor name in data_dict.keys():\n temp_dict['name'].append(name)\n for feature in data_dict[name].keys():\n temp_dict[feature].append(data_dict[name][feature])",
"_____no_output_____"
],
[
"df = pd.DataFrame(temp_dict)\ndf",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis\nNow, we perform some inital data exploration and analysis to get an idea of the characteritics and behaviour of our dataset as well as all the feature columns. We start by using the `.info()` and `.describe()` funtions from the *Pandas* library to get important metrics about our dataset.",
"_____no_output_____"
]
],
[
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 146 entries, 0 to 145\nData columns (total 22 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 146 non-null object\n 1 salary 146 non-null object\n 2 to_messages 146 non-null object\n 3 deferral_payments 146 non-null object\n 4 total_payments 146 non-null object\n 5 loan_advances 146 non-null object\n 6 bonus 146 non-null object\n 7 email_address 146 non-null object\n 8 restricted_stock_deferred 146 non-null object\n 9 deferred_income 146 non-null object\n 10 total_stock_value 146 non-null object\n 11 expenses 146 non-null object\n 12 from_poi_to_this_person 146 non-null object\n 13 exercised_stock_options 146 non-null object\n 14 from_messages 146 non-null object\n 15 other 146 non-null object\n 16 from_this_person_to_poi 146 non-null object\n 17 long_term_incentive 146 non-null object\n 18 shared_receipt_with_poi 146 non-null object\n 19 restricted_stock 146 non-null object\n 20 director_fees 146 non-null object\n 21 poi 146 non-null bool \ndtypes: bool(1), object(21)\nmemory usage: 24.2+ KB\n"
]
],
[
[
"As seen the above, all our columns except `poi` are of the type objects despite most of the data being numeric values. So, we convert all the necessary columns into `float64` type and also convert the `poi` column to categorical after replacing `True` with `1` and `False` with `0`.",
"_____no_output_____"
]
],
[
[
"df['salary'] = df['salary'].astype('float64')\ndf['to_messages'] = df['to_messages'].astype('float64')\ndf['deferral_payments'] = df['deferral_payments'].astype('float64')\ndf['total_payments'] = df['total_payments'].astype('float64')\ndf['loan_advances'] = df['loan_advances'].astype('float64')\ndf['bonus'] = df['bonus'].astype('float64')\ndf['restricted_stock_deferred'] = df['restricted_stock_deferred'].astype('float64')\ndf['deferred_income'] = df['deferred_income'].astype('float64')\ndf['total_stock_value'] = df['total_stock_value'].astype('float64')\ndf['expenses'] = df['expenses'].astype('float64')\ndf['from_poi_to_this_person'] = df['from_poi_to_this_person'].astype('float64')\ndf['exercised_stock_options'] = df['exercised_stock_options'].astype('float64')\ndf['from_messages'] = df['from_messages'].astype('float64')\ndf['other'] = df['other'].astype('float64')\ndf['from_this_person_to_poi'] = df['from_this_person_to_poi'].astype('float64')\ndf['long_term_incentive'] = df['long_term_incentive'].astype('float64')\ndf['shared_receipt_with_poi'] = df['shared_receipt_with_poi'].astype('float64')\ndf['restricted_stock'] = df['restricted_stock'].astype('float64')\ndf['director_fees'] = df['director_fees'].astype('float64')\n\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 146 entries, 0 to 145\nData columns (total 22 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 146 non-null object \n 1 salary 95 non-null float64\n 2 to_messages 86 non-null float64\n 3 deferral_payments 39 non-null float64\n 4 total_payments 125 non-null float64\n 5 loan_advances 4 non-null float64\n 6 bonus 82 non-null float64\n 7 email_address 146 non-null object \n 8 restricted_stock_deferred 18 non-null float64\n 9 deferred_income 49 non-null float64\n 10 total_stock_value 126 non-null float64\n 11 expenses 95 non-null float64\n 12 from_poi_to_this_person 86 non-null float64\n 13 exercised_stock_options 102 non-null float64\n 14 from_messages 86 non-null float64\n 15 other 93 non-null float64\n 16 from_this_person_to_poi 86 non-null float64\n 17 long_term_incentive 66 non-null float64\n 18 shared_receipt_with_poi 86 non-null float64\n 19 restricted_stock 110 non-null float64\n 20 director_fees 17 non-null float64\n 21 poi 146 non-null bool \ndtypes: bool(1), float64(19), object(2)\nmemory usage: 24.2+ KB\n"
],
[
"df['poi'].replace( {True:1, False:0}, inplace=True)\ndf['poi'].astype('category')\n\ndf.head()",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"From the above, we can make the following observations:\n* A few feature columns have very high maximum values when compared to the rest of the data like `salary, to_messages, total_payments` etc. However, we cannot make an assumption that these are outliers due to the very nature of the data.\n* There are a lot of missing values in most of the columns. For the feature columns related to finances, `NaN` is actually 0 whereas it is an unknown value for the email related feature columns according to the [documentation](https://github.com/udacity/ud120-projects/blob/master/final_project/enron61702insiderpay.pdf)\n* The columns `name` and `email_id` can be removed or stored separately as they provide no tangible information for our model\n* Since many of the columns are arithmetically related to one another, we can check for errors using those relations. ",
"_____no_output_____"
],
[
"Now, let us create a PairGrid with the feature columns `salary, total_payments, total_stock_value, from_poi_to_this_person, from_this_person_to_poi` to find any observable relations among them. The main reason we choose these columns specifically is because conceptaully, they seem the most important as well as related to most other feature columns.",
"_____no_output_____"
]
],
[
[
"tempdf = df[['salary','total_payments','total_stock_value','from_poi_to_this_person','from_this_person_to_poi','poi']]\n\nsns.pairplot(tempdf, hue='poi', palette='bright')",
"_____no_output_____"
]
],
[
[
"From the diagonal plots, we can conclude that no one feature is enough to predict whether a given observation can be classified as a person of interest. This is because their is considerbale overlap in the distributions for both categories. ",
"_____no_output_____"
],
[
"## Handling missing data\nAs mentioned above, we have a lot of missing data in our dataset. We cannot remove the columns with missing values because we already have very few observations to work with and removing more could prove detrimental during model training. For the financial feature columns, we replace `NaN` with 0. For the email feature columns, we replace them with the mean of respective category.",
"_____no_output_____"
]
],
[
[
"financial_features = ['bonus', 'deferral_payments', 'deferred_income', 'director_fees', 'exercised_stock_options',\n 'expenses', 'loan_advances', 'long_term_incentive', 'other', 'restricted_stock', 'restricted_stock_deferred',\n 'salary', 'total_payments', 'total_stock_value']\n\ndf[financial_features] = df[financial_features].fillna(0)\ndf[financial_features].isna().sum()",
"_____no_output_____"
],
[
"email_features = ['from_messages', 'from_poi_to_this_person', 'from_this_person_to_poi',\n 'shared_receipt_with_poi', 'to_messages']\n\nemail_mean = df[email_features].mean()\ndf[email_features] = df[email_features].fillna(email_mean)\ndf.isna().sum()",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"## Error Checking through Official Docs\nOne simple way to check for incorrect data is to add up all of the payment-related columns for each person and check if that is equal to the total payment recorded for the individkual. The same can be done for stock payments. Depending on the result, we can possibly make simple changes to rectify the errors.",
"_____no_output_____"
]
],
[
[
"payment_data = ['bonus', 'deferral_payments', 'deferred_income', 'director_fees', 'expenses', 'loan_advances', \n 'long_term_incentive', 'other', 'salary']\n\npay_err = df[ (df[payment_data].sum(axis='columns') != df['total_payments']) ]\npay_err",
"_____no_output_____"
]
],
[
[
"For the payment related financial feature columns, we get error for two observations.The errors appear to be caused by a misalignment of the columns when compared to the [official documentation](https://github.com/udacity/ud120-projects/blob/master/final_project/enron61702insiderpay.pdf); for `BELFER ROBERT`, the financial data has been shifted one column to the right, and for `BHATNAGAR SANJAY`, the data has been shifted one column to the left. We shift the columns to their correct positions and then check again.",
"_____no_output_____"
]
],
[
[
"df.loc[24,['deferred_income','deferral_payments','expenses',\n 'director_fees','total_payments','exercised_stock_options', 'restricted_stock', \n 'restricted_stock_deferred', 'total_stock_value']] = [-102500,0,3285,102500,3285,0,44093,-44093,0]\n\ndf.loc[117,['other','expenses','director_fees','total_payments','exercised_stock_options', 'restricted_stock', \n 'restricted_stock_deferred', 'total_stock_value']] = [0,137864,0,137864,15456290,2604490,-2604490,15456290]\n\npay_err = df[ (df[payment_data].sum(axis='columns') != df['total_payments']) ]\npay_err",
"_____no_output_____"
],
[
"stock_data = ['exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred', 'total_stock_value']\n\nstock_err = df[ (df[stock_data[:-1]].sum(axis='columns') != df['total_stock_value']) ]\nstock_err",
"_____no_output_____"
]
],
[
[
"As expected, we rectified errors in our stock data along with the payment data in the code block above. \nNow, we also remove two columns from the dataset namely `TOTAL` and `THE TRAVEL AGENCY IN THE PARK` as the first is unneccessary for prediction and the second is an organization rather than a person.",
"_____no_output_____"
]
],
[
[
"df[ (df['name'] == 'TOTAL') | (df['name'] == 'THE TRAVEL AGENCY IN THE PARK') ]",
"_____no_output_____"
],
[
"df.drop(labels=[100,103], axis=0, inplace=True)\ndf[ (df['name'] == 'TOTAL') | (df['name'] == 'THE TRAVEL AGENCY IN THE PARK') ]",
"_____no_output_____"
]
],
[
[
"## Data Preprocessing\nAll the above steps do come under preprocessing, but this section will deal with the final touches before we start building and training our models. We need to remove the `name` and `email_address` columns followed by scaling using z-score normalization.",
"_____no_output_____"
]
],
[
[
"names = df.pop('name')\nemails = df.pop('email_address')",
"_____no_output_____"
],
[
"y = df.pop('poi')\ny",
"_____no_output_____"
]
],
[
[
"Here, the index of `y` should be noted.",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler(copy=True)\nscaled_arr = scaler.fit_transform(df)\n\nscaled_df = pd.DataFrame(scaled_arr, columns=financial_features+email_features)\nscaled_df.head()",
"_____no_output_____"
],
[
"scaled_df.index",
"_____no_output_____"
]
],
[
[
"Clearly, the index of `scaled_df` and `y` are different. This is becuase the scaling resetted our index and now includes the index 100 & 103 een though we removed those samples before. So, we need to realign `y` so that they match.",
"_____no_output_____"
]
],
[
[
"y.index = scaled_df.index\ny",
"_____no_output_____"
]
],
[
[
"## Visualisation\nNow, we will try to visualise our feature space by using t-Stochastic Neighbour Embedding as a dimensionality reduction method. Since this method is primarily used for visualisation, our embedding space is set to be 2 dimensional. Our aim is to observe whether we can achieve a embedding feature space where the anomalous observations are distinctly separate from the non-anomalous ones.",
"_____no_output_____"
]
],
[
[
"from sklearn.manifold import TSNE\n\ntsne = TSNE(n_components=2, random_state=0)\n\ntsne.fit_transform(scaled_df)",
"_____no_output_____"
],
[
"x1 = []\nx2 = []\nfor value in tsne.embedding_:\n x1.append(value[0])\n x2.append(value[1])\n \nplt.figure(figsize=(16, 10)) \nplt.scatter(x1,x2,c=y)",
"_____no_output_____"
]
],
[
[
"As seen above, the compression of our data into 2 dimensions does not show an immediate distinction between non-pois and pois.",
"_____no_output_____"
],
[
"## Is splitting needed?\nWhen dealing with supervised algorithms, it is an essential step to split our dataset into training, validation and test sets. Training set is used to train our model. Then, the validaion set is used to tune its hyperparameters. Usually, we skip this split and instead use cross-validation for tuning. Finally, the test set is used to give the true accuracy that can be expected after deployment. \nHowever, doing the same for unsupervised learning does not make sense. Unsupervised learning in general benefit more from a cross-validation score to replace the metric of test accuracy. Refer this [question from Stats.StackExchange](https://stats.stackexchange.com/questions/387326/unsupervised-learning-train-test-division) and this [one from StackOverflow](https://stackoverflow.com/questions/31673388/is-train-test-split-in-unsupervised-learning-necessary-useful) to know more in detail. \n#### For our case, we will be looking at the accuracy and the confusion matrix since we have labels for our data.",
"_____no_output_____"
]
],
[
[
"X = scaled_df.copy()\ny.value_counts()",
"_____no_output_____"
],
[
"(18/144)*100",
"_____no_output_____"
]
],
[
[
"We can now begin building our models.",
"_____no_output_____"
],
[
"## One-class SVM\nThe best and most comprehensive explanation for this method is present in the [original paper](https://papers.nips.cc/paper/1999/file/8725fb777f25776ffa9076e44fcfd776-Paper.pdf) authored by researchers from Microsoft, Australian National University and University of London. The gist of it is that regular kernel SVM for classification cannot be used in cases for novelty detection, so minor modifications were made to find a function that is positive for regions with high density of points, and negative for small densities. We will now build and train our model. It is important to remember that this is an unsupervised method, meaing our labels have no use here. The main difference of one class SVM from the other methods is that it is looking for outliers according to the distribution in the feature space, rather than using an index or metric to quantify the anomalous behaviour of one observation with respect to the rest.",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import OneClassSVM\n\nsvm = OneClassSVM()\n\nsvmpred = svm.fit_predict(X)",
"_____no_output_____"
]
],
[
[
"The `fit_predict()` function returns -1 for outliers and 1 for inliers, which is different from how are labels are assigned. So, we will modify the results and then calculate accuracy.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\n\nact_svmpred = np.where(svmpred == -1, 1, 0)\nprint(accuracy_score(y, act_svmpred))\nprint(f1_score(y, act_svmpred))",
"0.5625\n0.30769230769230765\n"
]
],
[
[
"We get an accuracy of 56.25% with our default model. According to the [Scikit-Learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html#overview-of-outlier-detection-methods), one-class SVM is very sensitive to outliers in our data which are not anomalies. This might indicate the low training accuracy as we do have a few observations in our dataset that may seem like outliers and hence should have `poi=1` but aren't anomalies per say. However, out F-score is very low. We can interpret this further by getting the confusion matrix",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\n\nconfusion_matrix(y,act_svmpred, labels=[0,1])",
"_____no_output_____"
]
],
[
[
"From drawing out the confusion matrix, we get a clearer picture of why our F-score is so low. Our model is very good at classifying our anomalies correctly but has a very high error in misclassifying many of normal observations as anomalies. This means we have good recall but very bad precision.",
"_____no_output_____"
],
[
"## Isolation Forest\nThe original paper can be found [here](https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/icdm08b.pdf?q=isolation-forest). This is also an unsupervised algorithm that returns an anomaly score for each observation. As the name suggests, it is based on the random forests algorithm in terms of its working.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import IsolationForest\n\nisof = IsolationForest()\n\nisofpred = isof.fit_predict(X)",
"_____no_output_____"
],
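[
"# A quick optional check: the paper above frames Isolation Forest in terms of an anomaly\n# score for every observation. scikit-learn exposes this score via decision_function(),\n# where lower (more negative) values indicate more anomalous points.\nisof_scores = isof.decision_function(X)\nprint(isof_scores.min(), isof_scores.max())",
"_____no_output_____"
],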
[
"act_isofpred = np.where(isofpred == -1, 1, 0)\nprint(accuracy_score(y, act_isofpred))\nprint(f1_score(y, act_isofpred))",
"0.8680555555555556\n0.3448275862068966\n"
]
],
[
[
"This is excellent accuracy for what is a very small dataset but very low F-score. Since our model is unsupervised, there is a small possibility that our model overfit the data.",
"_____no_output_____"
]
],
[
[
"confusion_matrix(y,act_isofpred, labels=[0,1])",
"_____no_output_____"
]
],
[
[
"Here, we can clearly see that our model has very bad recall, about 22% (4/18) causing the F-score to fall. However, it is very good at classifying a non-anomalous observation correctly. This indicates that when our model predicts a new observation to be normal, we can use that result with utmost trust as it was able to identify 120 of 126 normal training observations. However, if it predicts a new observation as an anomaly, we need to more information or need to look at other methods.",
"_____no_output_____"
],
[
"## Local Outlier Factor\nThe original paper can be found [here](https://www.dbs.ifi.lmu.de/Publikationen/Papers/LOF.pdf). LOF is based on a concept of a local density, where locality is given by K nearest neighbors, whose distance is used to estimate the density. By comparing the local density of an object to the local densities of its neighbors, one can identify regions of similar density, and points that have a substantially lower density than their neighbors. It shares a lot of similarties with the DBSCAN clsutering algorithm.",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import LocalOutlierFactor\n\nlof = LocalOutlierFactor()\n\nlofpred = lof.fit_predict(X)",
"_____no_output_____"
],
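[
"# A quick optional check of the local-density idea described above: after fitting, the LOF\n# scores are available as negative_outlier_factor_. Values close to -1 mean a point is about\n# as dense as its neighbors, while much smaller values flag likely outliers.\n# (Assumes the default n_neighbors=20 used in the cell above.)\nprint(lof.negative_outlier_factor_[:10])",
"_____no_output_____"
],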
[
"act_lofpred = np.where(lofpred == -1, 1, 0)\nprint(accuracy_score(y, act_lofpred))\nprint(f1_score(y, act_lofpred))",
"0.6527777777777778\n0.28571428571428575\n"
]
],
[
[
"We get an accuracy of 65%, which is higher than our SVM but lower than our forest but the same F-score as the Forest.",
"_____no_output_____"
]
],
[
[
"confusion_matrix(y,act_lofpred, labels=[0,1])",
"_____no_output_____"
]
],
[
[
"Just like the results above, our confusion matrix is an average of the above two methods. While SVM was good at classifying anomalies and Isolation Forest was good at classifying normal observations, LOF lies right between the two.",
"_____no_output_____"
],
[
"## Hyperparameter Tuning\nWe will now take our Isolation Forest model and try tuning it various parameters to get as good a score as possible using `GridSearchCV` cross-validation method. We will concentrating on the `n_estimators` (number of trees in the forest), `max_samples` (number of observations taken for training per tree), `max_features` (number of features taken for splitting per tree) and `bootstrap` (bootstrapping of the data). Two metrics will be calculated, F-score and Accuracy and the best estimator will be decided by the former as we already get excellent accuracy from a default Isolation Forest.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import GridSearchCV\n\n\nclf = IsolationForest(random_state=0)\nparam_grid = {'n_estimators':[100,200,300], 'max_samples':[50,100,'auto'], 'max_features':[1,5,10,15], \n 'bootstrap':[True,False]}\n\ngrid_isof = GridSearchCV(clf, param_grid, scoring=['f1_micro','accuracy'], refit='f1_micro', cv=5)\ngrid_isof.fit(X, np.where(y==1, -1, 1))",
"_____no_output_____"
],
[
"grid_isof.best_estimator_",
"_____no_output_____"
]
],
[
[
"Here, we obtain our best model. Let us look at the accuracy and F-score to see our improvements.",
"_____no_output_____"
]
],
[
[
"grid_isof.best_index_",
"_____no_output_____"
],
[
"print(grid_isof.cv_results_['mean_test_accuracy'][grid_isof.best_index_])\nprint(grid_isof.cv_results_['mean_test_f1_micro'][grid_isof.best_index_])",
"0.874384236453202\n0.874384236453202\n"
]
],
[
[
"While we see only a small improvement in our accuracy, our F-score has greatly improved.",
"_____no_output_____"
],
[
"## Final Thoughts:\n* With the default models, Isolation Forest worked best in identifying normal observations whereas one class SVM worked best in identifying anomalies\n* LOF's performance was the average of the other two and did not provide any significant advantage.\n* Hyperparameter tuning goes a long way in imporving our model. We have done so for our Isolation Forest but the same can be replicated for SVM.\n* The main problem with this dataset is a very small number of observations. ML models generally tend to imporve in performance with an increase in data used for training, with overfitting bein prevented by using common methods.\n* Supervised Learning algorithms, especially Random Forests, might prove to perform better since our data is labelled but they require finer tuning since their algorithms are not designed for anomaly detection specifically.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
]
|
cb72bbfe3635a108c413c7143d1a48b57ec2b723 | 10,066 | ipynb | Jupyter Notebook | validation/MHR/Test_ExFT.ipynb | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
]
| 2 | 2018-11-06T08:04:04.000Z | 2020-10-09T14:52:36.000Z | validation/MHR/Test_ExFT.ipynb | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
]
| 68 | 2018-03-27T01:43:22.000Z | 2019-09-09T12:05:44.000Z | validation/MHR/Test_ExFT.ipynb | mitchute/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
]
| 4 | 2018-05-24T03:02:44.000Z | 2021-08-16T13:54:09.000Z | 23.909739 | 135 | 0.491556 | [
[
[
"import os\nimport sys\nimport json\nimport tempfile\nimport pandas as pd\nimport numpy as np\n\nimport datetime\n\nfrom CoolProp.CoolProp import PropsSI\nfrom math import exp, factorial, ceil\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ncwd = os.getcwd()\nsys.path.append(os.path.normpath(os.path.join(cwd, '..', '..', '..', 'glhe')))\nsys.path.append(os.path.normpath(os.path.join(cwd, '..', '..', '..', 'standalone')))",
"_____no_output_____"
],
[
"plt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = [15, 9]\nplt.rcParams['font.size'] = 14\n\npd.set_option('display.max_columns', None)\n# pd.set_option('display.max_rows', None)",
"_____no_output_____"
],
[
"df = pd.read_csv(\"out.csv\", index_col=0)",
"_____no_output_____"
],
[
"df.head(2)",
"_____no_output_____"
],
[
"start_time = datetime.datetime(month=1, day=1, year=2018, hour=0, minute=0, second=0)",
"_____no_output_____"
],
[
"l = df['Simulation Time'].tolist()",
"_____no_output_____"
],
[
"dt = [datetime.timedelta(seconds=x) for x in l]",
"_____no_output_____"
],
[
"df.set_index(pd.to_datetime([start_time + x for x in dt]), inplace=True)",
"_____no_output_____"
],
[
"df.plot(y=['GLHE Inlet Temperature [C]', 'GLHE Outlet Temperature [C]'])",
"_____no_output_____"
],
[
"dT = df['GLHE Inlet Temperature [C]'].diff()\ndt = df['GLHE Inlet Temperature [C]'].index.to_series().diff().dt.total_seconds()\n\ndf['dT_in/dt'] = dT/dt",
"_____no_output_____"
],
[
"df.plot(y='dT_in/dt')",
"_____no_output_____"
],
[
"df = df.loc['01-01-2018 02:50:00':'01-01-2018 03:30:00']",
"_____no_output_____"
],
[
"def hanby(time, vol_flow_rate, volume):\n \"\"\"\n Computes the non-dimensional response of a fluid conduit\n assuming well mixed nodes. The model accounts for the thermal\n capacity of the fluid and diffusive mixing.\n\n Hanby, V.I., J.A. Wright, D.W. Fetcher, D.N.T. Jones. 2002\n 'Modeling the dynamic response of conduits.' HVAC&R Research 8(1): 1-12.\n\n The model is non-dimensional, so input parameters should have consistent units\n for that are able to compute the non-dimensional time parameter, tau.\n\n :math \\tau = \\frac{\\dot{V} \\cdot t}{Vol}\n\n\n :param time: time of fluid response\n :param vol_flow_rate: volume flow rate\n :param volume: volume of fluid circuit\n :return:\n \"\"\"\n\n tau = vol_flow_rate * time / volume\n num_nodes = 20\n ret_sum = 1\n for i in range(1, num_nodes):\n ret_sum += (num_nodes * tau) ** i / factorial(i)\n\n return 1 - exp(-num_nodes * tau) * ret_sum",
"_____no_output_____"
],
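[
"# A small sanity check of the hanby() step response defined above: it should start near 0\n# and approach 1 after a couple of transit times (volume / flow rate). The flow rate and\n# volume used here are the same values assigned to `flow` and `vol` a few cells below.\n[round(hanby(t, 0.0002, 0.05688), 3) for t in (0, 60, 300, 600, 1200)]",
"_____no_output_____"
],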
[
"def hanby_c(time, vol_flow_rate, volume):\n return 1 - hanby(time, vol_flow_rate, volume)",
"_____no_output_____"
],
[
"delta_t = df['Simulation Time'][1] - df['Simulation Time'][0]\nflow = 0.0002\nvol = 0.05688",
"_____no_output_____"
],
[
"def calc_exft_correction_factors(timestep, flow_rate, volume):\n t_tr = volume / flow_rate\n time = np.arange(0, t_tr * 2, timestep)\n f = np.array([hanby(x, flow_rate, volume) for x in time])\n d = np.diff(f)\n r = np.diff(f) / sum(d)\n# r = np.append(np.zeros(ceil(t_tr/timestep)), r)\n if len(r) == 0:\n return np.ones(1)\n else:\n return r",
"_____no_output_____"
],
[
"calc_exft_correction_factors(120, flow, vol)",
"_____no_output_____"
],
[
"def update_exft_correction_factors(r):\n if len(r) == 1:\n return r\n elif r[0] == 1:\n return r\n else:\n pop_val = r[0]\n l = np.count_nonzero(r) - 1\n delta = pop_val / l\n for i, val in enumerate(r):\n if r[i] == 0:\n break\n else:\n r[i] += delta\n\n r = np.roll(r, -1)\n r[-1] = 0\n return r",
"_____no_output_____"
],
[
"cf_0 = calc_exft_correction_factors(delta_t, flow, vol)\ncf_0",
"_____no_output_____"
],
[
"cf_1 = update_exft_correction_factors(cf_0)\ncf_1",
"_____no_output_____"
],
[
"cf_2 = update_exft_correction_factors(cf_1)\ncf_2",
"_____no_output_____"
],
[
"cf_3 = update_exft_correction_factors(cf_2)\ncf_3",
"_____no_output_____"
],
[
"cf_4 = update_exft_correction_factors(cf_3)\ncf_4",
"_____no_output_____"
],
[
"def calc_exft(signal, to_correct):\n\n r = calc_exft_correction_factors(delta_t, flow, vol)\n# r = np.array(l)\n \n prev_temps = np.ones(len(r)) * to_correct[0]\n prev_signal = signal[0]\n \n dT_dt_prev = 0\n \n new_temps = np.empty([0])\n \n for i, t_sig in enumerate(signal):\n dT_dt = (t_sig - prev_signal) / delta_t\n# print(dT_dt, t_sig, prev_signal)\n \n if abs(dT_dt - dT_dt_prev) > 0.01:\n r = calc_exft_correction_factors(delta_t, flow, vol)\n# r = np.array(l)\n \n print(r)\n \n prev_temps[0] = to_correct[i]\n \n new_temp = sum(r * prev_temps)\n# print(to_correct[i], new_temp)\n \n new_temps = np.append(new_temps, new_temp)\n# print(new_temps)\n \n prev_temps = np.roll(prev_temps, 1)\n prev_temps[0] = new_temp\n \n r = update_exft_correction_factors(r)\n prev_sig = t_sig\n dT_dt_prev = dT_dt\n \n# if i == 10:\n# break\n# else:\n# print('\\n')\n return new_temps",
"_____no_output_____"
],
[
"t_c = calc_exft(df['GLHE Inlet Temperature [C]'], df['GLHE Outlet Temperature [C]'])\ndf['Corrected Temps'] = t_c",
"_____no_output_____"
],
[
"df.plot(y=['GLHE Inlet Temperature [C]', 'GLHE Outlet Temperature [C]', 'Corrected Temps', 'Average Fluid Temp [C]'], marker='X')",
"_____no_output_____"
],
[
"df.head(20)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb72bd3eb940479877e5ecc4f52d3c47b90f5a1c | 245,817 | ipynb | Jupyter Notebook | classifier-ml.ipynb | saidsef/ml-classifier | d662f57c9ba955f386db7c5b8756c8e48b6872e0 | [
"MIT"
]
| 8 | 2018-10-09T01:15:16.000Z | 2022-02-13T22:03:01.000Z | classifier-ml.ipynb | saidsef/ml-classifier | d662f57c9ba955f386db7c5b8756c8e48b6872e0 | [
"MIT"
]
| 2 | 2020-06-16T11:01:09.000Z | 2020-10-24T17:05:27.000Z | classifier-ml.ipynb | saidsef/ml-classifier | d662f57c9ba955f386db7c5b8756c8e48b6872e0 | [
"MIT"
]
| null | null | null | 319.657997 | 218,009 | 0.910043 | [
[
[
"%load_ext autoreload\n%autoreload 2\n\nimport re\nimport glob\nimport lzma\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport requests as r\nimport seaborn as sns\nimport warnings\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom joblib import hash\nfrom collections import Counter\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.linear_model import RidgeClassifier, RidgeClassifierCV, PassiveAggressiveClassifier\n\nwarnings.simplefilter('ignore')\nmpl.style.use('ggplot')",
"_____no_output_____"
]
],
[
[
"## Source Data\n\nIF source data is missing run Elasticsearch query to extract data and then save it in JSON format to `data` directory",
"_____no_output_____"
]
],
[
[
"# news_json = r.get('http://locslhost:9200/indice/doc/_search?sort=date:desc&size=4000').json()\n# with open('./data/news.json', 'w', encoding='utf8') as fh:\n# dump(news_json['hits']['hits'], fh)",
"_____no_output_____"
],
[
"# df = pd.io.json.json_normalize(news_json['hits']['hits'])\n# df.to_json('./data/news.json')",
"_____no_output_____"
],
[
"df = pd.read_json('./data/news.json')",
"_____no_output_____"
]
],
[
[
"## Common issues that we generally face during the data preparation phase:\n - Format and structure normalization\n - Detect and fix missing values\n - Duplicates removal\n - Units normalization\n - Constraints validations\n - Anomaly detection and removal\n - Study of features importance/relevance\n - Dimentional reduction, feature selection & extraction",
"_____no_output_____"
]
],
[
[
"df = df[['_source.body', '_source.date', '_source.subject', '_source.language', '_source.categories']]\ndf.columns = ['body', 'pubdate', 'subject', 'language', 'categories']",
"_____no_output_____"
],
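[
"# A quick look at missing values, one of the preparation steps listed above; this is only a\n# check and does not modify the frame.\ndf.isnull().sum()",
"_____no_output_____"
],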
[
"df.drop_duplicates(inplace=True)\ndf.head(1).T.style",
"_____no_output_____"
],
[
"df = df.loc[(df['categories'] != 'News') & \n (df['categories'] != 'articles 2015') & \n (df['categories'] != 'frontpage') &\n (df['categories'] != 'English') &\n (df['categories'] != 'Comment') &\n (df['categories'] != 'Uncategorized') &\n (df['language'] == 'English')]",
"_____no_output_____"
],
[
"df['categories'] = df['categories'].str.replace(r'[^a-zA-Z_, ]+', '').replace(', ', '')\ndf['categories'] = df['categories'].str.replace(r'^, ', '')",
"_____no_output_____"
],
[
"df.groupby(['categories']).agg({'count'}).drop_duplicates()",
"_____no_output_____"
],
[
"df['cat_id'] = df['categories'].factorize()[0]\ndf['lang_id'] = df['language'].factorize()[0]\ndf['char_count'] = df['body'].apply(len)\ndf['word_count'] = df['body'].apply(lambda x: len(x.split()))\ndf['word_density'] = df['char_count'] / (df['word_count']+1)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"sns.set()\nsns.pairplot(df, height=3.5, kind=\"reg\", palette=\"husl\", diag_kind=\"auto\")",
"_____no_output_____"
],
[
"\nxtrain, xtest, ytrain, ytest = train_test_split(df['body'], df['categories'], test_size=0.2, random_state=42)\n",
"_____no_output_____"
],
[
"\ntfidf = TfidfVectorizer(use_idf=False, sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')\n",
"_____no_output_____"
],
[
"features = tfidf.fit_transform(df.body).toarray()\nlabels = df.cat_id",
"_____no_output_____"
],
[
"engines = [('PassiveAggressiveClassifier', PassiveAggressiveClassifier(fit_intercept=True, n_jobs=-1, random_state=0)),\n ('NearestCentroid', NearestCentroid()), \n ('RandomForestClassifier', RandomForestClassifier(min_samples_leaf=0.01))]",
"_____no_output_____"
],
[
"for name, engine in engines:\n clf = make_pipeline(tfidf, engine).fit(xtrain, ytrain)\n prediction = clf.predict(xtest)\n score = clf.score(xtest, prediction)\n with lzma.open('./data/{}.pickle.xz'.format(name.lower()), 'wb') as f:\n pickle.dump(clf, f, protocol=5)",
"_____no_output_____"
],
[
"s = '''\n\n‘Guys, you’ve got to hear this,” I said. I was sitting in front of my computer one day in July 2012, with one eye on a screen of share prices and the other on a live stream of the House of Commons Treasury select committee hearings. As the Barclays share price took a graceful swan dive, I pulled my headphones out of the socket and turned up the volume so everyone could hear. My colleagues left their terminals and came around to watch BBC Parliament with me.\n\nIt didn’t take long to realise what was happening. “Bob’s getting murdered,” someone said.\n\nBob Diamond, the swashbuckling chief executive of Barclays, had been called before the committee to explain exactly what his bank had been playing at in regards to the Libor rate-fixing scandal. The day before his appearance, he had made things very much worse by seeming to accuse the deputy governor of the Bank of England of ordering him to fiddle an important benchmark, then walking back the accusation as soon as it was challenged. He was trying to turn on his legendary charm in front of a committee of angry MPs, and it wasn’t working. On our trading floor, in Mayfair, calls were coming in from all over the City. Investors needed to know what was happening and whether the damage was reparable.\n\nA couple of weeks later, the damage was done. The money was gone, Diamond was out of a job and the market, as it always does, had moved on. We were left asking ourselves: How did we get it so wrong?\n\n'''",
"_____no_output_____"
],
[
"result = []\nfor file in glob.glob('./data/*.pickle.xz'):\n clf = pickle.load(lzma.open('{}'.format(file), 'rb'))\n ypred = clf.predict([s])\n score = clf.score([s], ypred)\n print(file, ypred[0], score)\n result.append(ypred[0])\n\nprint(pd.io.json.dumps(Counter(result), indent=4))",
"./data/nearestcentroid.pickle Opinion 1.0\n./data/passiveaggressiveclassifier.pickle Opinion 1.0\n./data/randomforestclassifier.pickle Opinion 1.0\n./data/ridgeclassifier.pickle Opinion 1.0\n./data/ridgeclassifiercv.pickle Opinion 1.0\n{\n \"Opinion\":5\n}\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb72d1eebcdc0bc4cc9a2bb80c44fe7406b7ade9 | 16,358 | ipynb | Jupyter Notebook | .vscode-insiders/extensions/ms-toolsai.jupyter-2020.12.414227025/pythonFiles/Notebooks intro.ipynb | Guitaraholic/dotfiles | 9c43707434a724a2e5c7799f917f84573e93e0ec | [
"MIT"
]
| 1 | 2020-08-07T16:09:57.000Z | 2020-08-07T16:09:57.000Z | .vscode-insiders/extensions/ms-toolsai.jupyter-2020.12.414227025/pythonFiles/Notebooks intro.ipynb | Guitaraholic/dotfiles | 9c43707434a724a2e5c7799f917f84573e93e0ec | [
"MIT"
]
| null | null | null | .vscode-insiders/extensions/ms-toolsai.jupyter-2020.12.414227025/pythonFiles/Notebooks intro.ipynb | Guitaraholic/dotfiles | 9c43707434a724a2e5c7799f917f84573e93e0ec | [
"MIT"
]
| null | null | null | 80.186275 | 4,295 | 0.61933 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb72ee4bf0775421d61a359553c52840f4c52727 | 16,593 | ipynb | Jupyter Notebook | notebooks/old/Build CNN RNN Model for Seizure Detection.ipynb | adam2392/dnn-unsupervised | 445b472b1e239f7dee15cb97c6351c86ec84e5e6 | [
"Apache-2.0"
]
| null | null | null | notebooks/old/Build CNN RNN Model for Seizure Detection.ipynb | adam2392/dnn-unsupervised | 445b472b1e239f7dee15cb97c6351c86ec84e5e6 | [
"Apache-2.0"
]
| 6 | 2020-01-28T22:17:43.000Z | 2021-02-02T21:43:36.000Z | notebooks/old/Build CNN RNN Model for Seizure Detection.ipynb | adam2392/dnn-unsupervised | 445b472b1e239f7dee15cb97c6351c86ec84e5e6 | [
"Apache-2.0"
]
| 1 | 2020-12-19T17:34:03.000Z | 2020-12-19T17:34:03.000Z | 32.663386 | 226 | 0.501537 | [
[
[
"import time\n\nimport numpy as np\nnp.random.seed(1234)\n\nfrom functools import reduce\n\nimport scipy.io\nfrom scipy.interpolate import griddata\nfrom sklearn.preprocessing import scale\n# from utils import augment_EEG, cart2sph, pol2cart\n\n######### import DNN for training using GPUs #########\nfrom keras.utils.training_utils import multi_gpu_model\n\n######### import DNN frameworks #########\nimport tensorflow as tf\nimport keras\n\n# import high level optimizers, models and layers\nfrom keras.optimizers import SGD\nfrom keras.models import Sequential\nfrom keras.layers import InputLayer\n\n# for CNN\nfrom keras.layers import Conv2D, MaxPooling2D\n# for RNN\nfrom keras.layers import LSTM\n\n# for different layer functionality\nfrom keras.layers import Dense, Dropout, Flatten\n\n# utility functionality for keras\nfrom keras.preprocessing import sequence\nfrom keras.layers.embeddings import Embedding\n\n# from keras import backend as K",
"Using TensorFlow backend.\n"
]
],
[
[
"# 1. Import in Data Necessary\nHere, we can import the MNIST, or IMDB dataset for proof-of-concept. We also provide code for importing iEEG recording data, and how to transform them into input that can be provided to the DNN models built in section 2.",
"_____no_output_____"
]
],
[
[
"from keras.datasets import imdb\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nprint len(mnist)\nprint type(mnist)",
"_____no_output_____"
],
[
"top_words = 5000\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)",
"_____no_output_____"
],
[
"# import raw data\n\n# perform signal processing - FFT\n\n",
"_____no_output_____"
],
[
"# save data\n",
"_____no_output_____"
],
[
"# load back in data and augment dataset\n",
"_____no_output_____"
]
],
[
[
"# 2. Preprocess Data\nHere, we preprocess data by producing the final set of images needed to input into the DNN model.\n\nWe first augment the dataset by applying transformations that the model will be invariant to (e.g. rotation, translation, etc.). \n\nThen we will mesh the data to fill in any missing data.",
"_____no_output_____"
],
[
"# 3. Build DNN Model\nHere, we build the DNN model that will need to be trained. It will consist of a CNN-RNN model that has a VGG style CNN model with LSTM used for the RNN.\n\nThese will be capable of efficiently learning spatial patterns in the heatmaps fed in, and also capable of learning complex timing behavior from the recurrent neural network.",
"_____no_output_____"
]
],
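[
[
"# A minimal sketch of the augmentation step described in section 2. It only adds small\n# Gaussian jitter so the network sees perturbed copies of each heatmap; the commented-out\n# augment_EEG import at the top of this notebook would be the fuller alternative.\n# `images` is assumed to have shape (n_samples, imsize, imsize, n_colors).\ndef jitter_augment(images, sigma=0.01):\n    noise = np.random.normal(0.0, sigma, size=images.shape)\n    return np.concatenate([images, images + noise], axis=0)",
"_____no_output_____"
]
],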
[
[
"from ieeg_cnn_rnn import IEEGdnn",
"_____no_output_____"
],
[
"imsize=32 # the imsize dimension\nn_colors=4 # the number of frequency bands we use can correpond\n\n###### CNN Parameters #######\nn_layers = (4,2,1) # the number of layers of convolution\npoolsize=(2,2) # the size of the pooling done in 2D\nn_outunits = 2 # the size of the output of the model (# classes)\nn_fcunits = 1024 # the size of the fully connected layer at output\n\n##### Optimizer Parameters #######\nloss='categorical_crossentropy'\nADAM = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nmetrics = ['accuracy']\n\n# initialize the ieeg dnn model\nieegdnn = IEEGdnn(imsize, n_colors)\nieegdnn.build_cnn(w_init=None, n_layers=n_layers,poolsize=(2,2))\nieegdnn.build_output(n_outunits=n_outunits, n_fcunits=n_fcunits)\n\nprint ieegdnn.model.output\n\n# ieegdnn.compile_model(loss=loss, optimizer=ADAM, metrics=metrics)",
"Tensor(\"dense_2/Softmax:0\", shape=(?, 2), dtype=float32)\n"
],
[
"display(ieegdnn.model_config)",
"_____no_output_____"
],
[
"####### RNN Parameters ######\nnum_units = 128\ngrad_clipping = 110\nnonlinearity = keras.activations.tanh\n\nieegdnn.build_rnn(num_units=num_units, grad_clipping=grad_clipping, nonlinearity=nonlinearity)\n\n",
"_____no_output_____"
]
],
[
[
"# 4. Train Model and Test\nHere, we run the training on gpu(s) and document the entire training time, and visualize the output produced.",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb730b4579aa385a64cd2c92351e092a9313b31d | 22,876 | ipynb | Jupyter Notebook | 6+7_julia_and_jump/.ipynb_checkpoints/Automatic differentiation plus nonlinear in JuMP- complete-checkpoint.ipynb | adelarue/cos_2021 | 672f32637bd528b700e0e0ba099c17d4b3c26341 | [
"MIT"
]
| 5 | 2020-11-14T16:12:49.000Z | 2022-01-03T16:57:02.000Z | 6+7_julia_and_jump/.ipynb_checkpoints/Automatic differentiation plus nonlinear in JuMP- complete-checkpoint.ipynb | adelarue/cos_2021 | 672f32637bd528b700e0e0ba099c17d4b3c26341 | [
"MIT"
]
| null | null | null | 6+7_julia_and_jump/.ipynb_checkpoints/Automatic differentiation plus nonlinear in JuMP- complete-checkpoint.ipynb | adelarue/cos_2021 | 672f32637bd528b700e0e0ba099c17d4b3c26341 | [
"MIT"
]
| 1 | 2021-01-28T01:35:27.000Z | 2021-01-28T01:35:27.000Z | 27.233333 | 549 | 0.524436 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb730f714cc7fe5ceb2a32f73476240c652b60ac | 27,053 | ipynb | Jupyter Notebook | .ipynb_checkpoints/1main_time_series-v5-checkpoint.ipynb | danhtaihoang/categorical-variables | 6993315bc9664496896883262ff5bf6125e8d0d0 | [
"MIT"
]
| null | null | null | .ipynb_checkpoints/1main_time_series-v5-checkpoint.ipynb | danhtaihoang/categorical-variables | 6993315bc9664496896883262ff5bf6125e8d0d0 | [
"MIT"
]
| null | null | null | .ipynb_checkpoints/1main_time_series-v5-checkpoint.ipynb | danhtaihoang/categorical-variables | 6993315bc9664496896883262ff5bf6125e8d0d0 | [
"MIT"
]
| null | null | null | 68.488608 | 16,976 | 0.75378 | [
[
[
"# 2018.10.27: Multiple states: Time series\n## incremental update",
"_____no_output_____"
]
],
[
[
"import sys,os\nimport numpy as np\nfrom scipy import linalg\nfrom sklearn.preprocessing import OneHotEncoder\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"# setting parameter:\nnp.random.seed(1)\n\nn = 10 # number of positions\nm = 3 # number of values at each position\nl = 2*((n*m)**2) # number of samples\n\ng = 1.",
"_____no_output_____"
],
[
"def itab(n,m): \n i1 = np.zeros(n)\n i2 = np.zeros(n)\n for i in range(n):\n i1[i] = i*m\n i2[i] = (i+1)*m\n\n return i1.astype(int),i2.astype(int)\n\ni1tab,i2tab = itab(n,m)",
"_____no_output_____"
],
[
"# generate coupling matrix w0:\ndef generate_coupling(n,m,g):\n nm = n*m\n w = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))\n \n for i in range(n):\n i1,i2 = i1tab[i],i2tab[i]\n w[i1:i2,:] -= w[i1:i2,:].mean(axis=0) \n\n for i in range(n):\n i1,i2 = i1tab[i],i2tab[i]\n w[:,i1:i2] -= w[:,i1:i2].mean(axis=1)[:,np.newaxis] \n \n return w",
"_____no_output_____"
],
[
"w0 = generate_coupling(n,m,g)\n\"\"\"\nplt.figure(figsize=(3,3))\nplt.title('actual coupling matrix')\nplt.imshow(w0,cmap='rainbow',origin='lower')\nplt.xlabel('j')\nplt.ylabel('i')\nplt.clim(-0.3,0.3)\nplt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.3,0,0.3])\nplt.show()\n\"\"\"",
"_____no_output_____"
],
[
"# 2018.10.27: generate time series by MCMC\ndef generate_sequences_MCMC(w,n,m,l): \n #print(i1tab,i2tab)\n \n # initial s (categorical variables)\n s_ini = np.random.randint(0,m,size=(l,n)) # integer values\n #print(s_ini)\n\n # onehot encoder \n enc = OneHotEncoder(n_values=m)\n s = enc.fit_transform(s_ini).toarray()\n #print(s) \n\n ntrial = 100\n\n for t in range(l-1):\n h = np.sum(s[t,:]*w[:,:],axis=1)\n for i in range(n):\n i1,i2 = i1tab[i],i2tab[i]\n \n k = np.random.randint(0,m) \n for itrial in range(ntrial): \n k2 = np.random.randint(0,m) \n while k2 == k:\n k2 = np.random.randint(0,m)\n \n if np.exp(h[i1+k2]- h[i1+k]) > np.random.rand():\n k = k2\n \n s[t+1,i1:i2] = 0.\n s[t+1,i1+k] = 1.\n \n return s ",
"_____no_output_____"
],
[
"s = generate_sequences_MCMC(w0,n,m,l) ",
"_____no_output_____"
],
[
"#print(s[:5])",
"_____no_output_____"
],
[
"def fit_increment1(s,n,m):\n l = s.shape[0]\n \n s_av = np.mean(s[:-1],axis=0)\n ds = s[:-1] - s_av\n c = np.cov(ds,rowvar=False,bias=True)\n #print(c)\n\n c_inv = linalg.pinv(c,rcond=1e-15)\n #print(c_inv)\n\n nm = n*m\n wini = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))\n #print(w)\n\n nloop = 100\n w_infer = np.zeros((nm,nm))\n\n for i in range(n):\n #print(i)\n i1,i2 = i1tab[i],i2tab[i]\n #s1 = np.copy(s[1:,i1:i2])\n\n w = wini[i1:i2,:]\n h = s[1:,i1:i2]\n for iloop in range(nloop):\n h_av = h.mean(axis=0)\n dh = h - h_av\n\n dhds = dh[:,:,np.newaxis]*ds[:,np.newaxis,:]\n dhds_av = dhds.mean(axis=0)\n\n w = np.dot(dhds_av,c_inv)\n \n #w = w - w.mean(axis=0) \n\n h = np.dot(s[:-1],w.T)\n\n p = np.exp(h)\n p_sum = p.sum(axis=1)\n\n for k in range(m):\n p[:,k] = p[:,k]/p_sum[:]\n\n h += s[1:,i1:i2] - p\n\n w_infer[i1:i2,:] = w \n return w_infer",
"_____no_output_____"
],
[
"def fit_increment2(s,n,m):\n l = s.shape[0]\n\n s_av = np.mean(s[:-1],axis=0)\n ds = s[:-1] - s_av\n c = np.cov(ds,rowvar=False,bias=True)\n #print(c)\n\n c_inv = linalg.pinv(c,rcond=1e-15)\n #print(c_inv)\n\n nm = n*m\n wini = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))\n #print(w)\n\n nloop = 10\n w_infer = np.zeros((nm,nm))\n\n p_obs = np.zeros(l-1)\n\n for i in range(n):\n #print(i)\n i1,i2 = i1tab[i],i2tab[i]\n #s1 = np.copy(s[1:,i1:i2])\n\n iobs = np.argmax(s[1:,i1:i2],axis=1)\n\n w = wini[i1:i2,:].copy()\n #h = np.dot(s[:-1,:],w.T)\n h = (s[1:,i1:i2]).copy()\n for iloop in range(nloop):\n #h = np.dot(s[:-1],w.T)\n\n p = np.exp(h)\n p_sum = p.sum(axis=1)\n\n for k in range(m):\n p[:,k] = p[:,k]/p_sum[:]\n\n for t in range(l-1): \n p_obs[t] = p[t,iobs[t]]\n\n #mse = ((w0[i1:i2,:]-w)**2).mean() \n #cost = ((1.-p_obs)**2).mean() \n #print(iloop,mse,cost) \n\n # update h: multiplicative\n #for k in range(m): \n # h[:,k] *= 1./p_obs[:] \n\n # update h: incremental \n h += s[1:,i1:i2] - p \n\n h_av = h.mean(axis=0)\n dh = h - h_av\n\n dhds = dh[:,:,np.newaxis]*ds[:,np.newaxis,:]\n dhds_av = dhds.mean(axis=0)\n\n w = np.dot(dhds_av,c_inv)\n\n h = np.dot(s[:-1],w.T)\n\n #w = w - w.mean(axis=0) \n\n w_infer[i1:i2,:] = w\n \n return w_infer\n#plt.scatter(w0,w_infer)\n#plt.plot([-0.3,0.3],[-0.3,0.3],'r--') ",
"_____no_output_____"
],
[
"w = fit_increment1(s,n,m)",
"_____no_output_____"
],
[
"plt.scatter(w0,w)\nplt.plot([-0.3,0.3],[-0.3,0.3],'r--')",
"_____no_output_____"
],
[
"mse = ((w0-w)**2).mean()\nslope = (w0*w).sum()/(w0**2).sum()\n\nprint(mse,slope)",
"(0.0027510545633005483, 0.9928134300469619)\n"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb73100d216b97041e6a74754a8c689b351aa05a | 374,768 | ipynb | Jupyter Notebook | ATM analysis/ATManalysis.ipynb | vandal-dev/DataLibrary | 15a286d46adcb1acec029bd2a2cc5b78c085384b | [
"Apache-2.0"
]
| 1 | 2021-08-15T14:52:49.000Z | 2021-08-15T14:52:49.000Z | ATM analysis/ATManalysis.ipynb | duality-py/DataLibrary | 5d772ac66948d7922556fe8432d637cfcae750af | [
"Apache-2.0"
]
| null | null | null | ATM analysis/ATManalysis.ipynb | duality-py/DataLibrary | 5d772ac66948d7922556fe8432d637cfcae750af | [
"Apache-2.0"
]
| 2 | 2021-12-01T13:57:22.000Z | 2021-12-01T14:00:59.000Z | 142.551541 | 87,737 | 0.844448 | [
[
[
"import pandas as pd\r\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_excel(r'C:\\Users\\kundi\\Moji_radovi\\MVanalysis\\datasetup\\MV_DataFrame.xlsx')",
"_____no_output_____"
],
[
"df['Sat'] = df['Uplaćeno'].astype(str).str.slice(-8,-6)\r\ndf['Datum'] = df['Uplaćeno'].astype(str).str.slice(-19,-13)",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2839 entries, 0 to 2838\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Partner 2839 non-null object \n 1 Opis 1216 non-null object \n 2 Uplata 2839 non-null float64\n 3 Uplaćeno 2839 non-null object \n 4 Sat 2839 non-null object \n 5 Datum 2839 non-null object \ndtypes: float64(1), object(5)\nmemory usage: 133.2+ KB\n"
],
[
"df",
"_____no_output_____"
],
[
"df.drop(columns = ['Uplaćeno'], inplace = True)",
"_____no_output_____"
],
[
"akontacije = df.loc[df['Opis'] == 'Akontacija platomat']\r\nuplate = df.loc[df['Opis'] != 'Akontacija platomat']",
"_____no_output_____"
],
[
"akontacije",
"_____no_output_____"
],
[
"uplate",
"_____no_output_____"
],
[
"akontacije.describe()",
"_____no_output_____"
],
[
"uplate.describe()",
"_____no_output_____"
],
[
"veće_od_50lp = akontacije.loc[akontacije['Uplata'] > 0.51]",
"_____no_output_____"
],
[
"veće_od_50lp",
"_____no_output_____"
],
[
"print('Broj nesukladnosti je:', int(veće_od_50lp['Partner'].count()))\r\nprint('Broj akontacija je:', int(akontacije['Partner'].count()))\r\nnesukladnosti = int(veće_od_50lp['Partner'].count()) / int(akontacije['Partner'].count())\r\nprint('Postotak detektiranih nesukladnosti je: ', (nesukladnosti * 100), '%')",
"Broj nesukladnosti je: 18\nBroj akontacija je: 1216\nPostotak detektiranih nesukladnosti je: 1.4802631578947367 %\n"
],
[
"plt.boxplot(akontacije['Uplata'])\r\nplt.grid()",
"_____no_output_____"
],
[
"plt.boxplot(uplate['Uplata'])\r\nplt.grid()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"akontacije",
"_____no_output_____"
],
[
"dani = df['Datum'].unique()\r\ndani = sorted(dani, key = lambda x: x.split('.')[1])",
"_____no_output_____"
],
[
"dani",
"_____no_output_____"
],
[
"uplate_po_danu = uplate.groupby('Datum').sum()",
"_____no_output_____"
],
[
"uplate_po_danu",
"_____no_output_____"
],
[
"akontacije_po_danu = akontacije.groupby('Datum').sum()",
"_____no_output_____"
],
[
"akontacije_po_danu",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Uplaćeni iznosi kroz vrijeme', fontsize = 20, weight = 'bold')\r\nplt.plot(df['Uplata'])\r\nplt.axhline(y = 122.11, color = 'k', linestyle = 'solid')\r\nplt.xlabel('Broj uplata', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Iznos uplata', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Histogram vrijednosti uplata', fontsize = 20, weight = 'bold')\r\nplt.hist(df['Uplata'], bins=10, ec = 'm')\r\nplt.xlabel('Iznos uplate', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Broj uplata', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Pojavljivanje nesukladnosti (Vrijednosti iznad iscrtkane linije)', fontsize = 20, weight = 'bold')\r\nplt.plot(akontacije['Uplata'])\r\nplt.axhline(y = 0.51, color = 'k', linestyle = 'dashed')\r\nplt.xlabel('Broj uplata', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Iznos akontacije', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"uplate.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1623 entries, 1 to 2837\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Partner 1623 non-null object \n 1 Opis 0 non-null object \n 2 Uplata 1623 non-null float64\n 3 Sat 1623 non-null object \n 4 Datum 1623 non-null object \ndtypes: float64(1), object(4)\nmemory usage: 76.1+ KB\n"
],
[
"sati = df['Sat'].unique()\r\nsati.sort()",
"_____no_output_____"
],
[
"uplate_po_satu = uplate.groupby('Sat').sum()",
"_____no_output_____"
],
[
"uplate_po_satu",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Uplate po satima', fontsize = 20, weight = 'bold')\r\nplt.bar(sati, uplate_po_satu['Uplata'])\r\nplt.xlabel('Sati', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Suma uplata', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"akontacije_po_satu = akontacije.groupby('Sat').sum()\r\nakontacije_po_satu_zbroj = akontacije.groupby('Sat').count()",
"_____no_output_____"
],
[
"akontacije_po_satu",
"_____no_output_____"
],
[
"akontacije_po_satu_zbroj",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Akontacije po satima u kn', fontsize = 20, weight = 'bold')\r\nplt.bar(sati, akontacije_po_satu['Uplata'])\r\nplt.xlabel('Sati', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Suma akontacija', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Broj izvršenih akontacija po satima', fontsize = 20, weight = 'bold')\r\nplt.bar(sati, akontacije_po_satu_zbroj['Uplata'])\r\nplt.xlabel('Sati', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Broj akontacija', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Vrijednost akontacija po danima u kn', fontsize = 20, weight = 'bold')\r\nplt.bar(dani, akontacije_po_danu['Uplata'])\r\nplt.xlabel('Dani', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Suma akontacija', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Vrijednost uplata po danima u kn', fontsize = 20, weight = 'bold')\r\nplt.bar(dani, uplate_po_danu['Uplata'])\r\nplt.xlabel('Dani', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Suma uplata', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"veće_od_50lp_po_danu_zbroj = veće_od_50lp.groupby('Datum').sum()\r\nveće_od_50lp_po_danu = veće_od_50lp.groupby('Datum').count()",
"_____no_output_____"
],
[
"veće_od_50lp_po_danu",
"_____no_output_____"
],
[
"veće_od_50lp_po_danu_zbroj",
"_____no_output_____"
],
[
"dani_nesukladnosti = veće_od_50lp['Datum'].unique()\r\ndani_nesukladnosti.sort()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Izravan trošak nesukladnosti po danu u kn', fontsize = 20, weight = 'bold')\r\nplt.bar(dani_nesukladnosti , veće_od_50lp_po_danu_zbroj['Uplata'])\r\nplt.xlabel('Dani', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Trošak nesukladnosti', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,5))\r\nfig.suptitle('Broj nesukladnosti po danu u mjesecu', fontsize = 20, weight = 'bold')\r\nplt.bar(dani_nesukladnosti, veće_od_50lp_po_danu['Uplata'])\r\nplt.xlabel('Dani', fontsize = 12, weight = 'semibold')\r\nplt.ylabel('Broj nesukladnosti', fontsize = 12, weight = 'semibold')\r\nplt.grid()\r\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb731180bb59e61e8509fdd0715ac42a19c76eea | 410,816 | ipynb | Jupyter Notebook | src/53_Create_Submission_30_extra_tree_regressor.ipynb | fkubota/kaggle-Predicting-Molecular-Properties | ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c | [
"MIT"
]
| null | null | null | src/53_Create_Submission_30_extra_tree_regressor.ipynb | fkubota/kaggle-Predicting-Molecular-Properties | ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c | [
"MIT"
]
| null | null | null | src/53_Create_Submission_30_extra_tree_regressor.ipynb | fkubota/kaggle-Predicting-Molecular-Properties | ceaf401a2bfab10a3314f3122b12cf07b7c6bf2c | [
"MIT"
]
| 2 | 2020-09-26T08:38:36.000Z | 2021-01-10T10:56:57.000Z | 170.321725 | 51,956 | 0.86198 | [
[
[
"# Introduction\n- nb45の編集\n- nb50 の結果を参考にExtraTreesRegressor回帰を行う",
"_____no_output_____"
],
[
"# Import everything I need :)",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings('ignore')\nimport time\nimport multiprocessing\nimport glob\nimport gc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.ensemble import ExtraTreesRegressor, AdaBoostRegressor, RandomForestRegressor\nfrom fastprogress import progress_bar",
"_____no_output_____"
]
],
[
[
"# Preparation",
"_____no_output_____"
]
],
[
[
"nb = 53\nisSmallSet = False\nlength = 200000\nmodel_name = 'extra_trees_regressor'",
"_____no_output_____"
],
[
"pd.set_option('display.max_columns', 200)",
"_____no_output_____"
],
[
"# use atomic numbers to recode atomic names\nATOMIC_NUMBERS = {\n 'H': 1,\n 'C': 6,\n 'N': 7,\n 'O': 8,\n 'F': 9\n}",
"_____no_output_____"
],
[
"file_path = '../input/champs-scalar-coupling/'\nglob.glob(file_path + '*')",
"_____no_output_____"
],
[
"# train\npath = file_path + 'train.csv'\nif isSmallSet:\n train = pd.read_csv(path) [:length]\nelse:\n train = pd.read_csv(path)",
"_____no_output_____"
],
[
"# test\npath = file_path + 'test.csv'\nif isSmallSet:\n test = pd.read_csv(path)[:length]\nelse:\n test = pd.read_csv(path)",
"_____no_output_____"
],
[
"# structure\npath = file_path + 'structures.csv'\nstructures = pd.read_csv(path)",
"_____no_output_____"
],
[
"# fc_train\npath = file_path + 'nb47_fc_train.csv'\nif isSmallSet:\n fc_train = pd.read_csv(path)[:length]\nelse:\n fc_train = pd.read_csv(path)",
"_____no_output_____"
],
[
"# fc_test\npath = file_path + 'nb47_fc_test.csv'\nif isSmallSet:\n fc_test = pd.read_csv(path)[:length]\nelse:\n fc_test = pd.read_csv(path)",
"_____no_output_____"
],
[
"# train dist-interact\npath = file_path + 'nb33_train_dist-interaction.csv'\nif isSmallSet:\n dist_interact_train = pd.read_csv(path)[:length]\nelse:\n dist_interact_train = pd.read_csv(path)",
"_____no_output_____"
],
[
"# test dist-interact\npath = file_path + 'nb33_test_dist-interaction.csv'\nif isSmallSet:\n dist_interact_test = pd.read_csv(path)[:length]\nelse:\n dist_interact_test = pd.read_csv(path)",
"_____no_output_____"
],
[
"# ob charge train\npath = file_path + 'train_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'\nif isSmallSet:\n ob_charge_train = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)\nelse:\n ob_charge_train = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)",
"_____no_output_____"
],
[
"# ob charge test\npath = file_path + 'test_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'\nif isSmallSet:\n ob_charge_test = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)\nelse:\n ob_charge_test = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)",
"_____no_output_____"
],
[
"len(test), len(fc_test)",
"_____no_output_____"
],
[
"len(train), len(fc_train)",
"_____no_output_____"
],
[
"if isSmallSet:\n print('using SmallSet !!')\n print('-------------------')\n\nprint(f'There are {train.shape[0]} rows in train data.')\nprint(f'There are {test.shape[0]} rows in test data.')\n\nprint(f\"There are {train['molecule_name'].nunique()} distinct molecules in train data.\")\nprint(f\"There are {test['molecule_name'].nunique()} distinct molecules in test data.\")\nprint(f\"There are {train['atom_index_0'].nunique()} unique atoms.\")\nprint(f\"There are {train['type'].nunique()} unique types.\")",
"There are 4658147 rows in train data.\nThere are 2505542 rows in test data.\nThere are 85003 distinct molecules in train data.\nThere are 45772 distinct molecules in test data.\nThere are 29 unique atoms.\nThere are 8 unique types.\n"
]
],
[
[
"---\n## myFunc\n**metrics**",
"_____no_output_____"
]
],
[
[
"def kaggle_metric(df, preds):\n df[\"prediction\"] = preds\n maes = []\n for t in df.type.unique():\n y_true = df[df.type==t].scalar_coupling_constant.values\n y_pred = df[df.type==t].prediction.values\n mae = np.log(mean_absolute_error(y_true, y_pred))\n maes.append(mae)\n return np.mean(maes)",
"_____no_output_____"
]
],
[
[
"---\n**momory**",
"_____no_output_____"
]
],
[
[
"def reduce_mem_usage(df, verbose=True):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2\n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n c_prec = df[col].apply(lambda x: np.finfo(x).precision).max()\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max and c_prec == np.finfo(np.float16).precision:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max and c_prec == np.finfo(np.float32).precision:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n end_mem = df.memory_usage().sum() / 1024**2\n if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df",
"_____no_output_____"
]
],
[
[
"# Feature Engineering",
"_____no_output_____"
],
[
"Build Distance Dataset",
"_____no_output_____"
]
],
[
[
"def build_type_dataframes(base, structures, coupling_type):\n base = base[base['type'] == coupling_type].drop('type', axis=1).copy()\n base = base.reset_index()\n base['id'] = base['id'].astype('int32')\n structures = structures[structures['molecule_name'].isin(base['molecule_name'])]\n return base, structures\n\n# a,b = build_type_dataframes(train, structures, '1JHN')",
"_____no_output_____"
],
[
"def add_coordinates(base, structures, index):\n df = pd.merge(base, structures, how='inner',\n left_on=['molecule_name', f'atom_index_{index}'],\n right_on=['molecule_name', 'atom_index']).drop(['atom_index'], axis=1)\n df = df.rename(columns={\n 'atom': f'atom_{index}',\n 'x': f'x_{index}',\n 'y': f'y_{index}',\n 'z': f'z_{index}'\n })\n return df",
"_____no_output_____"
],
[
"def add_atoms(base, atoms):\n df = pd.merge(base, atoms, how='inner',\n on=['molecule_name', 'atom_index_0', 'atom_index_1'])\n return df",
"_____no_output_____"
],
[
"def merge_all_atoms(base, structures):\n df = pd.merge(base, structures, how='left',\n left_on=['molecule_name'],\n right_on=['molecule_name'])\n df = df[(df.atom_index_0 != df.atom_index) & (df.atom_index_1 != df.atom_index)]\n return df",
"_____no_output_____"
],
[
"def add_center(df):\n df['x_c'] = ((df['x_1'] + df['x_0']) * np.float32(0.5))\n df['y_c'] = ((df['y_1'] + df['y_0']) * np.float32(0.5))\n df['z_c'] = ((df['z_1'] + df['z_0']) * np.float32(0.5))\n\ndef add_distance_to_center(df):\n df['d_c'] = ((\n (df['x_c'] - df['x'])**np.float32(2) +\n (df['y_c'] - df['y'])**np.float32(2) + \n (df['z_c'] - df['z'])**np.float32(2)\n )**np.float32(0.5))\n\ndef add_distance_between(df, suffix1, suffix2):\n df[f'd_{suffix1}_{suffix2}'] = ((\n (df[f'x_{suffix1}'] - df[f'x_{suffix2}'])**np.float32(2) +\n (df[f'y_{suffix1}'] - df[f'y_{suffix2}'])**np.float32(2) + \n (df[f'z_{suffix1}'] - df[f'z_{suffix2}'])**np.float32(2)\n )**np.float32(0.5))",
"_____no_output_____"
],
[
"def add_distances(df):\n n_atoms = 1 + max([int(c.split('_')[1]) for c in df.columns if c.startswith('x_')])\n \n for i in range(1, n_atoms):\n for vi in range(min(4, i)):\n add_distance_between(df, i, vi)",
"_____no_output_____"
],
[
"def add_n_atoms(base, structures):\n dfs = structures['molecule_name'].value_counts().rename('n_atoms').to_frame()\n return pd.merge(base, dfs, left_on='molecule_name', right_index=True)",
"_____no_output_____"
],
[
"def build_couple_dataframe(some_csv, structures_csv, coupling_type, n_atoms=10):\n base, structures = build_type_dataframes(some_csv, structures_csv, coupling_type)\n base = add_coordinates(base, structures, 0)\n base = add_coordinates(base, structures, 1)\n \n base = base.drop(['atom_0', 'atom_1'], axis=1)\n atoms = base.drop('id', axis=1).copy()\n if 'scalar_coupling_constant' in some_csv:\n atoms = atoms.drop(['scalar_coupling_constant'], axis=1)\n \n add_center(atoms)\n atoms = atoms.drop(['x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1'], axis=1)\n\n atoms = merge_all_atoms(atoms, structures)\n \n add_distance_to_center(atoms)\n \n atoms = atoms.drop(['x_c', 'y_c', 'z_c', 'atom_index'], axis=1)\n atoms.sort_values(['molecule_name', 'atom_index_0', 'atom_index_1', 'd_c'], inplace=True)\n atom_groups = atoms.groupby(['molecule_name', 'atom_index_0', 'atom_index_1'])\n atoms['num'] = atom_groups.cumcount() + 2\n atoms = atoms.drop(['d_c'], axis=1)\n atoms = atoms[atoms['num'] < n_atoms]\n\n atoms = atoms.set_index(['molecule_name', 'atom_index_0', 'atom_index_1', 'num']).unstack()\n atoms.columns = [f'{col[0]}_{col[1]}' for col in atoms.columns]\n atoms = atoms.reset_index()\n \n# # downcast back to int8\n for col in atoms.columns:\n if col.startswith('atom_'):\n atoms[col] = atoms[col].fillna(0).astype('int8')\n \n# atoms['molecule_name'] = atoms['molecule_name'].astype('int32')\n \n full = add_atoms(base, atoms)\n add_distances(full)\n \n full.sort_values('id', inplace=True)\n \n return full",
"_____no_output_____"
],
[
"def take_n_atoms(df, n_atoms, four_start=4):\n labels = ['id', 'molecule_name', 'atom_index_1', 'atom_index_0']\n for i in range(2, n_atoms):\n label = f'atom_{i}'\n labels.append(label)\n\n for i in range(n_atoms):\n num = min(i, 4) if i < four_start else 4\n for j in range(num):\n labels.append(f'd_{i}_{j}')\n if 'scalar_coupling_constant' in df:\n labels.append('scalar_coupling_constant')\n return df[labels]",
"_____no_output_____"
],
[
"atoms = structures['atom'].values\ntypes_train = train['type'].values\ntypes_test = test['type'].values\nstructures['atom'] = structures['atom'].replace(ATOMIC_NUMBERS).astype('int8')\nfulls_train = []\nfulls_test = []\nfor type_ in progress_bar(train['type'].unique()):\n full_train = build_couple_dataframe(train, structures, type_, n_atoms=10)\n full_test = build_couple_dataframe(test, structures, type_, n_atoms=10)\n full_train = take_n_atoms(full_train, 10)\n full_test = take_n_atoms(full_test, 10)\n fulls_train.append(full_train)\n fulls_test.append(full_test)\n \nstructures['atom'] = atoms\ntrain = pd.concat(fulls_train).sort_values(by=['id']) #, axis=0)\ntest = pd.concat(fulls_test).sort_values(by=['id']) #, axis=0)\ntrain['type'] = types_train\ntest['type'] = types_test\ntrain = train.fillna(0)\ntest = test.fillna(0)",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\ndist-interact",
"_____no_output_____"
]
],
[
[
"train['dist_interact'] = dist_interact_train.values\ntest['dist_interact'] = dist_interact_test.values",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\nbasic",
"_____no_output_____"
]
],
[
[
"def map_atom_info(df_1,df_2, atom_idx):\n df = pd.merge(df_1, df_2, how = 'left',\n left_on = ['molecule_name', f'atom_index_{atom_idx}'],\n right_on = ['molecule_name', 'atom_index'])\n df = df.drop('atom_index', axis=1)\n return df\n\n\n# structure and ob_charges\nob_charge = pd.concat([ob_charge_train, ob_charge_test])\nmerge = pd.merge(ob_charge, structures, how='left',\n left_on = ['molecule_name', 'atom_index'],\n right_on = ['molecule_name', 'atom_index'])\nfor atom_idx in [0,1]:\n train = map_atom_info(train, merge, atom_idx)\n test = map_atom_info(test, merge, atom_idx)\n \n train = train.rename(columns={\n 'atom': f'atom_{atom_idx}',\n 'x': f'x_{atom_idx}',\n 'y': f'y_{atom_idx}',\n 'z': f'z_{atom_idx}',\n 'eem': f'eem_{atom_idx}',\n 'mmff94': f'mmff94_{atom_idx}',\n 'gasteiger': f'gasteiger_{atom_idx}', \n 'qeq': f'qeq_{atom_idx}',\n 'qtpie': f'qtpie_{atom_idx}', \n 'eem2015ha': f'eem2015ha_{atom_idx}', \n 'eem2015hm': f'eem2015hm_{atom_idx}', \n 'eem2015hn': f'eem2015hn_{atom_idx}', \n 'eem2015ba': f'eem2015ba_{atom_idx}', \n 'eem2015bm': f'eem2015bm_{atom_idx}', \n 'eem2015bn': f'eem2015bn_{atom_idx}',})\n test = test.rename(columns={\n 'atom': f'atom_{atom_idx}',\n 'x': f'x_{atom_idx}',\n 'y': f'y_{atom_idx}',\n 'z': f'z_{atom_idx}',\n 'eem': f'eem_{atom_idx}',\n 'mmff94': f'mmff94_{atom_idx}',\n 'gasteiger': f'gasteiger_{atom_idx}', \n 'qeq': f'qeq_{atom_idx}', \n 'qtpie': f'qtpie_{atom_idx}', \n 'eem2015ha': f'eem2015ha_{atom_idx}', \n 'eem2015hm': f'eem2015hm_{atom_idx}', \n 'eem2015hn': f'eem2015hn_{atom_idx}', \n 'eem2015ba': f'eem2015ba_{atom_idx}', \n 'eem2015bm': f'eem2015bm_{atom_idx}', \n 'eem2015bn': f'eem2015bn_{atom_idx}'})\n# test = test.rename(columns={'atom': f'atom_{atom_idx}',\n# 'x': f'x_{atom_idx}',\n# 'y': f'y_{atom_idx}',\n# 'z': f'z_{atom_idx}'})\n\n# ob_charges\n# train = map_atom_info(train, ob_charge_train, 0)\n# test = map_atom_info(test, ob_charge_test, 0)\n# train = map_atom_info(train, ob_charge_train, 1)\n# test = map_atom_info(test, ob_charge_test, 1)",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\ntype0",
"_____no_output_____"
]
],
[
[
"def create_type0(df):\n df['type_0'] = df['type'].apply(lambda x : x[0])\n return df\n# train['type_0'] = train['type'].apply(lambda x: x[0])\n# test['type_0'] = test['type'].apply(lambda x: x[0])",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\ndistances",
"_____no_output_____"
]
],
[
[
"def distances(df):\n df_p_0 = df[['x_0', 'y_0', 'z_0']].values\n df_p_1 = df[['x_1', 'y_1', 'z_1']].values\n \n df['dist'] = np.linalg.norm(df_p_0 - df_p_1, axis=1)\n df['dist_x'] = (df['x_0'] - df['x_1']) ** 2\n df['dist_y'] = (df['y_0'] - df['y_1']) ** 2\n df['dist_z'] = (df['z_0'] - df['z_1']) ** 2\n \n return df\n\n# train = distances(train)\n# test = distances(test)",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\n統計量",
"_____no_output_____"
]
],
[
[
"def create_features(df):\n df['molecule_couples'] = df.groupby('molecule_name')['id'].transform('count')\n df['molecule_dist_mean'] = df.groupby('molecule_name')['dist'].transform('mean')\n df['molecule_dist_min'] = df.groupby('molecule_name')['dist'].transform('min')\n df['molecule_dist_max'] = df.groupby('molecule_name')['dist'].transform('max')\n df['atom_0_couples_count'] = df.groupby(['molecule_name', 'atom_index_0'])['id'].transform('count')\n df['atom_1_couples_count'] = df.groupby(['molecule_name', 'atom_index_1'])['id'].transform('count')\n df[f'molecule_atom_index_0_x_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['x_1'].transform('std')\n df[f'molecule_atom_index_0_y_1_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('mean')\n df[f'molecule_atom_index_0_y_1_mean_diff'] = df[f'molecule_atom_index_0_y_1_mean'] - df['y_1']\n df[f'molecule_atom_index_0_y_1_mean_div'] = df[f'molecule_atom_index_0_y_1_mean'] / df['y_1']\n df[f'molecule_atom_index_0_y_1_max'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('max')\n df[f'molecule_atom_index_0_y_1_max_diff'] = df[f'molecule_atom_index_0_y_1_max'] - df['y_1']\n df[f'molecule_atom_index_0_y_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('std')\n df[f'molecule_atom_index_0_z_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['z_1'].transform('std')\n df[f'molecule_atom_index_0_dist_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('mean')\n df[f'molecule_atom_index_0_dist_mean_diff'] = df[f'molecule_atom_index_0_dist_mean'] - df['dist']\n df[f'molecule_atom_index_0_dist_mean_div'] = df[f'molecule_atom_index_0_dist_mean'] / df['dist']\n df[f'molecule_atom_index_0_dist_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('max')\n df[f'molecule_atom_index_0_dist_max_diff'] = df[f'molecule_atom_index_0_dist_max'] - df['dist']\n df[f'molecule_atom_index_0_dist_max_div'] = df[f'molecule_atom_index_0_dist_max'] / df['dist']\n df[f'molecule_atom_index_0_dist_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')\n df[f'molecule_atom_index_0_dist_min_diff'] = df[f'molecule_atom_index_0_dist_min'] - df['dist']\n df[f'molecule_atom_index_0_dist_min_div'] = df[f'molecule_atom_index_0_dist_min'] / df['dist']\n df[f'molecule_atom_index_0_dist_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('std')\n df[f'molecule_atom_index_0_dist_std_diff'] = df[f'molecule_atom_index_0_dist_std'] - df['dist']\n df[f'molecule_atom_index_0_dist_std_div'] = df[f'molecule_atom_index_0_dist_std'] / df['dist']\n df[f'molecule_atom_index_1_dist_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('mean')\n df[f'molecule_atom_index_1_dist_mean_diff'] = df[f'molecule_atom_index_1_dist_mean'] - df['dist']\n df[f'molecule_atom_index_1_dist_mean_div'] = df[f'molecule_atom_index_1_dist_mean'] / df['dist']\n df[f'molecule_atom_index_1_dist_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('max')\n df[f'molecule_atom_index_1_dist_max_diff'] = df[f'molecule_atom_index_1_dist_max'] - df['dist']\n df[f'molecule_atom_index_1_dist_max_div'] = df[f'molecule_atom_index_1_dist_max'] / df['dist']\n df[f'molecule_atom_index_1_dist_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('min')\n df[f'molecule_atom_index_1_dist_min_diff'] = df[f'molecule_atom_index_1_dist_min'] - df['dist']\n df[f'molecule_atom_index_1_dist_min_div'] = df[f'molecule_atom_index_1_dist_min'] 
/ df['dist']\n df[f'molecule_atom_index_1_dist_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('std')\n df[f'molecule_atom_index_1_dist_std_diff'] = df[f'molecule_atom_index_1_dist_std'] - df['dist']\n df[f'molecule_atom_index_1_dist_std_div'] = df[f'molecule_atom_index_1_dist_std'] / df['dist']\n df[f'molecule_atom_1_dist_mean'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('mean')\n df[f'molecule_atom_1_dist_min'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('min')\n df[f'molecule_atom_1_dist_min_diff'] = df[f'molecule_atom_1_dist_min'] - df['dist']\n df[f'molecule_atom_1_dist_min_div'] = df[f'molecule_atom_1_dist_min'] / df['dist']\n df[f'molecule_atom_1_dist_std'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('std')\n df[f'molecule_atom_1_dist_std_diff'] = df[f'molecule_atom_1_dist_std'] - df['dist']\n df[f'molecule_type_0_dist_std'] = df.groupby(['molecule_name', 'type_0'])['dist'].transform('std')\n df[f'molecule_type_0_dist_std_diff'] = df[f'molecule_type_0_dist_std'] - df['dist']\n df[f'molecule_type_dist_mean'] = df.groupby(['molecule_name', 'type'])['dist'].transform('mean')\n df[f'molecule_type_dist_mean_diff'] = df[f'molecule_type_dist_mean'] - df['dist']\n df[f'molecule_type_dist_mean_div'] = df[f'molecule_type_dist_mean'] / df['dist']\n df[f'molecule_type_dist_max'] = df.groupby(['molecule_name', 'type'])['dist'].transform('max')\n df[f'molecule_type_dist_min'] = df.groupby(['molecule_name', 'type'])['dist'].transform('min')\n df[f'molecule_type_dist_std'] = df.groupby(['molecule_name', 'type'])['dist'].transform('std')\n df[f'molecule_type_dist_std_diff'] = df[f'molecule_type_dist_std'] - df['dist']\n # fc\n df[f'molecule_type_fc_max'] = df.groupby(['molecule_name', 'type'])['fc'].transform('max')\n df[f'molecule_type_fc_min'] = df.groupby(['molecule_name', 'type'])['fc'].transform('min')\n df[f'molecule_type_fc_std'] = df.groupby(['molecule_name', 'type'])['fc'].transform('std')\n df[f'molecule_type_fc_std_diff'] = df[f'molecule_type_fc_std'] - df['fc']\n return df",
"_____no_output_____"
]
],
[
[
"angle features",
"_____no_output_____"
]
],
[
[
"def map_atom_info(df_1,df_2, atom_idx):\n df = pd.merge(df_1, df_2, how = 'left',\n left_on = ['molecule_name', f'atom_index_{atom_idx}'],\n right_on = ['molecule_name', 'atom_index'])\n df = df.drop('atom_index', axis=1)\n\n return df\n\ndef create_closest(df):\n df_temp=df.loc[:,[\"molecule_name\",\"atom_index_0\",\"atom_index_1\",\"dist\",\"x_0\",\"y_0\",\"z_0\",\"x_1\",\"y_1\",\"z_1\"]].copy()\n df_temp_=df_temp.copy()\n df_temp_= df_temp_.rename(columns={'atom_index_0': 'atom_index_1',\n 'atom_index_1': 'atom_index_0',\n 'x_0': 'x_1',\n 'y_0': 'y_1',\n 'z_0': 'z_1',\n 'x_1': 'x_0',\n 'y_1': 'y_0',\n 'z_1': 'z_0'})\n df_temp=pd.concat(objs=[df_temp,df_temp_],axis=0)\n\n df_temp[\"min_distance\"]=df_temp.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')\n df_temp= df_temp[df_temp[\"min_distance\"]==df_temp[\"dist\"]]\n\n df_temp=df_temp.drop(['x_0','y_0','z_0','min_distance', 'dist'], axis=1)\n df_temp= df_temp.rename(columns={'atom_index_0': 'atom_index',\n 'atom_index_1': 'atom_index_closest',\n 'distance': 'distance_closest',\n 'x_1': 'x_closest',\n 'y_1': 'y_closest',\n 'z_1': 'z_closest'})\n\n for atom_idx in [0,1]:\n df = map_atom_info(df,df_temp, atom_idx)\n df = df.rename(columns={'atom_index_closest': f'atom_index_closest_{atom_idx}',\n 'distance_closest': f'distance_closest_{atom_idx}',\n 'x_closest': f'x_closest_{atom_idx}',\n 'y_closest': f'y_closest_{atom_idx}',\n 'z_closest': f'z_closest_{atom_idx}'})\n return df\n\ndef add_cos_features(df):\n df[\"distance_0\"]=((df['x_0']-df['x_closest_0'])**2+(df['y_0']-df['y_closest_0'])**2+(df['z_0']-df['z_closest_0'])**2)**(1/2)\n df[\"distance_1\"]=((df['x_1']-df['x_closest_1'])**2+(df['y_1']-df['y_closest_1'])**2+(df['z_1']-df['z_closest_1'])**2)**(1/2)\n df[\"vec_0_x\"]=(df['x_0']-df['x_closest_0'])/df[\"distance_0\"]\n df[\"vec_0_y\"]=(df['y_0']-df['y_closest_0'])/df[\"distance_0\"]\n df[\"vec_0_z\"]=(df['z_0']-df['z_closest_0'])/df[\"distance_0\"]\n df[\"vec_1_x\"]=(df['x_1']-df['x_closest_1'])/df[\"distance_1\"]\n df[\"vec_1_y\"]=(df['y_1']-df['y_closest_1'])/df[\"distance_1\"]\n df[\"vec_1_z\"]=(df['z_1']-df['z_closest_1'])/df[\"distance_1\"]\n df[\"vec_x\"]=(df['x_1']-df['x_0'])/df[\"dist\"]\n df[\"vec_y\"]=(df['y_1']-df['y_0'])/df[\"dist\"]\n df[\"vec_z\"]=(df['z_1']-df['z_0'])/df[\"dist\"]\n df[\"cos_0_1\"]=df[\"vec_0_x\"]*df[\"vec_1_x\"]+df[\"vec_0_y\"]*df[\"vec_1_y\"]+df[\"vec_0_z\"]*df[\"vec_1_z\"]\n df[\"cos_0\"]=df[\"vec_0_x\"]*df[\"vec_x\"]+df[\"vec_0_y\"]*df[\"vec_y\"]+df[\"vec_0_z\"]*df[\"vec_z\"]\n df[\"cos_1\"]=df[\"vec_1_x\"]*df[\"vec_x\"]+df[\"vec_1_y\"]*df[\"vec_y\"]+df[\"vec_1_z\"]*df[\"vec_z\"]\n df=df.drop(['vec_0_x','vec_0_y','vec_0_z','vec_1_x','vec_1_y','vec_1_z','vec_x','vec_y','vec_z'], axis=1)\n return df\n\n",
"_____no_output_____"
],
[
"%%time\n\nprint('add fc')\nprint(len(train), len(test))\ntrain['fc'] = fc_train.values\ntest['fc'] = fc_test.values\n\nprint('type0')\nprint(len(train), len(test))\ntrain = create_type0(train)\ntest = create_type0(test)\n\nprint('distances')\nprint(len(train), len(test))\ntrain = distances(train)\ntest = distances(test)\n\nprint('create_featueres')\nprint(len(train), len(test))\ntrain = create_features(train)\ntest = create_features(test)\n\nprint('create_closest')\nprint(len(train), len(test))\ntrain = create_closest(train)\ntest = create_closest(test)\ntrain.drop_duplicates(inplace=True, subset=['id']) # なぜかtrainの行数が増えるバグが発生\ntrain = train.reset_index(drop=True)\n\nprint('add_cos_features')\nprint(len(train), len(test))\ntrain = add_cos_features(train)\ntest = add_cos_features(test)",
"add fc\n4658147 2505542\ntype0\n4658147 2505542\ndistances\n4658147 2505542\ncreate_featueres\n4658147 2505542\ncreate_closest\n4658147 2505542\nadd_cos_features\n4658147 2505542\nCPU times: user 2min 16s, sys: 3min 23s, total: 5min 40s\nWall time: 5min 40s\n"
]
],
[
[
"---\n<br>\n<br>\n<br>\nnanがある特徴量を削除",
"_____no_output_____"
]
],
[
[
"drop_feats = train.columns[train.isnull().sum(axis=0) != 0].values\ndrop_feats",
"_____no_output_____"
],
[
"train = train.drop(drop_feats, axis=1)\ntest = test.drop(drop_feats, axis=1)\n\nassert sum(train.isnull().sum(axis=0))==0, f'train に nan があります。'\nassert sum(test.isnull().sum(axis=0))==0, f'test に nan があります。'",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\n<br>\nエンコーディング",
"_____no_output_____"
]
],
[
[
"cat_cols = ['atom_1']\nnum_cols = list(set(train.columns) - set(cat_cols) - set(['type', \"scalar_coupling_constant\", 'molecule_name', 'id',\n 'atom_0', 'atom_1','atom_2', 'atom_3', 'atom_4', 'atom_5', 'atom_6', 'atom_7', 'atom_8', 'atom_9']))\n \nprint(f'カテゴリカル: {cat_cols}')\nprint(f'数値: {num_cols}')",
"カテゴリカル: ['atom_1']\n数値: ['distance_1', 'fc', 'dist_z', 'z_closest_1', 'eem_0', 'eem2015ha_1', 'dist_y', 'molecule_atom_index_1_dist_min', 'molecule_type_dist_max', 'eem2015ha_0', 'molecule_atom_index_1_dist_mean', 'd_8_1', 'y_closest_0', 'd_7_2', 'd_4_1', 'd_9_0', 'd_8_0', 'd_5_2', 'molecule_atom_index_1_dist_max', 'z_closest_0', 'y_0', 'z_0', 'molecule_atom_index_0_dist_mean_diff', 'd_3_0', 'y_closest_1', 'eem2015bn_0', 'molecule_type_fc_min', 'd_2_0', 'x_1', 'molecule_type_dist_min', 'x_closest_1', 'd_7_1', 'gasteiger_1', 'eem2015bn_1', 'molecule_atom_index_1_dist_min_diff', 'molecule_type_dist_mean', 'molecule_atom_index_0_y_1_max_diff', 'd_7_3', 'x_0', 'd_4_0', 'molecule_atom_index_0_dist_min_div', 'eem2015hn_0', 'd_3_2', 'atom_index_closest_0', 'atom_index_closest_1', 'molecule_atom_1_dist_mean', 'molecule_atom_index_0_dist_min', 'd_4_2', 'eem_1', 'd_2_1', 'atom_index_0', 'd_9_3', 'gasteiger_0', 'molecule_atom_1_dist_min', 'mmff94_0', 'molecule_atom_index_0_dist_max_diff', 'cos_0_1', 'molecule_atom_index_0_dist_mean', 'cos_1', 'd_5_0', 'd_6_2', 'z_1', 'molecule_atom_index_0_y_1_max', 'd_6_0', 'type_0', 'atom_1_couples_count', 'cos_0', 'molecule_atom_index_1_dist_max_diff', 'eem2015hm_0', 'd_7_0', 'd_6_3', 'eem2015bm_0', 'molecule_atom_index_0_y_1_mean_diff', 'molecule_atom_1_dist_min_diff', 'molecule_type_fc_max', 'molecule_type_dist_mean_diff', 'molecule_dist_mean', 'molecule_atom_index_1_dist_min_div', 'd_9_2', 'eem2015hm_1', 'molecule_atom_index_1_dist_max_div', 'd_8_2', 'd_8_3', 'qtpie_1', 'd_5_1', 'd_1_0', 'molecule_atom_1_dist_min_div', 'molecule_atom_index_1_dist_mean_diff', 'd_9_1', 'x_closest_0', 'dist', 'qtpie_0', 'dist_x', 'molecule_atom_index_0_y_1_mean', 'molecule_atom_index_0_dist_max_div', 'eem2015hn_1', 'molecule_atom_index_1_dist_mean_div', 'eem2015bm_1', 'molecule_dist_max', 'd_5_3', 'd_6_1', 'eem2015ba_0', 'molecule_atom_index_0_dist_max', 'atom_0_couples_count', 'atom_index_1', 'mmff94_1', 'qeq_0', 'molecule_dist_min', 'molecule_atom_index_0_dist_mean_div', 'molecule_type_dist_mean_div', 'molecule_couples', 'd_3_1', 'distance_0', 'y_1', 'eem2015ba_1', 'qeq_1', 'molecule_atom_index_0_dist_min_diff', 'd_4_3']\n"
]
],
[
[
"<br>\n<br>\nLabelEncode\n\n- `atom_1` = {H, C, N}\n- `type_0` = {1, 2, 3}\n- `type` = {2JHC, ...}",
"_____no_output_____"
]
],
[
[
"for f in ['type_0', 'type']:\n if f in train.columns:\n lbl = LabelEncoder()\n lbl.fit(list(train[f].values) + list(test[f].values))\n train[f] = lbl.transform(list(train[f].values))\n test[f] = lbl.transform(list(test[f].values))",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\n<br>\none hot encoding",
"_____no_output_____"
]
],
[
[
"train = pd.get_dummies(train, columns=cat_cols)\ntest = pd.get_dummies(test, columns=cat_cols)",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\n<br>\n標準化",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler()\ntrain[num_cols] = scaler.fit_transform(train[num_cols])\ntest[num_cols] = scaler.transform(test[num_cols])",
"_____no_output_____"
]
],
[
[
"<br>\n<br>\n\n---\n**show features**",
"_____no_output_____"
]
],
[
[
"train.head(2)",
"_____no_output_____"
],
[
"print(train.columns)",
"Index(['id', 'molecule_name', 'atom_index_1', 'atom_index_0', 'atom_2',\n 'atom_3', 'atom_4', 'atom_5', 'atom_6', 'atom_7',\n ...\n 'y_closest_1', 'z_closest_1', 'distance_0', 'distance_1', 'cos_0_1',\n 'cos_0', 'cos_1', 'atom_1_C', 'atom_1_H', 'atom_1_N'],\n dtype='object', length=134)\n"
]
],
[
[
"# create train, test data",
"_____no_output_____"
]
],
[
[
"y = train['scalar_coupling_constant']\ntrain = train.drop(['id', 'molecule_name', 'atom_0', 'scalar_coupling_constant'], axis=1)\ntest = test.drop(['id', 'molecule_name', 'atom_0'], axis=1)\ntrain = reduce_mem_usage(train)\ntest = reduce_mem_usage(test)\n\nX = train.copy()\nX_test = test.copy()\n\nassert len(X.columns) == len(X_test.columns), f'X と X_test のサイズが違います X: {len(X.columns)}, X_test: {len(X_test.columns)}'",
"Mem. usage decreased to 4246.89 Mb (0.7% reduction)\nMem. usage decreased to 2303.45 Mb (0.7% reduction)\n"
],
[
"del train, test, full_train, full_test",
"_____no_output_____"
],
[
"gc.collect()",
"_____no_output_____"
]
],
[
[
"# Training model",
"_____no_output_____"
],
[
"**params**",
"_____no_output_____"
]
],
[
[
"# Configuration\nmodel_params = {'n_estimators': 300,\n 'max_depth': 50,\n 'n_jobs': 30}",
"_____no_output_____"
],
[
"n_folds = 6\nfolds = KFold(n_splits=n_folds, shuffle=True)",
"_____no_output_____"
],
[
"def train_model(X, X_test, y, folds, model_params):\n model = ExtraTreesRegressor(**model_params) # <=================\n\n scores = []\n oof = np.zeros(len(X)) # <========\n prediction = np.zeros(len(X)) # <========\n result_dict = {}\n for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X)):\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\n model.fit(X.iloc[train_idx, :], y[train_idx])\n y_valid_pred = model.predict(X.iloc[valid_idx, :])\n prediction = model.predict(X_test)\n oof[valid_idx] = y_valid_pred\n score = mean_absolute_error(y[valid_idx], y_valid_pred)\n scores.append(score)\n\n print(f'fold {fold_n+1} mae: {score :.5f}')\n print('')\n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))\n print('')\n \n result_dict['oof'] = oof\n result_dict['prediction'] = prediction\n result_dict['scores'] = scores\n return result_dict",
"_____no_output_____"
],
[
"%%time\n# type ごとの学習 \n\nX_short = pd.DataFrame({'ind': list(X.index), 'type': X['type'].values, 'oof': [0] * len(X), 'target': y.values})\nX_short_test = pd.DataFrame({'ind': list(X_test.index), 'type': X_test['type'].values, 'prediction': [0] * len(X_test)})\nfor t in X['type'].unique():\n print('*'*80)\n print(f'Training of type {t}')\n print('*'*80)\n X_t = X.loc[X['type'] == t]\n X_test_t = X_test.loc[X_test['type'] == t]\n y_t = X_short.loc[X_short['type'] == t, 'target'].values\n \n result_dict = train_model(X_t, X_test_t, y_t, folds, model_params)\n X_short.loc[X_short['type'] == t, 'oof'] = result_dict['oof']\n X_short_test.loc[X_short_test['type'] == t, 'prediction'] = result_dict['prediction']\n \n \n\nprint('')\nprint('===== finish =====')\nX['scalar_coupling_constant'] = y\nmetric = kaggle_metric(X, X_short['oof'])\nX = X.drop(['scalar_coupling_constant', 'prediction'], axis=1)\nprint('CV mean score(group log mae): {0:.4f}'.format(metric))\nprediction = X_short_test['prediction']",
"********************************************************************************\nTraining of type 0\n********************************************************************************\nFold 1 started at Wed Aug 21 10:03:29 2019\nfold 1 mae: 0.96665\n\nFold 2 started at Wed Aug 21 10:09:37 2019\nfold 2 mae: 0.96646\n\nFold 3 started at Wed Aug 21 10:15:45 2019\nfold 3 mae: 0.97297\n\nFold 4 started at Wed Aug 21 10:21:59 2019\nfold 4 mae: 0.98534\n\nFold 5 started at Wed Aug 21 10:28:12 2019\nfold 5 mae: 0.97234\n\nFold 6 started at Wed Aug 21 10:34:29 2019\nfold 6 mae: 0.97644\n\nCV mean score: 0.9734, std: 0.0064.\n\n********************************************************************************\nTraining of type 3\n********************************************************************************\nFold 1 started at Wed Aug 21 10:40:40 2019\nfold 1 mae: 0.21142\n\nFold 2 started at Wed Aug 21 10:43:40 2019\nfold 2 mae: 0.21244\n\nFold 3 started at Wed Aug 21 10:46:44 2019\nfold 3 mae: 0.21334\n\nFold 4 started at Wed Aug 21 10:49:47 2019\nfold 4 mae: 0.21472\n\nFold 5 started at Wed Aug 21 10:52:47 2019\nfold 5 mae: 0.21194\n\nFold 6 started at Wed Aug 21 10:55:44 2019\nfold 6 mae: 0.21586\n\nCV mean score: 0.2133, std: 0.0016.\n\n********************************************************************************\nTraining of type 1\n********************************************************************************\nFold 1 started at Wed Aug 21 10:58:45 2019\nfold 1 mae: 0.53631\n\nFold 2 started at Wed Aug 21 10:58:57 2019\nfold 2 mae: 0.53674\n\nFold 3 started at Wed Aug 21 10:59:09 2019\nfold 3 mae: 0.51920\n\nFold 4 started at Wed Aug 21 10:59:21 2019\nfold 4 mae: 0.52120\n\nFold 5 started at Wed Aug 21 10:59:34 2019\nfold 5 mae: 0.51513\n\nFold 6 started at Wed Aug 21 10:59:46 2019\nfold 6 mae: 0.51895\n\nCV mean score: 0.5246, std: 0.0086.\n\n********************************************************************************\nTraining of type 4\n********************************************************************************\nFold 1 started at Wed Aug 21 11:00:01 2019\nfold 1 mae: 0.17949\n\nFold 2 started at Wed Aug 21 11:00:44 2019\nfold 2 mae: 0.18300\n\nFold 3 started at Wed Aug 21 11:01:26 2019\nfold 3 mae: 0.18770\n\nFold 4 started at Wed Aug 21 11:02:07 2019\nfold 4 mae: 0.18542\n\nFold 5 started at Wed Aug 21 11:02:50 2019\nfold 5 mae: 0.18178\n\nFold 6 started at Wed Aug 21 11:03:32 2019\nfold 6 mae: 0.18367\n\nCV mean score: 0.1835, std: 0.0026.\n\n********************************************************************************\nTraining of type 2\n********************************************************************************\nFold 1 started at Wed Aug 21 11:04:16 2019\nfold 1 mae: 0.35758\n\nFold 2 started at Wed Aug 21 11:16:28 2019\nfold 2 mae: 0.35481\n\nFold 3 started at Wed Aug 21 11:28:42 2019\nfold 3 mae: 0.35831\n\nFold 4 started at Wed Aug 21 11:40:51 2019\nfold 4 mae: 0.35609\n\nFold 5 started at Wed Aug 21 11:53:01 2019\nfold 5 mae: 0.35505\n\nFold 6 started at Wed Aug 21 12:05:30 2019\nfold 6 mae: 0.35596\n\nCV mean score: 0.3563, std: 0.0013.\n\n********************************************************************************\nTraining of type 6\n********************************************************************************\nFold 1 started at Wed Aug 21 12:18:05 2019\nfold 1 mae: 0.20079\n\nFold 2 started at Wed Aug 21 12:23:21 2019\nfold 2 mae: 0.20171\n\nFold 3 started at Wed Aug 21 12:28:35 2019\nfold 3 mae: 0.19988\n\nFold 4 started at Wed Aug 21 12:34:00 
2019\nfold 4 mae: 0.20069\n\nFold 5 started at Wed Aug 21 12:39:20 2019\nfold 5 mae: 0.20120\n\nFold 6 started at Wed Aug 21 12:44:32 2019\nfold 6 mae: 0.20231\n\nCV mean score: 0.2011, std: 0.0008.\n\n********************************************************************************\nTraining of type 5\n********************************************************************************\nFold 1 started at Wed Aug 21 12:49:55 2019\nfold 1 mae: 0.36292\n\nFold 2 started at Wed Aug 21 13:07:04 2019\nfold 2 mae: 0.35849\n\nFold 3 started at Wed Aug 21 13:23:53 2019\nfold 3 mae: 0.36197\n\nFold 4 started at Wed Aug 21 13:40:44 2019\nfold 4 mae: 0.36506\n\nFold 5 started at Wed Aug 21 13:58:00 2019\nfold 5 mae: 0.35744\n\nFold 6 started at Wed Aug 21 14:14:56 2019\nfold 6 mae: 0.35945\n\nCV mean score: 0.3609, std: 0.0027.\n\n********************************************************************************\nTraining of type 7\n********************************************************************************\nFold 1 started at Wed Aug 21 14:31:52 2019\nfold 1 mae: 0.14638\n\nFold 2 started at Wed Aug 21 14:33:00 2019\nfold 2 mae: 0.14264\n\nFold 3 started at Wed Aug 21 14:34:08 2019\nfold 3 mae: 0.14212\n\nFold 4 started at Wed Aug 21 14:35:17 2019\nfold 4 mae: 0.14587\n\nFold 5 started at Wed Aug 21 14:36:27 2019\nfold 5 mae: 0.14170\n\nFold 6 started at Wed Aug 21 14:37:36 2019\nfold 6 mae: 0.14417\n\nCV mean score: 0.1438, std: 0.0018.\n\n\n===== finish =====\nCV mean score(group log mae): -1.1884\nCPU times: user 5d 11h 57min 12s, sys: 52min 10s, total: 5d 12h 49min 23s\nWall time: 4h 35min 41s\n"
]
],
[
[
"# Save",
"_____no_output_____"
],
[
"**submission**",
"_____no_output_____"
]
],
[
[
"# path_submittion = './output/' + 'nb{}_submission_lgb_{}.csv'.format(nb, metric)\npath_submittion = f'../output/nb{nb}_submission_{model_name}_{metric :.5f}.csv'\nprint(f'save pash: {path_submittion}')",
"save pash: ../output/nb53_submission_extra_trees_regressor_-1.18839.csv\n"
],
[
"submittion = pd.read_csv('../input/champs-scalar-coupling/sample_submission.csv')\n# submittion = pd.read_csv('./input/champs-scalar-coupling/sample_submission.csv')[::100]\nsubmittion['scalar_coupling_constant'] = prediction\nsubmittion.to_csv(path_submittion, index=False)",
"_____no_output_____"
]
],
[
[
"---\n**result**",
"_____no_output_____"
]
],
[
[
"path_oof = f'../output/nb{nb}_oof_{model_name}_{metric :.5f}.csv'\nprint(f'save pash: {path_oof}')",
"save pash: ../output/nb53_oof_extra_trees_regressor_-1.18839.csv\n"
],
[
"oof = pd.DataFrame(result_dict['oof'])\noof.to_csv(path_oof, index=False)",
"_____no_output_____"
]
],
[
[
"# analysis",
"_____no_output_____"
]
],
[
[
"plot_data = pd.DataFrame(y)\nplot_data.index.name = 'id'\nplot_data['yhat'] = X_short['oof']\nplot_data['type'] = lbl.inverse_transform(X['type'])\n\ndef plot_oof_preds(ctype, llim, ulim):\n plt.figure(figsize=(6,6))\n sns.scatterplot(x='scalar_coupling_constant',y='yhat',\n data=plot_data.loc[plot_data['type']==ctype,\n ['scalar_coupling_constant', 'yhat']]);\n plt.xlim((llim, ulim))\n plt.ylim((llim, ulim))\n plt.plot([llim, ulim], [llim, ulim])\n plt.xlabel('scalar_coupling_constant')\n plt.ylabel('predicted')\n plt.title(f'{ctype}', fontsize=18)\n plt.show()\n\nplot_oof_preds('1JHC', 0, 250)\nplot_oof_preds('1JHN', 0, 100)\nplot_oof_preds('2JHC', -50, 50)\nplot_oof_preds('2JHH', -50, 50)\nplot_oof_preds('2JHN', -25, 25)\nplot_oof_preds('3JHC', -25, 60)\nplot_oof_preds('3JHH', -20, 20)\nplot_oof_preds('3JHN', -10, 15)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb731c5b740407b334a049ae428027b8d3a9f244 | 830,693 | ipynb | Jupyter Notebook | codes/P5.2 Topic Modeling.ipynb | AngShengJun/dsiCapstone | 8b2e11e6c3860205687c542106d68ec9c98d1a9b | [
"MIT"
]
| null | null | null | codes/P5.2 Topic Modeling.ipynb | AngShengJun/dsiCapstone | 8b2e11e6c3860205687c542106d68ec9c98d1a9b | [
"MIT"
]
| null | null | null | codes/P5.2 Topic Modeling.ipynb | AngShengJun/dsiCapstone | 8b2e11e6c3860205687c542106d68ec9c98d1a9b | [
"MIT"
]
| null | null | null | 285.559642 | 273,304 | 0.833581 | [
[
[
"## P5.2 Topic Modeling",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"### Content\n- [Topic Modelling using LDA](#Topic-Modelling-using-LDA)\n- [Topic Modeling (Train data)](#Topic-Modeling-(Train-data))\n- [Optimal Topic Size](#Optimal-Topic-Size)\n- [Binary Classification (LDA topic features)](#Binary-Classification-(LDA-topic-features))\n- [Binary Classification (LDA topic and Countvectorizer features)](#Binary-Classification-(LDA-topic-and-Countvectorizer-features))\n- [Recommendations (Part2)](#Recommendations-(Part2))\n- [Future Work](#Future-Work)",
"_____no_output_____"
],
[
"### Topic Modelling using LDA\nInspired by Marc Kelechava's work [https://towardsdatascience.com/unsupervised-nlp-topic-models-as-a-supervised-learning-input-cf8ee9e5cf28] and Andrew Ng et al., 2003.\n\nIn this section, I explore if underlying semantic structures, discovered through the Latent Dirichlet Allocation (LDA) technique (unsupervised machine learning technique), could be utilized in a supervised text classification problem. LDA application poses significant challenge due to personal inexperience in the domain, and I allocated approx. a week in reading up on basic LDA applications. I'm interested to explore \n\nSteps as follows:\n- Explore LDA topic modelling, and derive optimum number of topics (train data)\n- Investigate the use of LDA topic distributions as feature vectors for supervised, binary classification (i.e. bomb or non-bomb). If the supervised sensitivty and roc_auc score on the unseen data generalizes, it is an indication that the topic model trained on trainsub has identified latent semantic structure that persists over varying motive texts in identification of bombing incidents.\n- Investigate generalizability of supervised, binary classification model using feature vectors from both LDA and countvectorizer. ",
"_____no_output_____"
]
],
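[
[
"# Added illustration (not part of the original notebook): a minimal, self-contained toy sketch of the\n# idea described above -- unsupervised topic proportions become the feature vector of a supervised\n# classifier. sklearn's LatentDirichletAllocation is used here purely for brevity; the actual pipeline\n# below uses gensim's LDA on the GTD motive text. The tiny corpus and labels are made up for demonstration.\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.linear_model import LogisticRegression\n\ntoy_docs = ['bomb blast hit the market', 'an explosive device detonated near the mosque',\n            'armed assault on a checkpoint', 'gunmen opened fire on a patrol']\ntoy_y = [1, 1, 0, 0]  # 1 = bombing, 0 = non-bombing\n\ndtm = CountVectorizer().fit_transform(toy_docs)  # bag-of-words counts\ntopic_feats = LatentDirichletAllocation(n_components=2, random_state=42).fit_transform(dtm)  # doc-topic proportions\nclf = LogisticRegression().fit(topic_feats, toy_y)  # topic proportions used as supervised features\nprint(topic_feats.round(3))\nprint(clf.predict(topic_feats))",
"_____no_output_____"
]
],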
[
[
"import pandas as pd\nimport numpy as np\nimport sys\nimport re\nfrom pprint import pprint\n\n# Gensim\nimport gensim, spacy, logging, warnings\nimport gensim.corpora as corpora\nfrom gensim.utils import lemmatize, simple_preprocess\nfrom gensim.models import CoherenceModel\n\n# NLTK Stop words and stemmer\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\n# Import library for cross-validation\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_auc_score\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'",
"_____no_output_____"
],
[
"# Setting - display all columns\npd.set_option('display.max_columns', None)",
"_____no_output_____"
],
[
"# Read in cleaned featured engineered data\ndframe = pd.read_csv('../assets/wordok.csv',encoding=\"ISO-8859-1\",index_col=0)",
"_____no_output_____"
],
[
"# Instantiate the custom list of stopwords for modelling from P5_01\nstop_words = stopwords.words('english')\n\nown_stop = ['motive','specific','unknown','attack','sources','noted', 'claimed','stated','incident','targeted',\\\n 'responsibility','violence','carried','government','suspected','trend','speculated','al','sectarian',\\\n 'retaliation','group','related','security','forces','people','bomb','bombing','bombings']\n# Extend the stop words\nstop_words.extend(own_stop)",
"_____no_output_____"
],
[
"own_stopfn = ['death', 'want', 'off', 'momentum', 'star', 'colleg', 'aqi', 'treat', 'reveng', 'them', 'all', 'radio',\\\n 'bodo', 'upcom', 'between', 'prior', 'enter', 'made', 'nimr', 'sectarian', 'muslim', 'past', 'previou',\\\n 'intimid', 'held', 'fsa', 'women', 'are', 'mnlf', 'with', 'pattani', 'shutdown', 'border', 'departur',\\\n 'advoc', 'have', 'eelam', 'across', 'villag', 'foreign', 'kill', 'shepherd', 'yemeni', 'develop', 'pro',\\\n 'road', 'not', 'appear', 'jharkhand', 'spokesperson']",
"_____no_output_____"
],
[
"# Extend the Stop words\nstop_words.extend(own_stopfn)\n# Check the addition of firstset_words\nstop_words[-5:]",
"_____no_output_____"
],
[
"# Create Train-Test split (80-20 split)\n# X is motive text. y is bomb.\nX_train,X_test,y_train,y_test = train_test_split(dframe[['motive']],dframe['bomb'],test_size=0.20,\\\n stratify=dframe['bomb'],\\\n random_state=42)",
"_____no_output_____"
],
[
"dframe.head(1)",
"_____no_output_____"
]
],
[
[
"### Topic Modeling (Train data)",
"_____no_output_____"
]
],
[
[
"def sent_to_words(sentences):\n for sent in sentences:\n sent = re.sub('\\s+', ' ', sent) # remove newline chars\n sent = re.sub(\"\\'\", \"\", sent) # remove single quotes\n sent = gensim.utils.simple_preprocess(str(sent), deacc=True) \n yield(sent) \n\n# Convert to list\ndata = X_train.motive.values.tolist()\ndata_words = list(sent_to_words(data))\nprint(data_words[:1])",
"[['the', 'specific', 'motive', 'is', 'unknown', 'however', 'sources', 'speculate', 'that', 'the', 'attack', 'was', 'part', 'of', 'larger', 'trend', 'of', 'sectarian', 'violence', 'between', 'iraqs', 'minority', 'sunni', 'and', 'majority', 'shiite', 'communities']]\n"
]
],
[
[
"Utilize Gensim's `Phrases` to build and implement bigrams and trigrams. The higher the parameters `min_count` and `threshold`, the harder it is for words to be combined to bigrams",
"_____no_output_____"
]
],
[
[
"# Build the bigram and trigram models\nbigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.\ntrigram = gensim.models.Phrases(bigram[data_words], threshold=100) \nbigram_mod = gensim.models.phrases.Phraser(bigram)\ntrigram_mod = gensim.models.phrases.Phraser(trigram)\n\ndef process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n \"\"\"Remove Stopwords, Form Bigrams, Trigrams and Lemmatization\"\"\"\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n # use 'en_core_web_sm' in place of 'en' \n nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out\n\ndata_ready = process_words(data_words) # processed Text Data",
"_____no_output_____"
],
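[
"# Added illustrative check (assumption: not in the original analysis): apply the fitted Phraser to one\n# tokenised motive to see what the Phrases model actually does -- any collocation that clears\n# min_count=5 and threshold=100 comes back joined by an underscore, everything else passes through unchanged.\nsample = data_words[0]\nprint(sample)\nprint(bigram_mod[sample])",
"_____no_output_____"
],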
[
"len(data_ready)",
"_____no_output_____"
],
[
"# Create Dictionary\nid2word = corpora.Dictionary(data_ready)\n\n## Create corpus texts\ntexts = data_ready\n\n# Create Corpus: Term Document Frequency\ncorpus = [id2word.doc2bow(text) for text in data_ready]\n\n# View\ndisplay(corpus[:4])\n\n# Human readable format of corpus (term-frequency)\n[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:4]]",
"_____no_output_____"
]
],
[
[
"Gensim creates unique id for each word in the document. The produced corpus shown above is a mapping of (word_id, word_frequency). A human-readable form of the corpus is displayed follows thereafter.\n\nBuild LDA model with 4 topics. Each topic is a combination of keywords (Each contributing certain weightage to topic).",
"_____no_output_____"
]
],
[
[
"# Build LDA model\nlda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=4, \n random_state=42,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='symmetric',\n iterations=100,\n per_word_topics=True)\n\npprint(lda_model.print_topics())",
"[(0,\n '0.060*\"police\" + 0.044*\"believe\" + 0.027*\"protest\" + 0.020*\"response\" + '\n '0.016*\"informant\" + 0.014*\"intimidate\" + 0.011*\"rebel\" + 0.009*\"several\" + '\n '0.009*\"refuse\" + 0.009*\"often\"'),\n (1,\n '0.067*\"however\" + 0.052*\"election\" + 0.036*\"area\" + 0.026*\"attempt\" + '\n '0.017*\"recent\" + 0.017*\"local\" + 0.016*\"schedule\" + 0.013*\"extremist\" + '\n '0.012*\"also\" + 0.011*\"official\"'),\n (2,\n '0.098*\"however\" + 0.049*\"victim\" + 0.043*\"state\" + 0.030*\"posit\" + '\n '0.029*\"military\" + 0.026*\"campaign\" + 0.022*\"member\" + 0.019*\"accuse\" + '\n '0.018*\"maoist\" + 0.018*\"islamic\"'),\n (3,\n '0.123*\"however\" + 0.077*\"part\" + 0.052*\"large\" + 0.050*\"may\" + '\n '0.045*\"shiite\" + 0.036*\"community\" + 0.022*\"occur\" + 0.021*\"sunni\" + '\n '0.017*\"camp\" + 0.017*\"member\"')]\n"
]
],
[
[
"Interpretation: For topic 0, top 10 keywords that contribute to this topic are 'however', 'state' and so on, with weight of 'however' being 0.088.",
"_____no_output_____"
]
],
[
[
"# Compute Perplexity\nprint(f\"Perplexity: {lda_model.log_perplexity(corpus)}\") # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=data_ready, dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint(f\"Coherence Score: {coherence_lda}\")",
"Perplexity: -6.809074529210312\nCoherence Score: 0.33088686316810534\n"
],
[
"def format_topics_sentences(ldamodel=None, corpus=corpus, texts=data):\n # Init output\n sent_topics_df = pd.DataFrame()\n\n # Get main topic in each document\n for i, row_list in enumerate(ldamodel[corpus]):\n row = row_list[0] if ldamodel.per_word_topics else row_list \n # print(row)\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n # Get the Dominant topic, Perc Contribution and Keywords for each document\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0: # => dominant topic\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \", \".join([word for word, prop in wp])\n sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\n\n # Add original text to the end of the output\n contents = pd.Series(texts)\n sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\n return(sent_topics_df)\n\n\ndf_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data_ready)\n\n# Format\ndf_dominant_topic = df_topic_sents_keywords.reset_index()\ndf_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\ndf_dominant_topic.head(10)",
"_____no_output_____"
]
],
[
[
"The dominant topic with percentage contribution for each document is represented above. \n",
"_____no_output_____"
]
],
[
[
"# Display setting to show more characters in column\npd.options.display.max_colwidth = 80\n\nsent_topics_sorteddf_mallet = pd.DataFrame()\nsent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\nfor i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, \n grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], \n axis=0)\n\n# Reset Index \nsent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)\n\n# Format\nsent_topics_sorteddf_mallet.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Representative Text\"]\n\n# Show\nsent_topics_sorteddf_mallet.head(10)",
"_____no_output_____"
]
],
[
[
"The documents a given topic has contributed to the most to facilitate topic inference ate displayed above.",
"_____no_output_____"
]
],
[
[
"# 1. Wordcloud of Top N words in each topic\nfrom matplotlib import pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.colors as mcolors\n\ncols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\ncloud = WordCloud(stopwords=stop_words,\n background_color='white',\n width=2500,\n height=1800,\n max_words=10,\n colormap='tab10',\n color_func=lambda *args, **kwargs: cols[i],\n prefer_horizontal=1.0)\n\ntopics = lda_model.show_topics(formatted=False)\n\nfig, axes = plt.subplots(2, 2, figsize=(10,10), sharex=True, sharey=True)\n\nfor i, ax in enumerate(axes.flatten()):\n fig.add_subplot(ax)\n topic_words = dict(topics[i][1])\n cloud.generate_from_frequencies(topic_words, max_font_size=300)\n plt.gca().imshow(cloud)\n plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))\n plt.gca().axis('off')\n\n\nplt.subplots_adjust(wspace=0, hspace=0)\nplt.axis('off')\nplt.margins(x=0, y=0)\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Interpretation of the Four topics using the representative text identified above: (topic 0: public unrest and law enforcement), (topic 1: tension admist elections), (topic 2: military campaigns and terror groups), (topic 3: sectarian violence). \nNote: Changing the random_seed will also change the topics surfaced, currently versioned as 42.",
"_____no_output_____"
]
],
[
[
"# Sentence Coloring of N Sentences\nfrom matplotlib.patches import Rectangle\n# Pick documents amongst corpus\ndef sentences_chart(lda_model=lda_model, corpus=corpus, start = 7, end = 14):\n corp = corpus[start:end]\n mycolors = [color for name, color in mcolors.TABLEAU_COLORS.items()]\n\n fig, axes = plt.subplots(end-start, 1, figsize=(20, (end-start)*0.95), dpi=160) \n axes[0].axis('off')\n for i, ax in enumerate(axes):\n if i > 0:\n corp_cur = corp[i-1] \n topic_percs, wordid_topics, wordid_phivalues = lda_model[corp_cur]\n word_dominanttopic = [(lda_model.id2word[wd], topic[0]) for wd, topic in wordid_topics] \n ax.text(0.01, 0.5, \"Doc \" + str(i-1) + \": \", verticalalignment='center',\n fontsize=16, color='black', transform=ax.transAxes, fontweight=700)\n\n # Draw Rectange\n topic_percs_sorted = sorted(topic_percs, key=lambda x: (x[1]), reverse=True)\n ax.add_patch(Rectangle((0.0, 0.05), 0.99, 0.90, fill=None, alpha=1, \n color=mycolors[topic_percs_sorted[0][0]], linewidth=2))\n\n word_pos = 0.06\n for j, (word, topics) in enumerate(word_dominanttopic):\n if j < 14:\n ax.text(word_pos, 0.5, word,\n horizontalalignment='left',\n verticalalignment='center',\n fontsize=16, color=mycolors[topics],\n transform=ax.transAxes, fontweight=700)\n word_pos += .009 * len(word) # to move the word for the next iter\n ax.axis('off')\n ax.text(word_pos, 0.5, '. . .',\n horizontalalignment='left',\n verticalalignment='center',\n fontsize=16, color='black',\n transform=ax.transAxes) \n\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.suptitle('Sentence Topic Coloring for Documents: ' + str(start) + ' to ' + str(end-2), fontsize=22, y=0.95, fontweight=700)\n plt.tight_layout()\n plt.show()\n\nsentences_chart()",
"_____no_output_____"
]
],
[
[
"We can review the topic percent contribution for each document. Here document 7 is selected as an example.",
"_____no_output_____"
]
],
[
[
"df_dominant_topic[df_dominant_topic['Document_No']==7]",
"_____no_output_____"
],
[
"import pyLDAvis.gensim\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary=lda_model.id2word)\nvis",
"_____no_output_____"
]
],
[
[
"Interpretation: On the left hand plot, topics are represented by a bubble. Larger bubble size indicates higher prevalence. Good topic model will have fairly big, non-overlapping bubbles scattered throughout the chart. The salient keywords and frequency bars on the right hand chart updates with review of each bubble (cursor over bubble).",
"_____no_output_____"
],
[
"### Optimal Topic Size",
"_____no_output_____"
]
],
[
[
"def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n \"\"\"\n Compute c_v coherence for various number of topics\n\n Parameters:\n ----------\n dictionary : Gensim dictionary\n corpus : Gensim corpus\n texts : List of input texts\n limit : Max num of topics\n\n Returns:\n -------\n model_list : List of LDA topic models\n coherence_values : Coherence values corresponding to the LDA model with respective number of topics\n \"\"\"\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = gensim.models.ldamodel.LdaModel(corpus=corpus, num_topics=num_topics, id2word=id2word, random_state=42, update_every=1,\\\n chunksize=100, passes=10, alpha='symmetric', iterations=100, per_word_topics=True)\n \n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values",
"_____no_output_____"
],
[
"# Can take a long time to run (10mins approx)\nmodel_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_ready, start=5, limit=60, step=12)",
"_____no_output_____"
],
[
"# Show graph\nlimit=60; start=5; step=12;\nx = range(start, limit, step)\nplt.plot(x, coherence_values)\nplt.xlabel(\"Num Topics\")\nplt.ylabel(\"Coherence score\")\nplt.legend((\"coherence_values\"), loc='best')\nplt.show()",
"_____no_output_____"
],
[
"# Print the coherence scores\nfor m, cv in zip(x, coherence_values):\n print(\"Num Topics =\", m, \" has Coherence Value of\", round(cv, 4))",
"Num Topics = 5 has Coherence Value of 0.2823\nNum Topics = 17 has Coherence Value of 0.4062\nNum Topics = 29 has Coherence Value of 0.3934\nNum Topics = 41 has Coherence Value of 0.4703\nNum Topics = 53 has Coherence Value of 0.4587\n"
]
],
[
[
"coherence score saturates at 41 topics. We pick the model with 41 topics.",
"_____no_output_____"
]
],
[
[
"# Select the model and print the topics\noptimal_model = model_list[3]\nmodel_topics = optimal_model.show_topics(formatted=False)\npprint(optimal_model.print_topics(num_words=5))",
"[(23,\n '0.400*\"civilian\" + 0.365*\"response\" + 0.069*\"action\" + 0.039*\"population\" + '\n '0.000*\"phuket\"'),\n (37,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (16,\n '0.126*\"plan\" + 0.120*\"country\" + 0.117*\"include\" + 0.095*\"office\" + '\n '0.093*\"education\"'),\n (13,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (22,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (3,\n '0.183*\"however\" + 0.159*\"large\" + 0.157*\"part\" + 0.151*\"may\" + '\n '0.136*\"shiite\"'),\n (33,\n '0.387*\"prevent\" + 0.218*\"pilgrim\" + 0.021*\"travel\" + 0.010*\"iranian\" + '\n '0.000*\"phuket\"'),\n (14,\n '0.256*\"provide\" + 0.175*\"oil\" + 0.077*\"colombian\" + 0.076*\"facility\" + '\n '0.000*\"ddd\"'),\n (30,\n '0.313*\"victim\" + 0.200*\"however\" + 0.141*\"accuse\" + 0.108*\"work\" + '\n '0.093*\"note\"'),\n (25,\n '0.533*\"however\" + 0.420*\"election\" + 0.000*\"recognize\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (9,\n '0.315*\"believe\" + 0.225*\"attempt\" + 0.172*\"however\" + 0.102*\"leave\" + '\n '0.070*\"carry\"'),\n (11,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (8,\n '0.624*\"military\" + 0.137*\"school\" + 0.101*\"however\" + 0.033*\"bus\" + '\n '0.000*\"phuket\"'),\n (29,\n '0.568*\"police\" + 0.204*\"day\" + 0.140*\"however\" + 0.022*\"belong\" + '\n '0.000*\"phuket\"'),\n (40,\n '0.175*\"maoist\" + 0.175*\"islamic\" + 0.174*\"however\" + 0.162*\"state\" + '\n '0.064*\"region\"'),\n (36,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (19,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (20,\n '0.000*\"dictatorship\" + 0.000*\"phuket\" + 0.000*\"tradition\" + 0.000*\"ddd\" + '\n '0.000*\"grassroot\"'),\n (32,\n '0.245*\"however\" + 0.242*\"assailant\" + 0.116*\"oppose\" + 0.064*\"result\" + '\n '0.063*\"lead\"'),\n (34,\n '0.629*\"protest\" + 0.110*\"sabotage\" + 0.092*\"participate\" + '\n '0.000*\"dictatorship\" + 0.000*\"tradition\"')]\n"
]
],
[
[
"The dominant topics in each sentence are identified using the defined function below.",
"_____no_output_____"
]
],
[
[
"def format_topics_sentences(ldamodel=None, corpus=corpus, texts=data):\n # Init output\n sent_topics_df = pd.DataFrame()\n\n # Get main topic in each document\n for i, row_list in enumerate(ldamodel[corpus]):\n row = row_list[0] if ldamodel.per_word_topics else row_list \n # print(row)\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n # Get the Dominant topic, Perc Contribution and Keywords for each document\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0: # => dominant topic\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \", \".join([word for word, prop in wp])\n sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\n\n # Add original text to the end of the output\n contents = pd.Series(texts)\n sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\n return(sent_topics_df)\n\n\ndf_topic_sents_keywords = format_topics_sentences(ldamodel=optimal_model, corpus=corpus, texts=data_ready)\n\n# Format\ndf_dominant_topic = df_topic_sents_keywords.reset_index()\ndf_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\ndf_dominant_topic.head(10)",
"_____no_output_____"
]
],
[
[
"Find the representative document for each topic and display them.",
"_____no_output_____"
]
],
[
[
"# Display setting to show more characters in column\npd.options.display.max_colwidth = 80\n\nsent_topics_sorteddf_mallet = pd.DataFrame()\nsent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n\nfor i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet, \n grp.sort_values(['Perc_Contribution'], ascending=False).head(1)], \n axis=0)\n\n# Reset Index \nsent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)\n\n# Format\nsent_topics_sorteddf_mallet.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Representative Text\"]\n\n# Show\nsent_topics_sorteddf_mallet.head(10)",
"_____no_output_____"
],
[
"# Specify mds as 'tsne', otherwise TypeError: Object of type 'complex' is not JSON serializable\n# complex number had come from coordinate calculation and specifying the \"mds\"\n# Ref1: https://stackoverflow.com/questions/46379763/typeerror-object-of-type-complex-is-not-json-serializable-while-using-pyldavi\n# Ref2: https://pyldavis.readthedocs.io/en/latest/modules/API.html#pyLDAvis.prepare\nimport pyLDAvis.gensim\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(topic_model=optimal_model, corpus=corpus, dictionary=optimal_model.id2word,mds='tsne')\nvis",
"_____no_output_____"
]
],
[
[
"### Binary Classification (LDA topic features)",
"_____no_output_____"
]
],
[
[
"# Set the dictionary and corpus based on trainsub data\ntrainid2word = id2word\ntraincorpus = corpus",
"_____no_output_____"
],
[
"# Train model \n# Build LDA model on trainsub data, using optimum topics\nlda_train = gensim.models.ldamodel.LdaModel(corpus=traincorpus,\n id2word=trainid2word,\n num_topics=41, \n random_state=42,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='symmetric',\n iterations=100,\n per_word_topics=True)",
"_____no_output_____"
]
],
[
[
"With the LDA model trained on train data, run the motive text through it using 'get document topics'. A list comprehension on that output (2nd line in loop) will give the probability distribution of the topics for a specific review (feature vector). ",
"_____no_output_____"
]
],
[
[
"# Make train Vectors\ntrain_vecs = []\nfor i in range(len(X_train)):\n top_topics = lda_train.get_document_topics(traincorpus[i], minimum_probability=0.0)\n topic_vec = [top_topics[i][1] for i in range(41)]\n train_vecs.append(topic_vec)",
"_____no_output_____"
],
[
"# Sanity check; should correspond with the number of optimal topics\nprint(f\"Number of vectors per train text: {len(train_vecs[2])}\")\n\nprint(f\"Length of train vectors: {len(train_vecs)}\")\nprint(f\"Length of X_train: {len(X_train)}\")",
"Number of vectors per train text: 41\nLength of train vectors: 26016\nLength of X_train: 26016\n"
],
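[
"# Added illustrative peek (assumption: not in the original analysis): each motive text is now a\n# 41-dimensional topic-probability vector that sums to roughly 1; its largest entries are the dominant\n# topics the Logistic Regression classifier will lean on.\ntop3 = sorted(enumerate(train_vecs[0]), key=lambda kv: kv[1], reverse=True)[:3]\nprint(f'Top 3 (topic, probability) pairs for the first train document: {top3}')\nprint(f'Sum of topic probabilities: {sum(train_vecs[0]):.4f}')",
"_____no_output_____"
],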
[
"# Pass the vectors into numpy array form\nX_tr_vec = np.array(train_vecs)\ny_tr = np.array(y_train)",
"_____no_output_____"
],
[
"# Split the train_vecs for training\nX_trainsub,X_validate,y_trainsub,y_validate = train_test_split(X_tr_vec,y_tr,test_size=0.20,stratify=y_tr,random_state=42)",
"_____no_output_____"
],
[
"# Instantiate model\nlr = LogisticRegression(random_state=42,solver='lbfgs',max_iter=500)\n\n# Fit model \nmodel_lr = lr.fit(X_trainsub,y_trainsub)",
"_____no_output_____"
],
[
"# Generate predictions from validate set\n# Cross-validate 10 folds\npredictions = cross_val_predict(model_lr, X_validate, y_validate, cv = 10)\nprint(f\"Accuracy on validate set: {round(cross_val_score(model_lr, X_validate, y_validate, cv = 10).mean(),4)}\")",
"Accuracy on validate set: 0.582\n"
],
[
"# Confusion matrix for test set using NB model\n# Pass in true values, predicted values to confusion matrix\n# Convert Confusion matrix into dataframe\n# Positive class (class 1) is bomb\ncm = confusion_matrix(y_validate, predictions)\ncm_df = pd.DataFrame(cm,columns=['pred non-bomb','pred bomb'], index=['Actual non-bomb','Actual bomb'])\ncm_df",
"_____no_output_____"
],
[
"# return nparray as a 1-D array.\nconfusion_matrix(y_validate, predictions).ravel()\n\n# Save TN/FP/FN/TP values.\ntn, fp, fn, tp = confusion_matrix(y_validate, predictions).ravel()\n\n# Summary of metrics for LR model\nspec = tn/(tn+fp)\nsens = tp/(tp+fn)\nprint(f\"Specificity: {round(spec,4)}\")\nprint(f\"Sensitivity: {round(sens,4)}\")\n\n# To compute the ROC AUC curve, first\n# Create a dataframe called pred_df that contains:\n# 1. The list of true values of our test set.\n# 2. The list of predicted probabilities based on our model.\n\npred_proba = [i[1] for i in lr.predict_proba(X_validate)]\n\npred_df = pd.DataFrame({'test_values': y_validate,\n 'pred_probs':pred_proba})\n\n# Calculate ROC AUC.\nprint(f\"roc_auc: {round(roc_auc_score(pred_df['test_values'],pred_df['pred_probs']),4)}\")",
"Specificity: 0.3814\nSensitivity: 0.8024\nroc_auc: 0.6121\n"
],
[
"def sent_to_words(sentences):\n for sent in sentences:\n sent = re.sub('\\s+', ' ', sent) # remove newline chars\n sent = re.sub(\"\\'\", \"\", sent) # remove single quotes\n sent = gensim.utils.simple_preprocess(str(sent), deacc=True) \n yield(sent) \n\n# Convert to list\ndata = X_test.motive.values.tolist()\ndata_words = list(sent_to_words(data))\nprint(data_words[:1])",
"<>:3: DeprecationWarning: invalid escape sequence \\s\n<>:3: DeprecationWarning: invalid escape sequence \\s\n<>:3: DeprecationWarning: invalid escape sequence \\s\n<ipython-input-45-f82c2e9a857b>:3: DeprecationWarning: invalid escape sequence \\s\n sent = re.sub('\\s+', ' ', sent) # remove newline chars\n"
],
[
"# Build the bigram and trigram models\nbigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.\ntrigram = gensim.models.Phrases(bigram[data_words], threshold=100) \nbigram_mod = gensim.models.phrases.Phraser(bigram)\ntrigram_mod = gensim.models.phrases.Phraser(trigram)\n\ndef process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n \"\"\"Remove Stopwords, Form Bigrams, Trigrams and Lemmatization\"\"\"\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n # use 'en_core_web_sm' in place of 'en' \n nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out\n\ndata_ready = process_words(data_words) # processed Text Data",
"_____no_output_____"
],
[
"# Using train dict on new unseen test words\ntestcorpus = [trainid2word.doc2bow(text) for text in data_ready]",
"_____no_output_____"
],
[
"# Use the LDA model from trained data on the unseen test corpus\n# Code block similar to that for training code, except \n# use the LDA model from the training data, and run them through the unseen test reviews\ntest_vecs = []\nfor i in range(len(X_test)):\n top_topics = lda_train.get_document_topics(testcorpus[i], minimum_probability=0.0)\n topic_vec = [top_topics[i][1] for i in range(41)]\n test_vecs.append(topic_vec)",
"_____no_output_____"
],
[
"print(f\"Length of test vectors: {len(test_vecs)}\")\nprint(f\"Length of X_test: {len(X_test)}\")",
"Length of test vectors: 6505\nLength of X_test: 6505\n"
],
[
"# Pass the vectors into numpy array form\nX_ts_vec = np.array(test_vecs)\ny_ts = np.array(y_test)",
"_____no_output_____"
],
[
"# Instantiate model\nlr = LogisticRegression(random_state=42,solver='lbfgs',max_iter=500)\n\n# Fit model \nmodel_lr = lr.fit(X_ts_vec,y_ts)\n\n# Generate predictions from test set\npredictions = lr.predict(X_ts_vec)\nprint(f\"Accuracy on test set: {round(model_lr.score(X_ts_vec, y_ts),4)}\")",
"Accuracy on test set: 0.6034\n"
],
[
"# Confusion matrix for test set using NB model\n# Pass in true values, predicted values to confusion matrix\n# Convert Confusion matrix into dataframe\n# Positive class (class 1) is bomb\ncm = confusion_matrix(y_ts, predictions)\ncm_df = pd.DataFrame(cm,columns=['pred non-bomb','pred bomb'], index=['Actual non-bomb','Actual bomb'])\ncm_df",
"_____no_output_____"
],
[
"# return nparray as a 1-D array.\nconfusion_matrix(y_ts, predictions).ravel()\n\n# Save TN/FP/FN/TP values.\ntn, fp, fn, tp = confusion_matrix(y_ts, predictions).ravel()\n\n# Summary of metrics for LR model\nspec = tn/(tn+fp)\nsens = tp/(tp+fn)\nprint(f\"Specificity: {round(spec,4)}\")\nprint(f\"Sensitivity: {round(sens,4)}\")\n\n# To compute the ROC AUC curve, first\n# Create a dataframe called pred_df that contains:\n# 1. The list of true values of our test set.\n# 2. The list of predicted probabilities based on our model.\n\npred_proba = [i[1] for i in lr.predict_proba(X_ts_vec)]\n\npred_df = pd.DataFrame({'test_values': y_ts,\n 'pred_probs':pred_proba})\n\n# Calculate ROC AUC.\nprint(f\"roc_auc: {round(roc_auc_score(pred_df['test_values'],pred_df['pred_probs']),4)}\")",
"Specificity: 0.4147\nSensitivity: 0.8106\nroc_auc: 0.6374\n"
],
[
"# Summary of the topic modeling + LR model scores in Dataframe\nsummary_df = pd.DataFrame({'accuracy' : [0.5820, 0.6034],\n 'specificity' : [0.3814, 0.4147],\n 'sensitivity' : [0.8024, 0.8106],\n 'roc_auc' : [0.6121, 0.6374]})\n# Transpose dataframe\nsummary_dft = summary_df.T\n# Rename columns\nsummary_dft.columns = ['Validate set','Test set']\nprint(\"Topic modeling + LR classifier scores: \")\ndisplay(summary_dft)",
"Topic modeling + LR classifier scores: \n"
]
],
[
[
"From the sensitivity and roc_auc score, the model is not overfitted as test sensitivity and roc_auc is higher than on validate set. Before proceeding, a recap on the steps done to consolidate understanding.\n\n- Topic modeling using the train dataset,\n- Find optimum topics based on coherence score\n- Train LDA model on train data. The probability distributions of the topics are then used as feature vectors in the Logistic Regression model for binary classification (bomb vs. non-bomb) on the validate data set. \n- Thereafter, the trained LDA model is used to derive probability distributions of the topics from the test data. \n- Run Logistic Regression model on these topic probability distributions, to see if model generalizes\n\nIn the next section, the topic probability distributions are added to the count vectorized word features for both train and test dataset. The dataset is then run through the Logistic Regression model to determine overall model generalizability",
"_____no_output_____"
],
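[
"The recap above calls for fitting the classifier on the training topic vectors and only *evaluating* on the held-out test vectors. A minimal sketch of that pattern, assuming `X_tr_vec`/`y_train` and `X_ts_vec`/`y_ts` from the earlier cells (`lr_tm` is a new name introduced here, not part of the original workflow):\n\n```python\nfrom sklearn.linear_model import LogisticRegression\n\nlr_tm = LogisticRegression(random_state=42, solver='lbfgs', max_iter=500)\nlr_tm.fit(X_tr_vec, y_train)          # fit on the train topic vectors only\nprint(lr_tm.score(X_ts_vec, y_ts))    # score on the held-out test topic vectors\n```",
"_____no_output_____"
],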
[
"### Binary Classification (LDA topic and Countvectorizer features)",
"_____no_output_____"
]
],
[
[
"# Instantiate porterstemmer\np_stemmer = PorterStemmer()",
"_____no_output_____"
],
[
"# Define function to convert a raw selftext to a string of words\n\ndef selftext_to_words(motive_text):\n \n # 1. Remove non-letters.\n letters_only = re.sub(\"[^a-zA-Z]\", \" \", motive_text)\n \n # 2. Split into individual words\n words = letters_only.split()\n \n # 3. In Python, searching a set is much faster than searching\n # a list, so convert the stopwords to a set.\n stops = set(stop_words)\n\n # 5. Remove stopwords.\n meaningful_words = [w for w in words if w not in stops]\n \n # 5.5 Stemming of words\n meaningful_words = [p_stemmer.stem(w) for w in words]\n \n # 6. Join the words back into one string separated by space, \n # and return the result\n return(\" \".join(meaningful_words))",
"_____no_output_____"
],
[
"#Initialize an empty list to hold the clean test text.\nX_train_clean = []\nX_test_clean = []\n\nfor text in X_train['motive']:\n \"\"\"Convert text to words, then append to X_train_clean.\"\"\"\n X_train_clean.append(selftext_to_words(text))\nfor text in X_test['motive']:\n \"\"\"Convert text to words, then append to X_train_clean.\"\"\"\n X_test_clean.append(selftext_to_words(text))",
"_____no_output_____"
],
[
"# Instantiate our CountVectorizer\ncv = CountVectorizer(ngram_range=(1,2),max_df=0.9,min_df=3,max_features=10000)\n\n# Fit and transform on whole training data\nX_train_cleancv = cv.fit_transform(X_train_clean)\n\n# Transform test data\nX_test_cleancv = cv.transform(X_test_clean)",
"_____no_output_____"
],
[
"# Add word vectors (topic modeling) to the sparse matrices\n# Ref: https://stackoverflow.com/questions/55637498/numpy-ndarray-sparse-matrix-to-dense\n# Ref: https://kite.com/python/docs/scipy.sparse\n\n# Convert sparse matrix to dense\nX_tr_dense = X_train_cleancv.toarray()\nX_ts_dense = X_test_cleancv.toarray()\n\n# add numpy array (train and test topic model vectors to dense matrix)\nX_tr_dense_tm = np.concatenate((X_tr_dense,X_tr_vec),axis=1)\nX_ts_dense_tm = np.concatenate((X_ts_dense,X_ts_vec),axis=1)",
"_____no_output_____"
],
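[
"# Alternative sketch (not used above; assumes the variables from the previous cells):\n# scipy.sparse.hstack can append the topic-model vectors to the sparse count matrices\n# directly, avoiding the memory cost of densifying the full document-term matrix.\nfrom scipy.sparse import hstack, csr_matrix\nX_tr_sparse_alt = hstack([X_train_cleancv, csr_matrix(X_tr_vec)]).tocsr()\nX_ts_sparse_alt = hstack([X_test_cleancv, csr_matrix(X_ts_vec)]).tocsr()",
"_____no_output_____"
],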
[
"from scipy.sparse import csr_matrix\n# Convert back to sparse matrix for modeling\nX_tr_sparse = csr_matrix(X_tr_dense_tm)\nX_ts_sparse = csr_matrix(X_ts_dense_tm)",
"_____no_output_____"
],
[
"# Sanity Check\ndisplay(X_tr_sparse)\ndisplay(X_train_cleancv)",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
]
],
[
[
"# Instantiate model\nlr_comb = LogisticRegression(random_state=42,solver='lbfgs',max_iter=500)\n\n# Fit model on whole training data (without addn set of stopwords removed in NB model)\nmodel_lr = lr_comb.fit(X_tr_sparse,y_train)\n\n# Generate predictions from test set\npredictions = lr_comb.predict(X_ts_sparse)\nprint(f\"Accuracy on whole test set: {round(model_lr.score(X_ts_sparse, y_test),4)}\")",
"Accuracy on whole test set: 0.6893\n"
],
[
"# Confusion matrix for test set using NB model\n# Pass in true values, predicted values to confusion matrix\n# Convert Confusion matrix into dataframe\n# Positive class (class 1) is bomb\ncm = confusion_matrix(y_test, predictions)\ncm_df = pd.DataFrame(cm,columns=['pred non-bomb','pred bomb'], index=['Actual non-bomb','Actual bomb'])\ncm_df",
"_____no_output_____"
],
[
"# return nparray as a 1-D array.\nconfusion_matrix(y_test, predictions).ravel()\n\n# Save TN/FP/FN/TP values.\ntn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()\n\n# Summary of metrics for LR model\nspec = tn/(tn+fp)\nsens = tp/(tp+fn)\nprint(f\"Specificity: {round(spec,4)}\")\nprint(f\"Sensitivity: {round(sens,4)}\")\n\n# To compute the ROC AUC curve, first\n# Create a dataframe called pred_df that contains:\n# 1. The list of true values of our test set.\n# 2. The list of predicted probabilities based on our model.\n\npred_proba = [i[1] for i in lr_comb.predict_proba(X_ts_sparse)]\n\npred_df = pd.DataFrame({'test_values': y_test,\n 'pred_probs':pred_proba})\n\n# Calculate ROC AUC.\nprint(f\"roc_auc: {round(roc_auc_score(pred_df['test_values'],pred_df['pred_probs']),4)}\")",
"Specificity: 0.5351\nSensitivity: 0.8587\nroc_auc: 0.7621\n"
],
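[
"# Optional cross-check (sketch, not part of the original workflow): classification_report\n# summarizes precision/recall/F1 per class in one call, complementing the manual\n# specificity/sensitivity computation above.\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, predictions, target_names=['non-bomb', 'bomb']))",
"_____no_output_____"
],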
[
"# Summary of the topic modeling + LR model scores in Dataframe\nsummary_df = pd.DataFrame({'accuracy' : [0.6859, 0.6034, 0.6893],\n 'specificity' : [0.5257, 0.4147, 0.5351],\n 'sensitivity' : [0.8619, 0.8106, 0.8587],\n 'roc_auc' : [0.7568, 0.6374, 0.7621]})\n# Transpose dataframe\nsummary_dft = summary_df.T\n# Rename columns\nsummary_dft.columns = ['LR model (50 false neg wrd rmvd)','LR model (tm)', 'LR model (tm + wrd vec)']\ndisplay(summary_dft)",
"_____no_output_____"
]
],
[
[
"### Recommendations (Part2)",
"_____no_output_____"
],
[
"From the model metric summaries, the model using topic distributions alone as feature vectors has the lowest performance scores (sensitivity and roc_auc). The addition of feature vectors from count vectorizer improved model sensitivity and roc_auc. Model generalizability using LDA topic distributions has been demonstrated, though the best performing model remains the production Logistic Regression model using count vectorized word features. Nevertheless, the results are encouraging, and could be further experimented upon (some prelim thoughts are listed under future work).\n\nThe approach applied in this project could work in general, for similar NLP-based classifiers.",
"_____no_output_____"
],
[
"### Future Work",
"_____no_output_____"
],
[
"Terrorism is a complex topic as it covers politics, psychology, philosophy, military strategy, etc. The current model is a very simplistic model in that it classifies a terrorist attack mode as 'bomb' or 'non-bomb' based solely on one form of intel (motive text). Additional sources or forms of intel are not included, nor political and social factors trends that could serve as supporting sources of intelligence.\n\nHere are a few areas that I would like to revisit for future project extensions:\n- source for additional data to widen perspective\n- feature engineer spatial and temporal aspects (e.g. attacks by region, attacks by decades)\n- explore model performance using Tfidf vectorizer and spaCy\n- explore other classification models (currently only 2 models explored; time allocated between studying the dataset variables, motive texts, longer than usual modeling times with the inherent size of the dataset, and research on topic modeling (LDA) and spaCy)",
"_____no_output_____"
],
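[
"One of the future-work items above mentions a TF-IDF representation; a minimal sketch of how it could be swapped in, mirroring the `CountVectorizer` settings used earlier (nothing here was run as part of this project, and `tfidf`, `X_train_tfidf`, `X_test_tfidf` are new names):\n\n```python\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf = TfidfVectorizer(ngram_range=(1, 2), max_df=0.9, min_df=3, max_features=10000)\nX_train_tfidf = tfidf.fit_transform(X_train_clean)\nX_test_tfidf = tfidf.transform(X_test_clean)\n```",
"_____no_output_____"
],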
[
"---",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
cb73207b3538757797b22b4c7b59faa8a332c4de | 226,605 | ipynb | Jupyter Notebook | machineTranslation/machineTranslation/machine_translation.ipynb | Ohara124c41/NLP-Machine_Translation | e861806be50c10f4be661609ce88415c45314d67 | [
"MIT"
]
| null | null | null | machineTranslation/machineTranslation/machine_translation.ipynb | Ohara124c41/NLP-Machine_Translation | e861806be50c10f4be661609ce88415c45314d67 | [
"MIT"
]
| null | null | null | machineTranslation/machineTranslation/machine_translation.ipynb | Ohara124c41/NLP-Machine_Translation | e861806be50c10f4be661609ce88415c45314d67 | [
"MIT"
]
| null | null | null | 170.25169 | 21,700 | 0.868825 | [
[
[
"# Artificial Intelligence Nanodegree\n## Machine Translation Project\nIn this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully!\n\n## Introduction\nIn this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.\n\n- **Preprocess** - You'll convert text to sequence of integers.\n- **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model!\n- **Prediction** Run the model on English text.",
"_____no_output_____"
]
],
[
[
"import collections\n\nimport helper\nimport numpy as np\nimport project_tests as tests\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model, Sequential\nfrom keras.layers import GRU, BatchNormalization, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Dropout, LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.optimizers import Adam\nfrom keras.losses import sparse_categorical_crossentropy",
"_____no_output_____"
]
],
[
[
"### Verify access to the GPU\nThe following test applies only if you expect to be using a GPU, e.g., while running in a Udacity Workspace or using an AWS instance with GPU support. Run the next cell, and verify that the device_type is \"GPU\".\n- If the device is not GPU & you are running from a Udacity Workspace, then save your workspace with the icon at the top, then click \"enable\" at the bottom of the workspace.\n- If the device is not GPU & you are running from an AWS instance, then refer to the cloud computing instructions in the classroom to verify your setup steps.",
"_____no_output_____"
]
],
[
[
"from tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())",
"[name: \"/cpu:0\"\ndevice_type: \"CPU\"\nmemory_limit: 268435456\nlocality {\n}\nincarnation: 15403678832681567306\n, name: \"/gpu:0\"\ndevice_type: \"GPU\"\nmemory_limit: 357433344\nlocality {\n bus_id: 1\n}\nincarnation: 18342438046195579891\nphysical_device_desc: \"device: 0, name: Tesla K80, pci bus id: 0000:00:04.0\"\n]\n"
]
],
[
[
"## Dataset\nWe begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset.\n### Load Data\nThe data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below.",
"_____no_output_____"
]
],
[
[
"# Load English data\nenglish_sentences = helper.load_data('data/small_vocab_en')\n# Load French data\nfrench_sentences = helper.load_data('data/small_vocab_fr')\n\nprint('Dataset Loaded')",
"Dataset Loaded\n"
]
],
[
[
"### Files\nEach line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file.",
"_____no_output_____"
]
],
[
[
"for sample_i in range(2):\n print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i]))\n print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i]))",
"small_vocab_en Line 1: new jersey is sometimes quiet during autumn , and it is snowy in april .\nsmall_vocab_fr Line 1: new jersey est parfois calme pendant l' automne , et il est neigeux en avril .\nsmall_vocab_en Line 2: the united states is usually chilly during july , and it is usually freezing in november .\nsmall_vocab_fr Line 2: les états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\n"
]
],
[
[
"From looking at the sentences, you can see they have been preprocessed already. The puncuations have been delimited using spaces. All the text have been converted to lowercase. This should save you some time, but the text requires more preprocessing.\n### Vocabulary\nThe complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with.",
"_____no_output_____"
]
],
[
[
"english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])\nfrench_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])\n\nprint('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))\nprint('{} unique English words.'.format(len(english_words_counter)))\nprint('10 Most common words in the English dataset:')\nprint('\"' + '\" \"'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '\"')\nprint()\nprint('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))\nprint('{} unique French words.'.format(len(french_words_counter)))\nprint('10 Most common words in the French dataset:')\nprint('\"' + '\" \"'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '\"')",
"1823250 English words.\n227 unique English words.\n10 Most common words in the English dataset:\n\"is\" \",\" \".\" \"in\" \"it\" \"during\" \"the\" \"but\" \"and\" \"sometimes\"\n\n1961295 French words.\n355 unique French words.\n10 Most common words in the French dataset:\n\"est\" \".\" \",\" \"en\" \"il\" \"les\" \"mais\" \"et\" \"la\" \"parfois\"\n"
]
],
[
[
"For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words.\n## Preprocess\nFor this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods:\n1. Tokenize the words into ids\n2. Add padding to make all the sequences the same length.\n\nTime to start preprocessing the data...\n### Tokenize (IMPLEMENTATION)\nFor a neural network to predict on text data, it first has to be turned into data it can understand. Text data like \"dog\" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s).\n\nWe can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those.\n\nTurn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below.\n\nRunning the cell will run `tokenize` on sample data and show output for debugging.",
"_____no_output_____"
]
],
[
[
"def tokenize(x):\n \"\"\"\n Tokenize x\n :param x: List of sentences/strings to be tokenized\n :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n \"\"\"\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(x)\n return tokenizer.texts_to_sequences(x), tokenizer\n \n# Tokenize Example output\ntext_sentences = [\n 'The quick brown fox jumps over the lazy dog .',\n 'By Jove , my quick study of lexicography won a prize .',\n 'This is a short sentence .']\ntext_tokenized, text_tokenizer = tokenize(text_sentences)\nprint(text_tokenizer.word_index)\nprint()\nfor sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):\n print('Sequence {} in x'.format(sample_i + 1))\n print(' Input: {}'.format(sent))\n print(' Output: {}'.format(token_sent))",
"{'the': 1, 'quick': 2, 'a': 3, 'brown': 4, 'fox': 5, 'jumps': 6, 'over': 7, 'lazy': 8, 'dog': 9, 'by': 10, 'jove': 11, 'my': 12, 'study': 13, 'of': 14, 'lexicography': 15, 'won': 16, 'prize': 17, 'this': 18, 'is': 19, 'short': 20, 'sentence': 21}\n\nSequence 1 in x\n Input: The quick brown fox jumps over the lazy dog .\n Output: [1, 2, 4, 5, 6, 7, 1, 8, 9]\nSequence 2 in x\n Input: By Jove , my quick study of lexicography won a prize .\n Output: [10, 11, 12, 2, 13, 14, 15, 16, 3, 17]\nSequence 3 in x\n Input: This is a short sentence .\n Output: [18, 19, 3, 20, 21]\n"
]
],
[
[
"### Padding (IMPLEMENTATION)\nWhen batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length.\n\nMake sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function.",
"_____no_output_____"
]
],
[
[
"def pad(x, length=None):\n \"\"\"\n Pad x\n :param x: List of sequences.\n :param length: Length to pad the sequence to. If None, use length of longest sequence in x.\n :return: Padded numpy array of sequences\n \"\"\"\n return pad_sequences(x, maxlen=length, padding='post')\n tests.test_pad(pad)\n\n# Pad Tokenized output\ntest_pad = pad(text_tokenized)\nfor sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):\n print('Sequence {} in x'.format(sample_i + 1))\n print(' Input: {}'.format(np.array(token_sent)))\n print(' Output: {}'.format(pad_sent))",
"Sequence 1 in x\n Input: [1 2 4 5 6 7 1 8 9]\n Output: [1 2 4 5 6 7 1 8 9 0]\nSequence 2 in x\n Input: [10 11 12 2 13 14 15 16 3 17]\n Output: [10 11 12 2 13 14 15 16 3 17]\nSequence 3 in x\n Input: [18 19 3 20 21]\n Output: [18 19 3 20 21 0 0 0 0 0]\n"
]
],
[
[
"### Preprocess Pipeline\nYour focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function.",
"_____no_output_____"
]
],
[
[
"def preprocess(x, y):\n \"\"\"\n Preprocess x and y\n :param x: Feature List of sentences\n :param y: Label List of sentences\n :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n \"\"\"\n preprocess_x, x_tk = tokenize(x)\n preprocess_y, y_tk = tokenize(y)\n\n preprocess_x = pad(preprocess_x)\n preprocess_y = pad(preprocess_y)\n\n # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions\n preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)\n\n return preprocess_x, preprocess_y, x_tk, y_tk\n\npreproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\\\n preprocess(english_sentences, french_sentences)\n \nmax_english_sequence_length = preproc_english_sentences.shape[1]\nmax_french_sequence_length = preproc_french_sentences.shape[1]\nenglish_vocab_size = len(english_tokenizer.word_index)\nfrench_vocab_size = len(french_tokenizer.word_index)\n\nprint('Data Preprocessed')\nprint(\"Max English sentence length:\", max_english_sequence_length)\nprint(\"Max French sentence length:\", max_french_sequence_length)\nprint(\"English vocabulary size:\", english_vocab_size)\nprint(\"French vocabulary size:\", french_vocab_size)",
"Data Preprocessed\nMax English sentence length: 15\nMax French sentence length: 21\nEnglish vocabulary size: 199\nFrench vocabulary size: 344\n"
]
],
[
[
"## Models\nIn this section, you will experiment with various neural network architectures.\nYou will begin by training four relatively simple architectures.\n- Model 1 is a simple RNN\n- Model 2 is a RNN with Embedding\n- Model 3 is a Bidirectional RNN\n- Model 4 is an optional Encoder-Decoder RNN\n\nAfter experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models.\n### Ids Back to Text\nThe neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gab between the logits from the neural network to the French translation. You'll be using this function to better understand the output of the neural network.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ndef chart_model(model_histogram):\n\n # Plot ACC vs Epoch\n plt.plot(model_histogram.history['acc'])\n plt.plot(model_histogram.history['val_acc'])\n plt.title('Model Accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Validation'], loc='upper left')\n plt.show()\n \n # Plot LOSS vs Epoch\n plt.plot(model_histogram.history['loss'])\n plt.title('Model Loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Training'], loc='upper left')\n plt.show()",
"_____no_output_____"
],
[
"def logits_to_text(logits, tokenizer):\n \"\"\"\n Turn logits from a neural network into text using the tokenizer\n :param logits: Logits from a neural network\n :param tokenizer: Keras Tokenizer fit on the labels\n :return: String that represents the text of the logits\n \"\"\"\n index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n index_to_words[0] = '<PAD>'\n\n return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])\n\nprint('`logits_to_text` function loaded.')",
"`logits_to_text` function loaded.\n"
]
],
[
[
"### Model 1: RNN (IMPLEMENTATION)\n\nA basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French.",
"_____no_output_____"
]
],
[
[
"def simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n \"\"\"\n Build and train a basic RNN on x and y\n :param input_shape: Tuple of input shape\n :param output_sequence_length: Length of output sequence\n :param english_vocab_size: Number of unique English words in the dataset\n :param french_vocab_size: Number of unique French words in the dataset\n :return: Keras model built, but not trained\n \"\"\"\n # Hyperparameters\n learning_rate = 0.005 ## 0.01 #acceptable\n rnn_dim = 256 ## 128 #high-loss value\n batch_size = 1024 ## 512 #high-loss value\n \n # Build the layers\n model = Sequential()\n model.add(GRU(rnn_dim, input_shape=input_shape[1:], return_sequences=True))\n model.add(TimeDistributed(Dense(batch_size, activation='relu')))\n model.add(Dropout(0.5))\n model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax'))) \n\n # Compile model\n model.compile(loss=sparse_categorical_crossentropy,\n optimizer=Adam(learning_rate),\n metrics=['accuracy'])\n return model\n\n tests.test_simple_model(simple_model)\n\n# Reshaping the input to work with a basic RNN\ntmp_x = pad(preproc_english_sentences, max_french_sequence_length)\ntmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n\n# Train the neural network\nsimple_rnn_model = simple_model(\n tmp_x.shape,\n max_french_sequence_length,\n english_vocab_size,\n french_vocab_size)\n\nsimple_rnn_model.summary()\n\nsimple_model_chart = simple_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n\n# Print prediction(s)\nprint(logits_to_text(simple_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ngru_2 (GRU) (None, 21, 256) 198144 \n_________________________________________________________________\ntime_distributed_3 (TimeDist (None, 21, 1024) 263168 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 21, 1024) 0 \n_________________________________________________________________\ntime_distributed_4 (TimeDist (None, 21, 344) 352600 \n=================================================================\nTotal params: 813,912\nTrainable params: 813,912\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 110288 samples, validate on 27573 samples\nEpoch 1/10\n110288/110288 [==============================] - 17s 154us/step - loss: 1.9094 - acc: 0.5486 - val_loss: nan - val_acc: 0.6380\nEpoch 2/10\n110288/110288 [==============================] - 17s 150us/step - loss: 1.1987 - acc: 0.6496 - val_loss: nan - val_acc: 0.6749\nEpoch 3/10\n110288/110288 [==============================] - 16s 149us/step - loss: 1.0583 - acc: 0.6759 - val_loss: nan - val_acc: 0.6914\nEpoch 4/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.9774 - acc: 0.6895 - val_loss: nan - val_acc: 0.7066\nEpoch 5/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.9141 - acc: 0.7014 - val_loss: nan - val_acc: 0.7199\nEpoch 6/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.8655 - acc: 0.7144 - val_loss: nan - val_acc: 0.7420\nEpoch 7/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.8190 - acc: 0.7283 - val_loss: nan - val_acc: 0.7534\nEpoch 8/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.7715 - acc: 0.7418 - val_loss: nan - val_acc: 0.7614\nEpoch 9/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.7322 - acc: 0.7534 - val_loss: nan - val_acc: 0.7703\nEpoch 10/10\n110288/110288 [==============================] - 16s 149us/step - loss: 0.7176 - acc: 0.7577 - val_loss: nan - val_acc: 0.7720\nnew jersey est parfois chaud en mois de il est il est en en <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n"
],
[
"# Visualize\nchart_model(simple_model_chart)",
"_____no_output_____"
]
],
[
[
"### Model 2: Embedding (IMPLEMENTATION)\n\nYou've turned the words into ids, but there's a better representation of a word. This is called word embeddings. An embedding is a vector representation of the word that is close to similar words in n-dimensional space, where the n represents the size of the embedding vectors.\n\nIn this model, you'll create a RNN model using embedding.",
"_____no_output_____"
]
],
[
[
"def embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n \"\"\"\n Build and train a RNN model using word embedding on x and y\n :param input_shape: Tuple of input shape\n :param output_sequence_length: Length of output sequence\n :param english_vocab_size: Number of unique English words in the dataset\n :param french_vocab_size: Number of unique French words in the dataset\n :return: Keras model built, but not trained\n \"\"\"\n # Hyperparameters\n learning_rate = 0.01\n emb_dim = 256\n batch_size = 1024\n \n # Build the layers\n model = Sequential()\n model.add(Embedding(english_vocab_size, emb_dim, input_length=input_shape[1], input_shape=input_shape[1:]))\n model.add(GRU(emb_dim, return_sequences=True)) \n model.add(TimeDistributed(Dense(batch_size, activation='relu')))\n model.add(Dropout(0.5))\n model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax'))) \n\n # Compile model\n model.compile(loss=sparse_categorical_crossentropy,\n optimizer=Adam(learning_rate),\n metrics=['accuracy'])\n return model\n\n tests.test_embed_model(embed_model)\n\n\n# Reshape the input\ntmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])\ntmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2]))\n\n# Train the neural network\nembed_rnn_model = embed_model(\n tmp_x.shape,\n preproc_french_sentences.shape[1],\n len(english_tokenizer.word_index)+1,\n len(french_tokenizer.word_index)+1)\n\nembed_rnn_model.summary()\n\nembed_model_chart = embed_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n\n\n# Print prediction(s)\nprint(logits_to_text(embed_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_6 (Embedding) (None, 21, 256) 51200 \n_________________________________________________________________\ngru_8 (GRU) (None, 21, 256) 393984 \n_________________________________________________________________\ntime_distributed_11 (TimeDis (None, 21, 1024) 263168 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 21, 1024) 0 \n_________________________________________________________________\ntime_distributed_12 (TimeDis (None, 21, 345) 353625 \n=================================================================\nTotal params: 1,061,977\nTrainable params: 1,061,977\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 110288 samples, validate on 27573 samples\nEpoch 1/10\n110288/110288 [==============================] - 20s 184us/step - loss: 0.9739 - acc: 0.7580 - val_loss: 0.3385 - val_acc: 0.8854\nEpoch 2/10\n110288/110288 [==============================] - 20s 179us/step - loss: 0.3042 - acc: 0.8999 - val_loss: 0.2475 - val_acc: 0.9169\nEpoch 3/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2461 - acc: 0.9181 - val_loss: 0.2152 - val_acc: 0.9264\nEpoch 4/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2219 - acc: 0.9252 - val_loss: 0.2065 - val_acc: 0.9281\nEpoch 5/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2148 - acc: 0.9271 - val_loss: 0.2114 - val_acc: 0.9292\nEpoch 6/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2102 - acc: 0.9282 - val_loss: 0.1983 - val_acc: 0.9320\nEpoch 7/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2055 - acc: 0.9296 - val_loss: 0.2005 - val_acc: 0.9321\nEpoch 8/10\n110288/110288 [==============================] - 20s 178us/step - loss: 0.2095 - acc: 0.9288 - val_loss: 0.2088 - val_acc: 0.9288\nEpoch 9/10\n110288/110288 [==============================] - 20s 177us/step - loss: 0.2160 - acc: 0.9274 - val_loss: 0.2020 - val_acc: 0.9320\nEpoch 10/10\n110288/110288 [==============================] - 20s 177us/step - loss: 0.2124 - acc: 0.9284 - val_loss: 0.2013 - val_acc: 0.9315\nnew jersey est parfois calme en l' et il est neigeux en avril <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n"
],
[
"# Visualize\nchart_model(embed_model_chart)",
"_____no_output_____"
]
],
[
[
"### Model 3: Bidirectional RNNs (IMPLEMENTATION)\n\nOne restriction of a RNN is that it can't see the future input, only the past. This is where bidirectional recurrent neural networks come in. They are able to see the future data.",
"_____no_output_____"
]
],
[
[
"def bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n \"\"\"\n Build and train a bidirectional RNN model on x and y\n :param input_shape: Tuple of input shape\n :param output_sequence_length: Length of output sequence\n :param english_vocab_size: Number of unique English words in the dataset\n :param french_vocab_size: Number of unique French words in the dataset\n :return: Keras model built, but not trained\n \"\"\"\n # TODO: Implement\n # Hyperparameters\n learning_rate = 0.003\n \n # TODO: Build the layers\n model = Sequential()\n model.add(Bidirectional(GRU(128, return_sequences=True), input_shape=input_shape[1:]))\n model.add(TimeDistributed(Dense(1024, activation='relu')))\n model.add(Dropout(0.5))\n model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax'))) \n\n # Compile model\n model.compile(loss=sparse_categorical_crossentropy,\n optimizer=Adam(learning_rate),\n metrics=['accuracy'])\n return model\n tests.test_bd_model(bd_model)\n\n# Reshape the input\ntmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])\ntmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n\n# Train the neural network\nbd_rnn_model = bd_model(tmp_x.shape,\n preproc_french_sentences.shape[1],\n len(english_tokenizer.word_index),\n len(french_tokenizer.word_index))\n\nbd_rnn_model.summary()\n\nbd_model_chart = bd_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n\nprint(logits_to_text(bd_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nbidirectional_14 (Bidirectio (None, 21, 256) 99840 \n_________________________________________________________________\ntime_distributed_33 (TimeDis (None, 21, 1024) 263168 \n_________________________________________________________________\ndropout_17 (Dropout) (None, 21, 1024) 0 \n_________________________________________________________________\ntime_distributed_34 (TimeDis (None, 21, 344) 352600 \n=================================================================\nTotal params: 715,608\nTrainable params: 715,608\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 110288 samples, validate on 27573 samples\nEpoch 1/10\n110288/110288 [==============================] - 19s 173us/step - loss: 1.8547 - acc: 0.5681 - val_loss: nan - val_acc: 0.6427\nEpoch 2/10\n110288/110288 [==============================] - 18s 161us/step - loss: 1.2108 - acc: 0.6496 - val_loss: nan - val_acc: 0.6712\nEpoch 3/10\n110288/110288 [==============================] - 18s 161us/step - loss: 1.0863 - acc: 0.6714 - val_loss: nan - val_acc: 0.6884\nEpoch 4/10\n110288/110288 [==============================] - 18s 161us/step - loss: 1.0070 - acc: 0.6829 - val_loss: nan - val_acc: 0.6979\nEpoch 5/10\n110288/110288 [==============================] - 18s 161us/step - loss: 0.9481 - acc: 0.6925 - val_loss: nan - val_acc: 0.7083\nEpoch 6/10\n110288/110288 [==============================] - 18s 161us/step - loss: 0.8978 - acc: 0.7020 - val_loss: nan - val_acc: 0.7199\nEpoch 7/10\n110288/110288 [==============================] - 18s 160us/step - loss: 0.8557 - acc: 0.7115 - val_loss: nan - val_acc: 0.7301\nEpoch 8/10\n110288/110288 [==============================] - 18s 161us/step - loss: 0.8228 - acc: 0.7175 - val_loss: nan - val_acc: 0.7381\nEpoch 9/10\n110288/110288 [==============================] - 18s 160us/step - loss: 0.7978 - acc: 0.7223 - val_loss: nan - val_acc: 0.7421\nEpoch 10/10\n110288/110288 [==============================] - 18s 160us/step - loss: 0.7696 - acc: 0.7290 - val_loss: nan - val_acc: 0.7494\nnew jersey est parfois calme en mois et il et il est en en <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n"
],
[
"# Visualize\nchart_model(bd_model_chart)",
"_____no_output_____"
]
],
[
[
"### Model 4: Encoder-Decoder (OPTIONAL)\nTime to look at encoder-decoder models. This model is made up of an encoder and decoder. The encoder creates a matrix representation of the sentence. The decoder takes this matrix as input and predicts the translation as output.\n\nCreate an encoder-decoder model in the cell below.",
"_____no_output_____"
]
],
[
[
"def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n \"\"\"\n Build and train an encoder-decoder model on x and y\n :param input_shape: Tuple of input shape\n :param output_sequence_length: Length of output sequence\n :param english_vocab_size: Number of unique English words in the dataset\n :param french_vocab_size: Number of unique French words in the dataset\n :return: Keras model built, but not trained\n \"\"\"\n # Hyperparameters\n learning_rate = 0.001\n encdec_dim = 256\n batch_size = 1024\n \n # Build the layers \n model = Sequential()\n \n # Encoder\n model.add(GRU(encdec_dim, input_shape=input_shape[1:], go_backwards=True))\n model.add(RepeatVector(output_sequence_length))\n \n # Decoder\n model.add(GRU(encdec_dim, return_sequences=True))\n model.add(TimeDistributed(Dense(1024, activation='relu')))\n model.add(Dropout(0.5))\n model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n\n # Compile model\n model.compile(loss=sparse_categorical_crossentropy,\n optimizer=Adam(learning_rate),\n metrics=['accuracy'])\n \n return model\ntests.test_encdec_model(encdec_model)\n\n\n# Reshape the input\ntmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])\ntmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))\n\n# Train and Print prediction(s)\nencdec_rnn_model = encdec_model(\n tmp_x.shape,\n preproc_french_sentences.shape[1],\n len(english_tokenizer.word_index)+1,\n len(french_tokenizer.word_index)+1)\n\nencdec_rnn_model.summary()\n\nencdec_model_chart = encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=1024, epochs=10, validation_split=0.2)\n\nprint(logits_to_text(encdec_rnn_model.predict(tmp_x[:1])[0], french_tokenizer))",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ngru_11 (GRU) (None, 256) 198144 \n_________________________________________________________________\nrepeat_vector_2 (RepeatVecto (None, 21, 256) 0 \n_________________________________________________________________\ngru_12 (GRU) (None, 21, 256) 393984 \n_________________________________________________________________\ntime_distributed_15 (TimeDis (None, 21, 1024) 263168 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 21, 1024) 0 \n_________________________________________________________________\ntime_distributed_16 (TimeDis (None, 21, 345) 353625 \n=================================================================\nTotal params: 1,208,921\nTrainable params: 1,208,921\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 110288 samples, validate on 27573 samples\nEpoch 1/10\n110288/110288 [==============================] - 26s 240us/step - loss: 2.5397 - acc: 0.4664 - val_loss: 1.8587 - val_acc: 0.5474\nEpoch 2/10\n110288/110288 [==============================] - 26s 233us/step - loss: 1.6522 - acc: 0.5740 - val_loss: 1.4508 - val_acc: 0.6072\nEpoch 3/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.4317 - acc: 0.6077 - val_loss: 1.3731 - val_acc: 0.6130\nEpoch 4/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.3428 - acc: 0.6269 - val_loss: 1.2772 - val_acc: 0.6455\nEpoch 5/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.2760 - acc: 0.6437 - val_loss: 1.2269 - val_acc: 0.6547\nEpoch 6/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.2427 - acc: 0.6496 - val_loss: 1.1819 - val_acc: 0.6648\nEpoch 7/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.2103 - acc: 0.6556 - val_loss: 1.2140 - val_acc: 0.6493\nEpoch 8/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.1751 - acc: 0.6621 - val_loss: 1.0987 - val_acc: 0.6771\nEpoch 9/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.1377 - acc: 0.6696 - val_loss: 1.0653 - val_acc: 0.6869\nEpoch 10/10\n110288/110288 [==============================] - 26s 232us/step - loss: 1.0855 - acc: 0.6821 - val_loss: 1.0470 - val_acc: 0.6894\nnew jersey est parfois chaud en l' et il est est en en <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>\n"
],
[
"# Visualize\nchart_model(encdec_model_chart)",
"_____no_output_____"
]
],
[
[
"### Model 5: Custom (IMPLEMENTATION)\nUse everything you learned from the previous models to create a model that incorporates embedding and a bidirectional rnn into one model.",
"_____no_output_____"
]
],
[
[
"def model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):\n \"\"\"\n Build and train a model that incorporates embedding, encoder-decoder, and bidirectional RNN on x and y\n :param input_shape: Tuple of input shape\n :param output_sequence_length: Length of output sequence\n :param english_vocab_size: Number of unique English words in the dataset\n :param french_vocab_size: Number of unique French words in the dataset\n :return: Keras model built, but not trained\n \"\"\"\n # Hyperparameters\n learning_rate = 0.01\n \n # Build the layers \n model = Sequential()\n \n # Embedding\n model.add(Embedding(english_vocab_size, 128, input_length=input_shape[1],\n input_shape=input_shape[1:]))\n # Encoder\n model.add(Bidirectional(GRU(128)))\n model.add(RepeatVector(output_sequence_length))\n model.add(BatchNormalization())\n \n # Decoder\n model.add(Bidirectional(GRU(128, return_sequences=True)))\n model.add(BatchNormalization())\n model.add(TimeDistributed(Dense(256, activation='relu')))\n model.add(Dropout(0.5))\n model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n model.compile(loss=sparse_categorical_crossentropy,\n optimizer=Adam(learning_rate),\n metrics=['accuracy'])\n return model\n\n tests.test_model_final(model_final)\n\n\n print('Final Model Loaded')",
"Final Model Loaded\n"
]
],
[
[
"## Prediction (IMPLEMENTATION)",
"_____no_output_____"
]
],
[
[
"def final_predictions(x, y, x_tk, y_tk):\n \"\"\"\n Gets predictions using the final model\n :param x: Preprocessed English data\n :param y: Preprocessed French data\n :param x_tk: English tokenizer\n :param y_tk: French tokenizer\n \"\"\"\n # Train neural network using model_final\n model = model_final(x.shape,y.shape[1],\n len(x_tk.word_index)+1,\n len(y_tk.word_index)+1)\n \n model.summary()\n \n custom_model_chart = model.fit(x, y, batch_size=1024, epochs=15, validation_split=0.2)\n \n # Visualize\n chart_model(custom_model_chart)\n\n ## DON'T EDIT ANYTHING BELOW THIS LINE\n y_id_to_word = {value: key for key, value in y_tk.word_index.items()}\n y_id_to_word[0] = '<PAD>'\n\n sentence = 'he saw a old yellow truck'\n sentence = [x_tk.word_index[word] for word in sentence.split()]\n sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')\n sentences = np.array([sentence[0], x[0]])\n predictions = model.predict(sentences, len(sentences))\n\n print('Sample 1:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))\n print('Il a vu un vieux camion jaune')\n print('Sample 2:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))\n print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))\n\n\nfinal_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_21 (Embedding) (None, 15, 128) 25600 \n_________________________________________________________________\nbidirectional_30 (Bidirectio (None, 256) 197376 \n_________________________________________________________________\nrepeat_vector_14 (RepeatVect (None, 21, 256) 0 \n_________________________________________________________________\nbatch_normalization_11 (Batc (None, 21, 256) 1024 \n_________________________________________________________________\nbidirectional_31 (Bidirectio (None, 21, 256) 295680 \n_________________________________________________________________\nbatch_normalization_12 (Batc (None, 21, 256) 1024 \n_________________________________________________________________\ntime_distributed_43 (TimeDis (None, 21, 256) 65792 \n_________________________________________________________________\ndropout_22 (Dropout) (None, 21, 256) 0 \n_________________________________________________________________\ntime_distributed_44 (TimeDis (None, 21, 345) 88665 \n=================================================================\nTotal params: 675,161\nTrainable params: 674,137\nNon-trainable params: 1,024\n_________________________________________________________________\nTrain on 110288 samples, validate on 27573 samples\nEpoch 1/15\n110288/110288 [==============================] - 28s 255us/step - loss: 2.0123 - acc: 0.5165 - val_loss: 4.5794 - val_acc: 0.4280\nEpoch 2/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.9202 - acc: 0.7340 - val_loss: 2.7337 - val_acc: 0.5436\nEpoch 3/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.5213 - acc: 0.8424 - val_loss: 1.3098 - val_acc: 0.7507\nEpoch 4/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.3590 - acc: 0.8904 - val_loss: 0.3351 - val_acc: 0.9040\nEpoch 5/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.2669 - acc: 0.9192 - val_loss: 0.1953 - val_acc: 0.9392\nEpoch 6/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.2212 - acc: 0.9336 - val_loss: 0.1703 - val_acc: 0.9462\nEpoch 7/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1778 - acc: 0.9467 - val_loss: 0.1431 - val_acc: 0.9562\nEpoch 8/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1628 - acc: 0.9513 - val_loss: 0.1383 - val_acc: 0.9570\nEpoch 9/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1596 - acc: 0.9526 - val_loss: 0.1356 - val_acc: 0.9597\nEpoch 10/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1347 - acc: 0.9599 - val_loss: 0.1429 - val_acc: 0.9583\nEpoch 11/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1495 - acc: 0.9555 - val_loss: 0.1066 - val_acc: 0.9668\nEpoch 12/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1524 - acc: 0.9552 - val_loss: 0.1741 - val_acc: 0.9488\nEpoch 13/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1285 - acc: 0.9616 - val_loss: 0.0961 - val_acc: 0.9713\nEpoch 14/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1182 - acc: 0.9647 - val_loss: 0.0976 - val_acc: 0.9719\nEpoch 15/15\n110288/110288 [==============================] - 26s 232us/step - loss: 0.1002 - acc: 0.9700 - val_loss: 
0.0802 - val_acc: 0.9764\n"
]
],
[
[
"## Submission\nWhen you're ready to submit, complete the following steps:\n1. Review the [rubric](https://review.udacity.com/#!/rubrics/1004/view) to ensure your submission meets all requirements to pass\n2. Generate an HTML version of this notebook\n\n - Run the next cell to attempt automatic generation (this is the recommended method in Workspaces)\n - Navigate to **FILE -> Download as -> HTML (.html)**\n - Manually generate a copy using `nbconvert` from your shell terminal\n```\n$ pip install nbconvert\n$ python -m nbconvert machine_translation.ipynb\n```\n \n3. Submit the project\n\n - If you are in a Workspace, simply click the \"Submit Project\" button (bottom towards the right)\n \n - Otherwise, add the following files into a zip archive and submit them \n - `helper.py`\n - `machine_translation.ipynb`\n - `machine_translation.html`\n - You can export the notebook by navigating to **File -> Download as -> HTML (.html)**.",
"_____no_output_____"
],
[
"### Generate the html\n\n**Save your notebook before running the next cell to generate the HTML output.** Then submit your project.",
"_____no_output_____"
]
],
[
[
"# Save before you run this cell!\n!!jupyter nbconvert *.ipynb",
"_____no_output_____"
]
],
[
[
"## Optional Enhancements\n\nThis project focuses on learning various network architectures for machine translation, but we don't evaluate the models according to best practices by splitting the data into separate test & training sets -- so the model accuracy is overstated. Use the [`sklearn.model_selection.train_test_split()`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to create separate training & test datasets, then retrain each of the models using only the training set and evaluate the prediction accuracy using the hold out test set. Does the \"best\" model change?",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb733e18ea237933d27144c4b4d760382d91b83e | 26,052 | ipynb | Jupyter Notebook | notebooks/CNN.ipynb | abdel/imdb-sentiment-analysis | f240c6b28cda01c9496fd5e0b02eedd8fbd22b44 | [
"MIT"
]
| 6 | 2017-12-21T02:12:42.000Z | 2020-06-01T08:50:40.000Z | notebooks/CNN.ipynb | abdel/imdb-sentiment-analysis | f240c6b28cda01c9496fd5e0b02eedd8fbd22b44 | [
"MIT"
]
| null | null | null | notebooks/CNN.ipynb | abdel/imdb-sentiment-analysis | f240c6b28cda01c9496fd5e0b02eedd8fbd22b44 | [
"MIT"
]
| 5 | 2018-04-09T14:57:53.000Z | 2020-04-11T14:35:02.000Z | 68.377953 | 15,832 | 0.78589 | [
[
[
"# Convolutional Neural Network",
"_____no_output_____"
],
[
"## Import Dependencies",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom imp import reload\n\nimport itertools\nimport numpy as np\nimport utils; reload(utils)\n\nfrom utils import *\nfrom __future__ import print_function\nfrom sklearn.metrics import confusion_matrix, classification_report, f1_score",
"_____no_output_____"
],
[
"from keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding, SpatialDropout1D\nfrom keras.layers import LSTM\nfrom keras.layers import Conv1D, GlobalMaxPooling1D\nfrom keras.layers import Flatten\nfrom keras.datasets import imdb\nfrom keras.utils import plot_model\nfrom keras.utils.vis_utils import model_to_dot\n\nfrom IPython.display import SVG\nfrom IPython.display import Image",
"_____no_output_____"
]
],
[
[
"## Configure Parameters",
"_____no_output_____"
]
],
[
[
"# Embedding\nembedding_size = 50\nmax_features = 5000\nmaxlen = 400\n\n# Convolution\nkernel_size = 3\npool_size = 4\nfilters = 250\n\n# Dense\nhidden_dims = 250\n\n# Training\nbatch_size = 64\nepochs = 4",
"_____no_output_____"
]
],
[
[
"## Data Preparation",
"_____no_output_____"
]
],
[
[
"(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)",
"_____no_output_____"
],
[
"# Pad sequences\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n\nprint('Train data size:', x_train.shape)\nprint('Test data size:', x_test.shape)",
"Train data size: (25000, 400)\nTest data size: (25000, 400)\n"
]
],
[
[
"## Modelling",
"_____no_output_____"
]
],
[
[
"model = Sequential()\n\n# we start off with an efficient embedding layer which maps\n# our vocab indices into embedding_dims dimensions\nmodel.add(Embedding(max_features, \n embedding_size, \n input_length=maxlen))\nmodel.add(Dropout(0.2))\n\n\nmodel.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\nmodel.add(GlobalMaxPooling1D())\n\n# We add a vanilla hidden layer:\nmodel.add(Dense(hidden_dims))\nmodel.add(Dropout(0.2))\nmodel.add(Activation('relu'))\n\n# We project onto a single unit output layer, and squash it with a sigmoid:\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.summary()\n\n# plot_model(model, to_file='model.png', show_shapes=True)\n# Image(filename = 'model.png')\n# SVG(model_to_dot(model).create(prog='dot', format='svg'))",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_2 (Embedding) (None, 400, 50) 250000 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 400, 50) 0 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 398, 250) 37750 \n_________________________________________________________________\nglobal_max_pooling1d_2 (Glob (None, 250) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 250) 62750 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 250) 0 \n_________________________________________________________________\nactivation_3 (Activation) (None, 250) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 1) 251 \n_________________________________________________________________\nactivation_4 (Activation) (None, 1) 0 \n=================================================================\nTotal params: 350,751\nTrainable params: 350,751\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"# Train the model\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n verbose=1)",
"Train on 25000 samples, validate on 25000 samples\nEpoch 1/4\n25000/25000 [==============================] - 14s - loss: 0.4467 - acc: 0.7687 - val_loss: 0.2826 - val_acc: 0.8811\nEpoch 2/4\n25000/25000 [==============================] - 14s - loss: 0.2422 - acc: 0.9028 - val_loss: 0.2732 - val_acc: 0.8860\nEpoch 3/4\n25000/25000 [==============================] - 14s - loss: 0.1713 - acc: 0.9356 - val_loss: 0.2594 - val_acc: 0.8924\nEpoch 4/4\n25000/25000 [==============================] - 14s - loss: 0.1193 - acc: 0.9570 - val_loss: 0.2939 - val_acc: 0.8902\n"
],
[
"# Evaluate model\nscore, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\npreds = model.predict_classes(x_test, batch_size=batch_size)",
"24704/25000 [============================>.] - ETA: 0s"
],
[
"# Save the model weights\nmodel_path = 'data/imdb/models/'\nmodel.save(model_path + 'cnn_model.h5')\nmodel.save_weights(model_path + 'cnn_weights.h5')",
"_____no_output_____"
],
[
"# Confusion Matrix\ncm = confusion_matrix(y_test, preds)\nplot_confusion_matrix(cm, {'negative': 0, 'positive': 1})",
"[[11354 1146]\n [ 1600 10900]]\n"
],
[
"# F1 score\nf1_macro = f1_score(y_test, preds, average='macro') \nf1_micro = f1_score(y_test, preds, average='micro')\n\nprint('Test accuracy:', acc)\nprint('Test score (loss):', score)\nprint('')\nprint('F1 Score (Macro):', f1_macro)\nprint('F1 Score (Micro):', f1_micro)",
"Test accuracy: 0.890160000038\nTest score (loss): 0.293928374405\n\nF1 Score (Macro): 0.8901237644\nF1 Score (Micro): 0.89016\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
]
|
cb734f7ca67774db91ccd42302d4de462772c689 | 406,098 | ipynb | Jupyter Notebook | Statistics_S5_corr and cov.ipynb | SelcukDE/Statistics | 875f3011e3f2d51d5e2df6c8c266039b7f3c88ed | [
"MIT"
]
| null | null | null | Statistics_S5_corr and cov.ipynb | SelcukDE/Statistics | 875f3011e3f2d51d5e2df6c8c266039b7f3c88ed | [
"MIT"
]
| null | null | null | Statistics_S5_corr and cov.ipynb | SelcukDE/Statistics | 875f3011e3f2d51d5e2df6c8c266039b7f3c88ed | [
"MIT"
]
| null | null | null | 480.589349 | 325,960 | 0.936003 | [
[
[
"# corr and cov\n\n#### https://github.com/SelcukDE",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"temp=[93,84,82,78,98,70]\n\nnumber_of_people=[13,10, 11, 8, 15, 9]",
"_____no_output_____"
],
[
"np.cov(temp, number_of_people)",
"_____no_output_____"
],
[
"np.corrcoef(temp, number_of_people)",
"_____no_output_____"
],
[
"df=sns.load_dataset(\"tips\")",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"df.corr()",
"_____no_output_____"
],
[
"df[\"total_bill\"].corr(df[\"tip\"])",
"_____no_output_____"
],
[
"df.corr(\"spearman\")",
"_____no_output_____"
],
[
"df.corr(\"kendall\")",
"_____no_output_____"
],
[
"plt.scatter(df[\"total_bill\"], df[\"tip\"]);",
"_____no_output_____"
],
[
"sns.heatmap(df.corr(), annot=True, cmap='RdYlGn');",
"_____no_output_____"
],
[
"df2=sns.load_dataset(\"mpg\")",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"df2.drop([\"displacement\"], axis=1, inplace=True)",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"fig=plt.subplots(figsize=(12,7))\nsns.heatmap(df2.corr(), cmap=\"coolwarm\", annot=True);",
"_____no_output_____"
],
[
"sns.pairplot(df2)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7354e059cb3a3d3a54800f7e3e065b1dcf1c1c | 977 | ipynb | Jupyter Notebook | Untitled.ipynb | thomas-marquis/python_env | 60b1bbe7d7f2df227db82b5b90c31a3276913166 | [
"MIT"
]
| null | null | null | Untitled.ipynb | thomas-marquis/python_env | 60b1bbe7d7f2df227db82b5b90c31a3276913166 | [
"MIT"
]
| null | null | null | Untitled.ipynb | thomas-marquis/python_env | 60b1bbe7d7f2df227db82b5b90c31a3276913166 | [
"MIT"
]
| null | null | null | 18.433962 | 63 | 0.497441 | [
[
[
"!poetry add numpy",
"Using version ^1.18.1 for numpy\n\nUpdating dependencies\nResolving dependencies...\n\nWriting lock file\n\n\nPackage operations: 1 install, 0 updates, 0 removals\n\n - Installing numpy (1.18.1)\n"
]
]
]
| [
"code"
]
| [
[
"code"
]
]
|
cb7356f5826381208430cceddf24a64f2aaab358 | 6,541 | ipynb | Jupyter Notebook | 3_Prepare_Data_Part3.ipynb | cloud-commander/face-mask-detection | 823e83da50c924ffa5ab613cc7ec65781cf4d693 | [
"Apache-2.0"
]
| 4 | 2020-07-19T08:45:29.000Z | 2021-08-05T07:41:15.000Z | 3_Prepare_Data_Part3.ipynb | cloud-commander/face-mask-detection | 823e83da50c924ffa5ab613cc7ec65781cf4d693 | [
"Apache-2.0"
]
| null | null | null | 3_Prepare_Data_Part3.ipynb | cloud-commander/face-mask-detection | 823e83da50c924ffa5ab613cc7ec65781cf4d693 | [
"Apache-2.0"
]
| 1 | 2021-08-12T17:43:29.000Z | 2021-08-12T17:43:29.000Z | 26.917695 | 280 | 0.500229 | [
[
[
"\n<a href=\"https://blog.cloudcommander.net\" target=\"_parent\"><img src=\"https://raw.githubusercontent.com/cloud-commander/hexoblog/master/cloud.png\" alt=\"Visit my Blog\">\n</a>\n<br> \n# <span style=\"font-family:Didot; font-size:3em;\"> Cloud Commander </span>\n",
"_____no_output_____"
],
[
"<a href=\"https://colab.research.google.com/github/cloud-commander/face-mask-detection/blob/master/3_Prepare_Data_Part3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\"></a>\n \n<a href=\"https://github.com/cloud-commander/face-mask-detection/blob/master/3_Prepare_Data_Part3.ipynb\" target=\"_parent\"><img src=\"https://img.shields.io/static/v1?logo=GitHub&label=&color=333333&style=flat&message=View%20on%20GitHub\" alt=\"View in GitHub\"></a>\n",
"_____no_output_____"
],
[
"### Import necessary libraries",
"_____no_output_____"
]
],
[
[
"!wget https://raw.githubusercontent.com/cloud-commander/face-mask-detection/master/config/constants.py\nfrom constants import * \n!wget {XML_TO_CSV}\n!wget {PARTITION}\n!pip install wget\n\nfrom xml_to_csv import *\nfrom partition import *\nfrom PIL import Image\nimport os\n#import shutil\nimport wget\n",
"_____no_output_____"
]
],
[
[
"## Prepare dataset ###",
"_____no_output_____"
],
[
"### Connect to Google Drive",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\n\ndrive.mount('/content/drive/')",
"_____no_output_____"
]
],
[
[
"#### Copy dataset and extract\n",
"_____no_output_____"
]
],
[
[
"!cp {DRIVE_DEV}part1-datasetv1.zip /\n!cp {DRIVE_DEV}part2-datasetv1.zip /\n%cd /\n!unzip -o part1-datasetv1.zip\n!unzip -o part2-datasetv1.zip\n%cd /content/",
"_____no_output_____"
]
],
[
[
"## Partitioning the images\n\nWe continue our data preparation by splitting our dataset into training and test sets.\n\nTypically, the ratio is 90%/10%, i.e. 90% of the images are used for training and the rest 10% is maintained for testing, but you can chose whatever ratio suits your needs.\n",
"_____no_output_____"
]
],
[
[
"ratio = 0.1\npartition_dir(DATASET_DIR_UNPREP_IMG, DATASET_DIR_UNPREP_ANNO, DATASET_DIR_PREP_TRAIN_IMG , DATASET_DIR_PREP_TRAIN_ANNO, DATASET_DIR_PREP_TEST_IMG, DATASET_DIR_PREP_TEST_ANNO, ratio) ",
"_____no_output_____"
]
],
[
[
"## XML to CSV\n\nAnd finally we need to generate a CSV file containing all image detail / classes from the individual XML files.",
"_____no_output_____"
],
[
"### Generate CSV files\n\nFor the train and test folders and place result in annotations folder",
"_____no_output_____"
]
],
[
[
"generate_csv(DATASET_DIR_PREP_TRAIN_ANNO, DATASET_DIR_PREP_RECORD )\ngenerate_csv(DATASET_DIR_PREP_TEST_ANNO, DATASET_DIR_PREP_RECORD )",
"_____no_output_____"
]
],
[
[
"## Save",
"_____no_output_____"
]
],
[
[
"!zip -r part3-datasetv1.zip {DATASET_DIR_PREP}\n!gsutil cp part3-datasetv1.zip {DRIVE_DEV}",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb7364b05ec91ce7a75a1aba449dccfcc0c0f231 | 168,972 | ipynb | Jupyter Notebook | src/image_analysis/20190816_O2_long_growth_test/20190816_data_comparison.ipynb | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
]
| 2 | 2020-08-21T04:06:12.000Z | 2022-02-09T07:36:58.000Z | src/image_analysis/20190816_O2_long_growth_test/20190816_data_comparison.ipynb | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
]
| null | null | null | src/image_analysis/20190816_O2_long_growth_test/20190816_data_comparison.ipynb | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
]
| 2 | 2020-04-29T17:43:28.000Z | 2020-09-09T00:20:16.000Z | 162.005753 | 78,972 | 0.855651 | [
[
[
"# Comparison of the data taken with a long adaptation time",
"_____no_output_____"
],
[
"(c) 2019 Manuel Razo. This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT)\n\n---",
"_____no_output_____"
]
],
[
[
"import os\nimport glob\nimport re\n\n# Our numerical workhorses\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\n\n# Import matplotlib stuff for plotting\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib as mpl\n\n# Seaborn, useful for graphics\nimport seaborn as sns\n\n# Import the project utils\nimport sys\nsys.path.insert(0, '../../../')\nimport ccutils\n\n# Magic function to make matplotlib inline; other style specs must come AFTER\n%matplotlib inline\n\n# This enables SVG graphics inline\n%config InlineBackend.figure_format = 'retina'\n\ntmpdir = '../../tmp/'\ndatadir = '../../../data/csv_microscopy/'",
"_____no_output_____"
],
[
"# Set PBoC plotting format\nccutils.viz.set_plotting_style()\n# Increase dpi\nmpl.rcParams['figure.dpi'] = 110",
"_____no_output_____"
]
],
[
[
"## Comparing the data",
"_____no_output_____"
],
[
"For this dataset taken on `20190814` I grew cells overnight on M9 media, the reason being that I wanted to make sure that cells had no memory of every having been in LB.",
"_____no_output_____"
]
],
[
[
"df_long = pd.read_csv('outdir/20190816_O2__M9_growth_test_microscopy.csv',\n comment='#')\n\ndf_long[['date', 'operator', 'rbs', 'mean_intensity', 'intensity']].head()",
"_____no_output_____"
]
],
[
[
"Now the rest of the datasets taken with the laser system",
"_____no_output_____"
]
],
[
[
"# Read the tidy-data frame\nfiles = glob.glob(datadir + '/*IPTG*csv')# + mwc_files\ndf_micro = pd.concat(pd.read_csv(f, comment='#') for f in files if 'Oid' not in f)\n\n## Remove data sets that are ignored because of problems with the data quality\n## NOTE: These data sets are kept in the repository for transparency, but they\n## failed at one of our quality criteria\n## (see README.txt file in microscopy folder)\nignore_files = [x for x in os.listdir('../../image_analysis/ignore_datasets/')\n if 'microscopy' in x]\n# Extract data from these files\nignore_dates = [int(x.split('_')[0]) for x in ignore_files]\n\n# Remove these dates\ndf_micro = df_micro[~df_micro['date'].isin(ignore_dates)]\n\n# Keep only the O2 operator\ndf_micro = df_micro[df_micro.operator == 'O2']\n\ndf_micro[['date', 'operator', 'rbs', 'mean_intensity', 'intensity']].head()",
"/Users/razo/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n This is separate from the ipykernel package so we can avoid doing imports until\n"
]
],
[
[
"Let's now look at the O2 $\\Delta lacI$ strain data. For this we first have to extract the mean autofluorescence value. First let's process the new data.",
"_____no_output_____"
]
],
[
[
"# Define names for columns in dataframe\nnames = ['date', 'IPTG_uM','operator', 'binding_energy',\n 'rbs', 'repressors', 'mean', 'std', 'noise']\n\n# Initialize df_long frame to save the noise\ndf_noise_long = pd.DataFrame(columns=names)\n\n# Extract the mean autofluorescence\nI_auto = df_long[df_long.rbs == 'auto'].intensity.mean()\n\n# Extract the strain fluorescence measurements\nstrain_df_long = df_long[df_long.rbs == 'delta']\n\n# Group df_long by IPTG measurement\ndf_long_group = strain_df_long.groupby('IPTG_uM')\nfor inducer, df_long_inducer in df_long_group:\n # Append the require info\n strain_info = [20190624, 0, df_long_inducer.operator.unique()[0],\n df_long_inducer.binding_energy.unique()[0],\n df_long_inducer.rbs.unique()[0],\n df_long_inducer.repressors.unique()[0],\n (df_long_inducer.intensity - I_auto).mean(), \n (df_long_inducer.intensity - I_auto).std(ddof=1)]\n # Check if the values are negative for very small noise\n if strain_info[int(np.where(np.array(names) == 'mean')[0])] > 0:\n # Compute the noise\n strain_info.append(strain_info[-1] / strain_info[-2])\n # Convert to a pandas series to attach to the df_longframe\n strain_info = pd.Series(strain_info, index=names)\n # Append to the info to the df_long frame\n df_noise_long = df_noise_long.append(strain_info, \n ignore_index=True)\n\ndf_noise_long.head()",
"_____no_output_____"
],
[
"# group by date and by IPTG concentration\ndf_group = df_micro.groupby(['date'])\n\n# Define names for columns in data frame\nnames = ['date', 'IPTG_uM','operator', 'binding_energy',\n 'rbs', 'repressors', 'mean', 'std', 'noise']\n\n# Initialize data frame to save the noise\ndf_noise_delta = pd.DataFrame(columns=names)\nfor date, data in df_group:\n # Extract the mean autofluorescence\n I_auto = data[data.rbs == 'auto'].intensity.mean()\n \n # Extract the strain fluorescence measurements\n strain_data = data[data.rbs == 'delta']\n \n # Group data by IPTG measurement\n data_group = strain_data.groupby('IPTG_uM')\n for inducer, data_inducer in data_group:\n # Append the require info\n strain_info = [date, inducer, data_inducer.operator.unique()[0],\n data_inducer.binding_energy.unique()[0],\n data_inducer.rbs.unique()[0],\n data_inducer.repressors.unique()[0],\n (data_inducer.intensity - I_auto).mean(), \n (data_inducer.intensity - I_auto).std(ddof=1)]\n # Check if the values are negative for very small noise\n if strain_info[int(np.where(np.array(names) == 'mean')[0])] > 0:\n # Compute the noise\n strain_info.append(strain_info[-1] / strain_info[-2])\n # Convert to a pandas series to attach to the dataframe\n strain_info = pd.Series(strain_info, index=names)\n # Append to the info to the data frame\n df_noise_delta = df_noise_delta.append(strain_info, \n ignore_index=True)\n \ndf_noise_delta.head()",
"_____no_output_____"
]
],
[
[
"It seems that the noise is exactly the same for both illumination systems, ≈ 0.4-0.5.\n\nLet's look at the ECDF of single-cell fluorescence values. For all measurements to be comparable we will plot the fold-change distribution. What this means is that we will extract the mean autofluorescence value and we will normalize by the mean intensity of the $\\Delta lacI$ strain.",
"_____no_output_____"
]
],
[
[
"# group laser data by date\ndf_group = df_micro.groupby('date')\n\ncolors = sns.color_palette('Blues', n_colors=len(df_group))\n# Loop through dates\nfor j, (g, d) in enumerate(df_group):\n # Extract mean autofluorescence\n auto = d.loc[d.rbs == 'auto', 'intensity'].mean()\n # Extract mean delta\n delta = d.loc[d.rbs == 'delta', 'intensity'].mean()\n # Keep only delta data\n data = d[d.rbs == 'delta']\n fold_change = (data.intensity - auto) / (delta - auto)\n # Generate ECDF\n x, y = ccutils.stats.ecdf(fold_change)\n # Plot ECDF\n plt.plot(x, y, lw=0, marker='.', color=colors[j],\n alpha=0.3, label='')\n\n## LED\n# Extract mean autofluorescence\nauto_long = df_long.loc[df_long.rbs == 'auto', 'intensity'].mean()\ndelta_long = df_long.loc[df_long.rbs == 'delta', 'intensity'].mean()\n\n# Compute fold-change for delta strain\nfold_change = (df_long[df_long.rbs == 'delta'].intensity - auto_long) /\\\n (delta_long - auto_long)\n# Generate ECDF\nx, y = ccutils.stats.ecdf(fold_change)\n# Plot ECDF\nplt.plot(x, y, lw=0, marker='v', color='red',\n alpha=0.3, label='24 hour', ms=3)\n\n# Add fake plot for legend\nplt.plot([], [], marker='.', color=colors[-1],\n alpha=0.3, label='8 hour', lw=0)\n\n# Label x axis\nplt.xlabel('fold-change')\n\n# Add legend\nplt.legend()\n\n# Label y axis of left plot\nplt.ylabel('ECDF')\n\n# Change limit\nplt.xlim(right=3)\n\nplt.savefig('outdir/ecdf_comparison.png', bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"There is no difference whatsoever. Maybe it is not the memory of LB, but the memory of having been on a lag phase for quite a while.",
"_____no_output_____"
],
[
"## Comparison with theoretical prediction.",
"_____no_output_____"
],
[
"Let's compare these datasets with the theoretical prediction we obtained from the MaxEnt approach.\n\nFirst we need to read the Lagrange multipliers to reconstruct the distribution.",
"_____no_output_____"
]
],
[
[
"# Define directory for MaxEnt data\nmaxentdir = '../../../data/csv_maxEnt_dist/'\n# Read resulting values for the multipliers.\ndf_maxEnt = pd.read_csv(maxentdir + 'MaxEnt_Lagrange_mult_protein.csv')\ndf_maxEnt.head()",
"_____no_output_____"
]
],
[
[
"Now let's define the necessary objects to build the distribution from these constraints obtained with the MaxEnt method.",
"_____no_output_____"
]
],
[
[
"# Extract protein moments in constraints\nprot_mom = [x for x in df_maxEnt.columns if 'm0' in x]\n# Define index of moments to be used in the computation\nmoments = [tuple(map(int, re.findall(r'\\d+', s))) for s in prot_mom]\n\n# Define sample space\nmRNA_space = np.array([0])\nprotein_space = np.arange(0, 1.9E4)\n\n# Extract values to be used\ndf_sample = df_maxEnt[(df_maxEnt.operator == 'O1') &\n (df_maxEnt.repressor == 0) &\n (df_maxEnt.inducer_uM == 0)]\n\n\n# Select the Lagrange multipliers\nlagrange_sample = df_sample.loc[:, [col for col in df_sample.columns \n if 'lambda' in col]].values[0]\n\n# Compute distribution from Lagrange multipliers values\nPp_maxEnt = ccutils.maxent.maxEnt_from_lagrange(mRNA_space, \n protein_space, \n lagrange_sample,\n exponents=moments).T[0]\n\nmean_p = np.sum(protein_space * Pp_maxEnt)",
"_____no_output_____"
]
],
[
[
"Now we can compare both distributions.",
"_____no_output_____"
]
],
[
[
"# Define binstep for plot, meaning how often to plot\n# an entry\nbinstep = 10\n\n## LED\n# Extract mean autofluorescence\nauto_long = df_long.loc[df_long.rbs == 'auto', 'intensity'].mean()\ndelta_long = df_long.loc[df_long.rbs == 'delta', 'intensity'].mean()\n\n# Compute fold-change for delta strain\nfold_change = (df_long[df_long.rbs == 'delta'].intensity - auto_long) /\\\n (delta_long - auto_long)\n# Generate ECDF\nx, y = ccutils.stats.ecdf(fold_change)\n\n# Plot ECDF\nplt.plot(x, y, lw=0, marker='v', color='red',\n alpha=0.3, label='20 hour', ms=3)\n\n# Plot MaxEnt results\nplt.plot(protein_space[0::binstep] / mean_p, np.cumsum(Pp_maxEnt)[0::binstep],\n drawstyle='steps', label='MaxEnt', lw=2)\n\n# Add legend\nplt.legend()\n# Label axis\nplt.ylabel('CDF')\nplt.xlabel('fold-change')\n\nplt.savefig('outdir/maxent_comparison.png', bbox_inches='tight')",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb7367bc14d9aa7f44c71f6f03ca4993e4e7ef21 | 9,574 | ipynb | Jupyter Notebook | bronze/.ipynb_checkpoints/B03_One_Bit-checkpoint.ipynb | QRussia/basics-of-quantum-computing-translate | 2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | bronze/.ipynb_checkpoints/B03_One_Bit-checkpoint.ipynb | QRussia/basics-of-quantum-computing-translate | 2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | bronze/.ipynb_checkpoints/B03_One_Bit-checkpoint.ipynb | QRussia/basics-of-quantum-computing-translate | 2a426aadd7ef17ff8c4f0a1b95702fa52c7eec8f | [
"Apache-2.0",
"CC-BY-4.0"
]
| null | null | null | 44.738318 | 309 | 0.538229 | [
[
[
"<table width=\"100%\"> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"..\\images\\qworld.jpg\" width=\"35%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by Abuzer Yakaryilmaz (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n </td> \n</tr></table>",
"_____no_output_____"
],
[
"<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\vhadamardzero}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\vhadamardone}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $",
"_____no_output_____"
],
[
"<h2> One Bit </h2>\n\n[Watch Lecture](https://youtu.be/kn53Qvl-h28)\n\nIn daily life, we use decimal number system. It is also called base-10 system, because we have 10 digits:\n\n$ 0,~1,~2,~3,~4,~5,~6,~7,~8, \\mbox{ and } 9 $.\n\nIn computer science, on the other hand, the widely used system is binary, which has only two digits:\n\n$ 0 $ and $ 1 $.\n\nBit (or binary digit) is the basic unit of information used in computer science and information theory. \n\nIt can also be seen as the smallest \"useful\" memory unit, which has two states named 0 and 1. \n\nAt any moment, a bit can be in either state 0 or state 1.",
"_____no_output_____"
],
[
"<h3> Four operators </h3>\n\nHow many different operators can be defined on a single bit?\n\n<i>An operator, depending on the current state of the bit, updates the state of bit (the result may be the same state).</i> \n\nWe can apply four different operators to a single bit:\n<ol>\n <li> Identity: $ I(0) = 0 $ and $ I(1) = 1 $ </li>\n <li> Negation: $ NOT(0) = 1 $ and $ NOT(1) = 0 $ </li>\n <li> Constant (Zero): $ ZERO(0) = 0 $ and $ ZERO(1) = 0 $ </li>\n <li> Constant (One): $ ONE(0) = 1 $ and $ ONE(1) = 1 $ </li>\n</ol>\nThe first operator is called IDENTITY, because it does not change the content/value of the bit.\n\nThe second operator is named NOT, because it negates (flips) the value of bit. \n\n<i>Remark that 0 and 1 also refer to Boolean values False and True, respectively, and, False is the negation of True, and True is the negation of False.</i>\n\nThe third (resp., fourth) operator returns a constant value 0 (resp., 1), whatever the input is.",
"_____no_output_____"
],
[
"<h3> Table representation </h3>\n\nWe can represent the transitions of each operator by a table:\n\n$\nI = \\begin{array}{lc|cc} \n & & initial & states \\\\ \n & & \\mathbf{0} & \\mathbf{1} \\\\ \\hline \n final & \\mathbf{0} & \\mbox{goes-to} & \\emptyset \\\\ \n states & \\mathbf{1} & \\emptyset & \\mbox{goes-to} \\end{array} ,\n$\nwhere \n- the header (first row) represents the initial values, and\n- the first column represents the final values.\n\nWe can also define the transitions numerically:\n- we use 1 if there is a transition between two values, and, \n- we use 0 if there is no transition between two values.\n\n$\nI = \\begin{array}{lc|cc} \n & & initial & states \\\\ \n & & \\mathbf{0} & \\mathbf{1} \\\\ \\hline \n final & \\mathbf{0} & 1 & 0 \\\\ \n states & \\mathbf{1} & 0 & 1 \\end{array}\n$",
"_____no_output_____"
],
[
"The values in <b>bold</b> are the initial and final values of the bits. The non-bold values represent the transitions.\n<ul>\n <li> The top-left non-bold 1 represents the transtion $ 0 \\rightarrow 0 $. </li>\n <li> The bottom-right non-bold 1 represents the transtion $ 1 \\rightarrow 1 $. </li> \n <li> The top-right non-bold 0 means that there is no transition from 1 to 0. </li>\n <li> The bottom-left non-bold 0 means that there is no transition from 0 to 1. </li>\n</ul>\nThe reader may think that the values 0 and 1 are representing the transitions as True (On) and False (Off), respectively. \n\nSimilarly, we can represent the other operators as below:\n\n$\nNOT = \\begin{array}{lc|cc} & & initial & states \\\\ & & \\mathbf{0} & \\mathbf{1} \\\\ \\hline final & \\mathbf{0} & 0 & 1 \\\\ \n states & \\mathbf{1} & 1 & 0 \\end{array}\n~~~\nZERO = \\begin{array}{lc|cc} & & initial & states \\\\ & & \\mathbf{0} & \\mathbf{1} \\\\ \\hline final & \\mathbf{0} & 1 & 1 \\\\ \n states & \\mathbf{1} & 0 & 0 \\end{array}\n~~~\nONE = \\begin{array}{lc|cc} & & initial & states \\\\ & & \\mathbf{0} & \\mathbf{1} \\\\ \\hline final & \\mathbf{0} & 0 & 0 \\\\ \n states & \\mathbf{1} & 1 & 1 \\end{array}\n.\n$",
"_____no_output_____"
],
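[
"# Hedged illustration, not part of the original notebook: the four transition tables above\n# can be written as 2x2 matrices acting on one-hot state vectors, state 0 = [1, 0] and\n# state 1 = [0, 1].  An operator is reversible exactly when its matrix is invertible.\nimport numpy as np\n\nI = np.array([[1, 0], [0, 1]])\nNOT = np.array([[0, 1], [1, 0]])\nZERO = np.array([[1, 1], [0, 0]])\nONE = np.array([[0, 0], [1, 1]])\n\nzero = np.array([1, 0])  # state 0\none = np.array([0, 1])   # state 1\n\nprint(NOT.dot(zero))  # [0 1] -> state 1, matching the NOT table\nfor name, op in [('I', I), ('NOT', NOT), ('ZERO', ZERO), ('ONE', ONE)]:\n    print(name, 'reversible:', np.linalg.det(op) != 0)",
"_____no_output_____"
],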
[
"<h3> Task 1 </h3>\n\nConvince yourself with the correctness of each table.",
"_____no_output_____"
],
[
"<h3> Reversibility and Irreversibility </h3>\n\nAfter applying Identity or NOT operator, we can easily determine the initial value by checking the final value. \n<ul>\n <li> In the case of Identity operator, we simply say the same value. </li>\n <li> In the case of NOT operator, we simply say the other value, i.e., if the final value is 0 (resp., 1), then we say 1 (resp., 0). </li>\n</ul>\n\nHowever, we cannot know the initial value by checking the final value after applying ZERO or ONE operator. \n\nBased on this observation, we can classify the operators into two types: <i>Reversible</i> and <i>Irreversible</i>.\n<ul>\n <li> If we can recover the initial value(s) from the final value(s), then the operator is called reversible like Identity and NOT operators. </li>\n <li> If we cannot know the initial value(s) from the final value(s), then the operator is called irreversible like ZERO and ONE operators. </li>\n</ul>\n\n<b> This classification is important, as the quantum evolution operators are reversible </b> (as long as the system is closed).\n\nThe Identity and NOT operators are two fundamental quantum operators.",
"_____no_output_____"
]
]
]
| [
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
cb7375fc2ab0f70fd910a1e4be3b169a635da2bf | 402,617 | ipynb | Jupyter Notebook | sine_sum_plots.ipynb | chapman-phys220-2018f/cw08-forced-collaboration | f21237668cc18bff47fbe506357848a47717fdb7 | [
"MIT"
]
| null | null | null | sine_sum_plots.ipynb | chapman-phys220-2018f/cw08-forced-collaboration | f21237668cc18bff47fbe506357848a47717fdb7 | [
"MIT"
]
| null | null | null | sine_sum_plots.ipynb | chapman-phys220-2018f/cw08-forced-collaboration | f21237668cc18bff47fbe506357848a47717fdb7 | [
"MIT"
]
| null | null | null | 1,320.055738 | 152,805 | 0.954177 | [
[
[
"import sinesum as ss\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"#### Fourier Series of the Step function\n\nIn this Homework assignment, we built a partial series calculator that would give us an approximation of the sign($x$) function. The Method we used to approximate this function is by making use of the Fourier Series of the function.\n\nIt's coefficients are given by the integral (Well, more than likely approximately since I haven't done the calculation in a sec):\n$$ a_n = \\frac{1}{T} \\int_{0}^{T/2}\\sin(\\frac{2 \\pi nx}{T})dx $$\n\nWhere $L$ is the length of the interval of approximation around the origin. Using these coefficients we find that we can rewrite the sign($x$) function as:\n$$ sign(x) = \\sum_{n=1}^{\\infty} \\frac{4}{\\pi} \\frac{1}{2n-1} \\sin\\left(\\frac{2 \\pi (2n-1)}{T} t\\right) $$\n\nThe next bit of code is just initializing the arrays that we will use to plot these partial sums, and will define the functions we will use to plot them.",
"_____no_output_____"
]
],
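[
[
"# Hedged, standalone sketch (not the project's sinesum module): it evaluates the N-term\n# partial sum written above at a single point t, assuming period T, so the series can be\n# sanity-checked independently of ss.Sn.\nimport numpy as np\n\ndef partial_sum(t, T, N):\n    n = np.arange(1, N + 1)\n    return (4.0 / np.pi) * np.sum(np.sin(2 * np.pi * (2 * n - 1) * t / T) / (2 * n - 1))\n\n# For 0 < t < T/2 the sum should approach sign(t) = 1 as N grows:\nprint(partial_sum(0.25 * 2 * np.pi, 2 * np.pi, 100))",
"_____no_output_____"
]
],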
[
[
"T = 2*np.pi\nF1Array = ss.Snarray(T,1)\nF3Array = ss.Snarray(T,3)\nF5Array = ss.Snarray(T,5)\nF10Array = ss.Snarray(T,10)\nF30Array = ss.Snarray(T,30)\nF100Array = ss.Snarray(T,100)\nFuncArray = ss.farray(T)\nTime = ss.timespace(T)\n\n\ndef lowNplot():\n \"\"\"args: none\n returns: null\n This function is used to plot the Fourier partial sums up to 5 against the sign function\n It should be used when after all of the arrays have been created\"\"\"\n fig = plt.figure(figsize = (8,12))\n\n a = plt.axes()\n a.plot(Time, F1Array, 'b.-', label=\"S_1\")\n a.plot(Time, F3Array, 'k.-', label = \"S_3\")\n a.plot(Time, F5Array, 'g.-', label=\"S_5\")\n a.plot(Time, FuncArray, 'r', label=\"Function being approximated\")\n a.set(xlabel = 't', ylabel = 'f(t)')\n\n a.legend()\n\n plt.show()\n\ndef highNplot():\n \"\"\"args: none\n returns: null\n This function is used to plot the Fourier partial sums from 10 to 100 against the sign function\n It should be used when after all of the arrays have been created\"\"\"\n fig = plt.figure(figsize = (8,12))\n\n a = plt.axes()\n a.plot(Time, F10Array, 'g.-', label = \"S_10\")\n a.plot(Time, F30Array, 'k.-', label=\"S_30\")\n a.plot(Time, F100Array, 'b.-', label=\"S_100\")\n a.plot(Time, FuncArray, 'r', label=\"Function being approximated\")\n a.set(xlabel = 't', ylabel = 'f(t)')\n\n a.legend()\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"We will first consider the case when our arbitrary parameter $\\alpha$ is 1.\n\nThe plot in this case shows us the values of the sum on $[-\\pi,\\pi]$",
"_____no_output_____"
]
],
[
[
"lowNplot()",
"_____no_output_____"
]
],
[
[
"As can be seen, the sinusoids are being summed to something that looks closer and closer like our step function.",
"_____no_output_____"
]
],
[
[
"highNplot()",
"_____no_output_____"
]
],
[
[
"While this plot is definitely messier looking, the difference area between the distance of the step function and the partial sums is getting closer to 0, which is how we define this series converging to the function we wish to approximate.\n\nNow let's see how well the approximation does for a specified point $t$",
"_____no_output_____"
]
],
[
[
"t_1 = 0.01*T\nt_2 = 0.25*T\nt_3 = 0.49*T\n\nKterms = np.array([1,3,5,10,30,100])\nplotdom = np.arange(6)\n\napprox_1 = ss.Sn(T,t_1,Kterms)\napprox_2 = ss.Sn(T,t_2,Kterms)\napprox_3 = ss.Sn(T,t_3,Kterms)\n\nfig = plt.figure(figsize = (8,12))\n\na = plt.axes()\na.plot(plotdom, approx_1, 'b.-', label = \"t=0.01T\")\na.plot(plotdom, approx_2, 'g.-', label = \"t=0.25T\")\na.plot(plotdom, approx_3, 'r.-', label = \"t=0.49T\")\na.set(xlabel = 'Terms needed', ylabel = 'f(t)')\n\na.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"As we can see, this graph shows that at the specified $t$ values the sum of the sin functions does in fact approach 1 at the specified values, it is interesting to note that $\\sin(x)$ having a point of symmetry around $x = \\frac{\\pi}{2}$ means that t_1 and t_3 have exactly the same approximation. This makes the lies look like they overlap. Under this I have included a graph which shows the rate at which the sum converges to 1 (it takes forever.)",
"_____no_output_____"
]
],
[
[
"t_0 = 0.1*T\n\nplotdom = np.arange(300)\n\napprox_0 = ss.Sn(T,t_0,plotdom)\n\nfig = plt.figure(figsize = (8,12))\n\na = plt.axes()\na.plot(plotdom, approx_0, 'b.-', label = \"Approximation\")\na.set(xlabel = 'Terms needed', ylabel = 'f(t)')\n\na.legend()\n\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb738e30760b2b86c61df0785d8b0cddab2d3161 | 3,557 | ipynb | Jupyter Notebook | 7181_02_code_ACC_SB/Diving into lambda expressions.ipynb | PacktPublishing/Funtional-Programming-in-Python | 4181b057c55d42010d32bd37e57fbc02f7ce1283 | [
"MIT"
]
| 2 | 2018-07-09T16:16:46.000Z | 2021-10-04T04:52:52.000Z | 7181_02_code_ACC_SB/Diving into lambda expressions.ipynb | PacktPublishing/Funtional-Programming-in-Python | 4181b057c55d42010d32bd37e57fbc02f7ce1283 | [
"MIT"
]
| null | null | null | 7181_02_code_ACC_SB/Diving into lambda expressions.ipynb | PacktPublishing/Funtional-Programming-in-Python | 4181b057c55d42010d32bd37e57fbc02f7ce1283 | [
"MIT"
]
| 6 | 2018-01-24T02:48:46.000Z | 2019-01-26T17:10:38.000Z | 19.02139 | 174 | 0.491144 | [
[
[
"## A simple procedural function\n\nIn procedural programming, functions are defined with `def` statements.",
"_____no_output_____"
]
],
[
[
"from math import sqrt\n\n\ndef p_pythagoras(x, y):\n \n return sqrt(x**2 + y**2)\n\np_pythagoras(1, 1)",
"_____no_output_____"
]
],
[
[
"## A simple `lambda` function\n\nIn functional programming, we can use `lambda` expressions for the same purposes.",
"_____no_output_____"
]
],
[
[
"l_pythagoras = lambda x, y: sqrt(x**2 + y**2)\nl_pythagoras(1,1)",
"_____no_output_____"
]
],
[
[
"## Recursion requires a name\n\nFunctions created with `lambda` expressions can be nameless. But for a function to call itself, it needs a name. In such cases, a `def` statement may be more intuitive.",
"_____no_output_____"
]
],
[
[
"def f_factorial(n):\n \n return 1 if n == 0 else n*f_factorial(n-1)\n\n\nf_factorial(3)",
"_____no_output_____"
],
[
"l_factorial = lambda n: 1 if n == 0 else n*l_factorial(n-1)\nl_factorial(3)",
"_____no_output_____"
]
],
[
[
"## When lambda's are convenient\n\n`lambda` expressions are very convenient if you quickly need a short function, for example to pass as an argument to `map()` or `filter()`.",
"_____no_output_____"
]
],
[
[
"l = [0, 1, 2, 3, 4]\nlist(map(lambda x: x*2, l))",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb73ae2c3e7c72867440b334a5b5ee0caf6b163b | 11,965 | ipynb | Jupyter Notebook | objects_with_python/object_descriptive_statistical_functions.ipynb | BautistaDavid/Proyectos_ClaseML | 2e9ed84e35d4df5f1beb8834a19238a5938dc099 | [
"MIT"
]
| null | null | null | objects_with_python/object_descriptive_statistical_functions.ipynb | BautistaDavid/Proyectos_ClaseML | 2e9ed84e35d4df5f1beb8834a19238a5938dc099 | [
"MIT"
]
| null | null | null | objects_with_python/object_descriptive_statistical_functions.ipynb | BautistaDavid/Proyectos_ClaseML | 2e9ed84e35d4df5f1beb8834a19238a5938dc099 | [
"MIT"
]
| null | null | null | 39.750831 | 400 | 0.489678 | [
[
[
"<a href=\"https://colab.research.google.com/github/BautistaDavid/Proyectos_ClaseML/blob/corte_1/Proyecto2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install wooldridge",
"_____no_output_____"
]
],
[
[
"## **Proyecto 2**\n\nSe va a construir un objeto para poder hallar algunas estadísticas descriptivas de un vector numérico de datos. Después de esto se va aprobar la clase construida usando algunas variables de la base de datos “wage1” del libro de wooldridge. ",
"_____no_output_____"
]
],
[
[
"class estadisticos:\n \n def __init__(self,lst):\n self.lst = lst\n return None\n\n def media(self): \n return sum(self.lst) / len(self.lst)\n\n def test(self):\n return self.media()\n \n def desv_est(self):\n return (sum([(i - self.media())**2 for i in self.lst]) / (len(self.lst)-1))**0.5 \n\n def varianza(self):\n return self.desv_est()**2\n\n def mediana(self):\n return sorted(self.lst)[len(self.lst)//2] if len(self.lst)%2 != 0 else (sorted(self.lst)[len(self.lst)//2-1]+sorted(self.lst)[len(self.lst)//2]) / 2\n \n def curtosis(self):\n return sum([(i - self.media())**4 for i in self.lst]) / (len(self.lst)*self.desv_est()**4)\n\n def simetria(self):\n return sum([(i -self.media())**3 for i in self.lst]) / (len(self.lst)*self.desv_est()**3)\n \n def coeficiente_variacion(self):\n return self.desv_est() / abs(self.media()) ",
"_____no_output_____"
]
],
[
[
"Se va a probar la clase para las variables ```wage``` y ```educ``` de la bse de datos ```wage1```. A continuación se despliega información sobre estos datos. ",
"_____no_output_____"
]
],
[
[
"import wooldridge as wd\nwd.data(\"wage1\",description = True) #Accedemos a información sobre las variables.",
"name of dataset: wage1\nno of variables: 24\nno of observations: 526\n\n+----------+---------------------------------+\n| variable | label |\n+----------+---------------------------------+\n| wage | average hourly earnings |\n| educ | years of education |\n| exper | years potential experience |\n| tenure | years with current employer |\n| nonwhite | =1 if nonwhite |\n| female | =1 if female |\n| married | =1 if married |\n| numdep | number of dependents |\n| smsa | =1 if live in SMSA |\n| northcen | =1 if live in north central U.S |\n| south | =1 if live in southern region |\n| west | =1 if live in western region |\n| construc | =1 if work in construc. indus. |\n| ndurman | =1 if in nondur. manuf. indus. |\n| trcommpu | =1 if in trans, commun, pub ut |\n| trade | =1 if in wholesale or retail |\n| services | =1 if in services indus. |\n| profserv | =1 if in prof. serv. indus. |\n| profocc | =1 if in profess. occupation |\n| clerocc | =1 if in clerical occupation |\n| servocc | =1 if in service occupation |\n| lwage | log(wage) |\n| expersq | exper^2 |\n| tenursq | tenure^2 |\n+----------+---------------------------------+\n\nThese are data from the 1976 Current Population Survey, collected by\nHenry Farber when he and I were colleagues at MIT in 1988.\n"
],
[
"import numpy as np\nimport pandas as pd \ndatos = wd.data(\"wage1\")\n\nfor i in [\"wage\",\"educ\"]:\n stats = estadisticos(datos[i])\n mensaje = f\"Error\"\n funciones = {\"Media\":[stats.media(),np.mean(datos[i])],\n \"Mediana\":[stats.mediana(),np.median(datos[i])],\n \"Desviación estandar\":[stats.desv_est(),np.std(datos[i])],\n \"Varianza\":[stats.varianza(),np.var(datos[i])],\n \"Coeficiente Variación\":[stats.coeficiente_variacion(),np.std(datos[i])/np.mean(datos[i])]}\n print(f\"Estadisticas {i}\") \n for key,value in funciones.items():\n try:\n assert value[0]==value[1],mensaje\n print(f\"{key} = {value[1]} --> Coincide con Numpy\")\n except:\n print(f\"{key} = {value[1]} --> No Coincide con Numpy, hay una diferencia de {abs(value[1]-value[0])}\")\n print(\"___________________________________________\\n \")\n",
"Estadisticas wage\nMedia = 5.896102674787035 --> Coincide con Numpy\nMediana = 4.650000095367432 --> Coincide con Numpy\nDesviación estandar = 3.6895738135753655 --> No Coincide con Numpy, hay una diferencia de 0.0035122081380483117\nVarianza = 13.612954925821066 --> No Coincide con Numpy, hay una diferencia de 0.02592943795394298\nCoeficiente Variación = 0.6257648513063981 --> No Coincide con Numpy, hay una diferencia de 0.0005956830014286485\n___________________________________________\n \nEstadisticas educ\nMedia = 12.562737642585551 --> Coincide con Numpy\nMediana = 12.0 --> Coincide con Numpy\nDesviación estandar = 2.766388999158773 --> No Coincide con Numpy, hay una diferencia de 0.00263340278492441\nVarianza = 7.652908094666679 --> No Coincide con Numpy, hay una diferencia de 0.014576967799365015\nCoeficiente Variación = 0.220205903988728 --> No Coincide con Numpy, hay una diferencia de 0.0002096201369355677\n___________________________________________\n \n"
]
],
[
[
"### **Analizis resultados:** \n\n\n* **Variable wage**: \n\n| Estadistico |Valor |\n|--------------|----------|\n| Media | 5.89 |\n| Mediana | 4.65 |\n| Desviación.E | 3.69 |\n| Varianza | 13.64 | \n| Coeficiente.V| 0.63 |\n<Br>\n\nLas ganancia promedio por hora (salario) de la muesta de población americana para el año de 1976 era de 5.89 USD, notese que esta cantidad de dinero o superior era percibida por menos de la mitad de los individuos puesto que la mediana de los datos es de apenas 4.65 USD. \n\nAsi mismo se puede comentar que en promedio las ganancias por hora de los individuos se desvian 3.693 USD de la media, lo cual muestra un alto nivel de variación en los salarios que se pueden confirmar interpretando el coeficiente de variación de los datos que haciende hasta un 0.62. \n\nEsto se puede succeder debido alguna fijacion de salario minimo que afecta a parte de los individuos mientras que el resto con mejores puestos logran obtener salarios mas altos. \n<Br>\n\n* **Variable educ**:\n\n| Estadistico |Valor |\n|--------------|-----------|\n| Media | 12.56 |\n| Mediana | 12 |\n| Desviación.E | 2.77 |\n| Varianza | 7.67 | \n| Coeficiente.V| 0.22 |\n<Br>\n\nLos años de educacuin promedio de la muestra de población americana para el año 1976 es de 12.65 años, notese que la mitad de los individuos por lo menos contaban con 12 años de educación.\n\nPor otro lado se puede notar como en promedio los años de educacion de los individuos se desviaban de la media en 2.77 años, lo cual nos indica un bajo nivel de dispersion, esto puede ser causado por el hecho de que algunos individuos lograron terminar sus ciclos de primaria y bachillerato mientras que otros lograron adicionar algunos años de educacion extra en estudios profesionales.\n\nAun asi el coeficiente de variación de los años de educación es apenas de 0.22, lo que tambien es un indicativo de que el sistema educativo y gobierno del momento lograba que la diferencia en años de educación de los individuos no fuera tan alta.\n\n\n\n\n\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
cb73b69529894e0eb6c7819fd431bd425bc32a96 | 609,707 | ipynb | Jupyter Notebook | model.ipynb | praveenbandaru/CarND-Behavioral-Cloning-P3 | e6cd242f544e9e48a1fd2d0e6e661815a81710e8 | [
"MIT"
]
| null | null | null | model.ipynb | praveenbandaru/CarND-Behavioral-Cloning-P3 | e6cd242f544e9e48a1fd2d0e6e661815a81710e8 | [
"MIT"
]
| null | null | null | model.ipynb | praveenbandaru/CarND-Behavioral-Cloning-P3 | e6cd242f544e9e48a1fd2d0e6e661815a81710e8 | [
"MIT"
]
| null | null | null | 958.658805 | 559,500 | 0.945328 | [
[
[
"import os\nimport csv\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport sklearn\nfrom random import shuffle",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\nconfig.log_device_placement = True # to log device placement (on which device the operation ran)\n # (nothing gets printed in Jupyter, only if you run it standalone)\nsess = tf.Session(config=config)\nset_session(sess)",
"Using TensorFlow backend.\n"
],
[
"csv_file = './data/driving_log.csv'\npath = './data/' # fill in the path to your training IMG directory",
"_____no_output_____"
],
[
"samples = []\nwith open(csv_file) as csvfile:\n reader = csv.reader(csvfile)\n headers = next(reader)\n for line in reader:\n samples.append(line)\nprint('Done')",
"Done\n"
],
[
"shuffle(samples)",
"_____no_output_____"
],
[
"images = []\nangles = []\nfor batch_sample in samples:\n steering_center = float(batch_sample[3].strip())\n\n # create adjusted steering measurements for the side camera images\n correction = 0.2 # this is a parameter to tune\n steering_left = steering_center + correction\n steering_right = steering_center - correction\n\n # read in images from center, left and right cameras \n img_center = np.array(Image.open(path + batch_sample[0].strip()))\n img_left = np.array(Image.open(path + batch_sample[1].strip()))\n img_right = np.array(Image.open(path + batch_sample[2].strip()))\n\n # add images and angles to data set\n images.extend((img_center, img_left, img_right, np.fliplr(img_center), np.fliplr(img_left), np.fliplr(img_right)))\n angles.extend((steering_center, steering_left, steering_right, -steering_center, -steering_left, -steering_right))\n break",
"_____no_output_____"
],
[
"print(len(images))\nimages = np.array(images)\nprint(images.shape)\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\n%matplotlib inline\n\ni=1\nfig = plt.figure(figsize=(15, 5))\nfor image in images:\n plt.subplot(2, 3, i) \n plt.imshow(image)\n plt.axis('off')\n i+=1\nfig.subplots_adjust(wspace=0.1, hspace=0.1)\nplt.show()\n\nprint(angles)",
"6\n(6, 160, 320, 3)\n"
],
[
"from sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2) ",
"_____no_output_____"
],
[
"def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n steering_center = float(batch_sample[3].strip())\n\n # create adjusted steering measurements for the side camera images\n correction = 0.2 # this is a parameter to tune\n steering_left = steering_center + correction\n steering_right = steering_center - correction\n \n # read in images from center, left and right cameras \n img_center = np.array(Image.open(path + batch_sample[0].strip()))\n img_left = np.array(Image.open(path + batch_sample[1].strip()))\n img_right = np.array(Image.open(path + batch_sample[2].strip()))\n \n # add images and angles to data set\n images.extend((img_center, img_left, img_right, np.fliplr(img_center), np.fliplr(img_left), np.fliplr(img_right)))\n angles.extend((steering_center, steering_left, steering_right, -steering_center, -steering_left, -steering_right)) \n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)",
"_____no_output_____"
],
[
"# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)",
"_____no_output_____"
],
[
"from keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\nmodel.add(Convolution2D(24,5,5,subsample=(2,2),activation='relu'))\nmodel.add(Convolution2D(36,5,5,subsample=(2,2),activation='relu'))\nmodel.add(Convolution2D(48,5,5,subsample=(2,2),activation='relu'))\nmodel.add(Convolution2D(64,3,3,subsample=(1,1),activation='relu'))\nmodel.add(Convolution2D(64,3,3,subsample=(1,1),activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dropout(0.5))\nmodel.add(Dense(100))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(50))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))",
"WARNING:tensorflow:From D:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\Anaconda3_64\\envs\\carnd-term1-gpu\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:1047: calling reduce_prod (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\n"
],
[
"print(model.summary())",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\nlambda_1 (Lambda) (None, 160, 320, 3) 0 lambda_input_1[0][0] \n____________________________________________________________________________________________________\ncropping2d_1 (Cropping2D) (None, 65, 320, 3) 0 lambda_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_1 (Convolution2D) (None, 31, 158, 24) 1824 cropping2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_2 (Convolution2D) (None, 14, 77, 36) 21636 convolution2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_3 (Convolution2D) (None, 5, 37, 48) 43248 convolution2d_2[0][0] \n____________________________________________________________________________________________________\nconvolution2d_4 (Convolution2D) (None, 3, 35, 64) 27712 convolution2d_3[0][0] \n____________________________________________________________________________________________________\nconvolution2d_5 (Convolution2D) (None, 1, 33, 64) 36928 convolution2d_4[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 2112) 0 convolution2d_5[0][0] \n____________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 2112) 0 flatten_1[0][0] \n____________________________________________________________________________________________________\ndense_1 (Dense) (None, 100) 211300 dropout_1[0][0] \n____________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 100) 0 dense_1[0][0] \n____________________________________________________________________________________________________\ndense_2 (Dense) (None, 50) 5050 dropout_2[0][0] \n____________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 50) 0 dense_2[0][0] \n____________________________________________________________________________________________________\ndense_3 (Dense) (None, 10) 510 dropout_3[0][0] \n____________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 10) 0 dense_3[0][0] \n____________________________________________________________________________________________________\ndense_4 (Dense) (None, 1) 11 dropout_4[0][0] \n====================================================================================================\nTotal params: 348,219\nTrainable params: 348,219\nNon-trainable params: 0\n____________________________________________________________________________________________________\nNone\n"
],
[
"model.compile(optimizer='adam', loss='mse')\nhistory_object = model.fit_generator(train_generator, samples_per_epoch=len(train_samples)*6, validation_data=validation_generator, nb_val_samples=len(validation_samples)*6, nb_epoch=5)",
"WARNING:tensorflow:From D:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\Anaconda3_64\\envs\\carnd-term1-gpu\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:1108: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\nEpoch 1/5\n76674/76674 [==============================] - 152s - loss: 0.1428 - val_loss: 0.1035\nEpoch 2/5\n76674/76674 [==============================] - 141s - loss: 0.1255 - val_loss: 0.1018\nEpoch 3/5\n76674/76674 [==============================] - 141s - loss: 0.1223 - val_loss: 0.1000\nEpoch 4/5\n76674/76674 [==============================] - 141s - loss: 0.1204 - val_loss: 0.0975\nEpoch 5/5\n76674/76674 [==============================] - 143s - loss: 0.1184 - val_loss: 0.0929\n"
],
[
"model.save('model.h5')",
"_____no_output_____"
],
[
"from keras.utils.visualize_util import plot\nplot(model, to_file='model.png')\nplot(model,show_shapes=True, to_file='modelwithshapes.png')",
"_____no_output_____"
],
[
"from IPython.display import SVG\nfrom keras.utils.visualize_util import model_to_dot\n\nSVG(model_to_dot(model).create(prog='dot', format='svg'))",
"_____no_output_____"
],
[
"### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\n#plt.ylim(ymin=0)\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()",
"dict_keys(['loss', 'val_loss'])\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb73b7df704ff700e1fc8867586ffea4b7192e5f | 432,959 | ipynb | Jupyter Notebook | python/tutorials/dcgan_create_images.ipynb | yash1/mxnet-notebooks | a2795a0cdc9e111b42b60fa6b1a4e6219df72afc | [
"Apache-2.0"
]
| 8 | 2017-09-26T20:33:16.000Z | 2020-09-14T02:01:20.000Z | python/tutorials/dcgan_create_images.ipynb | kamalmehra/mxnet-notebooks | a2795a0cdc9e111b42b60fa6b1a4e6219df72afc | [
"Apache-2.0"
]
| null | null | null | python/tutorials/dcgan_create_images.ipynb | kamalmehra/mxnet-notebooks | a2795a0cdc9e111b42b60fa6b1a4e6219df72afc | [
"Apache-2.0"
]
| 4 | 2017-06-14T12:38:31.000Z | 2020-02-08T14:04:50.000Z | 426.560591 | 60,588 | 0.923427 | [
[
[
"# DCGAN - Create Images from Random Numbers!",
"_____no_output_____"
],
[
"### Generative Adversarial Networks\nEver since Ian Goodfellow and colleagues [introduced the concept of Generative Adversarial Networks (GANs)](https://arxiv.org/abs/1406.2661), GANs have been a popular topic in the field of AI. GANs are an application of unsupervised learning - you don't need labels for your dataset in order to train a GAN. \n\nThe GAN framework composes of two neural networks: a generator network and a discriminator network. \n\nThe generator's job is to take a set of random numbers and produce data (such as images or text).\n\nThe discriminator then takes in that data as well as samples of that data from a dataset and tries to determine if is \"fake\" (created by the generator network) or \"real\" (from the original dataset). \n\nDuring training, the two networks play a game against each other. \nThe generator tries to create realistic data, so that it can fool the discriminator into thinking that the data it generated is from the original dataset. At the same time, the discriminator tries to not be fooled - it learns to become better at determining if data is real or fake. \n\nSince the two networks are fighting in this game, they can be seen as as adversaries, which is where the term \"Generative Adverserial Network\" comes from. \n\n### Deep Convolutional Generative Adversarial Networks\nThis notebook takes a look at Deep Convolutional Generative Adversarial Networks (DCGAN), which combines Convolutional Neural Networks (CNNs) ands GANs. \n\nWe will create a DCGAN that is able to create images of handwritten digits from random numbers. \n\nThe tutorial uses the neural net architecture and guidelines outlined in [this paper](https://arxiv.org/abs/1511.06434), and the MNIST dataset.\n",
"_____no_output_____"
],
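[
"For reference, the adversarial game described above is usually written as a minimax objective. This is the standard formulation from the original GAN paper, included here as background rather than anything specific to this notebook's code:\n\n$$\\min_G \\max_D V(D,G) = \\mathbb{E}_{x \\sim p_{data}(x)}[\\log D(x)] + \\mathbb{E}_{z \\sim p_z(z)}[\\log(1 - D(G(z)))]$$\n\nHere $D(x)$ is the discriminator's estimate of the probability that $x$ came from the real dataset, and $G(z)$ is the data the generator produces from the random vector $z$. The discriminator tries to maximize this value while the generator tries to minimize it.",
"_____no_output_____"
],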
[
"## How to Use This Tutorial",
"_____no_output_____"
],
[
"You can use this tutorial by executing each snippet of python code in order as it appears in the notebook. \n\nIn this tutorial, we will train DCGAN on MNIST which will ultimately produces two neural networks:\n- The first net is the \"generator\" and creates images of handwritten digits from random numbers.\n\n\n- The second net is the \"discriminator\" and determines if the image created by the generator is real (a realistic looking image of handwritten digits) or fake (an image that doesn't look like it came from the original dataset). \n\nApart from creating a DCGAN, you'll also learn:\n\n- How to manipulate and iterate through batches images that you can feed into your neural network.\n\n\n- How to create a custom MXNet data iterator that generates random numbers from a normal distribution.\n\n\n- How to create a custom training process in MXNet, using lower level functions from the [MXNet Module API](http://mxnet.io/api/python/module.html) such as `.bind()` `.forward()` and `.backward()`. The training process for a DCGAN is more complex than many other neural net's, so we need to use these functions instead of using the higher level `.fit()` function. \n\n\n- How to visualize images as they are going through the training process",
"_____no_output_____"
],
[
"## Prerequisites",
"_____no_output_____"
],
[
"This notebook assumes you're familiar with the concept of CNN's and have implemented one in MXNet. If you haven't, check out [this tutorial](https://github.com/dmlc/mxnet-notebooks/blob/master/python/tutorials/mnist.ipynb), which walks you through implementing a CNN in MXNet. You should also be familiar with the concept of logistic regression. \n\nHaving a basic understanding for MXNet data iterators helps, since we'll create a custom Data Iterator to iterate though random numbers as inputs to our generator network. Take a look at [this tutorial](https://github.com/dmlc/mxnet-notebooks/blob/master/python/basic/data.ipynb) for a better understanding of how MXNet `DataIter` works.\n\nThis example is designed to be trained on a single GPU. Training this network on CPU can be slow, so it's recommended that you use a GPU for training. ",
"_____no_output_____"
],
[
"To complete this tutorial, you need:\n\n- [MXNet](http://mxnet.io/get_started/setup.html#overview)\n- [Python 2.7](https://www.python.org/download/releases/2.7/), and the following libraries for Python: \n - [Numpy](http://www.numpy.org/) - for matrix math\n - [OpenCV](http://opencv.org/) - for image manipulation\n - [Scikit-learn](http://scikit-learn.org/) - to easily get our dataset\n - [Matplotlib](https://matplotlib.org/) - to visualize our output",
"_____no_output_____"
],
[
"## The Data",
"_____no_output_____"
],
[
"We need two pieces of data to train our DCGAN:\n1. Images of handwritten digits from the MNSIT dataset\n2. Random numbers from a normal distribution\n\nOur generator network will use the random numbers as the input to produce images of handwritten digits, and out discriminator network will use images of handwritten digits from the MNIST dataset to determine if images produced by our generator are realistic.\n\nWe are going to use the python library, scikit-learn, to get the MNIST dataset. Scikit-learn comes with a function that gets the dataset for us, which we will then manipulate to create our training and testing inputs. ",
"_____no_output_____"
],
[
"The MNIST dataset contains 70,000 images of handwritten digits. Each image is 28x28 pixels in size. \n\n\nTo create random numbers, we're going to create a custom MXNet data iterator, which will returns random numbers from a normal distribution as we need then. ",
"_____no_output_____"
],
[
"## Prepare the Data",
"_____no_output_____"
],
[
"### 1. Preparing the MNSIT dataset\nLet's start by preparing our handwritten digits from the MNIST dataset. We import the fetch_mldata function from scikit-learn, and use it to get the MNSIT dataset. Notice that it's shape is 70000x784. This contains the 70000 images on every row and 784 pixels of each image in the columns of each row. Each image is 28x28 pixels, but has been flattened so that all 784 images are represented in a single list.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import fetch_mldata\nmnist = fetch_mldata('MNIST original')\nmnist.data.shape",
"_____no_output_____"
]
],
[
[
"Next, we'll randomize the handwritten digits by using numpy to create random permutations on the dataset on our rows (images). We'll then reshape our dataset from 70000x786 to 70000x28x28, so that every image in our dataset is arranged into a 28x28 grid, where each cell in the grid represents 1 pixel of the image. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\n#Use a seed so that we get the same random permutation each time\nnp.random.seed(1)\np = np.random.permutation(mnist.data.shape[0])\nX = mnist.data[p]\nX = X.reshape((70000, 28, 28))",
"_____no_output_____"
]
],
[
[
"Since the DCGAN that we're creating takes in a 64x64 image as the input, we'll use OpenCV to resize the each 28x28 image to 64x64 images:",
"_____no_output_____"
]
],
[
[
"import cv2\nX = np.asarray([cv2.resize(x, (64,64)) for x in X])",
"_____no_output_____"
]
],
[
[
"Each pixel in our 64x64 image is represented by a number between 0-255, that represents the intensity of the pixel. However, we want to input numbers between -1 and 1 into our DCGAN, as suggested by the research paper. To rescale our pixels to be in the range of -1 to 1, we'll divide each pixel by (255/2). This put our images on a scale of 0-2. We can then subtract by 1, to get them in the range of -1 to 1.",
"_____no_output_____"
]
],
[
[
"X = X.astype(np.float32)/(255.0/2) - 1.0",
"_____no_output_____"
]
],
[
[
"Ultimately, images are inputted into our neural net from a 70000x3x64x64 array, and they are currently in a 70000x64x64 array. We need to add 3 channels to our images. Typically when we are working with images, the 3 channels represent the red, green, and blue components of each image. Since the MNIST dataset is grayscale, we only need 1 channel to represent our dataset. We will pad the other channels with 0's:",
"_____no_output_____"
]
],
[
[
"X = X.reshape((70000, 1, 64, 64))\nX = np.tile(X, (1, 3, 1, 1))",
"_____no_output_____"
]
],
[
[
"Finally, we'll put our images into MXNet's NDArrayIter, which will allow MXNet to easily iterate through our images during training. We'll also split up them images into a batches, with 64 images in each batch. Every time we iterate, we'll get a 4 dimensional array with size `(64, 3, 64, 64)`, representing a batch of 64 images. ",
"_____no_output_____"
]
],
[
[
"import mxnet as mx\nbatch_size = 64\nimage_iter = mx.io.NDArrayIter(X, batch_size=batch_size)",
"_____no_output_____"
]
],
[
[
"## 2. Preparing Random Numbers",
"_____no_output_____"
],
[
"We need to input random numbers from a normal distribution to our generator network, so we'll create an MXNet DataIter that produces random numbers for each training batch. The `DataIter` is the base class of [MXNet's Data Loading API](http://mxnet.io/api/python/io.html). Below, we create a class called `RandIter` which is a subclass of `DataIter`. If you want to know more about how MXNet data loading works in python, please look at [this notebook](https://github.com/dmlc/mxnet-notebooks/blob/master/python/basic/data.ipynb). We use MXNet's built in `mx.random.normal` function in order to return the normally distributed random numbers every time we iterate. ",
"_____no_output_____"
]
],
[
[
"class RandIter(mx.io.DataIter):\n def __init__(self, batch_size, ndim):\n self.batch_size = batch_size\n self.ndim = ndim\n self.provide_data = [('rand', (batch_size, ndim, 1, 1))]\n self.provide_label = []\n\n def iter_next(self):\n return True\n\n def getdata(self):\n #Returns random numbers from a gaussian (normal) distribution \n #with mean=0 and standard deviation = 1\n return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]",
"_____no_output_____"
]
],
[
[
"When we initalize our `RandIter`, we need to provide two numbers: the batch size and how many random numbers we want to produce a single image from. This number is referred to as `Z`, and we'll set this to 100. This value comes from the research paper on the topic. Every time we iterate and get a batch of random numbers, we will get a 4 dimensional array with shape: `(batch_size, Z, 1, 1)`, which in our example is `(64, 100, 1, 1)`. ",
"_____no_output_____"
]
],
[
[
"Z = 100\nrand_iter = RandIter(batch_size, Z)",
"_____no_output_____"
]
],
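[
[
"#A quick sanity check (an addition to the tutorial, assuming rand_iter from the previous cell):\n#pull one batch of random numbers and confirm it has the (batch_size, Z, 1, 1) shape described above.\nsample = rand_iter.getdata()[0]\nprint(sample.shape) #expected: (64, 100, 1, 1)",
"_____no_output_____"
]
],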
[
[
"## Create the Model",
"_____no_output_____"
],
[
"Our model has two networks that we will train together - the generator network and the disciminator network. \nBelow is an illustration of our generator network:\n",
"_____no_output_____"
],
[
"<img src=\"dcgan-model.png\">\nSource: https://arxiv.org/abs/1511.06434\n\nThe discriminator works exactly the same way but in reverse - using convolutional layers instead of deconvolutional layers to take an image and determine if it is real or fake.\n\nThe DCGAN paper recommends the following best practices for architecting DCGANs:\n\n- Replace any pooling layers with strided convolutions (discriminator) and fractional-strided convolutions (generator).\n- Use batchnorm in both the generator and the discriminator.\n- Remove fully connected hidden layers for deeper architectures.\n- Use ReLU activation in generator for all layers except for the output, which uses Tanh.\n- Use LeakyReLU activation in the discriminator for all layers.\n\nOur model will implement these best practices.",
"_____no_output_____"
],
[
"### The Generator\nLet's start off by defining the generator network:",
"_____no_output_____"
]
],
[
[
"no_bias = True\nfix_gamma = True\nepsilon = 1e-5 + 1e-12\n\nrand = mx.sym.Variable('rand')\n\ng1 = mx.sym.Deconvolution(rand, name='g1', kernel=(4,4), num_filter=1024, no_bias=no_bias)\ngbn1 = mx.sym.BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=epsilon)\ngact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')\n\ng2 = mx.sym.Deconvolution(gact1, name='g2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=512, no_bias=no_bias)\ngbn2 = mx.sym.BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=epsilon)\ngact2 = mx.sym.Activation(gbn2, name='gact2', act_type='relu')\n\ng3 = mx.sym.Deconvolution(gact2, name='g3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=256, no_bias=no_bias)\ngbn3 = mx.sym.BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=epsilon)\ngact3 = mx.sym.Activation(gbn3, name='gact3', act_type='relu')\n\ng4 = mx.sym.Deconvolution(gact3, name='g4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=128, no_bias=no_bias)\ngbn4 = mx.sym.BatchNorm(g4, name='gbn4', fix_gamma=fix_gamma, eps=epsilon)\ngact4 = mx.sym.Activation(gbn4, name='gact4', act_type='relu')\n\ng5 = mx.sym.Deconvolution(gact4, name='g5', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=3, no_bias=no_bias)\ngeneratorSymbol = mx.sym.Activation(g5, name='gact5', act_type='tanh')",
"_____no_output_____"
]
],
[
[
"Our generator image starts with random numbers that will be obtained from the `RandIter` we created earlier, so we created the `rand` variable for this input. \n\nWe then start creating the model starting with a Deconvolution layer (sometimes called 'fractionally strided layer'). We apply batch normalization and ReLU activation after the Deconvolution layer.\n\nWe repeat this process 4 times, applying a `(2,2)` stride and `(1,1)` pad at each Deconvolutional layer, which doubles the size of our image at each layer. By creating these layers, our generator network will have to learn to upsample our input vector of random numbers, `Z` at each layer, so that network output a final image. We also reduce half the number of filters at each layer, reducing dimensionality at each layer. Ultimatley, our output layer is a 64x64x3 layer, representing the size and channels of our image. We use tanh activation instead of relu on the last layer, as recommended by the research on DCGANs. The output of neurons in the final `gout` layer represent the pixels of generated image. \n\nNotice we used 3 parameters to help us create our model: no_bias, fixed_gamma, and epsilon.\nNeurons in our network won't have a bias added to them, this seems to work better in practice for the DCGAN. \nIn our batch norm layer, we set `fixed_gamma=True`, which means `gamma=1` for all of our batch norm layers.\n`epsilon` is a small number that gets added to our batch norm so that we don't end up dividing by zero. By default, CuDNN requires that this number is greater than `1e-5`, so we add a small number to this value, ensuring this values stays small.",
"_____no_output_____"
],
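[
"#A small illustration (an addition to the tutorial) of the shape doubling described above.\n#MXNet symbols expose infer_shape(), so we can ask what each deconvolution block produces when\n#fed the (batch_size, Z, 1, 1) random input; the same kind of check works for the discriminator\n#below by passing data=(batch_size, 3, 64, 64) instead.\nfor name, sym in [('g1', g1), ('g2', g2), ('g3', g3), ('g4', g4), ('g5', g5)]:\n    _, out_shapes, _ = sym.infer_shape(rand=(batch_size, Z, 1, 1))\n    print(name, out_shapes[0]) #spatial size should double: 4, 8, 16, 32, 64",
"_____no_output_____"
],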
[
"### The Discriminator",
"_____no_output_____"
],
[
"Let's now create our discriminator network, which will take in images of handwritten digits from the MNIST dataset and images created by the generator network:",
"_____no_output_____"
]
],
[
[
"data = mx.sym.Variable('data')\n\nd1 = mx.sym.Convolution(data, name='d1', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=128, no_bias=no_bias)\ndact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)\n\nd2 = mx.sym.Convolution(dact1, name='d2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=256, no_bias=no_bias)\ndbn2 = mx.sym.BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=epsilon)\ndact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)\n\nd3 = mx.sym.Convolution(dact2, name='d3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=512, no_bias=no_bias)\ndbn3 = mx.sym.BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=epsilon)\ndact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)\n\nd4 = mx.sym.Convolution(dact3, name='d4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=1024, no_bias=no_bias)\ndbn4 = mx.sym.BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=epsilon)\ndact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)\n\nd5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)\nd5 = mx.sym.Flatten(d5)\n\nlabel = mx.sym.Variable('label')\ndiscriminatorSymbol = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')",
"_____no_output_____"
]
],
[
[
"We start off by creating the `data` variable, which is used to hold our input images to the discriminator.\n\nThe discriminator then goes through a series of 5 convolutional layers, each with a 4x4 kernel, 2x2 stride, and 1x1 pad. These layers half the size of the image (which starts at 64x64) at each convolutional layer. Our model also increases dimensionality at each layer by doubling the number of filters per convolutional layer, starting at 128 filters and ending at 1024 filters before we flatten the output. \n\nAt the final convolution, we flatten the neural net to get one number as the final output of discriminator network. This number is the probability the image is real, as determined by our discriminator. We use logistic regression to determine this probability. When we pass in \"real\" images from the MNIST dataset, we can label these as `1` and we can label the \"fake\" images from the generator net as `0` to perform logistic regression on the discriminator network. ",
"_____no_output_____"
],
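[
"To make the logistic regression step concrete, `LogisticRegressionOutput` applies a sigmoid to the flattened score $s$ and is trained against the standard logistic (binary cross-entropy) loss. The formula below is the textbook definition, included here for reference:\n\n$$L = -\\big[y \\log \\sigma(s) + (1-y) \\log(1 - \\sigma(s))\\big]$$\n\nwhere the label $y$ is `1` for images drawn from MNIST and `0` for images produced by the generator.",
"_____no_output_____"
],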
[
"### Prepare the models using the `Module` API\n\nSo far we have defined a MXNet `Symbol` for both the generator and the discriminator network.\nBefore we can train our model, we need to bind these symbols using the `Module` API, which creates the computation graph for our models. It also allows us to decide how we want to initialize our model and what type of optimizer we want to use. Let's set up `Module` for both of our networks:",
"_____no_output_____"
]
],
[
[
"#Hyperperameters\nsigma = 0.02\nlr = 0.0002\nbeta1 = 0.5\nctx = mx.gpu(0)\n\n#=============Generator Module=============\ngenerator = mx.mod.Module(symbol=generatorSymbol, data_names=('rand',), label_names=None, context=ctx)\ngenerator.bind(data_shapes=rand_iter.provide_data)\ngenerator.init_params(initializer=mx.init.Normal(sigma))\ngenerator.init_optimizer(\n optimizer='adam',\n optimizer_params={\n 'learning_rate': lr,\n 'beta1': beta1,\n })\nmods = [generator]\n\n# =============Discriminator Module=============\ndiscriminator = mx.mod.Module(symbol=discriminatorSymbol, data_names=('data',), label_names=('label',), context=ctx)\ndiscriminator.bind(data_shapes=image_iter.provide_data,\n label_shapes=[('label', (batch_size,))],\n inputs_need_grad=True)\ndiscriminator.init_params(initializer=mx.init.Normal(sigma))\ndiscriminator.init_optimizer(\n optimizer='adam',\n optimizer_params={\n 'learning_rate': lr,\n 'beta1': beta1,\n })\nmods.append(discriminator)",
"_____no_output_____"
]
],
[
[
"First, we create `Modules` for our networks and then bind the symbols that we've created in the previous steps to our modules. \n\nWe use `rand_iter.provide_data` as the `data_shape` to bind our generator network. This means that as we iterate though batches of data on the generator `Module`, our `RandIter` will provide us with random numbers to feed our `Module` using it's `provide_data` function.\n\nSimilarly, we bind the discriminator `Module` to `image_iter.provide_data`, which gives us images from MNIST from the `NDArrayIter` we had set up earlier, called `image_iter`. \n\nNotice that we're using the `Normal` initialization, with the hyperparameter `sigma=0.02`. This means our weight initializations for the neurons in our networks will random numbers from a Gaussian (normal) distribution with a mean of 0 and a standard deviation of 0.02. \n\nWe also use the adam optimizer for gradient decent. We've set up two hyperparameters, `lr` and `beta1` based on the values used in the DCGAN paper. We're using a single gpu, `gpu(0)` for training.\n\n",
"_____no_output_____"
],
[
"### Visualizing Our Training\n\nBefore we train the model, let's set up some helper functions that will help visualize what our generator is producing, compared to what the real image is:",
"_____no_output_____"
]
],
[
[
"from matplotlib import pyplot as plt\n\n#Takes the images in our batch and arranges them in an array so that they can be\n#Plotted using matplotlib\ndef fill_buf(buf, num_images, img, shape):\n width = buf.shape[0]/shape[1]\n height = buf.shape[1]/shape[0]\n img_width = (num_images%width)*shape[0]\n img_hight = (num_images/height)*shape[1]\n buf[img_hight:img_hight+shape[1], img_width:img_width+shape[0], :] = img\n\n#Plots two images side by side using matplotlib\ndef visualize(fake, real):\n #64x3x64x64 to 64x64x64x3\n fake = fake.transpose((0, 2, 3, 1))\n #Pixel values from 0-255\n fake = np.clip((fake+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)\n #Repeat for real image\n real = real.transpose((0, 2, 3, 1))\n real = np.clip((real+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)\n \n #Create buffer array that will hold all the images in our batch\n #Fill the buffer so to arrange all images in the batch onto the buffer array\n n = np.ceil(np.sqrt(fake.shape[0]))\n fbuff = np.zeros((int(n*fake.shape[1]), int(n*fake.shape[2]), int(fake.shape[3])), dtype=np.uint8)\n for i, img in enumerate(fake):\n fill_buf(fbuff, i, img, fake.shape[1:3])\n rbuff = np.zeros((int(n*real.shape[1]), int(n*real.shape[2]), int(real.shape[3])), dtype=np.uint8)\n for i, img in enumerate(real):\n fill_buf(rbuff, i, img, real.shape[1:3])\n \n #Create a matplotlib figure with two subplots: one for the real and the other for the fake\n #fill each plot with our buffer array, which creates the image\n fig = plt.figure()\n ax1 = fig.add_subplot(2,2,1)\n ax1.imshow(fbuff)\n ax2 = fig.add_subplot(2,2,2)\n ax2.imshow(rbuff)\n plt.show()",
"_____no_output_____"
]
],
[
[
"## Fit the Model",
"_____no_output_____"
],
[
"Training the DCGAN is a complex process that requires multiple steps.\n\nTo fit the model, for every batch of data in our dataset:\n\n1. Use the `Z` vector, which contains our random numbers to do a forward pass through our generator. This outputs the \"fake\" image, since it's created from our generator.\n\n2. Use the fake image as the input to do a forward and backwards pass through the discriminator network. We set our labels for our logistic regression to `0` to represent that this is a fake image. This trains the discriminator to learn what a fake image looks like. We save the gradient produced in backpropogation for the next step.\n\n3. Do a forwards and backwards pass through the discriminator using a real image from our dataset. Our label for logistic regression will now be `1` to represent real images, so our discriminator can learn to recognize a real image.\n\n4. Update the discriminator by adding the result of the gradient generated during backpropogation on the fake image with the gradient from backpropogation on the real image. \n\n5. Now that the discriminator has been updated for the this batch, we still need to update the generator. First, do a forward and backwards pass with the same batch on the updated discriminator, to produce a new gradient. Use the new gradient to do a backwards pass\n\n\nHere's the main training loop for our DCGAN:",
"_____no_output_____"
]
],
[
[
"# =============train===============\nprint('Training...')\nfor epoch in range(1):\n image_iter.reset()\n for i, batch in enumerate(image_iter):\n #Get a batch of random numbers to generate an image from the generator\n rbatch = rand_iter.next()\n #Forward pass on training batch\n generator.forward(rbatch, is_train=True)\n #Output of training batch is the 64x64x3 image\n outG = generator.get_outputs()\n \n #Pass the generated (fake) image through the discriminator, and save the gradient\n #Label (for logistic regression) is an array of 0's since this image is fake\n label = mx.nd.zeros((batch_size,), ctx=ctx)\n #Forward pass on the output of the discriminator network\n discriminator.forward(mx.io.DataBatch(outG, [label]), is_train=True)\n #Do the backwards pass and save the gradient\n discriminator.backward()\n gradD = [[grad.copyto(grad.context) for grad in grads] for grads in discriminator._exec_group.grad_arrays]\n \n #Pass a batch of real images from MNIST through the discriminator\n #Set the label to be an array of 1's because these are the real images\n label[:] = 1\n batch.label = [label]\n #Forward pass on a batch of MNIST images\n discriminator.forward(batch, is_train=True)\n #Do the backwards pass and add the saved gradient from the fake images to the gradient \n #generated by this backwards pass on the real images\n discriminator.backward()\n for gradsr, gradsf in zip(discriminator._exec_group.grad_arrays, gradD):\n for gradr, gradf in zip(gradsr, gradsf):\n gradr += gradf\n #Update gradient on the discriminator \n discriminator.update()\n\n #Now that we've updated the discriminator, let's update the generator\n #First do a forward pass and backwards pass on the newly updated discriminator\n #With the current batch\n discriminator.forward(mx.io.DataBatch(outG, [label]), is_train=True)\n discriminator.backward()\n #Get the input gradient from the backwards pass on the discriminator,\n #and use it to do the backwards pass on the generator\n diffD = discriminator.get_input_grads()\n generator.backward(diffD)\n #Update the gradients on the generator\n generator.update()\n \n #Increment to the next batch, printing every 50 batches\n i += 1\n if i % 50 == 0:\n print('epoch:', epoch, 'iter:', i)\n print\n print(\" From generator: From MNIST:\")\n\n visualize(outG[0].asnumpy(), batch.data[0].asnumpy())",
"Training...\n('epoch:', 0, 'iter:', 50)\n\n From generator: From MNIST:\n"
]
],
[
[
"Here we have our GAN being trained and we can visualize the progress that we're making as our networks train. After every 25 iterations, we're calling the `visualize` function that we created earlier, which creates the visual plots during training. \n\nThe plot on our left is what our generator created (the fake image) in the most recent iteration. The plot on the right is the original (real) image from the MNIST dataset that was inputted to the discriminator on the same iteration. \n\nAs training goes on the generator becomes better at generating realistic images. You can see this happening since images on the left become closer to the original dataset with each iteration. \n\n\n## Summary\n\nWe've now sucessfully used Apache MXNet to train a Deep Convolutional GAN using the MNIST dataset. \n\nAs a result, we've created two neural nets: a generator, which is able to create images of handwritten digits from random numbers, and a discriminator, which is able to take an image and determine if it is an image of handwritten digits. \n\nAlong the way, we've learned how to do the image manipulation and visualization that's associted with training deep neural nets. We've also learned how to some of MXNet's advanced training functionality to fit our model.\n\n## Acknowledgements\nThis tutorial is based on [MXNet DCGAN codebase](https://github.com/dmlc/mxnet/blob/master/example/gan/dcgan.py), the [original paper on GANs](https://arxiv.org/abs/1406.2661), as well as [this paper](https://arxiv.org/abs/1511.06434) on deep convolutional GANs.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb73baedce2594c35a5ab04c1620c0dea63c72ee | 39,499 | ipynb | Jupyter Notebook | src/JuMP/JuMP_example.ipynb | marcmartinezruiz/MarcMartinez_thesis | 3a6079e8a1734bd709e0cdd4571c08124326e320 | [
"Zlib",
"MIT"
]
| null | null | null | src/JuMP/JuMP_example.ipynb | marcmartinezruiz/MarcMartinez_thesis | 3a6079e8a1734bd709e0cdd4571c08124326e320 | [
"Zlib",
"MIT"
]
| null | null | null | src/JuMP/JuMP_example.ipynb | marcmartinezruiz/MarcMartinez_thesis | 3a6079e8a1734bd709e0cdd4571c08124326e320 | [
"Zlib",
"MIT"
]
| null | null | null | 26.227756 | 832 | 0.484291 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb73bb8045577d691e507b74d09cebb86ae36a2d | 506,770 | ipynb | Jupyter Notebook | patent_sim_data_notebook.ipynb | ryanwhalen/patent_similarity_data | 97fad0fbbcfbcf9e7a4d0cc7d207a8701c754897 | [
"MIT"
]
| 10 | 2020-04-20T19:05:29.000Z | 2022-03-12T22:09:47.000Z | patent_sim_data_notebook.ipynb | ryanwhalen/patent_similarity_data | 97fad0fbbcfbcf9e7a4d0cc7d207a8701c754897 | [
"MIT"
]
| 2 | 2021-08-11T02:08:11.000Z | 2022-03-15T01:08:58.000Z | patent_sim_data_notebook.ipynb | ryanwhalen/patent_similarity_data | 97fad0fbbcfbcf9e7a4d0cc7d207a8701c754897 | [
"MIT"
]
| 2 | 2021-11-25T13:14:33.000Z | 2022-03-24T13:00:15.000Z | 287.28458 | 47,288 | 0.917671 | [
[
[
"# Download Patent DB & Adding Similarity Data",
"_____no_output_____"
],
[
"The similarity data on its own provides data on patent doc2vec vectors, and some pre-calculated similarity scores. However, it is much more useful in conjunction with a dataset containing other patent metadata. To achieve this it is useful to download a patent dataset and join it with the similarity data.\n\nThere are a number of sources of patent data, if you have a working dataset already it may be easiest to join the similarity data to your own dataset. If however, you do not have a local dataset you can easily download the data from <a href=\"http://www.patentsview.org/download/\">Patentsview</a>\n\nPatentsview offers a lot of data on their bulk download page. For ease of downloading, I have created a Python script that will take care of parsing all those URLs, downloading the CSV files, and reading them into a SQLite database. If you want a local version of the patent data, I recommend you use that script (available <a href = \"https://github.com/ryanwhalen/patentsview_data_download\">here</a>). Download the 'patentsview_download.py' file to the same folder you have this iPython notebook in and run the code below. Note that downloading may take a significant amount of time. So, run the script using the code below and then go make a cup of coffee. Then go to bed, do whatever you want to do over the course of the next couple of days, and then come back and check up on it.",
"_____no_output_____"
]
],
[
[
"%run ./patentsview_download.py",
"_____no_output_____"
]
],
[
[
"Once you've run the script above, you'll have a local database called 'patent_db.sqlite.' If you want a GUI to check out the contents, I recommend <a href=\"https://sqlitestudio.pl/\">SQLite Studio</a> as a nice open-source option.\n\nThe next step is to add the similarity tables to your database. We'll run a separate python script to do so. ",
"_____no_output_____"
]
],
[
[
"%run ./write_sim_data_to_db.py",
"_____no_output_____"
]
],
[
[
"# Initial Similarity Explorations",
"_____no_output_____"
],
[
"Everything from here on out assumes that you're using the SQLIte database as constructed above. If you've chosen to marry the similarity data to your own dataset, you'll need to adapt the below as required.\n\nFirst, let's import a few packages and connect to the DB.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport sqlite3\nimport seaborn as sns\nimport numpy as np\nimport random\nimport gensim\nfrom matplotlib import pyplot as plt\nimport networkx as nx\nimport itertools\nimport os\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy import stats\nfrom collections import defaultdict\nimport json\nimport csv\n\ndb_path ='/mnt/BigDisk1/patent_db_20191231/' #file path to your db file here\n\nconn = sqlite3.connect(db_path+'patent_db.sqlite')\ncur = conn.cursor()\ncur2 = conn.cursor()",
"_____no_output_____"
]
],
[
[
"Let's make a pandas dataframe containing the similarity scores between citing/cited patents and the date the citations were made. Note that this may take a few moments, but once the dataframe has loaded working with it should be relatively quick provided your machine has sufficient memory.",
"_____no_output_____"
]
],
[
[
"df = pd.read_sql_query('''SELECT cite_similarity.similarity,\npatent.date FROM cite_similarity \nJOIN patent ON cite_similarity.patent_id = patent.id''', conn)",
"_____no_output_____"
]
],
[
[
"Let's have a quick look at the dataframe to see what we've loaded",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"### Plotting the similarity distribution\nPlotting the distribution of similarity scores for all citations shows that most patents tend to cite to other somewhat similar patents, but that there is also substantial variation.",
"_____no_output_____"
]
],
[
[
"sns.distplot(df['similarity'])",
"_____no_output_____"
]
],
[
[
"We saw above that citing/cited patents have an average similarity of about 0.26. How do we know how to interpret that number? Well, one way is to compare citing/cited similarity with the similarity scores we would expect to see between random patents. \n\nThe pre-calculated similarity dataset doesn't contain all pairwise similarity scores, so random pairs are unlikely to have a pre-calculated score. We'll need some code that can take two patent numbers, find their vectors and return the similarity score.\n\n",
"_____no_output_____"
]
],
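[
[
"As a reminder of what the score means (this is the standard definition, stated here for reference): for two patent vectors $u$ and $v$, cosine similarity is\n\n$$\\cos(u, v) = \\frac{u \\cdot v}{\\lVert u \\rVert \\, \\lVert v \\rVert}$$\n\nso scores near 1 indicate patents whose doc2vec representations point in nearly the same direction, while scores near 0 indicate largely unrelated texts.",
"_____no_output_____"
]
],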
[
[
"def patent_pair_sim(patent1, patent2):\n '''takes 2 patent numbers, finds their doc2vec vectors and returns their cosine similarity'''\n v1 = cur.execute('''SELECT vector FROM doc2vec WHERE patent_id = ?''',[patent1]).fetchone()\n v2 = cur.execute('''SELECT vector FROM doc2vec WHERE patent_id = ?''',[patent2]).fetchone()\n \n if v1 == None or v2 == None: #if either patent has no pre-calculated vector, return None\n return None\n \n v1 = json.loads(v1[0])\n v2 = json.loads(v2[0])\n\n sim = float(cosine_similarity([v1],[v2])[0])\n return sim",
"_____no_output_____"
]
],
[
[
"Let's try that similarity calculting function out. Feel free to tweak the below patent numbers if there's a pair you're interested in comparing.",
"_____no_output_____"
]
],
[
[
"print(patent_pair_sim('9000000','9000001'))",
"0.1648873633936564\n"
]
],
[
[
"To do some sanity checks, let's compare the similarity of patents randomly paired on various criteria. The CPC codes are a handy place to start. The code below will compare the similarity score distributions for patents which share the same Section (highest level), class (second highest level), or Subclass (third highest level) as their primary categorization. We would expect that patents sharing lower-level CPC classifications will have more in common with one another than those that do not.",
"_____no_output_____"
]
],
[
[
"def match_on_cpc(patent, level):\n '''takes a patent number and returns a second patent number\n that shares the same cpc group codes'''\n if level == 'subclass':\n group = cur.execute('''SELECT group_id FROM cpc_current WHERE\n sequence = '0' and patent_id = ?''',[patent]).fetchone()\n if group is None:\n return None\n group = group[0] \n match = cur.execute('''SELECT patent_id FROM cpc_current WHERE\n group_id = ? ORDER BY RANDOM() LIMIT 1''',[group]).fetchone()\n match = match[0]\n \n if level == 'section':\n section = cur.execute('''SELECT section_id FROM cpc_current\n WHERE sequence = '0' and patent_id = ?''',[patent]).fetchone() \n if section is None:\n return None\n section = section[0]\n \n match = cur.execute('''SELECT patent_id FROM cpc_current WHERE\n section_id = ? ORDER BY RANDOM() LIMIT 1''',[section]).fetchone()\n match = match[0]\n \n if level == 'class':\n class_id = cur.execute('''SELECT subsection_id FROM cpc_current\n WHERE sequence = '0' and patent_id = ?''',[patent]).fetchone()\n if class_id is None:\n return None\n class_id = class_id[0] \n \n match = cur.execute('''SELECT patent_id FROM cpc_current WHERE\n subsection_id = ? ORDER BY RANDOM() LIMIT 1''',[class_id]).fetchone()\n match = match[0]\n \n return match\n\ndef get_cpc_match_sims(n, level):\n '''returns n random pairwise similarities where the pairs\n share the same primary cpc classification at the hierarchical\n level identicated'''\n patents = cur2.execute('''SELECT id FROM patent ORDER BY RANDOM()''') \n \n sims = []\n \n for p in patents:\n p = p[0]\n if not p.isdigit():\n continue\n match = match_on_cpc(p, level)\n if match == None or match == p:\n continue\n sim = patent_pair_sim(p,match)\n if sim == None:\n continue\n sims.append(sim)\n if len(sims) == n:\n return sims",
"_____no_output_____"
]
],
[
[
"We can use those functions to get similarity scores for each level of the CPC categorization. This can take some time and requires proper indexing on the DB to work well.",
"_____no_output_____"
]
],
[
[
"n = 1000\n\nsection_match_sims = get_cpc_match_sims(n, level='section')\n\nclass_match_sims = get_cpc_match_sims(n, level='class')\n\nsubclass_match_sims = get_cpc_match_sims(n, level='subclass')",
"_____no_output_____"
]
],
[
[
"For good measure, we can also compare with randomly paired patents. We would expect these patents to have the least in common with one another.",
"_____no_output_____"
]
],
[
[
"def get_random_pairwise_sims(patents, n):\n '''returns the similarities between n randomly paired patents'''\n sims = []\n while len(sims) < n:\n patent1, patent2 = random.sample(patents,2)\n sim = patent_pair_sim(patent1, patent2)\n if sim is None:\n continue\n sims.append(sim)\n return sims",
"_____no_output_____"
],
[
"patents = cur2.execute('''SELECT id FROM patent ORDER BY RANDOM()''').fetchall()\npatents = [p[0] for p in patents if p[0].isdigit()]\nrandom_sims = get_random_pairwise_sims(patents, n)",
"_____no_output_____"
]
],
[
[
"And now, we can compare each of these types of pairs and how similar they are to one another\n",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(1, figsize=(9, 6))\n\nax = fig.add_subplot(111)\n\nbp = ax.boxplot([random_sims, section_match_sims, class_match_sims, subclass_match_sims])\nax.set_xticklabels(['Random','Section', 'Class', 'Subclass'])\n\nfig.savefig('cpc_sim_comparisons_bopxplots.png', bbox_inches='tight', dpi=300)",
"_____no_output_____"
]
],
[
[
"As you can see, the similarity scores track what we would expect to see. So, random patent pairs are least similar, random pairs of patents sharing the same section are somewhat more similar, while those sharing the same class are yet more similar, and those sharing the same subclass are even more similar. As we can see below, all of these differences are statistically significant.",
"_____no_output_____"
]
],
[
[
"print('Random '+str(np.mean(random_sims)))\nprint('Section '+str(np.mean(section_match_sims)))\nt = stats.ttest_ind(random_sims, section_match_sims)\nprint(t)\nprint('Class '+str(np.mean(class_match_sims)))\nt = stats.ttest_ind(section_match_sims, class_match_sims)\nprint(t)\nprint('Subclass '+str(np.mean(subclass_match_sims)))\nt = stats.ttest_ind(class_match_sims, subclass_match_sims)\nprint(t)",
"Random 0.09072925841722823\nSection 0.10190125584549051\nTtest_indResult(statistic=-3.9236767874298595, pvalue=9.015715315838312e-05)\nClass 0.12300700281214517\nTtest_indResult(statistic=-6.649607114439663, pvalue=3.780733420583396e-11)\nSubclass 0.14742690323123964\nTtest_indResult(statistic=-7.003324894438409, pvalue=3.401789601717766e-12)\n"
]
],
[
[
"Now, let's get a list of all of the patents, so that we can select some random pairs to compare.",
"_____no_output_____"
]
],
[
[
"def get_all_patents():\n '''returns a list of all patent numbers in the DB'''\n patents = cur.execute('''SELECT id FROM patent''').fetchall()\n patents = [p[0] for p in patents]\n patents = [p for p in patents if p.isdigit()] #this removes non-numerical patents like design, plant, etc.\n return patents\n\npatents = get_all_patents()",
"_____no_output_____"
]
],
[
[
"Now let's find the scores for some random pairs and plot that distribution.",
"_____no_output_____"
]
],
[
[
"sims = []\n\nfor i in range(10000):\n pair = random.choices(patents, k=2)\n sim = patent_pair_sim(pair[0],pair[1])\n if sim is not None:\n sims.append(sim)\n \n \nsns.distplot(sims)\nprint(np.mean(sims))",
"_____no_output_____"
]
],
[
[
"### Comparing citing/cited similarity to random pairwise similarity\nPlotting the two distributions side-by-side shows that - as we would expect - patents that share a citation relationship tend to be more similar than those that do not.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nsns.kdeplot(df['similarity'], shade=True, ax=ax, label='Citation Similarity', linestyle = '--')\nsns.kdeplot(sims, shade=True, ax = ax, label = 'Random Pairwise Similarity')\nfig = ax.get_figure()\nfig.savefig('cite_vs_random_sim.png', dpi=300)",
"_____no_output_____"
]
],
[
[
"### Citation similarity over time\nPlotting the citation similarity by yearly mean reveals a trend towards decreasing similarity between citing and cited patents.",
"_____no_output_____"
]
],
[
[
"df['date'] = pd.to_datetime(df['date'])\nyearly_means = df.groupby(df.date.dt.year).mean()\nax = yearly_means.plot()\nfig = ax.get_figure()\nfig.savefig('yearly_cite_sim.png', dpi=300)",
"_____no_output_____"
]
],
[
[
"# Patent-Level Similarity Metrics\n\nAs well as identifying global trends, similarity metrics can also provide insight into single inventions. Many patent metrics use citations in combination with metadata such as technical classifications as proxy measures of either knowledge inputs (e.g. Originality) or as a measure of impact (e.g. Generality)(_See_ Trajtenberg, Jaffe and Henderson, 1997). \n\nThe code below can be used to generate a network of forward or backward (e.g. citing or cited) references and their similarity scores. These networks can subsequently be used to define measures of impact or input knowledge diversity. The blue arrows in the diagram below show backwards and forward citation relationships in relation to the focal patent document, while the red arrows represent four different proposed similarity-based citation metrics: (a) knoweldge proximity; (b) knowledge homogeneity; (c) impact proximity; and (d) impact homogeneity.\n\n<img src = \"cite_metrics.png\">",
"_____no_output_____"
],
[
"## Forward and backward distance (knowledge proximity, and impact proximity)\n\nBy comparing a patent with its cited or citing prior art, these measures provide insight into the degree to which an invention draws on distant information, or alternately goes on to impact similar or dissimilar inventions. \n\nKnowledge proximity measures the similarity between the focal patent and its cited backward references.To do so, we calculate the similarities between a patent and its cited prior art, and take the minimum of these similarities as the knowledge proximity score. This provides insight into the degree to which the invention integrates any one piece of particularly distant knowledge. A low knowledge proximity score demonstrates that the invention in question cited to prior art from a very dissimilar field.\n\nImpact proximity is calculated in a simliar manner, but instead measures the similarity between the focal patent and its citing forward references. This provides an impact measure that accounts for the degree to which an invention goes on to influnce technical areas that are similar or dissimilar to its own.\n",
"_____no_output_____"
],
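[
"Stated compactly (using notation introduced here rather than taken from the dataset itself), for a focal patent $p$ with doc2vec vector $v_p$:\n\n$$KnowledgeProximity(p) = \\min_{q \\in Cited(p)} \\cos(v_p, v_q) \\qquad ImpactProximity(p) = \\min_{q \\in Citing(p)} \\cos(v_p, v_q)$$\n\nwhere $Cited(p)$ is the set of patents that $p$ cites and $Citing(p)$ is the set of patents that cite $p$ (restricted, in the code below, to citing patents granted within ten years of the focal patent).",
"_____no_output_____"
],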
[
"For some of our measures, we'll want to both know a patent's granting year and the years of other related patents. The below function will determine the granting year of any patent. Meanwhile, the yearly_max dictionary stores the highest patent number granted in all the years in the dataset.",
"_____no_output_____"
]
],
[
[
"def patent_year(patent):\n '''takes a patent number and returns an integer of the year it was granted'''\n date = cur.execute('''SELECT date FROM patent WHERE id = ?''',[patent]).fetchone()\n year = int(date[0].split('-')[0])\n return year\n\ndef find_yearly_maxes():\n '''returns a dictionary keyed by year, with values for the highest patent number\n granted in that year'''\n yearly_maxes = {}\n years = range(1976,2020)\n for year in years:\n patents = cur.execute('''SELECT id FROM patent \n WHERE strftime('%Y', date) = ?''', [str(year)]).fetchall()\n patents = [p[0] for p in patents]\n patents = [int(p) for p in patents if p.isdigit()]\n yearly_maxes[year] = max(patents)\n return yearly_maxes\n\nyearly_maxes = find_yearly_maxes()",
"_____no_output_____"
],
[
"def prior_art_proximity(patent):\n '''takes a patent number, identifies similarity scores for backwards citations and returns \n the min similarity score - a demonstration of the degree to which the invention draws on distant knowledge'''\n sims = cur.execute('''SELECT similarity FROM cite_similarity WHERE patent_id = ?''',[patent]).fetchall()\n if sims == None:\n return None\n sims = [s[0] for s in sims]\n if len(sims) == 0:\n return None\n return min(sims)\n\ndef impact_proximity(patent):\n '''takes a patent number, identifies similarity scores for forward citations and returns \n the min similarity score - a demonstration of the degree to which the invention has influenced distant areas'''\n year = patent_year(patent)\n \n max_patent = yearly_maxes[year + 10] #the maximum patent number for forward metric comparisons\n \n sims = []\n cites = cur.execute('''SELECT patent_id, similarity FROM cite_similarity WHERE citation_id = ?''',[patent]).fetchall()\n if cites == None:\n return None\n for cite in cites:\n try:\n patent = int(cite[0])\n except:\n continue #skip design, plant and other non numeric patents\n if patent > max_patent: #skip patents granted more than 10-years after focal patent\n continue\n sims.append(cite[1])\n if len(sims) == 0:\n return None\n return min(sims)",
"_____no_output_____"
]
],
[
[
"We'll want to plot our data by year, which the below function will allow us to do.",
"_____no_output_____"
]
],
[
[
"def plot_yearly_means(data, label):\n '''takes dictionary with year keys and mean values and plots change over time'''\n xs = sorted(data.keys())\n ys = [data[x] for x in xs]\n plt.plot(xs,ys)\n plt.legend([label])\n plt.tight_layout()\n plt.savefig(label.replace(' ','')+'.png', dpi=300)\n plt.show()",
"_____no_output_____"
]
],
[
[
"To use the above proximity code and assess potential changes over time, we can use a random smaple of patents. The function below will randomly sample _n_ patents per year and return those patents as a lists in a dictionary keyed by year. To address the truncation in citation data availability, we create two different samples, one to demonstrate the backwards-oriented measures and one to demonstrate the forwards-oriented measures.",
"_____no_output_____"
]
],
[
[
"def random_yearly_sample(n, years):\n '''takes a vector of years and returns a dict of patents with n randomly sampled per year where year is the key'''\n sample = {}\n for year in years:\n patents = cur.execute('''SELECT id FROM patent WHERE strftime('%Y', date) = ? \n ORDER BY RANDOM() LIMIT ?''',[str(year), n]).fetchall()\n patents = [p[0] for p in patents]\n sample[year]=patents\n return sample\n\nbackward_sample = random_yearly_sample(10000,range(1986,2020)) #sample for backward citation metrics\nforward_sample = random_yearly_sample(10000,range(1976,2010)) #sample for forward citation metrics",
"_____no_output_____"
]
],
[
[
"### Prior Art Proximity\n\nWith the sample in hand, we can then calculate the average prior art or impact proximity by year to determine whether there have been changes over time. Note that depending on the size of the sample, this might take some time as it may require many database calls. The cell below will compute the knowledge proximity scores for the random sample we created above to calculate the backwards-focused measures on.",
"_____no_output_____"
]
],
[
[
"data = {}\n\nfor year in backward_sample:\n kp = [prior_art_proximity(i) for i in backward_sample[year]]\n kp = [k for k in kp if k is not None]\n data[year] = np.mean(kp)\n \n\n \nplot_yearly_means(data, 'Prior Art Proximity')",
"_____no_output_____"
]
],
[
[
"### Impact proximity\nNow let's do the same but calculate the forward-oriented impact proximity. ",
"_____no_output_____"
]
],
[
[
"data = {}\n\nfor year in forward_sample:\n kp = [impact_proximity(i) for i in forward_sample[year]]\n kp = [k for k in kp if k is not None]\n data[year] = np.mean(kp)\n \n \nplot_yearly_means(data, 'Impact Proximity')",
"_____no_output_____"
]
],
[
[
"### Co-citing and co-cited similarities\n\nHaving seen the changes in knowledge and impact proximity over time, let us now look to whether or not knowledge homogeneity or impact homogeneity have changed over time. To do so, we will again use our random sample of yearly patents. This time however, because knowledge homogeneity and impact homogeneity require comparing co-cited or co-citing prior art, we calculate the pairwise similarities between all of the citing or cited prior art for the focal patent. The functions below will perform these calculations and return the minimum similarity between all of the patents cited by the focal patent (knowledge homogeneity) or all of the patents that cite the focal patent (impact homogeneity).",
"_____no_output_____"
]
],
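[
[
"In the same notation as above (again introduced here for clarity), the two homogeneity measures are defined over pairs of cited or citing patents rather than between the focal patent and its citations:\n\n$$PriorArtHomogeneity(p) = \\min_{q \\neq r \\in Cited(p)} \\cos(v_q, v_r) \\qquad ImpactHomogeneity(p) = \\min_{q \\neq r \\in Citing(p)} \\cos(v_q, v_r)$$\n\nThe functions below also allow the mean or median of these pairwise similarities to be returned instead of the minimum.",
"_____no_output_____"
]
],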
[
[
"def impact_homogeneity(patent, metric = 'min'):\n '''takes patent number and returns the minimum similarity \n between co-citing prior art (similar to generality)\n currently implemented to only work for patents we have pre-modeled vectors for\n \n By default returns minium similarity between citing patents, \n passing metric = mean or median will return those instead ''' \n\n year = patent_year(patent)\n max_patent = yearly_maxes[year + 10] #the maximum patent number for forward metric comparisons\n \n sims = []\n cites = cur.execute('''SELECT patent_id FROM uspatentcitation WHERE citation_id = ?''',[patent]).fetchall()\n \n if len(cites) < 2: #undefined if fewer than 2 forward cites\n return None\n \n cites = [c[0] for c in cites if c[0].isdigit()] #slice patent numbers out of returned tuples\n cites = [c for c in cites if int(c) < max_patent]\n \n for p1, p2 in itertools.combinations(cites, 2):\n try: #not all patents will have vectors, so use this try loop here\n sim = patent_pair_sim(p1, p2)\n sims.append(sim)\n except:\n continue\n sims = [s for s in sims if s is not None]\n if len(sims) < 1:\n return None\n if metric == 'min':\n return min(sims)\n if metric == 'mean':\n return np.mean(sims)\n if metric == 'median':\n return np.median(sims)\n \ndef prior_art_homogeneity(patent, metric = 'min'):\n '''takes patent number and returns the minimum similarity \n between co-cited prior art (similar to originality)\n \n By default returns minium similarity between citing patents, \n passing metric = mean or median will return those instead ''' \n\n sims = []\n cites = cur.execute('''SELECT citation_id FROM cite_similarity WHERE patent_id = ?''''',[patent]).fetchall()\n if len(cites) < 2:\n return None\n cites = [c[0] for c in cites]\n for p1, p2 in itertools.combinations(cites, 2):\n sim = patent_pair_sim(p1, p2)\n sims.append(sim)\n sims = [s for s in sims if s is not None]\n if len(sims) < 1:\n return None\n if metric == 'min':\n return min(sims)\n if metric == 'mean':\n return np.mean(sims)\n if metric == 'median':\n return np.median(sims)",
"_____no_output_____"
]
],
[
[
"### Prior Art Homogeneity\n\nNow let's apply the homogeneity analyses on our backward sample for the knowledge homogeneity score:",
"_____no_output_____"
]
],
[
[
"data = {}\n\nfor year in backward_sample:\n kp = [prior_art_homogeneity(patent) for patent in backward_sample[year]]\n kp = [k for k in kp if k is not None]\n data[year] = np.mean(kp)\n\nplot_yearly_means(data, 'Prior Art Homogeneity')",
"_____no_output_____"
]
],
[
[
"### Impact Homogeneity\n\nAnd on forward samples for the impact homogeneity score:",
"_____no_output_____"
]
],
[
[
"data = {}\n\nfor year in forward_sample:\n kp = [impact_homogeneity(patent) for patent in forward_sample[year]]\n kp = [k for k in kp if k is not None]\n data[year] = np.mean(kp)\n\n \nplot_yearly_means(data, 'Impact Homogeneity') ",
"_____no_output_____"
]
],
[
[
"### Changes in technology space\n\nThe above shows both backwards/forward citation similarity and co-cited/co-citing citation similarity have decreased over time. Part of this is likely due to the increasing 'size' of the technological space. As more new inventions are produced, the possible distances between them increases. We can estimate the magnitude of this by randomly sampling patents granted within a given year and plotting their average similarity. If desired, the above raw similarity measures can be adjusted to show their divergence from the similarities we would expect at random. ",
"_____no_output_____"
]
],
[
[
"def patents_by_year(year):\n '''returns a set of utility patents granted in the year passed\n as argument'''\n patents = cur.execute('''SELECT id FROM patent \n WHERE strftime('%Y', date) = ?''', [str(year)]).fetchall()\n patents = [p[0] for p in patents]\n patents = [int(p) for p in patents if p.isdigit()]\n return patents\n\ndata = {}\nyears = range(1976,2019)\nfor year in years:\n patents = patents_by_year(year)\n sims = get_random_pairwise_sims(patents, 10000)\n data[year] = np.mean(sims)\n\nplot_yearly_means(data, 'Technological Space Change')",
"_____no_output_____"
]
],
[
[
"### Similarity by citation type\n\nThe above four patent-level citation measures provide insight into the how inventions are related to the prior art that they cite, and those that go on to cite them. However, one might also be interested in citations as traces of the patent application and examination process. Research has suggested that the citations added by patent examiners are qualitatively different from those added by the patent applicants themselves. We can use the patent similarity data to get a sense of the degree to which this is reflected in the semantic similarity of the cited prior art.\n\nThe below function will return a vector of simiarity scores for a random sample of citations. It takes as an argument either 'cited by examiner' or 'cited by applicant'.",
"_____no_output_____"
]
],
[
[
"def get_sims_by_cite_type(n, cite_type):\n '''takes a citation type (cited by applicant, cited by examiner, or cited by other) \n and returns n random similarity scores between the cited and citing patent'''\n \n cites = cur.execute('''SELECT patent_id, citation_id FROM uspatentcitation \n WHERE category = ? ORDER BY RANDOM() LIMIT ?''', [cite_type, n]).fetchall()\n sims = []\n for cite in cites:\n try:\n sims.append(patent_pair_sim(cite[0], cite[1]))\n except:\n pass #skip combos not in pre-calculated model\n return sims\n \nexaminer_sims = get_sims_by_cite_type(50000, 'cited by examiner')\napplicant_sims = get_sims_by_cite_type(50000, 'cited by applicant')",
"_____no_output_____"
],
[
"examiner_sims = [s for s in examiner_sims if s is not None]\napplicant_sims = [s for s in applicant_sims if s is not None]\n\nfig, ax = plt.subplots()\nsns.kdeplot(examiner_sims, shade=True, ax=ax, label='Examiner')\nsns.kdeplot(applicant_sims, shade=True, ax = ax, label = 'Applicant', linestyle = '--')\nplt.savefig('examiner_applicant_sims'+'.png', dpi=300)\n\nt = stats.ttest_ind(examiner_sims, applicant_sims)\nprint(t)",
"Ttest_indResult(statistic=56.87053490351846, pvalue=0.0)\n"
]
],
[
[
"## Nearest Neighbors\n\nThe patent similarity dataset, also includes data on each patent’s 100 nearest neighbors. These are the 100 patents from the dataset that are closest to the focal patent, and their accompanying similarity scores. These data can be used for a wide variety of analyses, including those that provide perspective on how crowded an invention’s “neighborhood” is. \n\nAs an example, consider the neighborhoods of both litigated and non-litigated patents. To examine whether they differ from one another, we begin with the litigated patent data, and identify the similarity between each litigated patent and its nearest neighbor. We then compare these similarity scores with the similarity between non-litigated patents and their nearest neighbors. Having a very similar nearest neighbor, suggests that the patent in question is in a ‘crowded’ intellectual property space, with perhaps many other competing, blocking, or related patents, whereas having only more distant neighbors suggests an invention is relatively unique. By comparing the distributions of the nearest neighbor similarities for both litigated and non-litigated patents, we can see that, on average, litigated patents tend to have much more similar nearest neighbors than their non-litigated counterparts, and a wider distribution of these scores.",
"_____no_output_____"
]
],
[
[
"def make_litigated_patent_set(path):\n '''uses data file from Schwartz et. al litigated patent dataset, returns a set of\n patent numbers involved in infringement litigation'''\n infile = open(path ,encoding = 'utf-8')\n reader = csv.DictReader(infile)\n \n infringement_litigated_patents = set()\n \n count = 0\n for row in reader:\n patent = row['patent']\n doc_type = row['patent_doc_type']\n case_types = [row['case_type_1'], row['case_type_2'],row['case_type_3']]\n if '1' in case_types and doc_type == 'Patent':\n count += 1\n infringement_litigated_patents.add(patent)\n return infringement_litigated_patents\n \ndef get_nearest_neighbor_sim(patent):\n '''takes a patent number, returns the similarity score for its nearest neighbor\n '''\n sims = cur.execute('''SELECT top_100 FROM most_similar\n WHERE patent_id = ?''',[patent]).fetchone()\n if sims is None:\n return None\n sims = json.loads(sims[0])\n sims = [s[1] for s in sims]\n return max(sims)",
"_____no_output_____"
],
[
"path_to_litigated_dataset = #add path to this dataset file here\n\nlitigated_patents = make_litigated_patent_set(path_to_litigated_dataset)\n\nlitigated_sims = [get_nearest_neighbor_sim(p) for p in litigated_patents]\nlitigated_sims = [s for s in litigated_sims if s is not None]\n\nall_patents = get_all_patents()\n\nrandom_sims = []\n\nwhile len(random_sims) < len(litigated_sims):\n patent = random.choice(all_patents)\n sim = get_nearest_neighbor_sim(patent)\n if sim is not None:\n random_sims.append(sim)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nsns.kdeplot(litigated_sims, shade = 1, color = 'red', label = 'litigated', linestyle='--')\nsns.kdeplot(random_sims, shade = 1, color='blue', label = 'non-litigated')\nplt.savefig('litigated_vs_non_litigated.png', dpi=300)",
"_____no_output_____"
]
],
[
[
"# Inventor-Level Metrics\n\nPatent similarity data can also be used to help understand the career of a given inventor. By locating each of an inventor's inventions within semantic space, one can produce a network of their inventions, measure their average, minimum, and maximum similarity scores, identify clusters, or find their mean invention.\n\nThe below code demonstrates how to identify and visualize the invention networks for four well known tech company CEOs.",
"_____no_output_____"
]
],
[
[
"def make_inventor_net(inventor, save_path = False):\n '''takes inventor ID and returns networkx Graph object containing\n nodes represeting each of his/her inventions with links between them \n weighted by their doc2vec similarity\n \n if save_path is defined will save a graphml file at the designated path\n '''\n inventions = cur.execute('''SELECT patent_id FROM patent_inventor\n WHERE inventor_id = ?''',[inventor]).fetchall()\n g = nx.Graph()\n if len(inventions) < 2:\n return None\n inventions = [i[0] for i in inventions if i[0].isdigit()]\n for p1, p2 in itertools.combinations(inventions, 2):\n sim = patent_pair_sim(p1, p2)\n if sim is None:\n continue\n g.add_edge(p1, p2, weight = sim)\n if save_path != False:\n nx.write_graphml(g, save_path)\n return g\n\ndef make_mst(g):\n '''takes a graph object and returns the minimum spanning tree\n however, defines MST as the maximum sum of edgeweights for a tree\n because the default MST treats weight as distance rather than sim'''\n ng = nx.Graph()\n for edge in g.edges(data=True):\n ng.add_edge(edge[0], edge[1], weight = 1 - edge[2]['weight'])\n ng = nx.minimum_spanning_tree(ng)\n return ng\n\ndef net_stats(g):\n '''takes a nx Graph object and returns least similar score (i.e. the similarity \n between the most dissimilar inventions) and average pairwise similarity'''\n ew = [e[2]['weight'] for e in g.edges(data=True)]\n return round(min(ew),3), round(np.mean(ew), 3)\n\ndef draw_inventor_net(g, firstname, lastname):\n d = dict(g.degree(weight='weight'))\n size = [v * 5 for v in d.values()] #rescale weights for visibility\n least_sim, mean_sim = net_stats(g)\n g = make_mst(g)\n pos = nx.spring_layout(g, iterations = 100)\n fig, ax = plt.subplots()\n nx.draw_networkx_nodes(g, pos, node_size = size,\n node_color = 'darkslategrey')\n nx.draw_networkx_edges(g, pos)\n plt.xticks([])\n plt.yticks([])\n textstr = '\\n'.join((\n r\"$\\bf{\"+firstname+\"}$\"+\" \"+r\"$\\bf{\"+lastname+\"}$\",\n 'Minimum sim=%s' % (least_sim,),\n 'Mean sim=%s' % (mean_sim,)))\n plt.title(textstr)\n plt.tight_layout()\n plt.savefig(firstname+lastname, dpi=300)\n plt.show()",
"_____no_output_____"
]
],
[
[
"The first step is to find the inventor IDs of interest. We can do this by looking through the 'inventor' table of the patent_db. Below are the inventor IDs for four well known tech CEOs. We can use these to plot each of their invention networks.",
"_____no_output_____"
]
],
[
[
"jb_id = '5715399-1'\nsj_id = 'D268584-1'\nmz_id = '7669123-1'\nbg_id = '5552982-2'",
"_____no_output_____"
],
[
"jb = make_inventor_net(jb_id)\ndraw_inventor_net(jb, 'Jeff', 'Bezos')",
"_____no_output_____"
],
[
"sj = make_inventor_net(sj_id) \ndraw_inventor_net(sj, 'Steve', 'Jobs')",
"_____no_output_____"
],
[
"bg = make_inventor_net(bg_id)\ndraw_inventor_net(bg, 'Bill', 'Gates')",
"_____no_output_____"
],
[
"mz = make_inventor_net(mz_id)\ndraw_inventor_net(mz, 'Mark', 'Zuckerberg')",
"_____no_output_____"
]
],
[
[
"These visualized networks show the minimum spanning tree of each inventor's patent similarity network, and some basic statistics. Each of these provides insight into the degree to which an inventor has worked within a single technological domain, or has alternately created a wide variety of dissimilar inventions.",
"_____no_output_____"
],
[
"### Inter-inventor similarity\nJust as we can visualize a given inventor's invention similarity network, we can also compare inventors to one another by identifying their 'mean' invention (i.e. the mean vector of all their invention vectors) and subsequently calcuating the similarity between those.",
"_____no_output_____"
]
],
[
[
"def find_inventor_mean(inventor):\n '''takes inventor ID, finds their patent vectors and returns mean vector'''\n inventions = cur.execute('''SELECT patent_inventor.patent_id,\n doc2vec.vector FROM patent_inventor\n JOIN doc2vec\n ON patent_inventor.patent_id = doc2vec.patent_id\n WHERE inventor_id = ?''''',[inventor]).fetchall()\n inventions = [i[1][1:-1] for i in inventions if i!= None]\n inventions = [i.split(',') for i in inventions]\n for i in range(len(inventions)):\n inventions[i] = [float(i) for i in inventions[i]]\n if len(inventions) < 1:\n return None\n return np.mean(inventions, axis = 0)\n\ndef make_mean_sim_net(means):\n '''takes a list of tuples (node_id, vector) and constructs a network of nodes \n with edges weighted by the similarity between their vectors'''\n g = nx.Graph()\n for i1, i2 in itertools.combinations(means, 2):\n inv1 = i1[0]\n v1 = i1[1]\n inv2 = i2[0]\n v2 = i2[1]\n sim = float(cosine_similarity(v1.reshape(1,-1), v2.reshape(1,-1))[0])\n g.add_edge(inv1, inv2, weight = sim)\n return g\n\ndef plot_inventor_sim_net(g, filename):\n '''takes network of inventors with edges between them weighted by similarity of their mean invention vectors\n plots network'''\n pos = nx.spring_layout(g, iterations = 100)\n nx.draw(g,pos, with_labels = True, node_size = 2000)\n labels = nx.get_edge_attributes(g,'weight')\n nx.draw_networkx_edge_labels(g,pos,edge_labels=labels)\n plt.savefig(filename, dpi=300)\n plt.show()",
"_____no_output_____"
],
[
"sj = ('Jobs', find_inventor_mean(sj_id))\nbg = ('Gates', find_inventor_mean(bg_id))\njb = ('Bezos', find_inventor_mean(jb_id))\nmz = ('Zuckerberg', find_inventor_mean(mz_id))\n\nmean_vectors = [sj, bg, jb, mz]\n\ninter_inv_net = make_mean_sim_net(mean_vectors)\n\nplot_inventor_sim_net(inter_inv_net, 'inventor_net.png')",
"_____no_output_____"
]
],
[
[
"# Team-level metrics\n\nIn addition to providing insight into individual patents or inventors, similarity data can be useful at the team-level to characterize different types of collaborative teams. Some teams have are comprised of members largely from the same or similar disciplines, while others feature more expertise diversity in their makeup.\n\nTo calculate team-level metrics it is often useful to first typify each individual member's expertise by locating their average semantic location (i.e. the average vector of all of their invention vectors). These mid-points can then be used to typify teams—those with large degrees of similarity between their average vectors are made up of members with similar inventing backgrounds, whereas those with little similarity between them have more knowledge-diverse membership.\n\nIn the sample code below, we compare the knowledge diversity of two teams, both inventors on Nest thermostat related patents assigned to Google. The first patent (8,757,507) relates to an easy-to-install thermostat, while the second (9,256,230) relates to scheduling a network-connected thermostat. As we can see from the histogram generated below, the team on the first patent has more concentrated expertise (i.e. generally high similarity scores) whereas the second features more knowledge diversity.",
"_____no_output_____"
]
],
[
[
"def get_inventors(patent):\n '''takes patent_id returns inventor_ids for listed inventors'''\n inventors = cur.execute('''SELECT inventor_id FROM patent_inventor\n WHERE patent_id = ?''',[patent]).fetchall()\n inventors = [i[0] for i in inventors]\n return inventors\n\ndef make_team_network(inventors, save_path =False):\n '''takes a list of inventor IDs, finds mean semantic location for each \n measures distance between each of their means and returns a network \n object w/ inventor nodes and weighted edges between them representing\n the similarity of their average inventions'''\n averages = [(i, find_inventor_mean(i)) for i in inventors]\n g = nx.Graph()\n for i1, i2 in itertools.combinations(averages, 2):\n inv1, v1 = i1[0], i1[1]\n inv2, v2 = i2[0], i2[1]\n if v1 is None or v2 is None:\n continue\n sim = float(cosine_similarity(v1.reshape(1,-1), v2.reshape(1,-1))[0])\n g.add_edge(inv1, inv2, weight = sim)\n if save_path != False:\n nx.write_graphml(g, save_path)\n return g\n\n\ndef plot_degree_dists(g1, label1, g2, label2):\n '''takes new network objects (g1 and g2) and accompanying labels\n plots kde of each network degree distribution'''\n ew1 = [e[2]['weight'] for e in g1.edges(data=True)]\n ew2 = [e[2]['weight'] for e in g2.edges(data=True)]\n print(label1 +' average sim: '+str(np.mean(ew1)))\n print(label2 +' average sim: '+str(np.mean(ew2)))\n fig, ax = plt.subplots()\n sns.kdeplot(ew1, shade = True, ax = ax, label = label1)\n sns.kdeplot(ew2, shade = True, ax = ax, label = label2, linestyle = '--')\n plt.tight_layout()\n plt.savefig(label1.replace(',','')+'.png', dpi = 300)\n \n\nteam_net_1 = make_team_network(get_inventors('8757507'))\nteam_net_2 = make_team_network(get_inventors('9256230'))\n\nplot_degree_dists(team_net_1, '8,757,507', team_net_2, '9,256,230')",
"8,757,507 average sim: 0.8481225504029799\n9,256,230 average sim: 0.782617227702863\n"
]
],
[
[
"# Location and firm-level metrics\n\nBecause it interfaces easily with other patent data, the patent similarity dataset can also be used to assess innovation at the firm or location level. The code below does a simple comparison of the similarity between inventions made by inventors in California, compared with those located in Louisiana. We see that although the distributions are almost identical, inventions originating in Louisiana are somewhat more likely to be similar to one another than those from California. Similar analyses can be performed to compare firms, or with slight modifications to track changes over time at the firm or location level. ",
"_____no_output_____"
]
],
[
[
"def calc_pairwise_state_sims(state, n):\n '''takes a state abbreviation and returns a \n returns a list of n random pairwise similarities between patents granted to inventors associated\n with that state in the db'''\n \n patents = cur.execute('''SELECT patent_id FROM patent_inventor WHERE patent_inventor.inventor_id in (\n SELECT inventor_id FROM location_inventor WHERE location_inventor.location_id in \n (SELECT id FROM location WHERE state = ?)) ORDER BY RANDOM() LIMIT ?''',[state, n]).fetchall()\n patents = [p[0] for p in patents]\n \n sims = []\n while len(sims) < n:\n p1, p2 = random.sample(patents,2)\n sim = patent_pair_sim(p1, p2)\n if sim is not None:\n sims.append(sim)\n return sims\n\n\nCA_sims = calc_pairwise_state_sims('CA', 10000)\nLA_sims = calc_pairwise_state_sims('LA', 10000)\n\nfig, ax = plt.subplots()\nsns.kdeplot(CA_sims, shade=True, ax=ax, label='CA mean = %s' % round(np.mean(CA_sims),4), linestyle = '--')\nsns.kdeplot(LA_sims, shade=True, ax = ax, label = 'LA mean = %s' % round(np.mean(LA_sims), 4))\n\nt = stats.ttest_ind(LA_sims, CA_sims)\nprint(t)\n\nfig.savefig('CA_vs_LA_sim.png', bbox_inches='tight', dpi=300)",
"Ttest_indResult(statistic=14.610251187208295, pvalue=4.27640534194914e-48)\n"
],
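[
"# Sketch only (not from the original text): the same pairwise-similarity idea applied at the\n# firm level. This assumes a patent_assignee table with (patent_id, assignee_id) columns in\n# the same database -- adjust the table and column names to match your schema.\ndef calc_pairwise_firm_sims(assignee_id, n):\n    '''takes an assignee (firm) ID and returns up to n random pairwise similarities\n    between patents assigned to that firm'''\n    patents = cur.execute('''SELECT patent_id FROM patent_assignee\n                             WHERE assignee_id = ? ORDER BY RANDOM() LIMIT ?''', [assignee_id, n]).fetchall()\n    patents = [p[0] for p in patents]\n    sims = []\n    attempts = 0\n    # cap attempts so pairs missing from the pre-calculated model cannot stall the loop\n    while len(sims) < n and len(patents) >= 2 and attempts < n * 10:\n        attempts += 1\n        p1, p2 = random.sample(patents, 2)\n        sim = patent_pair_sim(p1, p2)\n        if sim is not None:\n            sims.append(sim)\n    return sims",
"_____no_output_____"
],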
[
"conn.close()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb73c2979148ee46752fd817f79850cfd272d6fe | 1,991 | ipynb | Jupyter Notebook | integratingOrbits.ipynb | andersdot/gaiaDR2 | 589e5a151952c2a46b90da6632e3c27b70bc1a75 | [
"MIT"
]
| null | null | null | integratingOrbits.ipynb | andersdot/gaiaDR2 | 589e5a151952c2a46b90da6632e3c27b70bc1a75 | [
"MIT"
]
| null | null | null | integratingOrbits.ipynb | andersdot/gaiaDR2 | 589e5a151952c2a46b90da6632e3c27b70bc1a75 | [
"MIT"
]
| null | null | null | 24.580247 | 73 | 0.505776 | [
[
[
"# Some imports we'll need later:\n\n# Third-party\nimport astropy.units as u\nimport astropy.coordinates as coord\nfrom astropy.io import ascii\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Gala\nfrom gala.mpl_style import mpl_style\nplt.style.use(mpl_style)\nimport gala.dynamics as gd\nimport gala.integrate as gi\nimport gala.potential as gp\nfrom gala.units import galactic\n%matplotlib inline",
"_____no_output_____"
],
[
"potential = gp.MilkyWayPotential()",
"_____no_output_____"
],
[
"\n\nicrs = coord.ICRS(ra=coord.Angle('17h 20m 12.4s'),\n dec=coord.Angle('+57° 54′ 55″'),\n distance=76*u.kpc,\n pm_ra_cosdec=0.0569*u.mas/u.yr,\n pm_dec=-0.1673*u.mas/u.yr,\n radial_velocity=-291*u.km/u.s)\n\nicrs_err = coord.ICRS(ra=0*u.deg, dec=0*u.deg, distance=6*u.kpc,\n pm_ra_cosdec=0.009*u.mas/u.yr,\n pm_dec=0.009*u.mas/u.yr,\n radial_velocity=0.1*u.km/u.s)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code"
]
]
|
cb73cc494a75afe9f0210b81e5ec7683225c4091 | 25,210 | ipynb | Jupyter Notebook | 4-Interpretibility/train-explain-model-on-amlcompute-and-deploy.ipynb | hyssh/bert-stack-overflow | bed76cbd8ae43b65f17d326e76519221665a9458 | [
"MIT"
]
| 6 | 2021-05-19T05:59:40.000Z | 2021-11-14T08:01:15.000Z | how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb | bkuriach/MachineLearningNotebooks | 8f89d88def05e10c009e2d9498669936f8ac4d40 | [
"MIT"
]
| null | null | null | how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb | bkuriach/MachineLearningNotebooks | 8f89d88def05e10c009e2d9498669936f8ac4d40 | [
"MIT"
]
| null | null | null | 42.801358 | 468 | 0.596827 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# Train and explain models remotely via Azure Machine Learning Compute and deploy model and scoring explainer\n\n\n_**This notebook illustrates how to use the Azure Machine Learning Interpretability SDK to train and explain a classification model remotely on an Azure Machine Leanrning Compute Target (AMLCompute), and use Azure Container Instances (ACI) for deploying your model and its corresponding scoring explainer as a web service.**_\n\nProblem: IBM employee attrition classification with scikit-learn (train a model and run an explainer remotely via AMLCompute, and deploy model and its corresponding explainer.)\n\n---\n\n## Table of Contents\n\n1. [Introduction](#Introduction)\n1. [Setup](#Setup)\n1. [Run model explainer locally at training time](#Explain)\n 1. Apply feature transformations\n 1. Train a binary classification model\n 1. Explain the model on raw features\n 1. Generate global explanations\n 1. Generate local explanations\n1. [Visualize results](#Visualize)\n1. [Deploy model and scoring explainer](#Deploy)\n1. [Next steps](#Next)",
"_____no_output_____"
],
[
"## Introduction\n\nThis notebook showcases how to train and explain a classification model remotely via Azure Machine Learning Compute (AMLCompute), download the calculated explanations locally for visualization and inspection, and deploy the final model and its corresponding explainer to Azure Container Instances (ACI).\nIt demonstrates the API calls that you need to make to submit a run for training and explaining a model to AMLCompute, download the compute explanations remotely, and visualizing the global and local explanations via a visualization dashboard that provides an interactive way of discovering patterns in model predictions and downloaded explanations, and using Azure Machine Learning MLOps capabilities to deploy your model and its corresponding explainer.\n\nWe will showcase one of the tabular data explainers: TabularExplainer (SHAP) and follow these steps:\n1.\tDevelop a machine learning script in Python which involves the training script and the explanation script.\n2.\tCreate and configure a compute target.\n3.\tSubmit the scripts to the configured compute target to run in that environment. During training, the scripts can read from or write to datastore. And the records of execution (e.g., model, metrics, prediction explanations) are saved as runs in the workspace and grouped under experiments.\n4.\tQuery the experiment for logged metrics and explanations from the current and past runs. Use the interpretability toolkit’s visualization dashboard to visualize predictions and their explanation. If the metrics and explanations don't indicate a desired outcome, loop back to step 1 and iterate on your scripts.\n5.\tAfter a satisfactory run is found, create a scoring explainer and register the persisted model and its corresponding explainer in the model registry.\n6.\tDevelop a scoring script.\n7.\tCreate an image and register it in the image registry.\n8.\tDeploy the image as a web service in Azure.\n\n|  |\n|:--:|",
"_____no_output_____"
],
[
"## Setup\nMake sure you go through the [configuration notebook](../../../../configuration.ipynb) first if you haven't.",
"_____no_output_____"
]
],
[
[
"# Check core SDK version number\nimport azureml.core\n\nprint(\"SDK version:\", azureml.core.VERSION)",
"_____no_output_____"
]
],
[
[
"## Initialize a Workspace\n\nInitialize a workspace object from persisted configuration",
"_____no_output_____"
]
],
[
[
"from azureml.core import Workspace\n\nws = Workspace.from_config()\nprint(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\\n')",
"_____no_output_____"
]
],
[
[
"## Explain\n\nCreate An Experiment: **Experiment** is a logical container in an Azure ML Workspace. It hosts run records which can include run metrics and output artifacts from your experiments.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Experiment\nexperiment_name = 'explainer-remote-run-on-amlcompute'\nexperiment = Experiment(workspace=ws, name=experiment_name)",
"_____no_output_____"
]
],
[
[
"## Introduction to AmlCompute\n\nAzure Machine Learning Compute is managed compute infrastructure that allows the user to easily create single to multi-node compute of the appropriate VM Family. It is created **within your workspace region** and is a resource that can be used by other users in your workspace. It autoscales by default to the max_nodes, when a job is submitted, and executes in a containerized environment packaging the dependencies as specified by the user. \n\nSince it is managed compute, job scheduling and cluster management are handled internally by Azure Machine Learning service. \n\nFor more information on Azure Machine Learning Compute, please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)\n\nIf you are an existing BatchAI customer who is migrating to Azure Machine Learning, please read [this article](https://aka.ms/batchai-retirement)\n\n**Note**: As with other Azure services, there are limits on certain resources (for eg. AmlCompute quota) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n\n\nThe training script `run_explainer.py` is already created for you. Let's have a look.",
"_____no_output_____"
],
[
"## Submit an AmlCompute run\n\nFirst lets check which VM families are available in your region. Azure is a regional service and some specialized SKUs (especially GPUs) are only available in certain regions. Since AmlCompute is created in the region of your workspace, we will use the supported_vms () function to see if the VM family we want to use ('STANDARD_D2_V2') is supported.\n\nYou can also pass a different region to check availability and then re-create your workspace in that region through the [configuration notebook](../../../configuration.ipynb)",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AmlCompute\n\nAmlCompute.supported_vmsizes(workspace=ws)\n# AmlCompute.supported_vmsizes(workspace=ws, location='southcentralus')",
"_____no_output_____"
]
],
[
[
"### Create project directory\n\nCreate a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on",
"_____no_output_____"
]
],
[
[
"import os\nimport shutil\n\nproject_folder = './explainer-remote-run-on-amlcompute'\nos.makedirs(project_folder, exist_ok=True)\nshutil.copy('train_explain.py', project_folder)",
"_____no_output_____"
]
],
[
[
"### Provision a compute target\n\n> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n\nYou can provision an AmlCompute resource by simply defining two parameters thanks to smart defaults. By default it autoscales from 0 nodes and provisions dedicated VMs to run your job in a container. This is useful when you want to continously re-use the same target, debug it between jobs or simply share the resource with other users of your workspace.\n\n* `vm_size`: VM family of the nodes provisioned by AmlCompute. Simply choose from the supported_vmsizes() above\n* `max_nodes`: Maximum nodes to autoscale to while running a job on AmlCompute",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n# Choose a name for your CPU cluster\ncpu_cluster_name = \"cpu-cluster\"\n\n# Verify that cluster does not exist already\ntry:\n cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n print('Found existing cluster, use it.')\nexcept ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n max_nodes=4)\n cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n\ncpu_cluster.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"### Configure & Run",
"_____no_output_____"
]
],
[
[
"from azureml.core.runconfig import RunConfiguration\nfrom azureml.core.conda_dependencies import CondaDependencies\nfrom azureml.core.runconfig import DEFAULT_CPU_IMAGE\n\n# Create a new runconfig object\nrun_config = RunConfiguration()\n\n# Set compute target to AmlCompute target created in previous step\nrun_config.target = cpu_cluster.name\n\n# Set Docker base image to the default CPU-based image\nrun_config.environment.docker.base_image = DEFAULT_CPU_IMAGE\n\n# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\nrun_config.environment.python.user_managed_dependencies = False\n\nazureml_pip_packages = [\n 'azureml-defaults', 'azureml-telemetry', 'azureml-interpret'\n]\n \n\n\n# Note: this is to pin the scikit-learn version to be same as notebook.\n# In production scenario user would choose their dependencies\nimport pkg_resources\navailable_packages = pkg_resources.working_set\nsklearn_ver = None\npandas_ver = None\nfor dist in available_packages:\n if dist.key == 'scikit-learn':\n sklearn_ver = dist.version\n elif dist.key == 'pandas':\n pandas_ver = dist.version\nsklearn_dep = 'scikit-learn'\npandas_dep = 'pandas'\nif sklearn_ver:\n sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\nif pandas_ver:\n pandas_dep = 'pandas=={}'.format(pandas_ver)\n# Specify CondaDependencies obj\n# The CondaDependencies specifies the conda and pip packages that are installed in the environment\n# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n# environment, otherwise if a model is trained or deployed in a different environment this can\n# cause errors. Please take extra care when specifying your dependencies in a production environment.\nazureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\nrun_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=azureml_pip_packages)\n# Now submit a run on AmlCompute\nfrom azureml.core.script_run_config import ScriptRunConfig\n\nscript_run_config = ScriptRunConfig(source_directory=project_folder,\n script='train_explain.py',\n run_config=run_config)\n\nrun = experiment.submit(script_run_config)\n\n# Show run details\nrun",
"_____no_output_____"
]
],
[
[
"Note: if you need to cancel a run, you can follow [these instructions](https://aka.ms/aml-docs-cancel-run).",
"_____no_output_____"
]
],
[
[
"%%time\n# Shows output of the run on stdout.\nrun.wait_for_completion(show_output=True)",
"_____no_output_____"
],
[
"# Delete () is used to deprovision and delete the AmlCompute target. Useful if you want to re-use the compute name \n# 'cpucluster' in this case but use a different VM family for instance.\n\n# cpu_cluster.delete()",
"_____no_output_____"
]
],
[
[
"## Download Model Explanation, Model, and Data",
"_____no_output_____"
]
],
[
[
"# Retrieve model for visualization and deployment\nfrom azureml.core.model import Model\nimport joblib\noriginal_model = Model(ws, 'amlcompute_deploy_model')\nmodel_path = original_model.download(exist_ok=True)\noriginal_svm_model = joblib.load(model_path)",
"_____no_output_____"
],
[
"# Retrieve global explanation for visualization\nfrom azureml.interpret import ExplanationClient\n\n# get model explanation data\nclient = ExplanationClient.from_run(run)\nglobal_explanation = client.download_model_explanation()",
"_____no_output_____"
],
[
"# Retrieve x_test for visualization\nimport joblib\nx_test_path = './x_test.pkl'\nrun.download_file('x_test_ibm.pkl', output_file_path=x_test_path)\nx_test = joblib.load(x_test_path)",
"_____no_output_____"
]
],
[
[
"## Visualize\nVisualize the explanations",
"_____no_output_____"
]
],
[
[
"from interpret_community.widget import ExplanationDashboard",
"_____no_output_____"
],
[
"ExplanationDashboard(global_explanation, original_svm_model, datasetX=x_test)",
"_____no_output_____"
]
],
[
[
"## Deploy\nDeploy Model and ScoringExplainer",
"_____no_output_____"
]
],
[
[
"from azureml.core.conda_dependencies import CondaDependencies \n\n# WARNING: to install this, g++ needs to be available on the Docker image and is not by default (look at the next cell)\nazureml_pip_packages = [\n 'azureml-defaults', 'azureml-core', 'azureml-telemetry',\n 'azureml-interpret'\n]\n \n\n# Note: this is to pin the scikit-learn and pandas versions to be same as notebook.\n# In production scenario user would choose their dependencies\nimport pkg_resources\navailable_packages = pkg_resources.working_set\nsklearn_ver = None\npandas_ver = None\nfor dist in available_packages:\n if dist.key == 'scikit-learn':\n sklearn_ver = dist.version\n elif dist.key == 'pandas':\n pandas_ver = dist.version\nsklearn_dep = 'scikit-learn'\npandas_dep = 'pandas'\nif sklearn_ver:\n sklearn_dep = 'scikit-learn=={}'.format(sklearn_ver)\nif pandas_ver:\n pandas_dep = 'pandas=={}'.format(pandas_ver)\n# Specify CondaDependencies obj\n# The CondaDependencies specifies the conda and pip packages that are installed in the environment\n# the submitted job is run in. Note the remote environment(s) needs to be similar to the local\n# environment, otherwise if a model is trained or deployed in a different environment this can\n# cause errors. Please take extra care when specifying your dependencies in a production environment.\nazureml_pip_packages.extend(['pyyaml', sklearn_dep, pandas_dep])\nmyenv = CondaDependencies.create(pip_packages=azureml_pip_packages)\n\nwith open(\"myenv.yml\",\"w\") as f:\n f.write(myenv.serialize_to_string())\n\nwith open(\"myenv.yml\",\"r\") as f:\n print(f.read())",
"_____no_output_____"
],
[
"# Retrieve scoring explainer for deployment\nscoring_explainer_model = Model(ws, 'IBM_attrition_explainer')",
"_____no_output_____"
],
[
"from azureml.core.webservice import Webservice\nfrom azureml.core.model import InferenceConfig\nfrom azureml.core.webservice import AciWebservice\nfrom azureml.core.model import Model\nfrom azureml.core.environment import Environment\nfrom azureml.exceptions import WebserviceException\n\n\naciconfig = AciWebservice.deploy_configuration(cpu_cores=1, \n memory_gb=1, \n tags={\"data\": \"IBM_Attrition\", \n \"method\" : \"local_explanation\"}, \n description='Get local explanations for IBM Employee Attrition data')\n\nmyenv = Environment.from_conda_specification(name=\"myenv\", file_path=\"myenv.yml\")\ninference_config = InferenceConfig(entry_script=\"score_remote_explain.py\", environment=myenv)\n\n# Use configs and models generated above\nservice = Model.deploy(ws, 'model-scoring-service', [scoring_explainer_model, original_model], inference_config, aciconfig)\ntry:\n service.wait_for_deployment(show_output=True)\nexcept WebserviceException as e:\n print(e.message)\n print(service.get_logs())\n raise",
"_____no_output_____"
],
[
"import requests\n\n# Create data to test service with\nexamples = x_test[:4]\ninput_data = examples.to_json()\n\nheaders = {'Content-Type':'application/json'}\n\n# Send request to service\nprint(\"POST to url\", service.scoring_uri)\nresp = requests.post(service.scoring_uri, input_data, headers=headers)\n\n# Can covert back to Python objects from json string if desired\nprint(\"prediction:\", resp.text)",
"_____no_output_____"
],
[
"service.delete()",
"_____no_output_____"
]
],
[
[
"## Next\nLearn about other use cases of the explain package on a:\n1. [Training time: regression problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-regression-local.ipynb) \n1. [Training time: binary classification problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-binary-classification-local.ipynb)\n1. [Training time: multiclass classification problem](https://github.com/interpretml/interpret-community/blob/master/notebooks/explain-multiclass-classification-local.ipynb)\n1. Explain models with engineered features:\n 1. [Simple feature transformations](https://github.com/interpretml/interpret-community/blob/master/notebooks/simple-feature-transformations-explain-local.ipynb)\n 1. [Advanced feature transformations](https://github.com/interpretml/interpret-community/blob/master/notebooks/advanced-feature-transformations-explain-local.ipynb)\n1. [Save model explanations via Azure Machine Learning Run History](../run-history/save-retrieve-explanations-run-history.ipynb)\n1. [Run explainers remotely on Azure Machine Learning Compute (AMLCompute)](../remote-explanation/explain-model-on-amlcompute.ipynb)\n1. [Inferencing time: deploy a locally-trained model and explainer](./train-explain-model-locally-and-deploy.ipynb)\n1. [Inferencing time: deploy a locally-trained keras model and explainer](./train-explain-model-keras-locally-and-deploy.ipynb)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb73d6c49d3d05ea65393d205a689f1c11f444a8 | 36,273 | ipynb | Jupyter Notebook | Chapter10/10.03 Reconstructing MNIST images using Autoencoder.ipynb | bharlow058/Packt-Deep-Learning | 181bbd57c264c7bea635cb558d22c8b164167c8d | [
"MIT"
]
| 86 | 2018-12-29T15:36:20.000Z | 2022-03-03T02:55:39.000Z | Chapter10/10.03 Reconstructing MNIST images using Autoencoder.ipynb | bharlow058/Packt-Deep-Learning | 181bbd57c264c7bea635cb558d22c8b164167c8d | [
"MIT"
]
| null | null | null | Chapter10/10.03 Reconstructing MNIST images using Autoencoder.ipynb | bharlow058/Packt-Deep-Learning | 181bbd57c264c7bea635cb558d22c8b164167c8d | [
"MIT"
]
| 57 | 2018-12-19T14:03:32.000Z | 2022-02-25T09:04:13.000Z | 63.303665 | 11,400 | 0.73523 | [
[
[
"# Reconstructing MNIST images using Autoencoder",
"_____no_output_____"
],
[
"Now that we have understood how autoencoders reconstruct the inputs, in this section we will learn how autoencoders reconstruct the images of handwritten digits using the MNIST dataset. \n\n\nIn this chapter, we use keras API from the tensorflow for building the models. So that we would be familiarized with how to use high-level APIs.",
"_____no_output_____"
],
[
"## Import Libraries",
"_____no_output_____"
],
[
"First, let us import the necessary libraries:",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings('ignore')\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense\n\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n#plotting\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n#dataset\nfrom tensorflow.keras.datasets import mnist\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Prepare the Dataset",
"_____no_output_____"
],
[
"Let us load the MNIST dataset. We don't need the labels for autoencoder. Since we are reconstructing the given input we don't need the labels. So, we just load x_train for training and x_test for testing:",
"_____no_output_____"
]
],
[
[
"(x_train, _), (x_test, _) = mnist.load_data()",
"_____no_output_____"
]
],
[
[
"Normalize the data by dividing with max pixel value which is 255:",
"_____no_output_____"
]
],
[
[
"x_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255",
"_____no_output_____"
]
],
[
[
"Shape of our dataset:",
"_____no_output_____"
]
],
[
[
"print(x_train.shape, x_test.shape)",
"((60000, 28, 28), (10000, 28, 28))\n"
]
],
[
[
"Reshape the images as 2D array:",
"_____no_output_____"
]
],
[
[
"x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))",
"_____no_output_____"
]
],
[
[
"Now, the shape of data would become:",
"_____no_output_____"
]
],
[
[
"print(x_train.shape, x_test.shape)",
"((60000, 784), (10000, 784))\n"
]
],
[
[
"# Define the Encoder",
"_____no_output_____"
],
[
"Now, we define the encoder which takes the images as an input and returns the encodings. \n\nDefine the size of the encodings:",
"_____no_output_____"
]
],
[
[
"encoding_dim = 32",
"_____no_output_____"
]
],
[
[
"Define the placeholders for the input:",
"_____no_output_____"
]
],
[
[
"input_image = Input(shape=(784,))",
"_____no_output_____"
]
],
[
[
"\nDefine the encoder which takes the input_image and returns the encodings:",
"_____no_output_____"
]
],
[
[
"encoder = Dense(encoding_dim, activation='relu')(input_image)",
"_____no_output_____"
]
],
[
[
"# Define the Decoder\n\nLet us define the decoder which takes the encoded values from the encoder and returns the reconstructed image:",
"_____no_output_____"
]
],
[
[
"decoder = Dense(784, activation='sigmoid')(encoder)",
"_____no_output_____"
]
],
[
[
"# Build the model\n\nNow that we defined encoder and decoder, we define the model which takes images as input and returns the output of the decoder which is the reconstructed image:",
"_____no_output_____"
]
],
[
[
"model = Model(inputs=input_image, outputs=decoder)",
"_____no_output_____"
]
],
[
[
"Let us look at summary of the model:",
"_____no_output_____"
]
],
[
[
"model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 784) 0 \n_________________________________________________________________\ndense (Dense) (None, 32) 25120 \n_________________________________________________________________\ndense_1 (Dense) (None, 784) 25872 \n=================================================================\nTotal params: 50,992\nTrainable params: 50,992\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"Compile the model with loss as binary cross entropy and we minimize the loss using AdaDelta optimizer:",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adadelta', loss='binary_crossentropy')",
"_____no_output_____"
]
],
[
[
"Now, let us train the model.\n\nGenerally, we feed the data to the model as model.fit(x,y) where x is the input and y is the label. But since autoencoders reconstruct its inputs, the input and output to the model should be the same. So we feed the data to the model as model.fit(x_train, x_train)\n\n",
"_____no_output_____"
]
],
[
[
"model.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/50\n60000/60000 [==============================] - 4s 72us/sample - loss: 0.3566 - val_loss: 0.2724\nEpoch 2/50\n60000/60000 [==============================] - 3s 43us/sample - loss: 0.2668 - val_loss: 0.2580\nEpoch 3/50\n60000/60000 [==============================] - 2s 40us/sample - loss: 0.2468 - val_loss: 0.2327\nEpoch 4/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.2239 - val_loss: 0.2132\nEpoch 5/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.2078 - val_loss: 0.2001\nEpoch 6/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1969 - val_loss: 0.1911\nEpoch 7/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1888 - val_loss: 0.1837\nEpoch 8/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1820 - val_loss: 0.1775\nEpoch 9/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1759 - val_loss: 0.1717\nEpoch 10/50\n60000/60000 [==============================] - 2s 41us/sample - loss: 0.1704 - val_loss: 0.1666\nEpoch 11/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1656 - val_loss: 0.1621\nEpoch 12/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1613 - val_loss: 0.1579\nEpoch 13/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1574 - val_loss: 0.1541\nEpoch 14/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1537 - val_loss: 0.1506\nEpoch 15/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1504 - val_loss: 0.1474\nEpoch 16/50\n60000/60000 [==============================] - 2s 40us/sample - loss: 0.1473 - val_loss: 0.1444\nEpoch 17/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1444 - val_loss: 0.1415\nEpoch 18/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1417 - val_loss: 0.1389\nEpoch 19/50\n60000/60000 [==============================] - 3s 55us/sample - loss: 0.1392 - val_loss: 0.1364\nEpoch 20/50\n60000/60000 [==============================] - 3s 50us/sample - loss: 0.1367 - val_loss: 0.1341\nEpoch 21/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1344 - val_loss: 0.1317\nEpoch 22/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1322 - val_loss: 0.1295\nEpoch 23/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1300 - val_loss: 0.1274\nEpoch 24/50\n60000/60000 [==============================] - 2s 41us/sample - loss: 0.1280 - val_loss: 0.1254\nEpoch 25/50\n60000/60000 [==============================] - 3s 43us/sample - loss: 0.1261 - val_loss: 0.1236\nEpoch 26/50\n60000/60000 [==============================] - 2s 40us/sample - loss: 0.1242 - val_loss: 0.1217\nEpoch 27/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1225 - val_loss: 0.1201\nEpoch 28/50\n60000/60000 [==============================] - 3s 45us/sample - loss: 0.1209 - val_loss: 0.1185\nEpoch 29/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1194 - val_loss: 0.1170\nEpoch 30/50\n60000/60000 [==============================] - 2s 40us/sample - loss: 0.1179 - val_loss: 0.1156\nEpoch 31/50\n60000/60000 [==============================] - 3s 42us/sample - loss: 0.1166 - val_loss: 0.1144\nEpoch 32/50\n60000/60000 [==============================] - 2s 41us/sample - loss: 0.1154 - val_loss: 
0.1131\nEpoch 33/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1142 - val_loss: 0.1120\nEpoch 34/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1132 - val_loss: 0.1110\nEpoch 35/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1122 - val_loss: 0.1101\nEpoch 36/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1113 - val_loss: 0.1092\nEpoch 37/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1104 - val_loss: 0.1083\nEpoch 38/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1096 - val_loss: 0.1076\nEpoch 39/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1089 - val_loss: 0.1069\nEpoch 40/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1082 - val_loss: 0.1062\nEpoch 41/50\n60000/60000 [==============================] - 2s 41us/sample - loss: 0.1075 - val_loss: 0.1056\nEpoch 42/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1069 - val_loss: 0.1050\nEpoch 43/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1064 - val_loss: 0.1045\nEpoch 44/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1058 - val_loss: 0.1040\nEpoch 45/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1053 - val_loss: 0.1035\nEpoch 46/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1049 - val_loss: 0.1030\nEpoch 47/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1044 - val_loss: 0.1026\nEpoch 48/50\n60000/60000 [==============================] - 2s 38us/sample - loss: 0.1040 - val_loss: 0.1022\nEpoch 49/50\n60000/60000 [==============================] - 2s 39us/sample - loss: 0.1036 - val_loss: 0.1019\nEpoch 50/50\n60000/60000 [==============================] - ETA: 0s - loss: 0.103 - 2s 38us/sample - loss: 0.1033 - val_loss: 0.1015\n"
]
],
[
[
"## Reconstruct images\n\nLet us see how our model is performing in the test dataset. Feed the test images to the model and get the reconstructed images:",
"_____no_output_____"
]
],
[
[
"reconstructed_images = model.predict(x_test)",
"_____no_output_____"
]
],
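[
[
"# Optional check (not in the original text): a single number summarizing reconstruction\n# quality on the test set, to accompany the visual comparison below\nmse = np.mean((x_test - reconstructed_images) ** 2)\nprint('Mean squared reconstruction error on the test set:', mse)",
"_____no_output_____"
]
],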
[
[
"## Plotting reconstructed images\n\n\nFirst let us plot the atcual images i.e input images:",
"_____no_output_____"
]
],
[
[
"n = 7\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n \n ax = plt.subplot(1, n, i+1)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show() ",
"_____no_output_____"
]
],
[
[
"Plot the reconstructed image:",
"_____no_output_____"
]
],
[
[
"n = 7\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n ax = plt.subplot(2, n, i + n + 1)\n plt.imshow(reconstructed_images[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\nplt.show() ",
"_____no_output_____"
]
],
[
[
"As you can notice, autoencoders have learned to reconstruct the given input image. In the next section, we will learn about convolutional autoencoder which uses convolutional layers in the encoder and decoder network. ",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb73d8c75d70edd23941799927972685f7c31d1a | 1,016,845 | ipynb | Jupyter Notebook | Chapter_2_Supervised_Learning.ipynb | aambrioso1/ML_with_Python | a3a74d376bd0761f0a86305abf53e82055e0dd9d | [
"MIT"
]
| null | null | null | Chapter_2_Supervised_Learning.ipynb | aambrioso1/ML_with_Python | a3a74d376bd0761f0a86305abf53e82055e0dd9d | [
"MIT"
]
| null | null | null | Chapter_2_Supervised_Learning.ipynb | aambrioso1/ML_with_Python | a3a74d376bd0761f0a86305abf53e82055e0dd9d | [
"MIT"
]
| null | null | null | 579.068907 | 173,334 | 0.949838 | [
[
[
"# Ch 2: Supervised Learning\n\n2.1: Classification and regression\n----\n\nCode for Chapter 2 by authors can be found here:\nhttps://github.com/amueller/introduction_to_ml_with_python/blob/master/02-supervised-learning.ipynb\n\nTwo major types of supervised learning:\n* classification: goal is to predict a class label (discrete)\n* regression: goal is to predict a real number (continuous)\n\n2.2: Generalization, Overfitting, Underfitting\n----\n\ngeneralization: a model should work well on the training data and test data.\noverfitting: model fits too closely to the particularities of the training set and so does not generalize well (too complex)\nunderfitting: model does not even fit the data set well (too simple)\n\noverfitting: https://en.wikipedia.org/wiki/Overfitting\n\n\nBias-variance tradeoff: https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff",
"_____no_output_____"
],
[
"2.3 Supervised Machine Learning Algorithms\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n# The mglearn packages has some helper function used for plotting.\nimport mglearn # https://github.com/amueller/introduction_to_ml_with_python/tree/master/mglearn\nfrom IPython.display import display\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_blobs\n\n# Not sure what this is for\n%matplotlib inline\nfrom preamble import *",
"_____no_output_____"
],
[
"# generate dataset\nX, y = mglearn.datasets.make_forge()\n# plot dataset\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\nplt.legend([\"Class 0\", \"Class 1\"], loc=4)\nplt.xlabel(\"First feature\")\nplt.ylabel(\"Second feature\")\nprint(\"X.shape:\", X.shape)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function make_blobs is deprecated; Please import make_blobs directly from scikit-learn\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"X, y = mglearn.datasets.make_wave(n_samples=40)\nplt.plot(X, y, 'o')\nplt.ylim(-3, 3)\nplt.xlabel(\"Feature\")\nplt.ylabel(\"Target\")",
"_____no_output_____"
],
[
"from sklearn.datasets import load_breast_cancer\ncancer = load_breast_cancer()\nprint(\"cancer.keys():\\n\", cancer.keys())",
"cancer.keys():\n dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names', 'filename'])\n"
],
[
"print(\"Shape of cancer data:\", cancer.data.shape)",
"Shape of cancer data: (569, 30)\n"
],
[
"print(\"Sample counts per class:\\n\",\n {n: v for n, v in zip(cancer.target_names, np.bincount(cancer.target))})",
"Sample counts per class:\n {'malignant': 212, 'benign': 357}\n"
],
[
"print(\"Feature names:\\n\", cancer.feature_names)",
"Feature names:\n ['mean radius' 'mean texture' 'mean perimeter' 'mean area'\n 'mean smoothness' 'mean compactness' 'mean concavity'\n 'mean concave points' 'mean symmetry' 'mean fractal dimension'\n 'radius error' 'texture error' 'perimeter error' 'area error'\n 'smoothness error' 'compactness error' 'concavity error'\n 'concave points error' 'symmetry error' 'fractal dimension error'\n 'worst radius' 'worst texture' 'worst perimeter' 'worst area'\n 'worst smoothness' 'worst compactness' 'worst concavity'\n 'worst concave points' 'worst symmetry' 'worst fractal dimension']\n"
],
[
"from sklearn.datasets import load_boston\nboston = load_boston()\nprint(\"Data shape:\", boston.data.shape)",
"Data shape: (506, 13)\n"
],
[
"X, y = mglearn.datasets.load_extended_boston()\nprint(\"X.shape:\", X.shape)",
"X.shape: (506, 104)\n"
]
],
[
[
"# 2.3.2 k-Nearest Neighbors\n\nk-Neighbors classification",
"_____no_output_____"
]
],
[
[
"mglearn.plots.plot_knn_classification(n_neighbors=1)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function make_blobs is deprecated; Please import make_blobs directly from scikit-learn\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"mglearn.plots.plot_knn_classification(n_neighbors=3)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function make_blobs is deprecated; Please import make_blobs directly from scikit-learn\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"from sklearn.model_selection import train_test_split\nX, y = mglearn.datasets.make_forge()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function make_blobs is deprecated; Please import make_blobs directly from scikit-learn\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"from sklearn.neighbors import KNeighborsClassifier\nclf = KNeighborsClassifier(n_neighbors=3)",
"_____no_output_____"
],
[
"clf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"print(\"Test set predictions:\", clf.predict(X_test))",
"Test set predictions: [1 0 1 0 1 0 0]\n"
],
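[
"# Sketch (not from the book): the 3-nearest-neighbors vote for the first test point, computed\n# by hand with Euclidean distances (the KNeighborsClassifier default metric)\ndists = np.sqrt(((X_train - X_test[0]) ** 2).sum(axis=1))\nnearest = np.argsort(dists)[:3]\nprint('labels of the 3 nearest training points:', y_train[nearest])\nprint('clf.predict for the same point:', clf.predict(X_test[:1]))",
"_____no_output_____"
],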
[
"print(\"Test set accuracy: {:.2f}\".format(clf.score(X_test, y_test)))",
"Test set accuracy: 0.86\n"
]
],
[
[
"Analyzing KNeighborClassifier",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(1, 3, figsize=(10, 3))\n\nfor n_neighbors, ax in zip([1, 3, 9], axes):\n # the fit method returns the object self, so we can instantiate\n # and fit in one line\n clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)\n mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)\n mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)\n ax.set_title(\"{} neighbor(s)\".format(n_neighbors))\n ax.set_xlabel(\"feature 0\")\n ax.set_ylabel(\"feature 1\")\naxes[0].legend(loc=3)",
"_____no_output_____"
],
[
"from sklearn.datasets import load_breast_cancer\n\ncancer = load_breast_cancer()\nX_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, stratify=cancer.target, random_state=66)\n\ntraining_accuracy = []\ntest_accuracy = []\n# try n_neighbors from 1 to 20\nneighbors_settings = range(1, 20)\n\nfor n_neighbors in neighbors_settings:\n # build the model\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n clf.fit(X_train, y_train)\n # record training set accuracy\n training_accuracy.append(clf.score(X_train, y_train))\n # record generalization accuracy\n test_accuracy.append(clf.score(X_test, y_test))\n \nplt.plot(neighbors_settings, training_accuracy, label=\"training accuracy\")\nplt.plot(neighbors_settings, test_accuracy, label=\"test accuracy\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"n_neighbors\")\nplt.legend()",
"_____no_output_____"
]
],
[
[
"**k-neighbors regression**\n\n\n",
"_____no_output_____"
]
],
[
[
"mglearn.plots.plot_knn_regression(n_neighbors=1)",
"_____no_output_____"
],
[
"mglearn.plots.plot_knn_regression(n_neighbors=3)",
"_____no_output_____"
],
[
"from sklearn.neighbors import KNeighborsRegressor\n\nX, y = mglearn.datasets.make_wave(n_samples=40)\n\n# split the wave dataset into a training and a test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n# instantiate the model and set the number of neighbors to consider to 3\nreg = KNeighborsRegressor(n_neighbors=3)\n# fit the model using the training data and training targets\nreg.fit(X_train, y_train)",
"_____no_output_____"
],
[
"print(\"Test set predictions:\\n\", reg.predict(X_test))",
"Test set predictions:\n [-0.054 0.357 1.137 -1.894 -1.139 -1.631 0.357 0.912 -0.447 -1.139]\n"
],
[
"print(\"Test set R^2: {:.2f}\".format(reg.score(X_test, y_test)))",
"Test set R^2: 0.83\n"
],
[
"\nfig, axes = plt.subplots(1, 3, figsize=(15, 4))\n# create 1,000 data points, evenly spaced between -3 and 3\nline = np.linspace(-3, 3, 1000).reshape(-1, 1)\nfor n_neighbors, ax in zip([1, 3, 9], axes):\n # make predictions using 1, 3, or 9 neighbors\n reg = KNeighborsRegressor(n_neighbors=n_neighbors)\n reg.fit(X_train, y_train)\n ax.plot(line, reg.predict(line))\n ax.plot(X_train, y_train, '^', c=mglearn.cm2(0), markersize=8)\n ax.plot(X_test, y_test, 'v', c=mglearn.cm2(1), markersize=8)\n\n ax.set_title(\n \"{} neighbor(s)\\n train score: {:.2f} test score: {:.2f}\".format(\n n_neighbors, reg.score(X_train, y_train),\n reg.score(X_test, y_test)))\n ax.set_xlabel(\"Feature\")\n ax.set_ylabel(\"Target\")\naxes[0].legend([\"Model predictions\", \"Training data/target\",\n \"Test data/target\"], loc=\"best\")",
"_____no_output_____"
]
],
[
[
"**Strength, weaknesses, and parameters**\n\nParameters\n* number of neighbors\n* distance measure\n\nStrengths\n* Easy to understand\n* quick to implement\n* good baseline method\n\nWeaknesses\n* can be slow with large datasets\n* does not perform will with more than 100 features\n* does not perform well with sparse datasets",
"_____no_output_____"
],
[
"# 2.3.3 Linear Models\n\nLinear models \\[IMLP, p. 47\\] using make prediction by using a linear model of the input features\n\nLinear model: y = w\\[0\\] * x\\[0\\] + w\\[1\\] * x\\[1\\] + ... + w\\[p\\] * x\\[p\\] + b\n\nwhere the w\\[i\\] and b are learned and the x\\[i\\] are features.\n\nThe w\\[i\\] can be thought of as weights.\n\n",
"_____no_output_____"
]
],
[
[
"mglearn.plots.plot_linear_regression_wave()",
"w[0]: 0.393906 b: -0.031804\n"
]
],
[
[
"**Linear Models for Regression**\n* linear regression (ordinary least squares or OLS)\n* ridge regression\n* lasso \n\n**Linear Regression or OLS**",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression\n# A small data set\nX, y = mglearn.datasets.make_wave(n_samples=60)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n\nlr = LinearRegression().fit(X_train, y_train)",
"_____no_output_____"
],
[
"print(\"lr.coef_:\", lr.coef_)\nprint(\"lr.intercept_:\", lr.intercept_)",
"lr.coef_: [0.394]\nlr.intercept_: -0.031804343026759746\n"
],
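[
"# Sketch (not from the book): the prediction formula y = w[0] * x[0] + b written out by hand\n# with the learned parameters; on this one-feature dataset it should match lr.predict\nmanual_pred = X_test @ lr.coef_ + lr.intercept_\nprint('matches lr.predict:', np.allclose(manual_pred, lr.predict(X_test)))",
"_____no_output_____"
],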
[
"print(\"Training set score: {:.2f}\".format(lr.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(lr.score(X_test, y_test)))",
"Training set score: 0.67\nTest set score: 0.66\n"
],
[
"# Large data set\nX, y = mglearn.datasets.load_extended_boston()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\nlr = LinearRegression().fit(X_train, y_train)",
"_____no_output_____"
],
[
"print(f'The Boston Housing Data has length {len(X_train)}.')\nprint(\"Training set score: {:.2f}\".format(lr.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(lr.score(X_test, y_test)))",
"The Boston Housing Data has length 379.\nTraining set score: 0.95\nTest set score: 0.61\n"
]
],
[
[
"The discrepancy between the training and test scores here is a sign of overfitting. We need a model that allows us to control complexity.\n\nRidge regression is a common alternative to ordinary linear regression.\nIn ridge regression the weights are chosen to fit the training data well while also keeping the magnitude of the coefficients as small as possible. This is an example of regularization, more specifically L2 regularization. (A small numerical check of this objective follows the Ridge examples below.)\n\nTo chase down more technical details see https://en.wikipedia.org/wiki/Ridge_regression and https://en.wikipedia.org/wiki/Tikhonov_regularization.\n\n**Ridge Regression**",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import Ridge\n\nridge = Ridge().fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(ridge.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(ridge.score(X_test, y_test)))",
"Training set score: 0.89\nTest set score: 0.75\n"
],
[
"ridge10 = Ridge(alpha=10).fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(ridge10.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(ridge10.score(X_test, y_test)))",
"Training set score: 0.79\nTest set score: 0.64\n"
],
[
"ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(ridge01.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(ridge01.score(X_test, y_test)))",
"Training set score: 0.93\nTest set score: 0.77\n"
],
[
"plt.plot(ridge.coef_, 's', label=\"Ridge alpha=1\")\nplt.plot(ridge10.coef_, '^', label=\"Ridge alpha=10\")\nplt.plot(ridge01.coef_, 'v', label=\"Ridge alpha=0.1\")\n\nplt.plot(lr.coef_, 'o', label=\"LinearRegression\")\nplt.xlabel(\"Coefficient index\")\nplt.ylabel(\"Coefficient magnitude\")\nxlims = plt.xlim()\nplt.hlines(0, xlims[0], xlims[1])\nplt.xlim(xlims)\nplt.ylim(-25, 25)\nplt.legend()",
"_____no_output_____"
],
[
"mglearn.plots.plot_ridge_n_samples()",
"_____no_output_____"
]
],
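[
[
"# A small numerical check of the ridge objective described above (a sketch, not part of the book's code).\n# With fit_intercept=False, scikit-learn's Ridge minimizes ||Xw - y||^2 + alpha * ||w||^2,\n# whose closed-form solution is w = (X^T X + alpha * I)^(-1) X^T y.\nimport numpy as np\nfrom sklearn.linear_model import Ridge\n\nalpha = 1.0\nridge_check = Ridge(alpha=alpha, fit_intercept=False).fit(X_train, y_train)\n\nn_features = X_train.shape[1]\nw_closed_form = np.linalg.solve(X_train.T @ X_train + alpha * np.eye(n_features), X_train.T @ y_train)\n\nprint('Max difference between sklearn and closed-form coefficients:', np.max(np.abs(ridge_check.coef_ - w_closed_form)))",
"_____no_output_____"
]
],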
[
[
"##### **Lasso**\n\nStart with Lasso (linear regression with L1 regularization)\n\\[IMLP, p. 55\\] in the book and In \\[36\\] \nhttps://github.com/amueller/introduction_to_ml_with_python/blob/master/02-supervised-learning.ipynb\n\nIn statistics and machine learning, lasso (least absolute shrinkage and selection operator; also Lasso or LASSO) is a regression analysis method that performs both variable selection and regularization in order to enhance the prediction accuracy and interpretability of the resulting statistical model. It was originally introduced in geophysics, and later by Robert Tibshirani, who coined the term.\n\nSee https://en.wikipedia.org/wiki/Lasso_(statistics) for more information.\n\nOriginal paper on [Lasso](https://www.jstor.org/stable/2346178?seq=1#metadata_info_tab_contents).\n",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import Lasso\n\nlasso = Lasso().fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(lasso.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(lasso.score(X_test, y_test)))\nprint(\"Number of features used:\", np.sum(lasso.coef_ != 0))",
"Training set score: 0.29\nTest set score: 0.21\nNumber of features used: 4\n"
],
[
"# we increase the default setting of \"max_iter\",\n# otherwise the model would warn us that we should increase max_iter.\nlasso001 = Lasso(alpha=0.01, max_iter=100000).fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(lasso001.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(lasso001.score(X_test, y_test)))\nprint(\"Number of features used:\", np.sum(lasso001.coef_ != 0))",
"Training set score: 0.90\nTest set score: 0.77\nNumber of features used: 33\n"
],
[
"# We try setting alpha lower. In this case we largely remove the effect of regularization and\n# achieve a result similar to linear regression.\nlasso00001 = Lasso(alpha=0.0001, max_iter=100000).fit(X_train, y_train)\nprint(\"Training set score: {:.2f}\".format(lasso00001.score(X_train, y_train)))\nprint(\"Test set score: {:.2f}\".format(lasso00001.score(X_test, y_test)))\nprint(\"Number of features used:\", np.sum(lasso00001.coef_ != 0))",
"Training set score: 0.95\nTest set score: 0.64\nNumber of features used: 96\n"
],
[
"plt.plot(lasso.coef_, 's', label=\"Lasso alpha=1\")\nplt.plot(lasso001.coef_, '^', label=\"Lasso alpha=0.01\")\nplt.plot(lasso00001.coef_, 'v', label=\"Lasso alpha=0.0001\")\n\nplt.plot(ridge01.coef_, 'o', label=\"Ridge alpha=0.1\")\nplt.legend(ncol=2, loc=(0, 1.05))\nplt.ylim(-25, 25)\nplt.xlabel(\"Coefficient index\")\nplt.ylabel(\"Coefficient magnitude\")",
"_____no_output_____"
]
],
[
[
"The ElasticNet class of scikit-learn combines the penalties of Lasso and Ridge and often works best in practice, but then the strength of both the L1 and the L2 regularization must be tuned (a brief sketch is shown in the next cell).\n\nLogistic Regression: https://en.wikipedia.org/wiki/Logistic_regression\n\nLinear Support Vector Machine: https://en.wikipedia.org/wiki/Support-vector_machine",
"_____no_output_____"
]
],
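[
[
"# A brief ElasticNet sketch -- the text above mentions ElasticNet but shows no code for it here.\n# ElasticNet mixes the L1 and L2 penalties; l1_ratio=1 is pure Lasso and l1_ratio=0 is pure Ridge.\n# The alpha and l1_ratio values below are illustrative, not tuned.\nimport numpy as np\nfrom sklearn.linear_model import ElasticNet\n\nenet = ElasticNet(alpha=0.01, l1_ratio=0.5, max_iter=100000).fit(X_train, y_train)\nprint('Training set score: {:.2f}'.format(enet.score(X_train, y_train)))\nprint('Test set score: {:.2f}'.format(enet.score(X_test, y_test)))\nprint('Number of features used:', np.sum(enet.coef_ != 0))",
"_____no_output_____"
]
],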
[
[
"from sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\n\nX, y = mglearn.datasets.make_forge()\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 3))\n\nfor model, ax in zip([LinearSVC(), LogisticRegression()], axes):\n clf = model.fit(X, y)\n mglearn.plots.plot_2d_separator(clf, X, fill=False, eps=0.5,\n ax=ax, alpha=.7)\n mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)\n ax.set_title(clf.__class__.__name__)\n ax.set_xlabel(\"Feature 0\")\n ax.set_ylabel(\"Feature 1\")\naxes[0].legend()",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:87: FutureWarning: Function make_blobs is deprecated; Please import make_blobs directly from scikit-learn\n warnings.warn(msg, category=FutureWarning)\nC:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\n"
],
[
"mglearn.plots.plot_linear_svc_regularization()",
"_____no_output_____"
]
],
[
[
"Start with Linear models for classification\n\\[IMLP, p. 61\\] in the book and In \\[42\\] \nhttps://github.com/amueller/introduction_to_ml_with_python/blob/master/02-supervised-learning.ipynb",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_breast_cancer\ncancer = load_breast_cancer()\nX_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, stratify=cancer.target, random_state=42)\nlogreg = LogisticRegression(C=1).fit(X_train, y_train)\n# print(\"Training set score: {:.3f}\".format(logreg.score(X_train, y_train)))\nprint(f\"Training set score: {logreg.score(X_train, y_train):.3f}\")\nprint(f\"Test set score: {logreg.score(X_test, y_test):.3f}\")",
"Training set score: 0.948\nTest set score: 0.958\n"
]
],
[
[
"Regularization means restricting a model to avoid overfitting. The parameter C determines the strength of the regularization. A higher value of C corresponds to less regularization.",
"_____no_output_____"
]
],
[
[
"logreg100 = LogisticRegression(C=100).fit(X_train, y_train)\nprint(\"Training set score: {:.3f}\".format(logreg100.score(X_train, y_train)))\nprint(\"Test set score: {:.3f}\".format(logreg100.score(X_test, y_test)))",
"Training set score: 0.958\nTest set score: 0.965\n"
],
[
"logreg001 = LogisticRegression(C=0.01).fit(X_train, y_train)\nprint(\"Training set score: {:.3f}\".format(logreg001.score(X_train, y_train)))\nprint(\"Test set score: {:.3f}\".format(logreg001.score(X_test, y_test)))",
"_____no_output_____"
],
[
"plt.plot(logreg.coef_.T, 'o', label=\"C=1\")\nplt.plot(logreg100.coef_.T, '^', label=\"C=100\")\nplt.plot(logreg001.coef_.T, 'v', label=\"C=0.01\")\nplt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)\nxlims = plt.xlim()\nplt.hlines(0, xlims[0], xlims[1])\nplt.xlim(xlims)\nplt.ylim(-5, 5)\nplt.xlabel(\"Feature\")\nplt.ylabel(\"Coefficient magnitude\")\nplt.legend()",
"_____no_output_____"
],
[
"for C, marker in zip([0.001, 1, 100], ['o', '^', 'v']):\n lr_l1 = LogisticRegression(C=C, solver='liblinear', penalty=\"l1\").fit(X_train, y_train)\n print(\"Training accuracy of l1 logreg with C={:.3f}: {:.2f}\".format(\n C, lr_l1.score(X_train, y_train)))\n print(\"Test accuracy of l1 logreg with C={:.3f}: {:.2f}\".format(\n C, lr_l1.score(X_test, y_test)))\n plt.plot(lr_l1.coef_.T, marker, label=\"C={:.3f}\".format(C))\n\nplt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)\nxlims = plt.xlim()\nplt.hlines(0, xlims[0], xlims[1])\nplt.xlim(xlims)\nplt.xlabel(\"Feature\")\nplt.ylabel(\"Coefficient magnitude\")\n\nplt.ylim(-5, 5)\nplt.legend(loc=3)",
"Training accuracy of l1 logreg with C=0.001: 0.91\nTest accuracy of l1 logreg with C=0.001: 0.92\nTraining accuracy of l1 logreg with C=1.000: 0.96\nTest accuracy of l1 logreg with C=1.000: 0.96\nTraining accuracy of l1 logreg with C=100.000: 0.99\nTest accuracy of l1 logreg with C=100.000: 0.98\n"
],
[
"from sklearn.datasets import make_blobs\n\nX, y = make_blobs(random_state=42)\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.legend([\"Class 0\", \"Class 1\", \"Class 2\"])",
"_____no_output_____"
],
[
"linear_svm = LinearSVC().fit(X, y)\nprint(\"Coefficient shape: \", linear_svm.coef_.shape)\nprint(\"Intercept shape: \", linear_svm.intercept_.shape)",
"_____no_output_____"
],
[
"mglearn.discrete_scatter(X[:, 0], X[:, 1], y)\nline = np.linspace(-15, 15)\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,\n mglearn.cm3.colors):\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\n print(f\"y = {-1 * coef[0]/coef[1]} x + {-1 * intercept/coef[1]}\")\nplt.ylim(-10, 15)\nplt.xlim(-10, 8)\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")\nplt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1',\n 'Line class 2'], loc=(1.01, 0.3))",
"y = 0.7558986267572154 x + 4.656074269147871\ny = 6.865286758984226 x + 1.8943411817408766\ny = -0.9272009606708548 x + -0.42181505094766236\n"
],
[
"\nmglearn.plots.plot_2d_classification(linear_svm, X, fill=True, alpha=.7)\nmglearn.discrete_scatter(X[:, 0], X[:, 1], y)\nline = np.linspace(-15, 15)\nfor coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_,\n mglearn.cm3.colors):\n plt.plot(line, -(line * coef[0] + intercept) / coef[1], c=color)\nplt.legend(['Class 0', 'Class 1', 'Class 2', 'Line class 0', 'Line class 1',\n 'Line class 2'], loc=(1.01, 0.3))\nplt.xlabel(\"Feature 0\")\nplt.ylabel(\"Feature 1\")",
"_____no_output_____"
]
],
[
[
"### Strengths, weaknesses, and parameters\n[IMLP, p. 69] Start with In[50] here:\nhttps://github.com/amueller/introduction_to_ml_with_python/blob/master/02-supervised-learning.ipynb\n\nIf only a few features are important, use L1 regularization.\nOtherwise, the default should be L2 regularization.\n\nLinear models are very fast to train, fast to predict, scale to large datasets, work well with sparse data, and are relatively easy to understand. Highly correlated features can make it difficult to interpret the coefficients.\n\nFor very large datasets consider using the solver='sag' option in LogisticRegression or Ridge. For an even more scalable version of linear models try the SGDClassifier class or the SGDRegressor class (a brief sketch follows in the next cell).\n",
"_____no_output_____"
]
],
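[
[
"# A minimal sketch of the more scalable SGDClassifier mentioned above (settings are illustrative, not tuned).\n# The default loss ('hinge') gives a linear SVM; loss='log' (or 'log_loss' in newer scikit-learn\n# versions) would give logistic regression trained with stochastic gradient descent instead.\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler().fit(X_train)\nsgd = SGDClassifier(alpha=0.0001, max_iter=1000, random_state=0)\nsgd.fit(scaler.transform(X_train), y_train)\nprint('Training set score: {:.3f}'.format(sgd.score(scaler.transform(X_train), y_train)))\nprint('Test set score: {:.3f}'.format(sgd.score(scaler.transform(X_test), y_test)))",
"_____no_output_____"
]
],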
[
[
"# instantiate model and fit it in one line\nlogreg = LogisticRegression().fit(X_train, y_train)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n"
],
[
"logreg = LogisticRegression()\ny_pred = logreg.fit(X_train, y_train).predict(X_test)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n"
],
[
"y_pred = LogisticRegression().fit(X_train, y_train).predict(X_test)",
"C:\\Users\\Alex\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n"
]
],
[
[
"### Naive Bayes Classifiers\n",
"_____no_output_____"
]
],
[
[
"X = np.array([[0, 1, 0, 1],\n [1, 0, 1, 1],\n [0, 0, 0, 1],\n [1, 0, 1, 0]])\ny = np.array([0, 1, 0, 1])",
"_____no_output_____"
],
[
"counts = {}\nfor label in np.unique(y):\n # iterate over each class\n # count (sum) entries of 1 per feature\n counts[label] = X[y == label].sum(axis=0)\nprint(\"Feature counts:\\n\", counts)",
"Feature counts:\n {0: array([0, 1, 0, 2]), 1: array([2, 0, 2, 1])}\n"
]
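,
[
"# A short cross-check (a sketch, not from the book's code): scikit-learn's BernoulliNB stores the\n# same per-class counts of nonzero entries computed by hand above in its feature_count_ attribute.\nfrom sklearn.naive_bayes import BernoulliNB\n\nbnb = BernoulliNB().fit(X, y)\nprint('feature_count_ per class:')\nprint(bnb.feature_count_)\nprint('Predictions on the training data:', bnb.predict(X))",
"_____no_output_____"
]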
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb73d94554f4a2816161d00953424e7c2def5866 | 462,254 | ipynb | Jupyter Notebook | notebooks/03_aoe2_civgen.ipynb | lukexyz/insultswordfight | 3e1955c241874352bbd7324adca7bf7a52ba792d | [
"Apache-2.0"
]
| 7 | 2021-10-31T15:30:46.000Z | 2022-03-18T11:40:35.000Z | notebooks/03_aoe2_civgen.ipynb | lukexyz/insultswordfight | 3e1955c241874352bbd7324adca7bf7a52ba792d | [
"Apache-2.0"
]
| null | null | null | notebooks/03_aoe2_civgen.ipynb | lukexyz/insultswordfight | 3e1955c241874352bbd7324adca7bf7a52ba792d | [
"Apache-2.0"
]
| null | null | null | 1,097.990499 | 357,176 | 0.955012 | [
[
[
"# default_exp core",
"_____no_output_____"
]
],
[
[
"# Few-shot Learning with GPT-J\n\n> API details.",
"_____no_output_____"
]
],
[
[
"# export\nimport os\nimport pandas as pd",
"_____no_output_____"
],
[
"#hide\nfrom nbdev.showdoc import *\nimport toml\ns = toml.load(\"../.streamlit/secrets.toml\", _dict=dict)",
"_____no_output_____"
]
],
[
[
"Using the `gpt-j` model API from [Nlpcloud](https://nlpcloud.io/home/token)",
"_____no_output_____"
]
],
[
[
"import nlpcloud\nclient = nlpcloud.Client(\"gpt-j\", s['nlpcloud_token'], gpu=True)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"## Aoe2 Civ Builder\nhttps://ageofempires.fandom.com/wiki/Civilizations_(Age_of_Empires_II)",
"_____no_output_____"
]
],
[
[
"# example API call\ngeneration = client.generation(\"\"\"Civilisation: Britons\nSpecialty: Foot archers\nUnique unit: Longbowman\nUnique technologies: Yeomen (+1 range for foot archers and +2 attack for towers)\nUnique technologies: Warwolf (Trebuchets do blast damage)\nWonder: Chichester Cathedral\nCivilization bonuses: Shepherds work 25% faster.\nTeam bonus: Town Centers cost -50% wood (starting in the Castle Age).\n###\nCivilisation: Mongols\nSpecialty: Cavalry archers\nUnique unit: Mangudai\nUnique technologies: Nomads (Houses retain population when destroyed)\nUnique technologies: Drill (Siege Workshop units move 50% faster)\nWonder: Great Tent of Genghis Khan\nCivilization bonuses: Hunters work 40% faster.\nTeam bonus: The Scout Cavalry line has +2 Line of Sight.\n### \nCivilisation: Celts\nSpecialty: Infantry and siege weapons\nUnique unit: Woad Raider\nUnique technologies: Stronghold (Castles and towers fire 25% faster)\nUnique technologies: Furor Celtica (Siege Workshop units have +40% HP)\nWonder: Rock of Cashel\nCivilization bonuses: Infantry units move 15% faster (starting in the Feudal Age).\nCivilization bonuses: Lumberjacks work 15% faster.\nCivilization bonuses: Siege weapons fire 25% faster.\nCivilization bonuses: Enemy herdables can be converted regardless of enemy units next to them.\nTeam bonus: Siege Workshops work 20% faster.\n###\nCivilisation: New Zealand Maori\"\"\",\n max_length=250,\n length_no_input=True,\n end_sequence=\"###\",\n remove_input=True)\n\nprint('Civilisation: New Zealand Maori\\n ', generation[\"generated_text\"])",
"Civilisation: New Zealand Maori\n \nSpecialty: Armoured archers with pikes\nUnique unit: Warrior\nUnique technologies: Weapons (Archers do 80% damage to pikemen)\nWonder: Rangia\nCivilization bonuses: Archers do 80% damage to pikemen\nTeam bonuses: Enemies of archers cannot use siege techniques.\n###\n"
],
[
"\ndef create_input_string(civname):\n return f\"\"\"Civilisation: Britons\nSpecialty: Foot archers\nUnique unit: Longbowman\nUnique technologies: Yeomen (+1 range for foot archers and +2 attack for towers)\nUnique technologies: Warwolf (Trebuchets do blast damage)\nWonder: Chichester Cathedral\nCivilization bonuses: Shepherds work 25% faster.\nTeam bonus: Town Centers cost -50% wood (starting in the Castle Age).\n###\nCivilisation: Mongols\nSpecialty: Cavalry archers\nUnique unit: Mangudai\nUnique technologies: Nomads (Houses retain population when destroyed)\nUnique technologies: Drill (Siege Workshop units move 50% faster)\nWonder: Great Tent of Genghis Khan\nCivilization bonuses: Hunters work 40% faster.\nTeam bonus: The Scout Cavalry line has +2 Line of Sight.\n### \nCivilisation: Celts\nSpecialty: Infantry and siege weapons\nUnique unit: Woad Raider\nUnique technologies: Stronghold (Castles and towers fire 25% faster)\nUnique technologies: Furor Celtica (Siege Workshop units have +40% HP)\nWonder: Rock of Cashel\nCivilization bonuses: Infantry units move 15% faster (starting in the Feudal Age).\nCivilization bonuses: Lumberjacks work 15% faster.\nCivilization bonuses: Siege weapons fire 25% faster.\nCivilization bonuses: Enemy herdables can be converted regardless of enemy units next to them.\nTeam bonus: Siege Workshops work 20% faster.\n###\nCivilisation: {civname}\"\"\"\n\n\ndef generate_civ(civname, client):\n \"\"\"\n Creates input string and sends to nlpcloud for few-shot learning\n \"\"\"\n print(f'🌐 Generating New Civ: {civname} \\n')\n\n input_str = create_input_string(civname)\n\n generation = client.generation(input_str,\n max_length=250,\n length_no_input=True,\n end_sequence='###',\n remove_input=True)\n civgen = generation[\"generated_text\"].strip('\\n')\n \n print(f\"🛡️ **{civname}**\\n{civgen}\")\n \n return civgen\n",
"_____no_output_____"
],
[
"c = generate_civ(civname='New Zealand Maori', client=client)",
"Generating New Civ: New Zealand Maori \n\n🛡️ New Zealand Maori/nSpecialty: Maori archers\nUnique unit: Nga Moeroa\nUnique technologies: Ngati Waarawake\nUnique technologies: Taniwha\nWonder: Hei-O-Te-Po\nCivilization bonuses: Maori units are 15% better off water.\nCivilisation bonuses: Maori workers do 20% better than their European counterparts.\nCivilization bonuses: Maori archers do 50% better damage with arrows.\nTeam bonus: Maori units are at their best on dry land.\n###\n"
],
[
"c = generate_civ(civname='Fijians', client=client)",
"🌐 Generating New Civ: Fijians \n\n🛡️ **Fijians**\nSpecialty: Specialized attack, long range combat, and shipbuilding\nUnique unit: War canoe\nUnique technologies: War canoes (+2 attack, +20% range), Shipwrights (+70% build ship time)\nWonder: Tanna or Makatea Islands\nCivilization bonuses: Taino can fight from land and sea. Taino can build shipwrights at +70% of normal rate.\nTeam bonus: Taino build shipwrights at +75% of normal rate. - +10% of time at sea to build ships.\n###\n"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"c = generate_civ(civname='Canadians', client=client)",
"🌐 Generating New Civ: Canadians \n\n🛡️ **Canadians**\nSpecialty: Infantry archers\nUnique unit: Longbowman\nUnique technologies: Native American\nWonder: Champlain's Memorial\nCivilization bonuses: Wood cutters work 30% faster.\nTeam bonus: Native Americans can live in empty houses.\nCivilization bonuses: Infantry units move +2 Lines of Sight.\nCivilization bonuses: All buildings built in the Plains are built as Stone.\nCivilization bonuses: Hauler unit costs -15% wood.\nCivilization bonus: Woodcutters build Walls for free\nCivilization bonuses: Cavalry and Lumberjacks work 50% faster.\nTeam bonus: Cavalry cost -15% wood.\nCivilization bonuses: Cavalry cost 25% less Gold (starting in the Feudal Age).\nCivilization bonuses: Herdables have -20% Movement.\nCivilization bonuses: Lumberjacks build Walls for free (starting in the Feudal Age)\nCivilization bonuses: Can convert enemy herdables to Wood.\nCivilization bonuses: Castles cost -30% Gold.\n###\n"
],
[
"c = generate_civ(civname='European Union', client=client)",
"🌐 Generating New Civ: European Union \n\n🛡️ **European Union**\nSpecialty: Infantry, artillery and cavalry\nUnique unit: Arbalet\nUnique technologies: Feudalism (+1 population for peasants to work fields)\nWonder: Chartres Cathedral\nCivilization bonuses: Woodcutters work 15% faster.\nCivilization bonuses: Siege Workshops do +20% damage.\nCivilization bonuses: Infantry units move 20% faster.\nCivilization bonuses: Artillery units do +20% Damage.\nTeam Bonus: All cavalry units have +20% movement.\n###\n"
],
[
"c = generate_civ(civname='Dutch', client=client)",
"🌐 Generating New Civ: Dutch \n\n🛡️ **Dutch**\nSpecialty: Infantry and siege\nUnique unit: Fluiters (Musketmen +4 range)\nUnique technologies: Watermolen (The Water Mill produces food when it is destroyed)\nUnique technologies: Fletemen (Musketmen +4 attack range)\nUltimate technology: Hoist (Lowered by -5 techcost when next to a catapult)\nMaster technology: Fluiters (Musketmen +4 range)\nWonder: Watermolen\nCivilization bonuses: Wood-Cutters and Carpenters work 45% faster.\nTeam bonus: Siege works cost -50% logs.\n###\n"
],
[
"c = generate_civ(civname='Star Wars Death Star', client=client)",
"🌐 Generating New Civ: Star Wars Death Star \n\n🛡️ **Star Wars Death Star**\nSpecialty: Rocket launchers and star destroyers\nUnique unit: Heavy weapons teams\nUnique technologies: Trench defenses (Rocket turrets have +40% HP)\nUnique technologies: Heavy weapons teams (Rocket turrets and star destroyers fire 75% faster)\nWonder: X-Wing starfighters\nCivilization bonuses: Heavy weapons teams have +10% Accuracy.\n#Civilization: Wurrrgh\nSpecialty: The space dog.\nUnique unit: Wurrrrh\nHoardable: Wurrrh (The space dog always has a horde of space hamsters with it.)\nUnits: Space hamsters\nTeam bonus: Space hamsters are 10% more accurate for all attacks.\nSpecialist: Space hamsters\n###\n"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
cb73dd6467e35cf2955dcca63042135079da06d0 | 17,388 | ipynb | Jupyter Notebook | notebooks/synthetic-images/synthetic-images.ipynb | eblur/tutorials | ad71c896cab58e1a2fc4d4483a73f1e9c190e870 | [
"BSD-3-Clause"
]
| 2 | 2018-12-16T09:18:40.000Z | 2019-06-25T18:30:02.000Z | tutorials/notebooks/synthetic-images/synthetic-images.ipynb | BurcuAkbulut/astropy-tutorials | 486d82e393152196e20ad5a961c3d94ce720805e | [
"BSD-3-Clause"
]
| 3 | 2015-05-20T21:28:51.000Z | 2015-05-20T21:29:02.000Z | tutorials/notebooks/synthetic-images/synthetic-images.ipynb | BurcuAkbulut/astropy-tutorials | 486d82e393152196e20ad5a961c3d94ce720805e | [
"BSD-3-Clause"
]
| 1 | 2020-05-18T14:55:43.000Z | 2020-05-18T14:55:43.000Z | 27.954984 | 391 | 0.578272 | [
[
[
"# Synthetic Images from simulated data\n\n## Authors\nYi-Hao Chen, Sebastian Heinz, Kelle Cruz, Stephanie T. Douglas\n\n## Learning Goals\n\n- Assign WCS astrometry to an image using ```astropy.wcs``` \n- Construct a PSF using ```astropy.modeling.model```\n- Convolve raw data with PSF using ```astropy.convolution```\n- Calculate polarization fraction and angle from Stokes I, Q, U data\n- Overplot quivers on the image\n\n## Keywords\nmodeling, convolution, coordinates, WCS, FITS, radio astronomy, matplotlib, colorbar\n\n## Summary\nIn this tutorial, we will:\n\n[1. Load and examine the FITS file](#1.-Load-and-examine-the-FITS-file)\n\n[2. Set up astrometry coordinates](#2.-Set-up-astrometry-coordinates)\n\n[3. Prepare a Point Spread Function (PSF)](#3.-Prepare-a-Point-Spread-Function-(PSF))\n\n>[3.a How to do this without astropy kernels](#3.a-How-to-do-this-without-astropy-kernels)\n\n[4. Convolve image with PSF](#4.-Convolve-image-with-PSF)\n\n[5. Convolve Stokes Q and U images](#5.-Convolve-Stokes-Q-and-U-images)\n\n[6. Calculate polarization angle and fraction for quiver plot](#6.-Calculate-polarization-angle-and-fraction-for-quiver-plot)",
"_____no_output_____"
]
],
[
[
"from astropy.utils.data import download_file\nfrom astropy.io import fits\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\n\nfrom astropy.convolution import Gaussian2DKernel\nfrom astropy.modeling.models import Lorentz1D\nfrom astropy.convolution import convolve_fft\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## 1. Load and examine the FITS file\n\nHere we begin with 2-dimensional data that were stored in FITS format from some simulations. We have Stokes I, Q, and U maps. We'll first load a FITS file and examine the header.",
"_____no_output_____"
]
],
[
[
"file_i = download_file(\n 'http://data.astropy.org/tutorials/synthetic-images/synchrotron_i_lobe_0700_150MHz_sm.fits', \n cache=True)\nhdulist = fits.open(file_i)\nhdulist.info()\n\nhdu = hdulist['NN_EMISSIVITY_I_LOBE_150.0MHZ']\nhdu.header",
"_____no_output_____"
]
],
[
[
"We can see that this FITS file, which was created in [yt](https://yt-project.org/), has x and y coordinates in physical units (cm). We want to convert them into sky coordinates. Before we proceed, let's find out the range of the data and plot a histogram. ",
"_____no_output_____"
]
],
[
[
"print(hdu.data.max())\nprint(hdu.data.min())\nnp.seterr(divide='ignore') #suppress the warnings raised by taking log10 of data with zeros\nplt.hist(np.log10(hdu.data.flatten()), range=(-3, 2), bins=100);",
"_____no_output_____"
]
],
[
[
"Once we know the range of the data, we can do a visualization with the proper range (```vmin``` and ```vmax```).",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(6,12))\nfig.add_subplot(111)\n\n# We plot it in log-scale and add a small number to avoid nan values. \nplt.imshow(np.log10(hdu.data+1E-3), vmin=-1, vmax=1, origin='lower')",
"_____no_output_____"
]
],
[
[
"## 2. Set up astrometry coordinates\n\nFrom the header, we know that the x and y axes are in centimeter. However, in an observation we usually have RA and Dec. To convert physical units to sky coordinates, we will need to make some assumptions about where the object is located, i.e. the distance to the object and the central RA and Dec. ",
"_____no_output_____"
]
],
[
[
"# distance to the object\ndist_obj = 200*u.Mpc\n\n# We have the RA in hh:mm:ss and DEC in dd:mm:ss format. \n# We will use Skycoord to convert them into degrees later.\nra_obj = '19h59m28.3566s'\ndec_obj = '+40d44m02.096s'",
"_____no_output_____"
]
],
[
[
"Here we convert the pixel scale from cm to degrees by dividing by the distance to the object (small-angle approximation).",
"_____no_output_____"
]
],
[
[
"cdelt1 = ((hdu.header['CDELT1']*u.cm/dist_obj.to('cm'))*u.rad).to('deg')\ncdelt2 = ((hdu.header['CDELT2']*u.cm/dist_obj.to('cm'))*u.rad).to('deg')\nprint(cdelt1, cdelt2)",
"_____no_output_____"
]
],
[
[
"Use ```astropy.wcs.WCS``` to prepare a FITS header.",
"_____no_output_____"
]
],
[
[
"w = WCS(naxis=2)\n\n# reference pixel coordinate\nw.wcs.crpix = [hdu.data.shape[0]/2,hdu.data.shape[1]/2]\n\n# sizes of the pixel in degrees\nw.wcs.cdelt = [-cdelt1.base, cdelt2.base]\n\n# converting ra and dec into degrees\nc = SkyCoord(ra_obj, dec_obj)\nw.wcs.crval = [c.ra.deg, c.dec.deg]\n\n# the units of the axes are in degrees\nw.wcs.cunit = ['deg', 'deg']",
"_____no_output_____"
]
],
[
[
"Now we can convert the WCS coordinates into a header and update the HDU.",
"_____no_output_____"
]
],
[
[
"wcs_header = w.to_header()\nhdu.header.update(wcs_header)",
"_____no_output_____"
]
],
[
[
"Let's take a look at the header. ```CDELT1```, ```CDELT2```, ```CUNIT1```, ```CUNIT2```, ```CRVAL1```, and ```CRVAL2``` are in sky coordinates now.",
"_____no_output_____"
]
],
[
[
"hdu.header",
"_____no_output_____"
],
[
"wcs = WCS(hdu.header)\n\nfig = plt.figure(figsize=(6,12))\nfig.add_subplot(111, projection=wcs)\nplt.imshow(np.log10(hdu.data+1e-3), vmin=-1, vmax=1, origin='lower')\nplt.xlabel('RA')\nplt.ylabel('Dec')",
"_____no_output_____"
]
],
[
[
"Now we have the sky coordinates for the image!",
"_____no_output_____"
],
[
"## 3. Prepare a Point Spread Function (PSF)",
"_____no_output_____"
],
[
"Simple PSF kernels are included in ```astropy.convolution```. We'll use ```astropy.convolution.Gaussian2DKernel``` here.\nFirst we need to set the telescope resolution. For a 2D Gaussian, we can calculate sigma in pixels by using our pixel scale keyword ```cdelt2``` from above.",
"_____no_output_____"
]
],
[
[
"# assume our telescope has 1 arcsecond resolution\ntelescope_resolution = 1*u.arcsecond\n\n# calculate the sigma in pixels. \n# since cdelt is in degrees, we use _.to('deg')\nsigma = telescope_resolution.to('deg')/cdelt2",
"_____no_output_____"
],
[
"# By default, the Gaussian kernel will go to 4 sigma\n# in each direction\npsf = Gaussian2DKernel(sigma)\n\n# let's take a look:\nplt.imshow(psf.array.value)",
"_____no_output_____"
]
],
[
[
"## 3.a How to do this without astropy kernels",
"_____no_output_____"
],
[
"Maybe your PSF is more complicated. Here's an alternative way to do this, using a 2D Lorentzian",
"_____no_output_____"
]
],
[
[
"# set FWHM and psf grid\ntelescope_resolution = 1*u.arcsecond\ngamma = telescope_resolution.to('deg')/cdelt2\nx_grid = np.outer(np.linspace(-gamma*4,gamma*4,int(8*gamma)),np.ones(int(8*gamma)))\nr_grid = np.sqrt(x_grid**2 + np.transpose(x_grid**2))\nlorentzian = Lorentz1D(fwhm=2*gamma)\n\n# extrude a 2D azimuthally symmetric PSF\nlorentzian_psf = lorentzian(r_grid)\n\n# normalization\nlorentzian_psf /= np.sum(lorentzian_psf)\n\n# let's take a look again:\nplt.imshow(lorentzian_psf.value, interpolation='none')",
"_____no_output_____"
]
],
[
[
"## 4. Convolve image with PSF",
"_____no_output_____"
],
[
"Here we use ```astropy.convolution.convolve_fft``` to convolve the image. This routine uses a Fourier transform for faster calculation, and it is particularly fast here since our data size is $2^n$. Using an FFT, however, causes boundary effects, so we need to specify how to handle the boundary. Here we choose to \"wrap\" the data, which means making the data periodic. ",
"_____no_output_____"
]
],
[
[
"convolved_image = convolve_fft(hdu.data, psf, boundary='wrap')",
"_____no_output_____"
],
[
"# Put a psf at the corner of the image\ndelta_x_psf=100 # number of pixels from the edges\nxmin, xmax = -psf.shape[1]-delta_x_psf, -delta_x_psf\nymin, ymax = delta_x_psf, delta_x_psf+psf.shape[0]\nconvolved_image[xmin:xmax, ymin:ymax] = psf.array/psf.array.max()*10",
"_____no_output_____"
]
],
[
[
"Now let's take a look at the convolved image.",
"_____no_output_____"
]
],
[
[
"wcs = WCS(hdu.header)\nfig = plt.figure(figsize=(8,12))\ni_plot = fig.add_subplot(111, projection=wcs)\nplt.imshow(np.log10(convolved_image+1e-3), vmin=-1, vmax=1.0, origin='lower')#, cmap=plt.cm.viridis)\nplt.xlabel('RA')\nplt.ylabel('Dec')\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"## 5. Convolve Stokes Q and U images",
"_____no_output_____"
]
],
[
[
"hdulist.info()",
"_____no_output_____"
],
[
"file_q = download_file(\n 'http://data.astropy.org/tutorials/synthetic-images/synchrotron_q_lobe_0700_150MHz_sm.fits', \n cache=True)\nhdulist = fits.open(file_q)\nhdu_q = hdulist['NN_EMISSIVITY_Q_LOBE_150.0MHZ']\n\nfile_u = download_file(\n 'http://data.astropy.org/tutorials/synthetic-images/synchrotron_u_lobe_0700_150MHz_sm.fits', \n cache=True)\nhdulist = fits.open(file_u)\nhdu_u = hdulist['NN_EMISSIVITY_U_LOBE_150.0MHZ']\n\n# Update the header with the wcs_header we created earlier\nhdu_q.header.update(wcs_header)\nhdu_u.header.update(wcs_header)\n\n# Convolve the images with the the psf\nconvolved_image_q = convolve_fft(hdu_q.data, psf, boundary='wrap')\nconvolved_image_u = convolve_fft(hdu_u.data, psf, boundary='wrap')",
"_____no_output_____"
]
],
[
[
"Let's plot the Q and U images.",
"_____no_output_____"
]
],
[
[
"wcs = WCS(hdu.header)\nfig = plt.figure(figsize=(16,12))\nfig.add_subplot(121, projection=wcs)\nplt.imshow(convolved_image_q, cmap='seismic', vmin=-0.5, vmax=0.5, origin='lower')#, cmap=plt.cm.viridis)\nplt.xlabel('RA')\nplt.ylabel('Dec')\nplt.colorbar()\n\nfig.add_subplot(122, projection=wcs)\nplt.imshow(convolved_image_u, cmap='seismic', vmin=-0.5, vmax=0.5, origin='lower')#, cmap=plt.cm.viridis)\n\nplt.xlabel('RA')\nplt.ylabel('Dec')\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"## 6. Calculate polarization angle and fraction for quiver plot ",
"_____no_output_____"
],
[
"Note that rotating Stokes Q and U maps requires changing the signs of both. Here we assume that the Stokes Q and U maps were calculated defining the y/declination axis as vertical, such that Q is positive for polarization vectors along the x/right-ascension axis.",
"_____no_output_____"
]
],
[
[
"# First, we plot the background image\nfig = plt.figure(figsize=(8,16))\ni_plot = fig.add_subplot(111, projection=wcs)\ni_plot.imshow(np.log10(convolved_image+1e-3), vmin=-1, vmax=1, origin='lower')\n\n# ranges of the axis\nxx0, xx1 = i_plot.get_xlim()\nyy0, yy1 = i_plot.get_ylim()\n\n# binning factor\nfactor = [64, 66]\n\n# re-binned number of points in each axis\nnx_new = convolved_image.shape[1] // factor[0]\nny_new = convolved_image.shape[0] // factor[1]\n\n# These are the positions of the quivers\nX,Y = np.meshgrid(np.linspace(xx0,xx1,nx_new,endpoint=True),\n np.linspace(yy0,yy1,ny_new,endpoint=True))\n\n# bin the data\nI_bin = convolved_image.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)\nQ_bin = convolved_image_q.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)\nU_bin = convolved_image_u.reshape(nx_new, factor[0], ny_new, factor[1]).sum(3).sum(1)\n\n# polarization angle\npsi = 0.5*np.arctan2(U_bin, Q_bin)\n\n# polarization fraction\nfrac = np.sqrt(Q_bin**2+U_bin**2)/I_bin\n\n# mask for low signal area\nmask = I_bin < 0.1\n\nfrac[mask] = 0\npsi[mask] = 0\n\npixX = frac*np.cos(psi) # X-vector \npixY = frac*np.sin(psi) # Y-vector\n\n# keyword arguments for quiverplots\nquiveropts = dict(headlength=0, headwidth=1, pivot='middle')\ni_plot.quiver(X, Y, pixX, pixY, scale=8, **quiveropts)",
"_____no_output_____"
]
],
[
[
"## Exercise",
"_____no_output_____"
],
[
"### Convert the units of the data from Jy/arcsec^2 to Jy/beam",
"_____no_output_____"
],
[
"The intensity of the data is given in units of Jy/arcsec^2. Observational data usually have the intensity in units of Jy/beam. Assuming a beam size, or taking the PSF we created earlier, you can convert the data into Jy/beam. One possible approach is sketched in the cell below.",
"_____no_output_____"
]
]
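,
[
[
"# One possible approach to the exercise (a sketch, not a unique solution).\n# Assume a Gaussian beam whose FWHM equals the 1 arcsecond telescope resolution used above.\n# For a Gaussian beam, beam area = pi * FWHM**2 / (4 * ln 2), so\n# intensity [Jy/beam] = intensity [Jy/arcsec**2] * beam area [arcsec**2].\nbeam_fwhm = 1*u.arcsecond\nbeam_area = np.pi * beam_fwhm**2 / (4 * np.log(2))\nprint('Beam area:', beam_area)\n\nconvolved_image_Jy_per_beam = convolved_image * beam_area.to(u.arcsec**2).value\nprint('Peak intensity: {:.2f} Jy/beam'.format(convolved_image_Jy_per_beam.max()))",
"_____no_output_____"
]
]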
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
cb73e8221834f7deafdb52347ff8a55a2f870f75 | 161,862 | ipynb | Jupyter Notebook | Python_Stock/Candlestick_Patterns/Candlestick_Upside_Gap_Two_Crows.ipynb | chunsj/Stock_Analysis_For_Quant | 5f28ef9537885a695245d26f3010592a29d45a34 | [
"MIT"
]
| 962 | 2019-07-17T09:57:41.000Z | 2022-03-29T01:55:20.000Z | Python_Stock/Candlestick_Patterns/Candlestick_Upside_Gap_Two_Crows.ipynb | chunsj/Stock_Analysis_For_Quant | 5f28ef9537885a695245d26f3010592a29d45a34 | [
"MIT"
]
| 5 | 2020-04-29T16:54:30.000Z | 2022-02-10T02:57:30.000Z | Python_Stock/Candlestick_Patterns/Candlestick_Upside_Gap_Two_Crows.ipynb | chunsj/Stock_Analysis_For_Quant | 5f28ef9537885a695245d26f3010592a29d45a34 | [
"MIT"
]
| 286 | 2019-08-04T10:37:58.000Z | 2022-03-28T06:31:56.000Z | 218.437247 | 56,305 | 0.859726 | [
[
[
"# Candlestick Upside Gap Two Crows",
"_____no_output_____"
],
[
"https://www.investopedia.com/terms/u/upside-gap-two-crows.asp",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport talib\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# yahoo finance is used to fetch data \nimport yfinance as yf\nyf.pdr_override()",
"_____no_output_____"
],
[
"# input\nsymbol = 'ICLR'\nstart = '2012-01-01'\nend = '2021-10-22'\n\n# Read data \ndf = yf.download(symbol,start,end)\n\n# View Columns\ndf.head()",
"[*********************100%***********************] 1 of 1 completed\n"
]
],
[
[
"## Candlestick with Upside Gap Two Crows",
"_____no_output_____"
]
],
[
[
"from matplotlib import dates as mdates\nimport datetime as dt\n\ndfc = df.copy()\ndfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']\n#dfc = dfc.dropna()\ndfc = dfc.reset_index()\ndfc['Date'] = pd.to_datetime(dfc['Date'])\ndfc['Date'] = dfc['Date'].apply(mdates.date2num)\ndfc.head()",
"_____no_output_____"
],
[
"from mplfinance.original_flavor import candlestick_ohlc\n\nfig = plt.figure(figsize=(14,10))\nax = plt.subplot(2, 1, 1)\ncandlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)\nax.xaxis_date()\nax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\nax.grid(True, which='both')\nax.minorticks_on()\naxv = ax.twinx()\ncolors = dfc.VolumePositive.map({True: 'g', False: 'r'})\naxv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)\naxv.axes.yaxis.set_ticklabels([])\naxv.set_ylim(0, 3*df.Volume.max())\nax.set_title('Stock '+ symbol +' Closing Price')\nax.set_ylabel('Price')",
"_____no_output_____"
],
[
"two_crows = talib.CDLUPSIDEGAP2CROWS(df['Open'], df['High'], df['Low'], df['Close'])\n\ntwo_crows = two_crows[two_crows != 0]",
"_____no_output_____"
],
[
"df['two_crows'] = talib.CDLUPSIDEGAP2CROWS(df['Open'], df['High'], df['Low'], df['Close'])",
"_____no_output_____"
],
[
"df.loc[df['two_crows'] !=0]",
"_____no_output_____"
],
[
"df['Adj Close'].loc[df['two_crows'] !=0]",
"_____no_output_____"
],
[
"df['two_crows'].loc[df['two_crows'] !=0].index",
"_____no_output_____"
],
[
"two_crows",
"_____no_output_____"
],
[
"two_crows.index",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(20,16))\nax = plt.subplot(2, 1, 1)\ncandlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)\nax.xaxis_date()\nax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\nax.grid(True, which='both')\nax.minorticks_on()\naxv = ax.twinx()\nax.plot_date(df['Adj Close'].loc[df['two_crows'] !=0].index, df['Adj Close'].loc[df['two_crows'] !=0],\n 'Dc', # marker style 'o', color 'g'\n fillstyle='none', # circle is not filled (with color)\n ms=10.0) \ncolors = dfc.VolumePositive.map({True: 'g', False: 'r'})\naxv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)\naxv.axes.yaxis.set_ticklabels([])\naxv.set_ylim(0, 3*df.Volume.max())\nax.set_title('Stock '+ symbol +' Closing Price')\nax.set_ylabel('Price')",
"_____no_output_____"
]
],
[
[
"## Plot Certain dates",
"_____no_output_____"
]
],
[
[
"df = df['2019-04-20':'2019-05-05']\ndfc = df.copy()\ndfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']\n#dfc = dfc.dropna()\ndfc = dfc.reset_index()\ndfc['Date'] = pd.to_datetime(dfc['Date'])\ndfc['Date'] = dfc['Date'].apply(mdates.date2num)\ndfc.head()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(20,16))\nax = plt.subplot(2, 1, 1)\nax.set_facecolor('white')\ncandlestick_ohlc(ax,dfc.values, width=0.5, colorup='black', colordown='red', alpha=1.0)\nax.xaxis_date()\nax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\n#ax.grid(True, which='both')\n#ax.minorticks_on()\naxv = ax.twinx()\nax.plot_date(df['Adj Close'].loc[df['two_crows'] !=0].index, df['Adj Close'].loc[df['two_crows'] !=0],\n '*y', # marker style 'o', color 'g'\n fillstyle='none', # circle is not filled (with color)\n ms=40.0) \ncolors = dfc.VolumePositive.map({True: 'black', False: 'red'})\naxv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)\naxv.axes.yaxis.set_ticklabels([])\naxv.set_ylim(0, 3*df.Volume.max())\nax.set_title('Stock '+ symbol +' Closing Price')\nax.set_ylabel('Price')",
"_____no_output_____"
]
],
[
[
"# Highlight Candlestick",
"_____no_output_____"
]
],
[
[
"from matplotlib.dates import date2num\nfrom datetime import datetime\n\nfig = plt.figure(figsize=(20,16))\nax = plt.subplot(2, 1, 1)\ncandlestick_ohlc(ax,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)\nax.xaxis_date()\nax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\n#ax.grid(True, which='both')\n#ax.minorticks_on()\naxv = ax.twinx()\nax.axvspan(date2num(datetime(2019,4,28)), date2num(datetime(2019,4,30)), \n label=\"Upside Gap Two Crows Bearish\",color=\"red\", alpha=0.3)\nax.legend()\ncolors = dfc.VolumePositive.map({True: 'g', False: 'r'})\naxv.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)\naxv.axes.yaxis.set_ticklabels([])\naxv.set_ylim(0, 3*df.Volume.max())\nax.set_title('Stock '+ symbol +' Closing Price')\nax.set_ylabel('Price')",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb73ec07a7c890869d55bb94086971e88a3faabd | 441,612 | ipynb | Jupyter Notebook | notebooks/ih-slope-analysis.ipynb | shengwanhui/Lab-Analysis | fddccbdcdc0845893c6033b421cb60b6b2d219e6 | [
"MIT"
]
| 4 | 2021-03-29T18:10:25.000Z | 2021-04-19T02:44:33.000Z | notebooks/ih-slope-analysis.ipynb | shengwanhui/Lab-Analysis | fddccbdcdc0845893c6033b421cb60b6b2d219e6 | [
"MIT"
]
| 4 | 2021-02-25T00:17:11.000Z | 2021-03-17T14:25:12.000Z | notebooks/ih-slope-analysis.ipynb | shengwanhui/Lab-Analysis | fddccbdcdc0845893c6033b421cb60b6b2d219e6 | [
"MIT"
]
| null | null | null | 537.240876 | 34,528 | 0.938641 | [
[
[
"# Slope Analysis\n\nThis project uses the change in holding current slope to identify drug responders.",
"_____no_output_____"
],
[
"## Analysis Steps\n\nThe `getBaselineAndMaxDrugSlope` function smooths the raw holding current in an ABF with a moving-window average whose width is set by `filterSize`, then returns the baseline slope and the drug slope (an illustrative sketch of this procedure, not the actual `slopeTools` code, is shown after the setup cell below).\n\nThe _baseline slope_ is calculated as the linear regression slope over the 3-minute period before drug onset.\n\nIn addition, the smoothed data are separated into segments of n = `regressionSize` data points, and the linear regression slope is calculated for each segment.\n\nThe _peak drug slope_ is the most negative segment slope during the chosen drug period (1-5 minutes after drug onset, in this case).",
"_____no_output_____"
],
[
"## Set-Up the Environment",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\nimport sys\nsys.path.append(\"../src\")\nfrom os.path import basename\nimport slopeTools\nimport plotTools\nimport statsTools\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
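[
[
"# Illustrative sketch of the analysis described in 'Analysis Steps' above. This is NOT the actual\n# slopeTools implementation, just the assumed logic written out on plain numpy arrays.\n# 'times' (minutes), 'currents' (holding current per sweep, pA) and 'drugStartTime' are hypothetical inputs.\nimport numpy as np\nfrom scipy import stats\n\ndef sketchBaselineAndMaxDrugSlope(times, currents, drugStartTime, filterSize=10, regressionSize=17):\n    # smooth the holding current with a moving-window average of filterSize sweeps\n    kernel = np.ones(filterSize) / filterSize\n    smoothed = np.convolve(currents, kernel, mode='same')\n\n    # baseline slope: linear regression over the 3 minutes before drug onset\n    baselineMask = (times >= drugStartTime - 3) & (times < drugStartTime)\n    baselineSlope = stats.linregress(times[baselineMask], smoothed[baselineMask]).slope\n\n    # drug slope: most negative slope of regressionSize-point segments within 1-5 minutes after onset\n    drugIndexes = np.where((times >= drugStartTime + 1) & (times <= drugStartTime + 5))[0]\n    segmentSlopes = []\n    for i in range(drugIndexes[0], drugIndexes[-1] - regressionSize + 1):\n        segment = slice(i, i + regressionSize)\n        segmentSlopes.append(stats.linregress(times[segment], smoothed[segment]).slope)\n    return baselineSlope, min(segmentSlopes)\n\n# example call (hypothetical arrays):\n# baselineSlope, drugSlope = sketchBaselineAndMaxDrugSlope(times, currents, drugStartTime)",
"_____no_output_____"
]
],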
[
[
"## Define ABF Files and Filter Settings\n\nThe user can list the ABF files they want to analyze",
"_____no_output_____"
]
],
[
[
"#opto: \nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21124006.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21124013.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21124020.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21124026.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21124033.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21126007.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21126016.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21126030.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21126050.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21126056.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21218033.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21219006.abf\"\n]",
"_____no_output_____"
]
],
[
[
"#opto+l368: \nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21218077.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21219013.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21219039.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21219069.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21323006.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21323036.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21323047.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21325007.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-10-12 OT-ChR2/21325019.abf\"\n]",
"_____no_output_____"
],
[
"#10nM TGOT\nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804007.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804030.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804043.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804048.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804060.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20804066.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805008.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805029.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805035.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20811011.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20811021.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20817012.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20831011.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20831017.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/2021_05_14_DIC1_0008.abf\"\n]",
"_____no_output_____"
],
[
"\n#10nM TGOT+L368\nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805041.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805047.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20805053.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20806018.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20806036.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20811034.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20811041.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20817020.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20817026.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20817032.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20817039.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20901022.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20901035.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-28 10nM TGOT on PVT/20902011.abf\", \n \n]",
"_____no_output_____"
],
[
"#50nM TGOT\nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20723038.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20723029.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724011.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724017.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724023.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724027.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724033.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/20724045.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/2021_05_13_DIC1_0005.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/2021_05_13_DIC1_0021.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/2021_05_13_DIC1_0025.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-23 50nM TGOT on PVT/2021_05_13_DIC3_0050.abf\"\n]",
"_____no_output_____"
],
[
"#50nM TGOT+L368\nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20727010.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20727026.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20727032.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20727039.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20728005.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20728011.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/20728026.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-07-27 50nM TGOT w L368/2021_05_13_DIC3_0043.abf\"\n]",
"_____no_output_____"
],
[
"#50nM TGOT\nabfFilePaths = [\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20n19022.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20n19029.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20n19036.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20n19052.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d03006.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d03032.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d03055.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04012.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04023.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04030.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04038.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04045.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d04052.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d16012.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d16020.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d16035.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d17022.abf\",\n \"X:/Data/C57/TGOT on PVT/2020-11-18 TGOT on PVT-NAc neurons/20d17028.abf\"\n]",
"_____no_output_____"
],
[
"The user can decide the parameters they want for the analysis.\n\n`filterSize` sets the number of points (sweeps) for the moving-window average.\n\n`regressionSize` sets the number of smoothed data points used to calculate linear regression slopes during the drug range.",
"_____no_output_____"
]
],
[
[
"filterSize = 10\nregressionSize = 17",
"_____no_output_____"
]
],
[
[
"## Analyze All ABFs",
"_____no_output_____"
]
],
[
[
"baselineSlopes = []\ndrugSlopes = []\nabfIDs = []\nfor abfFilePath in abfFilePaths:\n baselineSlope, drugSlope = slopeTools.getBaselineAndMaxDrugSlope(abfFilePath, filterSize, regressionSize)\n baselineSlopes.append(baselineSlope)\n drugSlopes.append(drugSlope)\n abfIDs.append(basename(abfFilePath))",
"Baseline slope: 0.1487391163752629 pA/min\nDrug slope: -1.3098633569829607 pA/min\n"
]
],
[
[
"## Compare Baseline vs. Drug Slopes",
"_____no_output_____"
],
[
"The user can plot the baseline slope and the peak drug slope of each cell, and report the p-value in the title by performing a paired t-test between baseline slopes and peak drug slopes.",
"_____no_output_____"
]
],
[
[
"plotTools.plotPairs(baselineSlopes, drugSlopes, \"slopes\")\n",
"_____no_output_____"
]
],
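[
[
"# The p-value reported by plotTools.plotPairs above comes from a paired t-test (per the text);\n# this sketch shows the same comparison computed directly with scipy.\nfrom scipy import stats\n\ntStat, pValue = stats.ttest_rel(baselineSlopes, drugSlopes)\nprint(f'paired t-test: t = {tStat:.3f}, p = {pValue:.5f}')",
"_____no_output_____"
]
],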
[
[
"## Assess Responsiveness of All Cells",
"_____no_output_____"
],
[
"Generate a scatter plot showing the slope difference of each cell.\nThis plot can help users decide the desired threshold (red dotted line) to separate responders from non-responders.",
"_____no_output_____"
]
],
[
[
"slopeThreshold = -1.5 \ndrugEffects = []\nfor i in range(len(abfIDs)):\n drugEffects.append(drugSlopes[i] - baselineSlopes[i])\n\nplt.figure (figsize=(6, 4))\nplt.ylabel(\"Slope Difference (pA/min)\")\nplt.plot(abfIDs, drugEffects, 'o', color = \"b\")\nplt.gca().set_xticklabels(abfIDs, rotation=45, ha='right')\nplt.axhline(slopeThreshold, color='r', ls='--')\nplt.show()",
"<ipython-input-6-05cac887f3a1>:9: UserWarning: FixedFormatter should only be used together with FixedLocator\n plt.gca().set_xticklabels(abfIDs, rotation=45, ha='right')\n"
]
],
[
[
"## Define Cells as Responsive vs. Non-Responsive",
"_____no_output_____"
],
[
"The user can define the <b>slopeThreshold</b>: the difference between the peak drug slope and the baseline slope must be more negative than this value for a cell to count as a responder.",
"_____no_output_____"
]
],
[
[
"drugEffects=statsTools.responderLessThanThreshold(abfIDs, drugEffects, slopeThreshold)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
]
|
cb73f55d6fcc00c65fb7d0d1ac353f9297edb11e | 180,767 | ipynb | Jupyter Notebook | 1_Linear_Regression/.ipynb_checkpoints/02-RegressionBatchKeras-checkpoint.ipynb | zht007/tensorflow-practice | 26123ee03f644f9c43f046cd1596d6c0be1e6dd6 | [
"MIT"
]
| 97 | 2019-03-18T10:01:44.000Z | 2022-02-10T06:31:48.000Z | 1_Linear_Regression/.ipynb_checkpoints/02-RegressionBatchKeras-checkpoint.ipynb | zht007/tensorflow-practice | 26123ee03f644f9c43f046cd1596d6c0be1e6dd6 | [
"MIT"
]
| null | null | null | 1_Linear_Regression/.ipynb_checkpoints/02-RegressionBatchKeras-checkpoint.ipynb | zht007/tensorflow-practice | 26123ee03f644f9c43f046cd1596d6c0be1e6dd6 | [
"MIT"
]
| 73 | 2019-05-28T12:25:55.000Z | 2022-02-17T05:26:12.000Z | 62.376467 | 22,459 | 0.728169 | [
[
[
"# TensorFlow Regression Example",
"_____no_output_____"
],
[
"## Creating Data",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"# 1 Million Points\nx_data = np.linspace(0.0,10.0,1000000)",
"_____no_output_____"
],
[
"noise = np.random.randn(len(x_data))",
"_____no_output_____"
],
[
"# y = mx + b + noise_levels\nb = 5\n\ny_true = (0.5 * x_data ) + 5 + noise",
"_____no_output_____"
],
[
"my_data = pd.concat([pd.DataFrame(data=x_data,columns=['X Data']),pd.DataFrame(data=y_true,columns=['Y'])],axis=1)",
"_____no_output_____"
],
[
"my_data.head()",
"_____no_output_____"
],
[
"my_data.sample(n=250).plot(kind='scatter',x='X Data',y='Y')",
"_____no_output_____"
]
],
[
[
"# TensorFlow\n## Batch Size\n\nWe will take the data in batches (1,000,000 points is a lot to pass in at once)",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"# Random 10 points to grab\nbatch_size = 8",
"_____no_output_____"
]
],
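[
[
"As a quick illustration of the batching idea (mirroring what the training session further down does), the next cell sketches how one batch of `batch_size` random points would be pulled out of the one-million-point dataset with NumPy indexing.",
"_____no_output_____"
]
],
[
[
"# Sketch: draw one random batch of batch_size points from the full dataset.\n# The training session below repeats this kind of sampling on every step.\nbatch_index = np.random.randint(len(x_data), size=(batch_size))\nx_batch = x_data[batch_index]\ny_batch = y_true[batch_index]\nprint(x_batch.shape, y_batch.shape)",
"_____no_output_____"
]
],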
[
[
"** Variables **",
"_____no_output_____"
]
],
[
[
"w_tf = tf.Variable(np.random.uniform())\nb_tf = tf.Variable(np.random.uniform(1,10))",
"_____no_output_____"
]
],
[
[
"** Placeholders **",
"_____no_output_____"
]
],
[
[
"x_train = tf.placeholder(tf.float32,shape=(batch_size))\ny_train = tf.placeholder(tf.float32,shape=(batch_size))",
"_____no_output_____"
]
],
[
[
"** Graph **",
"_____no_output_____"
]
],
[
[
"y_hat = w_tf * x_train + b_tf",
"_____no_output_____"
]
],
[
[
"** Loss Function **",
"_____no_output_____"
]
],
[
[
"error = tf.reduce_sum((y_train - y_hat)**2)",
"_____no_output_____"
]
],
[
[
"** Optimizer **",
"_____no_output_____"
]
],
[
[
"optimizer = tf.train.GradientDescentOptimizer(0.001)\ntrain = optimizer.minimize(error)",
"_____no_output_____"
]
],
[
[
"** Initialize Variables **",
"_____no_output_____"
]
],
[
[
"init = tf.global_variables_initializer()",
"_____no_output_____"
]
],
[
[
"### Session",
"_____no_output_____"
]
],
[
[
"with tf.Session() as sess:\n sess.run(init)\n batchs = 1000\n for i in range(batchs):\n batch_index = np.random.randint(len(x_data),size=(batch_size))\n feed = {x_train:x_data[batch_index], y_train:y_true[batch_index]}\n sess.run(train,feed_dict = feed)\n final_w, final_b = sess.run([w_tf,b_tf])",
"_____no_output_____"
],
[
"final_w",
"_____no_output_____"
],
[
"final_b",
"_____no_output_____"
]
],
[
[
"### Results",
"_____no_output_____"
]
],
[
[
"my_data.sample(n=250).plot(kind='scatter',x='X Data',y='Y')\nplt.plot(x_data, final_w*x_data+final_b,'r')",
"_____no_output_____"
]
],
[
[
"## tf.keras API",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model",
"_____no_output_____"
]
],
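[
[
"A hedged sketch of how the same `y = w*x + b` regression might be expressed with the `Input`, `Dense`, and `Model` classes imported above; this cell is an illustrative addition, not code from the original course material, and the variable names are made up.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch (assumption): the same linear regression with the Keras functional API,\n# using only the Input/Dense/Model classes imported above.\ninputs = Input(shape=(1,))\noutputs = Dense(1)(inputs)  # a single Dense unit learns w and b\nkeras_model = Model(inputs=inputs, outputs=outputs)\nkeras_model.compile(optimizer='sgd', loss='mse')\n\n# Train briefly on a small random subsample to keep the sketch fast.\nsample_idx = np.random.randint(len(x_data), size=10000)\nx_sample = x_data[sample_idx].reshape(-1, 1)\ny_sample = y_true[sample_idx].reshape(-1, 1)\nkeras_model.fit(x_sample, y_sample, epochs=1, verbose=0)\nprint(keras_model.get_weights())",
"_____no_output_____"
]
],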
[
[
"## tf.estimator API\n\nMuch simpler API for basic tasks like regression! We'll talk about more abstractions like TF-Slim later on.",
"_____no_output_____"
]
],
[
[
"feat_cols = [tf.feature_column.numeric_column('x',shape=[1])]",
"_____no_output_____"
],
[
"estimator = tf.estimator.LinearRegressor(feature_columns=feat_cols)",
"INFO:tensorflow:Using default config.\nWARNING:tensorflow:Using temporary folder as model directory: /var/folders/q_/7b87rxq97d74fk63y0b6xcy80000gn/T/tmprx24qzla\nINFO:tensorflow:Using config: {'_model_dir': '/var/folders/q_/7b87rxq97d74fk63y0b6xcy80000gn/T/tmprx24qzla', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x10babff60>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n"
]
],
[
[
"### Train Test Split\n\nWe haven't actually performed a train test split yet! So let's do that on our data now and perform a more realistic version of a Regression Task",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"x_train, x_eval, y_train, y_eval = train_test_split(x_data,y_true,test_size=0.3, random_state = 101)",
"_____no_output_____"
],
[
"print(x_train.shape)\nprint(y_train.shape)\n\nprint(x_eval.shape)\nprint(y_eval.shape)",
"(700000,)\n(700000,)\n(300000,)\n(300000,)\n"
]
],
[
[
"### Set up Estimator Inputs",
"_____no_output_____"
]
],
[
[
"# Can also do .pandas_input_fn\ninput_func = tf.estimator.inputs.numpy_input_fn({'x':x_train},y_train,batch_size=4,num_epochs=None,shuffle=True)",
"_____no_output_____"
],
[
"train_input_func = tf.estimator.inputs.numpy_input_fn({'x':x_train},y_train,batch_size=4,num_epochs=1000,shuffle=False)",
"_____no_output_____"
],
[
"eval_input_func = tf.estimator.inputs.numpy_input_fn({'x':x_eval},y_eval,batch_size=4,num_epochs=1000,shuffle=False)",
"_____no_output_____"
]
],
[
[
"### Train the Estimator",
"_____no_output_____"
]
],
[
[
"estimator.train(input_fn=input_func,steps=1000)",
"INFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Saving checkpoints for 1 into C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt.\nINFO:tensorflow:loss = 221.003, step = 1\nINFO:tensorflow:global_step/sec: 910.556\nINFO:tensorflow:loss = 10.6791, step = 101 (0.111 sec)\nINFO:tensorflow:global_step/sec: 1007.43\nINFO:tensorflow:loss = 21.3457, step = 201 (0.099 sec)\nINFO:tensorflow:global_step/sec: 945.347\nINFO:tensorflow:loss = 11.026, step = 301 (0.106 sec)\nINFO:tensorflow:global_step/sec: 949.829\nINFO:tensorflow:loss = 10.8599, step = 401 (0.105 sec)\nINFO:tensorflow:global_step/sec: 1028.2\nINFO:tensorflow:loss = 13.3654, step = 501 (0.097 sec)\nINFO:tensorflow:global_step/sec: 1033.52\nINFO:tensorflow:loss = 2.5809, step = 601 (0.097 sec)\nINFO:tensorflow:global_step/sec: 1022.91\nINFO:tensorflow:loss = 4.30405, step = 701 (0.098 sec)\nINFO:tensorflow:global_step/sec: 1044.28\nINFO:tensorflow:loss = 4.66325, step = 801 (0.096 sec)\nINFO:tensorflow:global_step/sec: 992.324\nINFO:tensorflow:loss = 10.0263, step = 901 (0.101 sec)\nINFO:tensorflow:Saving checkpoints for 1000 into C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt.\nINFO:tensorflow:Loss for final step: 8.46343.\n"
]
],
[
[
"### Evaluation",
"_____no_output_____"
]
],
[
[
"train_metrics = estimator.evaluate(input_fn=train_input_func,steps=1000)",
"INFO:tensorflow:Starting evaluation at 2017-08-29-07:31:13\nINFO:tensorflow:Restoring parameters from C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt-1000\nINFO:tensorflow:Evaluation [1/1000]\nINFO:tensorflow:Evaluation [2/1000]\nINFO:tensorflow:Evaluation [3/1000]\nINFO:tensorflow:Evaluation [4/1000]\nINFO:tensorflow:Evaluation [5/1000]\nINFO:tensorflow:Evaluation [6/1000]\nINFO:tensorflow:Evaluation [7/1000]\nINFO:tensorflow:Evaluation [8/1000]\nINFO:tensorflow:Evaluation [9/1000]\nINFO:tensorflow:Evaluation [10/1000]\nINFO:tensorflow:Evaluation [11/1000]\nINFO:tensorflow:Evaluation [12/1000]\nINFO:tensorflow:Evaluation [13/1000]\nINFO:tensorflow:Evaluation [14/1000]\nINFO:tensorflow:Evaluation [15/1000]\nINFO:tensorflow:Evaluation [16/1000]\nINFO:tensorflow:Evaluation [17/1000]\nINFO:tensorflow:Evaluation [18/1000]\nINFO:tensorflow:Evaluation [19/1000]\nINFO:tensorflow:Evaluation [20/1000]\nINFO:tensorflow:Evaluation [21/1000]\nINFO:tensorflow:Evaluation [22/1000]\nINFO:tensorflow:Evaluation [23/1000]\nINFO:tensorflow:Evaluation [24/1000]\nINFO:tensorflow:Evaluation [25/1000]\nINFO:tensorflow:Evaluation [26/1000]\nINFO:tensorflow:Evaluation [27/1000]\nINFO:tensorflow:Evaluation [28/1000]\nINFO:tensorflow:Evaluation [29/1000]\nINFO:tensorflow:Evaluation [30/1000]\nINFO:tensorflow:Evaluation [31/1000]\nINFO:tensorflow:Evaluation [32/1000]\nINFO:tensorflow:Evaluation [33/1000]\nINFO:tensorflow:Evaluation [34/1000]\nINFO:tensorflow:Evaluation [35/1000]\nINFO:tensorflow:Evaluation [36/1000]\nINFO:tensorflow:Evaluation [37/1000]\nINFO:tensorflow:Evaluation [38/1000]\nINFO:tensorflow:Evaluation [39/1000]\nINFO:tensorflow:Evaluation [40/1000]\nINFO:tensorflow:Evaluation [41/1000]\nINFO:tensorflow:Evaluation [42/1000]\nINFO:tensorflow:Evaluation [43/1000]\nINFO:tensorflow:Evaluation [44/1000]\nINFO:tensorflow:Evaluation [45/1000]\nINFO:tensorflow:Evaluation [46/1000]\nINFO:tensorflow:Evaluation [47/1000]\nINFO:tensorflow:Evaluation [48/1000]\nINFO:tensorflow:Evaluation [49/1000]\nINFO:tensorflow:Evaluation [50/1000]\nINFO:tensorflow:Evaluation [51/1000]\nINFO:tensorflow:Evaluation [52/1000]\nINFO:tensorflow:Evaluation [53/1000]\nINFO:tensorflow:Evaluation [54/1000]\nINFO:tensorflow:Evaluation [55/1000]\nINFO:tensorflow:Evaluation [56/1000]\nINFO:tensorflow:Evaluation [57/1000]\nINFO:tensorflow:Evaluation [58/1000]\nINFO:tensorflow:Evaluation [59/1000]\nINFO:tensorflow:Evaluation [60/1000]\nINFO:tensorflow:Evaluation [61/1000]\nINFO:tensorflow:Evaluation [62/1000]\nINFO:tensorflow:Evaluation [63/1000]\nINFO:tensorflow:Evaluation [64/1000]\nINFO:tensorflow:Evaluation [65/1000]\nINFO:tensorflow:Evaluation [66/1000]\nINFO:tensorflow:Evaluation [67/1000]\nINFO:tensorflow:Evaluation [68/1000]\nINFO:tensorflow:Evaluation [69/1000]\nINFO:tensorflow:Evaluation [70/1000]\nINFO:tensorflow:Evaluation [71/1000]\nINFO:tensorflow:Evaluation [72/1000]\nINFO:tensorflow:Evaluation [73/1000]\nINFO:tensorflow:Evaluation [74/1000]\nINFO:tensorflow:Evaluation [75/1000]\nINFO:tensorflow:Evaluation [76/1000]\nINFO:tensorflow:Evaluation [77/1000]\nINFO:tensorflow:Evaluation [78/1000]\nINFO:tensorflow:Evaluation [79/1000]\nINFO:tensorflow:Evaluation [80/1000]\nINFO:tensorflow:Evaluation [81/1000]\nINFO:tensorflow:Evaluation [82/1000]\nINFO:tensorflow:Evaluation [83/1000]\nINFO:tensorflow:Evaluation [84/1000]\nINFO:tensorflow:Evaluation [85/1000]\nINFO:tensorflow:Evaluation [86/1000]\nINFO:tensorflow:Evaluation [87/1000]\nINFO:tensorflow:Evaluation [88/1000]\nINFO:tensorflow:Evaluation 
[89/1000]\nINFO:tensorflow:Evaluation [90/1000]\nINFO:tensorflow:Evaluation [91/1000]\nINFO:tensorflow:Evaluation [92/1000]\nINFO:tensorflow:Evaluation [93/1000]\nINFO:tensorflow:Evaluation [94/1000]\nINFO:tensorflow:Evaluation [95/1000]\nINFO:tensorflow:Evaluation [96/1000]\nINFO:tensorflow:Evaluation [97/1000]\nINFO:tensorflow:Evaluation [98/1000]\nINFO:tensorflow:Evaluation [99/1000]\nINFO:tensorflow:Evaluation [100/1000]\nINFO:tensorflow:Evaluation [101/1000]\nINFO:tensorflow:Evaluation [102/1000]\nINFO:tensorflow:Evaluation [103/1000]\nINFO:tensorflow:Evaluation [104/1000]\nINFO:tensorflow:Evaluation [105/1000]\nINFO:tensorflow:Evaluation [106/1000]\nINFO:tensorflow:Evaluation [107/1000]\nINFO:tensorflow:Evaluation [108/1000]\nINFO:tensorflow:Evaluation [109/1000]\nINFO:tensorflow:Evaluation [110/1000]\nINFO:tensorflow:Evaluation [111/1000]\nINFO:tensorflow:Evaluation [112/1000]\nINFO:tensorflow:Evaluation [113/1000]\nINFO:tensorflow:Evaluation [114/1000]\nINFO:tensorflow:Evaluation [115/1000]\nINFO:tensorflow:Evaluation [116/1000]\nINFO:tensorflow:Evaluation [117/1000]\nINFO:tensorflow:Evaluation [118/1000]\nINFO:tensorflow:Evaluation [119/1000]\nINFO:tensorflow:Evaluation [120/1000]\nINFO:tensorflow:Evaluation [121/1000]\nINFO:tensorflow:Evaluation [122/1000]\nINFO:tensorflow:Evaluation [123/1000]\nINFO:tensorflow:Evaluation [124/1000]\nINFO:tensorflow:Evaluation [125/1000]\nINFO:tensorflow:Evaluation [126/1000]\nINFO:tensorflow:Evaluation [127/1000]\nINFO:tensorflow:Evaluation [128/1000]\nINFO:tensorflow:Evaluation [129/1000]\nINFO:tensorflow:Evaluation [130/1000]\nINFO:tensorflow:Evaluation [131/1000]\nINFO:tensorflow:Evaluation [132/1000]\nINFO:tensorflow:Evaluation [133/1000]\nINFO:tensorflow:Evaluation [134/1000]\nINFO:tensorflow:Evaluation [135/1000]\nINFO:tensorflow:Evaluation [136/1000]\nINFO:tensorflow:Evaluation [137/1000]\nINFO:tensorflow:Evaluation [138/1000]\nINFO:tensorflow:Evaluation [139/1000]\nINFO:tensorflow:Evaluation [140/1000]\nINFO:tensorflow:Evaluation [141/1000]\nINFO:tensorflow:Evaluation [142/1000]\nINFO:tensorflow:Evaluation [143/1000]\nINFO:tensorflow:Evaluation [144/1000]\nINFO:tensorflow:Evaluation [145/1000]\nINFO:tensorflow:Evaluation [146/1000]\nINFO:tensorflow:Evaluation [147/1000]\nINFO:tensorflow:Evaluation [148/1000]\nINFO:tensorflow:Evaluation [149/1000]\nINFO:tensorflow:Evaluation [150/1000]\nINFO:tensorflow:Evaluation [151/1000]\nINFO:tensorflow:Evaluation [152/1000]\nINFO:tensorflow:Evaluation [153/1000]\nINFO:tensorflow:Evaluation [154/1000]\nINFO:tensorflow:Evaluation [155/1000]\nINFO:tensorflow:Evaluation [156/1000]\nINFO:tensorflow:Evaluation [157/1000]\nINFO:tensorflow:Evaluation [158/1000]\nINFO:tensorflow:Evaluation [159/1000]\nINFO:tensorflow:Evaluation [160/1000]\nINFO:tensorflow:Evaluation [161/1000]\nINFO:tensorflow:Evaluation [162/1000]\nINFO:tensorflow:Evaluation [163/1000]\nINFO:tensorflow:Evaluation [164/1000]\nINFO:tensorflow:Evaluation [165/1000]\nINFO:tensorflow:Evaluation [166/1000]\nINFO:tensorflow:Evaluation [167/1000]\nINFO:tensorflow:Evaluation [168/1000]\nINFO:tensorflow:Evaluation [169/1000]\nINFO:tensorflow:Evaluation [170/1000]\nINFO:tensorflow:Evaluation [171/1000]\nINFO:tensorflow:Evaluation [172/1000]\nINFO:tensorflow:Evaluation [173/1000]\nINFO:tensorflow:Evaluation [174/1000]\nINFO:tensorflow:Evaluation [175/1000]\nINFO:tensorflow:Evaluation [176/1000]\nINFO:tensorflow:Evaluation [177/1000]\nINFO:tensorflow:Evaluation [178/1000]\nINFO:tensorflow:Evaluation [179/1000]\nINFO:tensorflow:Evaluation 
[180/1000]\nINFO:tensorflow:Evaluation [181/1000]\nINFO:tensorflow:Evaluation [182/1000]\nINFO:tensorflow:Evaluation [183/1000]\nINFO:tensorflow:Evaluation [184/1000]\nINFO:tensorflow:Evaluation [185/1000]\nINFO:tensorflow:Evaluation [186/1000]\nINFO:tensorflow:Evaluation [187/1000]\nINFO:tensorflow:Evaluation [188/1000]\nINFO:tensorflow:Evaluation [189/1000]\nINFO:tensorflow:Evaluation [190/1000]\nINFO:tensorflow:Evaluation [191/1000]\nINFO:tensorflow:Evaluation [192/1000]\nINFO:tensorflow:Evaluation [193/1000]\nINFO:tensorflow:Evaluation [194/1000]\nINFO:tensorflow:Evaluation [195/1000]\nINFO:tensorflow:Evaluation [196/1000]\nINFO:tensorflow:Evaluation [197/1000]\nINFO:tensorflow:Evaluation [198/1000]\nINFO:tensorflow:Evaluation [199/1000]\nINFO:tensorflow:Evaluation [200/1000]\nINFO:tensorflow:Evaluation [201/1000]\nINFO:tensorflow:Evaluation [202/1000]\nINFO:tensorflow:Evaluation [203/1000]\nINFO:tensorflow:Evaluation [204/1000]\nINFO:tensorflow:Evaluation [205/1000]\nINFO:tensorflow:Evaluation [206/1000]\nINFO:tensorflow:Evaluation [207/1000]\nINFO:tensorflow:Evaluation [208/1000]\nINFO:tensorflow:Evaluation [209/1000]\nINFO:tensorflow:Evaluation [210/1000]\nINFO:tensorflow:Evaluation [211/1000]\nINFO:tensorflow:Evaluation [212/1000]\nINFO:tensorflow:Evaluation [213/1000]\nINFO:tensorflow:Evaluation [214/1000]\nINFO:tensorflow:Evaluation [215/1000]\n"
],
[
"eval_metrics = estimator.evaluate(input_fn=eval_input_func,steps=1000)",
"INFO:tensorflow:Starting evaluation at 2017-08-29-07:35:50\nINFO:tensorflow:Restoring parameters from C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt-1000\nINFO:tensorflow:Evaluation [1/1000]\nINFO:tensorflow:Evaluation [2/1000]\nINFO:tensorflow:Evaluation [3/1000]\nINFO:tensorflow:Evaluation [4/1000]\nINFO:tensorflow:Evaluation [5/1000]\nINFO:tensorflow:Evaluation [6/1000]\nINFO:tensorflow:Evaluation [7/1000]\nINFO:tensorflow:Evaluation [8/1000]\nINFO:tensorflow:Evaluation [9/1000]\nINFO:tensorflow:Evaluation [10/1000]\nINFO:tensorflow:Evaluation [11/1000]\nINFO:tensorflow:Evaluation [12/1000]\nINFO:tensorflow:Evaluation [13/1000]\nINFO:tensorflow:Evaluation [14/1000]\nINFO:tensorflow:Evaluation [15/1000]\nINFO:tensorflow:Evaluation [16/1000]\nINFO:tensorflow:Evaluation [17/1000]\nINFO:tensorflow:Evaluation [18/1000]\nINFO:tensorflow:Evaluation [19/1000]\nINFO:tensorflow:Evaluation [20/1000]\nINFO:tensorflow:Evaluation [21/1000]\nINFO:tensorflow:Evaluation [22/1000]\nINFO:tensorflow:Evaluation [23/1000]\nINFO:tensorflow:Evaluation [24/1000]\nINFO:tensorflow:Evaluation [25/1000]\nINFO:tensorflow:Evaluation [26/1000]\nINFO:tensorflow:Evaluation [27/1000]\nINFO:tensorflow:Evaluation [28/1000]\nINFO:tensorflow:Evaluation [29/1000]\nINFO:tensorflow:Evaluation [30/1000]\nINFO:tensorflow:Evaluation [31/1000]\nINFO:tensorflow:Evaluation [32/1000]\nINFO:tensorflow:Evaluation [33/1000]\nINFO:tensorflow:Evaluation [34/1000]\nINFO:tensorflow:Evaluation [35/1000]\nINFO:tensorflow:Evaluation [36/1000]\nINFO:tensorflow:Evaluation [37/1000]\nINFO:tensorflow:Evaluation [38/1000]\nINFO:tensorflow:Evaluation [39/1000]\nINFO:tensorflow:Evaluation [40/1000]\nINFO:tensorflow:Evaluation [41/1000]\nINFO:tensorflow:Evaluation [42/1000]\nINFO:tensorflow:Evaluation [43/1000]\nINFO:tensorflow:Evaluation [44/1000]\nINFO:tensorflow:Evaluation [45/1000]\nINFO:tensorflow:Evaluation [46/1000]\nINFO:tensorflow:Evaluation [47/1000]\nINFO:tensorflow:Evaluation [48/1000]\nINFO:tensorflow:Evaluation [49/1000]\nINFO:tensorflow:Evaluation [50/1000]\nINFO:tensorflow:Evaluation [51/1000]\nINFO:tensorflow:Evaluation [52/1000]\nINFO:tensorflow:Evaluation [53/1000]\nINFO:tensorflow:Evaluation [54/1000]\nINFO:tensorflow:Evaluation [55/1000]\nINFO:tensorflow:Evaluation [56/1000]\nINFO:tensorflow:Evaluation [57/1000]\nINFO:tensorflow:Evaluation [58/1000]\nINFO:tensorflow:Evaluation [59/1000]\nINFO:tensorflow:Evaluation [60/1000]\nINFO:tensorflow:Evaluation [61/1000]\nINFO:tensorflow:Evaluation [62/1000]\nINFO:tensorflow:Evaluation [63/1000]\nINFO:tensorflow:Evaluation [64/1000]\nINFO:tensorflow:Evaluation [65/1000]\nINFO:tensorflow:Evaluation [66/1000]\nINFO:tensorflow:Evaluation [67/1000]\nINFO:tensorflow:Evaluation [68/1000]\nINFO:tensorflow:Evaluation [69/1000]\nINFO:tensorflow:Evaluation [70/1000]\nINFO:tensorflow:Evaluation [71/1000]\nINFO:tensorflow:Evaluation [72/1000]\nINFO:tensorflow:Evaluation [73/1000]\nINFO:tensorflow:Evaluation [74/1000]\nINFO:tensorflow:Evaluation [75/1000]\nINFO:tensorflow:Evaluation [76/1000]\nINFO:tensorflow:Evaluation [77/1000]\nINFO:tensorflow:Evaluation [78/1000]\nINFO:tensorflow:Evaluation [79/1000]\nINFO:tensorflow:Evaluation [80/1000]\nINFO:tensorflow:Evaluation [81/1000]\nINFO:tensorflow:Evaluation [82/1000]\nINFO:tensorflow:Evaluation [83/1000]\nINFO:tensorflow:Evaluation [84/1000]\nINFO:tensorflow:Evaluation [85/1000]\nINFO:tensorflow:Evaluation [86/1000]\nINFO:tensorflow:Evaluation [87/1000]\nINFO:tensorflow:Evaluation [88/1000]\nINFO:tensorflow:Evaluation 
[89/1000]\nINFO:tensorflow:Evaluation [90/1000]\nINFO:tensorflow:Evaluation [91/1000]\nINFO:tensorflow:Evaluation [92/1000]\nINFO:tensorflow:Evaluation [93/1000]\nINFO:tensorflow:Evaluation [94/1000]\nINFO:tensorflow:Evaluation [95/1000]\nINFO:tensorflow:Evaluation [96/1000]\nINFO:tensorflow:Evaluation [97/1000]\nINFO:tensorflow:Evaluation [98/1000]\nINFO:tensorflow:Evaluation [99/1000]\nINFO:tensorflow:Evaluation [100/1000]\nINFO:tensorflow:Evaluation [101/1000]\nINFO:tensorflow:Evaluation [102/1000]\nINFO:tensorflow:Evaluation [103/1000]\nINFO:tensorflow:Evaluation [104/1000]\nINFO:tensorflow:Evaluation [105/1000]\nINFO:tensorflow:Evaluation [106/1000]\nINFO:tensorflow:Evaluation [107/1000]\nINFO:tensorflow:Evaluation [108/1000]\nINFO:tensorflow:Evaluation [109/1000]\nINFO:tensorflow:Evaluation [110/1000]\nINFO:tensorflow:Evaluation [111/1000]\nINFO:tensorflow:Evaluation [112/1000]\nINFO:tensorflow:Evaluation [113/1000]\nINFO:tensorflow:Evaluation [114/1000]\nINFO:tensorflow:Evaluation [115/1000]\nINFO:tensorflow:Evaluation [116/1000]\nINFO:tensorflow:Evaluation [117/1000]\nINFO:tensorflow:Evaluation [118/1000]\nINFO:tensorflow:Evaluation [119/1000]\nINFO:tensorflow:Evaluation [120/1000]\nINFO:tensorflow:Evaluation [121/1000]\nINFO:tensorflow:Evaluation [122/1000]\nINFO:tensorflow:Evaluation [123/1000]\nINFO:tensorflow:Evaluation [124/1000]\nINFO:tensorflow:Evaluation [125/1000]\nINFO:tensorflow:Evaluation [126/1000]\nINFO:tensorflow:Evaluation [127/1000]\nINFO:tensorflow:Evaluation [128/1000]\nINFO:tensorflow:Evaluation [129/1000]\nINFO:tensorflow:Evaluation [130/1000]\nINFO:tensorflow:Evaluation [131/1000]\nINFO:tensorflow:Evaluation [132/1000]\nINFO:tensorflow:Evaluation [133/1000]\nINFO:tensorflow:Evaluation [134/1000]\nINFO:tensorflow:Evaluation [135/1000]\nINFO:tensorflow:Evaluation [136/1000]\nINFO:tensorflow:Evaluation [137/1000]\nINFO:tensorflow:Evaluation [138/1000]\nINFO:tensorflow:Evaluation [139/1000]\nINFO:tensorflow:Evaluation [140/1000]\nINFO:tensorflow:Evaluation [141/1000]\nINFO:tensorflow:Evaluation [142/1000]\nINFO:tensorflow:Evaluation [143/1000]\nINFO:tensorflow:Evaluation [144/1000]\nINFO:tensorflow:Evaluation [145/1000]\nINFO:tensorflow:Evaluation [146/1000]\nINFO:tensorflow:Evaluation [147/1000]\nINFO:tensorflow:Evaluation [148/1000]\nINFO:tensorflow:Evaluation [149/1000]\nINFO:tensorflow:Evaluation [150/1000]\nINFO:tensorflow:Evaluation [151/1000]\nINFO:tensorflow:Evaluation [152/1000]\nINFO:tensorflow:Evaluation [153/1000]\nINFO:tensorflow:Evaluation [154/1000]\nINFO:tensorflow:Evaluation [155/1000]\nINFO:tensorflow:Evaluation [156/1000]\nINFO:tensorflow:Evaluation [157/1000]\nINFO:tensorflow:Evaluation [158/1000]\nINFO:tensorflow:Evaluation [159/1000]\nINFO:tensorflow:Evaluation [160/1000]\nINFO:tensorflow:Evaluation [161/1000]\nINFO:tensorflow:Evaluation [162/1000]\nINFO:tensorflow:Evaluation [163/1000]\nINFO:tensorflow:Evaluation [164/1000]\nINFO:tensorflow:Evaluation [165/1000]\nINFO:tensorflow:Evaluation [166/1000]\nINFO:tensorflow:Evaluation [167/1000]\nINFO:tensorflow:Evaluation [168/1000]\nINFO:tensorflow:Evaluation [169/1000]\nINFO:tensorflow:Evaluation [170/1000]\nINFO:tensorflow:Evaluation [171/1000]\nINFO:tensorflow:Evaluation [172/1000]\nINFO:tensorflow:Evaluation [173/1000]\nINFO:tensorflow:Evaluation [174/1000]\nINFO:tensorflow:Evaluation [175/1000]\nINFO:tensorflow:Evaluation [176/1000]\nINFO:tensorflow:Evaluation [177/1000]\nINFO:tensorflow:Evaluation [178/1000]\nINFO:tensorflow:Evaluation [179/1000]\nINFO:tensorflow:Evaluation 
[180/1000]\nINFO:tensorflow:Evaluation [181/1000]\nINFO:tensorflow:Evaluation [182/1000]\nINFO:tensorflow:Evaluation [183/1000]\nINFO:tensorflow:Evaluation [184/1000]\nINFO:tensorflow:Evaluation [185/1000]\nINFO:tensorflow:Evaluation [186/1000]\nINFO:tensorflow:Evaluation [187/1000]\nINFO:tensorflow:Evaluation [188/1000]\nINFO:tensorflow:Evaluation [189/1000]\nINFO:tensorflow:Evaluation [190/1000]\nINFO:tensorflow:Evaluation [191/1000]\nINFO:tensorflow:Evaluation [192/1000]\nINFO:tensorflow:Evaluation [193/1000]\nINFO:tensorflow:Evaluation [194/1000]\nINFO:tensorflow:Evaluation [195/1000]\nINFO:tensorflow:Evaluation [196/1000]\nINFO:tensorflow:Evaluation [197/1000]\nINFO:tensorflow:Evaluation [198/1000]\nINFO:tensorflow:Evaluation [199/1000]\nINFO:tensorflow:Evaluation [200/1000]\nINFO:tensorflow:Evaluation [201/1000]\nINFO:tensorflow:Evaluation [202/1000]\nINFO:tensorflow:Evaluation [203/1000]\nINFO:tensorflow:Evaluation [204/1000]\nINFO:tensorflow:Evaluation [205/1000]\nINFO:tensorflow:Evaluation [206/1000]\nINFO:tensorflow:Evaluation [207/1000]\nINFO:tensorflow:Evaluation [208/1000]\nINFO:tensorflow:Evaluation [209/1000]\nINFO:tensorflow:Evaluation [210/1000]\nINFO:tensorflow:Evaluation [211/1000]\nINFO:tensorflow:Evaluation [212/1000]\nINFO:tensorflow:Evaluation [213/1000]\nINFO:tensorflow:Evaluation [214/1000]\nINFO:tensorflow:Evaluation [215/1000]\n"
],
[
"print(\"train metrics: {}\".format(train_metrics))\nprint(\"eval metrics: {}\".format(eval_metrics))",
"train metrics: {'loss': 4.673841, 'average_loss': 1.1684602, 'global_step': 1000}\neval metrics: {'loss': 4.7633586, 'average_loss': 1.1908396, 'global_step': 1000}\n"
]
],
[
[
"### Predictions",
"_____no_output_____"
]
],
[
[
"input_fn_predict = tf.estimator.inputs.numpy_input_fn({'x':np.linspace(0,10,10)},shuffle=False)",
"_____no_output_____"
],
[
"list(estimator.predict(input_fn=input_fn_predict))",
"INFO:tensorflow:Restoring parameters from C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt-1000\n"
],
[
"predictions = []# np.array([])\nfor x in estimator.predict(input_fn=input_fn_predict):\n predictions.append(x['predictions'])",
"INFO:tensorflow:Restoring parameters from C:\\Users\\Marcial\\AppData\\Local\\Temp\\tmphej5e4se\\model.ckpt-1000\n"
],
[
"predictions",
"_____no_output_____"
],
[
"my_data.sample(n=250).plot(kind='scatter',x='X Data',y='Y')\nplt.plot(np.linspace(0,10,10),predictions,'r')",
"_____no_output_____"
]
],
[
[
"# Great Job!",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb73f5ea7f4b4836a39a67693d5171ba4d32b546 | 12,472 | ipynb | Jupyter Notebook | matrix_two/day1_meta.ipynb | DranelM/dw_matrix | 961af2cd312fcd6e378aba8c3a4fac95972337e7 | [
"MIT"
]
| null | null | null | matrix_two/day1_meta.ipynb | DranelM/dw_matrix | 961af2cd312fcd6e378aba8c3a4fac95972337e7 | [
"MIT"
]
| null | null | null | matrix_two/day1_meta.ipynb | DranelM/dw_matrix | 961af2cd312fcd6e378aba8c3a4fac95972337e7 | [
"MIT"
]
| null | null | null | 36.682353 | 87 | 0.386225 | [
[
[
"# ''' Jeśli colab to '''\n\n# GITHUB_TOKEN = ''\n# GITHUB_URL = f'https://{GITHUB_TOKEN}@github.com/DranelM/dw_matrix.git'\n# !git clone &GITHUB_URL\n# !curl -L http://bit.ly/dw_car_data -i car.h5",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_hdf('../data/car.h5')",
"_____no_output_____"
],
[
"df.sample(5)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code"
]
]
|
cb740baca0b7edcc8382a8953aecd89f505bf4f1 | 109,920 | ipynb | Jupyter Notebook | Projects/4_HMM Tagger/HMM Tagger.ipynb | urviyi/artificial-intelligence | f19233a29a454591ec6014c8320af60a68a069d9 | [
"MIT"
]
| null | null | null | Projects/4_HMM Tagger/HMM Tagger.ipynb | urviyi/artificial-intelligence | f19233a29a454591ec6014c8320af60a68a069d9 | [
"MIT"
]
| null | null | null | Projects/4_HMM Tagger/HMM Tagger.ipynb | urviyi/artificial-intelligence | f19233a29a454591ec6014c8320af60a68a069d9 | [
"MIT"
]
| null | null | null | 72.315789 | 21,052 | 0.607314 | [
[
[
"# Project: Part of Speech Tagging with Hidden Markov Models \n---\n### Introduction\n\nPart of speech tagging is the process of determining the syntactic category of a word from the words in its surrounding context. It is often used to help disambiguate natural language phrases because it can be done quickly with high accuracy. Tagging can be used for many NLP tasks like determining correct pronunciation during speech synthesis (for example, _dis_-count as a noun vs dis-_count_ as a verb), for information retrieval, and for word sense disambiguation.\n\nIn this notebook, you'll use the [Pomegranate](http://pomegranate.readthedocs.io/) library to build a hidden Markov model for part of speech tagging using a \"universal\" tagset. Hidden Markov models have been able to achieve [>96% tag accuracy with larger tagsets on realistic text corpora](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf). Hidden Markov models have also been used for speech recognition and speech generation, machine translation, gene recognition for bioinformatics, and human gesture recognition for computer vision, and more. \n\n\n\nThe notebook already contains some code to get you started. You only need to add some new functionality in the areas indicated to complete the project; you will not need to modify the included code beyond what is requested. Sections that begin with **'IMPLEMENTATION'** in the header indicate that you must provide code in the block that follows. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-info\">\n**Note:** Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You must then **export the notebook** by running the last cell in the notebook, or by using the menu above and navigating to **File -> Download as -> HTML (.html)** Your submissions should include both the `html` and `ipynb` files.\n</div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-info\">\n**Note:** Code and Markdown cells can be executed using the `Shift + Enter` keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n</div>",
"_____no_output_____"
],
[
"### The Road Ahead\nYou must complete Steps 1-3 below to pass the project. The section on Step 4 includes references & resources you can use to further explore HMM taggers.\n\n- [Step 1](#Step-1:-Read-and-preprocess-the-dataset): Review the provided interface to load and access the text corpus\n- [Step 2](#Step-2:-Build-a-Most-Frequent-Class-tagger): Build a Most Frequent Class tagger to use as a baseline\n- [Step 3](#Step-3:-Build-an-HMM-tagger): Build an HMM Part of Speech tagger and compare to the MFC baseline\n- [Step 4](#Step-4:-[Optional]-Improving-model-performance): (Optional) Improve the HMM tagger",
"_____no_output_____"
]
],
[
[
"# Jupyter \"magic methods\" -- only need to be run once per kernel restart\n%load_ext autoreload\nimport helpers, tests\n%autoreload 1",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"# import python modules -- this cell needs to be run again if you make changes to any of the files\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom IPython.core.display import HTML\nfrom itertools import chain\nfrom collections import Counter, defaultdict\nfrom helpers import show_model, Dataset\nfrom pomegranate import State, HiddenMarkovModel, DiscreteDistribution",
"_____no_output_____"
]
],
[
[
"## Step 1: Read and preprocess the dataset\n---\nWe'll start by reading in a text corpus and splitting it into a training and testing dataset. The data set is a copy of the [Brown corpus](https://en.wikipedia.org/wiki/Brown_Corpus) (originally from the [NLTK](https://www.nltk.org/) library) that has already been pre-processed to only include the [universal tagset](https://arxiv.org/pdf/1104.2086.pdf). You should expect to get slightly higher accuracy using this simplified tagset than the same model would achieve on a larger tagset like the full [Penn treebank tagset](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), but the process you'll follow would be the same.\n\nThe `Dataset` class provided in helpers.py will read and parse the corpus. You can generate your own datasets compatible with the reader by writing them to the following format. The dataset is stored in plaintext as a collection of words and corresponding tags. Each sentence starts with a unique identifier on the first line, followed by one tab-separated word/tag pair on each following line. Sentences are separated by a single blank line.\n\nExample from the Brown corpus. \n```\nb100-38532\nPerhaps\tADV\nit\tPRON\nwas\tVERB\nright\tADJ\n;\t.\n;\t.\n\nb100-35577\n...\n```",
"_____no_output_____"
]
],
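[
[
"Since the corpus format described above is plain text, a compatible file can be written with a few lines of Python. The next cell is an illustrative sketch added for clarity; the file name `my-corpus.txt` and the two example sentences are made up for demonstration and are not used anywhere else in the project.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch: write a tiny corpus in the format described above.\n# 'my-corpus.txt' and the example sentences are made up for demonstration only.\nexample_sentences = {\n    's0': (('See', 'Spot', 'run'), ('VERB', 'NOUN', 'VERB')),\n    's1': (('Spot', 'ran'), ('NOUN', 'VERB')),\n}\n\nwith open('my-corpus.txt', 'w') as f:\n    for sentence_id, (words, tags) in example_sentences.items():\n        f.write(sentence_id + '\\n')            # unique identifier on the first line\n        for word, tag in zip(words, tags):\n            f.write(word + '\\t' + tag + '\\n')  # one tab-separated word/tag pair per line\n        f.write('\\n')                          # sentences are separated by a blank line",
"_____no_output_____"
]
],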
[
[
"data = Dataset(\"tags-universal.txt\", \"brown-universal.txt\", train_test_split=0.8)\n\nprint(\"There are {} sentences in the corpus.\".format(len(data)))\nprint(\"There are {} sentences in the training set.\".format(len(data.training_set)))\nprint(\"There are {} sentences in the testing set.\".format(len(data.testing_set)))\n\nassert len(data) == len(data.training_set) + len(data.testing_set), \\\n \"The number of sentences in the training set + testing set should sum to the number of sentences in the corpus\"",
"There are 57340 sentences in the corpus.\nThere are 45872 sentences in the training set.\nThere are 11468 sentences in the testing set.\n"
]
],
[
[
"### The Dataset Interface\n\nYou can access (mostly) immutable references to the dataset through a simple interface provided through the `Dataset` class, which represents an iterable collection of sentences along with easy access to partitions of the data for training & testing. Review the reference below, then run and review the next few cells to make sure you understand the interface before moving on to the next step.\n\n```\nDataset-only Attributes:\n training_set - reference to a Subset object containing the samples for training\n testing_set - reference to a Subset object containing the samples for testing\n\nDataset & Subset Attributes:\n sentences - a dictionary with an entry {sentence_key: Sentence()} for each sentence in the corpus\n keys - an immutable ordered (not sorted) collection of the sentence_keys for the corpus\n vocab - an immutable collection of the unique words in the corpus\n tagset - an immutable collection of the unique tags in the corpus\n X - returns an array of words grouped by sentences ((w11, w12, w13, ...), (w21, w22, w23, ...), ...)\n Y - returns an array of tags grouped by sentences ((t11, t12, t13, ...), (t21, t22, t23, ...), ...)\n N - returns the number of distinct samples (individual words or tags) in the dataset\n\nMethods:\n stream() - returns an flat iterable over all (word, tag) pairs across all sentences in the corpus\n __iter__() - returns an iterable over the data as (sentence_key, Sentence()) pairs\n __len__() - returns the nubmer of sentences in the dataset\n```\n\nFor example, consider a Subset, `subset`, of the sentences `{\"s0\": Sentence((\"See\", \"Spot\", \"run\"), (\"VERB\", \"NOUN\", \"VERB\")), \"s1\": Sentence((\"Spot\", \"ran\"), (\"NOUN\", \"VERB\"))}`. The subset will have these attributes:\n\n```\nsubset.keys == {\"s1\", \"s0\"} # unordered\nsubset.vocab == {\"See\", \"run\", \"ran\", \"Spot\"} # unordered\nsubset.tagset == {\"VERB\", \"NOUN\"} # unordered\nsubset.X == ((\"Spot\", \"ran\"), (\"See\", \"Spot\", \"run\")) # order matches .keys\nsubset.Y == ((\"NOUN\", \"VERB\"), (\"VERB\", \"NOUN\", \"VERB\")) # order matches .keys\nsubset.N == 7 # there are a total of seven observations over all sentences\nlen(subset) == 2 # because there are two sentences\n```\n\n<div class=\"alert alert-block alert-info\">\n**Note:** The `Dataset` class is _convenient_, but it is **not** efficient. It is not suitable for huge datasets because it stores multiple redundant copies of the same data.\n</div>",
"_____no_output_____"
],
[
"#### Sentences\n\n`Dataset.sentences` is a dictionary of all sentences in the training corpus, each keyed to a unique sentence identifier. Each `Sentence` is itself an object with two attributes: a tuple of the words in the sentence named `words` and a tuple of the tag corresponding to each word named `tags`.",
"_____no_output_____"
]
],
[
[
"key = 'b100-38532'\nprint(\"Sentence: {}\".format(key))\nprint(\"words:\\n\\t{!s}\".format(data.sentences[key].words))\nprint(\"tags:\\n\\t{!s}\".format(data.sentences[key].tags))",
"Sentence: b100-38532\nwords:\n\t('Perhaps', 'it', 'was', 'right', ';', ';')\ntags:\n\t('ADV', 'PRON', 'VERB', 'ADJ', '.', '.')\n"
]
],
[
[
"<div class=\"alert alert-block alert-info\">\n**Note:** The underlying iterable sequence is **unordered** over the sentences in the corpus; it is not guaranteed to return the sentences in a consistent order between calls. Use `Dataset.stream()`, `Dataset.keys`, `Dataset.X`, or `Dataset.Y` attributes if you need ordered access to the data.\n</div>\n\n#### Counting Unique Elements\n\nYou can access the list of unique words (the dataset vocabulary) via `Dataset.vocab` and the unique list of tags via `Dataset.tagset`.",
"_____no_output_____"
]
],
[
[
"print(\"There are a total of {} samples of {} unique words in the corpus.\"\n .format(data.N, len(data.vocab)))\nprint(\"There are {} samples of {} unique words in the training set.\"\n .format(data.training_set.N, len(data.training_set.vocab)))\nprint(\"There are {} samples of {} unique words in the testing set.\"\n .format(data.testing_set.N, len(data.testing_set.vocab)))\nprint(\"There are {} words in the test set that are missing in the training set.\"\n .format(len(data.testing_set.vocab - data.training_set.vocab)))\n\nassert data.N == data.training_set.N + data.testing_set.N, \\\n \"The number of training + test samples should sum to the total number of samples\"",
"There are a total of 1161192 samples of 56057 unique words in the corpus.\nThere are 928458 samples of 50536 unique words in the training set.\nThere are 232734 samples of 25112 unique words in the testing set.\nThere are 5521 words in the test set that are missing in the training set.\n"
]
],
[
[
"#### Accessing word and tag Sequences\nThe `Dataset.X` and `Dataset.Y` attributes provide access to ordered collections of matching word and tag sequences for each sentence in the dataset.",
"_____no_output_____"
]
],
[
[
"# accessing words with Dataset.X and tags with Dataset.Y \nfor i in range(100): \n print(\"Sentence {}:\".format(i + 1), data.X[i])\n print()\n print(\"Labels {}:\".format(i + 1), data.Y[i])\n print()",
"Sentence 1: ('Mr.', 'Podger', 'had', 'thanked', 'him', 'gravely', ',', 'and', 'now', 'he', 'made', 'use', 'of', 'the', 'advice', '.')\n\nLabels 1: ('NOUN', 'NOUN', 'VERB', 'VERB', 'PRON', 'ADV', '.', 'CONJ', 'ADV', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 2: ('But', 'there', 'seemed', 'to', 'be', 'some', 'difference', 'of', 'opinion', 'as', 'to', 'how', 'far', 'the', 'board', 'should', 'go', ',', 'and', 'whose', 'advice', 'it', 'should', 'follow', '.')\n\nLabels 2: ('CONJ', 'PRT', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADP', 'ADV', 'ADV', 'DET', 'NOUN', 'VERB', 'VERB', '.', 'CONJ', 'DET', 'NOUN', 'PRON', 'VERB', 'VERB', '.')\n\nSentence 3: ('Such', 'an', 'instrument', 'is', 'expected', 'to', 'be', 'especially', 'useful', 'if', 'it', 'could', 'be', 'used', 'to', 'measure', 'the', 'elasticity', 'of', 'heavy', 'pastes', 'such', 'as', 'printing', 'inks', ',', 'paints', ',', 'adhesives', ',', 'molten', 'plastics', ',', 'and', 'bread', 'dough', ',', 'for', 'the', 'elasticity', 'is', 'related', 'to', 'those', 'various', 'properties', 'termed', '``', 'length', \"''\", ',', '``', 'shortness', \"''\", ',', '``', 'spinnability', \"''\", ',', 'etc.', ',', 'which', 'are', 'usually', 'judged', 'by', 'subjective', 'methods', 'at', 'present', '.')\n\nLabels 3: ('PRT', 'DET', 'NOUN', 'VERB', 'VERB', 'PRT', 'VERB', 'ADV', 'ADJ', 'ADP', 'PRON', 'VERB', 'VERB', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'ADJ', 'ADP', 'VERB', 'NOUN', '.', 'NOUN', '.', 'NOUN', '.', 'ADJ', 'NOUN', '.', 'CONJ', 'NOUN', 'NOUN', '.', 'ADP', 'DET', 'NOUN', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'VERB', '.', 'NOUN', '.', '.', '.', 'NOUN', '.', '.', '.', 'NOUN', '.', '.', 'ADV', '.', 'DET', 'VERB', 'ADV', 'VERB', 'ADP', 'ADJ', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 4: ('My', 'future', 'plans', 'are', 'to', 'become', 'a', 'language', 'teacher', '.')\n\nLabels 4: ('DET', 'ADJ', 'NOUN', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'NOUN', '.')\n\nSentence 5: ('We', 'ran', 'east', 'for', 'about', 'half', 'a', 'mile', 'before', 'we', 'turned', 'back', 'to', 'the', 'road', ',', 'panting', 'from', 'the', 'effort', 'and', 'soaked', 'with', 'sweat', '.')\n\nLabels 5: ('PRON', 'VERB', 'NOUN', 'ADP', 'ADV', 'PRT', 'DET', 'NOUN', 'ADP', 'PRON', 'VERB', 'ADV', 'ADP', 'DET', 'NOUN', '.', 'VERB', 'ADP', 'DET', 'NOUN', 'CONJ', 'VERB', 'ADP', 'NOUN', '.')\n\nSentence 6: ('After', 'television', ',', '``', 'La', 'Dolce', 'Vita', \"''\", 'seems', 'as', 'harmless', 'as', 'a', 'Gray', 'Line', 'tour', 'of', 'North', 'Beach', 'at', 'night', '.')\n\nLabels 6: ('ADP', 'NOUN', '.', '.', 'X', 'X', 'X', '.', 'VERB', 'ADV', 'ADJ', 'ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 7: ('It', 'would', 'give', 'him', 'an', 'opportunity', 'to', 'take', 'the', 'measure', 'of', 'his', 'chief', 'adversary', 'in', 'the', 'cold', 'war', ',', 'to', 'try', 'to', 'probe', 'Mr.', \"Khrushchev's\", 'intentions', 'and', 'to', 'make', 'clear', 'his', 'own', 'views', '.')\n\nLabels 7: ('PRON', 'VERB', 'VERB', 'PRON', 'DET', 'NOUN', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'PRT', 'VERB', 'PRT', 'VERB', 'NOUN', 'NOUN', 'NOUN', 'CONJ', 'PRT', 'VERB', 'ADJ', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 8: ('She', 'had', 'to', 'move', 'in', 'some', 'direction', '--', 'any', 'direction', 'that', 'would', 'take', 'her', 'away', 'from', 'this', 'evil', 'place', '.')\n\nLabels 8: ('PRON', 'VERB', 'PRT', 'VERB', 'ADP', 'DET', 'NOUN', '.', 'DET', 
'NOUN', 'PRON', 'VERB', 'VERB', 'PRON', 'ADV', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 9: ('English', 'philosopher', 'Samuel', \"Alexander's\", 'debt', 'to', 'Wordsworth', 'and', 'Meredith', 'is', 'a', 'recent', 'interesting', 'example', ',', 'as', 'also', 'A.', 'N.', \"Whitehead's\", 'understanding', 'of', 'the', 'English', 'romantics', ',', 'chiefly', 'Shelley', 'and', 'Wordsworth', '.')\n\nLabels 9: ('ADJ', 'NOUN', 'NOUN', 'NOUN', 'NOUN', 'ADP', 'NOUN', 'CONJ', 'NOUN', 'VERB', 'DET', 'ADJ', 'ADJ', 'NOUN', '.', 'ADP', 'ADV', 'NOUN', 'NOUN', 'NOUN', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADV', 'NOUN', 'CONJ', 'NOUN', '.')\n\nSentence 10: ('So', 'we', 'are', 'faced', 'with', 'a', 'vast', 'network', 'of', 'amorphous', 'entities', 'perpetuating', 'themselves', 'in', 'whatever', 'manner', 'they', 'can', ',', 'without', 'regard', 'to', 'the', 'needs', 'of', 'society', ',', 'controlling', 'society', 'and', 'forcing', 'upon', 'it', 'a', 'regime', 'representing', 'only', 'the', \"corporation's\", 'needs', 'for', 'survival', '.')\n\nLabels 10: ('ADV', 'PRON', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'VERB', 'PRON', 'ADP', 'DET', 'NOUN', 'PRON', 'VERB', '.', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', '.', 'VERB', 'NOUN', 'CONJ', 'VERB', 'ADP', 'PRON', 'DET', 'NOUN', 'VERB', 'ADV', 'DET', 'NOUN', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 11: ('First', ',', 'we', 'can', 'encourage', 'responsibility', 'by', 'establishing', 'as', 'conditions', 'for', 'assistance', 'on', 'a', 'substantial', 'and', 'sustained', 'scale', 'the', 'definition', 'of', 'objectives', 'and', 'the', 'assessment', 'of', 'costs', '.')\n\nLabels 11: ('ADV', '.', 'PRON', 'VERB', 'VERB', 'NOUN', 'ADP', 'VERB', 'ADP', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'ADJ', 'CONJ', 'VERB', 'NOUN', 'DET', 'NOUN', 'ADP', 'NOUN', 'CONJ', 'DET', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 12: ('Could', 'it', 'just', 'be', ',', 'Theresa', 'wondered', ',', 'that', 'Anne', 'had', 'understood', 'only', 'too', 'well', ',', 'and', 'that', 'George', 'all', 'along', 'was', 'extraordinary', 'only', 'in', 'the', 'degree', 'to', 'which', 'he', 'was', 'dull', '?', '?')\n\nLabels 12: ('VERB', 'PRON', 'ADV', 'VERB', '.', 'NOUN', 'VERB', '.', 'ADP', 'NOUN', 'VERB', 'VERB', 'ADV', 'ADV', 'ADV', '.', 'CONJ', 'ADP', 'NOUN', 'PRT', 'ADV', 'VERB', 'ADJ', 'ADV', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'PRON', 'VERB', 'ADJ', '.', '.')\n\nSentence 13: ('Since', 'Af', 'and', 'P', 'divides', 'Af', 'for', 'Af', ',', 'we', 'have', 'Af', '.')\n\nLabels 13: ('ADP', 'NOUN', 'CONJ', 'NOUN', 'VERB', 'NOUN', 'ADP', 'NOUN', '.', 'PRON', 'VERB', 'NOUN', '.')\n\nSentence 14: ('1', '.')\n\nLabels 14: ('NUM', '.')\n\nSentence 15: ('In', 'upper', 'teen', 'Jewish', 'life', ',', 'the', 'non-college', 'group', 'tends', 'to', 'have', 'a', 'sense', 'of', 'marginality', '.')\n\nLabels 15: ('ADP', 'ADJ', 'NOUN', 'ADJ', 'NOUN', '.', 'DET', 'NOUN', 'NOUN', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 16: ('One', 'tempest', 'was', 'stirred', 'up', 'last', 'March', 'when', 'Udall', 'announced', 'that', 'an', 'eight-and-a-half-foot', 'bronze', 'statue', 'of', 'William', 'Jennings', 'Bryan', ',', 'sculpted', 'by', 'the', 'late', 'Gutzon', 'Borglum', ',', 'would', 'be', 'sent', '``', 'on', 'indefinite', 'loan', \"''\", 'to', 'Salem', ',', 'Illinois', ',', \"Bryan's\", 'birthplace', '.')\n\nLabels 16: ('NUM', 'NOUN', 'VERB', 'VERB', 'PRT', 'ADJ', 'NOUN', 'ADV', 'NOUN', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', 'ADP', 'NOUN', 'NOUN', 'NOUN', 
'.', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', '.', 'VERB', 'VERB', 'VERB', '.', 'ADP', 'ADJ', 'NOUN', '.', 'ADP', 'NOUN', '.', 'NOUN', '.', 'NOUN', 'NOUN', '.')\n\nSentence 17: ('You', 'killed', 'him', ',', \"didn't\", 'you', \"''\", '?', '?')\n\nLabels 17: ('PRON', 'VERB', 'PRON', '.', 'VERB', 'PRON', '.', '.', '.')\n\nSentence 18: ('In', 'the', 'first', 'case', 'the', 'fixed', 'elements', 'within', 'each', 'pencil', 'are', 'the', 'multiple', 'secant', 'and', 'the', 'line', 'joining', 'the', 'vertex', ',', 'P', ',', 'to', 'the', 'intersection', 'of', '**zg', 'and', 'the', 'plane', 'of', 'the', 'pencil', 'which', 'does', 'not', 'lie', 'on', 'the', 'multiple', 'secant', '.')\n\nLabels 18: ('ADP', 'DET', 'ADJ', 'NOUN', 'DET', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', 'VERB', 'DET', 'ADJ', 'NOUN', 'CONJ', 'DET', 'NOUN', 'VERB', 'DET', 'NOUN', '.', 'NOUN', '.', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'CONJ', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'DET', 'VERB', 'ADV', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 19: ('Before', 'you', 'let', 'loose', 'a', 'howl', 'saying', 'we', 'announced', 'its', 'coming', ',', 'not', 'once', 'but', 'several', 'times', ',', 'indeed', 'we', 'did', '.')\n\nLabels 19: ('ADP', 'PRON', 'VERB', 'ADJ', 'DET', 'NOUN', 'VERB', 'PRON', 'VERB', 'DET', 'NOUN', '.', 'ADV', 'ADV', 'CONJ', 'ADJ', 'NOUN', '.', 'ADV', 'PRON', 'VERB', '.')\n\nSentence 20: ('``', 'Either', 'that', 'or', 'a', 'veterinarian', \"''\", '.')\n\nLabels 20: ('.', 'CONJ', 'DET', 'CONJ', 'DET', 'NOUN', '.', '.')\n\nSentence 21: ('Mr.', 'Kennan', 'takes', 'careful', 'account', 'of', 'every', 'mitigating', 'circumstance', 'in', 'recalling', 'the', 'historical', 'atmosphere', 'in', 'which', 'mistaken', 'decisions', 'were', 'taken', '.')\n\nLabels 21: ('NOUN', 'NOUN', 'VERB', 'ADJ', 'NOUN', 'ADP', 'DET', 'VERB', 'NOUN', 'ADP', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'VERB', 'NOUN', 'VERB', 'VERB', '.')\n\nSentence 22: ('In', '1957', 'the', 'social-economic', 'approach', 'to', 'European', 'integration', 'was', 'capped', 'by', 'the', 'formation', 'among', '``', 'the', 'Six', \"''\", 'of', 'a', 'tariff-free', 'European', 'Common', 'Market', ',', 'and', 'Euratom', 'for', 'cooperation', 'in', 'the', 'development', 'of', 'atomic', 'energy', '.')\n\nLabels 22: ('ADP', 'NUM', 'DET', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'VERB', 'VERB', 'ADP', 'DET', 'NOUN', 'ADP', '.', 'DET', 'NUM', '.', 'ADP', 'DET', 'ADJ', 'ADJ', 'ADJ', 'NOUN', '.', 'CONJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\nSentence 23: ('This', 'leads', 'one', 'to', 'conclude', ',', 'as', 'you', 'have', ',', 'that', 'there', 'is', 'inevitably', 'more', 'prestige', 'in', 'a', 'management', 'position', 'in', 'the', 'minds', 'of', 'our', 'people', \"''\", '.')\n\nLabels 23: ('DET', 'VERB', 'NOUN', 'PRT', 'VERB', '.', 'ADP', 'PRON', 'VERB', '.', 'ADP', 'PRT', 'VERB', 'ADV', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', '.', '.')\n\nSentence 24: ('``', 'I', 'have', 'to', 'believe', 'it', \"''\", '.')\n\nLabels 24: ('.', 'PRON', 'VERB', 'PRT', 'VERB', 'PRON', '.', '.')\n\nSentence 25: ('But', 'then', ',', 'when', 'you', 'stuck', 'things', 'into', 'the', 'holes', ',', 'why', \"didn't\", 'they', 'come', 'right', 'out', 'again', '?', '?')\n\nLabels 25: ('CONJ', 'ADV', '.', 'ADV', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'ADV', 'VERB', 'PRON', 'VERB', 'ADV', 'PRT', 'ADV', '.', '.')\n\nSentence 26: ('But', 'no', 'art', 'at', 'all', 'was', 'born', 'of', 'the', 'art', 
'effort', 'in', 'the', 'early', 'movies', '.')\n\nLabels 26: ('CONJ', 'DET', 'NOUN', 'ADP', 'PRT', 'VERB', 'VERB', 'ADP', 'DET', 'NOUN', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 27: ('``', 'To', 'me', \"you'll\", 'always', 'be', 'the', 'girl', \"o'\", 'my', 'dreams', ',', \"an'\", 'the', 'sweetest', 'flower', 'that', 'grows', \"''\", '.')\n\nLabels 27: ('.', 'ADP', 'PRON', 'PRT', 'ADV', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'CONJ', 'DET', 'ADJ', 'NOUN', 'DET', 'VERB', '.', '.')\n\nSentence 28: ('He', 'held', 'his', 'elbows', 'away', 'from', 'his', 'body', ',', 'and', 'the', 'little', 'sweet', 'potato', 'trilled', 'neatly', 'and', 'sweetly', 'as', 'he', 'tickled', 'its', 'tune-belly', '.')\n\nLabels 28: ('PRON', 'VERB', 'DET', 'NOUN', 'ADV', 'ADP', 'DET', 'NOUN', '.', 'CONJ', 'DET', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'ADV', 'CONJ', 'ADV', 'ADP', 'PRON', 'VERB', 'DET', 'NOUN', '.')\n\nSentence 29: ('The', 'jury', 'also', 'commented', 'on', 'the', 'Fulton', \"ordinary's\", 'court', 'which', 'has', 'been', 'under', 'fire', 'for', 'its', 'practices', 'in', 'the', 'appointment', 'of', 'appraisers', ',', 'guardians', 'and', 'administrators', 'and', 'the', 'awarding', 'of', 'fees', 'and', 'compensation', '.')\n\nLabels 29: ('DET', 'NOUN', 'ADV', 'VERB', 'ADP', 'DET', 'NOUN', 'NOUN', 'NOUN', 'DET', 'VERB', 'VERB', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', '.', 'NOUN', 'CONJ', 'NOUN', 'CONJ', 'DET', 'NOUN', 'ADP', 'NOUN', 'CONJ', 'NOUN', '.')\n\nSentence 30: ('I', \"didn't\", 'want', 'to', 'stir', 'things', 'up', '.')\n\nLabels 30: ('PRON', 'VERB', 'VERB', 'PRT', 'VERB', 'NOUN', 'PRT', '.')\n\nSentence 31: ('Yet', 'there', 'was', 'some', 'precedent', 'for', 'it', '.')\n\nLabels 31: ('ADV', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'PRON', '.')\n\nSentence 32: ('At', 'the', 'time', 'of', 'his', 'capture', 'Helion', 'had', 'on', 'his', 'person', 'a', 'sketchbook', 'he', 'had', 'bought', 'at', \"Woolworth's\", 'in', 'New', 'York', '.')\n\nLabels 32: ('ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'NOUN', 'VERB', 'ADP', 'DET', 'NOUN', 'DET', 'NOUN', 'PRON', 'VERB', 'VERB', 'ADP', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\nSentence 33: ('Nowhere', 'in', 'the', 'boat', 'do', 'the', 'frames', 'come', 'in', 'contact', 'with', 'the', 'plywood', 'planking', '.')\n\nLabels 33: ('ADV', 'ADP', 'DET', 'NOUN', 'VERB', 'DET', 'NOUN', 'VERB', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'NOUN', '.')\n\nSentence 34: ('Undoubtedly', ',', 'however', ',', 'the', 'significance', 'of', 'the', 'volume', 'is', 'greater', 'than', 'the', 'foregoing', 'paragraphs', 'suggest', '.')\n\nLabels 34: ('ADV', '.', 'ADV', '.', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'VERB', 'ADJ', 'ADP', 'DET', 'VERB', 'NOUN', 'VERB', '.')\n\nSentence 35: ('Laguerre', 'Hanover', '(', '(', 'Tar', 'Heel-Lotus', 'Hanover', ')', ',', '2:30.3-:36.1', ';', ';')\n\nLabels 35: ('NOUN', 'NOUN', '.', '.', 'NOUN', 'NOUN', 'NOUN', '.', '.', 'NUM', '.', '.')\n\nSentence 36: ('I', 'looked', 'from', 'her', 'to', 'him', '.')\n\nLabels 36: ('PRON', 'VERB', 'ADP', 'PRON', 'ADP', 'PRON', '.')\n\nSentence 37: ('The', 'radioactivity', 'of', 'fallout', 'decays', 'rapidly', 'at', 'first', '.')\n\nLabels 37: ('DET', 'NOUN', 'ADP', 'NOUN', 'VERB', 'ADV', 'ADP', 'ADV', '.')\n\nSentence 38: ('It', 'was', 'the', '7th', 'Cavalry', 'whose', 'troopers', 'were', 'charged', 'with', 'guarding', 'the', 'Imperial', 'Palace', 'of', 'the', 'Emperor', '.')\n\nLabels 38: ('PRON', 'VERB', 'DET', 'ADJ', 'NOUN', 'DET', 'NOUN', 'VERB', 'VERB', 'ADP', 'VERB', 'DET', 
'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 39: ('He', 'said', ',', '``', 'We', 'had', 'a', 'good', 'time', 'tonight', ',', \"didn't\", 'we', ',', 'Earl', \"''\", '?', '?')\n\nLabels 39: ('PRON', 'VERB', '.', '.', 'PRON', 'VERB', 'DET', 'ADJ', 'NOUN', 'NOUN', '.', 'VERB', 'PRON', '.', 'NOUN', '.', '.', '.')\n\nSentence 40: ('As', 'notable', 'examples', 'of', 'this', 'abuse', ',', 'he', 'quotes', 'passages', 'from', 'the', 'Examiner', ',', '``', 'that', 'Destroyer', 'of', 'all', 'things', \"''\", ',', 'and', 'The', 'Character', 'of', 'Richard', 'Steele', ',', 'which', 'he', 'here', 'attributes', 'to', 'Swift', '.')\n\nLabels 40: ('ADP', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', '.', '.', 'DET', 'NOUN', 'ADP', 'PRT', 'NOUN', '.', '.', 'CONJ', 'DET', 'NOUN', 'ADP', 'NOUN', 'NOUN', '.', 'DET', 'PRON', 'ADV', 'VERB', 'ADP', 'NOUN', '.')\n\nSentence 41: ('The', 'truth', 'is', ',', 'though', ',', 'that', 'men', 'react', 'differently', 'to', 'different', 'treatment', '.')\n\nLabels 41: ('DET', 'NOUN', 'VERB', '.', 'ADV', '.', 'ADP', 'NOUN', 'VERB', 'ADV', 'ADP', 'ADJ', 'NOUN', '.')\n\nSentence 42: ('Robert', 'E.', 'Lee', 'represented', 'the', 'dignity', 'needed', 'by', 'a', 'rebelling', 'confederacy', '.')\n\nLabels 42: ('NOUN', 'NOUN', 'NOUN', 'VERB', 'DET', 'NOUN', 'VERB', 'ADP', 'DET', 'VERB', 'NOUN', '.')\n\nSentence 43: ('Heat', 'transfer', 'by', 'molecular', 'conduction', 'as', 'well', 'as', 'by', 'radiation', 'from', 'the', 'arc', 'column', '.')\n\nLabels 43: ('NOUN', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'ADV', 'ADV', 'ADP', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'NOUN', '.')\n\nSentence 44: ('Since', 'accurate', 'base', 'maps', 'are', 'necessary', 'for', 'any', 'planning', 'program', ',', 'the', 'first', 'step', 'taken', 'by', 'the', 'planning', 'division', 'to', 'implement', 'the', 'long-range', 'state', 'plan', 'has', 'been', 'to', 'prepare', 'two', 'series', 'of', 'base', 'maps', '--', 'one', 'at', 'a', 'scale', 'of', '1', 'inch', 'to', 'a', 'mile', ',', 'and', 'the', 'second', 'a', 'series', 'of', '26', 'sheets', 'at', 'a', 'scale', 'of', '1', 'inch', 'to', '2000', 'feet', ',', 'covering', 'the', 'entire', 'state', '.')\n\nLabels 44: ('ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'ADJ', 'ADP', 'DET', 'VERB', 'NOUN', '.', 'DET', 'ADJ', 'NOUN', 'VERB', 'ADP', 'DET', 'VERB', 'NOUN', 'PRT', 'VERB', 'DET', 'NOUN', 'NOUN', 'NOUN', 'VERB', 'VERB', 'PRT', 'VERB', 'NUM', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.', 'NUM', 'ADP', 'DET', 'NOUN', 'ADP', 'NUM', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'CONJ', 'DET', 'ADJ', 'DET', 'NOUN', 'ADP', 'NUM', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NUM', 'NOUN', 'ADP', 'NUM', 'NOUN', '.', 'VERB', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 45: ('Hong', 'Kong', ',', 'India', 'and', 'Pakistan', 'have', 'been', 'limiting', 'exports', 'of', 'certain', 'types', 'of', 'textiles', 'to', 'Britain', 'for', 'several', 'years', 'under', 'the', '``', 'Lancashire', 'Pact', \"''\", '.')\n\nLabels 45: ('NOUN', 'NOUN', '.', 'NOUN', 'CONJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'ADP', 'DET', '.', 'NOUN', 'NOUN', '.', '.')\n\nSentence 46: ('One', 'day', 'the', 'dogs', 'of', 'Ireland', 'will', 'do', 'that', 'too', 'and', 'perhaps', 'also', 'the', 'pigs', \"''\", '.')\n\nLabels 46: ('NUM', 'NOUN', 'DET', 'NOUN', 'ADP', 'NOUN', 'VERB', 'VERB', 'DET', 'ADV', 'CONJ', 'ADV', 'ADV', 'DET', 'NOUN', '.', '.')\n\nSentence 47: ('The', 'big', 'fans', 'were', 'going', ',', 'drawing', 'from', 
'the', 'large', 'room', 'the', 'remnants', 'of', 'stale', 'smoke', 'which', 'drifted', 'about', 'in', 'pale', 'strata', 'underneath', 'the', 'ceiling', '.')\n\nLabels 47: ('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'DET', 'VERB', 'ADV', 'ADP', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 48: ('He', 'had', 'obtained', 'and', 'provisioned', 'a', 'veteran', 'ship', 'called', 'the', 'Discovery', 'and', 'had', 'recruited', 'a', 'crew', 'of', 'twenty-one', ',', 'the', 'largest', 'he', 'had', 'ever', 'commanded', '.')\n\nLabels 48: ('PRON', 'VERB', 'VERB', 'CONJ', 'VERB', 'DET', 'ADJ', 'NOUN', 'VERB', 'DET', 'NOUN', 'CONJ', 'VERB', 'VERB', 'DET', 'NOUN', 'ADP', 'NUM', '.', 'DET', 'ADJ', 'PRON', 'VERB', 'ADV', 'VERB', '.')\n\nSentence 49: ('The', 'nuclei', 'of', 'these', 'fibers', ',', 'as', 'is', 'shown', 'in', 'Figures', '3', 'and', '4', ',', 'showed', 'remarkable', 'proliferation', 'and', 'were', 'closely', 'approximated', ',', 'forming', 'a', 'chainlike', 'structure', 'at', 'either', 'the', 'center', 'or', 'the', 'periphery', 'of', 'the', 'fiber', '.')\n\nLabels 49: ('DET', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'VERB', 'VERB', 'ADP', 'NOUN', 'NUM', 'CONJ', 'NUM', '.', 'VERB', 'ADJ', 'NOUN', 'CONJ', 'VERB', 'ADV', 'VERB', '.', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'CONJ', 'DET', 'NOUN', 'CONJ', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 50: ('The', 'presence', 'of', 'normally', 'occurring', 'bronchial', 'artery-pulmonary', 'artery', 'anastomoses', 'was', 'first', 'noted', 'in', '1721', 'by', 'Ruysch', ',', 'and', 'thereafter', 'by', 'many', 'others', '.')\n\nLabels 50: ('DET', 'NOUN', 'ADP', 'ADV', 'VERB', 'ADJ', 'ADJ', 'NOUN', 'NOUN', 'VERB', 'ADV', 'VERB', 'ADP', 'NUM', 'ADP', 'NOUN', '.', 'CONJ', 'ADV', 'ADP', 'ADJ', 'NOUN', '.')\n\nSentence 51: ('Much', 'information', 'has', 'been', 'gathered', 'relative', 'to', 'quantitative', 'sampling', 'and', 'assesment', 'techniques', '.')\n\nLabels 51: ('ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADJ', 'ADP', 'ADJ', 'NOUN', 'CONJ', 'NOUN', 'NOUN', '.')\n\nSentence 52: ('But', 'most', 'disturbing', 'of', 'all', 'were', 'the', 'advisers', 'he', 'called', 'to', 'sit', 'with', 'him', 'in', 'the', 'Palace', ';', ';')\n\nLabels 52: ('CONJ', 'ADV', 'ADJ', 'ADP', 'PRT', 'VERB', 'DET', 'NOUN', 'PRON', 'VERB', 'PRT', 'VERB', 'ADP', 'PRON', 'ADP', 'DET', 'NOUN', '.', '.')\n\nSentence 53: ('Precise',)\n\nLabels 53: ('ADJ',)\n\nSentence 54: ('This', 'may', 'both', 'divert', 'the', 'attention', 'of', 'the', 'uninitiate', 'and', 'cause', 'confusion', 'for', 'the', 'more', 'knowledgeable', '.')\n\nLabels 54: ('DET', 'VERB', 'DET', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'CONJ', 'VERB', 'NOUN', 'ADP', 'DET', 'ADV', 'ADJ', '.')\n\nSentence 55: ('6', '.')\n\nLabels 55: ('NUM', '.')\n\nSentence 56: (')', 'Now', 'Af', 'is', 'a', 'diagonalizable', 'operator', 'which', 'is', 'also', 'nilpotent', '.')\n\nLabels 56: ('.', 'ADV', 'NOUN', 'VERB', 'DET', 'ADJ', 'NOUN', 'DET', 'VERB', 'ADV', 'ADJ', '.')\n\nSentence 57: ('He', \"don't\", 'care', 'about', 'anybody', '.')\n\nLabels 57: ('PRON', 'VERB', 'VERB', 'ADP', 'NOUN', '.')\n\nSentence 58: ('``', 'We', 'have', 'witnessed', 'in', 'this', 'campaign', 'the', 'effort', 'to', 'project', 'Mr.', 'Mitchell', 'as', 'the', 'image', 'of', 'a', 'unity', 'candidate', 'from', 'Washington', '.')\n\nLabels 58: ('.', 'PRON', 'VERB', 'VERB', 'ADP', 'DET', 'NOUN', 'DET', 'NOUN', 'PRT', 'VERB', 'NOUN', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 
'NOUN', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 59: ('This', 'was', 'the', 'crassest', 'kind', 'of', 'materialism', 'and', 'they', ',', 'the', 'Artists', ',', 'would', 'have', 'no', 'truck', 'with', 'it', '.')\n\nLabels 59: ('DET', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'CONJ', 'PRON', '.', 'DET', 'NOUN', '.', 'VERB', 'VERB', 'DET', 'NOUN', 'ADP', 'PRON', '.')\n\nSentence 60: (\"It's\", 'easy', 'to', 'see', 'why', '.')\n\nLabels 60: ('PRT', 'ADJ', 'PRT', 'VERB', 'ADV', '.')\n\nSentence 61: ('If', 'you', 'wish', 'to', 'budget', 'closely', 'on', 'transportation', ',', 'saving', 'your', 'extra', 'dollars', 'to', 'indulge', 'in', 'luxuries', ',', 'one', 'agency', 'lists', 'the', 'small', 'Fiat', '500', 'at', 'only', '$1.26', 'a', 'day', 'plus', '$.03', 'a', 'kilometer', 'and', 'the', 'Fiat', '2100', 'Station', 'Wagon', ',', 'seating', 'six', ',', 'at', 'just', '$1.10', 'a', 'day', 'and', '$.105', 'a', 'kilometer', '.')\n\nLabels 61: ('ADP', 'PRON', 'VERB', 'PRT', 'VERB', 'ADV', 'ADP', 'NOUN', '.', 'VERB', 'DET', 'ADJ', 'NOUN', 'PRT', 'VERB', 'ADP', 'NOUN', '.', 'NUM', 'NOUN', 'VERB', 'DET', 'ADJ', 'NOUN', 'NUM', 'ADP', 'ADV', 'NOUN', 'DET', 'NOUN', 'CONJ', 'NOUN', 'DET', 'NOUN', 'CONJ', 'DET', 'NOUN', 'NUM', 'NOUN', 'NOUN', '.', 'VERB', 'NUM', '.', 'ADP', 'ADV', 'NOUN', 'DET', 'NOUN', 'CONJ', 'NOUN', 'DET', 'NOUN', '.')\n\nSentence 62: ('``', 'The', \"kid's\", 'froze', 'good', '.')\n\nLabels 62: ('.', 'DET', 'PRT', 'VERB', 'ADV', '.')\n\nSentence 63: ('But', 'how', 'can', 'one', 'figure', 'symbolize', 'both', '?', '?')\n\nLabels 63: ('CONJ', 'ADV', 'VERB', 'NUM', 'VERB', 'VERB', 'DET', '.', '.')\n\nSentence 64: ('Thus', 'when', 'Premier', 'Khrushchev', 'intimated', 'even', 'before', 'inauguration', 'that', 'he', 'hoped', 'for', 'an', 'early', 'meeting', 'with', 'the', 'new', 'President', ',', 'Mr.', 'Kennedy', 'was', 'confronted', 'with', 'a', 'delicate', 'problem', '.')\n\nLabels 64: ('ADV', 'ADV', 'NOUN', 'NOUN', 'VERB', 'ADV', 'ADP', 'NOUN', 'ADP', 'PRON', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'NOUN', 'NOUN', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 65: ('These', 'are', 'as', 'follows', ':', '(', '1', ')', 'field', 'work', 'procedures', '.')\n\nLabels 65: ('DET', 'VERB', 'ADV', 'VERB', '.', '.', 'NUM', '.', 'NOUN', 'NOUN', 'NOUN', '.')\n\nSentence 66: ('Felix', 'Kopstein', 'states', 'that', '``', 'when', 'the', 'snake', 'reaches', 'its', 'maturity', 'it', 'has', 'already', 'reached', 'about', 'its', 'maximal', 'length', \"''\", ',', 'but', 'goes', 'on', 'to', 'cite', 'the', 'reticulate', 'python', 'as', 'an', 'exception', ',', 'with', 'maximum', 'length', 'approximately', 'three', 'times', 'that', 'at', 'maturity', '.')\n\nLabels 66: ('NOUN', 'NOUN', 'VERB', 'ADP', '.', 'ADV', 'DET', 'NOUN', 'VERB', 'DET', 'NOUN', 'PRON', 'VERB', 'ADV', 'VERB', 'ADV', 'DET', 'ADJ', 'NOUN', '.', '.', 'CONJ', 'VERB', 'PRT', 'PRT', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', 'ADV', 'NUM', 'NOUN', 'DET', 'ADP', 'NOUN', '.')\n\nSentence 67: ('He', 'went', 'around', 'the', 'corner', 'and', 'parked', ',', 'turning', 'off', 'his', 'lights', 'and', 'motor', '.')\n\nLabels 67: ('PRON', 'VERB', 'ADP', 'DET', 'NOUN', 'CONJ', 'VERB', '.', 'VERB', 'PRT', 'DET', 'NOUN', 'CONJ', 'NOUN', '.')\n\nSentence 68: ('If', 'she', 'could', 'not', 'take', 'the', 'children', 'out', 'of', 'this', 'section', ',', 'at', 'least', 'she', 'could', 'take', 'other', 'children', 'out', 'of', 'their', 'countries', 'and', 'put', 'them', 'on', 'the', 
'farms', '.')\n\nLabels 68: ('ADP', 'PRON', 'VERB', 'ADV', 'VERB', 'DET', 'NOUN', 'ADP', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'ADJ', 'PRON', 'VERB', 'VERB', 'ADJ', 'NOUN', 'ADP', 'ADP', 'DET', 'NOUN', 'CONJ', 'VERB', 'PRON', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 69: ('Under', 'its', 'plan', 'Du', 'Pont', 'would', 'retain', 'its', 'General', 'Motors', 'shares', 'but', 'be', 'required', 'to', 'pass', 'on', 'to', 'its', 'stockholders', 'the', 'right', 'to', 'vote', 'those', 'shares', '.')\n\nLabels 69: ('ADP', 'DET', 'NOUN', 'NOUN', 'NOUN', 'VERB', 'VERB', 'DET', 'NOUN', 'NOUN', 'NOUN', 'CONJ', 'VERB', 'VERB', 'PRT', 'VERB', 'PRT', 'ADP', 'DET', 'NOUN', 'DET', 'NOUN', 'PRT', 'VERB', 'DET', 'NOUN', '.')\n\nSentence 70: ('his', 'eyes', 'were', 'black', 'and', 'deep-set', ',', 'and', 'expressionless', '.')\n\nLabels 70: ('DET', 'NOUN', 'VERB', 'ADJ', 'CONJ', 'ADJ', '.', 'CONJ', 'ADJ', '.')\n\nSentence 71: ('It', 'is', 'one', 'of', 'the', 'very', 'few', ',', 'if', 'not', 'the', 'only', 'surviving', 'bridge', 'of', 'its', 'type', 'to', 'serve', 'a', 'main', 'artery', 'of', 'the', 'U.S.', 'highway', 'system', ',', 'thus', 'it', 'is', 'far', 'more', 'than', 'a', 'relic', 'of', 'the', 'horse', 'and', 'buggy', 'days', '.')\n\nLabels 71: ('PRON', 'VERB', 'NUM', 'ADP', 'DET', 'ADV', 'ADJ', '.', 'ADP', 'ADV', 'DET', 'ADJ', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', 'PRT', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', 'NOUN', 'NOUN', '.', 'ADV', 'PRON', 'VERB', 'ADV', 'ADJ', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'CONJ', 'NOUN', 'NOUN', '.')\n\nSentence 72: ('The', 'promise', 'that', 'the', 'lion', 'and', 'the', 'lamb', 'will', 'lie', 'down', 'together', 'was', 'given', 'in', 'the', 'future', 'tense', '.')\n\nLabels 72: ('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'CONJ', 'DET', 'NOUN', 'VERB', 'VERB', 'PRT', 'ADV', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 73: ('``', 'But', \"that's\", 'what', 'he', 'told', 'me', '.')\n\nLabels 73: ('.', 'CONJ', 'PRT', 'DET', 'PRON', 'VERB', 'PRON', '.')\n\nSentence 74: ('The', 'common', 'spices', ',', 'flavorings', ',', 'and', 'condiments', 'make', 'up', 'this', 'group', '.')\n\nLabels 74: ('DET', 'ADJ', 'NOUN', '.', 'NOUN', '.', 'CONJ', 'NOUN', 'VERB', 'PRT', 'DET', 'NOUN', '.')\n\nSentence 75: ('The', 'values', 'and', 'talents', 'which', 'made', 'the', 'tile', 'and', 'the', 'dome', ',', 'the', 'rug', ',', 'the', 'poem', 'and', 'the', 'miniature', ',', 'continue', 'in', 'certain', 'social', 'institutions', 'which', 'rise', 'above', 'the', 'ordinary', 'life', 'of', 'this', 'city', ',', 'as', 'the', 'great', 'buildings', 'rise', 'above', 'blank', 'walls', 'and', 'dirty', 'lanes', '.')\n\nLabels 75: ('DET', 'NOUN', 'CONJ', 'NOUN', 'DET', 'VERB', 'DET', 'NOUN', 'CONJ', 'DET', 'NOUN', '.', 'DET', 'NOUN', '.', 'DET', 'NOUN', 'CONJ', 'DET', 'NOUN', '.', 'VERB', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'DET', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'DET', 'ADJ', 'NOUN', 'VERB', 'ADP', 'ADJ', 'NOUN', 'CONJ', 'ADJ', 'NOUN', '.')\n\nSentence 76: ('But', 'do', 'the', 'plays', 'deal', 'with', 'the', 'same', 'facets', 'of', 'experience', 'religion', 'must', 'also', 'deal', 'with', '?', '?')\n\nLabels 76: ('CONJ', 'VERB', 'DET', 'NOUN', 'VERB', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'NOUN', 'VERB', 'ADV', 'VERB', 'ADP', '.', '.')\n\nSentence 77: ('Called', 'Perennian', ',', 'to', 'indicate', 'its', 'lasting', ',', 'good', 'today', 'and', 'tomorrow', 'quality', ',', 'the', 'collection', 'truly', 'avoids', 'the', 'monotony', 'of', 'identical', 
'pieces', '.')\n\nLabels 77: ('VERB', 'ADJ', '.', 'PRT', 'VERB', 'DET', 'VERB', '.', 'ADJ', 'NOUN', 'CONJ', 'NOUN', 'NOUN', '.', 'DET', 'NOUN', 'ADV', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\nSentence 78: ('It', 'had', 'a', \"gourmet's\", 'corner', '(', 'instead', 'of', 'a', 'kitchen', ')', ',', 'a', 'breakfast', 'room', ',', 'a', 'luncheon', 'room', ',', 'a', 'dining', 'room', ',', 'a', 'sitting', 'room', ',', 'a', 'room', 'for', 'standing', 'up', ',', 'a', 'party', 'room', ',', 'dressing', 'rooms', 'for', 'everybody', ',', 'even', 'a', 'room', 'for', 'mud', '.')\n\nLabels 78: ('PRON', 'VERB', 'DET', 'NOUN', 'NOUN', '.', 'ADV', 'ADP', 'DET', 'NOUN', '.', '.', 'DET', 'NOUN', 'NOUN', '.', 'DET', 'NOUN', 'NOUN', '.', 'DET', 'VERB', 'NOUN', '.', 'DET', 'VERB', 'NOUN', '.', 'DET', 'NOUN', 'ADP', 'VERB', 'PRT', '.', 'DET', 'NOUN', 'NOUN', '.', 'VERB', 'NOUN', 'ADP', 'NOUN', '.', 'ADV', 'DET', 'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 79: ('``', 'Few', 'crank', 'calls', \"''\", ',', 'McFeeley', 'said', '.')\n\nLabels 79: ('.', 'ADJ', 'NOUN', 'NOUN', '.', '.', 'NOUN', 'VERB', '.')\n\nSentence 80: ('If', 'he', 'does', ',', \"it's\", 'still', 'better', 'than', 'an', 'even', 'chance', 'he', \"won't\", 'notice', 'the', 'transposition', 'of', 'the', 'numbers', ',', 'and', 'if', 'he', 'should', 'notice', 'it', ',', 'the', 'thing', 'can', 'be', 'passed', 'off', 'as', 'an', 'honest', 'mistake', '.')\n\nLabels 80: ('ADP', 'PRON', 'VERB', '.', 'PRT', 'ADV', 'ADJ', 'ADP', 'DET', 'ADJ', 'NOUN', 'PRON', 'VERB', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'CONJ', 'ADP', 'PRON', 'VERB', 'VERB', 'PRON', '.', 'DET', 'NOUN', 'VERB', 'VERB', 'VERB', 'PRT', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 81: ('The', 'most', 'infamous', 'of', 'all', 'was', 'launched', 'by', 'the', 'explosion', 'of', 'the', 'island', 'of', 'Krakatoa', 'in', '1883', ';', ';')\n\nLabels 81: ('DET', 'ADV', 'ADJ', 'ADP', 'PRT', 'VERB', 'VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NUM', '.', '.')\n\nSentence 82: ('What', 'about', 'transfers', '?', '?')\n\nLabels 82: ('DET', 'ADP', 'NOUN', '.', '.')\n\nSentence 83: ('This', 'angle', 'of', 'just', 'where', 'the', 'Orioles', 'can', 'look', 'for', 'improvement', 'this', 'year', 'is', 'an', 'interesting', 'one', '.')\n\nLabels 83: ('DET', 'NOUN', 'ADP', 'ADV', 'ADV', 'DET', 'NOUN', 'VERB', 'VERB', 'ADP', 'NOUN', 'DET', 'NOUN', 'VERB', 'DET', 'ADJ', 'NUM', '.')\n\nSentence 84: (')',)\n\nLabels 84: ('.',)\n\nSentence 85: ('Under', 'the', 'influence', 'of', 'marijuana', 'the', 'beatnik', 'comes', 'alive', 'within', 'and', 'experiences', 'a', 'wonderfully', 'enhanced', 'sense', 'of', 'self', 'as', 'if', 'he', 'had', 'discovered', 'the', 'open', 'sesame', 'to', 'the', 'universe', 'of', 'being', '.')\n\nLabels 85: ('ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'DET', 'NOUN', 'VERB', 'ADJ', 'ADP', 'CONJ', 'VERB', 'DET', 'ADV', 'VERB', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADP', 'PRON', 'VERB', 'VERB', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'VERB', '.')\n\nSentence 86: ('She', 'asked', 'him', 'and', ',', 'laughing', ',', 'she', 'added', ',', '``', 'I', 'was', 'nervous', 'about', 'buying', 'a', 'book', 'with', 'a', 'title', 'like', 'that', ',', 'but', 'I', 'knew', \"you'd\", 'like', 'it', \"''\", '.')\n\nLabels 86: ('PRON', 'VERB', 'PRON', 'CONJ', '.', 'VERB', '.', 'PRON', 'VERB', '.', '.', 'PRON', 'VERB', 'ADJ', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', '.', 'CONJ', 'PRON', 'VERB', 'PRT', 'VERB', 'PRON', '.', '.')\n\nSentence 87: ('At', 
'the', 'same', 'moment', 'Wheeler', 'Fiske', 'fired', 'the', 'rifle', 'Mike', 'had', 'given', 'him', 'and', 'another', 'guerrilla', 'was', 'hit', '.')\n\nLabels 87: ('ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', 'NOUN', 'VERB', 'DET', 'NOUN', 'NOUN', 'VERB', 'VERB', 'PRON', 'CONJ', 'DET', 'NOUN', 'VERB', 'VERB', '.')\n\nSentence 88: ('``', 'And', 'what', 'makes', 'you', 'think', \"you're\", 'going', 'to', 'get', 'it', ',', 'pretty', 'boy', \"''\", '?', '?')\n\nLabels 88: ('.', 'CONJ', 'DET', 'VERB', 'PRON', 'VERB', 'PRT', 'VERB', 'PRT', 'VERB', 'PRON', '.', 'ADJ', 'NOUN', '.', '.', '.')\n\nSentence 89: ('``', 'Certainly', ',', 'sir', \"''\", '.')\n\nLabels 89: ('.', 'ADV', '.', 'NOUN', '.', '.')\n\nSentence 90: ('Later', ',', 'riding', 'in', 'for', 'some', 'lusty', 'enjoyment', 'of', 'the', 'liquor', 'and', 'professional', 'ladies', 'of', 'Cheyenne', ',', 'he', 'laid', 'claim', 'to', 'the', 'killing', 'with', 'the', 'vague', 'insinuations', 'he', 'made', '.')\n\nLabels 90: ('ADV', '.', 'VERB', 'PRT', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'NOUN', '.', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', 'PRON', 'VERB', '.')\n\nSentence 91: ('It', 'was', 'said', 'that', 'the', 'Hetman', 'plotted', 'to', 'take', 'over', 'the', 'entire', 'Hearst', 'newspaper', 'empire', 'one', 'day', 'by', 'means', 'of', 'various', 'coups', ':', 'the', 'destruction', 'of', 'editors', 'who', 'tried', 'to', 'halt', 'his', 'course', ',', 'the', 'unfrocking', 'of', 'publishers', 'whose', 'mistakes', 'of', 'judgment', 'might', 'be', 'magnified', 'in', 'secret', 'reports', 'to', 'Mr.', 'Hearst', '.')\n\nLabels 91: ('PRON', 'VERB', 'VERB', 'ADP', 'DET', 'NOUN', 'VERB', 'PRT', 'VERB', 'PRT', 'DET', 'ADJ', 'NOUN', 'NOUN', 'NOUN', 'NUM', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.', 'DET', 'NOUN', 'ADP', 'NOUN', 'PRON', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', '.', 'DET', 'NOUN', 'ADP', 'NOUN', 'DET', 'NOUN', 'ADP', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'NOUN', '.')\n\nSentence 92: ('Mama', 'always', 'felt', 'that', 'the', 'collection', 'symbolized', 'Mrs.', \"Coolidge's\", 'wish', 'for', 'a', 'little', 'girl', '.')\n\nLabels 92: ('NOUN', 'ADV', 'VERB', 'ADP', 'DET', 'NOUN', 'VERB', 'NOUN', 'NOUN', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.')\n\nSentence 93: ('We', 'would', 'have', 'preferred', ',', 'however', ',', 'to', 'have', 'had', 'the', 'rest', 'of', 'the', 'orchestra', 'refrain', 'from', 'laughing', 'at', 'this', 'and', 'other', 'spots', 'on', 'the', 'recording', ',', 'since', 'it', 'mars', 'an', 'otherwise', 'sober', ',', 'if', 'not', 'lofty', ',', 'performance', '.')\n\nLabels 93: ('PRON', 'VERB', 'VERB', 'VERB', '.', 'ADV', '.', 'PRT', 'VERB', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'VERB', 'ADP', 'VERB', 'ADP', 'DET', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'PRON', 'VERB', 'DET', 'ADV', 'ADJ', '.', 'ADP', 'ADV', 'ADJ', '.', 'NOUN', '.')\n\nSentence 94: ('Their', 'burgeoning', 'popularity', 'may', 'be', 'a', 'result', 'of', 'the', 'closing', 'of', 'the', '52nd', 'Street', 'burlesque', 'joints', ',', 'but', 'curiously', 'enough', 'their', 'atmosphere', 'is', 'almost', 'always', 'familial', '--', 'neighborhood', 'saloons', 'with', 'a', 'bit', 'of', 'epidermis', '.')\n\nLabels 94: ('DET', 'VERB', 'NOUN', 'VERB', 'VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', 'NOUN', '.', 'CONJ', 'ADV', 'ADV', 'DET', 'NOUN', 'VERB', 'ADV', 'ADV', 'ADJ', '.', 'NOUN', 'NOUN', 'ADP', 'DET', 
'NOUN', 'ADP', 'NOUN', '.')\n\nSentence 95: ('He', 'swung', 'around', ',', 'eyes', 'toward', 'the', 'bedroom', ',', 'some', 'fifteen', 'feet', 'away', '.')\n\nLabels 95: ('PRON', 'VERB', 'ADV', '.', 'NOUN', 'ADP', 'DET', 'NOUN', '.', 'DET', 'NUM', 'NOUN', 'ADV', '.')\n\nSentence 96: ('It', 'is', 'possible', 'that', 'certain', 'mutational', 'forms', 'may', 'be', 'produced', 'such', 'as', 'antibiotic', 'resistant', 'strains', '.')\n\nLabels 96: ('PRON', 'VERB', 'ADJ', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADJ', 'ADP', 'NOUN', 'ADJ', 'NOUN', '.')\n\nSentence 97: ('Few', 'passed', \"''\", '.')\n\nLabels 97: ('ADJ', 'VERB', '.', '.')\n\nSentence 98: ('An', 'examination', 'of', 'some', 'forty', 'catalogs', 'of', 'schools', 'offering', 'courses', 'in', 'interior', 'design', ',', 'for', 'the', 'most', 'part', 'schools', 'accredited', 'by', 'membership', 'in', 'the', 'National', 'Association', 'of', 'Schools', 'of', 'Art', ',', 'and', 'a', 'further', '``', 'on', 'the', 'spot', \"''\", 'inspection', 'of', 'a', 'number', 'of', 'schools', ',', 'show', 'their', 'courses', 'adhere', 'pretty', 'closely', 'to', 'the', 'recommendations', '.')\n\nLabels 98: ('DET', 'NOUN', 'ADP', 'DET', 'NUM', 'NOUN', 'ADP', 'NOUN', 'VERB', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.', 'ADP', 'DET', 'ADJ', 'NOUN', 'NOUN', 'VERB', 'ADP', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', '.', 'CONJ', 'DET', 'ADJ', '.', 'ADP', 'DET', 'NOUN', '.', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', '.', 'VERB', 'DET', 'NOUN', 'VERB', 'ADV', 'ADV', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 99: ('It', 'changes', 'the', 'answers', 'to', '``', 'Who', 'should', 'do', 'what', ',', 'and', 'where', \"''\", '?', '?')\n\nLabels 99: ('PRON', 'VERB', 'DET', 'NOUN', 'ADP', '.', 'PRON', 'VERB', 'VERB', 'DET', '.', 'CONJ', 'ADV', '.', '.', '.')\n\nSentence 100: ('the', 'clay', 'he', 'used', 'plastically', 'to', 'suggest', 'soft', 'moving', 'flesh', ',', 'as', 'in', 'an', 'abdomen', ',', 'in', 'a', 'reclining', 'torso', ';', ';')\n\nLabels 100: ('DET', 'NOUN', 'PRON', 'VERB', 'ADV', 'PRT', 'VERB', 'ADJ', 'VERB', 'NOUN', '.', 'ADP', 'ADP', 'DET', 'NOUN', '.', 'ADP', 'DET', 'VERB', 'NOUN', '.', '.')\n\n"
]
],
[
[
"#### Accessing (word, tag) Samples\nThe `Dataset.stream()` method returns an iterator that chains together every pair of (word, tag) entries across all sentences in the entire corpus.",
"_____no_output_____"
]
],
[
[
"# use Dataset.stream() (word, tag) samples for the entire corpus\nprint(\"\\nStream (word, tag) pairs:\\n\")\nfor i, pair in enumerate(data.stream()):\n print(\"\\t\", pair)\n if i > 5: break",
"\nStream (word, tag) pairs:\n\n\t ('Mr.', 'NOUN')\n\t ('Podger', 'NOUN')\n\t ('had', 'VERB')\n\t ('thanked', 'VERB')\n\t ('him', 'PRON')\n\t ('gravely', 'ADV')\n\t (',', '.')\n"
]
],
[
[
"\nFor both our baseline tagger and the HMM model we'll build, we need to estimate the frequency of tags & words from the frequency counts of observations in the training corpus. In the next several cells you will complete functions to compute the counts of several sets of counts. ",
"_____no_output_____"
],
[
"## Step 2: Build a Most Frequent Class tagger\n---\n\nPerhaps the simplest tagger (and a good baseline for tagger performance) is to simply choose the tag most frequently assigned to each word. This \"most frequent class\" tagger inspects each observed word in the sequence and assigns it the label that was most often assigned to that word in the corpus.",
"_____no_output_____"
],
[
"### IMPLEMENTATION: Pair Counts\n\nComplete the function below that computes the joint frequency counts for two input sequences.",
"_____no_output_____"
]
],
[
[
"def pair_counts(sequences_A, sequences_B):\n \"\"\"Return a dictionary keyed to each unique value in the first sequence list\n that counts the number of occurrences of the corresponding value from the\n second sequences list.\n \n For example, if sequences_A is tags and sequences_B is the corresponding\n words, then if 1244 sequences contain the word \"time\" tagged as a NOUN, then\n you should return a dictionary such that pair_counts[NOUN][time] == 1244\n \"\"\"\n # TODO: Finish this function!\n #raise NotImplementedError\n \n pair_count = {}\n \n for i in range(len(sequences_A)):\n for word, tag in zip(sequences_A[i], sequences_B[i]):\n if tag not in pair_count:\n pair_count[tag] = {}\n pair_count[tag][word] = 1\n else:\n pair_count[tag][word] = pair_count[tag].get(word, 0) + 1\n return pair_count\n\n# Calculate C(t_i, w_i)\nemission_counts = pair_counts(data.X, data.Y)\n\nassert len(emission_counts) == 12, \\\n \"Uh oh. There should be 12 tags in your dictionary.\"\nassert max(emission_counts[\"NOUN\"], key=emission_counts[\"NOUN\"].get) == 'time', \\\n \"Hmmm...'time' is expected to be the most common NOUN.\"\nHTML('<div class=\"alert alert-block alert-success\">Your emission counts look good!</div>')",
"_____no_output_____"
]
],
[
[
"### IMPLEMENTATION: Most Frequent Class Tagger\n\nUse the `pair_counts()` function and the training dataset to find the most frequent class label for each word in the training data, and populate the `mfc_table` below. The table keys should be words, and the values should be the appropriate tag string.\n\nThe `MFCTagger` class is provided to mock the interface of Pomegranite HMM models so that they can be used interchangeably.",
"_____no_output_____"
]
],
[
[
"# Create a lookup table mfc_table where mfc_table[word] contains the tag label most frequently assigned to that word\nfrom collections import namedtuple\n\nFakeState = namedtuple(\"FakeState\", \"name\")\n\nclass MFCTagger:\n # NOTE: You should not need to modify this class or any of its methods\n missing = FakeState(name=\"<MISSING>\")\n \n def __init__(self, table):\n self.table = defaultdict(lambda: MFCTagger.missing)\n self.table.update({word: FakeState(name=tag) for word, tag in table.items()})\n \n def viterbi(self, seq):\n \"\"\"This method simplifies predictions by matching the Pomegranate viterbi() interface\"\"\"\n return 0., list(enumerate([\"<start>\"] + [self.table[w] for w in seq] + [\"<end>\"]))\n\n\n# TODO: calculate the frequency of each tag being assigned to each word (hint: similar, but not\n# the same as the emission probabilities) and use it to fill the mfc_table\n\nword_counts = pair_counts(data.training_set.Y, data.training_set.X)\n\nmfc_table = dict((word, max(tags.keys(), key=lambda key: tags[key])) for word, tags in word_counts.items())\n\n# DO NOT MODIFY BELOW THIS LINE\nmfc_model = MFCTagger(mfc_table) # Create a Most Frequent Class tagger instance\n\nassert len(mfc_table) == len(data.training_set.vocab), \"\"\nassert all(k in data.training_set.vocab for k in mfc_table.keys()), \"\"\nassert sum(int(k not in mfc_table) for k in data.testing_set.vocab) == 5521, \"\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger has all the correct words!</div>')",
"_____no_output_____"
]
],
[
[
"### Making Predictions with a Model\nThe helper functions provided below interface with Pomegranate network models & the mocked MFCTagger to take advantage of the [missing value](http://pomegranate.readthedocs.io/en/latest/nan.html) functionality in Pomegranate through a simple sequence decoding function. Run these functions, then run the next cell to see some of the predictions made by the MFC tagger.",
"_____no_output_____"
]
],
[
[
"def replace_unknown(sequence):\n \"\"\"Return a copy of the input sequence where each unknown word is replaced\n by the literal string value 'nan'. Pomegranate will ignore these values\n during computation.\n \"\"\"\n return [w if w in data.training_set.vocab else 'nan' for w in sequence]\n\ndef simplify_decoding(X, model):\n \"\"\"X should be a 1-D sequence of observations for the model to predict\"\"\"\n _, state_path = model.viterbi(replace_unknown(X))\n return [state[1].name for state in state_path[1:-1]] # do not show the start/end state predictions",
"_____no_output_____"
]
],
[
[
"### Example Decoding Sequences with MFC Tagger",
"_____no_output_____"
]
],
[
[
"for key in data.testing_set.keys[:3]:\n print(\"Sentence Key: {}\\n\".format(key))\n print(\"Predicted labels:\\n-----------------\")\n print(simplify_decoding(data.sentences[key].words, mfc_model))\n print()\n print(\"Actual labels:\\n--------------\")\n print(data.sentences[key].tags)\n print(\"\\n\")",
"Sentence Key: b100-28144\n\nPredicted labels:\n-----------------\n['CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.']\n\nActual labels:\n--------------\n('CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.')\n\n\nSentence Key: b100-23146\n\nPredicted labels:\n-----------------\n['PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.']\n\nActual labels:\n--------------\n('PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\n\nSentence Key: b100-35462\n\nPredicted labels:\n-----------------\n['DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', '<MISSING>', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADV', 'NOUN', '.']\n\nActual labels:\n--------------\n('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\n\n"
]
],
[
[
"### Evaluating Model Accuracy\n\nThe function below will evaluate the accuracy of the MFC tagger on the collection of all sentences from a text corpus. ",
"_____no_output_____"
]
],
[
[
"def accuracy(X, Y, model):\n \"\"\"Calculate the prediction accuracy by using the model to decode each sequence\n in the input X and comparing the prediction with the true labels in Y.\n \n The X should be an array whose first dimension is the number of sentences to test,\n and each element of the array should be an iterable of the words in the sequence.\n The arrays X and Y should have the exact same shape.\n \n X = [(\"See\", \"Spot\", \"run\"), (\"Run\", \"Spot\", \"run\", \"fast\"), ...]\n Y = [(), (), ...]\n \"\"\"\n correct = total_predictions = 0\n for observations, actual_tags in zip(X, Y):\n \n # The model.viterbi call in simplify_decoding will return None if the HMM\n # raises an error (for example, if a test sentence contains a word that\n # is out of vocabulary for the training set). Any exception counts the\n # full sentence as an error (which makes this a conservative estimate).\n try:\n most_likely_tags = simplify_decoding(observations, model)\n correct += sum(p == t for p, t in zip(most_likely_tags, actual_tags))\n except:\n pass\n total_predictions += len(observations)\n return correct / total_predictions",
"_____no_output_____"
]
],
[
[
"#### Evaluate the accuracy of the MFC tagger\nRun the next cell to evaluate the accuracy of the tagger on the training and test corpus.",
"_____no_output_____"
]
],
[
[
"mfc_training_acc = accuracy(data.training_set.X, data.training_set.Y, mfc_model)\nprint(\"training accuracy mfc_model: {:.2f}%\".format(100 * mfc_training_acc))\n\nmfc_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, mfc_model)\nprint(\"testing accuracy mfc_model: {:.2f}%\".format(100 * mfc_testing_acc))\n\nassert mfc_training_acc >= 0.955, \"Uh oh. Your MFC accuracy on the training set doesn't look right.\"\nassert mfc_testing_acc >= 0.925, \"Uh oh. Your MFC accuracy on the testing set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger accuracy looks correct!</div>')",
"training accuracy mfc_model: 95.72%\ntesting accuracy mfc_model: 93.02%\n"
]
],
[
[
"## Step 3: Build an HMM tagger\n---\nThe HMM tagger has one hidden state for each possible tag, and parameterized by two distributions: the emission probabilties giving the conditional probability of observing a given **word** from each hidden state, and the transition probabilities giving the conditional probability of moving between **tags** during the sequence.\n\nWe will also estimate the starting probability distribution (the probability of each **tag** being the first tag in a sequence), and the terminal probability distribution (the probability of each **tag** being the last tag in a sequence).\n\nThe maximum likelihood estimate of these distributions can be calculated from the frequency counts as described in the following sections where you'll implement functions to count the frequencies, and finally build the model. The HMM model will make predictions according to the formula:\n\n$$t_i^n = \\underset{t_i^n}{\\mathrm{argmax}} \\prod_{i=1}^n P(w_i|t_i) P(t_i|t_{i-1})$$\n\nRefer to Speech & Language Processing [Chapter 10](https://web.stanford.edu/~jurafsky/slp3/10.pdf) for more information.",
"_____no_output_____"
],
[
"### IMPLEMENTATION: Unigram Counts\n\nComplete the function below to estimate the co-occurrence frequency of each symbol over all of the input sequences. The unigram probabilities in our HMM model are estimated from the formula below, where N is the total number of samples in the input. (You only need to compute the counts for now.)\n\n$$P(tag_1) = \\frac{C(tag_1)}{N}$$",
"_____no_output_____"
]
],
[
[
"def unigram_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequence list that\n counts the number of occurrences of the value in the sequences list. The sequences\n collection should be a 2-dimensional array.\n \n For example, if the tag NOUN appears 275558 times over all the input sequences,\n then you should return a dictionary such that your_unigram_counts[NOUN] == 275558.\n \"\"\"\n return Counter(sequences)\n # raise NotImplementedError\n\n# TODO: call unigram_counts with a list of tag sequences from the training set\ntags = [tag for i, (word, tag) in enumerate(data.training_set.stream())]\ntag_unigrams = unigram_counts(tags)\n\nassert set(tag_unigrams.keys()) == data.training_set.tagset, \\\n \"Uh oh. It looks like your tag counts doesn't include all the tags!\"\nassert min(tag_unigrams, key=tag_unigrams.get) == 'X', \\\n \"Hmmm...'X' is expected to be the least common class\"\nassert max(tag_unigrams, key=tag_unigrams.get) == 'NOUN', \\\n \"Hmmm...'NOUN' is expected to be the most common class\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag unigrams look good!</div>')",
"_____no_output_____"
]
],
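[
[
"# Optional sanity check (an addition, not part of the original project rubric): the\n# unigram counts above give the maximum likelihood estimate of each tag's probability,\n# P(tag) = C(tag) / N, where N is the total number of tagged words in the training set.\nN = sum(tag_unigrams.values())\ntag_probabilities = {tag: count / N for tag, count in tag_unigrams.items()}\nprint(sorted(tag_probabilities.items(), key=lambda item: -item[1])[:3])",
"_____no_output_____"
]
],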
[
[
"### IMPLEMENTATION: Bigram Counts\n\nComplete the function below to estimate the co-occurrence frequency of each pair of symbols in each of the input sequences. These counts are used in the HMM model to estimate the bigram probability of two tags from the frequency counts according to the formula: $$P(tag_2|tag_1) = \\frac{C(tag_2|tag_1)}{C(tag_2)}$$\n",
"_____no_output_____"
]
],
[
[
"def bigram_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique PAIR of values in the input sequences\n list that counts the number of occurrences of pair in the sequences list. The input\n should be a 2-dimensional array.\n \n For example, if the pair of tags (NOUN, VERB) appear 61582 times, then you should\n return a dictionary such that your_bigram_counts[(NOUN, VERB)] == 61582\n \"\"\"\n\n # TODO: Finish this function!\n return Counter(sequences)\n #raise NotImplementedError\n\n# TODO: call bigram_counts with a list of tag sequences from the training set\ntags = [tag for i, (word, tag) in enumerate(data.stream())]\ntag_pairs = [(tags[i],tags[i+1]) for i in range(0,len(tags)-2,2)]\ntag_bigrams = bigram_counts(tag_pairs)\n\n\nassert len(tag_bigrams) == 144, \\\n \"Uh oh. There should be 144 pairs of bigrams (12 tags x 12 tags)\"\nassert min(tag_bigrams, key=tag_bigrams.get) in [('X', 'NUM'), ('PRON', 'X')], \\\n \"Hmmm...The least common bigram should be one of ('X', 'NUM') or ('PRON', 'X').\"\nassert max(tag_bigrams, key=tag_bigrams.get) in [('DET', 'NOUN')], \\\n \"Hmmm...('DET', 'NOUN') is expected to be the most common bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag bigrams look good!</div>')",
"_____no_output_____"
]
],
[
[
"### IMPLEMENTATION: Sequence Starting Counts\nComplete the code below to estimate the bigram probabilities of a sequence starting with each tag.",
"_____no_output_____"
]
],
[
[
"def starting_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n that counts the number of occurrences where that value is at the beginning of\n a sequence.\n \n For example, if 8093 sequences start with NOUN, then you should return a\n dictionary such that your_starting_counts[NOUN] == 8093\n \"\"\"\n # TODO: Finish this function!\n #raise NotImplementedError\n return Counter(sequences)\n\n# TODO: Calculate the count of each tag starting a sequence\nstarting_tags = [tag[0] for tag in data.training_set.Y ]\ntag_starts = starting_counts(starting_tags)\n\nassert len(tag_starts) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_starts, key=tag_starts.get) == 'X', \"Hmmm...'X' is expected to be the least common starting bigram.\"\nassert max(tag_starts, key=tag_starts.get) == 'DET', \"Hmmm...'DET' is expected to be the most common starting bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your starting tag counts look good!</div>')",
"_____no_output_____"
]
],
[
[
"### IMPLEMENTATION: Sequence Ending Counts\nComplete the function below to estimate the bigram probabilities of a sequence ending with each tag.",
"_____no_output_____"
]
],
[
[
"def ending_counts(sequences):\n \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n that counts the number of occurrences where that value is at the end of\n a sequence.\n \n For example, if 18 sequences end with DET, then you should return a\n dictionary such that your_starting_counts[DET] == 18\n \"\"\"\n # TODO: Finish this function!\n #raise NotImplementedError\n return Counter(sequences)\n\n# TODO: Calculate the count of each tag ending a sequence\nending_tags = [tag[-1] for tag in data.training_set.Y ]\ntag_ends = ending_counts(ending_tags)\n\nassert len(tag_ends) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_ends, key=tag_ends.get) in ['X', 'CONJ'], \"Hmmm...'X' or 'CONJ' should be the least common ending bigram.\"\nassert max(tag_ends, key=tag_ends.get) == '.', \"Hmmm...'.' is expected to be the most common ending bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your ending tag counts look good!</div>')",
"_____no_output_____"
]
],
[
[
"### IMPLEMENTATION: Basic HMM Tagger\nUse the tag unigrams and bigrams calculated above to construct a hidden Markov tagger.\n\n- Add one state per tag\n - The emission distribution at each state should be estimated with the formula: $P(w|t) = \\frac{C(t, w)}{C(t)}$\n- Add an edge from the starting state `basic_model.start` to each tag\n - The transition probability should be estimated with the formula: $P(t|start) = \\frac{C(start, t)}{C(start)}$\n- Add an edge from each tag to the end state `basic_model.end`\n - The transition probability should be estimated with the formula: $P(end|t) = \\frac{C(t, end)}{C(t)}$\n- Add an edge between _every_ pair of tags\n - The transition probability should be estimated with the formula: $P(t_2|t_1) = \\frac{C(t_1, t_2)}{C(t_1)}$",
"_____no_output_____"
]
],
[
[
"basic_model = HiddenMarkovModel(name=\"base-hmm-tagger\")\n\n# TODO: create states with emission probability distributions P(word | tag) and add to the model\n# (Hint: you may need to loop & create/add new states)\ntag_states = {}\nfor tag in data.training_set.tagset:\n emission_prob = {word:emission_counts[tag][word]/tag_unigrams[tag] for word in emission_counts[tag]}\n tag_emission = DiscreteDistribution(emission_prob)\n tag_states[tag] = State(tag_emission, name=tag)\n \n basic_model.add_states(tag_states[tag])\n basic_model.add_transition(basic_model.start, tag_states[tag], tag_starts[tag]/len(data.training_set))\n basic_model.add_transition(tag_states[tag], basic_model.end, tag_ends[tag]/len(data.training_set))\n\n\nfor tag1 in data.training_set.tagset:\n for tag2 in data.training_set.tagset:\n basic_model.add_transition(tag_states[tag1], tag_states[tag2], tag_bigrams[tag1,tag2]/tag_unigrams[tag1])\n\n\n# TODO: add edges between states for the observed transition frequencies P(tag_i | tag_i-1)\n# (Hint: you may need to loop & add transitions\n\nshow_model(basic_model, figsize=(5, 5), filename=\"example.png\", overwrite=True, show_ends=True)\n \n# NOTE: YOU SHOULD NOT NEED TO MODIFY ANYTHING BELOW THIS LINE\n# finalize the model\nbasic_model.bake()\n\nassert all(tag in set(s.name for s in basic_model.states) for tag in data.training_set.tagset), \\\n \"Every state in your network should use the name of the associated tag, which must be one of the training set tags.\"\nassert basic_model.edge_count() == 168, \\\n (\"Your network should have an edge from the start node to each state, one edge between every \" +\n \"pair of tags (states), and an edge from each state to the end node.\")\nHTML('<div class=\"alert alert-block alert-success\">Your HMM network topology looks good!</div>')",
"_____no_output_____"
],
[
"hmm_training_acc = accuracy(data.training_set.X, data.training_set.Y, basic_model)\nprint(\"training accuracy basic hmm model: {:.2f}%\".format(100 * hmm_training_acc))\n\nhmm_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, basic_model)\nprint(\"testing accuracy basic hmm model: {:.2f}%\".format(100 * hmm_testing_acc))\n\nassert hmm_training_acc > 0.97, \"Uh oh. Your HMM accuracy on the training set doesn't look right.\"\nassert hmm_training_acc > 0.955, \"Uh oh. Your HMM accuracy on the training set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your HMM tagger accuracy looks correct! Congratulations, you\\'ve finished the project.</div>')",
"training accuracy basic hmm model: 97.53%\ntesting accuracy basic hmm model: 96.16%\n"
]
],
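[
[
"# Optional illustration (an addition to the original notebook): the HMM scores a tag\n# sequence by multiplying emission terms P(word|tag) = C(tag, word) / C(tag) with\n# transition terms P(tag2|tag1) = C(tag1, tag2) / C(tag1). The two ratios below are\n# spot checks of the same maximum likelihood estimates used when constructing\n# basic_model above.\np_time_given_noun = emission_counts['NOUN']['time'] / tag_unigrams['NOUN']\np_noun_given_det = tag_bigrams[('DET', 'NOUN')] / tag_unigrams['DET']\nprint(\"P('time' | NOUN) = {:.6f}\".format(p_time_given_noun))\nprint(\"P(NOUN | DET) = {:.6f}\".format(p_noun_given_det))",
"_____no_output_____"
]
],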
[
[
"### Example Decoding Sequences with the HMM Tagger",
"_____no_output_____"
]
],
[
[
"for key in data.testing_set.keys[:3]:\n print(\"Sentence Key: {}\\n\".format(key))\n print(\"Predicted labels:\\n-----------------\")\n print(simplify_decoding(data.sentences[key].words, basic_model))\n print()\n print(\"Actual labels:\\n--------------\")\n print(data.sentences[key].tags)\n print(\"\\n\")",
"_____no_output_____"
]
],
[
[
"\n## Finishing the project\n---\n\n<div class=\"alert alert-block alert-info\">\n**Note:** **SAVE YOUR NOTEBOOK**, then run the next cell to generate an HTML copy. You will zip & submit both this file and the HTML copy for review.\n</div>",
"_____no_output_____"
]
],
[
[
"!!jupyter nbconvert *.ipynb",
"_____no_output_____"
]
],
[
[
"## Step 4: [Optional] Improving model performance\n---\nThere are additional enhancements that can be incorporated into your tagger that improve performance on larger tagsets where the data sparsity problem is more significant. The data sparsity problem arises because the same amount of data split over more tags means there will be fewer samples in each tag, and there will be more missing data tags that have zero occurrences in the data. The techniques in this section are optional.\n\n- [Laplace Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) (pseudocounts)\n Laplace smoothing is a technique where you add a small, non-zero value to all observed counts to offset for unobserved values.\n\n- Backoff Smoothing\n Another smoothing technique is to interpolate between n-grams for missing data. This method is more effective than Laplace smoothing at combatting the data sparsity problem. Refer to chapters 4, 9, and 10 of the [Speech & Language Processing](https://web.stanford.edu/~jurafsky/slp3/) book for more information.\n\n- Extending to Trigrams\n HMM taggers have achieved better than 96% accuracy on this dataset with the full Penn treebank tagset using an architecture described in [this](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf) paper. Altering your HMM to achieve the same performance would require implementing deleted interpolation (described in the paper), incorporating trigram probabilities in your frequency tables, and re-implementing the Viterbi algorithm to consider three consecutive states instead of two.\n\n### Obtain the Brown Corpus with a Larger Tagset\nRun the code below to download a copy of the brown corpus with the full NLTK tagset. You will need to research the available tagset information in the NLTK docs and determine the best way to extract the subset of NLTK tags you want to explore. If you write the following the format specified in Step 1, then you can reload the data using all of the code above for comparison.\n\nRefer to [Chapter 5](http://www.nltk.org/book/ch05.html) of the NLTK book for more information on the available tagsets.",
"_____no_output_____"
]
],
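[
[
"# Optional sketch of the Laplace (add-k pseudocount) smoothing idea described above.\n# This cell is an illustrative addition, not part of the original project code; the\n# Brown corpus download referred to above is in the next cell. With a pseudocount k\n# and a vocabulary of size V, the smoothed emission estimate becomes\n# P(word|tag) = (C(tag, word) + k) / (C(tag) + k * V),\n# so every word gets a non-zero probability under every tag.\ndef smoothed_emission_probs(tag, vocab, k=0.01):\n    counts = emission_counts[tag]\n    denom = tag_unigrams[tag] + k * len(vocab)\n    return {word: (counts.get(word, 0) + k) / denom for word in vocab}\n\nsmoothed = smoothed_emission_probs('NOUN', data.training_set.vocab)\nprint(min(smoothed.values()), max(smoothed.values()))",
"_____no_output_____"
]
],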
[
[
"import nltk\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.corpus import brown\n\nnltk.download('brown')\ntraining_corpus = nltk.corpus.brown\ntraining_corpus.tagged_sents()[0]",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb740f0610286e355e84f6764e8203acd046ada6 | 7,702 | ipynb | Jupyter Notebook | notebooks/lexicon_sc.ipynb | ekinakyurek/lexsym | 58e2261fd18812aef3a2c5b8796aba7522a61941 | [
"MIT"
]
| null | null | null | notebooks/lexicon_sc.ipynb | ekinakyurek/lexsym | 58e2261fd18812aef3a2c5b8796aba7522a61941 | [
"MIT"
]
| null | null | null | notebooks/lexicon_sc.ipynb | ekinakyurek/lexsym | 58e2261fd18812aef3a2c5b8796aba7522a61941 | [
"MIT"
]
| null | null | null | 34.538117 | 712 | 0.480914 | [
[
[
"import json\nimport itertools\nimport copy\nimport random\n\ndef filter_lexicon(lexicon):\n keys_to_hold = \"yellow,red,green,cyan,purple,blue,gray,brown\".split(\",\")\n deleted_keys = set()\n for k in lexicon.keys():\n if k not in keys_to_hold:\n deleted_keys.add(k)\n\n for k in deleted_keys:\n del lexicon[k]\n\n return lexicon\n\n\ndef load_lexicon(lexicon_path, train_path):\n lexicon = json.load(open(lexicon_path))\n inputs = []\n with open(train_path, 'r') as f:\n for line in f:\n inputs.append(line.split('\\t')[0])\n return lexicon, inputs\n\ndef filter_uncommon_tokens(lexicon, threshold):\n # Filter uncommon tokens\n deleted_keys = set()\n \n for (k1, v1) in lexicon.items():\n deleted_codes = set()\n \n for c, count in v1.items():\n if count < threshold:\n deleted_codes.add(c)\n \n for k in deleted_codes:\n del v1[k]\n \n if len(v1) == 0:\n deleted_keys.add(k1)\n \n for k in deleted_keys:\n del lexicon[k]\n \n return lexicon\n\n\ndef filter_intersected_tokens(lexicon):\n deleted_keys = set()\n for (k1, v1) in lexicon.items():\n for ci, count in v1.items():\n for (k2, v2) in lexicon.items():\n if k2 == k1:\n continue\n if ci in v2:\n deleted_keys.add(k1)\n deleted_keys.add(k2)\n for k in deleted_keys:\n del lexicon[k]\n return lexicon\n \n\ndef get_swapables(lexicon, inputs):\n inputs = copy.deepcopy(inputs)\n random.shuffle(inputs)\n swapables = {k: [] for k in lexicon.keys()}\n for k1 in lexicon.keys():\n for k2 in lexicon.keys():\n if k1 != k2:\n if k1 in swapables[k2]:\n swapables[k1].append(k2)\n else: \n x1s = itertools.islice(filter(lambda x: k1 in x, inputs), 5000)\n x2s = itertools.islice(filter(lambda x: k2 in x, inputs), 5000)\n for (x1, x2) in itertools.product(x1s, x2s):\n if x1.replace(k1, k2) == x2:\n swapables[k1].append(k2)\n print(f\"Linked {k1} - {k2}\")\n break\n deleted_keys = set() \n for k, v in swapables.items():\n if len(v) == 0:\n deleted_keys.add(k)\n \n for k in deleted_keys:\n del lexicon[k]\n del swapables[k]\n \n return (lexicon, swapables)\n\ndef propagate_swaps(swapables): \n for k1, swaps in swapables.items():\n for k2 in swaps:\n swaps2 = swapables[k2]\n if k1 in swaps2 and k2 not in swaps:\n swaps.append(k2)\n elif k2 in swaps and k1 not in swaps2:\n swaps2.append(k1)\n return swapables\n \n \ndef filter_lexicon_v2(lexicon, inputs):\n lexicon = copy.deepcopy(lexicon)\n lexicon = filter_uncommon_tokens(lexicon, len(inputs)/100)\n lexicon = filter_intersected_tokens(lexicon)\n lexicon, swapables = get_swapables(lexicon, inputs)\n return lexicon, propagate_swaps(swapables)",
"_____no_output_____"
],
[
"from IPython.core.debugger import Pdb\n#this one triggers the debugger",
"_____no_output_____"
],
[
"for clevr_type in (\"clevr\",):\n for seed in range(3, 4):\n exp_root = f\"clip_exp_img_seed_{seed}_{clevr_type}/clevr/VQVAE/beta_1.0_ncodes_32_ldim_64_dim_128_lr_0.0003/\"\n lexicon, inputs = load_lexicon(exp_root + \"diag.align.o.json\", exp_root + \"train_encodings.txt\")\n filtered_lexicon, swapables = filter_lexicon_v2(lexicon, inputs)\n print(swapables)\n ",
"Linked cyan - red\nLinked cyan - gray\nLinked cyan - blue\nLinked cyan - purple\nLinked cyan - green\nLinked cyan - yellow\nLinked cyan - big\nLinked cyan - brown\nLinked red - gray\nLinked red - blue\nLinked red - purple\nLinked red - green\nLinked red - yellow\nLinked red - big\nLinked red - brown\nLinked gray - blue\nLinked gray - purple\nLinked gray - green\nLinked gray - yellow\nLinked gray - big\nLinked gray - brown\nLinked blue - purple\nLinked blue - green\nLinked blue - yellow\nLinked blue - big\nLinked blue - brown\nLinked purple - green\nLinked purple - yellow\nLinked purple - big\nLinked purple - brown\nLinked green - yellow\nLinked green - big\nLinked green - brown\nLinked yellow - big\nLinked yellow - brown\nLinked big - brown\n{'cyan': ['red', 'gray', 'blue', 'purple', 'green', 'yellow', 'big', 'brown'], 'red': ['cyan', 'gray', 'blue', 'purple', 'green', 'yellow', 'big', 'brown'], 'gray': ['cyan', 'red', 'blue', 'purple', 'green', 'yellow', 'big', 'brown'], 'blue': ['cyan', 'red', 'gray', 'purple', 'green', 'yellow', 'big', 'brown'], 'purple': ['cyan', 'red', 'gray', 'blue', 'green', 'yellow', 'big', 'brown'], 'green': ['cyan', 'red', 'gray', 'blue', 'purple', 'yellow', 'big', 'brown'], 'yellow': ['cyan', 'red', 'gray', 'blue', 'purple', 'green', 'big', 'brown'], 'big': ['cyan', 'red', 'gray', 'blue', 'purple', 'green', 'yellow', 'brown'], 'brown': ['cyan', 'red', 'gray', 'blue', 'purple', 'green', 'yellow', 'big']}\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code"
]
]
|
cb7418b9ab7dfeda8a3bb074d7c7ef3dd1d78642 | 50,296 | ipynb | Jupyter Notebook | introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-lst-format.ipynb | vikramelango/amazon-sagemaker-examples | 9a3b8de17c253fc18fc089120885afc6ff36111d | [
"Apache-2.0"
]
| 2 | 2022-03-28T09:17:44.000Z | 2022-03-28T09:17:47.000Z | introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-lst-format.ipynb | vikramelango/amazon-sagemaker-examples | 9a3b8de17c253fc18fc089120885afc6ff36111d | [
"Apache-2.0"
]
| null | null | null | introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-lst-format.ipynb | vikramelango/amazon-sagemaker-examples | 9a3b8de17c253fc18fc089120885afc6ff36111d | [
"Apache-2.0"
]
| 1 | 2022-03-28T09:18:00.000Z | 2022-03-28T09:18:00.000Z | 35.900071 | 759 | 0.531474 | [
[
[
"# Image classification training with image format\n\n1. [Introduction](#Introduction)\n2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)\n 1. [Permissions and environment variables](#Permissions-and-environment-variables)\n 2. [Prepare the data](#Prepare-the-data)\n3. [Fine-tuning The Image Classification Model](#Fine-tuning-the-Image-classification-model)\n 1. [Training parameters](#Training-parameters)\n 2. [Training](#Training)\n4. [Deploy The Model](#Deploy-the-model)\n 1. [Create model](#Create-model)\n 2. [Batch transform](#Batch-transform)\n 3. [Realtime inference](#Realtime-inference)\n 1. [Create endpoint configuration](#Create-endpoint-configuration) \n 2. [Create endpoint](#Create-endpoint) \n 3. [Perform inference](#Perform-inference) \n 4. [Clean up](#Clean-up)",
"_____no_output_____"
],
[
"## Introduction\n\nWelcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon SageMaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on ImageNet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). \n\nTo get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.",
"_____no_output_____"
],
[
"## Prerequisites and Preprocessing\n\n### Permissions and environment variables\n\nHere we set up the linkage and authentication to AWS services. There are three parts to this:\n\n* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook\n* The S3 bucket that you want to use for training and model data\n* The Amazon SageMaker image classification docker image which need not be changed",
"_____no_output_____"
]
],
[
[
"%%time\nimport boto3\nimport sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker import image_uris\n\nrole = get_execution_role()\n\nbucket = sagemaker.session.Session().default_bucket()\n\ntraining_image = image_uris.retrieve(\n region=boto3.Session().region_name, framework=\"image-classification\"\n)",
"_____no_output_____"
]
],
[
[
"## Fine-tuning the Image classification model\n\n### Prepare the data\nThe Caltech-256 dataset consist of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. \n\nThe image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).",
"_____no_output_____"
]
],
[
[
"import os\nimport urllib.request\n\n\ndef download(url):\n filename = url.split(\"/\")[-1]\n if not os.path.exists(filename):\n urllib.request.urlretrieve(url, filename)\n\n\n# Caltech-256 image files\ns3 = boto3.client(\"s3\")\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/caltech-256/256_ObjectCategories.tar\",\n \"256_ObjectCategories.tar\",\n)\n!tar -xf 256_ObjectCategories.tar\n\n# Tool for creating lst file\ndownload(\"https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py\")",
"_____no_output_____"
],
[
"%%bash\n\nmkdir -p caltech_256_train_60\nfor i in 256_ObjectCategories/*; do\n c=`basename $i`\n mkdir -p caltech_256_train_60/$c\n for j in `ls $i/*.jpg | shuf | head -n 60`; do\n mv $j caltech_256_train_60/$c/\n done\ndone\n\npython im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/\npython im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/",
"_____no_output_____"
]
],
[
[
"A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows. ",
"_____no_output_____"
]
],
[
[
"!head -n 3 ./caltech-256-60-train.lst > example.lst\nf = open(\"example.lst\", \"r\")\nlst_content = f.read()\nprint(lst_content)",
"_____no_output_____"
]
],
[
[
"When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket.",
"_____no_output_____"
]
],
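[
[
"# Illustrative sketch (an addition to this walkthrough): if you bring your own images\n# organized as <root>/<class_name>/<image>.jpg, you can generate the .lst file yourself\n# instead of using im2rec.py, as long as it keeps the tab-separated layout described\n# above: unique image index, class label index, relative image path. The directory and\n# file names below are examples only.\nimport os\n\n\ndef write_lst(root_dir, lst_path):\n    classes = sorted(d for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)))\n    index = 0\n    with open(lst_path, \"w\") as f:\n        for label, cls in enumerate(classes):\n            for fname in sorted(os.listdir(os.path.join(root_dir, cls))):\n                if fname.lower().endswith(\".jpg\"):\n                    f.write(\"{}\\t{}\\t{}\\n\".format(index, label, os.path.join(cls, fname)))\n                    index += 1\n\n\n# Example usage with the training folder created earlier in this notebook:\n# write_lst(\"caltech_256_train_60\", \"my-caltech-train.lst\")\n# Once your images and .lst files are ready, upload them to S3 as shown in the next cell.",
"_____no_output_____"
]
],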
[
[
"# Four channels: train, validation, train_lst, and validation_lst\ns3train = \"s3://{}/image-classification/train/\".format(bucket)\ns3validation = \"s3://{}/image-classification/validation/\".format(bucket)\ns3train_lst = \"s3://{}/image-classification/train_lst/\".format(bucket)\ns3validation_lst = \"s3://{}/image-classification/validation_lst/\".format(bucket)\n\n# upload the image files to train and validation channels\n!aws s3 cp caltech_256_train_60 $s3train --recursive --quiet\n!aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet\n\n# upload the lst files to train_lst and validation_lst channels\n!aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet\n!aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet",
"_____no_output_____"
]
],
[
[
"Now we have all the data stored in S3 bucket. The image and lst files will be converted to RecordIO file internally by the image classification algorithm. But if you want do the conversion, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec).",
"_____no_output_____"
]
],
[
[
"%%bash\npython im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/\npython im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/",
"_____no_output_____"
]
],
[
[
"After you created the RecordIO files, you can upload them to the train and validation channels for training. To train with RecordIO format, you can follow \"[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)\" and \"[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)\". Again, we will **_not_** use the RecordIO file for the training. The following sections will only show you how to train a model with images and list files.",
"_____no_output_____"
],
[
"Before training the model, we need to set up the training parameters. The next section will explain the parameters in detail.",
"_____no_output_____"
],
[
"## Fine-tuning the Image Classification Model\n\n### Training parameters\nThere are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:\n\n* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the \"InputDataConfig\" section. The main parameters that need to be set is the \"ContentType\" which can be set to \"application/x-recordio\" or \"application/x-image\" based on the input data format and the S3Uri which specifies the bucket and the folder where the data is present. \n* **Output specification**: This is specified in the \"OutputDataConfig\" section. We just need to specify the path where the output can be stored after training\n* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If \"InstanceCount\" is more than 1, then training can be run in a distributed manner. \n\nApart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:\n\n* **num_layers**: The number of layers (depth) for the network. We use 18 in this sample but other values such as 50, 152 can be used.\n* **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image.\n* **num_training_samples**: This is the total number of training samples. It is set to 15240 for the Caltech dataset with the current split.\n* **num_classes**: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For Caltech, we use 257 because it has 256 object categories + 1 clutter class.\n* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run.\n* **epochs**: Number of training epochs.\n* **learning_rate**: Learning rate for training.\n* **top_k**: Report the top-k accuracy during training.\n* **resize**: Resize the image before using it for training. The images are resized so that the shortest side is of this parameter. If the parameter is not set, then the training data is used as such without resizing.\n* **checkpoint_frequency**: Period to store model parameters (in number of epochs).\n* **use_pretrained_model**: Set to 1 to use pretrained model for transfer learning.",
"_____no_output_____"
]
],
[
[
"# The algorithm supports multiple network depth (number of layers). They are 18, 34, 50, 101, 152 and 200\n# For this training, we will use 18 layers\nnum_layers = 18\n# we need to specify the input image shape for the training data\nimage_shape = \"3,224,224\"\n# we also need to specify the number of training samples in the training set\nnum_training_samples = 15240\n# specify the number of output classes\nnum_classes = 257\n# batch size for training\nmini_batch_size = 128\n# number of epochs\nepochs = 6\n# learning rate\nlearning_rate = 0.01\n# report top_5 accuracy\ntop_k = 5\n# resize image before training\nresize = 256\n# period to store model parameters (in number of epochs), in this case, we will save parameters from epoch 2, 4, and 6\ncheckpoint_frequency = 2\n# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be\n# initialized with pre-trained weights\nuse_pretrained_model = 1",
"_____no_output_____"
]
],
[
[
"### Training\nRun the training using Amazon SageMaker CreateTrainingJob API",
"_____no_output_____"
]
],
[
[
"%%time\nimport time\nimport boto3\nfrom time import gmtime, strftime\n\n\ns3 = boto3.client(\"s3\")\n# create unique job name\njob_name_prefix = \"sagemaker-imageclassification-notebook\"\ntimestamp = time.strftime(\"-%Y-%m-%d-%H-%M-%S\", time.gmtime())\njob_name = job_name_prefix + timestamp\ntraining_params = {\n # specify the training docker image\n \"AlgorithmSpecification\": {\"TrainingImage\": training_image, \"TrainingInputMode\": \"File\"},\n \"RoleArn\": role,\n \"OutputDataConfig\": {\"S3OutputPath\": \"s3://{}/{}/output\".format(bucket, job_name_prefix)},\n \"ResourceConfig\": {\"InstanceCount\": 1, \"InstanceType\": \"ml.p2.xlarge\", \"VolumeSizeInGB\": 50},\n \"TrainingJobName\": job_name,\n \"HyperParameters\": {\n \"image_shape\": image_shape,\n \"num_layers\": str(num_layers),\n \"num_training_samples\": str(num_training_samples),\n \"num_classes\": str(num_classes),\n \"mini_batch_size\": str(mini_batch_size),\n \"epochs\": str(epochs),\n \"learning_rate\": str(learning_rate),\n \"top_k\": str(top_k),\n \"resize\": str(resize),\n \"checkpoint_frequency\": str(checkpoint_frequency),\n \"use_pretrained_model\": str(use_pretrained_model),\n },\n \"StoppingCondition\": {\"MaxRuntimeInSeconds\": 360000},\n # Training data should be inside a subdirectory called \"train\"\n # Validation data should be inside a subdirectory called \"validation\"\n # The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)\n \"InputDataConfig\": [\n {\n \"ChannelName\": \"train\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": s3train,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n \"ContentType\": \"application/x-image\",\n \"CompressionType\": \"None\",\n },\n {\n \"ChannelName\": \"validation\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": s3validation,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n \"ContentType\": \"application/x-image\",\n \"CompressionType\": \"None\",\n },\n {\n \"ChannelName\": \"train_lst\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": s3train_lst,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n \"ContentType\": \"application/x-image\",\n \"CompressionType\": \"None\",\n },\n {\n \"ChannelName\": \"validation_lst\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": s3validation_lst,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n \"ContentType\": \"application/x-image\",\n \"CompressionType\": \"None\",\n },\n ],\n}\nprint(\"Training job name: {}\".format(job_name))\nprint(\n \"\\nInput Data Location: {}\".format(\n training_params[\"InputDataConfig\"][0][\"DataSource\"][\"S3DataSource\"]\n )\n)",
"_____no_output_____"
],
[
"# create the Amazon SageMaker training job\nsagemaker = boto3.client(service_name=\"sagemaker\")\nsagemaker.create_training_job(**training_params)\n\n# confirm that the training job has started\nstatus = sagemaker.describe_training_job(TrainingJobName=job_name)[\"TrainingJobStatus\"]\nprint(\"Training job current status: {}\".format(status))\n\ntry:\n # wait for the job to finish and report the ending status\n sagemaker.get_waiter(\"training_job_completed_or_stopped\").wait(TrainingJobName=job_name)\n training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\n status = training_info[\"TrainingJobStatus\"]\n print(\"Training job ended with status: \" + status)\nexcept:\n print(\"Training failed to start\")\n # if exception is raised, that means it has failed\n message = sagemaker.describe_training_job(TrainingJobName=job_name)[\"FailureReason\"]\n print(\"Training failed with the following error: {}\".format(message))",
"_____no_output_____"
],
[
"training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\nstatus = training_info[\"TrainingJobStatus\"]\nprint(\"Training job ended with status: \" + status)\nprint(training_info)",
"_____no_output_____"
]
],
[
[
"If you see the message,\n\n> `Training job ended with status: Completed`\n\nthen that means training sucessfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.\n\nYou can also view information about and the status of a training job using the AWS SageMaker console. Just click on the \"Jobs\" tab.",
"_____no_output_____"
],
[
"## Deploy The Model\n\nA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class label given an input image.\n\nThis section involves several steps,\n\n1. [Create model](#CreateModel) - Create model for the training output\n1. [Batch Transform](#BatchTransform) - Create a transform job to perform batch inference.\n1. [Host the model for realtime inference](#HostTheModel) - Create an inference endpoint and perform realtime inference.",
"_____no_output_____"
],
[
"### Create model\n\nWe now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.",
"_____no_output_____"
]
],
[
[
"%%time\nimport boto3\nfrom time import gmtime, strftime\n\nsage = boto3.Session().client(service_name=\"sagemaker\")\n\ntimestamp = time.strftime(\"-%Y-%m-%d-%H-%M-%S\", time.gmtime())\nmodel_name = \"image-classification-model\" + timestamp\nprint(model_name)\ninfo = sage.describe_training_job(TrainingJobName=job_name)\nmodel_data = info[\"ModelArtifacts\"][\"S3ModelArtifacts\"]\nprint(model_data)\n\nhosting_image = image_uris.retrieve(\n region=boto3.Session().region_name, framework=\"image-classification\"\n)\n\nprimary_container = {\n \"Image\": hosting_image,\n \"ModelDataUrl\": model_data,\n}\n\ncreate_model_response = sage.create_model(\n ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container\n)\n\nprint(create_model_response[\"ModelArn\"])",
"_____no_output_____"
]
],
[
[
"### Batch transform\n\nWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction.",
"_____no_output_____"
]
],
[
[
"timestamp = time.strftime(\"-%Y-%m-%d-%H-%M-%S\", time.gmtime())\nbatch_job_name = \"image-classification-model\" + timestamp\nbatch_input = s3validation + \"001.ak47/\"\nrequest = {\n \"TransformJobName\": batch_job_name,\n \"ModelName\": model_name,\n \"MaxConcurrentTransforms\": 16,\n \"MaxPayloadInMB\": 6,\n \"BatchStrategy\": \"SingleRecord\",\n \"TransformOutput\": {\"S3OutputPath\": \"s3://{}/{}/output\".format(bucket, batch_job_name)},\n \"TransformInput\": {\n \"DataSource\": {\"S3DataSource\": {\"S3DataType\": \"S3Prefix\", \"S3Uri\": batch_input}},\n \"ContentType\": \"application/x-image\",\n \"SplitType\": \"None\",\n \"CompressionType\": \"None\",\n },\n \"TransformResources\": {\"InstanceType\": \"ml.p2.xlarge\", \"InstanceCount\": 1},\n}\n\nprint(\"Transform job name: {}\".format(batch_job_name))\nprint(\"\\nInput Data Location: {}\".format(batch_input))",
"_____no_output_____"
],
[
"sagemaker = boto3.client(\"sagemaker\")\nsagemaker.create_transform_job(**request)\n\nprint(\"Created Transform job with name: \", batch_job_name)\n\nwhile True:\n response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)\n status = response[\"TransformJobStatus\"]\n if status == \"Completed\":\n print(\"Transform job ended with status: \" + status)\n break\n if status == \"Failed\":\n message = response[\"FailureReason\"]\n print(\"Transform failed with the following error: {}\".format(message))\n raise Exception(\"Transform job failed\")\n time.sleep(30)",
"_____no_output_____"
]
],
[
[
"After the job completes, let's check the prediction results.",
"_____no_output_____"
]
],
[
[
"from urllib.parse import urlparse\nimport json\nimport numpy as np\n\ns3_client = boto3.client(\"s3\")\nobject_categories = [\n \"ak47\",\n \"american-flag\",\n \"backpack\",\n \"baseball-bat\",\n \"baseball-glove\",\n \"basketball-hoop\",\n \"bat\",\n \"bathtub\",\n \"bear\",\n \"beer-mug\",\n \"billiards\",\n \"binoculars\",\n \"birdbath\",\n \"blimp\",\n \"bonsai-101\",\n \"boom-box\",\n \"bowling-ball\",\n \"bowling-pin\",\n \"boxing-glove\",\n \"brain-101\",\n \"breadmaker\",\n \"buddha-101\",\n \"bulldozer\",\n \"butterfly\",\n \"cactus\",\n \"cake\",\n \"calculator\",\n \"camel\",\n \"cannon\",\n \"canoe\",\n \"car-tire\",\n \"cartman\",\n \"cd\",\n \"centipede\",\n \"cereal-box\",\n \"chandelier-101\",\n \"chess-board\",\n \"chimp\",\n \"chopsticks\",\n \"cockroach\",\n \"coffee-mug\",\n \"coffin\",\n \"coin\",\n \"comet\",\n \"computer-keyboard\",\n \"computer-monitor\",\n \"computer-mouse\",\n \"conch\",\n \"cormorant\",\n \"covered-wagon\",\n \"cowboy-hat\",\n \"crab-101\",\n \"desk-globe\",\n \"diamond-ring\",\n \"dice\",\n \"dog\",\n \"dolphin-101\",\n \"doorknob\",\n \"drinking-straw\",\n \"duck\",\n \"dumb-bell\",\n \"eiffel-tower\",\n \"electric-guitar-101\",\n \"elephant-101\",\n \"elk\",\n \"ewer-101\",\n \"eyeglasses\",\n \"fern\",\n \"fighter-jet\",\n \"fire-extinguisher\",\n \"fire-hydrant\",\n \"fire-truck\",\n \"fireworks\",\n \"flashlight\",\n \"floppy-disk\",\n \"football-helmet\",\n \"french-horn\",\n \"fried-egg\",\n \"frisbee\",\n \"frog\",\n \"frying-pan\",\n \"galaxy\",\n \"gas-pump\",\n \"giraffe\",\n \"goat\",\n \"golden-gate-bridge\",\n \"goldfish\",\n \"golf-ball\",\n \"goose\",\n \"gorilla\",\n \"grand-piano-101\",\n \"grapes\",\n \"grasshopper\",\n \"guitar-pick\",\n \"hamburger\",\n \"hammock\",\n \"harmonica\",\n \"harp\",\n \"harpsichord\",\n \"hawksbill-101\",\n \"head-phones\",\n \"helicopter-101\",\n \"hibiscus\",\n \"homer-simpson\",\n \"horse\",\n \"horseshoe-crab\",\n \"hot-air-balloon\",\n \"hot-dog\",\n \"hot-tub\",\n \"hourglass\",\n \"house-fly\",\n \"human-skeleton\",\n \"hummingbird\",\n \"ibis-101\",\n \"ice-cream-cone\",\n \"iguana\",\n \"ipod\",\n \"iris\",\n \"jesus-christ\",\n \"joy-stick\",\n \"kangaroo-101\",\n \"kayak\",\n \"ketch-101\",\n \"killer-whale\",\n \"knife\",\n \"ladder\",\n \"laptop-101\",\n \"lathe\",\n \"leopards-101\",\n \"license-plate\",\n \"lightbulb\",\n \"light-house\",\n \"lightning\",\n \"llama-101\",\n \"mailbox\",\n \"mandolin\",\n \"mars\",\n \"mattress\",\n \"megaphone\",\n \"menorah-101\",\n \"microscope\",\n \"microwave\",\n \"minaret\",\n \"minotaur\",\n \"motorbikes-101\",\n \"mountain-bike\",\n \"mushroom\",\n \"mussels\",\n \"necktie\",\n \"octopus\",\n \"ostrich\",\n \"owl\",\n \"palm-pilot\",\n \"palm-tree\",\n \"paperclip\",\n \"paper-shredder\",\n \"pci-card\",\n \"penguin\",\n \"people\",\n \"pez-dispenser\",\n \"photocopier\",\n \"picnic-table\",\n \"playing-card\",\n \"porcupine\",\n \"pram\",\n \"praying-mantis\",\n \"pyramid\",\n \"raccoon\",\n \"radio-telescope\",\n \"rainbow\",\n \"refrigerator\",\n \"revolver-101\",\n \"rifle\",\n \"rotary-phone\",\n \"roulette-wheel\",\n \"saddle\",\n \"saturn\",\n \"school-bus\",\n \"scorpion-101\",\n \"screwdriver\",\n \"segway\",\n \"self-propelled-lawn-mower\",\n \"sextant\",\n \"sheet-music\",\n \"skateboard\",\n \"skunk\",\n \"skyscraper\",\n \"smokestack\",\n \"snail\",\n \"snake\",\n \"sneaker\",\n \"snowmobile\",\n \"soccer-ball\",\n \"socks\",\n \"soda-can\",\n \"spaghetti\",\n \"speed-boat\",\n \"spider\",\n \"spoon\",\n \"stained-glass\",\n 
\"starfish-101\",\n \"steering-wheel\",\n \"stirrups\",\n \"sunflower-101\",\n \"superman\",\n \"sushi\",\n \"swan\",\n \"swiss-army-knife\",\n \"sword\",\n \"syringe\",\n \"tambourine\",\n \"teapot\",\n \"teddy-bear\",\n \"teepee\",\n \"telephone-box\",\n \"tennis-ball\",\n \"tennis-court\",\n \"tennis-racket\",\n \"theodolite\",\n \"toaster\",\n \"tomato\",\n \"tombstone\",\n \"top-hat\",\n \"touring-bike\",\n \"tower-pisa\",\n \"traffic-light\",\n \"treadmill\",\n \"triceratops\",\n \"tricycle\",\n \"trilobite-101\",\n \"tripod\",\n \"t-shirt\",\n \"tuning-fork\",\n \"tweezer\",\n \"umbrella-101\",\n \"unicorn\",\n \"vcr\",\n \"video-projector\",\n \"washing-machine\",\n \"watch-101\",\n \"waterfall\",\n \"watermelon\",\n \"welding-mask\",\n \"wheelbarrow\",\n \"windmill\",\n \"wine-bottle\",\n \"xylophone\",\n \"yarmulke\",\n \"yo-yo\",\n \"zebra\",\n \"airplanes-101\",\n \"car-side-101\",\n \"faces-easy-101\",\n \"greyhound\",\n \"tennis-shoes\",\n \"toad\",\n \"clutter\",\n]\n\n\ndef list_objects(s3_client, bucket, prefix):\n response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)\n objects = [content[\"Key\"] for content in response[\"Contents\"]]\n return objects\n\n\ndef get_label(s3_client, bucket, prefix):\n filename = prefix.split(\"/\")[-1]\n s3_client.download_file(bucket, prefix, filename)\n with open(filename) as f:\n data = json.load(f)\n index = np.argmax(data[\"prediction\"])\n probability = data[\"prediction\"][index]\n print(\"Result: label - \" + object_categories[index] + \", probability - \" + str(probability))\n return object_categories[index], probability\n\n\ninputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip(\"/\"))\nprint(\"Sample inputs: \" + str(inputs[:2]))\n\noutputs = list_objects(s3_client, bucket, batch_job_name + \"/output\")\nprint(\"Sample output: \" + str(outputs[:2]))\n\n# Check prediction result of the first 2 images\n[get_label(s3_client, bucket, prefix) for prefix in outputs[0:2]]",
"_____no_output_____"
]
],
[
[
"### Realtime inference\n\nWe now host the model with an endpoint and perform realtime inference.\n\nThis section involves several steps,\n1. [Create endpoint configuration](#CreateEndpointConfiguration) - Create a configuration defining an endpoint.\n1. [Create endpoint](#CreateEndpoint) - Use the configuration to create an inference endpoint.\n1. [Perform inference](#PerformInference) - Perform inference on some input data using the endpoint.\n1. [Clean up](#CleanUp) - Delete the endpoint and model",
"_____no_output_____"
],
[
"#### Create endpoint configuration\nAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.\n\nIn addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.",
"_____no_output_____"
]
],
[
[
"from time import gmtime, strftime\n\ntimestamp = time.strftime(\"-%Y-%m-%d-%H-%M-%S\", time.gmtime())\nendpoint_config_name = job_name_prefix + \"-epc-\" + timestamp\nendpoint_config_response = sage.create_endpoint_config(\n EndpointConfigName=endpoint_config_name,\n ProductionVariants=[\n {\n \"InstanceType\": \"ml.p2.xlarge\",\n \"InitialInstanceCount\": 1,\n \"ModelName\": model_name,\n \"VariantName\": \"AllTraffic\",\n }\n ],\n)\n\nprint(\"Endpoint configuration name: {}\".format(endpoint_config_name))\nprint(\"Endpoint configuration arn: {}\".format(endpoint_config_response[\"EndpointConfigArn\"]))",
"_____no_output_____"
]
],
[
[
"#### Create endpoint\nNext, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.",
"_____no_output_____"
]
],
[
[
"%%time\nimport time\n\ntimestamp = time.strftime(\"-%Y-%m-%d-%H-%M-%S\", time.gmtime())\nendpoint_name = job_name_prefix + \"-ep-\" + timestamp\nprint(\"Endpoint name: {}\".format(endpoint_name))\n\nendpoint_params = {\n \"EndpointName\": endpoint_name,\n \"EndpointConfigName\": endpoint_config_name,\n}\nendpoint_response = sagemaker.create_endpoint(**endpoint_params)\nprint(\"EndpointArn = {}\".format(endpoint_response[\"EndpointArn\"]))",
"_____no_output_____"
]
],
[
[
"Finally, now the endpoint can be created. It may take a few minutes to create the endpoint...",
"_____no_output_____"
]
],
[
[
"# get the status of the endpoint\nresponse = sagemaker.describe_endpoint(EndpointName=endpoint_name)\nstatus = response[\"EndpointStatus\"]\nprint(\"EndpointStatus = {}\".format(status))\n\ntry:\n sagemaker.get_waiter(\"endpoint_in_service\").wait(EndpointName=endpoint_name)\nfinally:\n resp = sagemaker.describe_endpoint(EndpointName=endpoint_name)\n status = resp[\"EndpointStatus\"]\n print(\"Arn: \" + resp[\"EndpointArn\"])\n print(\"Create endpoint ended with status: \" + status)\n\n if status != \"InService\":\n message = sagemaker.describe_endpoint(EndpointName=endpoint_name)[\"FailureReason\"]\n print(\"Training failed with the following error: {}\".format(message))\n raise Exception(\"Endpoint creation did not succeed\")",
"_____no_output_____"
]
],
[
[
"If you see the message,\n\n> `Endpoint creation ended with EndpointStatus = InService`\n\nthen congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the \"Endpoints\" tab in the AWS SageMaker console.\n\nWe will finally create a runtime object from which we can invoke the endpoint.",
"_____no_output_____"
],
[
"#### Perform inference\nFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.\n",
"_____no_output_____"
]
],
[
[
"import boto3\n\nruntime = boto3.Session().client(service_name=\"runtime.sagemaker\")",
"_____no_output_____"
]
],
[
[
"##### Download test image",
"_____no_output_____"
]
],
[
[
"file_name = \"/tmp/test.jpg\"\ns3.download_file(\n \"sagemaker-sample-files\",\n \"datasets/image/caltech-256/256_ObjectCategories/008.bathtub/008_0007.jpg\",\n file_name,\n)\n\n# test image\nfrom IPython.display import Image\n\nImage(file_name)",
"_____no_output_____"
],
[
"import json\nimport numpy as np\n\nwith open(file_name, \"rb\") as f:\n payload = f.read()\n payload = bytearray(payload)\nresponse = runtime.invoke_endpoint(\n EndpointName=endpoint_name, ContentType=\"application/x-image\", Body=payload\n)\nresult = response[\"Body\"].read()\n# result will be in json format and convert it to ndarray\nresult = json.loads(result)\n# the result will output the probabilities for all classes\n# find the class with maximum probability and print the class index\nindex = np.argmax(result)\n\nobject_categories = [\n \"ak47\",\n \"american-flag\",\n \"backpack\",\n \"baseball-bat\",\n \"baseball-glove\",\n \"basketball-hoop\",\n \"bat\",\n \"bathtub\",\n \"bear\",\n \"beer-mug\",\n \"billiards\",\n \"binoculars\",\n \"birdbath\",\n \"blimp\",\n \"bonsai-101\",\n \"boom-box\",\n \"bowling-ball\",\n \"bowling-pin\",\n \"boxing-glove\",\n \"brain-101\",\n \"breadmaker\",\n \"buddha-101\",\n \"bulldozer\",\n \"butterfly\",\n \"cactus\",\n \"cake\",\n \"calculator\",\n \"camel\",\n \"cannon\",\n \"canoe\",\n \"car-tire\",\n \"cartman\",\n \"cd\",\n \"centipede\",\n \"cereal-box\",\n \"chandelier-101\",\n \"chess-board\",\n \"chimp\",\n \"chopsticks\",\n \"cockroach\",\n \"coffee-mug\",\n \"coffin\",\n \"coin\",\n \"comet\",\n \"computer-keyboard\",\n \"computer-monitor\",\n \"computer-mouse\",\n \"conch\",\n \"cormorant\",\n \"covered-wagon\",\n \"cowboy-hat\",\n \"crab-101\",\n \"desk-globe\",\n \"diamond-ring\",\n \"dice\",\n \"dog\",\n \"dolphin-101\",\n \"doorknob\",\n \"drinking-straw\",\n \"duck\",\n \"dumb-bell\",\n \"eiffel-tower\",\n \"electric-guitar-101\",\n \"elephant-101\",\n \"elk\",\n \"ewer-101\",\n \"eyeglasses\",\n \"fern\",\n \"fighter-jet\",\n \"fire-extinguisher\",\n \"fire-hydrant\",\n \"fire-truck\",\n \"fireworks\",\n \"flashlight\",\n \"floppy-disk\",\n \"football-helmet\",\n \"french-horn\",\n \"fried-egg\",\n \"frisbee\",\n \"frog\",\n \"frying-pan\",\n \"galaxy\",\n \"gas-pump\",\n \"giraffe\",\n \"goat\",\n \"golden-gate-bridge\",\n \"goldfish\",\n \"golf-ball\",\n \"goose\",\n \"gorilla\",\n \"grand-piano-101\",\n \"grapes\",\n \"grasshopper\",\n \"guitar-pick\",\n \"hamburger\",\n \"hammock\",\n \"harmonica\",\n \"harp\",\n \"harpsichord\",\n \"hawksbill-101\",\n \"head-phones\",\n \"helicopter-101\",\n \"hibiscus\",\n \"homer-simpson\",\n \"horse\",\n \"horseshoe-crab\",\n \"hot-air-balloon\",\n \"hot-dog\",\n \"hot-tub\",\n \"hourglass\",\n \"house-fly\",\n \"human-skeleton\",\n \"hummingbird\",\n \"ibis-101\",\n \"ice-cream-cone\",\n \"iguana\",\n \"ipod\",\n \"iris\",\n \"jesus-christ\",\n \"joy-stick\",\n \"kangaroo-101\",\n \"kayak\",\n \"ketch-101\",\n \"killer-whale\",\n \"knife\",\n \"ladder\",\n \"laptop-101\",\n \"lathe\",\n \"leopards-101\",\n \"license-plate\",\n \"lightbulb\",\n \"light-house\",\n \"lightning\",\n \"llama-101\",\n \"mailbox\",\n \"mandolin\",\n \"mars\",\n \"mattress\",\n \"megaphone\",\n \"menorah-101\",\n \"microscope\",\n \"microwave\",\n \"minaret\",\n \"minotaur\",\n \"motorbikes-101\",\n \"mountain-bike\",\n \"mushroom\",\n \"mussels\",\n \"necktie\",\n \"octopus\",\n \"ostrich\",\n \"owl\",\n \"palm-pilot\",\n \"palm-tree\",\n \"paperclip\",\n \"paper-shredder\",\n \"pci-card\",\n \"penguin\",\n \"people\",\n \"pez-dispenser\",\n \"photocopier\",\n \"picnic-table\",\n \"playing-card\",\n \"porcupine\",\n \"pram\",\n \"praying-mantis\",\n \"pyramid\",\n \"raccoon\",\n \"radio-telescope\",\n \"rainbow\",\n \"refrigerator\",\n \"revolver-101\",\n \"rifle\",\n \"rotary-phone\",\n \"roulette-wheel\",\n 
\"saddle\",\n \"saturn\",\n \"school-bus\",\n \"scorpion-101\",\n \"screwdriver\",\n \"segway\",\n \"self-propelled-lawn-mower\",\n \"sextant\",\n \"sheet-music\",\n \"skateboard\",\n \"skunk\",\n \"skyscraper\",\n \"smokestack\",\n \"snail\",\n \"snake\",\n \"sneaker\",\n \"snowmobile\",\n \"soccer-ball\",\n \"socks\",\n \"soda-can\",\n \"spaghetti\",\n \"speed-boat\",\n \"spider\",\n \"spoon\",\n \"stained-glass\",\n \"starfish-101\",\n \"steering-wheel\",\n \"stirrups\",\n \"sunflower-101\",\n \"superman\",\n \"sushi\",\n \"swan\",\n \"swiss-army-knife\",\n \"sword\",\n \"syringe\",\n \"tambourine\",\n \"teapot\",\n \"teddy-bear\",\n \"teepee\",\n \"telephone-box\",\n \"tennis-ball\",\n \"tennis-court\",\n \"tennis-racket\",\n \"theodolite\",\n \"toaster\",\n \"tomato\",\n \"tombstone\",\n \"top-hat\",\n \"touring-bike\",\n \"tower-pisa\",\n \"traffic-light\",\n \"treadmill\",\n \"triceratops\",\n \"tricycle\",\n \"trilobite-101\",\n \"tripod\",\n \"t-shirt\",\n \"tuning-fork\",\n \"tweezer\",\n \"umbrella-101\",\n \"unicorn\",\n \"vcr\",\n \"video-projector\",\n \"washing-machine\",\n \"watch-101\",\n \"waterfall\",\n \"watermelon\",\n \"welding-mask\",\n \"wheelbarrow\",\n \"windmill\",\n \"wine-bottle\",\n \"xylophone\",\n \"yarmulke\",\n \"yo-yo\",\n \"zebra\",\n \"airplanes-101\",\n \"car-side-101\",\n \"faces-easy-101\",\n \"greyhound\",\n \"tennis-shoes\",\n \"toad\",\n \"clutter\",\n]\nprint(\"Result: label - \" + object_categories[index] + \", probability - \" + str(result[index]))",
"_____no_output_____"
]
],
[
[
"#### Clean up\n\nWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Uncomment and run the following cell to delete the endpoint and model",
"_____no_output_____"
]
],
[
[
"sage.delete_endpoint(EndpointName=endpoint_name)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb743a630bbb5cef3ef2a5252abfabf7369c0cfb | 21,479 | ipynb | Jupyter Notebook | 04-Pandas/2/Activities/05-Evr_PandasRecap/Solved/PandasRecap.ipynb | madhavinamballa/datascience_Berkely | 15f2987a0b012b344d209df70fdcf527f1c664b2 | [
"ADSL"
]
| null | null | null | 04-Pandas/2/Activities/05-Evr_PandasRecap/Solved/PandasRecap.ipynb | madhavinamballa/datascience_Berkely | 15f2987a0b012b344d209df70fdcf527f1c664b2 | [
"ADSL"
]
| null | null | null | 04-Pandas/2/Activities/05-Evr_PandasRecap/Solved/PandasRecap.ipynb | madhavinamballa/datascience_Berkely | 15f2987a0b012b344d209df70fdcf527f1c664b2 | [
"ADSL"
]
| null | null | null | 29.302865 | 222 | 0.387262 | [
[
[
"# Import the Pandas library\nimport pandas as pd",
"_____no_output_____"
],
[
"# Create a reference the CSV file desired\ncsv_path = \"Resources/ufoSightings.csv\"\n\n# Read the CSV into a Pandas DataFrame\nufo_df = pd.read_csv(csv_path)\n\n# Print the first five rows of data to the screen\nufo_df.head()",
"/Users/arwenshackelford/anaconda3/envs/dev/lib/python3.6/site-packages/IPython/core/interactiveshell.py:2714: DtypeWarning: Columns (5,9) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"# Check to see if there are any rows with missing data\nufo_df.count()",
"_____no_output_____"
],
[
"# Remove the rows with missing data\nclean_ufo_df = ufo_df.dropna(how=\"any\")\nclean_ufo_df.count()",
"_____no_output_____"
],
[
"# Collect a list of sightings seen in the US\ncolumns = [\n \"datetime\",\n \"city\",\n \"state\",\n \"country\",\n \"shape\",\n \"duration (seconds)\",\n \"duration (hours/min)\",\n \"comments\",\n \"date posted\"\n]\n\n# Filter the data so that only those sightings in the US are in a DataFrame\nusa_ufo_df = clean_ufo_df.loc[clean_ufo_df[\"country\"] == \"us\", columns]\nusa_ufo_df.head()",
"_____no_output_____"
],
[
"# Count how many sightings have occured within each state\nstate_counts = usa_ufo_df[\"state\"].value_counts()\nstate_counts",
"_____no_output_____"
],
[
"# Convert the state_counts Series into a DataFrame\nstate_ufo_counts_df = pd.DataFrame(state_counts)\nstate_ufo_counts_df.head()",
"_____no_output_____"
],
[
"# Convert the column name into \"Sum of Sightings\"\nstate_ufo_counts_df = state_ufo_counts_df.rename(\n columns={\"state\": \"Sum of Sightings\"})\nstate_ufo_counts_df.head()",
"_____no_output_____"
],
[
"# Want to add up the seconds UFOs are seen? There is a problem\n# Problem can be seen by examining datatypes within the DataFrame\nusa_ufo_df.dtypes",
"_____no_output_____"
],
[
"# Using astype() to convert a column's data into floats\nusa_ufo_df.loc[:, \"duration (seconds)\"] = usa_ufo_df[\"duration (seconds)\"].astype(\"float\")\nusa_ufo_df.dtypes",
"_____no_output_____"
],
[
"# Now it is possible to find the sum of seconds\nusa_ufo_df[\"duration (seconds)\"].sum()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7448ebd7473cdd3a51c594871565ea178cd50d | 31,779 | ipynb | Jupyter Notebook | docs/_downloads/03a48646520c277662581e858e680809/model_parallel_tutorial.ipynb | kwonmha/PyTorch-tutorials-kr | e48bbbc7088bf6b9da66abb8862b8d0539662bd5 | [
"BSD-3-Clause"
]
| null | null | null | docs/_downloads/03a48646520c277662581e858e680809/model_parallel_tutorial.ipynb | kwonmha/PyTorch-tutorials-kr | e48bbbc7088bf6b9da66abb8862b8d0539662bd5 | [
"BSD-3-Clause"
]
| null | null | null | docs/_downloads/03a48646520c277662581e858e680809/model_parallel_tutorial.ipynb | kwonmha/PyTorch-tutorials-kr | e48bbbc7088bf6b9da66abb8862b8d0539662bd5 | [
"BSD-3-Clause"
]
| null | null | null | 149.900943 | 4,668 | 0.705843 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n단일 머신을 이용한 모델 병렬화 실습 예제\n===================================================\n**저자** : `Shen Li <https://mrshenli.github.io/>`_\n**번역** : `안상준 <https://github.com/Justin-A>`_\n\n모델 병렬 처리는 분산 학습 기술에 범용적으로 사용되고 있습니다. \n이전 튜토리얼들에서는 'DataParallel' `<https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html>`_\n여러 GPU를 사용하여 신경망 모델을 학습 시킬 때 어떻게 DataParallel을 사용하는지에 대해서 살펴보았습니다.\n이 방법은 각 GPU에 입력 데이터를 부분적으로 할당하고 동일한 신경망 모델을 복제하여 이용하는 방식이었습니다. \n이 방법은 신경망 모델을 상당히 빠르게 학습시킬 수 있는 장점이 있지만, 신경망 모델이 너무 커서 단일 GPU에 할당이 되지 않는 경우에는 동작하지 않습니다.\n\n이번 튜토리얼에서는 ``데이터 병렬 처리`` 가 아닌 **모델 병렬 처리** 문제를 해결하는 방법을 소개합니다.\n각 GPU에 모델 전체를 복제하는 것이 아닌, 하나의 모델을 여러 GPU에 분할하여 할당하는 방법입니다.\n구체적으로, 10개의 층으로 구성된 ``m`` 신경망 모델에 대해서 ``데이터 병렬 처리`` 방법은 10개의 층을 전부 복제하여 각 GPU에 할당하여 처리하지만,\n이와 반대로 2개의 GPU에 모델을 병렬 처리한다면, 각 GPU에 5개의 층씩 각각 할당하여 호스팅할 수 있습니다.\n\n모델 병렬 처리의 전반적인 아이디어는 모델의 서브 네트워크들을 각각 다른 GPU에 할당하고, \n각 장비 별로 순전파를 진행하여 계산되는 출력값들을 각 장비 간 공유하여 이용하는 것입니다. \n이용하고자 하는 신경망 모델을 부분적으로 각 GPU에 할당하는 것이기 때문에, 여러 GPU를 이용하여 더 큰 신경망 모델을 할당하고 학습시킬 수 있습니다.\n이번 튜토리얼은 거대한 모델을 제한된 수의 GPU에 분할하여 할당하지 않고, 그 대신, 모델 병렬 처리의 아이디어를 이해하는 목적으로 작성되었습니다. \n모델 병렬 처리의 아이디어를 활용하여 실제 어플리케이션에 적용하는 것은 여러분의 몫입니다.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>신경망 모델을 여러 서버를 이용하여 학습시키는 병렬 학습 방법에 대해서는 다음 튜토리얼을 참고하세요.\n `분산 프레임워크 RPC 시작해보기 <rpc_tutorial.html>`__</p></div>\n\nBasic Usage\n-----------\n\n",
"_____no_output_____"
],
[
"2개의 층으로 이루어진 간단한 신경망 모델을 이용해서 기본적인 내용을 실습해봅시다.\n신경망 모델을 2개의 GPU에 할당하여 실행하기 위해서, 각 1개의 층을 각각 다른 GPU에 할당하고,\n입력 텐서값과 중간 산출물 텐서값을 신경망 모델의 구성에 맞게 배치합니다.\n\n\n",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = torch.nn.Linear(10, 10).to('cuda:0') # 첫 번째 층을 첫 번째 GPU에 할당\n self.relu = torch.nn.ReLU()\n self.net2 = torch.nn.Linear(10, 5).to('cuda:1') # 두 번째 층을 두 번째 GPU에 할당\n\n def forward(self, x):\n x = self.relu(self.net1(x.to('cuda:0'))) \n return self.net2(x.to('cuda:1')) # 첫 번째 층의 산출물을 두 번째 GPU에 할당하여 진행",
"_____no_output_____"
]
],
[
[
"위의 ``ToyModel`` 예제는 선헝 층과 텐션값을 5개의 ``to(device)`` 장비에 적절하게 할당하는 것이 아닌,\n단일 GPU로 신경망 모델을 구현하는 것과 매우 유사한 구조인 것임을 확인할 수 있습니다.\n다시 말해, GPU에 텐서값 혹은 층을 할당하는 것 외에는 추가적으로 설정하는 부분이 없습니다.\n``backward()`` 와 ``torch.optim`` 코드를 통해 단일 GPU를 이용하여 신경망 모델의 가중치값을 업데이트하는 것 처럼, 자동으로 오차에 의한 기울기값을 반영합니다.\n여러분은 레이블값과 신경망 모델의 최종 출력 텐서값을 이용하여 오차를 계산할 수 있도록 동일한 GPU에 할당하는 것만 주의하면 됩니다.\n\n",
"_____no_output_____"
]
],
[
[
"model = ToyModel()\nloss_fn = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.001)\n\noptimizer.zero_grad()\noutputs = model(torch.randn(20, 10))\nlabels = torch.randn(20, 5).to('cuda:1') # 신경망 모델의 최종 출력값과 동일한 GPU에 할당\nloss_fn(outputs, labels).backward()\noptimizer.step()",
"_____no_output_____"
]
],
[
[
"기존에 존재하는 모듈에 모델 병렬 처리 적용해보기\n----------------------------------------\n\n기존에 단일 GPU에 존재하는 모듈을 여러 GPU에 할당하는 것은 단지 몇 줄의 코드를 수정하는 것으로도 쉽게 가능합니다. \n아래에 있는 코드들은 ResNet50 모델을 분할하는 방법입니다. 이 아이디어는, 기존에 존재하는 ResNet 모듈을 상속받아 설계할 때, 2개의 GPU에 층을 나누어 설계하는 방식으로 진행됩니다. \n그 후, 2개 GPU에서 계산되는 중간 산출물 텐서값을 적절히 배치하기 위헤 순전파 메소드를 수정합니다.\n\n",
"_____no_output_____"
]
],
[
[
"from torchvision.models.resnet import ResNet, Bottleneck\n\nnum_classes = 1000\n\n\nclass ModelParallelResNet50(ResNet):\n def __init__(self, *args, **kwargs):\n super(ModelParallelResNet50, self).__init__(\n Bottleneck, [3, 4, 6, 3], num_classes=num_classes, *args, **kwargs)\n\n self.seq1 = nn.Sequential(\n self.conv1,\n self.bn1,\n self.relu,\n self.maxpool,\n\n self.layer1,\n self.layer2\n ).to('cuda:0') # 첫 번째 GPU에 일련의 과정을 할당\n\n self.seq2 = nn.Sequential(\n self.layer3,\n self.layer4,\n self.avgpool,\n ).to('cuda:1') # 두 번째 GPU에 일련의 과정을 할당\n\n self.fc.to('cuda:1') # ResNet50 구성요소를 두 번째 GPU에 할당\n\n def forward(self, x):\n x = self.seq2(self.seq1(x).to('cuda:1')) # seq1의 출력값을 두 번쨰 GPU에 할당하여 연결\n return self.fc(x.view(x.size(0), -1))",
"_____no_output_____"
]
],
[
[
"위의 예제에서는 단일 GPU에 신경망 모델을 할당하여 학습시키기에는 모델 크기가 너무 클 때 발생하는 문제를 해결하는 방법입니다. \n하지만, 여러분은 단일 GPU를 이용할 때보다 학습 과정이 오래걸리며, 이는 여러분들이 이미 알고 있는 내용이었을 수 있습니다. \n그 이유는, 두 개의 GPU가 동시에 계산하는 것이 아니라 1개의 GPU는 계산하지 않고 대기하고 있기 때문입니다.\n또한, 두 번째 층 (layer2)이 할당된 첫 번째 GPU에서 계산된 결과를 세 번째 층 (layer3)이 할당된 두 번째 GPU로 텐서값을 복사하기 때문에 계산 과정이 더 길어지게 됩니다.\n\n코드 실행 시간을 정량적으로 살펴보기 위해 실험을 하나 해봅시다. 입력 텐서값과 레이블값을 랜덤으로 설정한 후, \n이미 존재하는 torchvision.models.reset50() 과, 모델 병렬 처리를 진행한 ``ModelParallelResNet50`` 을 통해 학습을 진행합니다.\n학습 진행을 완료한 후, 두 모델들은 랜덤으로 생성된 데이터로 학습을 진행했기 때문에 실용적인 예측을 하진 못하지만, 학습 진행 시간을 실용적으로 비교하여 할 수 있습니다.\n\n",
"_____no_output_____"
]
],
[
[
"import torchvision.models as models\n\nnum_batches = 3\nbatch_size = 120\nimage_w = 128\nimage_h = 128\n\n\ndef train(model):\n model.train(True)\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n one_hot_indices = torch.LongTensor(batch_size) \\\n .random_(0, num_classes) \\\n .view(batch_size, 1)\n\n for _ in range(num_batches):\n # 입력 텐서값과 레이블값을 랜덤으로 생성합니다.\n inputs = torch.randn(batch_size, 3, image_w, image_h)\n labels = torch.zeros(batch_size, num_classes) \\\n .scatter_(1, one_hot_indices, 1)\n\n # 입력값을 이용하여 순전파를 진행합니다.\n optimizer.zero_grad()\n outputs = model(inputs.to('cuda:0'))\n\n # 역전파를 진행하여 신경망 모델의 가중치를 업데이트합니다.\n labels = labels.to(outputs.device)\n loss_fn(outputs, labels).backward()\n optimizer.step()",
"_____no_output_____"
]
],
[
[
"위에서 정의한 ``train(model)`` 메소드는 nn.MSELoss (Mean Squared Error ; 평균 제곱 오차) 로 손실 함수를 정의하여 신경망 모델을 학습하는 것을 의미합니다. \n그리고, ``optim.SGD`` 메소드는 최적화 방식을 의미합니다. 위 방식은 128 * 128 크기의 이미지가 120개로 구성된 배치 데이터가 3개 존재하는 상황을 모방하기 위해 랜덤으로 생성하였습니다. \n그리고나서, 우리는 ``timeit`` 을 이용하여 ``train(model)`` 메소드를 10회 실행하여 학습을 진행하고, 학습 실행 시간에 대해서 표준 편차값을 반영하는 이미지를 생성하여 저장합니다.\n\n",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nplt.switch_backend('Agg')\nimport numpy as np\nimport timeit\n\nnum_repeat = 10\n\nstmt = \"train(model)\"\n\nsetup = \"model = ModelParallelResNet50()\"\n\n# globals 인자값은 파이썬 3 버전에서만 이용할 수 있습니다.\n# 만약 파이썬 2 버전을 이용한다면 다음과 같이 이용할 수 있습니다.\n# import __builtin__\n# __builtin__.__dict__.update(locals())\n\nmp_run_times = timeit.repeat(\n stmt, setup, number=1, repeat=num_repeat, globals=globals())\nmp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times)\n\nsetup = \"import torchvision.models as models;\" + \\\n \"model = models.resnet50(num_classes=num_classes).to('cuda:0')\"\nrn_run_times = timeit.repeat(\n stmt, setup, number=1, repeat=num_repeat, globals=globals())\nrn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)\n\n\ndef plot(means, stds, labels, fig_name):\n fig, ax = plt.subplots()\n ax.bar(np.arange(len(means)), means, yerr=stds,\n align='center', alpha=0.5, ecolor='red', capsize=10, width=0.6)\n ax.set_ylabel('ResNet50 Execution Time (Second)')\n ax.set_xticks(np.arange(len(means)))\n ax.set_xticklabels(labels)\n ax.yaxis.grid(True)\n plt.tight_layout()\n plt.savefig(fig_name)\n plt.close(fig)\n\n\nplot([mp_mean, rn_mean],\n [mp_std, rn_std],\n ['Model Parallel', 'Single GPU'],\n 'mp_vs_rn.png')",
"_____no_output_____"
]
],
[
[
".. figure:: /_static/img/model-parallel-images/mp_vs_rn.png\n :alt:\n\n\n",
"_____no_output_____"
]
],
[
[
"# 실험 결과, 모델 병렬 철리하여 학습하는 시간이 단일 GPU로 학습하는 시간보다 약 7% ``4.02/3.75-1=7%``정도\n# 오래 걸리는 것을 확인할 수 있습니다. 그러므로, 순전파와 역전파를 진행하면서 GPU 간 텐서값들이\n# 복제되어 이용하는 시간이 약 7%정도 소요되는 것으로 결론지을 수 있습니다. 학습하는 과정 속에서\n# 2개의 GPU 중 1개의 GPU가 계산하지 않고 대기하고 있기 때문에, 이를 해결하여 \n# 학습 시간을 빠르게 개선시킬 수 있습니다. 그 중 한 가지 방법은, 학습 단위인 미니 배치 1개의 데이터를\n# 2개로 분할하는 파이프라인을 생성하여, 분할된 첫 번째 데이터가 첫 번째 층을 통과하여 두 번째 층으로\n# 복제되고, 두 번째 층을 통과할 때, 두번재로 분할된 데이터가 첫 번쨰 층을 통해 계산되는 방식으로 설정하는 것입니다.\n# 이러한 방법을 통해서 2개의 GPU가 2개로 분할된 데이터를 동시에 처리할 수 있으며 학습 시간을 단축시킬 수 있습니다.",
"_____no_output_____"
]
],
[
[
"입력 텐서값을 분할하는 파이프라인을 설계하여 학습 시간을 단축하는 방법에 대한 예제\n-----------------------------\n\n아래에 있는 실험은, 120개의 이미지로 구성된 1개의 미니 배치 데이터를 20개씩 나누어 진행하는 \n과정입니다. 아래의 과정을 실행할 때, PyTorch가 CUDA 연산을 비동기적으로 이용하기 때문에, \n프로세스를 실행하는 스레드를 여러개 생성할 필요가 없습니다.\n\n",
"_____no_output_____"
]
],
[
[
"class PipelineParallelResNet50(ModelParallelResNet50):\n def __init__(self, split_size=20, *args, **kwargs):\n super(PipelineParallelResNet50, self).__init__(*args, **kwargs)\n self.split_size = split_size\n\n def forward(self, x):\n splits = iter(x.split(self.split_size, dim=0))\n s_next = next(splits)\n s_prev = self.seq1(s_next).to('cuda:1')\n ret = []\n\n for s_next in splits:\n # A. s_prev는 두 번째 GPU에서 실행됩니다.\n s_prev = self.seq2(s_prev)\n ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n # B. s_next는 A.와 동시에 진행되면서 첫 번째 GPU에서 실행됩니다. \n s_prev = self.seq1(s_next).to('cuda:1')\n\n s_prev = self.seq2(s_prev)\n ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n return torch.cat(ret)\n\n\nsetup = \"model = PipelineParallelResNet50()\"\npp_run_times = timeit.repeat(\n stmt, setup, number=1, repeat=num_repeat, globals=globals())\npp_mean, pp_std = np.mean(pp_run_times), np.std(pp_run_times)\n\nplot([mp_mean, rn_mean, pp_mean],\n [mp_std, rn_std, pp_std],\n ['Model Parallel', 'Single GPU', 'Pipelining Model Parallel'],\n 'mp_vs_rn_vs_pp.png')",
"_____no_output_____"
]
],
[
[
"GPU 간 텐서값이 복사되는 것은 현재 계산되고 있는 소스값과, 소스값의 목적지 GPU 간 연산되고 있는\n스트림과 동기화되는 것을 주의하세요. 만약 여러 스트림을 생성하여 진행하고 있다면, GPU 간 텐서값이\n정상적으로 복사되어 계산되고 있는지 꼭 확인해야 합니다. 만약 복사되는 과정 중에 소스값을 이용하거나,\nGPU의 텐서값을 읽거나 쓰는 것은 올바르게 계산되지 않을 수 있습니다. 위의 예제에서는 소스값 및 GPU \n텐서값을 기본 스트림만 이용하여 진행하므로 추가적인 동기화 과정을 진행할 필요는 없습니다.\n\n",
"_____no_output_____"
],
[
".. figure:: /_static/img/model-parallel-images/mp_vs_rn_vs_pp.png\n :alt:\n\n파이프라인을 이용하여 미니 배치 내 데이터를 분할하여 적용하였을 때, ResNet50 신경망 모델의\n학습 시간이 약 49% ``3.75/2.51-1=49%`` 정도 단축된 것을 이번 실험을 통해 확인할 수 있습니다. 하지만, 이상적으로\n학습 시간이 2배 단축되는 것에 비해 다소 적게 학습 시간이 단축되었습니다. 파이프라인을 이용할 때,\n``split_sizes`` 매개변수를 도입하였기 때문에, 파이프라인을 이용하는 것이 학습 시간 단축에 얼마나\n영향을 미쳤는지 불분명합니다. 직관적으로 생각하였을 때, ``split_sizes`` 매개변수 값을 작게 설정한다면,\n아주 소규모의 CUDA 연산이 많이 진행되고, ``split_sizes`` 매개변수 값을 크게 설정한다면, 첫 번째와 \n마지막 분리될 때 비교적 긴 시간 동안 CUDA 연산이 이루어지게 됩니다. 둘 다 최적의 설정이 아닙니다.\n따라서, ``split_sizes`` 매개변수 값을 최적으로 설정하였을 때, 학습 시간 과정이 단축될 수 있을 것이라\n기대됩니다. ``split_sizes`` 매개변수 값을 조정하여 실험하면서 최적의 값을 찾아봅시다.\n\n",
"_____no_output_____"
]
],
[
[
"means = []\nstds = []\nsplit_sizes = [1, 3, 5, 8, 10, 12, 20, 40, 60]\n\nfor split_size in split_sizes:\n setup = \"model = PipelineParallelResNet50(split_size=%d)\" % split_size\n pp_run_times = timeit.repeat(\n stmt, setup, number=1, repeat=num_repeat, globals=globals())\n means.append(np.mean(pp_run_times))\n stds.append(np.std(pp_run_times))\n\nfig, ax = plt.subplots()\nax.plot(split_sizes, means)\nax.errorbar(split_sizes, means, yerr=stds, ecolor='red', fmt='ro')\nax.set_ylabel('ResNet50 Execution Time (Second)')\nax.set_xlabel('Pipeline Split Size')\nax.set_xticks(split_sizes)\nax.yaxis.grid(True)\nplt.tight_layout()\nplt.savefig(\"split_size_tradeoff.png\")\nplt.close(fig)",
"_____no_output_____"
]
],
[
[
".. figure:: /_static/img/model-parallel-images/split_size_tradeoff.png\n :alt:\n\n실험 결과, ``split_size`` 매개변수값을 12로 설정하였을 때, 학습 시간이 54% 수준으로 \n가장 많이 단축되었습니다. 아직 학습 시간을 더 단축시킬 수 있는 방법은 다양하게 존재합니다.\n예를 들어, 첫 번째 GPU에서 모든 연산과정이 기본으로 설정되어 진행됩니다. 이는 미니배치 분할 과정 중,\n현재 진행되는 과정의 다음 단계는 현재 진행되는 과정과 동시에 복제가 이루어질 수 없는 것을 의미합니다.\n그러나, 이전과 다음 단계의 분할과정이 다른 텐서값을 이용하기 때문에, 다른 계산과 중복되어 진행되어도\n문제가 없습니다. 이에 대해서, 2개 GPU에 여러개의 스트림을 사용하는 것이 필요하며, 서로 다른 서브 네트워크 \n구조가 서로 다른 스트림을 관리하는 전략이 요구됩니다. 모델 병렬 처리에 대해서 여러 스트림을 사용하는 방법이 \n일반적을로 존재하지 않기 때문에 이번 튜토리얼에서는 설명하지 않습니다.\n\n",
"_____no_output_____"
]
],
[
[
"\"\"\"\n.. note::\n 이번 게시물에서는 다양한 성능 측정값을 확인할 수 있습니다. 여러분은 위의 예제를 실행할 때 마다 매번\n 다른 결과를 확인할 수 있습니다. 그 이유는, 이용하는 소프트웨어 및 하드웨어에 따라 결과가 \n 다르게 나타나기 때문입니다. 여러분이 이용하고 있는 환경 내에서 가장 좋은 성능을 얻기 위해서는, 곡선을 그려서\n 최적의 ``split_size`` 값을 도출한 후, 해당 값을 이용하여 미니 배치 내 데이터를 분리하는 파이프라인을 \n 생성하는 것입니다.\n\"\"\"",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb744c190368f8e757962c5a4bf307dce21501d6 | 42,872 | ipynb | Jupyter Notebook | Athletes.ipynb | ChloeBors/Olympics-Data-Engineering-Practice | 0d2fab9ecd4c180e179a95c8a50228eb887f4c18 | [
"MIT"
]
| 1 | 2021-09-24T14:37:16.000Z | 2021-09-24T14:37:16.000Z | Athletes.ipynb | ChloeBors/Olympics-Data-Engineering-Practice | 0d2fab9ecd4c180e179a95c8a50228eb887f4c18 | [
"MIT"
]
| null | null | null | Athletes.ipynb | ChloeBors/Olympics-Data-Engineering-Practice | 0d2fab9ecd4c180e179a95c8a50228eb887f4c18 | [
"MIT"
]
| null | null | null | 31.851412 | 173 | 0.420624 | [
[
[
"## PySpark Data Engineering Practice (Sandboxing)\n### Olympic Athlete Data",
"_____no_output_____"
],
[
"This notebook is for data engineering practicing purposes.\nDuring this notebook I want to explore data by using and learning PySpark.\nThe data is from: https://www.kaggle.com/mysarahmadbhat/120-years-of-olympic-history",
"_____no_output_____"
]
],
[
[
"## Imports\nfrom pyspark.sql import SparkSession ## Create session\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType ## Create schema",
"_____no_output_____"
],
[
"## Create spark sessions\nspark = (SparkSession.builder.appName(\"AthletesAnalytics\").getOrCreate())",
"_____no_output_____"
]
],
[
[
"### Import the data",
"_____no_output_____"
]
],
[
[
"## Create schema\nschema = StructType([\n StructField(\"ID\", StringType(), True),\n StructField(\"Name\", StringType(), True),\n StructField(\"Sex\", StringType(), True),\n StructField(\"Age\", StringType(), True),\n StructField(\"Height\", StringType(), True),\n StructField(\"Weight\", StringType(), True),\n StructField(\"Team\", StringType(), True),\n StructField(\"NOC\", StringType(), True),\n StructField(\"Games\", StringType(), True),\n StructField(\"Year\", StringType(), True),\n StructField(\"Season\", StringType(), True),\n StructField(\"City\", StringType(), True),\n StructField(\"Sport\", StringType(), True),\n StructField(\"Event\", StringType(), True),\n StructField(\"Medal\", StringType(), True),\n])",
"_____no_output_____"
],
[
"## Read CSV into dataframe\nfile_path = \"./data/athlete_events.csv\"\nathletes_df = (spark.read.format(\"csv\")\n .option(\"header\", True)\n .schema(schema)\n .load(file_path))",
"_____no_output_____"
],
[
"## Showing first 10 rows\nathletes_df.show(10, False)",
"+---+------------------------+---+---+------+------+--------------+---+-----------+----+------+-----------+-------------+----------------------------------+-----+\n|ID |Name |Sex|Age|Height|Weight|Team |NOC|Games |Year|Season|City |Sport |Event |Medal|\n+---+------------------------+---+---+------+------+--------------+---+-----------+----+------+-----------+-------------+----------------------------------+-----+\n|1 |A Dijiang |M |24 |180 |80 |China |CHN|1992 Summer|1992|Summer|Barcelona |Basketball |Basketball Men's Basketball |NA |\n|2 |A Lamusi |M |23 |170 |60 |China |CHN|2012 Summer|2012|Summer|London |Judo |Judo Men's Extra-Lightweight |NA |\n|3 |Gunnar Nielsen Aaby |M |24 |NA |NA |Denmark |DEN|1920 Summer|1920|Summer|Antwerpen |Football |Football Men's Football |NA |\n|4 |Edgar Lindenau Aabye |M |34 |NA |NA |Denmark/Sweden|DEN|1900 Summer|1900|Summer|Paris |Tug-Of-War |Tug-Of-War Men's Tug-Of-War |Gold |\n|5 |Christine Jacoba Aaftink|F |21 |185 |82 |Netherlands |NED|1988 Winter|1988|Winter|Calgary |Speed Skating|Speed Skating Women's 500 metres |NA |\n|5 |Christine Jacoba Aaftink|F |21 |185 |82 |Netherlands |NED|1988 Winter|1988|Winter|Calgary |Speed Skating|Speed Skating Women's 1,000 metres|NA |\n|5 |Christine Jacoba Aaftink|F |25 |185 |82 |Netherlands |NED|1992 Winter|1992|Winter|Albertville|Speed Skating|Speed Skating Women's 500 metres |NA |\n|5 |Christine Jacoba Aaftink|F |25 |185 |82 |Netherlands |NED|1992 Winter|1992|Winter|Albertville|Speed Skating|Speed Skating Women's 1,000 metres|NA |\n|5 |Christine Jacoba Aaftink|F |27 |185 |82 |Netherlands |NED|1994 Winter|1994|Winter|Lillehammer|Speed Skating|Speed Skating Women's 500 metres |NA |\n|5 |Christine Jacoba Aaftink|F |27 |185 |82 |Netherlands |NED|1994 Winter|1994|Winter|Lillehammer|Speed Skating|Speed Skating Women's 1,000 metres|NA |\n+---+------------------------+---+---+------+------+--------------+---+-----------+----+------+-----------+-------------+----------------------------------+-----+\nonly showing top 10 rows\n\n"
],
[
"## Print out schema details\nathletes_df.printSchema()",
"root\n |-- ID: string (nullable = true)\n |-- Name: string (nullable = true)\n |-- Sex: string (nullable = true)\n |-- Age: string (nullable = true)\n |-- Height: string (nullable = true)\n |-- Weight: string (nullable = true)\n |-- Team: string (nullable = true)\n |-- NOC: string (nullable = true)\n |-- Games: string (nullable = true)\n |-- Year: string (nullable = true)\n |-- Season: string (nullable = true)\n |-- City: string (nullable = true)\n |-- Sport: string (nullable = true)\n |-- Event: string (nullable = true)\n |-- Medal: string (nullable = true)\n\n"
],
[
"athletes_df.show(3, vertical=True)",
"-RECORD 0----------------------\n ID | 1 \n Name | A Dijiang \n Sex | M \n Age | 24 \n Height | 180 \n Weight | 80 \n Team | China \n NOC | CHN \n Games | 1992 Summer \n Year | 1992 \n Season | Summer \n City | Barcelona \n Sport | Basketball \n Event | Basketball Men's ... \n Medal | NA \n-RECORD 1----------------------\n ID | 2 \n Name | A Lamusi \n Sex | M \n Age | 23 \n Height | 170 \n Weight | 60 \n Team | China \n NOC | CHN \n Games | 2012 Summer \n Year | 2012 \n Season | Summer \n City | London \n Sport | Judo \n Event | Judo Men's Extra-... \n Medal | NA \n-RECORD 2----------------------\n ID | 3 \n Name | Gunnar Nielsen Aaby \n Sex | M \n Age | 24 \n Height | NA \n Weight | NA \n Team | Denmark \n NOC | DEN \n Games | 1920 Summer \n Year | 1920 \n Season | Summer \n City | Antwerpen \n Sport | Football \n Event | Football Men's Fo... \n Medal | NA \nonly showing top 3 rows\n\n"
]
],
[
[
"### Exploration & Cleansing",
"_____no_output_____"
]
],
[
[
"### Check for NA values by exploring columns\nfrom pyspark.sql.functions import col\nathletes_df.filter(col(\"Medal\") == \"NA\").show(10)\n## NA values in: \n## Age, Height, Weight, Team, NOC National Olympics Committee, and Medal.",
"+---+--------------------+---+---+------+------+-------------+---+-----------+----+------+-----------+--------------------+--------------------+-----+\n| ID| Name|Sex|Age|Height|Weight| Team|NOC| Games|Year|Season| City| Sport| Event|Medal|\n+---+--------------------+---+---+------+------+-------------+---+-----------+----+------+-----------+--------------------+--------------------+-----+\n| 1| A Dijiang| M| 24| 180| 80| China|CHN|1992 Summer|1992|Summer| Barcelona| Basketball|Basketball Men's ...| NA|\n| 2| A Lamusi| M| 23| 170| 60| China|CHN|2012 Summer|2012|Summer| London| Judo|Judo Men's Extra-...| NA|\n| 3| Gunnar Nielsen Aaby| M| 24| NA| NA| Denmark|DEN|1920 Summer|1920|Summer| Antwerpen| Football|Football Men's Fo...| NA|\n| 5|Christine Jacoba ...| F| 21| 185| 82| Netherlands|NED|1988 Winter|1988|Winter| Calgary| Speed Skating|Speed Skating Wom...| NA|\n| 5|Christine Jacoba ...| F| 21| 185| 82| Netherlands|NED|1988 Winter|1988|Winter| Calgary| Speed Skating|Speed Skating Wom...| NA|\n| 5|Christine Jacoba ...| F| 25| 185| 82| Netherlands|NED|1992 Winter|1992|Winter|Albertville| Speed Skating|Speed Skating Wom...| NA|\n| 5|Christine Jacoba ...| F| 25| 185| 82| Netherlands|NED|1992 Winter|1992|Winter|Albertville| Speed Skating|Speed Skating Wom...| NA|\n| 5|Christine Jacoba ...| F| 27| 185| 82| Netherlands|NED|1994 Winter|1994|Winter|Lillehammer| Speed Skating|Speed Skating Wom...| NA|\n| 5|Christine Jacoba ...| F| 27| 185| 82| Netherlands|NED|1994 Winter|1994|Winter|Lillehammer| Speed Skating|Speed Skating Wom...| NA|\n| 6| Per Knut Aaland| M| 31| 188| 75|United States|USA|1992 Winter|1992|Winter|Albertville|Cross Country Skiing|Cross Country Ski...| NA|\n+---+--------------------+---+---+------+------+-------------+---+-----------+----+------+-----------+--------------------+--------------------+-----+\nonly showing top 10 rows\n\n"
]
],
[
[
"#### Drop rows where age, height or weight have NA values.",
"_____no_output_____"
]
],
[
[
"athletes_df = athletes_df.filter((col(\"Age\") != \"NA\") & (col(\"Height\") != \"NA\") & (col(\"Weight\") != \"NA\"))",
"_____no_output_____"
],
[
"## Check if correct\nathletes_df.filter((col(\"Age\") == \"NA\")).show(5)\nathletes_df.filter((col(\"Height\") == \"NA\")).show(5)\nathletes_df.filter((col(\"Weight\") == \"NA\")).show(5)",
"+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n\n"
]
],
[
[
"#### Check if other columns have the right values",
"_____no_output_____"
]
],
[
[
"### Check if ID, Age, Height, Weight and Year are indeed all integer values\n### Checking ID first on non numeric values\nfrom pyspark.sql.types import DataType, StructField, StructType, IntegerType, StringType\ntest_df = athletes_df.select('ID',col('ID').cast(IntegerType()).isNotNull().alias(\"Value\"))\ntest_df.filter((col(\"Value\") == False)).show(5)",
"+---+-----+\n| ID|Value|\n+---+-----+\n+---+-----+\n\n"
],
[
"### Checking Age on non numeric values\nfrom pyspark.sql.types import DataType, StructField, StructType, IntegerType, StringType\ntest_df = athletes_df.select('Age',col('Age').cast(IntegerType()).isNotNull().alias(\"Value\"))\ntest_df.filter((col(\"Value\") == False)).show(5)",
"+------------+-----+\n| Age|Value|\n+------------+-----+\n| M|false|\n| M|false|\n| M|false|\n| -Andersen)\"|false|\n| M|false|\n+------------+-----+\nonly showing top 5 rows\n\n"
],
[
"### As seen something isn't going well. There are gender and even name values in Age. \n### Let's see how many rows have this problem\ntest_df.filter((col(\"Value\") == True)).count()",
"_____no_output_____"
],
[
"### 500 out of 206188 values have this problem\ntest_df.filter((col(\"Value\") == False)).count()",
"_____no_output_____"
],
[
"### Percentage of broken rows\nprint(str(round(500 / 206188 * 100,2)) + '%')",
"0.24%\n"
],
[
"athletes_df.filter((col(\"Age\") == \"M\")).show(5)",
"+----+--------------------+-----+---+------+------+----+-------------+-----+-----------+------+------+---------+----------+--------------------+\n| ID| Name| Sex|Age|Height|Weight|Team| NOC|Games| Year|Season| City| Sport| Event| Medal|\n+----+--------------------+-----+---+------+------+----+-------------+-----+-----------+------+------+---------+----------+--------------------+\n|2781|\"Robert Jeffrey \"...| II\"| M| 22| 185| 91|United States| USA|1992 Summer| 1992|Summer|Barcelona| Baseball|Baseball Men's Ba...|\n|3874|\"William Lloyd \"\"...| Jr.\"| M| 21| 200| 86|United States| USA|1988 Summer| 1988|Summer| Seoul|Basketball|Basketball Men's ...|\n|4361|\"Joseph \"\"Joe\"\" A...| Jr.\"| M| 31| 170| 66|United States| USA|1948 Summer| 1948|Summer| London| Rowing|Rowing Men's Doub...|\n|6270|\"Arthur DeLancey ...| Jr.\"| M| 21| 193| 86|United States| USA|1956 Summer| 1956|Summer|Melbourne| Rowing|Rowing Men's Coxe...|\n|6270|\"Arthur DeLancey ...| Jr.\"| M| 25| 193| 86|United States| USA|1960 Summer| 1960|Summer| Roma| Rowing|Rowing Men's Coxl...|\n+----+--------------------+-----+---+------+------+----+-------------+-----+-----------+------+------+---------+----------+--------------------+\nonly showing top 5 rows\n\n"
],
[
"### The reason for this error is that there is a , in some of the names. \n### For now I'll drop these rows. This can be done with the following filter function\nathletes_df = athletes_df.filter(\"CAST(Age AS INTEGER) IS NOT NULL\")\nathletes_df.filter((col(\"Age\"))==\"M\").show()",
"+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n\n"
],
[
"### By fixing the rows, there are also no wrong values anymore in Height\ntest_df = athletes_df.select('Height',col('Height').cast(IntegerType()).isNotNull().alias(\"Value\"))\ntest_df.filter((col(\"Value\") == False)).show(5)",
"+------+-----+\n|Height|Value|\n+------+-----+\n+------+-----+\n\n"
],
[
"### As you can see, 500 rows where deleted.\nathletes_df.count()",
"_____no_output_____"
],
[
"### Check the distinct values for seasons.\n### As seen there are no odd values in this column.\nathletes_df.select(\"Season\").distinct().show()",
"+------+\n|Season|\n+------+\n|Summer|\n|Winter|\n+------+\n\n"
],
[
"### Check the length of NOC, as seen in the result this is always 3, so that is good.\nfrom pyspark.sql.functions import length\ntest_df = athletes_df.withColumn(\"length_NOC\", length(\"NOC\")).filter((col(\"length_NOC\") != 3))\ntest_df.show()",
"+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+----------+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|length_NOC|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+----------+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+----------+\n\n"
],
[
"### Check if sex is only M and F, as seen this is correct.\nathletes_df.filter((col(\"Sex\")!=\"F\") & (col(\"Sex\")!=\"M\")).show()",
"+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n| ID|Name|Sex|Age|Height|Weight|Team|NOC|Games|Year|Season|City|Sport|Event|Medal|\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n+---+----+---+---+------+------+----+---+-----+----+------+----+-----+-----+-----+\n\n"
]
],
[
[
"### Masking the name",
"_____no_output_____"
],
[
"To practice the idea of private information I want to explore masking the name.",
"_____no_output_____"
],
[
"#### Masking ",
"_____no_output_____"
]
],
[
[
"### Masks name showing the first and last two characters. \n### If name is less than 5 characters, it will only show the first character.\nfrom pyspark.sql.functions import udf\n\ndef mask_name(columnValue):\n if len(columnValue) < 5:\n nameList=list(columnValue)\n start = \"\".join(nameList[:1])\n masking = 'x'*(len(nameList)-1)\n masked_name = start+masking\n else:\n nameList=list(columnValue)\n start = \"\".join(nameList[:2])\n end = \"\".join(nameList[-2:])\n masking = 'x'*(len(nameList)-4)\n masked_name = start+masking+end\n return masked_name\n \n### Make the function work with PySpark\nmask_name_udf = udf(mask_name, StringType())\n\n### Test function\nathletes_df.select(\"Name\",mask_name_udf(athletes_df[\"Name\"])).distinct().show(5, truncate=False)",
"+-------------------------+-------------------------+\n|Name |mask_name(Name) |\n+-------------------------+-------------------------+\n|Shakeel Abbasi |Shxxxxxxxxxxsi |\n|Raouf Abdelraouf |Raxxxxxxxxxxxxuf |\n|Sara Helena berg |Saxxxxxxxxxxxxrg |\n|Jos Eugenio Acosta |Joxxxxxxxxxxxxxxta |\n|\"Frances \"\"Fran\"\" Adcock\"|\"Fxxxxxxxxxxxxxxxxxxxxxk\"|\n+-------------------------+-------------------------+\nonly showing top 5 rows\n\n"
],
[
"athletes_df = athletes_df.withColumn(\"MaskedName\",mask_name_udf(athletes_df[\"Name\"])).drop(col(\"Name\"))",
"_____no_output_____"
],
[
"athletes_df.show(1,vertical=True)",
"-RECORD 0--------------------------\n ID | 1 \n Sex | M \n Age | 24 \n Height | 180 \n Weight | 80 \n Team | China \n NOC | CHN \n Games | 1992 Summer \n Year | 1992 \n Season | Summer \n City | Barcelona \n Sport | Basketball \n Event | Basketball Men's ... \n Medal | NA \n MaskedName | A xxxxxng \nonly showing top 1 row\n\n"
]
],
[
[
"### Fixing Schema",
"_____no_output_____"
]
],
[
[
"athletes_df.printSchema()",
"root\n |-- ID: string (nullable = true)\n |-- Sex: string (nullable = true)\n |-- Age: string (nullable = true)\n |-- Height: string (nullable = true)\n |-- Weight: string (nullable = true)\n |-- Team: string (nullable = true)\n |-- NOC: string (nullable = true)\n |-- Games: string (nullable = true)\n |-- Year: string (nullable = true)\n |-- Season: string (nullable = true)\n |-- City: string (nullable = true)\n |-- Sport: string (nullable = true)\n |-- Event: string (nullable = true)\n |-- Medal: string (nullable = true)\n |-- MaskedName: string (nullable = true)\n\n"
],
[
"### ID, Age Height, Weight and Year should be integer\nathletes_final_df = (athletes_df.withColumn(\"PlayerID\", col(\"ID\").cast(IntegerType()))\n .drop(col(\"ID\"))\n .withColumn(\"Name\", col(\"MaskedName\").cast(StringType()))\n .withColumn(\"Age\", col(\"Age\").cast(IntegerType()))\n .withColumn(\"Height\", col(\"Height\").cast(IntegerType()))\n .withColumn(\"Weight\", col(\"Weight\").cast(IntegerType()))\n .withColumn(\"Year\", col(\"Year\").cast(IntegerType()))\n )\nathletes_final_df.printSchema()",
"root\n |-- Sex: string (nullable = true)\n |-- Age: integer (nullable = true)\n |-- Height: integer (nullable = true)\n |-- Weight: integer (nullable = true)\n |-- Team: string (nullable = true)\n |-- NOC: string (nullable = true)\n |-- Games: string (nullable = true)\n |-- Year: integer (nullable = true)\n |-- Season: string (nullable = true)\n |-- City: string (nullable = true)\n |-- Sport: string (nullable = true)\n |-- Event: string (nullable = true)\n |-- Medal: string (nullable = true)\n |-- MaskedName: string (nullable = true)\n |-- PlayerID: integer (nullable = true)\n |-- Name: string (nullable = true)\n\n"
],
[
"### Sort column order\nathletes_sorted_df = athletes_final_df.select(\n [athletes_final_df.columns[-2]]\n + [athletes_final_df.columns[-1]]\n + athletes_final_df.columns[:-3])\n\nathletes_sorted_df.show(1, vertical=True)",
"-RECORD 0------------------------\n PlayerID | 1 \n Name | A xxxxxng \n Sex | M \n Age | 24 \n Height | 180 \n Weight | 80 \n Team | China \n NOC | CHN \n Games | 1992 Summer \n Year | 1992 \n Season | Summer \n City | Barcelona \n Sport | Basketball \n Event | Basketball Men's ... \n Medal | NA \nonly showing top 1 row\n\n"
],
[
"athletes_sorted_df.printSchema()",
"root\n |-- PlayerID: integer (nullable = true)\n |-- Name: string (nullable = true)\n |-- Sex: string (nullable = true)\n |-- Age: integer (nullable = true)\n |-- Height: integer (nullable = true)\n |-- Weight: integer (nullable = true)\n |-- Team: string (nullable = true)\n |-- NOC: string (nullable = true)\n |-- Games: string (nullable = true)\n |-- Year: integer (nullable = true)\n |-- Season: string (nullable = true)\n |-- City: string (nullable = true)\n |-- Sport: string (nullable = true)\n |-- Event: string (nullable = true)\n |-- Medal: string (nullable = true)\n\n"
]
],
[
[
"### Save to parquet",
"_____no_output_____"
]
],
[
[
"## Write to parquet file, but this crashes laptop\n#output_path = './output/athlete_data'\n#athletes_sorted_df.write.partitionBy(\"Games\").mode(\"overwrite\").parquet(output_path)",
"_____no_output_____"
]
],
[
[
"### Aggregations",
"_____no_output_____"
]
],
[
[
"from pyspark.sql.functions import min, max, sum, sumDistinct, avg, col, expr, round, count",
"_____no_output_____"
]
],
[
[
"#### Medals per year",
"_____no_output_____"
]
],
[
[
"### Get year and medal\nmedals_per_year_df = athletes_sorted_df.select(\n col(\"Year\"),\n col(\"Medal\")\n )\nmedals_per_year_df.show(5)",
"+----+-----+\n|Year|Medal|\n+----+-----+\n|1992| NA|\n|2012| NA|\n|1988| NA|\n|1988| NA|\n|1992| NA|\n+----+-----+\nonly showing top 5 rows\n\n"
],
[
"### Filter out all rows with NA\nmedals_per_year_df = medals_per_year_df.filter(col(\"Medal\")!=\"NA\")\nmedals_per_year_df.show(5)",
"+----+------+\n|Year| Medal|\n+----+------+\n|2014|Bronze|\n|1948|Bronze|\n|1948| Gold|\n|1948| Gold|\n|1948| Gold|\n+----+------+\nonly showing top 5 rows\n\n"
],
[
"### show amount of medals per Year\nmedals_per_year_df.groupBy(\"Year\").agg(count(\"Medal\").alias(\"Medals Amount\")).orderBy(\"Year\", ascending=False).show(5)",
"+----+-------------+\n|Year|Medals Amount|\n+----+-------------+\n|2016| 2013|\n|2014| 568|\n|2012| 1914|\n|2010| 511|\n|2008| 2035|\n+----+-------------+\nonly showing top 5 rows\n\n"
]
],
[
[
"#### Medals per country",
"_____no_output_____"
]
],
[
[
"### Show distinct medal values.\nathletes_sorted_df.select(\"Medal\").distinct().show()",
"+------+\n| Medal|\n+------+\n| NA|\n|Silver|\n| Gold|\n|Bronze|\n+------+\n\n"
],
[
"### create new dataframe and filter out NA values for the medal column.\nmedals_per_country_df = athletes_sorted_df.select(\n col(\"Team\"),\n col(\"Medal\")\n )\n\nmedals_per_country_df = medals_per_country_df.filter(col(\"Medal\")!=\"NA\")\n\nmedals_per_country_df.show(5)",
"+-------+------+\n| Team| Medal|\n+-------+------+\n|Finland|Bronze|\n|Finland|Bronze|\n|Finland| Gold|\n|Finland| Gold|\n|Finland| Gold|\n+-------+------+\nonly showing top 5 rows\n\n"
],
[
"### Aggregate and order by medal amount\nmedals_per_country_df = medals_per_country_df.groupBy(\"Team\",\"Medal\").agg(count(\"Medal\").alias(\"Amount\")).orderBy(\"Amount\", ascending=False)\nmedals_per_country_df.show(10)",
"+-------------+------+------+\n| Team| Medal|Amount|\n+-------------+------+------+\n|United States| Gold| 1995|\n|United States|Silver| 1230|\n| Soviet Union| Gold| 961|\n|United States|Bronze| 909|\n| Soviet Union|Silver| 629|\n| Soviet Union|Bronze| 613|\n| Germany|Bronze| 538|\n| Germany| Gold| 507|\n| Australia|Bronze| 471|\n| Germany|Silver| 470|\n+-------------+------+------+\nonly showing top 10 rows\n\n"
]
],
[
[
"#### Show information about height and weight",
"_____no_output_____"
]
],
[
[
"### This could also be used to make sure there are no odd values in the columns\nathletes_sorted_df.select(\"Height\", \"Weight\").describe().show()",
"+-------+------------------+------------------+\n|summary| Height| Weight|\n+-------+------------------+------------------+\n| count| 205688| 205688|\n| mean|175.36527167360273| 70.67846933219245|\n| stddev|10.544748708871328|14.335718186598772|\n| min| 127| 25|\n| max| 226| 214|\n+-------+------------------+------------------+\n\n"
],
[
"### Weight of only 25?? Let's check out why that is.\nathletes_sorted_df.select(\"Weight\",\"Height\",\"Age\",\"PlayerID\",\"Name\",\"Team\").filter(col(\"Weight\")==25).distinct().show()",
"+------+------+---+--------+--------------+-----------+\n|Weight|Height|Age|PlayerID| Name| Team|\n+------+------+---+--------+--------------+-----------+\n| 25| 135| 14| 21049|Chxxxxxxxxxxui|North Korea|\n+------+------+---+--------+--------------+-----------+\n\n"
]
],
[
[
"#### Which country has the most medals in basketball?",
"_____no_output_____"
]
],
[
[
"athletes_sorted_df.show(2)",
"+--------+---------+---+---+------+------+-----+---+-----------+----+------+---------+----------+--------------------+-----+\n|PlayerID| Name|Sex|Age|Height|Weight| Team|NOC| Games|Year|Season| City| Sport| Event|Medal|\n+--------+---------+---+---+------+------+-----+---+-----------+----+------+---------+----------+--------------------+-----+\n| 1|A xxxxxng| M| 24| 180| 80|China|CHN|1992 Summer|1992|Summer|Barcelona|Basketball|Basketball Men's ...| NA|\n| 2| A xxxxsi| M| 23| 170| 60|China|CHN|2012 Summer|2012|Summer| London| Judo|Judo Men's Extra-...| NA|\n+--------+---------+---+---+------+------+-----+---+-----------+----+------+---------+----------+--------------------+-----+\nonly showing top 2 rows\n\n"
],
[
"best_in_basketball_df = athletes_sorted_df.select(\n col(\"Team\"),\n col(\"Sport\"),\n col(\"Medal\")\n )\n\nbest_in_basketball_df = best_in_basketball_df.filter(col(\"Sport\")==\"Basketball\")\n\nbest_in_basketball_df.show(3)",
"+-----+----------+-----+\n| Team| Sport|Medal|\n+-----+----------+-----+\n|China|Basketball| NA|\n|Spain|Basketball| NA|\n|Italy|Basketball| NA|\n+-----+----------+-----+\nonly showing top 3 rows\n\n"
],
[
"best_in_basketball_df = best_in_basketball_df.groupBy(\"Team\",\"Sport\").agg(count(\"Medal\").alias(\"Amount\")).orderBy(\"Amount\", ascending=False)\nbest_in_basketball_df.show(5)",
"+-------------+----------+------+\n| Team| Sport|Amount|\n+-------------+----------+------+\n|United States|Basketball| 331|\n| Australia|Basketball| 248|\n| Brazil|Basketball| 211|\n| China|Basketball| 200|\n| Spain|Basketball| 180|\n+-------------+----------+------+\nonly showing top 5 rows\n\n"
]
],
[
[
"As you could expect, US has the most medals in Basketball.",
"_____no_output_____"
]
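],
[
[
"### Added follow-up (not part of the original notebook): filtering out the NA rows first,\n### as in the medals-per-country section above, counts only actual medals.\nathletes_sorted_df.filter((col(\"Sport\")==\"Basketball\") & (col(\"Medal\")!=\"NA\")).groupBy(\"Team\").agg(count(\"Medal\").alias(\"Amount\")).orderBy(\"Amount\", ascending=False).show(5)",
"_____no_output_____"
]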
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
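,
[
"code"
]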
]
|
cb744ea270c65e77df2c2dd730530d9760ac97a8 | 21,579 | ipynb | Jupyter Notebook | SourceAnalysis/4.SummedPythonLikelihood/4.SummedPythonLikelihood.ipynb | emilychau/AnalysisThreads | 8270f7b3baa65816a12bff52e0cf7d9038ee9533 | [
"MIT"
]
| null | null | null | SourceAnalysis/4.SummedPythonLikelihood/4.SummedPythonLikelihood.ipynb | emilychau/AnalysisThreads | 8270f7b3baa65816a12bff52e0cf7d9038ee9533 | [
"MIT"
]
| null | null | null | SourceAnalysis/4.SummedPythonLikelihood/4.SummedPythonLikelihood.ipynb | emilychau/AnalysisThreads | 8270f7b3baa65816a12bff52e0cf7d9038ee9533 | [
"MIT"
]
| null | null | null | 30.012517 | 421 | 0.611011 | [
[
[
"# Summed Likelihood Analysis with Python\n\nThis sample analysis shows a way of performing joint likelihood on two data selections using the same XML model. This is useful if you want to do the following:\n\n* Coanalysis of Front and Back selections (not using the combined IRF)\n* Coanalysis of separate time intervals\n* Coanalysis of separate energy ranges\n* Pass 8 PSF type analysis\n* Pass 8 EDISP type analysis\n\nThis tutorial also assumes that you've gone through the standard [binned likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/binned_likelihood_tutorial.html) thread using the combined front + back events, to which we will compare.",
"_____no_output_____"
],
[
"# Get the data\n\nFor this thread the original data were extracted from the [LAT data server](https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi) with the following selections (these selections are similar to those in the paper):\n\n```\nSearch Center (RA,Dec) = (193.98,-5.82)\nRadius = 15 degrees\nStart Time (MET) = 239557417 seconds (2008-08-04T15:43:37)\nStop Time (MET) = 302572802 seconds (2010-08-04T00:00:00)\nMinimum Energy = 100 MeV\nMaximum Energy = 500000 MeV\n```\n\nFor more information on how to download LAT data please see the [Extract LAT Data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/extract_latdata.html) tutorial.\n\nThese are the event files. Run the code cell below to retrieve them:\n```\nL181126210218F4F0ED2738_PH00.fits (5.4 MB)\nL181126210218F4F0ED2738_PH01.fits (10.8 MB)\nL181126210218F4F0ED2738_PH02.fits (6.9 MB)\nL181126210218F4F0ED2738_PH03.fits (9.8 MB)\nL181126210218F4F0ED2738_PH04.fits (7.8 MB)\nL181126210218F4F0ED2738_PH05.fits (6.6 MB)\nL181126210218F4F0ED2738_PH06.fits (4.8 MB)\nL181126210218F4F0ED2738_SC00.fits (256 MB spacecraft file)\n```",
"_____no_output_____"
]
],
[
[
"!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH00.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH01.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH02.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH03.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH04.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH05.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH06.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_SC00.fits",
"_____no_output_____"
],
[
"!mkdir data\n!mv *.fits ./data",
"_____no_output_____"
]
],
[
[
"You'll first need to make a file list with the names of your input event files:",
"_____no_output_____"
]
],
[
[
"!ls ./data/*_PH*.fits > ./data/binned_events.txt\n!cat ./data/binned_events.txt",
"_____no_output_____"
]
],
[
[
"In the following analysis we've assumed that you've named your list of data files `binned_events.txt`.",
"_____no_output_____"
],
[
"# Perform Event Selections\n\nYou could follow the unbinned likelihood tutorial to perform your event selections using **gtlike*, **gtmktime**, etc. directly from the command line, and then use pylikelihood later.\n\nBut we're going to go ahead and use python. The `gt_apps` module provides methods to call these tools from within python. This'll get us used to using python.\n\nSo, let's jump into python:",
"_____no_output_____"
]
],
[
[
"import gt_apps as my_apps",
"_____no_output_____"
]
],
[
[
"We need to run **gtselect** (called `filter` in python) twice. Once, we select only the front events and the other time we select only back events. You do this with `evtype=1` (front) and `evtype=2` (back).",
"_____no_output_____"
]
],
[
[
"my_apps.filter['evclass'] = 128\nmy_apps.filter['evtype'] = 1\nmy_apps.filter['ra'] = 193.98\nmy_apps.filter['dec'] = -5.82\nmy_apps.filter['rad'] = 15\nmy_apps.filter['emin'] = 100\nmy_apps.filter['emax'] = 500000\nmy_apps.filter['zmax'] = 90\nmy_apps.filter['tmin'] = 239557417\nmy_apps.filter['tmax'] = 302572802\nmy_apps.filter['infile'] = '@./data/binned_events.txt'\nmy_apps.filter['outfile'] = './data/3C279_front_filtered.fits'",
"_____no_output_____"
]
],
[
[
"Once this is done, we can run **gtselect**:",
"_____no_output_____"
]
],
[
[
"my_apps.filter.run()",
"_____no_output_____"
]
],
[
[
"Now, we select the back events and run it again:",
"_____no_output_____"
]
],
[
[
"my_apps.filter['evtype'] = 2\nmy_apps.filter['outfile'] = './data/3C279_back_filtered.fits'",
"_____no_output_____"
],
[
"my_apps.filter.run()",
"_____no_output_____"
]
],
[
[
"Now, we need to find the GTIs for each data set (front and back). This is accessed within python via the `maketime` object:",
"_____no_output_____"
]
],
[
[
"# Front\nmy_apps.maketime['scfile'] = './data/L181126210218F4F0ED2738_SC00.fits'\nmy_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'\nmy_apps.maketime['roicut'] = 'no'\nmy_apps.maketime['evfile'] = './data/3C279_front_filtered.fits'\nmy_apps.maketime['outfile'] = './data/3C279_front_filtered_gti.fits'",
"_____no_output_____"
],
[
"my_apps.maketime.run()",
"_____no_output_____"
]
],
[
[
"Similar for the back:",
"_____no_output_____"
]
],
[
[
"# Back\nmy_apps.maketime['evfile'] = './data/3C279_back_filtered.fits'\nmy_apps.maketime['outfile'] = './data/3C279_back_filtered_gti.fits'",
"_____no_output_____"
],
[
"my_apps.maketime.run()",
"_____no_output_____"
]
],
[
[
"# Livetime and Counts Cubes\n\n### Livetime Cube\n\nWe can now compute the livetime cube. We only need to do this once since in this case we made the exact same time cuts and used the same GTI filter on front and back datasets.",
"_____no_output_____"
]
],
[
[
"my_apps.expCube['evfile'] = './data/3C279_front_filtered_gti.fits'\nmy_apps.expCube['scfile'] = './data/L181126210218F4F0ED2738_SC00.fits'\nmy_apps.expCube['outfile'] = './data/3C279_front_ltcube.fits'\nmy_apps.expCube['zmax'] = 90\nmy_apps.expCube['dcostheta'] = 0.025\nmy_apps.expCube['binsz'] = 1",
"_____no_output_____"
],
[
"my_apps.expCube.run()",
"_____no_output_____"
]
],
[
[
"### Counts Cube\n\nThe counts cube is the counts from our data file binned in space and energy. All of the steps above use a circular ROI (or a cone, really).\n\nOnce you switch to binned analysis, you start doing things in squares. Your counts cube can only be as big as the biggest square that can fit in the circular ROI you already selected.\n\nWe start with front events:",
"_____no_output_____"
]
],
[
[
"my_apps.evtbin['evfile'] = './data/3C279_front_filtered_gti.fits'\nmy_apps.evtbin['outfile'] = './data/3C279_front_ccube.fits'\nmy_apps.evtbin['algorithm'] = 'CCUBE'\nmy_apps.evtbin['nxpix'] = 100\nmy_apps.evtbin['nypix'] = 100\nmy_apps.evtbin['binsz'] = 0.2\nmy_apps.evtbin['coordsys'] = 'CEL'\nmy_apps.evtbin['xref'] = 193.98\nmy_apps.evtbin['yref'] = -5.82\nmy_apps.evtbin['axisrot'] = 0\nmy_apps.evtbin['proj'] = 'AIT'\nmy_apps.evtbin['ebinalg'] = 'LOG'\nmy_apps.evtbin['emin'] = 100\nmy_apps.evtbin['emax'] = 500000\nmy_apps.evtbin['enumbins'] = 37",
"_____no_output_____"
],
[
"my_apps.evtbin.run()",
"_____no_output_____"
]
],
[
[
"And then for the back events:",
"_____no_output_____"
]
],
[
[
"my_apps.evtbin['evfile'] = './data/3C279_back_filtered_gti.fits'\nmy_apps.evtbin['outfile'] = './data/3C279_back_ccube.fits'",
"_____no_output_____"
],
[
"my_apps.evtbin.run()",
"_____no_output_____"
]
],
[
[
"# Exposure Maps\n\nThe binned exposure map is an exposure map binned in space and energy.\n\nWe first need to import the python version of `gtexpcube2`, which doesn't have a gtapp version by default. This is easy to do (you can import any of the command line tools into python this way). Then, you can check out the parameters with the `pars` function.",
"_____no_output_____"
]
],
[
[
"from GtApp import GtApp\nexpCube2= GtApp('gtexpcube2','Likelihood')\nexpCube2.pars()",
"_____no_output_____"
]
],
[
[
"Here, we generate exposure maps for the entire sky. ",
"_____no_output_____"
]
],
[
[
"expCube2['infile'] = './data/3C279_front_ltcube.fits'\nexpCube2['cmap'] = 'none'\nexpCube2['outfile'] = './data/3C279_front_BinnedExpMap.fits'\nexpCube2['irfs'] = 'P8R3_SOURCE_V2'\nexpCube2['evtype'] = '1'\nexpCube2['nxpix'] = 1800\nexpCube2['nypix'] = 900\nexpCube2['binsz'] = 0.2\nexpCube2['coordsys'] = 'CEL'\nexpCube2['xref'] = 193.98\nexpCube2['yref'] = -5.82\nexpCube2['axisrot'] = 0\nexpCube2['proj'] = 'AIT'\nexpCube2['ebinalg'] = 'LOG'\nexpCube2['emin'] = 100\nexpCube2['emax'] = 500000\nexpCube2['enumbins'] = 37",
"_____no_output_____"
],
[
"expCube2.run()",
"_____no_output_____"
],
[
"expCube2['infile'] = './data/3C279_front_ltcube.fits'\nexpCube2['outfile'] = './data/3C279_back_BinnedExpMap.fits'\nexpCube2['evtype'] = '2'",
"_____no_output_____"
],
[
"expCube2.run()",
"_____no_output_____"
]
],
[
[
"# Compute Source Maps\n\nThe source maps step convolves the LAT response with your source model, generating maps for each source in the model for use in the likelihood calculation.\n\nWe use the same [XML](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_input_model.xml) file as in the standard [binned likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/binned_likelihood_tutorial.html) analysis.\n\nYou should also download the recommended models for a normal point source analysis `gll_iem_v07.fits` and `iso_P8R3_SOURCE_V2_v1.txt`.\n\nThese three files can be downloaded by running the code cell below:",
"_____no_output_____"
]
],
[
[
"!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/gll_iem_v07.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/iso_P8R3_SOURCE_V2_v1.txt\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_input_model.xml",
"_____no_output_____"
],
[
"!mv *.xml ./data",
"_____no_output_____"
]
],
[
[
"Note that the files `gll_iem_v07.fits` and `iso_P8R3_SOURCE_V2_v1.txt` must be in your current working directory for the next steps to work.\n\nWe compute the front events:",
"_____no_output_____"
]
],
[
[
"my_apps.srcMaps['expcube'] = './data/3C279_front_ltcube.fits'\nmy_apps.srcMaps['cmap'] = './data/3C279_front_ccube.fits'\nmy_apps.srcMaps['srcmdl'] = './data/3C279_input_model.xml'\nmy_apps.srcMaps['bexpmap'] = './data/3C279_front_BinnedExpMap.fits'\nmy_apps.srcMaps['outfile'] = './data/3C279_front_srcmap.fits'\nmy_apps.srcMaps['irfs'] = 'P8R3_SOURCE_V2'\nmy_apps.srcMaps['evtype'] = '1'",
"_____no_output_____"
],
[
"my_apps.srcMaps.run()",
"_____no_output_____"
]
],
[
[
"And similarly, the back events:",
"_____no_output_____"
]
],
[
[
"my_apps.srcMaps['expcube'] = './data/3C279_front_ltcube.fits'\nmy_apps.srcMaps['cmap'] = './data/3C279_back_ccube.fits'\nmy_apps.srcMaps['srcmdl'] = './data/3C279_input_model.xml'\nmy_apps.srcMaps['bexpmap'] = './data/3C279_back_BinnedExpMap.fits'\nmy_apps.srcMaps['outfile'] = './data/3C279_back_srcmap.fits'\nmy_apps.srcMaps['irfs'] = 'P8R3_SOURCE_V2'\nmy_apps.srcMaps['evtype'] = '2'",
"_____no_output_____"
],
[
"my_apps.srcMaps.run()",
"_____no_output_____"
]
],
[
[
"# Run the Likelihood Analysis\n\nFirst, import the BinnedAnalysis and SummedAnalysis libraries. Then, create a likelihood object for both the front and the back datasets. For more details on the pyLikelihood module, check out the [pyLikelihood Usage Notes](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/python_usage_notes.html).",
"_____no_output_____"
]
],
[
[
"import pyLikelihood\nfrom BinnedAnalysis import *\nfrom SummedLikelihood import *\n\nfront = BinnedObs(srcMaps='./data/3C279_front_srcmap.fits',binnedExpMap='./data/3C279_front_BinnedExpMap.fits',expCube='./data/3C279_front_ltcube.fits',irfs='CALDB')\nlikefront = BinnedAnalysis(front,'./data/3C279_input_model.xml',optimizer='NewMinuit')\nback = BinnedObs(srcMaps='./data/3C279_back_srcmap.fits',binnedExpMap='./data/3C279_back_BinnedExpMap.fits',expCube='./data/3C279_front_ltcube.fits',irfs='CALDB')\nlikeback = BinnedAnalysis(back,'./data/3C279_input_model.xml',optimizer='NewMinuit')",
"_____no_output_____"
]
],
[
[
"Then, create the summedlikelihood object and add the two likelihood objects, one for the front selection and the second for the back selection.",
"_____no_output_____"
]
],
[
[
"summed_like = SummedLikelihood()\nsummed_like.addComponent(likefront)\nsummed_like.addComponent(likeback)",
"_____no_output_____"
]
],
[
[
"Perform the fit and print out the results:",
"_____no_output_____"
]
],
[
[
"summedobj = pyLike.NewMinuit(summed_like.logLike)\nsummed_like.fit(verbosity=0,covar=True,optObject=summedobj)",
"_____no_output_____"
]
],
[
[
"Print TS for 3C 279 (4FGL J1256.1-0547):",
"_____no_output_____"
]
],
[
[
"summed_like.Ts('4FGL J1256.1-0547')",
"_____no_output_____"
]
],
[
[
"We can now compare to the standard [binned likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/binned_likelihood_tutorial.html) analysis that uses only one data set containing both Front and Back event types that are represented by a single, combined IRF set. You will need to download the files created in that analysis thread or rerun this python tutorial with the combined dataset `(evtype=3)`.\n\nFor your convenience, the files can be obtained from the code cell below:",
"_____no_output_____"
]
],
[
[
"!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_srcmaps.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_allsky_expcube.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_ltcube.fits",
"_____no_output_____"
],
[
"!mv 3C279*.fits ./data",
"_____no_output_____"
],
[
"all = BinnedObs(srcMaps='./data/3C279_binned_srcmaps.fits',binnedExpMap='./data/3C279_binned_allsky_expcube.fits',expCube='./data/3C279_binned_ltcube.fits',irfs='CALDB')\nlikeall = BinnedAnalysis(all,'./data/3C279_input_model.xml',optimizer='NewMinuit')",
"_____no_output_____"
]
],
[
[
"Perform the fit and print out the results:",
"_____no_output_____"
]
],
[
[
"likeallobj = pyLike.NewMinuit(likeall.logLike)\nlikeall.fit(verbosity=0,covar=True,optObject=likeallobj)",
"_____no_output_____"
]
],
[
[
"Print TS for 3C 279 (4FGL J1256.1-0547):",
"_____no_output_____"
]
],
[
[
"likeall.Ts('4FGL J1256.1-0547')",
"_____no_output_____"
]
],
[
[
"The TS for the front + back analysis is 29261.558, a bit lower than what we found for the separate front and back analysis 30191.550.\n\nThe important difference is that in the separated version of the analysis each event type has a dedicated response function set instead of using the averaged Front+Back response. This should increase the sensitivity, and therefore, the TS value.",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb744f6a27c7ab7ec9f05287468d68d4fee1fedc | 20,176 | ipynb | Jupyter Notebook | 02_fundamentos_pandas/notebook/09_organizando_dataframes.ipynb | eltonrp/formacao_Data_Science | 278c807740d0ba0e8294937181b41076a1697622 | [
"MIT"
]
| null | null | null | 02_fundamentos_pandas/notebook/09_organizando_dataframes.ipynb | eltonrp/formacao_Data_Science | 278c807740d0ba0e8294937181b41076a1697622 | [
"MIT"
]
| null | null | null | 02_fundamentos_pandas/notebook/09_organizando_dataframes.ipynb | eltonrp/formacao_Data_Science | 278c807740d0ba0e8294937181b41076a1697622 | [
"MIT"
]
| null | null | null | 23.764429 | 102 | 0.334952 | [
[
[
"# Organizando Dataframes",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\ndata",
"_____no_output_____"
],
[
"#cria uma lista com cada caracter\nlist('321')",
"_____no_output_____"
]
],
[
[
"## Index e Columns em Dataframes",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(data, list('321'), list('ZYX'))\ndf",
"_____no_output_____"
]
],
[
[
"### Organizando o index",
"_____no_output_____"
]
],
[
[
"# organiza o index e implementa a função dentro do Dataframe\ndf.sort_index(inplace=True)\ndf",
"_____no_output_____"
],
[
"# organiza o index, grava a função no df e organiza as colunas\n# o parâmetro axis=1 que organiza as colunas\ndf.sort_index(inplace=True, axis=1)\ndf",
"_____no_output_____"
]
],
[
[
"### Escolhendo Parâmetro para Organizar\n* função => sort_values()",
"_____no_output_____"
]
],
[
[
"# pega a coluna X como refeência para colocar em ordem\ndf.sort_values(by='X', inplace=True)\ndf",
"_____no_output_____"
],
[
"# organiza com base na linha 3\ndf.sort_values(by='3', axis=1, inplace=True)\ndf",
"_____no_output_____"
],
[
"# também é possível passar uma lista como parâmetro\n# se a lista for para linhas, deve acompanhar o parâmetro axis=1\ndf.sort_values(by=['X', 'Y'])",
"_____no_output_____"
]
],
[
[
"## Exercícios",
"_____no_output_____"
],
[
"### 1. Considere o seguinte DataFrame nomeado como df:\nZ=9,6,3 \nY=8,5,2 \nX=7,4,1 \nColunas = C,B,A \nApós executar o seguinte conjunto de rotinas no Jupyter: \ndf.sort_index() \ndf.sort_index(axis = 1) \ndf \nQual o resultado obtido? ",
"_____no_output_____"
]
],
[
[
"dados2 = [[9,6,3],[8,5,2],[7,4,1]]\nde = pd.DataFrame(dados2, list('ZYX'), list('CBA'))\nde.sort_index()\nde.sort_index(axis = 1)\nde",
"_____no_output_____"
]
],
[
[
"### 2.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"df_A = pd.DataFrame([['Ary', 'M', 21], ['Cátia', 'F', 19],\n ['Carlos', 'M', 50], ['Beto', 'M', 29],\n ['Bruna', 'F', 31], ['Ana', 'F', 42]], columns=['Nome', 'Sexo', 'Idade'])\ndf_A",
"_____no_output_____"
],
[
"df_B = df_A.sort_values(by=['Sexo', 'Nome'])\ndf_B",
"_____no_output_____"
]
],
[
[
"### Observações\n* se chamar primeiro a variável 'Nome' ele irá gerar outro resultado\n* será organizado apenas os nomes por ordem alfabética e não será organizado o sexo\n* deve-se tomar esse tipo de cuidado ao organizar um dataframe",
"_____no_output_____"
]
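],
[
[
"### Added illustrative example (not part of the original lesson): with the key order reversed,\n### the names are simply alphabetized and the rows are no longer grouped by 'Sexo'.\ndf_A.sort_values(by=['Nome', 'Sexo'])",
"_____no_output_____"
]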
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
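,
[
"code"
]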
]
|
cb74516694ba6d09b1b88c57dd4d72b47cfd5f48 | 22,721 | ipynb | Jupyter Notebook | 2-Randomised-Experiments.ipynb | mariusgruenewald/python-causality-handbook | 12057268a64adee678a78a66ceb18ac6d827ad72 | [
"MIT"
]
| 1 | 2022-01-09T20:58:02.000Z | 2022-01-09T20:58:02.000Z | 2-Randomised-Experiments.ipynb | mariusgruenewald/python-causality-handbook | 12057268a64adee678a78a66ceb18ac6d827ad72 | [
"MIT"
]
| null | null | null | 2-Randomised-Experiments.ipynb | mariusgruenewald/python-causality-handbook | 12057268a64adee678a78a66ceb18ac6d827ad72 | [
"MIT"
]
| 2 | 2020-07-24T12:36:11.000Z | 2021-08-13T22:18:27.000Z | 53.086449 | 717 | 0.570265 | [
[
[
"## The Golden Standard\n\nIn the previous session, we saw why and how association is different from causation. We also saw what is required to make association be causation.\n\n$\nE[Y|T=1] - E[Y|T=0] = \\underbrace{E[Y_1 - Y_0|T=1]}_{ATET} + \\underbrace{\\{ E[Y_0|T=1] - E[Y_0|T=0] \\}}_{BIAS}\n$\n\n\nTo recap, association becomes causation if there is no bias. There will be no bias if \\\\(E[Y_0|T=0]=E[Y_0|T=1]\\\\). In words, association will be causation if the treated and control are equal, or comparable, unless for the treatment they receive. Or, in more technical words, when the outcome of the untreated is equal to the counterfactual outcome of the treated. Remember that this counterfactual outcome is the outcome of the treated group if they had not received the treatment.\n\nI think we did an OK job explaining in math terms how to make association equal to causation. But that was only in theory. Now, we look at the first tool we have to make the bias vanish: **Randomised Experiments**. Randomised experiments consist of randomly assigning individuals in a population to the treatment or to a control group. The proportion that receives the treatment doesn't have to be 50%. You could have an experiment where only 10% of your samples get the treatment.\n\nRandomisation annihilates bias by making the potential outcomes independent of the treatment.\n\n$\n(Y_0, Y_1) \\perp\\!\\!\\!\\perp T\n$\n\nThis can be confusing at first. If the outcome is independent of the treatment, doesn't it mean that the treatment has no effect? Well, yes! but notice I'm not talking about the outcomes. Rather, I'm talking about the **potential** outcomes. The potential outcomes is how the outcome **would have been** under the treatment (\\\\(Y_1\\\\)) or under the control (\\\\(Y_0\\\\)). In randomized trials, we **don't** want the outcome to be dependent on the treatment, since we think the treatment causes the outcome. But we want the **potential** outcomes to be independent from the treatment. \n\n\n\nSaying that the potential outcomes are independent from the treatment is saying that they would be, in expectation, the same in the treatment or the control group. In simpler terms, it means that treatment and control are comparable. Or that knowing the treatment assignment doesn't give me any information on how the outcome was previous to the treatment. Consequently, \\\\((Y_0, Y_1)\\perp T\\\\) means that the treatment is the only thing that is generating a difference between the outcome in the treated and in the control. To see this, notice that independence implies precisely that that\n\n$\nE[Y_0|T=0]=E[Y_0|T=1]=E[Y_0]\n$\n\nWhich, as we've seen, makes it so that\n\n$\nE[Y|T=1] - E[Y|T=0] = E[Y_1 - Y_0]=ATE\n$\n\nSo, randomization gives us a way to use a simple difference in means between treatment and control and call that the treatment effect.\n\n\n## In a School Far, Far Away\n\nIn the year of 2020, the Coronavirus Pandemic forced business to adapt to social distancing. Delivery services became widespread, big corporations shifted to a remote work strategy. With schools, it wasn't different. Many started their own online repository of classes. \n\nFour months into the crises and many are wondering if the introduced changes could be maintained. There is no question that online learning has its benefits. For once, it is cheaper, since it can save on real estate and transportation. It can also me more digital, leveraging world class content from around the globe, not just from a fixed set of teachers. 
In spite all of that, we still need to answer if online learning has or not a negative or positive impact in the student's academic performance.\n\nOne way to answer that is to take students from schools that give mostly online classes and compare them with students from schools that give lectures in traditional classrooms. As we know by now this is not the best approach. It could be that online schools attract only the well disciplined students that do better than average even if the class where presential. In this case, we would have a positive bias, where the treated are academically better than the untreated: \\\\(E[Y_0|T=1] > E[Y_0|T=0]\\\\).\n\nOr, on the flip side, it could be that online classes are cheaper and are composed mostly of less wealthy students, who might have to work besides studying. In this case, these students would do worse than those from the presidential schools even if they took presential classes. If this was the case, we would have bias in the other direction, where the treated are academically worse than the untreated: \\\\(E[Y_0|T=1] < E[Y_0|T=0]\\\\). \n\nSo, although we could do simple comparisons, it wouldn't be very convincing. One way or another, we could never be sure if there wasn't any bias lurking around and masking our causal effect.\n\n\n\nTo solve that, we need to make the treated and untreated comparable \\\\(E[Y_0|T=1] = E[Y_0|T=0]\\\\). One way to force this, is by randomly assigning the online and presential classes to students. If we managed to do that, the treatment and untreated would be, on average, the same, unless for the treatment they receive. \n\nFortunately, some economists have done that for us. They randomized not the students, but the classes. Some of them were randomly assigned to have face-to-face lectures, others, to have only online lectures and a third group, to have a blended format of both online and face-to-face lectures. At the end of the semester, they collected data on a standard exam.\n\nHere is what the data looks like:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"./data/online_classroom.csv\")\nprint(data.shape)\ndata.head()",
"(323, 10)\n"
]
],
[
[
"We can see that we have 323 samples. It's not exactly big data, but is something we can work with. To estimate the causal effect, we can simply compute the mean score for each of the treatment groups.",
"_____no_output_____"
]
],
[
[
"(data\n .assign(class_format = np.select(\n [data[\"format_ol\"].astype(bool), data[\"format_blended\"].astype(bool)],\n [\"online\", \"blended\"],\n default=\"face_to_face\"\n ))\n .groupby([\"class_format\"])\n .mean())",
"_____no_output_____"
]
],
[
[
"Yup. It's that simple. We can see that face to face classes yield a 78.54 average score, while online classes yield a 73.63 average score. Not so good news for the proponents of online learning. The \\\\(ATE\\\\) for online class is thus -4.91. This means that online classes cause students to perform about 5 points lower, on average. That's it. You don't need to worry that online classes might have poorer students that can't afford face to face classes or, for that matter, you don't have to worry that the students from the different treatments are different in any way other than the treatment they received. By design, the random experiment is made to wipe out those differences. \n\nFor this reason, a good sanity check to see if the randomisation was done right (or if you are looking at the right data) is to check if the treated are equal to the untreated in pre-treatment variables. In our data, we have information on gender and ethnicity, so we can see if they are equal across groups. For the `gender`, `asian`, `hispanic` and `white` variables, we can say that they look pretty similar. The `black` variable, however, looks a little bit different. This draws attention to what happens with a small dataset. Even under randomisation, it could be that, by chance, one group is different from another. In large samples, this difference tends to disappear.\n\n## The Ideal Experiment\n\nRandomised experiments are the most reliable way to get causal effects. It is a ridiculously simple technique and absurdly convincing. It is so powerful that most countries have it as a requirement for showing the effectiveness of new medicine. To make a terrible analogy, you can think of RCT and Aang, from Avatar: The Last Airbender, while other techniques are more like Sokka. He is cool and can pull some neat tricks here and there, but Aang can bend the four elements and connect with the spiritual world. Think of it this way, if we could, RCT would be all we would ever do to uncover causality. A well designed RCT is the dream of any scientist.\n\n\n\nUnfortunately, they tend to be either very expensive or just plain unethical. Sometimes, we simply can't control the assignment mechanism. Imagine yourself as a physician trying to estimate the effect of smoking during pregnancy on baby weight at birth. You can't simply force a random portion of moms to smoke during pregnancy. Or say you work for a big bank and you need to estimate the impact of the credit line on customer churn. It would be too expensive to give random credit lines to your customers. Or that you want to understand the impact of increasing minimum wage on unemployment. You can't simply assign countries to have one or another minimum wage.\n\nWe will later see how to lower the randomisation cost by using conditional randomisation, but there is nothing we can do about unethical or unfeasible experiments. Still, whenever we deal with causal questions, it is worth thinking about the **ideal experiment**. Always ask yourself, if you could, **what would be the ideal experiment you would run to uncover this causal effect?**. This tends to shed some light in the way of how we can uncover the causal effect even without the ideal experiment.\n\n\n## The Assignment Mechanism\n\nIn a randomised experiment, the mechanism that assigns unit to one treatment or the other is, well, random. As we will see later, all causal inference techniques will somehow try to identify the assignment mechanisms of the treatments. 
When we know for sure how this mechanism behaves, causal inference will be much more certain, even if the assignment mechanism isn't random.\n\nUnfortunately, the assignment mechanism can't be discovered by simply looking at the data. For example, if you have a dataset where higher education correlates with wealth, you can't know for sure which one caused which by just looking at the data. You will have to use your knowledge about how the world works to argue in favor of a plausible assignment mechanism: is it the case that schools educate people, making them more productive and hence leading them to higher paying jobs. Or, if you are pessimistic about education, you can say that schools do nothing to increase productivity and this is just a spurious correlation because only wealthy families can afford to have a kid getting a higher degree.\n\nIn causal questions, we usually have the possibility to argue in both ways: that X causes Y, or that it is a third variable Z that causes both X and Y, and hence the X and Y correlation is just spurious. It is for this reason that knowing the assignment mechanism leads to a much more convincing causal answer. \n\n\n## Key Ideas\n\nWe looked at how randomised experiments are the simplest and most effective way to uncover causal impact. It does this by making the treatment and control group comparable. Unfortunately, we can't do randomised experiments all the time, but it is still useful to think about what is the ideal experiment we would do if we could.\n\nSomeone that is familiar with statistics might be protesting right now that I didn't look at the variance of my causal effect estimate. How can I know that a 4.91 points decrease is not due to chance? In other words, how can I know if the difference is statistically significant? And they would be right. Don't worry. I intend to review some statistical concepts next. \n\n\n## References\n\nI like to think of this entire series as a tribute to Joshua Angrist, Alberto Abadie and Christopher Walters for their amazing Econometrics class. Most of the ideas here are taken from their classes at the American Economic Association. Watching them is what is keeping me sane during this tough year of 2020.\n* [Cross-Section Econometrics](https://www.aeaweb.org/conference/cont-ed/2017-webcasts)\n* [Mastering Mostly Harmless Econometrics](https://www.aeaweb.org/conference/cont-ed/2020-webcasts)\n\nI'll also like to reference the amazing books from Angrist. They have shown me that Econometrics, or 'Metrics as they call it, is not only extremely useful but also profoundly fun.\n\n* [Mostly Harmless Econometrics](https://www.mostlyharmlesseconometrics.com/)\n* [Mastering 'Metrics](https://www.masteringmetrics.com/)\n\nMy final reference is Miguel Hernan and Jamie Robins' book. It has been my trustworthy companion in the most thorny causal questions I had to answer.\n\n* [Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/)\n\nThe data used here is from a study of Alpert, William T., Kenneth A. Couch, and Oskar R. Harmon. 2016. [\"A Randomized Assessment of Online Learning\"](https://www.aeaweb.org/articles?id=10.1257/aer.p20161057). American Economic Review, 106 (5): 378-82.\n\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb7451b0dcfe9de870b564ec76353da7348c2a7b | 2,273 | ipynb | Jupyter Notebook | News Scraping/standarlize.ipynb | nguyendaiky/CS114.L21-1 | 6d9aa22bc4405f48f1fd7e49b2c1852db9ea0bc7 | [
"MIT"
]
| 1 | 2021-03-07T00:54:10.000Z | 2021-03-07T00:54:10.000Z | News Scraping/standarlize.ipynb | levanphuoc-dev/CS114.L21 | cf9ea8012c28f494faf107357c6f98aba6ffbbbb | [
"MIT"
]
| null | null | null | News Scraping/standarlize.ipynb | levanphuoc-dev/CS114.L21 | cf9ea8012c28f494faf107357c6f98aba6ffbbbb | [
"MIT"
]
| 8 | 2021-03-07T00:46:45.000Z | 2021-08-20T15:50:28.000Z | 24.180851 | 100 | 0.57941 | [
[
[
"import pandas",
"_____no_output_____"
],
[
"# 19521225\ndf_19521225 = pandas.read_csv('./19521225/news/all.csv')",
"_____no_output_____"
],
[
"# 19521731\ndf_ClickHole = pandas.read_csv('./19521731/news/crawled_data/ClickHole.csv')\ndf_NewYorker = pandas.read_csv('./19521731/news/crawled_data/NewYorker.csv')\ndf_TheBabylonBee = pandas.read_csv('./19521731/news/crawled_data/TheBabylonBee.csv')\n\ndf_ClickHole = df_ClickHole.iloc[:, 1:]\ndf_NewYorker = df_NewYorker.iloc[:, 1:]\ndf_TheBabylonBee = df_TheBabylonBee.iloc[:, 1:]\n\ndf_19521731 = pandas.concat([df_ClickHole, df_TheBabylonBee, df_NewYorker])",
"_____no_output_____"
],
[
"# 19522054\ndf_0 = pandas.read_csv('./19522054/news/data_0.csv')\ndf_1 = pandas.read_csv('./19522054/news/data_1.csv')\n\ndf_19522054 = pandas.concat([df_0, df_1])",
"_____no_output_____"
],
[
"df_result = pandas.concat([df_19522054, df_19521225, df_19521731]).iloc[:, :-1]",
"_____no_output_____"
],
[
"df_result.to_json('data.json', orient='records')\ndf_result.to_csv('data.csv', index=False)",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb746753ff46bfa04bc32a0969bf5432e967bdbc | 100,652 | ipynb | Jupyter Notebook | notebooks/miscellaneous/Pull_models_basics.ipynb | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
]
| 5 | 2021-03-24T16:23:10.000Z | 2021-11-17T12:44:51.000Z | notebooks/miscellaneous/Pull_models_basics.ipynb | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
]
| 3 | 2021-03-26T01:16:08.000Z | 2021-05-08T22:06:47.000Z | notebooks/miscellaneous/Pull_models_basics.ipynb | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
]
| 2 | 2021-03-24T17:20:06.000Z | 2021-04-19T09:01:12.000Z | 25.169292 | 175 | 0.522871 | [
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nfrom scipy.stats import poisson, norm",
"_____no_output_____"
],
[
"def compute_scaling_ratio(mu_drain,mu_demand,drift_sd,init_state):\n drain_time = init_state/(mu_drain-mu_demand)\n accum_std = drift_sd*np.sqrt(drain_time)\n ratio = accum_std/init_state\n \n return ratio",
"_____no_output_____"
],
[
"def compute_workloads(arrival_buffer,inter_buffer,drain_buffer):\n workload_1= arrival_buffer/(mu_drain/2)+(inter_buffer+drain_buffer)/(mu_drain)\n workload_2 = (inter_buffer+arrival_buffer)/(mu_fast)\n return workload_1, workload_2\n \ndef compute_draining_times(arrival_buffer,inter_buffer,drain_buffer):\n workload_1, workload_2 = compute_workloads(arrival_buffer,inter_buffer,drain_buffer)\n drain_time_1= workload_1/(1-mu_demand*2/mu_drain)\n drain_time_2 = workload_2/(1-mu_demand/mu_fast)\n return drain_time_1, drain_time_2",
"_____no_output_____"
],
[
"def simulate_single_buffer_pull(feed_sequence,\n demand_sequence,\n h_thres,\n init_state,\n flow,\n init_with_zeros = False):\n \n demand_buffer = np.zeros(len(feed_sequence)+1) \n demand_buffer[0] = init_state if not init_with_zeros else 0\n\n for i,(f,d) in enumerate(zip(feed_sequence,demand_sequence)):\n \n if demand_buffer[i] > h_thres:\n f = 0 \n \n demand_buffer[i+1] = demand_buffer[i]+f-d\n\n \n return demand_buffer",
"_____no_output_____"
],
[
"def simulate_double_buffer_pull(feed_sequence_1,\n feed_sequence_2,\n demand_sequence_1,\n demand_sequence_2,\n h_thres_1,\n h_thres_2,\n sf_thres_1,\n sf_thres_2,\n sf_1):\n \n buffer_1 = np.zeros(len(feed_sequence_1)+1)\n buffer_2 = np.zeros(len(feed_sequence_1)+1)\n \n buffer_1[0] = 300\n buffer_2[0] = 200\n \n for i,(f1,f2,d1,d2) in enumerate(zip(feed_sequence_1,feed_sequence_2,demand_sequence_1,demand_sequence_2)):\n z1 = 0\n z2 = 0\n \n if sf_1:\n if buffer_2[i] <= sf_thres_2:\n z1 = 0\n z2 = 1\n\n if buffer_1[i] <= sf_thres_1:\n z1 = 1\n z2 = 0 \n else:\n \n if buffer_1[i] <= sf_thres_1:\n z1 = 1\n z2 = 0\n \n if buffer_2[i] <= sf_thres_2:\n z1 = 0\n z2 = 1\n \n if buffer_2[i] <= h_thres_2 and z1 == 0:\n z2 = 1\n \n if buffer_1[i] <= h_thres_1 and z2 == 0:\n z1 = 1\n \n #if i % 2 == 0:\n # z1 = 1\n # z2 = 0\n #else:\n # z1 = 0\n # z2 = 1\n \n #if buffer_2[i] > h_thres_2:\n # z2 = 0\n \n #if buffer_1[i] > h_thres_1:\n # z1 = 0 \n \n assert z1+z2 < 2\n \n buffer_1[i+1] = buffer_1[i]+z1*f1-d1\n buffer_2[i+1] = buffer_2[i]+z2*f2-d2\n \n return buffer_1,buffer_2",
"_____no_output_____"
],
[
"def simulate_tandem_buffer_pull(feed_sequence_1,\n feed_sequence_2,\n demand_sequence,\n h_thres_1,\n h_thres_2):\n \n buffer_1 = np.zeros(len(feed_sequence_1)+1)\n buffer_2 = np.zeros(len(feed_sequence_1)+1)\n \n buffer_1[0] = h_thres_1\n buffer_2[0] = 0\n \n for i,(f1,f2,d) in enumerate(zip(feed_sequence_1,feed_sequence_2,demand_sequence)):\n z1 = 1\n z2 = 1\n \n if buffer_2[i] > h_thres_2:\n z2 = 0\n \n if buffer_1[i] > h_thres_1:\n z1 = 0\n \n f2 = min(f2,buffer_1[i])\n \n assert z1*f1 <= 1\n assert z2*f2 <= 1\n \n \n buffer_1[i+1] = buffer_1[i]+z1*f1-z2*f2\n assert buffer_1[i+1] >= 0\n buffer_2[i+1] = buffer_2[i]+z2*f2-d\n \n return buffer_1,buffer_2",
"_____no_output_____"
],
[
"mu_demand = 0.33\nmu_feed_1 = 0.34\nmu_feed_2 = 0.34\nduration = int(1e6)\nnp.random.seed(100)\ndemand_seq = np.random.binomial(1,mu_demand,duration)\nfeed_seq_1 = np.random.binomial(1,mu_feed_1,duration)\nfeed_seq_2 = np.random.binomial(1,mu_feed_2,duration)",
"_____no_output_____"
],
[
"c_s = 1\nc_d = 10",
"_____no_output_____"
],
[
"0.33/0.34",
"_____no_output_____"
],
[
"0.33/(0.005),0.33/(0.5-0.33)",
"_____no_output_____"
],
[
"buffer_1,buffer_2 = simulate_tandem_buffer_pull(feed_seq_1,feed_seq_2,demand_seq,50,55)\nh_optimal = np.percentile(-buffer_2,1000/11)",
"_____no_output_____"
],
[
"h_range = range(20,80,5)\ndeficit_cost = np.zeros_like(h_range)\nsurplus_cost = np.zeros_like(h_range)\nsf_cost = np.zeros_like(h_range)\nfor i,h1 in enumerate(h_range):\n print(i)\n\n buffer_1,buffer_2 = simulate_tandem_buffer_pull(feed_seq_1,feed_seq_2,demand_seq,h1,0)\n\n h_optimal = np.percentile(-buffer_2,1000/11)\n surplus = np.where(buffer_2+h_optimal >= 0,buffer_2+h_optimal,0)\n deficit = np.where(buffer_2+h_optimal < 0,buffer_2+h_optimal,0)\n deficit_cost[i] = np.sum(-deficit)*c_d\n surplus_cost[i] = np.sum(surplus)*c_s\n sf_cost[i] = np.sum(buffer_1)*c_s",
"_____no_output_____"
],
[
"mu_range = np.arange(0.34,0.55,0.01)\ndeficit_cost = np.zeros_like(mu_range)\nsurplus_cost = np.zeros_like(mu_range)\nsf_cost = np.zeros_like(mu_range)\nfor i,mu in enumerate(mu_range):\n print(i)\n np.random.seed(100)\n \n feed_seq_1 = np.random.binomial(1,mu,duration)\n buffer_1,buffer_2 = simulate_tandem_buffer_pull(feed_seq_1,feed_seq_2,demand_seq,500,0)\n a = np.percentile(buffer_1,1)\n print(a)\n \n\n h_optimal = np.percentile(-buffer_2,1000/11)\n surplus = np.where(buffer_2+h_optimal >= 0,buffer_2+h_optimal,0)\n deficit = np.where(buffer_2+h_optimal < 0,buffer_2+h_optimal,0)\n deficit_cost[i] = np.sum(-deficit)*c_d\n surplus_cost[i] = np.sum(surplus)*c_s\n sf_cost[i] = np.sum(np.maximum(buffer_1-a,0))\n\n #cost = np.sum(surplus)*c_s + np.sum(-deficit)*c_d + np.sum(buffer_1)*c_s\n \n\n",
"_____no_output_____"
],
[
"mu_range",
"_____no_output_____"
],
[
"plt.plot((mu_range-0.33)/0.01,sf_cost/min(sf_cost))\nplt.plot((mu_range-0.33)/0.01,sf_cost/min(sf_cost),\".\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,6))\nplt.plot(0.33/mu_range,sf_cost/min(sf_cost))\nplt.plot(0.33/mu_range,sf_cost/min(sf_cost),\".\")\nplt.vlines(0.97,0,10,label=\"mu_2 load = 0.97\")\nplt.xlabel(\"mu_1 load\")\nplt.ylabel(\"Relative cost\")\nplt.legend()",
"_____no_output_____"
],
[
"h_optimal = np.percentile(-buffer_2,1000/11)\na = []\nfor i in range(-10,10):\n surplus = np.where(buffer_2+h_optimal+i >= 0,buffer_2+h_optimal+i,0)\n deficit = np.where(buffer_2+h_optimal+i < 0,buffer_2+h_optimal+i,0)\n a.append(np.sum(-deficit)*c_d + np.sum(surplus)*c_s)",
"_____no_output_____"
],
[
"plt.plot(range(-10,10),a)",
"_____no_output_____"
],
[
"sf_cost",
"_____no_output_____"
],
[
"h_optimal = np.percentile(-buffer_2,1000/11)",
"_____no_output_____"
],
[
"#plt.plot(h_range,sf_cost)\n\n#norm = np.min(deficit_cost+surplus_cost)\nplt.figure(figsize=(10,8))\n\nplt.fill_between(h_range,sf_cost/norm,label=\"safety_stocks_cost\")\nplt.plot(h_range,sf_cost/norm,\"k.\")\nplt.fill_between(h_range,(surplus_cost+sf_cost)/norm,sf_cost/norm,label=\"surplus cost\")\nplt.plot(h_range,(surplus_cost+sf_cost)/norm,\"k.\")\nplt.fill_between(h_range,(surplus_cost+sf_cost)/norm,(deficit_cost+surplus_cost+sf_cost)/norm,label=\"deficit cost\")\nplt.plot(h_range,(deficit_cost+surplus_cost+sf_cost)/norm,\"k.\")\nplt.hlines(1,20,75,\"k\",label=\"infinite supply reference\")\nplt.legend()",
"_____no_output_____"
],
[
"max(buffer_2)",
"_____no_output_____"
],
[
"np.percentile(buffer_2,1)",
"_____no_output_____"
],
[
"a = plt.hist(-buffer_2,bins=range(-1,600))",
"_____no_output_____"
],
[
"a = plt.hist(-buffer_2,bins=range(-1,200))",
"_____no_output_____"
],
[
"h_optimal",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,4))\nplt.plot(buffer_2,label=\"buffer 2\")\nplt.plot(buffer_1,label=\"buffer 1\")\n#plt.plot(buffer_2,label=\"buffer 2\")\nplt.legend()",
"_____no_output_____"
],
[
"x3 = buffer_2",
"_____no_output_____"
],
[
"x2 = buffer_2",
"_____no_output_____"
],
[
"x1 = buffer_2",
"_____no_output_____"
],
[
"c,d = np.histogram(x2,bins=range(-150,0))",
"_____no_output_____"
],
[
"#plt.plot(b[:-1],np.log(a))\nplt.plot(d[:-1],np.log10(c))\nplt.plot(e[:-1],np.log10(f))",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,4))\n#a = plt.hist(buffer_2,bins=range(-150,10),label = \"30\")\na = plt.hist(-x3,bins=range(-1,150),alpha = 1,label=\"non-limiting\")\na = plt.hist(-x2,bins=range(-1,150),alpha = 0.75,label=\"45\")\na = plt.hist(-x1,bins=range(-1,200),alpha=0.5,label=\"25\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,4))\n#a = plt.hist(buffer_2,bins=range(-150,10),label = \"30\")\na = plt.hist(-x1,bins=range(-1,250),alpha=1,label=\"25\")\na = plt.hist(-x2,bins=range(-1,150),alpha = 0.75,label=\"45\")\na = plt.hist(-x3,bins=range(-1,150),alpha = 0.5,label=\"non-limiting\")\n#a = plt.hist(buffer_2,bins=range(-100,50))\nplt.",
"_____no_output_____"
],
[
"mu_demand = 0.33\nmu_feed = 0.68\nduration = int(1e5)\ndemand_seq_1 = np.random.binomial(1,mu_demand,duration)\ndemand_seq_2 = np.random.binomial(1,mu_demand,duration)\nfeed_seq_1 = np.random.binomial(1,mu_feed,duration)\nfeed_seq_2 = np.random.binomial(1,mu_feed,duration)",
"_____no_output_____"
],
[
"buffer_1,buffer_2 = simulate_double_buffer_pull(feed_seq_1,feed_seq_2,\n demand_seq_1, demand_seq_2,\n 30,3,3,3,sf_1=True)\n\nplt.figure(figsize=(10,4))\nplt.plot(buffer_1,label=\"buffer 1\")\nplt.plot(buffer_2,label=\"buffer 2\")\nplt.legend()",
"_____no_output_____"
],
[
"buffer_1,buffer_2 = simulate_double_buffer_pull(feed_seq_1,feed_seq_2,\n demand_seq_1, demand_seq_2,\n 30,3,3,3,False)\n\nplt.figure(figsize=(10,4))\nplt.plot(buffer_1,label=\"buffer 1\")\nplt.plot(buffer_2,label=\"buffer 2\")\nplt.legend()",
"_____no_output_____"
],
[
"buffer_1,buffer_2 = simulate_double_buffer_pull(feed_seq_1,feed_seq_2,\n demand_seq_1, demand_seq_2,\n 3,30,3,3,True)\n\nplt.figure(figsize=(10,4))\nplt.plot(buffer_1,label=\"buffer 1\")\nplt.plot(buffer_2,label=\"buffer 2\")\nplt.legend()",
"_____no_output_____"
],
[
"buffer_1,buffer_2 = simulate_double_buffer_pull(feed_seq_1,feed_seq_2,\n demand_seq_1, demand_seq_2,\n 3,30,3,3,False)\n\nplt.figure(figsize=(10,4))\nplt.plot(buffer_1,label=\"buffer 1\")\nplt.plot(buffer_2,label=\"buffer 2\")\nplt.legend()",
"_____no_output_____"
],
[
"mu_demand = 0.33\nmu_feed = 0.34",
"_____no_output_____"
],
[
"c_s = 1\nc_d = 10",
"_____no_output_____"
],
[
"duration = int(1e5)\ndemand_seq = np.random.binomial(1,mu_demand,duration)\nfeed_seq = np.random.binomial(1,mu_feed,duration)",
"_____no_output_____"
],
[
"demand_buffer = simulate_single_buffer_pull(feed_seq,demand_seq,60,0,False)\nsurplus = np.where(demand_buffer >= 0,demand_buffer,0)\ndeficit = np.where(demand_buffer < 0,demand_buffer,0)",
"_____no_output_____"
],
[
"plt.plot(demand_buffer)",
"_____no_output_____"
],
[
"#plt.plot(demand_buffer[:100000])\nplt.figure(figsize=(8,6))\n\nplt.fill_between(np.arange(len(surplus)),surplus,0)\nplt.fill_between(np.arange(len(surplus)),deficit,0)",
"_____no_output_____"
],
[
"cost = np.sum(surplus)*c_s + np.sum(-deficit)*c_d",
"_____no_output_____"
],
[
"cost_record = []\nhedging = np.arange(-5,140,5)\nhedging = np.arange(40,70,1)\nfor h in hedging:\n demand_buffer = simulate_single_buffer_pull(feed_seq,demand_seq,h,h,False)\n surplus = np.where(demand_buffer >= 0,demand_buffer,0)\n deficit = np.where(demand_buffer < 0,demand_buffer,0)\n cost = np.sum(surplus)*c_s + np.sum(-deficit)*c_d\n cost_record.append(cost)",
"_____no_output_____"
],
[
"f,ax = plt.subplots(2,1,figsize=(10,8),sharex=True)\nax[0].hist(-demand_buffer,bins=range(-20,140),normed=True)\nax[0].vlines(h_optimal,0,0.04)\n\nax[1].plot(hedging,cost_record/min(cost_record))\nax[1].plot(hedging,cost_record/min(cost_record),\"o\")\n\nax[1].vlines(h_optimal,1,1.1)",
"_____no_output_____"
],
[
"f,ax = plt.subplots(2,1,figsize=(10,8),sharex=True)\nax[0].hist(-demand_buffer,bins=range(-20,140),normed=True)\nax[0].vlines(h_optimal,0,0.04)\n\nax[1].plot(hedging,cost_record/min(cost_record))\nax[1].plot(hedging,cost_record/min(cost_record),\"o\")\n\nax[1].vlines(h_optimal,1,5)",
"_____no_output_____"
],
[
"1000/11",
"_____no_output_____"
],
[
"h_optimal",
"_____no_output_____"
],
[
"h_optimal = np.percentile(-demand_buffer,1000/11)",
"_____no_output_____"
],
[
"plt.hist(-demand_buffer,bins=range(120),normed=True)\nplt.vlines(h_optimal,0,0.04)",
"_____no_output_____"
],
[
"h_optimal = np.percentile(-demand_buffer,1000/11)\n#np.percentile(-demand_buffer,1000/11)",
"_____no_output_____"
],
[
"c1 = 1\nc2 = 2\nc3 = 1",
"_____no_output_____"
],
[
"c1 = 1.5\nc2 = 1\nc3 = 2",
"_____no_output_____"
],
[
"c1 = 0.1\nc2 = 1\nc3 = 1",
"_____no_output_____"
],
[
"costs = {}\nbetas = {}\nsc_ratios = {}\neff_rates = {}\nslopes = {}\nhedging_levels = {}",
"_____no_output_____"
],
[
"percentile = 4",
"_____no_output_____"
],
[
"hedging = np.concatenate((np.arange(0,20,2),np.arange(20,150,10)))",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"hedging = np.arange(2,40,2)",
"_____no_output_____"
],
[
"arrivals = []",
"_____no_output_____"
],
[
"#scale_list = [0.1,0.3,1,3]\n#scale_list = [0.2,0.4,0.5,0.6,0.7]\nscale_list = np.arange(0.35,0.37,0.001)\nscale_list = np.arange(0.32,0.333,0.001)\nscale_list = np.arange(0.335,0.345,0.001)\nscale_list = [0.33]\n\nhedging = np.concatenate((np.arange(0,20,2),np.arange(20,150,10)))\n#hedging = np.arange(0,150,10)\nhedging = np.arange(50,600,50)\nhedging = np.arange(5,100,5)\n#hedging = np.arange(7,8,1)\n#hedging = [beta_h]\n#hedging = np.arange(30,200,10)\n#hedging = np.arange(20,500,50)\n#hedging = np.concatenate((np.arange(50,500,50),np.arange(500,11000,2000)))\nhedging = np.arange(100,11000,1000)\n#hedging = np.arange(2,100,5)\n#hedging = np.arange(0,100,5)\n#offset = -100000\n#hedging = [100]\n\n# settings for scale = 3\ndur_star = 10000\nomega_star = 7.5645\n#init_state_star = 210000\n\n#dur_star = int(4500000*1)\nduration = dur_star\n\nfor scale in reversed(scale_list):\n print(scale)\n scale_costs = []\n scale_rates = []\n \n #init_state = 7e4*scale\n \n mu_demand = 0.33\n mu_drain = mu_transfer = 0.35*2\n mu_fast = 0.34\n \n slack_capacity_h = mu_fast-mu_drain/2\n std_h = np.sqrt(mu_drain*(1-mu_drain)+mu_fast*(1-mu_fast))\n \n omega_h = std_h/slack_capacity_h\n print(slack_capacity_h,std_h,omega_h)\n print()\n \n \n slack_capacity_ss = mu_fast-mu_drain\n std_ss = np.sqrt(mu_fast*(1-mu_fast)+mu_drain*(1-mu_drain))\n \n omega_ss = std_ss/slack_capacity_ss\n \n \n duration = int(1000000 * 1.5 * 0.5)\n print(scale,duration)\n #print(scale,omega)\n #continue\n \n #print(omega/omega_star)\n \n #duration = int((omega/omega_star)**2*dur_star)\n init_state = 10000\n #init_state = 0\n n_seeds = 1#100\n \n beta_h = (1/4)*(percentile**2)*omega_h# + slack_capacity/std\n beta_ss = (1/4)*(percentile**2)*omega_ss\n scaling_ratio = compute_scaling_ratio(mu_drain,mu_demand,std_h,init_state)\n print(scaling_ratio)\n \n hedge = True\n \n for h in reversed(hedging):\n print(h)\n if hedge:\n h_thres = h\n ss_thres = mu_drain+beta_ss*std_ss\n else:\n h_thres = beta_h*std_ss\n ss_thres = mu_drain+h*std_ss\n print(h_thres)\n #thres = 2*mu_drain+h*np.sqrt(mu_drain+mu_fast)\n #thres = h*10\n buf_1_samples = []\n buf_2_samples = []\n buf_3_samples = []\n \n\n np.random.seed(7)\n \n for _ in range(n_seeds): \n\n demand_seq = np.random.binomial(1,mu_demand,duration)\n transfer_seq = np.random.binomial(1,mu_transfer,duration)\n fast_seq = np.random.binomial(1,mu_fast,duration)\n drain_seq = np.random.binomial(1,mu_drain,duration)\n \n arrival_buffer,inter_buffer,drain_buffer = simulate_simple_reentrant_line(\n demand_seq[:duration],\n transfer_seq[:duration],\n fast_seq[:duration],\n drain_seq[:duration],\n h_thres=h_thres,\n ss_thres=5,\n init_state=init_state,\n flow=False,\n init_with_zeros=False)\n \n #try:\n # end = np.where((arrival_buffer < 10) & (inter_buffer < 10))[0][0]\n #except:\n end = len(arrival_buffer)\n \n buf_1_samples.append(sum(arrival_buffer[0:end]*c1))\n buf_2_samples.append(sum(inter_buffer[0:end]*c2))\n buf_3_samples.append(sum(drain_buffer[0:end]*c3))\n \n #arrivals.append(arrival_buffer)\n \n \n scale_costs.append((np.mean(buf_1_samples),np.mean(buf_2_samples),np.mean(buf_3_samples)))\n #scale_rates.append(zeta*mu_transfer)\n #scale_costs.append(sum(arrival_buffer*c1))\n '''\n a,b = np.histogram(inter_buffer,bins=40,normed=True)\n b = b.reshape(-1,1)\n clf = LinearRegression()\n clf.fit(b[:-15,:],np.log(a[:-14]))\n plt.plot(b[:-15],np.log(a[:-14]),label=scale)\n slopes[scale] = clf.coef_\n '''\n \n costs[scale] = np.array(scale_costs[::-1])\n betas[scale] = beta_h\n sc_ratios[scale] = 
scaling_ratio\n eff_rates[scale] = np.array(scale_rates[::-1])\nplt.legend()",
"_____no_output_____"
],
[
"costs",
"_____no_output_____"
],
[
"#arrivals_2 = arrivals\nplt.plot(np.cumsum(np.array(arrivals_10).mean(axis=0)))\nplt.plot(np.cumsum(np.array(arrivals).mean(axis=0)),\"r\")",
"_____no_output_____"
],
[
"#arrivals_10 = arrivals\n#plt.plot(np.array(arrivals_30).mean(axis=0)[:2000])\nplt.plot(np.array(arrivals_10).mean(axis=0)[:200000])\nplt.plot(np.array(arrivals).mean(axis=0)[:20000],\"r\")",
"_____no_output_____"
],
[
"no_h_cost = ref_cost",
"_____no_output_____"
],
[
"no_h_cost",
"_____no_output_____"
],
[
"min_t_cost/no_h_cost",
"_____no_output_____"
],
[
"no_h_cost/min_t_cost",
"_____no_output_____"
],
[
"bad_cost = ref_cost",
"_____no_output_____"
],
[
"bad_cost/ref_cost",
"_____no_output_____"
],
[
"scale = 0.33\n\nbeta = beta_ss#betas[scale]\nsc_ratio = sc_ratios[scale]\ncost_1,cost_2,cost_3 = zip(*costs[scale])\ncost_1=np.array(cost_1)\ncost_2=np.array(cost_2)\ncost_3=np.array(cost_3)\nt_cost = np.array(cost_1)+np.array(cost_2)+np.array(cost_3)\n#t_cost = np.array(cost_2)+np.array(cost_3)\n#t_cost = np.array(cost_3)\nmin_t_cost = min(t_cost)\n#t_cost = t_cost/min_t_cost\n#ref_cost = no_ss_cost\nref_cost = min_t_cost\n#ref_cost = no_h_cost\n\nt_cost = t_cost/ref_cost\n\ncost_1=np.array(cost_1)/ref_cost\ncost_2=np.array(cost_2)/ref_cost\ncost_3=np.array(cost_3)/ref_cost\n\nindexes = np.where(t_cost < 100)[0]\n\nplt.figure(figsize=(16,8))\n\nplt.plot(hedging[indexes],cost_1[indexes],label=\"Buffer 1 cost\")\n#plt.plot(hedging[indexes],cost_1[indexes],\"o\")\n#plt.plot(hedging[indexes],cost_2[indexes])\nplt.fill_between(hedging[indexes],cost_1[indexes]+cost_2[indexes],cost_1[indexes],alpha=0.5,label=\"Buffer 2 cost\")\nplt.fill_between(hedging[indexes],t_cost[indexes],cost_1[indexes]+cost_2[indexes],alpha=0.5, label=\"Buffer 3 cost\")\n\nplt.plot(hedging[indexes],t_cost[indexes],label=\"Total cost\")\nplt.plot(hedging[indexes],t_cost[indexes],\".\")\n\n#plt.vlines(10,min(t_cost[indexes]),max(t_cost[indexes]),label=\"empirical hedging\")\nplt.hlines(1.03,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"+3% margin\")\n#plt.hlines(0.97,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"-3% margin\")\n#plt.title(\"{:.3f}\".format(sc_ratio))\nplt.ylabel(\"Relative cumulative cost\")\nplt.xlabel(\"Hedging threshold h2\")\nplt.legend()",
"_____no_output_____"
],
[
"set(np.array([1,2]))",
"_____no_output_____"
],
[
"scale = 0.33\n\nbeta = beta_ss#betas[scale]\nsc_ratio = sc_ratios[scale]\ncost_1,cost_2,cost_3 = zip(*costs[scale])\ncost_1=np.array(cost_1)\ncost_2=np.array(cost_2)\ncost_3=np.array(cost_3)\nt_cost = np.array(cost_1)+np.array(cost_2)+np.array(cost_3)\n#t_cost = np.array(cost_2)+np.array(cost_3)\n#t_cost = np.array(cost_3)\nmin_t_cost = min(t_cost)\n#t_cost = t_cost/min_t_cost\n#ref_cost = no_ss_cost\n#ref_cost = min_t_cost\n\nt_cost = t_cost/ref_cost\n\ncost_1=np.array(cost_1)/ref_cost\ncost_2=np.array(cost_2)/ref_cost\ncost_3=np.array(cost_3)/ref_cost\n\nindexes = np.where(t_cost < 100)[0]\n\nplt.figure(figsize=(12,8))\n\nplt.plot(hedging[indexes],cost_1[indexes],label=\"Buffer 1 cost\")\n#plt.plot(hedging[indexes],cost_1[indexes],\"o\")\n#plt.plot(hedging[indexes],cost_2[indexes])\nplt.fill_between(hedging[indexes],cost_1[indexes]+cost_2[indexes],cost_1[indexes],alpha=0.5,label=\"Buffer 2 cost\")\nplt.fill_between(hedging[indexes],t_cost[indexes],cost_1[indexes]+cost_2[indexes],alpha=0.5, label=\"Buffer 3 cost\")\n\nplt.plot(hedging[indexes],t_cost[indexes],label=\"Total cost\")\nplt.plot(hedging[indexes],t_cost[indexes],\".\")\n\n#plt.vlines(10,min(t_cost[indexes]),max(t_cost[indexes]),label=\"empirical hedging\")\nplt.hlines(1.03*min(t_cost),min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"+3% margin\")\n#plt.hlines(0.97,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"-3% margin\")\n#plt.title(\"{:.3f}\".format(sc_ratio))\nplt.ylabel(\"Relative cumulative cost\")\nplt.xlabel(\"Hedging threshold\")\nplt.legend()",
"_____no_output_____"
],
[
"ref_cost",
"_____no_output_____"
],
[
"scale = 0.33\n\nbeta = beta_ss#betas[scale]\nsc_ratio = sc_ratios[scale]\ncost_1,cost_2,cost_3 = zip(*costs[scale])\ncost_1=np.array(cost_1)\ncost_2=np.array(cost_2)\ncost_3=np.array(cost_3)\nt_cost = np.array(cost_1)+np.array(cost_2)+np.array(cost_3)\nt_cost = np.array(cost_2)+np.array(cost_3)\n#t_cost = np.array(cost_3)\nmin_t_cost = min(t_cost)\n#t_cost = t_cost/min_t_cost\n#ref_cost = no_ss_cost\nref_cost = min_t_cost\n\nt_cost = t_cost/ref_cost\n\ncost_1=np.array(cost_1)/ref_cost\ncost_2=np.array(cost_2)/ref_cost\ncost_3=np.array(cost_3)/ref_cost\n\nindexes = np.where(t_cost < 100)[0]\n\nplt.figure(figsize=(12,4))\n\n#plt.plot(hedging[indexes],cost_1[indexes],label=\"Buffer 1 cost\")\n#plt.plot(hedging[indexes],cost_1[indexes],\"o\")\n#plt.plot(hedging[indexes],cost_2[indexes])\n#plt.fill_between(hedging[indexes],cost_1[indexes]+cost_2[indexes],cost_1[indexes],alpha=0.5,label=\"Buffer 2 cost\")\n#plt.fill_between(hedging[indexes],t_cost[indexes],cost_1[indexes]+cost_2[indexes],alpha=0.5, label=\"Buffer 3 cost\")\n\nplt.plot(hedging[indexes],t_cost[indexes],label=\"Total cost\")\nplt.plot(hedging[indexes],t_cost[indexes],\".\")\n\n#plt.vlines(10,min(t_cost[indexes]),max(t_cost[indexes]),label=\"empirical hedging\")\nplt.hlines(1.03,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"+3% margin\")\n#plt.hlines(0.97,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"-3% margin\")\n#plt.title(\"{:.3f}\".format(sc_ratio))\nplt.ylabel(\"Relative cumulative cost\")\nplt.xlabel(\"Hedging threshold\")\nplt.legend()",
"_____no_output_____"
],
[
"(2120/(1-0.33/0.345))/(2770/(1-0.33/0.35))",
"_____no_output_____"
],
[
"np.sum(costs[0.33])/no_ss_cost",
"_____no_output_____"
],
[
"no_ss_cost = np.sum(costs[0.33])",
"_____no_output_____"
],
[
"no_ss_cost",
"_____no_output_____"
],
[
"plt.plot(inter_buffer[:10000], label=\"buffer 3\")",
"_____no_output_____"
],
[
"np.sum(inter_buffer == 0)",
"_____no_output_____"
],
[
"np.sum(inter_buffer == 0)",
"_____no_output_____"
],
[
"-1.02*2977.9+1.05*2874.3",
"_____no_output_____"
],
[
"-1.02*2972.+1.05*2868.6",
"_____no_output_____"
],
[
"2874.3*0.35,2868.6*0.35",
"_____no_output_____"
],
[
"988+18,983+21",
"_____no_output_____"
],
[
"2/0.35",
"_____no_output_____"
],
[
"plt.plot(inter_buffer[8000:10000], label=\"buffer 3\")",
"_____no_output_____"
],
[
"end = 100000\nplt.figure(figsize=(16,6))\n#plt.plot(arrival_buffer[:end],label=\"buffer 1\")\nplt.plot(inter_buffer[30000:end], label=\"buffer 2\")\nplt.plot(drain_buffer[30000:end], label=\"buffer 3\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.hist(inter_buffer,bins=np.arange(150))\nplt.hist(drain_buffer,bins=np.arange(150))",
"_____no_output_____"
],
[
"end = 80000\nplt.figure(figsize=(16,6))\n#plt.plot(arrival_buffer[:end],label=\"buffer 1\")\nplt.plot(inter_buffer[:end], label=\"buffer 2\")\n#plt.plot(drain_buffer[:end], label=\"buffer 3\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,6))\nplt.plot(arrival_buffer,label=\"buffer 1\")\nplt.plot(inter_buffer, label=\"buffer 2\")\nplt.plot(drain_buffer, label=\"buffer 3\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,6))\nplt.plot(arrival_buffer,label=\"buffer 1\")\nplt.plot(inter_buffer, label=\"buffer 2\")\nplt.plot(drain_buffer, label=\"buffer 3\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nplt.legend()",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,6))\nplt.plot(arrival_buffer,label=\"buffer 1\")\nplt.plot(inter_buffer, label=\"buffer 2\")\nplt.plot(drain_buffer, label=\"buffer 3\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nplt.legend()",
"_____no_output_____"
],
[
"f,ax = plt.subplots(2,1,figsize=(16,10))\nax[0].plot(arrival_buffer,label=\"buffer 1\")\nax[0].plot(inter_buffer, label=\"buffer 2\")\nax[0].plot(drain_buffer, label=\"buffer 3\")\nax[0].set_ylabel(\"Buffer level\")\nax[0].legend()\n\ndrain_time_1,drain_time_2=compute_draining_times(arrival_buffer,inter_buffer,drain_buffer)\n\nax[1].plot(drain_time_1,label=\"resource 1\")\nax[1].plot(drain_time_2,label=\"resource 2\")\nax[1].set_ylabel(\"Draining time\")\nax[1].legend()\n#ax[1].gca().set_aspect(\"equal\")\n",
"_____no_output_____"
],
[
"drain_time_1,drain_time_2=compute_draining_times(arrival_buffer,inter_buffer,drain_buffer)",
"_____no_output_____"
],
[
"workload_1,workload_2 = compute_workloads(arrival_buffer,inter_buffer,drain_buffer)",
"_____no_output_____"
],
[
"np.array([i for i in range(10)])",
"_____no_output_____"
],
[
"np.where(np.array([i for i in range(10)]) > 5)[0]",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,8))\nplt.plot(drain_time_1,label=\"1\")\nplt.plot(drain_time_2)\nplt.legend()\nplt.gca().set_aspect(\"equal\")",
"_____no_output_____"
],
[
"plt.plot(workload_1)\nplt.plot(workload_2)",
"_____no_output_____"
],
[
"#plt.figure(figsize=(16,6))\nf,ax = plt.subplots(2,1,figsize=(16,8))\nax[0].plot(arrival_buffer,label=\"buffer 1\")\nax[0].plot(inter_buffer, label=\"buffer 2\")\nax[0].plot(drain_buffer, label=\"buffer 3\")\nax[1].plot(arrival_buffer*c1+inter_buffer*c2+drain_buffer*c3,label=\"Total cost\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nax[0].legend()\nax[1].legend()",
"_____no_output_____"
],
[
"#plt.figure(figsize=(16,6))\nf,ax = plt.subplots(2,1,figsize=(16,8))\nax[0].plot(arrival_buffer,label=\"buffer 1\")\nax[0].plot(inter_buffer, label=\"buffer 2\")\nax[0].plot(drain_buffer, label=\"buffer 3\")\nax[1].plot(arrival_buffer*c1+inter_buffer*c2+drain_buffer*c3,label=\"Total cost\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nax[0].legend()\nax[1].legend()",
"_____no_output_____"
],
[
"cost_2 = arrival_buffer*c1+inter_buffer*c2+drain_buffer*c3",
"_____no_output_____"
],
[
"plt.plot(arrival_buffer*c1+inter_buffer*c2+drain_buffer*c3)\nplt.plot(cost_2)",
"_____no_output_____"
],
[
"plt.plot(cost_1)\nplt.plot(cost_2)",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,6))\nplt.plot(arrival_buffer,label=\"buffer 1\")\nplt.plot(inter_buffer, label=\"buffer 2\")\nplt.plot(drain_buffer, label=\"buffer 3\")\n#plt.hlines(3,0,15000, label = \"ss\")\n#plt.hlines(5,0,15000, label = \"ss\")\nplt.legend()",
"_____no_output_____"
],
[
"workload = arrival_buffer/(mu_drain/2)+(inter_buffer+drain_buffer)/(mu_drain)",
"_____no_output_____"
],
[
"workload_2 = (inter_buffer+arrival_buffer)/(mu_fast)",
"_____no_output_____"
],
[
"plt.plot(workload[:100000],workload_2[:100000])",
"_____no_output_____"
],
[
"plt.plot(workload[:100000],workload_2[:100000])",
"_____no_output_____"
],
[
"min_drain_time = workload/(1-mu_demand*2/mu_drain)",
"_____no_output_____"
],
[
"np.mean(min_drain_time),np.median(min_drain_time)",
"_____no_output_____"
],
[
"np.mean(min_drain_time > 1000)",
"_____no_output_____"
],
[
"a,b,_ = plt.hist(min_drain_time,bins=np.arange(0,14000,50),normed=True)",
"_____no_output_____"
],
[
"np.argmax(a)",
"_____no_output_____"
],
[
"a[:20]",
"_____no_output_____"
],
[
"b[:20]",
"_____no_output_____"
],
[
"b[17]",
"_____no_output_____"
],
[
"np.mean(arrival_buffer)",
"_____no_output_____"
],
[
"np.mean(inter_buffer)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,8))\ndur = np.arange(54000,65000)\n#dur = np.arange(300000)\nplt.fill_between(dur,drain_buffer[dur],label = \"buffer 3\")\n#plt.plot(dur,drain_buffer[dur])\nplt.fill_between(dur,-inter_buffer[dur],label='-buffer 2')\n#plt.fill_between(dur,-inter_buffer[dur],np.minimum(-inter_buffer[dur],-offset),label='-buffer 2')\n#plt.plot(dur,-inter_buffer[dur])\n#plt.plot(dur,a[dur]-offset,\"k\",alpha=0.5)\nplt.ylim(top=50,bottom=-100)\nplt.legend()",
"_____no_output_____"
],
[
"np.mean(arrival_buffer)",
"_____no_output_____"
],
[
"a = drain_buffer",
"_____no_output_____"
],
[
"std_h",
"_____no_output_____"
],
[
"np.percentile(inter_buffer,33)",
"_____no_output_____"
],
[
"350*0.16",
"_____no_output_____"
],
[
"inter_buffer_ss = inter_buffer",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,6))\nplt.hist(inter_buffer,bins=np.arange(150),normed=True,label=\"long drain\")\n\nplt.vlines(np.percentile(inter_buffer,33),0,0.04,label=\"long_drain\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,6))\nplt.hist(inter_buffer,bins=np.arange(150),normed=True,label=\"long drain\")\nplt.hist(inter_buffer_ss,bins=np.arange(150),normed=True,label=\"steady state\",alpha=0.7)\n\nplt.xlabel(\"Buffer 2 level\")\nplt.ylabel(\"Occupancy probability\")\n\nh = np.percentile(inter_buffer,33)\n\nplt.vlines(np.percentile(inter_buffer,33),0,0.04,label=\"long_drain\")\nplt.vlines(np.percentile(inter_buffer_ss,33),0,0.04,label=\"steady state\",color=\"r\")\nplt.legend()",
"_____no_output_____"
],
[
"np.percentile(150-drain_buffer,33)",
"_____no_output_____"
],
[
"1/(omega_h*std_h)",
"_____no_output_____"
],
[
"plt.plot(drain_buffer)",
"_____no_output_____"
],
[
"-np.log(0.33)/(0.01*3.5)",
"_____no_output_____"
],
[
"b,a = zip(*slopes.items())",
"_____no_output_____"
],
[
"clf = LinearRegression()\nclf.fit(np.array(b).reshape(-1,1),a)",
"_____no_output_____"
],
[
"clf.coef_",
"_____no_output_____"
],
[
"plt.plot(np.array(b),a,\".\")\nplt.plot(np.array(b),clf.predict(np.array(b).reshape(-1,1)))",
"_____no_output_____"
],
[
"np.histogram(inter_buffer,bins=50)",
"_____no_output_____"
],
[
"beta_ss = (1/4)*(percentile**2)*omega_ss",
"_____no_output_____"
],
[
"beta_ss",
"_____no_output_____"
],
[
"mu_demand,mu_transfer,mu_fast,mu_drain",
"_____no_output_____"
],
[
"std_h**2*(1-omega_h*2*(c3/c2))/(4*slack_capacity_h)",
"_____no_output_____"
],
[
"plt.plot(arrival_buffer[:1000000])",
"_____no_output_____"
],
[
"np.sum(drain_buffer)/(26*len(drain_buffer))",
"_____no_output_____"
],
[
"#\n#plt.plot(arrival_buffer[:1000000])\n#plt.plot(inter_buffer[:1000000])\nplt.plot(drain_buffer[:1000000])\nplt.plot(inter_buffer[:1000000],label='safety stocks')\nplt.legend()",
"_____no_output_____"
],
[
"#\n#plt.plot(arrival_buffer[:1000000])\n#plt.plot(inter_buffer[:1000000])\nplt.plot(drain_buffer[:1000000])\nplt.plot(inter_buffer[:1000000],label='safety stocks')\nplt.legend()",
"_____no_output_____"
],
[
"#\n#plt.plot(arrival_buffer[:1000000])\n#plt.plot(inter_buffer[:1000000])\n#plt.plot(drain_buffer[:1000000])\nplt.plot(inter_buffer[:100000000],label='safety stocks')\nplt.legend()",
"_____no_output_____"
],
[
"max(drain_buffer)- np.percentile(drain_buffer,66)",
"_____no_output_____"
],
[
"np.percentile(inter_buffer,33)",
"_____no_output_____"
],
[
"plt.plot(inter_buffer)",
"_____no_output_____"
],
[
"plt.plot(np.arange(199,-1,-1),0.035*np.exp(np.arange(200)*-0.035))",
"_____no_output_____"
],
[
"std_h",
"_____no_output_____"
],
[
"(0.7*omega_h*std_h)",
"_____no_output_____"
],
[
"s = 1/(0.7*omega_h*std_h)",
"_____no_output_____"
],
[
"s",
"_____no_output_____"
],
[
"1/clf.coef_",
"_____no_output_____"
],
[
"plt.hist(drain_buffer,bins=40,normed=True)\n#plt.plot(b[15:,:],clf.predict(b[15:,:]))\n",
"_____no_output_____"
],
[
"np.log(0.66)/s",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,6))\n\na,b,_ = plt.hist(drain_buffer,bins=30,normed=True)\n\nb = b.reshape(-1,1)\nclf = LinearRegression()\nclf.fit(b[15:,:],np.log(a[14:]))\nprint(clf.coef_)\n\n\n#plt.plot(np.arange(149,-1,-1),clf.coef_[0]*np.exp(np.arange(150)*-clf.coef_[0]))\nplt.plot(np.arange(149,-1,-1),s*np.exp(np.arange(150)*-s),linewidth=2)\n\nplt.vlines(150+np.log(0.66)/s,0,0.04,color=\"r\")\nplt.xlabel(\"Buffer 3 level\")\nplt.ylabel(\"Occupancy probability\")",
"_____no_output_____"
],
[
"np.percentile(a,66)",
"_____no_output_____"
],
[
"1/omega_h",
"_____no_output_____"
],
[
"len(a)",
"_____no_output_____"
],
[
"len(b)",
"_____no_output_____"
],
[
"0.33-0.34",
"_____no_output_____"
],
[
"3/200",
"_____no_output_____"
],
[
"mu_demand/mu_fast",
"_____no_output_____"
],
[
"mu_transfer/2/mu_fast",
"_____no_output_____"
],
[
"5/140",
"_____no_output_____"
],
[
"-np.log(1-0.33)/(3.5*0.015)",
"_____no_output_____"
],
[
"plt.plot(b[10:],np.log(a[9:]))",
"_____no_output_____"
],
[
"#\n#plt.plot(arrival_buffer[:1000000])\n#plt.plot(inter_buffer[:1000000])\nplt.plot(-drain_buffer[:1000000])\nplt.plot(inter_buffer[:1000000],label='safety stocks')\nplt.legend()",
"_____no_output_____"
],
[
"beta_h*std_h/(beta_ss*std_ss)",
"_____no_output_____"
],
[
"beta_h",
"_____no_output_____"
],
[
"plt.figure(figsize=(14,8))\nrun = np.arange(10000)\nplt.fill_between(run,inter_buffer[run],label=\"buffer 2\")\nplt.fill_between(run,drain_buffer[run],label=\"buffer 3\")\nplt.legend()",
"_____no_output_____"
],
[
"omega_h",
"_____no_output_____"
],
[
"cost_3",
"_____no_output_____"
],
[
"scale = 0.33\n\nbeta = beta_ss#betas[scale]\nsc_ratio = sc_ratios[scale]\ncost_1,cost_2,cost_3 = zip(*costs[scale])\ncost_1=np.array(cost_1)\ncost_2=np.array(cost_2)\ncost_3=np.array(cost_3)\nt_cost = np.array(cost_1)+np.array(cost_2)+np.array(cost_3)\nmin_t_cost = min(t_cost)\nt_cost = t_cost/min_t_cost\n\ncost_1=np.array(cost_1)/min_t_cost\ncost_2=np.array(cost_2)/min_t_cost\ncost_3=np.array(cost_3)/min_t_cost\n\nindexes = np.where(t_cost < 5)[0]\n\nplt.figure(figsize=(12,8))\n\nplt.plot(hedging[indexes],cost_1[indexes],label=\"Buffer 1 cost\")\n#plt.plot(hedging[indexes],cost_1[indexes],\"o\")\n#plt.plot(hedging[indexes],cost_2[indexes])\nplt.fill_between(hedging[indexes],cost_1[indexes]+cost_2[indexes],cost_1[indexes],alpha=0.1)\nplt.fill_between(hedging[indexes],t_cost[indexes],cost_1[indexes]+cost_2[indexes],alpha=0.1)\nplt.plot(hedging[indexes],t_cost[indexes],label=\"Total cost\")\nplt.plot(hedging[indexes],t_cost[indexes],\".\")\nplt.vlines(beta,min(t_cost[indexes]),max(t_cost[indexes]),label=\"beta\")\nplt.hlines(1.03,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"3% margin\")\nplt.title(\"{:.3f}\".format(sc_ratio))\nplt.ylabel(\"Relative cumulative cost\")\nplt.xlabel(\"Threshold (xSTD)\")\nplt.legend()",
"_____no_output_____"
],
[
"scale = 0.33\n\nbeta = betas[scale]\nsc_ratio = sc_ratios[scale]\ncost_1,cost_2,cost_3 = zip(*costs[scale])\ncost_1=np.array(cost_1)\ncost_2=np.array(cost_2)\ncost_3=np.array(cost_3)\nt_cost = np.array(cost_1)+np.array(cost_2)+np.array(cost_3)\nmin_t_cost = min(t_cost)\nt_cost = t_cost/min_t_cost\n\ncost_1=np.array(cost_1)/min_t_cost\ncost_2=np.array(cost_2)/min_t_cost\ncost_3=np.array(cost_3)/min_t_cost\n\nindexes = np.where(t_cost < 2e6)[0]\n\nplt.figure(figsize=(12,8))\n\nplt.plot(hedging[indexes],cost_1[indexes],label=\"Buffer 1 cost\")\n#plt.plot(hedging[indexes],cost_1[indexes],\"o\")\n#plt.plot(hedging[indexes],cost_2[indexes])\nplt.fill_between(hedging[indexes],cost_1[indexes]+cost_2[indexes],cost_1[indexes],alpha=0.1)\nplt.fill_between(hedging[indexes],t_cost[indexes],cost_1[indexes]+cost_2[indexes],alpha=0.1)\nplt.plot(hedging[indexes],t_cost[indexes],label=\"Total cost\")\nplt.plot(hedging[indexes],t_cost[indexes],\".\")\nplt.vlines(beta,min(t_cost[indexes]),max(t_cost[indexes]),label=\"beta\")\nplt.hlines(1.03,min(hedging[indexes]),max(hedging[indexes]),color=\"r\",label=\"3% margin\")\nplt.title(\"{:.3f}\".format(sc_ratio))\nplt.ylabel(\"Relative cumulative cost\")\nplt.xlabel(\"Threshold (xSTD)\")\nplt.legend()",
"_____no_output_____"
],
[
"scale = 3\nbeta = betas[scale]\nsc_ratio = sc_ratios[scale]\ncost = costs[scale]\nr_cost = cost/min(cost)\nindexes = np.where(r_cost < 1.2)[0]\nplt.plot(hedging[indexes],r_cost[indexes])\nplt.plot(hedging[indexes],r_cost[indexes],\".\")\nplt.vlines(beta,min(r_cost[indexes]),max(r_cost[indexes]))\nplt.hlines(1.03,min(hedging[indexes]),max(hedging[indexes]),color=\"r\")\nplt.title(\"{:.3f}\".format(sc_ratio))",
"_____no_output_____"
],
[
"plt.plot(hedging,costs[1])",
"_____no_output_____"
],
[
"mu_demand",
"_____no_output_____"
],
[
"percentile = 3.1",
"_____no_output_____"
],
[
"scale = 0.1",
"_____no_output_____"
],
[
"cost = []\nrates = []\nhedging = np.arange(30,200,100)\n\nf,ax = plt.subplots(3,1,figsize=(16,8))\n\nduration = 10000\nplot_range = range(0,duration)\n\nmu_demand = 30*scale\nmu_drain = mu_demand*1.02\nmu_transfer = mu_drain + (mu_drain-mu_demand)*1\n\nslack_capacity = mu_transfer-mu_drain\nstd = np.sqrt(mu_drain+mu_transfer)\n\nomega = std/slack_capacity\n\nbeta = (1/4)*(percentile**2)*(std/slack_capacity)\n\nhedging=[beta/4,beta/2,beta]\n#hedging=[beta]\n\ninit_state = (mu_drain-mu_demand)*duration*0.6\n\n\nnp.random.seed(5)\n\ndemand_seq = np.random.poisson(mu_demand,duration)\ntransfer_seq = np.random.poisson(mu_transfer,duration)\ndrain_seq = np.random.poisson(mu_drain,duration)\n\ncumul =False\nfor h in reversed(hedging):\n thres = 2*mu_drain+h*np.sqrt(mu_drain+mu_transfer)\n \n #thres = h*10\n arrival_buffer,drain_buffer,zeta = simulate_reflected_random_walk_repeat(\n demand_seq[:duration],\n transfer_seq[:duration],\n drain_seq[:duration],\n thres,\n init_state=init_state,\n flow=False)\n \n #print(np.where(drain_buffer == 0))\n \n cost.append(sum(arrival_buffer*c1)+sum(drain_buffer*c2))\n rates.append(zeta*mu_transfer)\n #plt.plot(drain_buffer[j*1000:(j+1)*1000]*c2+arrival_buffer[j*1000:(j+1)*1000]*c1)\n if cumul:\n ax[1].plot(np.cumsum(drain_buffer)[plot_range],label=int(h))\n ax[0].plot(np.cumsum(arrival_buffer)[plot_range])\n ax[2].plot(np.cumsum(arrival_buffer*c1+drain_buffer*c2)[plot_range])\n else:\n ax[1].plot((drain_buffer)[plot_range])\n #ax[1].plot(np.ones(len(plot_range))*thres,\".-\")\n ax[0].plot((arrival_buffer)[plot_range],label=\"{} * {}\".format(int(h),int(std)))\n ax[2].plot((arrival_buffer*c1+drain_buffer*c2)[plot_range])\n #print(np.min(np.diff((arrival_buffer[1500:2000]*c1+drain_buffer[1500:2000]*c2))))\n \nax[0].set_ylabel(\"Items in buffer 1\")\nax[1].set_ylabel(\"Items in buffer 2\")\nax[2].set_ylabel(\"Total cost\")\nf.legend()",
"_____no_output_____"
],
[
"slack_capacity",
"_____no_output_____"
],
[
"std/slack_capacity",
"_____no_output_____"
],
[
"mu_drain*c2",
"_____no_output_____"
],
[
"thres*c2",
"_____no_output_____"
],
[
"np.sum(drain_buffer == 0)",
"_____no_output_____"
],
[
"mu_demand",
"_____no_output_____"
],
[
"rates",
"_____no_output_____"
],
[
"mu_demand",
"_____no_output_____"
],
[
"mu_transfer",
"_____no_output_____"
],
[
"time_horizon",
"_____no_output_____"
],
[
"offset/std",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"percentile = 1.645\n#percentile = 0\npercentile = 1.96\n#percentile = 2.33\npercentile = 3.1\n#percentile = 1\n#percentile = 7\nslack_capacity = mu_transfer-mu_drain\nstd = np.sqrt(mu_drain+mu_transfer)\ntime_horizon = (percentile*std)**2/(2*slack_capacity)**2\noffset = time_horizon*(-slack_capacity) + percentile*std*np.sqrt(time_horizon)\ntime_horizon = int(np.ceil(time_horizon))\noffset = int(np.ceil(offset))",
"_____no_output_____"
],
[
"percentile*np.sqrt(3)",
"_____no_output_____"
],
[
"slack_capacity = mu_transfer-mu_drain\nstd = np.sqrt(mu_drain+mu_transfer)\nbeta = (1/4)*(percentile**2)*(std/slack_capacity) + slack_capacity/std",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"std",
"_____no_output_____"
],
[
"slack_capacity",
"_____no_output_____"
],
[
"slack_capacity/std",
"_____no_output_____"
],
[
"slack_capacity",
"_____no_output_____"
],
[
"0.5*percentile*std/np.sqrt(time_horizon)",
"_____no_output_____"
],
[
"offset/std + slack_capacity/std",
"_____no_output_____"
],
[
"scaling_ratio = compute_scaling_ratio(mu_drain,mu_demand,std,init_state)",
"_____no_output_____"
],
[
"beta",
"_____no_output_____"
],
[
"min_cost = min(cost)\nhedging = np.array(hedging)\nr_cost = np.array([c/min_cost for c in cost[::-1]])\n\nindexes = np.where(r_cost < 1.2)[0]\nplt.plot(hedging[indexes],r_cost[indexes])\nplt.plot(hedging[indexes],r_cost[indexes],\".\")\nplt.vlines(beta,min(r_cost[indexes]),max(r_cost[indexes]))\nplt.title(\"{:.3f}\".format(scaling_ratio))",
"_____no_output_____"
],
[
"min_cost = min(cost)\nhedging = np.array(hedging)\nr_cost = np.array([c/min_cost for c in cost[::-1]])\n\nindexes = np.where(r_cost < 1.2)[0]\nplt.plot(hedging[indexes],r_cost[indexes])\nplt.plot(hedging[indexes],r_cost[indexes],\".\")\nplt.vlines(beta,min(r_cost[indexes]),max(r_cost[indexes]))\nplt.title(\"{:.3f}\".format(scaling_ratio))",
"_____no_output_____"
],
[
"cost = []\nhedging = np.arange(30,60,5)\ninit_state = 7e4\n#hedging = np.arange(1,7)\nj = 1\nf,ax = plt.subplots(3,1,figsize=(16,8))\n#plot_range = range(4000,5000)\n\nduration = 100000\nplot_range = range(0,10000)\nplot_range = range(0,200)\ncumul =False\nfor h in reversed(hedging):\n thres = mu_drain+h*np.sqrt(mu_drain+mu_transfer)\n #thres = h*10\n arrival_buffer,drain_buffer,zeta = simulate_reflected_random_walk_repeat(demand_seq[:duration],\n transfer_seq[:duration],\n drain_seq[:duration],\n thres,init_state=init_state,\n flow=False)\n cost.append(sum(arrival_buffer*c1)+sum(drain_buffer*c2))\n #plt.plot(drain_buffer[j*1000:(j+1)*1000]*c2+arrival_buffer[j*1000:(j+1)*1000]*c1)\n if cumul:\n ax[1].plot(np.cumsum(drain_buffer*c2)[plot_range],label=h)\n ax[0].plot(np.cumsum(arrival_buffer*c1)[plot_range])\n ax[2].plot(np.cumsum(arrival_buffer*c1+drain_buffer*c2)[plot_range])\n else:\n ax[1].plot((drain_buffer*c2)[plot_range],label=h)\n ax[0].plot((arrival_buffer*c1)[plot_range])\n ax[2].plot((arrival_buffer*c1+drain_buffer*c2)[plot_range])\n #print(np.min(np.diff((arrival_buffer[1500:2000]*c1+drain_buffer[1500:2000]*c2))))\nf.legend()",
"_____no_output_____"
],
[
"min_cost = min(cost)\nplt.plot(hedging,[c/min_cost for c in cost[::-1]])\nplt.plot(hedging,[c/min_cost for c in cost[::-1]],\".\")",
"_____no_output_____"
],
[
"cost = []\nhedging = np.arange(5,70,5)\ninit_state = 1e4\n#hedging = np.arange(1,7)\nj = 1\nf,ax = plt.subplots(3,1,figsize=(16,8))\n#plot_range = range(4000,5000)\n\nduration = 6000\nplot_range = range(0,6000)\n#plot_range = range(0,300)\ncumul =False\nfor h in reversed(hedging):\n thres = mu_drain+h*np.sqrt(mu_drain)\n #thres = h*10\n arrival_buffer,drain_buffer,zeta = simulate_reflected_random_walk(demand_seq[:duration],transfer_seq[:duration],drain_seq[:duration],thres,init_state=init_state)\n cost.append(sum(arrival_buffer*c1)+sum(drain_buffer*c2))\n #plt.plot(drain_buffer[j*1000:(j+1)*1000]*c2+arrival_buffer[j*1000:(j+1)*1000]*c1)\n if cumul:\n ax[1].plot(np.cumsum(drain_buffer*c2)[plot_range],label=h)\n ax[0].plot(np.cumsum(arrival_buffer*c1)[plot_range])\n ax[2].plot(np.cumsum(arrival_buffer*c1+drain_buffer*c2)[plot_range])\n else:\n ax[1].plot((drain_buffer*c2)[plot_range],label=h)\n ax[0].plot((arrival_buffer*c1)[plot_range])\n ax[2].plot((arrival_buffer*c1+drain_buffer*c2)[plot_range])\n #print(np.min(np.diff((arrival_buffer[1500:2000]*c1+drain_buffer[1500:2000]*c2))))\n \nthres = 1e6\n#thres = h*10\narrival_buffer,drain_buffer,_ = simulate_reflected_random_walk(demand_seq[:duration],transfer_seq[:duration],drain_seq[:duration],thres,init_state=init_state)\n#plt.plot(drain_buffer[j*1000:(j+1)*1000]*c2+arrival_buffer[j*1000:(j+1)*1000]*c1)\nif cumul:\n #ax[1].plot(np.cumsum(drain_buffer*c2)[plot_range],label=\"e\")\n ax[0].plot(np.cumsum(arrival_buffer*c1)[plot_range],label=\"e\")\n #ax[2].plot(np.cumsum(arrival_buffer*c1+drain_buffer*c2)[plot_range])\nelse:\n #ax[1].plot((drain_buffer*c2)[plot_range],label=\"e\")\n ax[0].plot((arrival_buffer*c1)[plot_range],label=\"e\")\n #ax[2].plot((arrival_buffer*c1+drain_buffer*c2)[plot_range])\nf.legend()",
"_____no_output_____"
],
[
"(mu_transfer-mu_demand)/((zeta*mu_transfer)-mu_demand)",
"_____no_output_____"
],
[
"min_cost = min(cost)\nplt.plot(hedging,[c/min_cost for c in cost[::-1]])\nplt.plot(hedging,[c/min_cost for c in cost[::-1]],\".\")",
"_____no_output_____"
],
[
"min_cost = min(cost)\nplt.plot(hedging,[c/min_cost for c in cost[::-1]])\nplt.plot(hedging,[c/min_cost for c in cost[::-1]],\".\")",
"_____no_output_____"
],
[
"h = []\nfor i in np.arange(0.94,0.949,0.001):\n h.append(1/(1-i))\n \nplt.plot(np.arange(0.94,0.949,0.001)/0.94,[i/min(h) for i in h])",
"_____no_output_____"
],
[
"min_cost = min(cost)",
"_____no_output_____"
],
[
"cost[0]-cost[1]",
"_____no_output_____"
],
[
"plt.plot(drain_buffer[:300])",
"_____no_output_____"
],
[
"plt.plot(arrival_buffer[:600])",
"_____no_output_____"
],
[
"plt.plot(buffer_seq[:1000])",
"_____no_output_____"
],
[
"sum(buffer_seq)",
"_____no_output_____"
],
[
"sum(buffer_seq)",
"_____no_output_____"
],
[
"np.percentile((supply_seq-demand_seq)[(supply_seq-demand_seq) < 0],0.01)",
"_____no_output_____"
],
[
"plt.plot(np.cumsum(supply_seq)-np.cumsum(demand_seq))",
"_____no_output_____"
],
[
"percentile = 1.645\n#percentile = 0\n#percentile = 1.96\n#percentile = 2.33\nslack_capacity = mu_supply-mu_demand\ntime_horizon = (percentile**2)*mu_supply/(2*slack_capacity**2)\noffset = time_horizon*(-slack_capacity) + percentile* np.sqrt(mu_supply*2*time_horizon)\nprint(time_horizon*2)\ntime_horizon = int(np.ceil(time_horizon))\noffset = int(np.ceil(offset))",
"_____no_output_____"
],
[
"time_horizon = (percentile**2)*mu_supply*2/slack_capacity**2\ntime_horizon = int(np.ceil(time_horizon))",
"_____no_output_____"
],
[
"y = []\nfor d in range(time_horizon):\n y.append(d*(slack_capacity) - percentile* np.sqrt(mu_supply*2*d))",
"_____no_output_____"
],
[
"y_1 = y\ntime_horizon_1 = time_horizon",
"_____no_output_____"
],
[
"y_2 = y\ntime_horizon_2 = time_horizon",
"_____no_output_____"
],
[
"time_horizon/time_horizon_1",
"_____no_output_____"
],
[
"1.96/1.645",
"_____no_output_____"
],
[
"plt.plot(range(time_horizon),y)\nplt.plot(range(time_horizon_1),y_1)\nplt.plot(range(time_horizon_2),y_2)",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
],
[
"time_horizon",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"thres = poisson.ppf(0.95,mu_demand)\n#thres = 0\nthres = poisson.ppf(0.5,mu_demand)\n\ndef idle_supply(demand_seq,supply_seq,offset):\n inv_pos = offset\n idle_supply_seq = np.zeros_like(supply_seq) \n idle_count = 0\n for i,(d,s) in enumerate(zip(demand_seq,supply_seq)):\n if inv_pos > thres+offset:\n s = 0\n idle_count += 1\n idle_supply_seq[i] = s\n inv_pos += s-d\n #print(idle_count/len(supply_seq)) \n return idle_supply_seq\n\ndef idle_supply_time_horizon(demand_seq,supply_seq,offset,time_horizon):\n inv_pos = offset\n inv_pos_seq = np.zeros_like(supply_seq)\n days_count = 0\n for i,(d,s) in enumerate(zip(demand_seq,supply_seq)):\n if (inv_pos > thres+offset) and days_count >= time_horizon:\n s = 0\n days_count = 0\n idle_supply_seq[i] = s\n inv_pos += s-d\n inv_pos_seq[i] = inv_pos\n days_count += 1\n return inv_pos_seq\n\ndef idle_supply_time_horizon_smooth(demand_seq,supply_seq,offset,time_horizon):\n inv_pos = offset\n inv_pos_seq = np.zeros_like(supply_seq)\n days_count = 0\n just_idled = False\n for i,(d,s) in enumerate(zip(demand_seq,supply_seq)):\n surplus = inv_pos - offset\n if surplus > 0 and ((days_count >= time_horizon) or just_idled):\n if d > surplus:\n s = d-surplus\n else:\n s = 0\n days_count=0\n just_idled = True\n else:\n just_idled = False\n \n inv_pos += s-d\n inv_pos_seq[i] = inv_pos\n if not just_idled:\n days_count += 1\n \n return inv_pos_seq\n\ndef work_supply_time_horizon_smooth(demand_seq,supply_seq,offset,time_horizon):\n inv_pos = offset\n inv_pos_seq = np.zeros_like(supply_seq)\n days_count = 0\n just_idled = True\n for i,(d,s) in enumerate(zip(demand_seq,supply_seq)):\n surplus = inv_pos - offset\n if surplus > 0 and ((days_count >= time_horizon) or just_idled):\n days_count = 0\n if d > surplus:\n s = d-surplus\n else:\n s = 0\n days_count=0\n just_idled = True\n else:\n days_count += 1\n just_idled = False\n \n inv_pos += s-d\n inv_pos_seq[i] = inv_pos\n \n return inv_pos_seq\n\ndef idle_supply_smooth(demand_seq,supply_seq,offset):\n inv_pos = offset\n idle_supply_seq = np.zeros_like(supply_seq) \n idle_count = 0\n inv_pos_array = np.zeros_like(supply_seq)\n for i,(d,s) in enumerate(zip(demand_seq,supply_seq)):\n surplus = inv_pos - offset\n if surplus > 0:\n if d > surplus:\n s = d-surplus\n else:\n s = 0\n idle_count += 1\n \n idle_supply_seq[i] = s\n inv_pos += s-d\n inv_pos = min(inv_pos,offset)\n inv_pos_array[i] = inv_pos\n\n #print(idle_count/len(supply_seq)) \n print(inv_pos)\n return inv_pos_array",
"_____no_output_____"
],
[
"slack_capacity/np.sqrt(2*mu_demand)",
"_____no_output_____"
],
[
"point = 1400\nplt.plot(inv_pos_seq[point-100:point+500])",
"_____no_output_____"
],
[
"point = 1400\nplt.plot(inv_pos_seq[point-100:point+500])",
"_____no_output_____"
],
[
"point = 1400\nplt.plot(inv_pos_seq[point-100:point+100])",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"time_horizon*slack_capacity/2",
"_____no_output_____"
],
[
"slack_capacity",
"_____no_output_____"
],
[
"inv_pos_seq = work_supply_time_horizon_smooth(demand_seq,supply_seq,53,12)\n\nprint(np.mean(inv_pos_seq < 0))",
"_____no_output_____"
],
[
"inv_pos_seq = idle_supply_time_horizon_smooth(demand_seq,supply_seq,53,12)\n\nprint(np.mean(inv_pos_seq < 0))",
"_____no_output_____"
],
[
"stocks = inv_pos_seq.copy()\nstocks[inv_pos_seq < 0] = 0\nnp.mean(stocks)",
"_____no_output_____"
],
[
"inv_pos_seq = idle_supply_time_horizon_smooth(demand_seq,supply_seq,41,69)\n\nprint(np.mean(inv_pos_seq < 0))",
"_____no_output_____"
],
[
"stocks = inv_pos_seq.copy()\nstocks[inv_pos_seq < 0] = 0\nnp.mean(stocks)",
"_____no_output_____"
],
[
"inv_pos_seq = idle_supply_time_horizon(demand_seq,supply_seq,offset,time_horizon)\n\nprint(np.mean(inv_pos_seq < 0))\n#plt.plot(inv_pos_seq[827341-10:827341+10])\n#plt.plot(inv_pos_seq[827341-10:827341+10],\".\")",
"_____no_output_____"
],
[
"stocks = inv_pos_seq.copy()\nstocks[inv_pos_seq < 0] = 0\nnp.mean(stocks)",
"_____no_output_____"
],
[
"idle_supply_seq,inv_pos_seq = idle_supply_smooth(demand_seq,supply_seq, np.ceil(offset))\n#inv_pos_seq = offset + np.cumsum(idle_supply_seq)-np.cumsum(demand_seq)\nprint(np.mean(inv_pos_seq < 0))\n#plt.plot(inv_pos_seq[827341-10:827341+10])\n#plt.plot(inv_pos_seq[827341-10:827341+10],\".\")\n\nplt.plot(inv_pos_seq[:1200])",
"_____no_output_____"
],
[
"n_sims = 100000\ndemand_sum = np.random.poisson(mu_demand*np.ceil(time_horizon),n_sims)\nsupply_sum = np.random.poisson(mu_supply*np.ceil(time_horizon),n_sims)\n\nprint(np.mean((demand_sum-supply_sum) > np.ceil(offset)))",
"_____no_output_____"
],
[
"offset+time_horizon*slack_capacity",
"_____no_output_____"
],
[
"1001 % 100",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"time_horizon*slack_capacity/2",
"_____no_output_____"
],
[
"np.random.seed(500)\nn_sims = 100000\n#n_sims = 20\nstockouts = []\nlast_day_stockouts = []\nlast_day_stockouts_vals = []\nave_inventories = []\nsim_time_horizon = time_horizon\nfor i in range(n_sims):\n demand = np.random.poisson(mu_demand,sim_time_horizon)\n supply = np.random.poisson(mu_supply,sim_time_horizon)\n inv_pos_seq = offset + np.cumsum(supply)-np.cumsum(demand)\n stockouts.append(np.sum(inv_pos_seq < 0))\n last_day_stockouts.append(inv_pos_seq[-1] < offset)\n if last_day_stockouts[-1]:\n last_day_stockouts_vals.append(inv_pos_seq[-1]-offset)\n \n ave_inventories.append(np.mean(inv_pos_seq))\n if i % 10000 == 0:\n plt.plot(inv_pos_seq)\n \nsum(stockouts)/(sim_time_horizon*n_sims),np.sum(last_day_stockouts)/(n_sims),np.mean(ave_inventories)\n",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"np.median(last_day_stockouts_vals)",
"_____no_output_____"
],
[
"for offset in range(200):\n stock_out_probs = []\n for d in range(1,time_horizon+1):\n stock_out_prob = norm.cdf(-offset,slack_capacity*d,np.sqrt(2*mu_supply*d))\n stock_out_probs.append(stock_out_prob)\n overal_stockout_prob = np.mean(stock_out_probs)\n #print(overal_stockout_prob)\n if overal_stockout_prob < 0.05:\n break",
"_____no_output_____"
],
[
"time_horizon",
"_____no_output_____"
],
[
"def get_percentile_deficit(cycle_dur,slack_capacity,variance,percentile = 0.5):\n mu = slack_capacity*cycle_dur\n std = np.sqrt(variance*cycle_dur)\n cum_deficit_prob = norm.cdf(0,mu,std)\n cum_percentile = 0\n prev_cum_prob = cum_deficit_prob\n for i in range(10000):\n cum_prob = norm.cdf(-i,mu,std)\n prob = (prev_cum_prob - cum_prob)/cum_deficit_prob\n cum_percentile += prob\n if cum_percentile >= percentile:\n return i\n prev_cum_prob = cum_prob\n \na = get_percentile_deficit(time_horizon/4,slack_capacity,2*mu_supply)\n#get_percentile_deficit(slack_capacity,2*mu_supply,time_horizon)\nprint(a)\n\ndef compute_recovery_time(slack_capacity,variance,deficit,bound = 2.33):\n dur = ((bound*np.sqrt(variance)+np.sqrt(bound**2*variance+4*slack_capacity*deficit))/(2*slack_capacity))**2\n return int(np.ceil(dur))\n\nprint(compute_recovery_time(slack_capacity,2*mu_supply,a))\n\ndef get_average_stockout_prob(duration,slack_capacity,variance,start):\n stock_out_probs = []\n for d in range(1,duration+1):\n stock_out_prob = norm.cdf(0,start+slack_capacity*d,np.sqrt(variance*d))\n stock_out_probs.append(stock_out_prob)\n average_stockout_prob = np.mean(stock_out_probs)\n return average_stockout_prob\n\ndef compute_stockout_prob_and_inventory_cost(cycle_dur,slack_capacity,variance,offset):\n mu = slack_capacity*cycle_dur\n std = np.sqrt(variance*cycle_dur)\n cum_deficit_prob = norm.cdf(0,mu,std)\n #print(cum_deficit_prob)\n deficit = get_percentile_deficit(cycle_dur,slack_capacity,variance,0.95)\n #print(deficit)\n rec_dur = compute_recovery_time(slack_capacity,variance,deficit)\n #print(rec_dur)\n cycle_stockout_prob = get_average_stockout_prob(cycle_dur,slack_capacity,variance,offset)\n \n rec_dur = int(np.ceil(deficit/slack_capacity))\n print(rec_dur)\n rec_stockout_prob = get_average_stockout_prob(rec_dur,slack_capacity,variance,offset-deficit)\n #print(cycle_stockout_prob,rec_stockout_prob)\n \n effective_duration = (cycle_dur+cum_deficit_prob*rec_dur)\n #print(cycle_dur/effective_duration)\n \n overall_stockout_prob = (cycle_dur*cycle_stockout_prob+cum_deficit_prob*rec_dur*rec_stockout_prob)/effective_duration\n \n overall_inventory_cost = (cycle_dur*(0.5*slack_capacity*cycle_dur+offset)+cum_deficit_prob*rec_dur*(0.5*slack_capacity*rec_dur+offset-deficit))/effective_duration\n #print(overall_inventory_cost)\n return overall_stockout_prob,overall_inventory_cost",
"_____no_output_____"
],
[
"time_horizon/4",
"_____no_output_____"
],
[
"variance = 2*mu_supply",
"_____no_output_____"
],
[
"min_inv_cost = np.inf\nmin_cycle_dur = None\nmin_offset = None\nfor cycle_dur in range(1,int(time_horizon)):\n\n for offset in range(200):\n overall_stockout_prob,inv_cost = compute_stockout_prob_and_inventory_cost(cycle_dur,slack_capacity,variance,offset)\n #print(overall_stockout_prob)\n if overall_stockout_prob < 0.05:\n break\n \n print(cycle_dur,inv_cost)\n if inv_cost < min_inv_cost:\n print(cycle_dur)\n min_inv_cost = inv_cost\n min_cycle_dur = cycle_dur\n min_offset = offset\n \nprint(offset)",
"_____no_output_____"
],
[
"min_offset",
"_____no_output_____"
],
[
"min_cycle_dur",
"_____no_output_____"
],
[
"min_inv_cost",
"_____no_output_____"
],
[
"time_horizon",
"_____no_output_____"
],
[
"int(time_horizon)*(0.5*slack_capacity)",
"_____no_output_____"
],
[
"inv_cost",
"_____no_output_____"
],
[
"print(overal_stockout_prob)",
"_____no_output_____"
],
[
"overal_stockout_prob",
"_____no_output_____"
],
[
"probs = []\ndeficit = 10000\nfor i in range(deficit):\n v = -offset-i\n mu = slack_capacity*time_horizon\n std = np.sqrt(2*mu_supply*time_horizon)\n probs.append(norm.cdf(v,mu,std))\n #print(i,probs[-1])\n \nnp.sum(-np.diff(probs)*np.arange(1,deficit)/norm.cdf(-offset,mu,std))",
"_____no_output_____"
],
[
"offsets = []\nfor dur in range(1,time_horizon+1):\n for offset in range(200):\n stock_out_probs = []\n for d in range(1,dur+1):\n stock_out_prob = norm.cdf(-offset,slack_capacity*d,np.sqrt(2*mu_supply*d))\n stock_out_probs.append(stock_out_prob)\n overal_stockout_prob = np.mean(stock_out_probs)\n #print(overal_stockout_prob)\n if overal_stockout_prob < 0.05:\n break\n #print(dur,offset)\n offsets.append(offset)",
"_____no_output_____"
],
[
"plt.plot(offsets)",
"_____no_output_____"
],
[
"norm.cdf(-offset,mu,std)",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"mu",
"_____no_output_____"
],
[
"(-np.diff(probs)/norm.cdf(-offset,mu,std))[:50]",
"_____no_output_____"
],
[
"-np.diff(probs)/norm.cdf(-offset,mu,std)",
"_____no_output_____"
],
[
"offset",
"_____no_output_____"
],
[
"np.sum(last_day_stockouts)/(n_sims)",
"_____no_output_____"
],
[
"sum(stockouts)/(int(np.ceil(time_horizon))*n_sims)",
"_____no_output_____"
],
[
"np.sum(last_day_stockouts)",
"_____no_output_____"
],
[
"np.sum(last_day_stockouts)/sum(stockouts)",
"_____no_output_____"
],
[
"np.mean(stockouts)",
"_____no_output_____"
],
[
"stockouts = np.array(stockouts)",
"_____no_output_____"
],
[
"np.median(stockouts[stockouts > 0])",
"_____no_output_____"
],
[
"plt.hist(stockouts[stockouts > 0])",
"_____no_output_____"
],
[
"plt.hist(stockouts,bins=range(0,50,2))",
"_____no_output_____"
],
[
"2*time_horizon",
"_____no_output_____"
],
[
"norm.cdf(-offset,slack_capacity*10,np.sqrt(mu_supply*10))",
"_____no_output_____"
],
[
"int(np.ceil(time_horizon))",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7468bb07ec3f678e262c2fa711354212d01587 | 6,895 | ipynb | Jupyter Notebook | udacitycar/rnn/.ipynb_checkpoints/simple_rnn-checkpoint.ipynb | dougc333/DeepLearning | 0076f8490e25786494bbc7da54c21408c3c1aa7f | [
"Apache-2.0"
]
| null | null | null | udacitycar/rnn/.ipynb_checkpoints/simple_rnn-checkpoint.ipynb | dougc333/DeepLearning | 0076f8490e25786494bbc7da54c21408c3c1aa7f | [
"Apache-2.0"
]
| null | null | null | udacitycar/rnn/.ipynb_checkpoints/simple_rnn-checkpoint.ipynb | dougc333/DeepLearning | 0076f8490e25786494bbc7da54c21408c3c1aa7f | [
"Apache-2.0"
]
| null | null | null | 42.042683 | 1,277 | 0.578535 | [
[
[
"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nnum_epochs = 100\ntotal_series_length = 50000\ntruncated_backprop_length = 15\nstate_size = 4\nnum_classes = 2\necho_step = 3\nbatch_size = 5\nnum_batches = total_series_length//batch_size//truncated_backprop_length",
"_____no_output_____"
],
[
"from numpy import *\nfrom matplotlib.pyplot import *\nimport scipy.linalg\n\n# load the data\ntrainLen = 2000\ntestLen = 2000\ninitLen = 100\n\ndata = loadtxt('MackeyGlass_t17.txt')\n\n# plot some of it\nfigure(10).clear()\nplot(data[0:1000])\ntitle('A sample of data')\n\n# generate the ESN reservoir\ninSize = outSize = 1\nresSize = 1000\na = 0.3 # leaking rate\n\nrandom.seed(42)\nWin = (random.rand(resSize,1+inSize)-0.5) * 1\nW = random.rand(resSize,resSize)-0.5 \n# Option 1 - direct scaling (quick&dirty, reservoir-specific):\n#W *= 0.135 \n# Option 2 - normalizing and setting spectral radius (correct, slow):\nprint ('Computing spectral radius...',)\nrhoW = max(abs(linalg.eig(W)[0]))\nprint ('done.')\nW *= 1.25 / rhoW\n\n# allocated memory for the design (collected states) matrix\nX = zeros((1+inSize+resSize,trainLen-initLen))\n# set the corresponding target matrix directly\nYt = data[None,initLen+1:trainLen+1] \n\n# run the reservoir with the data and collect X\nx = zeros((resSize,1))\nfor t in range(trainLen):\n u = data[t]\n x = (1-a)*x + a*tanh( dot( Win, vstack((1,u)) ) + dot( W, x ) )\n if t >= initLen:\n X[:,t-initLen] = vstack((1,u,x))[:,0]\n \n# train the output\nreg = 1e-8 # regularization coefficient\nX_T = X.T\nWout = dot( dot(Yt,X_T), linalg.inv( dot(X,X_T) + \\\n reg*eye(1+inSize+resSize) ) )\n#Wout = dot( Yt, linalg.pinv(X) )\n\n# run the trained ESN in a generative mode. no need to initialize here, \n# because x is initialized with training data and we continue from there.\nY = zeros((outSize,testLen))\nu = data[trainLen]\nfor t in range(testLen):\n x = (1-a)*x + a*tanh( dot( Win, vstack((1,u)) ) + dot( W, x ) )\n y = dot( Wout, vstack((1,u,x)) )\n Y[:,t] = y\n # generative mode:\n u = y\n ## this would be a predictive mode:\n #u = data[trainLen+t+1] \n\n# compute MSE for the first errorLen time steps\nerrorLen = 500\nmse = sum( square( data[trainLen+1:trainLen+errorLen+1] - Y[0,0:errorLen] ) ) / errorLen\nprint ('MSE = ' + str( mse ))\n \n# plot some signals\nfigure(1).clear()\nplot( data[trainLen+1:trainLen+testLen+1], 'g' )\nplot( Y.T, 'b' )\ntitle('Target and generated signals $y(n)$ starting at $n=0$')\nlegend(['Target signal', 'Free-running predicted signal'])\n\nfigure(2).clear()\nplot( X[0:20,0:200].T )\ntitle('Some reservoir activations $\\mathbf{x}(n)$')\n\nfigure(3).clear()\nbar( range(1+inSize+resSize), Wout.T )\ntitle('Output weights $\\mathbf{W}^{out}$')\n\nshow()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code"
]
]
|
cb746b9d0407475c6d7b6e7976d1eb33987e8477 | 223,635 | ipynb | Jupyter Notebook | Mathematics/Mathematical Modeling/03.08-Manometer-Models-and-Dynamics.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
]
| null | null | null | Mathematics/Mathematical Modeling/03.08-Manometer-Models-and-Dynamics.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
]
| null | null | null | Mathematics/Mathematical Modeling/03.08-Manometer-Models-and-Dynamics.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
]
| 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | 308.462069 | 69,682 | 0.912156 | [
[
[
"<!--NOTEBOOK_HEADER-->\n*This notebook contains course material from [CBE30338](https://jckantor.github.io/CBE30338)\nby Jeffrey Kantor (jeff at nd.edu); the content is available [on Github](https://github.com/jckantor/CBE30338.git).\nThe text is released under the [CC-BY-NC-ND-4.0 license](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode),\nand code is released under the [MIT license](https://opensource.org/licenses/MIT).*",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Interacting Tanks](http://nbviewer.jupyter.org/github/jckantor/CBE30338/blob/master/notebooks/03.07-Interacting-Tanks.ipynb) | [Contents](toc.ipynb) | [Modeling and Control of a Campus Outbreak of Coronavirus COVID-19](http://nbviewer.jupyter.org/github/jckantor/CBE30338/blob/master/notebooks/03.09-COVID-19.ipynb) ><p><a href=\"https://colab.research.google.com/github/jckantor/CBE30338/blob/master/notebooks/03.08-Manometer-Models-and-Dynamics.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a><p><a href=\"https://raw.githubusercontent.com/jckantor/CBE30338/master/notebooks/03.08-Manometer-Models-and-Dynamics.ipynb\"><img align=\"left\" src=\"https://img.shields.io/badge/Github-Download-blue.svg\" alt=\"Download\" title=\"Download Notebook\"></a>",
"_____no_output_____"
],
[
"# Manometer Models and Dynamics",
"_____no_output_____"
],
[
"## Summary\n\nThis notebook demonstrates the modeling and interactive simulation of a u-tube manometer. This device demonstrates a variety of behaviors exhibited by a linear second order system. An interesting aspect of the problem is the opportunity for passive design of dynamics for a measurement device.",
"_____no_output_____"
],
[
"## Learning Goals\n\n* Develop linear differential equations models for mechanical systems from momentum/force balances. \n* Describe role of position and velocity as state variables in a dynamic model.\n* Describe undamped, underdamped, overdamped, and critically damped responses.\n* Represent a second order system in standard form with natural frequency and damping factor.\n* Describe second order response to sinusoidal input, and resonance.\n* Construct a state space representation of a second order linear differential equation.",
"_____no_output_____"
],
[
"## Initializations",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom scipy import linalg as la\nfrom ipywidgets import interact,interactive\nfrom control.matlab import *\n\n# scales for all subsequent plots\ntmax = 20\nymin = -0.02\nymax = +0.02\naxis = [0.0,tmax,ymin,ymax]\nt = np.linspace(0.0,tmax,1000)\n\n# physical properties\ng = 9.8 # m/s\nrho = 1000.0 # density of water kg/m^3\nnu = 1.0e-6 # kinematic viscosity of water m/s^2\n\n# system dimensions\nL = 7 # meters\nd = 0.08 # meters",
"_____no_output_____"
]
],
[
[
"## Model 1. Steady State Response to a Pressure Differential\n\nFor this first model we will that the ends of the u-tube are exposed to a pressure differential $\\Delta P$. How does the level in the tubes change?\n\nThe u-tube manometer of cross-sectional area $A$, filled with a liquid of density $\\rho$, the total length of the liquid column $L$. When the ends are open and exposed to the same environmental pressure $P$ the liquid levels in the two the legs of the device will reach the same level. We'll measure the levels in the tubes as a deviation $y$ from this equilibrium position.\n\nAt steady state the difference in the levels of the tubes will be $h$. The static pressure difference \n\n$$\\Delta P = \\rho g h$$\n\nor \n\n$$y = \\frac{\\Delta P}{\\rho g}$$\n\nThis is simple statics. Notice that neither the cross-sectional area or the length of the liquid column matter. This is the rationale behind the common water level.\n\n\n\n(By [Bd](https://de.wikipedia.org/wiki/User:Bd) at the [German language Wikipedia](https://de.wikipedia.org/wiki/), [CC BY-SA 3.0](http://creativecommons.org/licenses/by-sa/3.0/), [Link](https://commons.wikimedia.org/w/index.php?curid=46342405))",
"_____no_output_____"
]
],
[
[
"def model1(deltaP = 100.0):\n h = deltaP/(rho*g)\n plt.axis(axis)\n plt.plot(plt.xlim(),[h,h])\n plt.grid()\n plt.xlabel('Time [sec]')\n plt.ylabel('h [m]')\n plt.title('dP = {0:5.1f} Pascals'.format(deltaP))\n\ninteract(model1,deltaP=(-200,200,20.0));",
"_____no_output_____"
]
],
[
[
"## Model 2. Dynamic Response with Negligible Viscosity\n\nThe second model for the manometer includes the dynamics associated with moving a mass $m$ of the liquid column held within the manometer. For this model we will chose a different measure of displacem\n\n\nThe net force on the liquid column is due to the applied pressure differential, $A\\Delta P$, and the gravitational force due to the difference in liquid levels between the two arms of the manometer, $2 A \\rho g$. $A$ is the cross-sectional area. From Newton's law\n\n$$m \\frac{d^2y}{dt^2} = A \\Delta P - 2 A \\rho g y$$\n\nThe mass of liquid is $m = \\rho L A$ where $L$ is the total length of the liquid column. After canceling a common factor $A$, the result is an inhomogeneous linear second order differential equation\n\n$$ \\frac{d^2y}{dt^2} + \\frac{2 g}{L} y = \\frac{1}{\\rho L} \\Delta P$$\n\nAt steady state this model reduces to the static case outlined in model 1 above. The dynamic case corresponds to an undamped harmonic oscillator with an angular frequency\n\n$$\\omega = \\sqrt{\\frac{2 g}{L}}$$",
"_____no_output_____"
],
[
"For numerical solution using the scipy libraries, it is necessary to convert the second order differential equation to a system of first order differential equations. \n\n$$\\begin{align*}\n\\frac{dy}{dt} & = v \\\\\n\\frac{dv}{dt} & = -\\frac{2g}{L} y + \\frac{1}{\\rho L} \\Delta P \n\\end{align*}$$",
"_____no_output_____"
]
],
[
[
"def model2(deltaP=100, L = 7.0):\n\n def deriv(X,t):\n x,v = X\n xdot = v\n vdot = -(2*g/L)*x + deltaP/rho/L\n return [xdot,vdot]\n\n IC = [0,0]\n \n w = np.sqrt(2*g/L)\n print(\" natural frequency = {0:0.1f} rad/sec\".format(w))\n print(\"period of oscillation = {0:0.1f} seconds\".format(2*np.pi/w))\n \n sol = odeint(deriv,IC,t)\n plt.axis(axis)\n plt.plot(t,sol)\n plt.grid()\n plt.xlabel('Time [sec]')\n plt.ylabel('y [m], v[m/s]')\n plt.title('dP = {0:5.1f} Pascals, L = {1:4.2f} meters'.format(deltaP,L))\n plt.legend(['Position','Velocity'])\n \ninteract(model2, deltaP = (-200,200,1), L = (0.2,10,0.1));",
" natural frequency = 1.7 rad/sec\nperiod of oscillation = 3.8 seconds\n"
]
],
[
[
"## Model 3. Dynamic Response with Viscous Dissipation\n\nThis third model for manometer incorporates the energy loss due to viscous dissipation in fluid motion. The pressure drop due to the laminar flow of incompressible Newtonian fluid in a long pipe with circular cross-section is given by the Hagen-Poiseuille equation\n\n$$\\Delta P_{drag} = \\frac{32 \\mu L v}{d^2}$$\n\nwhere $\\mu$ is the dynamic viscosity and $d$ is pipe diameter. Doing a balance of forces acting on the fluid column\n\n$$\\rho AL\\frac{d^2y}{dt^2} + \\frac{32\\mu L A}{d^2}v + 2 A \\rho g y = A \\Delta P$$\n\nDenoting $\\nu = \\frac{\\mu}{\\rho}$ as the kinematic viscosity, substituting for velocity $\\frac{dy}{dt} = v$ leaves\n\n$$\\frac{d^2y}{dt^2} + \\frac{32 \\nu }{d^2}\\frac{dy}{dt} + \\frac{2g}{L} y = \\frac{1}{\\rho L} \\Delta P$$\n\nThis can be recast as a pair of first-order linear differential equations\n\n$$\\begin{align*}\n\\frac{dy}{dt} & = v \\\\\n\\frac{dv}{dt} & = -\\frac{2g}{L} y - \\frac{32 \\nu }{d^2}v + \\frac{1}{\\rho L} \\Delta P \n\\end{align*}$$",
"_____no_output_____"
]
],
[
[
"def model3(dP = 100.0, L = 7.0, d = 0.008):\n\n def deriv(X,t):\n y,v = X\n ydot = v\n vdot = -(2*g/L)*y - (32*nu/d**2)*v + dP/rho/L\n return [ydot,vdot]\n\n IC = [0,0]\n sol = odeint(deriv,IC,t)\n plt.axis(axis)\n plt.plot(t,sol)\n plt.grid()\n plt.xlabel('Time [sec]')\n plt.ylabel('y [m], v[m/s]')\n plt.title('dP = {0:5.1f} bars, L = {1:4.2f} meters, d = {2:5.3f} meters'.format(dP,L,d))\n plt.legend(['Position','Velocity'])\n\nw = interactive(model3, dP=(-200,200,20), L = (0.2,30,0.1), d=(0.001,0.020,0.001));\nw.children[2].readout_format = '.3f'\nw",
"_____no_output_____"
]
],
[
[
"## Model 4. Second Order System in Standard Form\n\nStandard form of a damped second order system is\n\n$$\\tau^2\\frac{d^2y}{dt^2} + 2\\zeta\\tau\\frac{dy}{dt} + y = K u(t)$$\n\nExamples include buildings, car suspensions, other structures. Starting with the model equation \n\n$$\\frac{d^2y}{dt^2} + \\frac{32 \\nu }{d^2}\\frac{dy}{dt} + \\frac{2g}{L} y = \\frac{1}{\\rho L} \\Delta P$$\n\nThe first step is to normalize the zeroth order term in $y$ and compare to the second-order model in standard form\n\n$$\\underbrace{\\frac{L}{2g}}_{\\tau^2}\\frac{d^2y}{dt^2} + \\underbrace{\\frac{16 \\nu L}{g d^2}}_{2\\zeta\\tau}\\frac{dy}{dt} + y = \\underbrace{\\frac{1}{2\\rho g}}_K \\underbrace{\\Delta P}_{u(t)}$$\n\nSolving for the coefficients in standard form\n\n$$\\begin{align*}\nK & = \\frac{1}{2\\rho g}\\\\\n\\tau & = \\sqrt{\\frac{L}{2g}} \\\\\n\\zeta & = \\frac{8\\nu}{d^2}\\sqrt{\\frac{2L}{g}}\n\\end{align*}$$",
"_____no_output_____"
],
[
"#### Undamped ($\\zeta = 0$)",
"_____no_output_____"
],
[
"#### Underdamped ($\\zeta < 1$)",
"_____no_output_____"
],
[
"#### Critically damped ($\\zeta = 1$)\n\n$$d_\\text{critical damping} = \\left(\\frac{128 \\nu^2 L}{g}\\right)^\\frac{1}{4}$$",
"_____no_output_____"
],
[
"#### Overdamped ($\\zeta > 1$)",
"_____no_output_____"
]
],
[
[
"K = 1/2/rho/g\ntau = np.sqrt(L/2/g)\nzeta = (8*nu/d**2)*np.sqrt(2*L/g)\nprint(K,tau,zeta)\n\n\ndcritical = (128*nu*nu*L/g)**0.25\nprint(dcritical)",
"5.10204081632653e-05 0.597614304667 0.00149403576167\n0.0030922207027757817\n"
]
],
[
[
"## Model 5. Dynamic Response to Sinusoidal Input\n\n$$\\frac{d^2y}{dt^2} + \\frac{32 \\nu }{d^2}\\frac{dy}{dt} + \\frac{2g}{L} y = \\frac{1}{\\rho L} \\Delta P$$",
"_____no_output_____"
]
],
[
[
"def model4(dP=100.0, L=1.0, d=0.10, freq=0.5):\n\n def deriv(X,t):\n x,v = X\n xdot = v\n vdot = -(2*g/L)*x - (32*nu/d**2)*v + dP*np.sin(2.0*np.pi*freq*t)/rho/L\n return [xdot,vdot]\n\n IC = [0,0]\n sol = odeint(deriv,IC,t)\n plt.axis(axis)\n plt.plot(t,sol[:,1])\n plt.plot(t,dP*np.sin(2.0*np.pi*freq*t)/10000)\n plt.grid()\n plt.xlabel('Time [sec]')\n plt.ylabel('y [m], P[bars/10000]')\n plt.title('dP = {0:5.1f} bars, L = {1:4.2f} meters, d = {2:5.3f} meters'.format(dP,L,d))\n plt.legend(['Position','Pressure/10000'])\n\ninteract(model4, dP=(-200,200,20), L = (0.2,5,0.1), d=(0.01,0.20,0.002), freq=(0,4,0.01));",
"_____no_output_____"
]
],
[
[
"## Model 6. State Space Representation\n\nState space models are widely used in textbooks, software, and the research literature to represent linear systems. It's a generic model that represents a system with inputs and outputs. Here's how to recast out manometer model is time-varying pressure as a state model where the liquid level is the measured output.\n\nStart with the model written as a differential equation\n\n$$\\frac{d^2y}{dt^2} + \\frac{32\\nu}{d^2}\\frac{dy}{dt} + \\frac{2g}{L} y = \\frac{1}{\\rho L} \\Delta P$$\n\nAssemble the dependent variables in a vector, and rewrite using matrix/vector operations.\n\n$$\\begin{align*}\n\\frac{d}{dt}\n\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]\n& =\n\\left[\\begin{array}{cc}0 & 1 \\\\ - \\frac{2g}{L} & -\\frac{32\\nu}{d^2} \\end{array}\\right]\n\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]\n+\n\\left[\\begin{array}{c}0 \\\\ \\frac{1}{\\rho L}\\end{array}\\right]\n\\left[\\Delta P\\right] \\\\\n\\left[y\\right]\n& =\n\\left[\\begin{array}{c} 1 & 0\\end{array}\\right]\n\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]\n+\n\\left[0\\right]\n\\left[\\Delta P\\right]\n\\end{align*}\n$$\n\nUse standard symbols to label the vectors and matrices.\n\n$$\\begin{align*}\n\\frac{d}{dt}\n\\underbrace{\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]}_{x}\n& =\n\\underbrace{\\left[\\begin{array}{cc}0 & 1 \\\\ - \\frac{2g}{L} & -\\frac{32\\nu}{d^2} \\end{array}\\right]}_{A}\n\\underbrace{\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]}_{x}\n+\n\\underbrace{\\left[\\begin{array}{c}0 \\\\ \\frac{1}{\\rho L}\\end{array}\\right]}_{B}\n\\underbrace{\\left[\\Delta P\\right]}_{u} \\\\\n\\underbrace{\\left[y\\right]}_{y} \n& =\n\\underbrace{\\left[\\begin{array}{c} 1 & 0\\end{array}\\right]}_{C}\n\\underbrace{\\left[\\begin{array}{c}y \\\\ v\\end{array}\\right]}_{x}\n+\n\\underbrace{\\left[0\\right]}_{D}\n\\underbrace{\\left[\\Delta P\\right]}_{u}\n\\end{align*}\n$$\n\nThe result is a model of a linear system in a standard state space representation. \n\n$$\\begin{align*}\n\\frac{dx}{dt} & = Ax + Bu \\\\\ny & = Cx + Du\n\\end{align*}$$\n\n",
"_____no_output_____"
]
],
[
[
"def model6(dP=100, L=1.0, d=0.10):\n\n A = [[0,1],[-2*g/L, -32*nu/(d**2)]]\n B = [[0],[1/rho/L]]\n C = [[1,0]]\n D = [[0]]\n \n sys = ss(A,B,C,D)\n y,tout = step(sys,t);\n\n plt.axis(axis)\n plt.plot(t,dP*y)\n plt.grid()\n plt.xlabel('Time [sec]')\n plt.ylabel('y [m]')\n plt.title('dP = {0:5.1f} bars, L = {1:4.2f} meters, d = {2:5.3f} meters'.format(dP,L,d))\n plt.legend(['Position'])\n \ninteract(model6, dP=(-200,200,1), L = (0.2,5,0.1), d=(0.01,0.20,0.002));",
"_____no_output_____"
],
[
"w = np.logspace(0,1,200)\n\ndef model6(L=1.0, d=0.10):\n\n A = [[0,1],[-2*g/L, -32*nu/(d**2)]]\n B = [[0],[1/rho/L]]\n C = [[1,0]]\n D = [[0]]\n\n mano = ss(A,B,C,D)\n bode(mano,w);\n \ninteract(model6, L = (0.2,5,0.1), d=(0.01,0.20,0.002));",
"_____no_output_____"
],
[
"w = np.logspace(0,1,200)\n\ndef model6(L=1.0, d=0.10):\n\n A = [[0,1],[-2*g/L, -128*nu/(np.pi*d**4)]]\n B = [[0],[1/rho/L]]\n C = [[1,0]]\n D = [[0]]\n\n e_vals,e_vecs = la.eig(A)\n \n plt.axis([-5,5,-5,5])\n plt.axis('equal')\n plt.plot(e_vals.real,e_vals.imag,'o')\n \n \n \ninteract(model6, L = (0.2,5,0.1), d=(0.01,0.20,0.002));",
"_____no_output_____"
]
],
[
[
"<!--NAVIGATION-->\n< [Interacting Tanks](http://nbviewer.jupyter.org/github/jckantor/CBE30338/blob/master/notebooks/03.07-Interacting-Tanks.ipynb) | [Contents](toc.ipynb) | [Modeling and Control of a Campus Outbreak of Coronavirus COVID-19](http://nbviewer.jupyter.org/github/jckantor/CBE30338/blob/master/notebooks/03.09-COVID-19.ipynb) ><p><a href=\"https://colab.research.google.com/github/jckantor/CBE30338/blob/master/notebooks/03.08-Manometer-Models-and-Dynamics.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open in Google Colaboratory\"></a><p><a href=\"https://raw.githubusercontent.com/jckantor/CBE30338/master/notebooks/03.08-Manometer-Models-and-Dynamics.ipynb\"><img align=\"left\" src=\"https://img.shields.io/badge/Github-Download-blue.svg\" alt=\"Download\" title=\"Download Notebook\"></a>",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb747d5799ba8ccca35071d9cb6617294549af10 | 727 | ipynb | Jupyter Notebook | network_machine_learning_in_python/applications/ch10/anomaly-detection.ipynb | Laknath1996/graph-stats-book | 4b10c2f99dbfb5e05a72c98130f8c4338d7c9a21 | [
"MIT"
]
| 10 | 2020-09-15T19:09:53.000Z | 2022-03-17T21:24:14.000Z | network_machine_learning_in_python/applications/ch10/anomaly-detection.ipynb | Laknath1996/graph-stats-book | 4b10c2f99dbfb5e05a72c98130f8c4338d7c9a21 | [
"MIT"
]
| 30 | 2020-09-15T19:15:11.000Z | 2022-03-10T15:33:24.000Z | network_machine_learning_in_python/applications/ch10/anomaly-detection.ipynb | Laknath1996/graph-stats-book | 4b10c2f99dbfb5e05a72c98130f8c4338d7c9a21 | [
"MIT"
]
| 2 | 2021-04-12T05:08:00.000Z | 2021-10-04T09:42:21.000Z | 18.175 | 107 | 0.54608 | [
[
[
"# Anomaly Detection",
"_____no_output_____"
],
[
"- Guodong's stuff about vertex anomaly detection in time series: https://arxiv.org/pdf/2008.10055.pdf",
"_____no_output_____"
]
]
]
| [
"markdown"
]
| [
[
"markdown",
"markdown"
]
]
|
cb74820140d66b41401ed8febd7d9b680a536d4b | 181,524 | ipynb | Jupyter Notebook | Regression/Linear Models/RidgeRegression_Scaled.ipynb | PrajwalNimje1997/ds-seed | db23e5d4f99c145ed889b7fc96f7c2239df78eef | [
"Apache-2.0"
]
| 2 | 2021-07-28T15:26:40.000Z | 2021-07-29T04:14:35.000Z | Regression/Linear Models/RidgeRegression_Scaled.ipynb | PrajwalNimje1997/ds-seed | db23e5d4f99c145ed889b7fc96f7c2239df78eef | [
"Apache-2.0"
]
| 1 | 2021-07-30T06:00:30.000Z | 2021-07-30T06:00:30.000Z | Regression/Linear Models/RidgeRegression_Scaled.ipynb | PrajwalNimje1997/ds-seed | db23e5d4f99c145ed889b7fc96f7c2239df78eef | [
"Apache-2.0"
]
| null | null | null | 317.905429 | 79,090 | 0.801977 | [
[
[
"# Ridge Regressor with StandardScaler\n### Required Packages",
"_____no_output_____"
]
],
[
[
"import warnings \r\nimport numpy as np \r\nimport pandas as pd \r\nimport seaborn as se\r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.linear_model import Ridge\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import StandardScaler \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error \r\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"### Initialization\nFilepath of CSV file",
"_____no_output_____"
]
],
[
[
"#filepath\r\nfile_path= \"\"",
"_____no_output_____"
]
],
[
[
"List of features which are required for model training .",
"_____no_output_____"
]
],
[
[
"#x_values\r\nfeatures=[]",
"_____no_output_____"
]
],
[
[
"Target feature for prediction.",
"_____no_output_____"
]
],
[
[
"#y_value\r\ntarget=''",
"_____no_output_____"
]
],
[
[
"### Data fetching\n\npandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.",
"_____no_output_____"
]
],
[
[
"df=pd.read_csv(file_path)\r\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.",
"_____no_output_____"
]
],
[
[
"X=df[features]\r\nY=df[target]",
"_____no_output_____"
]
],
[
[
"### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n",
"_____no_output_____"
]
],
[
[
"def NullClearner(df):\r\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\r\n df.fillna(df.mean(),inplace=True)\r\n return df\r\n elif(isinstance(df, pd.Series)):\r\n df.fillna(df.mode()[0],inplace=True)\r\n return df\r\n else:return df\r\ndef EncodeX(df):\r\n return pd.get_dummies(df)",
"_____no_output_____"
]
],
[
[
"Calling preprocessing functions on the feature and target set.\n",
"_____no_output_____"
]
],
[
[
"x=X.columns.to_list()\r\nfor i in x:\r\n X[i]=NullClearner(X[i])\r\nX=EncodeX(X)\r\nY=NullClearner(Y)\r\nX.head()",
"_____no_output_____"
]
],
[
[
"#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.",
"_____no_output_____"
]
],
[
[
"f,ax = plt.subplots(figsize=(18, 18))\r\nmatrix = np.triu(X.corr())\r\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\r\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.",
"_____no_output_____"
]
],
[
[
"x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)",
"_____no_output_____"
]
],
[
[
"### Model\nRidge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients. The ridge coefficients minimize a penalized residual sum of squares:\n\n\\begin{equation*}\n\\min_{w} || X w - y||_2^2 + \\alpha ||w||_2^2\n\\end{equation*}\n\nThe complexity parameter controls the amount of shrinkage: the larger the value of , the greater the amount of shrinkage and thus the coefficients become more robust to collinearity.\n\nThis model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape (n_samples, n_targets)).\n\n#### Model Tuning Parameters\n\n> **alpha** -> Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization.\n\n> **solver** -> Solver to use in the computational routines {‘auto’, ‘svd’, ‘cholesky’, ‘lsqr’, ‘sparse_cg’, ‘sag’, ‘saga’}",
"_____no_output_____"
]
],
[
[
"Input=[(\"standard\",StandardScaler()),(\"model\",Ridge(random_state=123))]\r\nmodel=Pipeline(Input)\r\nmodel.fit(x_train,y_train)",
"_____no_output_____"
]
],
[
[
"#### Model Accuracy\n\nWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.\n\n> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.",
"_____no_output_____"
]
],
[
[
"print(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))",
"Accuracy score 92.35 %\n\n"
]
],
[
[
"> **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. \n\n> **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. \n\n> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ",
"_____no_output_____"
]
],
[
[
"y_pred=model.predict(x_test)\r\nprint(\"R2 Score: {:.2f} %\".format(r2_score(y_test,y_pred)*100))\r\nprint(\"Mean Absolute Error {:.2f}\".format(mean_absolute_error(y_test,y_pred)))\r\nprint(\"Mean Squared Error {:.2f}\".format(mean_squared_error(y_test,y_pred)))",
"R2 Score: 92.35 %\nMean Absolute Error 3.74\nMean Squared Error 22.13\n"
]
],
[
[
"#### Prediction Plot\n\nFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.\nFor the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(14,10))\r\nplt.plot(range(20),y_test[0:20], color = \"green\")\r\nplt.plot(range(20),model.predict(x_test[0:20]), color = \"red\")\r\nplt.legend([\"Actual\",\"prediction\"]) \r\nplt.title(\"Predicted vs True Value\")\r\nplt.xlabel(\"Record number\")\r\nplt.ylabel(target)\r\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Creator: Thilakraj Devadiga , Github: [Profile](https://github.com/Thilakraj1998)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
]
|
cb748a16761f633ea9d06bd099caf5c7a7f5995d | 539,992 | ipynb | Jupyter Notebook | simulator/simulator-topologies-fault-tolerance/latency/simulator_latency.ipynb | giordano-lucas/DeAI | d389010e371473d0e1262176d30ceb36acef7c5a | [
"Apache-2.0"
]
| 44 | 2020-06-25T07:35:39.000Z | 2022-02-18T12:29:45.000Z | simulator/simulator-topologies-fault-tolerance/latency/simulator_latency.ipynb | giordano-lucas/DeAI | d389010e371473d0e1262176d30ceb36acef7c5a | [
"Apache-2.0"
]
| 152 | 2020-06-23T23:30:15.000Z | 2022-02-25T10:22:30.000Z | simulator/simulator-topologies-fault-tolerance/latency/simulator_latency.ipynb | giordano-lucas/DeAI | d389010e371473d0e1262176d30ceb36acef7c5a | [
"Apache-2.0"
]
| 11 | 2020-08-11T21:19:49.000Z | 2022-01-30T17:15:31.000Z | 219.330626 | 75,896 | 0.89785 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom latency import run_latency, run_latency_changing_topo, run_latency_per_round, run_latency_per_round_changing_topo, nodes_latency\n\nimport sys\nsys.path.append('..') \nfrom utils import create_mixing_matrix, load_data, run, consensus",
"_____no_output_____"
]
],
[
[
"# Base case",
"_____no_output_____"
]
],
[
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs = run(train_loader, test_loader, comm_matrix, num_rounds, epochs, num_clients)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs)\nplt.show()",
"0-th round\naverage train loss 2.26 | test loss 2.25 | test acc: 0.433\n1-th round\naverage train loss 4.01 | test loss 1.51 | test acc: 0.724\n2-th round\naverage train loss 5.11 | test loss 0.77 | test acc: 0.799\n3-th round\naverage train loss 5.91 | test loss 0.522 | test acc: 0.864\n4-th round\naverage train loss 6.51 | test loss 0.4 | test acc: 0.895\n5-th round\naverage train loss 6.97 | test loss 0.33 | test acc: 0.911\n6-th round\naverage train loss 7.37 | test loss 0.277 | test acc: 0.923\n7-th round\naverage train loss 7.73 | test loss 0.245 | test acc: 0.927\n8-th round\naverage train loss 8.05 | test loss 0.22 | test acc: 0.934\n9-th round\naverage train loss 8.31 | test loss 0.201 | test acc: 0.938\n[0.022937, 0.020871, 0.020657, 0.021288, 0.022118, 0.021407, 0.023051, 0.020801, 0.020571, 0.021497, 0.021515, 0.018482, 0.018877, 0.020188, 0.020132, 0.019913, 0.019367, 0.018818, 0.019476, 0.019096, 0.020575, 0.018511, 0.019395, 0.0197, 0.019288, 0.01972, 0.019619, 0.020222, 0.019839, 0.018965, 0.022856, 0.019458, 0.018501, 0.017988, 0.019381, 0.020819, 0.019737, 0.020234, 0.021063, 0.020606, 0.021582, 0.01938, 0.018452, 0.018966, 0.019629, 0.019538, 0.018751, 0.020069, 0.019772, 0.017609, 0.020551, 0.019726, 0.019692, 0.020394, 0.022021, 0.019908, 0.019228, 0.02008, 0.020942, 0.017743, 0.020453, 0.018935, 0.020256, 0.021853, 0.022969, 0.020098, 0.018785, 0.021128, 0.022534, 0.01897, 0.021843, 0.018924, 0.020028, 0.020767, 0.024153, 0.020876, 0.02062, 0.021726, 0.022239, 0.019734, 0.020421, 0.019987, 0.021077, 0.020201, 0.021531, 0.018776, 0.018257, 0.018866, 0.018718, 0.017566, 0.019147, 0.020006, 0.01933, 0.017455, 0.01877, 0.018132, 0.01766, 0.017618, 0.017312, 0.01638]\n"
]
],
[
[
"# Latency with fixed topology",
"_____no_output_____"
]
],
[
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 2)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs2 = run_latency(train_loader, test_loader, comm_matrix, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs2)\nplt.show()",
"0-th round\naverage train loss 2.3 | test loss 2.29 | test acc: 0.127\n1-th round\naverage train loss 4.57 | test loss 2.28 | test acc: 0.427\n2-th round\naverage train loss 6.7 | test loss 2.19 | test acc: 0.659\n3-th round\naverage train loss 8.23 | test loss 1.58 | test acc: 0.783\n4-th round\naverage train loss 9.41 | test loss 1.13 | test acc: 0.821\n5-th round\naverage train loss 10.3 | test loss 0.907 | test acc: 0.859\n6-th round\naverage train loss 11.2 | test loss 0.758 | test acc: 0.884\n7-th round\naverage train loss 11.9 | test loss 0.693 | test acc: 0.901\n8-th round\naverage train loss 12.6 | test loss 0.64 | test acc: 0.903\n9-th round\naverage train loss 13.3 | test loss 0.58 | test acc: 0.913\n[1.081684, 1.038212, 0.730912, 0.356245, 0.149424, 0.134768, 0.16335, 0.254151, 0.471264, 0.753668, 1.060893, 0.748097, 0.247592, 0.042085, 0.264553, 0.047126, 0.113146, 0.375693, 0.675868, 0.905346, 0.915556, 0.422744, 0.038956, 1.486416, 6.22252, 0.950911, 0.022734, 0.277226, 0.64629, 0.864005, 0.75506, 0.25335, 0.207281, 6.22252, 6.22252, 6.580249, 0.070551, 0.12471, 0.444488, 0.685809, 0.654322, 0.343466, 0.024422, 0.875871, 6.580249, 0.569273, 0.128203, 0.033579, 0.16106, 0.427054, 0.541963, 0.510281, 0.209637, 0.021152, 0.129898, 0.256358, 0.466579, 0.563734, 0.05811, 0.15337, 0.428118, 0.647788, 0.522318, 0.222743, 0.039511, 0.337602, 2.182921, 6.265857, 1.004021, 0.017931, 0.424446, 0.768842, 0.766219, 0.458058, 0.075756, 0.454333, 6.265857, 6.265857, 6.589137, 0.013536, 0.583307, 0.904866, 0.912355, 0.622558, 0.201249, 0.058716, 1.055523, 6.589137, 0.209989, 0.077779, 0.802841, 0.978975, 0.877554, 0.578869, 0.261874, 0.07013, 0.029374, 0.028814, 0.062264, 0.341974]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 4)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs4 = run_latency(train_loader, test_loader, comm_matrix, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs4)\nplt.show()",
"0-th round\naverage train loss 2.28 | test loss 2.28 | test acc: 0.198\n1-th round\naverage train loss 4.48 | test loss 2.25 | test acc: 0.610\n2-th round\naverage train loss 6.18 | test loss 1.92 | test acc: 0.743\n3-th round\naverage train loss 7.48 | test loss 1.61 | test acc: 0.804\n4-th round\naverage train loss 8.57 | test loss 1.45 | test acc: 0.835\n5-th round\naverage train loss 9.56 | test loss 1.31 | test acc: 0.879\n6-th round\naverage train loss 10.5 | test loss 1.27 | test acc: 0.889\n7-th round\naverage train loss 11.4 | test loss 1.2 | test acc: 0.897\n8-th round\naverage train loss 12.3 | test loss 1.17 | test acc: 0.906\n9-th round\naverage train loss 13.1 | test loss 1.13 | test acc: 0.910\n[0.02526, 0.479344, 0.704709, 0.211458, 0.41053, 3.43922, 1.750594, 3.667331, 3.451542, 3.671544, 0.174052, 0.567635, 0.526685, 0.012606, 3.43922, 3.43922, 3.669038, 1.222978, 3.671544, 0.126492, 0.236401, 0.500143, 0.582298, 0.197174, 0.145288, 3.669038, 0.08534, 0.048122, 0.040456, 0.060525, 0.044334, 0.150605, 0.529081, 0.604149, 0.352899, 0.234335, 0.314391, 0.275165, 0.143384, 0.064235, 3.562664, 0.159793, 0.33151, 0.936313, 1.145658, 1.136305, 0.98778, 0.556988, 0.112139, 0.359942, 3.562664, 3.694372, 0.320988, 1.220526, 1.730611, 1.767922, 1.348274, 0.568266, 0.032384, 3.694372, 3.694372, 0.032536, 0.712532, 1.564866, 1.988638, 1.848906, 1.229867, 0.467138, 0.078116, 0.1658, 0.060363, 0.384143, 1.104174, 1.655564, 1.716721, 1.287779, 0.587437, 0.111262, 0.16502, 0.131968, 0.083167, 0.520311, 1.11913, 1.276965, 0.940629, 0.42881, 0.085123, 0.644078, 3.509927, 0.534664, 0.063229, 0.358282, 0.791233, 0.596714, 0.122921, 0.062427, 0.588212, 3.509927, 3.455553, 3.67239]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 8)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs8 = run_latency(train_loader, test_loader, comm_matrix, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs8)\nplt.show()",
"0-th round\naverage train loss 2.29 | test loss 2.29 | test acc: 0.345\n1-th round\naverage train loss 4.55 | test loss 2.29 | test acc: 0.180\n2-th round\naverage train loss 6.63 | test loss 2.28 | test acc: 0.375\n3-th round\naverage train loss 8.44 | test loss 2.22 | test acc: 0.646\n4-th round\naverage train loss 10 | test loss 2.16 | test acc: 0.753\n5-th round\naverage train loss 11.4 | test loss 2.13 | test acc: 0.770\n6-th round\naverage train loss 12.8 | test loss 2.11 | test acc: 0.822\n7-th round\naverage train loss 14.2 | test loss 2.09 | test acc: 0.823\n8-th round\naverage train loss 15.5 | test loss 2.09 | test acc: 0.840\n9-th round\naverage train loss 16.8 | test loss 2.08 | test acc: 0.845\n[0.096552, 1.060949, 1.621927, 0.988897, 0.017448, 0.790157, 0.051155, 0.247516, 0.021037, 0.767636, 0.720432, 0.072728, 0.678508, 0.240893, 0.790157, 0.790157, 0.884934, 0.065111, 0.767636, 0.767636, 0.858017, 0.899066, 0.354525, 0.327395, 0.038357, 0.884934, 0.120282, 0.097914, 0.310638, 0.804869, 0.832919, 0.094922, 0.230908, 0.094126, 0.180495, 0.174642, 0.209806, 0.105469, 0.84294, 0.84294, 0.056255, 0.160765, 0.0809, 0.808107, 0.079976, 0.088328, 0.092138, 0.293911, 0.886089, 0.895954, 0.071662, 0.076529, 0.808107, 0.808107, 0.888709, 0.849372, 0.43692, 0.886089, 0.886089, 0.904775, 0.8698, 0.779581, 0.112728, 0.888709, 0.849372, 0.849372, 0.854426, 0.791719, 0.904775, 0.190512, 1.905339, 1.939528, 1.271677, 0.484519, 0.020163, 0.854426, 0.866315, 0.90076, 0.227752, 0.894008, 2.253714, 2.545038, 2.193747, 1.317801, 0.461086, 0.022249, 0.90076, 0.184371, 0.65236, 1.201081, 1.293188, 1.968074, 2.060343, 1.356239, 0.478221, 0.151449, 0.147505, 0.337709, 0.393758, 0.53923]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 16)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs16 = run_latency(train_loader, test_loader, comm_matrix, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs16)\nplt.show()",
"0-th round\naverage train loss 2.28 | test loss 2.29 | test acc: 0.114\n1-th round\naverage train loss 4.54 | test loss 2.3 | test acc: 0.114\n2-th round\naverage train loss 6.71 | test loss 2.3 | test acc: 0.114\n3-th round\naverage train loss 8.82 | test loss 2.3 | test acc: 0.114\n4-th round\naverage train loss 10.9 | test loss 2.3 | test acc: 0.114\n5-th round\naverage train loss 12.9 | test loss 2.3 | test acc: 0.114\n6-th round\naverage train loss 15 | test loss 2.3 | test acc: 0.114\n7-th round\naverage train loss 17 | test loss 2.3 | test acc: 0.114\n8-th round\naverage train loss 19 | test loss 2.3 | test acc: 0.114\n9-th round\naverage train loss 21 | test loss 2.3 | test acc: 0.114\n[0.055916, 0.057801, 0.057915, 0.057996, 0.054914, 0.057937, 0.05797, 0.058008, 0.053158, 0.056073, 0.056234, 0.057972, 0.057345, 0.057977, 0.057982, 0.057982, 0.058, 0.141494, 0.002985, 0.057626, 0.054584, 0.057366, 0.057328, 0.057839, 0.057982, 0.058011, 0.486926, 0.292018, 0.003544, 0.05296, 0.05296, 0.010448, 0.057879, 0.057982, 0.747363, 0.874164, 0.692278, 0.107749, 0.05296, 0.05296, 0.055815, 0.042255, 0.690299, 1.225885, 1.399516, 1.004457, 0.20143, 0.047336, 0.056301, 0.056987, 0.055815, 0.057573, 1.074451, 1.512002, 1.193995, 0.259996, 0.047336, 0.045789, 0.053777, 0.056715, 0.057573, 0.200632, 0.920641, 1.047299, 0.268973, 0.046283, 0.004949, 0.055454, 0.057156, 0.026278, 0.028329, 0.050472, 0.26954, 0.185446, 0.046283, 0.046283, 0.055564, 0.037899, 0.046429, 0.056607, 0.04916, 0.050472, 0.056465, 0.001146, 0.054914, 0.055564, 0.043274, 0.046171, 0.053678, 0.056607, 0.054155, 0.05642, 0.057996, 0.054914, 0.054914, 0.05739, 0.058008, 0.055047, 0.055047, 0.056746]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 32)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs32 = run_latency(train_loader, test_loader, comm_matrix, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs32)\nplt.show()",
"0-th round\naverage train loss 2.27 | test loss 2.3 | test acc: 0.201\n1-th round\naverage train loss 4.55 | test loss 2.3 | test acc: 0.114\n2-th round\naverage train loss 6.82 | test loss 2.3 | test acc: 0.114\n3-th round\naverage train loss 9.12 | test loss 2.3 | test acc: 0.114\n4-th round\naverage train loss 11.4 | test loss 2.3 | test acc: 0.114\n5-th round\naverage train loss 13.7 | test loss 2.3 | test acc: 0.114\n6-th round\naverage train loss 16 | test loss 2.3 | test acc: 0.114\n7-th round\naverage train loss 18.3 | test loss 2.3 | test acc: 0.114\n8-th round\naverage train loss 20.6 | test loss 2.3 | test acc: 0.114\n9-th round\naverage train loss 22.9 | test loss 2.3 | test acc: 0.114\n[8e-06, 1.2e-05, 3e-06, 6e-06, 6e-06, 7e-06, 1.2e-05, 7e-06, 1.1e-05, 3e-06, 5e-06, 0.000259, 0.000296, 1.2e-05, 1.1e-05, 1.2e-05, 0.000597, 0.000584, 9e-06, 2e-06, 3e-06, 1.1e-05, 0.000278, 1.1e-05, 6e-06, 1.2e-05, 7e-06, 0.00027, 5e-06, 3e-06, 1.1e-05, 0.000213, 1e-05, 7e-06, 5e-06, 5e-06, 7e-06, 7e-06, 0.000533, 1.1e-05, 0.000321, 1e-05, 8e-06, 8e-06, 8e-06, 1.1e-05, 6e-06, 4e-06, 1.1e-05, 0.000435, 7e-06, 9e-06, 8e-06, 8e-06, 1e-05, 1.2e-05, 9e-06, 6e-06, 1.2e-05, 0.000246, 4e-06, 9e-06, 1e-05, 1e-05, 1.2e-05, 7e-06, 9e-06, 6e-06, 1.2e-05, 1.1e-05, 1.1e-05, 9e-06, 9e-06, 1.2e-05, 8e-06, 7e-06, 9e-06, 9e-06, 1.3e-05, 0.000318, 1.1e-05, 0.000123, 6e-06, 0.000331, 0.000223, 1.2e-05, 5.7e-05, 7e-06, 0.000266, 9e-06, 5e-06, 6e-06, 8e-06, 1.1e-05, 0.000103, 1.2e-05, 7e-06, 7e-06, 8e-06, 5e-06]\n"
],
[
"fig, ax = plt.subplots(1, figsize=(12, 9))\nax.set_ylim([0, 1])\nx = np.array(range(10))\nax.plot(x, accs, color=\"red\", label=\"base case\")\nax.plot(x, accs2, color=\"lime\", label=\"two delayed nodes\")\nax.plot(x, accs4, color=\"green\", label=\"four delayed nodes\")\nax.plot(x, accs8, color=\"purple\", label=\"eight delayed nodes\")\nax.plot(x, accs16, color=\"blue\", label=\"sixteen delayed nodes\")\nax.plot(x, accs32, color=\"cyan\", label=\"thirty-two delayed nodes\")\n\nplt.legend(loc=\"lower right\", title=\"Number of delayed nodes\")\nplt.title(\"Accuracy curve depending on number of delayed nodes\")\nplt.xlabel(\"Round\")\nplt.ylabel(\"Accuracy\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Latency with changing topology",
"_____no_output_____"
]
],
[
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 2)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs2_ = run_latency_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs2_)\nplt.show()",
"old topo: grid, new topo: grid\n0-th round\naverage train loss 2.24 | test loss 2.24 | test acc: 0.521\nold topo: grid, new topo: grid\n1-th round\naverage train loss 4.04 | test loss 1.8 | test acc: 0.746\nold topo: grid, new topo: ring\n2-th round\naverage train loss 5.24 | test loss 1.22 | test acc: 0.802\nold topo: ring, new topo: grid\n3-th round\naverage train loss 6.18 | test loss 0.914 | test acc: 0.854\nold topo: grid, new topo: centralized\n4-th round\naverage train loss 7.03 | test loss 0.778 | test acc: 0.882\nold topo: centralized, new topo: centralized\n5-th round\naverage train loss 7.78 | test loss 0.669 | test acc: 0.901\nold topo: centralized, new topo: centralized\n6-th round\naverage train loss 8.45 | test loss 0.608 | test acc: 0.911\nold topo: centralized, new topo: ring\n7-th round\naverage train loss 9.1 | test loss 0.563 | test acc: 0.920\nold topo: ring, new topo: grid\n8-th round\naverage train loss 9.72 | test loss 0.527 | test acc: 0.926\nold topo: grid, new topo: grid\n9-th round\naverage train loss 10.3 | test loss 0.491 | test acc: 0.929\n[6.897977, 6.897977, 7.119571, 1.877665, 6.975756, 6.975756, 7.135328, 0.055659, 0.042739, 0.145445, 0.914706, 7.119571, 0.796112, 0.705136, 1.680001, 7.135328, 0.307068, 0.027973, 0.066017, 0.050395, 0.023925, 0.045482, 0.036846, 0.04681, 0.075035, 0.055651, 0.036346, 0.176029, 0.236526, 0.10529, 0.348497, 0.243134, 0.236875, 0.227404, 0.202296, 0.225107, 0.351052, 0.500234, 0.545211, 0.433861, 0.760528, 0.674279, 0.644057, 0.626423, 0.614068, 0.63524, 0.721393, 0.819003, 0.846573, 0.778018, 0.960708, 0.885033, 0.861515, 0.843839, 0.822054, 0.827762, 0.894091, 0.979136, 1.0061, 0.952244, 0.823314, 0.709701, 0.708165, 0.699097, 0.655668, 0.642083, 0.751061, 0.88669, 0.936515, 0.853515, 0.358905, 0.201349, 0.249243, 0.260397, 0.202658, 0.166603, 0.337179, 0.555401, 0.630822, 0.49054, 0.044715, 0.307195, 0.080566, 0.077186, 0.16383, 0.347707, 0.027617, 0.173616, 0.256051, 0.110278, 1.022213, 7.119571, 0.98773, 0.843718, 1.86369, 7.135328, 0.413963, 0.025692, 0.05235, 0.065813]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 4)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs4_ = run_latency_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs4_)\nplt.show()",
"old topo: centralized, new topo: centralized\n0-th round\naverage train loss 2.28 | test loss 2.28 | test acc: 0.200\nold topo: centralized, new topo: ring\n1-th round\naverage train loss 4.49 | test loss 2.3 | test acc: 0.098\nold topo: ring, new topo: ring\n2-th round\naverage train loss 6.8 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n3-th round\naverage train loss 9.1 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: ring\n4-th round\naverage train loss 11.4 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: centralized\n5-th round\naverage train loss 13.7 | test loss 2.3 | test acc: 0.114\nold topo: centralized, new topo: ring\n6-th round\naverage train loss 16 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n7-th round\naverage train loss 18.3 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: ring\n8-th round\naverage train loss 20.6 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n9-th round\naverage train loss 22.9 | test loss 2.3 | test acc: 0.114\n[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 8)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs8_ = run_latency_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs8_)\nplt.show()",
"old topo: centralized, new topo: grid\n0-th round\naverage train loss 2.29 | test loss 2.29 | test acc: 0.117\nold topo: grid, new topo: ring\n1-th round\naverage train loss 4.55 | test loss 2.3 | test acc: 0.101\nold topo: ring, new topo: centralized\n2-th round\naverage train loss 6.85 | test loss 2.3 | test acc: 0.114\nold topo: centralized, new topo: ring\n3-th round\naverage train loss 9.15 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: ring\n4-th round\naverage train loss 11.5 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n5-th round\naverage train loss 13.8 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: grid\n6-th round\naverage train loss 16.1 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: centralized\n7-th round\naverage train loss 18.4 | test loss 2.3 | test acc: 0.114\nold topo: centralized, new topo: centralized\n8-th round\naverage train loss 20.7 | test loss 2.3 | test acc: 0.114\nold topo: centralized, new topo: grid\n9-th round\naverage train loss 23 | test loss 2.3 | test acc: 0.114\n[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 16)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs16_ = run_latency_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs16_)\nplt.show()",
"old topo: ring, new topo: centralized\n0-th round\naverage train loss 2.28 | test loss 2.29 | test acc: 0.330\nold topo: centralized, new topo: ring\n1-th round\naverage train loss 4.46 | test loss 2.28 | test acc: 0.474\nold topo: ring, new topo: grid\n2-th round\naverage train loss 6.26 | test loss 2.18 | test acc: 0.686\nold topo: grid, new topo: grid\n3-th round\naverage train loss 7.81 | test loss 2.08 | test acc: 0.806\nold topo: grid, new topo: centralized\n4-th round\naverage train loss 9.18 | test loss 2.03 | test acc: 0.850\nold topo: centralized, new topo: centralized\n5-th round\naverage train loss 10.5 | test loss 2 | test acc: 0.872\nold topo: centralized, new topo: centralized\n6-th round\naverage train loss 11.7 | test loss 1.96 | test acc: 0.882\nold topo: centralized, new topo: ring\n7-th round\naverage train loss 13 | test loss 1.96 | test acc: 0.895\nold topo: ring, new topo: ring\n8-th round\naverage train loss 14.1 | test loss 1.94 | test acc: 0.903\nold topo: ring, new topo: grid\n9-th round\naverage train loss 15.3 | test loss 1.92 | test acc: 0.904\n[0.400577, 0.620641, 2.353908, 2.353908, 2.430324, 0.085045, 0.724488, 1.826102, 2.883727, 3.617985, 3.8102, 3.227735, 1.953189, 0.620425, 0.160197, 2.30949, 2.30949, 2.415274, 0.123622, 0.335954, 0.449795, 0.303045, 0.422952, 2.338946, 2.338946, 2.42527, 2.443574, 2.467961, 2.467961, 2.46872, 0.12515, 0.339892, 0.468337, 0.313642, 0.433873, 2.3395, 2.3395, 2.423561, 2.467187, 2.468462, 2.453469, 2.46838, 2.46838, 2.46886, 0.08408, 0.70491, 1.809654, 2.911401, 3.747958, 4.201197, 4.119646, 3.347744, 1.989539, 0.629525, 0.160068, 2.315704, 2.275192, 2.360616, 2.430886, 2.455713, 2.468487, 2.468894, 0.092838, 0.727289, 1.826726, 2.913283, 3.770314, 4.37976, 4.764308, 4.959639, 4.940112, 4.535618, 3.55332, 2.071371, 0.64897, 0.153074, 2.314916, 2.274307, 2.360368, 2.430981, 2.45577, 2.468523, 2.468908, 0.086309, 0.720381, 1.825959, 2.913809, 3.769593, 4.332973, 4.565529, 4.341488, 3.485716, 2.065866, 0.647439, 0.148069, 2.308455, 2.308455, 2.414924, 0.345828, 0.33171]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 32)\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs32_ = run_latency_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs32_)\nplt.show()",
"old topo: grid, new topo: ring\n0-th round\naverage train loss 2.28 | test loss 2.3 | test acc: 0.101\nold topo: ring, new topo: ring\n1-th round\naverage train loss 4.57 | test loss 2.3 | test acc: 0.101\nold topo: ring, new topo: centralized\n2-th round\naverage train loss 6.85 | test loss 2.3 | test acc: 0.101\nold topo: centralized, new topo: centralized\n3-th round\naverage train loss 9.14 | test loss 2.3 | test acc: 0.114\nold topo: centralized, new topo: ring\n4-th round\naverage train loss 11.4 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n5-th round\naverage train loss 13.7 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: ring\n6-th round\naverage train loss 16 | test loss 2.3 | test acc: 0.114\nold topo: ring, new topo: grid\n7-th round\naverage train loss 18.3 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: grid\n8-th round\naverage train loss 20.6 | test loss 2.3 | test acc: 0.114\nold topo: grid, new topo: ring\n9-th round\naverage train loss 22.9 | test loss 2.3 | test acc: 0.114\n[7e-06, 0.000215, 4e-06, 3e-06, 0.000609, 6e-06, 6e-06, 6e-06, 1.1e-05, 7e-06, 6e-06, 5e-06, 0.001139, 0.001892, 0.000562, 1e-05, 9e-06, 9e-06, 6e-06, 7e-06, 5e-06, 6e-06, 8e-06, 0.000441, 3e-06, 6e-06, 9e-06, 6e-06, 6e-06, 8e-06, 4e-05, 8e-06, 0.000273, 3e-06, 2e-06, 5e-06, 6e-06, 9e-06, 5e-06, 8e-06, 8e-06, 0.000362, 6e-06, 0.000204, 6e-06, 6e-06, 6e-06, 5e-06, 5e-06, 8e-06, 7e-06, 6e-06, 6e-06, 9e-06, 0.000126, 8e-06, 6e-06, 8e-06, 8e-06, 9e-06, 1e-05, 6e-06, 9e-06, 0.000198, 8e-06, 8e-06, 7e-06, 7e-06, 9e-06, 9e-06, 8e-06, 6e-06, 8e-06, 0.000239, 1e-05, 9e-06, 7e-06, 7e-06, 8e-06, 9e-06, 9e-06, 8e-06, 4e-06, 3e-06, 0.000132, 7e-06, 6e-06, 7e-06, 1.3e-05, 1.1e-05, 0.000119, 4e-06, 1e-06, 2e-06, 7e-06, 7e-06, 6e-06, 9e-06, 1.1e-05, 9e-06]\n"
],
[
"fig, ax = plt.subplots(1, figsize=(12, 9))\nax.set_ylim([0, 1])\nx = np.array(range(10))\nax.plot(x, accs, color=\"red\", label=\"base case\")\nax.plot(x, accs2_, color=\"lime\", label=\"two delayed nodes\")\nax.plot(x, accs4_, color=\"green\", label=\"four delayed nodes\")\nax.plot(x, accs8_, color=\"purple\", label=\"eight delayed nodes\")\nax.plot(x, accs16_, color=\"blue\", label=\"sixteen delayed nodes\")\nax.plot(x, accs32_, color=\"cyan\", label=\"thirty-two delayed nodes\")\n\n\nplt.legend(loc=\"lower right\", title=\"Number of delayed nodes\")\nplt.title(\"Accuracy curve depending on number of delayed nodes with changing topology\")\nplt.xlabel(\"Round\")\nplt.ylabel(\"Accuracy\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Latency on a few rounds",
"_____no_output_____"
]
],
[
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 2)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs1 = run_latency_per_round(train_loader, test_loader, comm_matrix,\n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs1)\nplt.show()",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 4)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs2 = run_latency_per_round(train_loader, test_loader, comm_matrix,\n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs2)\nplt.show()",
"round 0, normal\n0-th round\naverage train loss 2.27 | test loss 2.26 | test acc: 0.111\nround 1, normal\n1-th round\naverage train loss 4.13 | test loss 1.7 | test acc: 0.653\nround 2, normal\n2-th round\naverage train loss 5.28 | test loss 0.821 | test acc: 0.796\nround 3, delay\n3-th round\naverage train loss 6.12 | test loss 0.666 | test acc: 0.848\nround 4, delay recovery\n4-th round\naverage train loss 6.78 | test loss 0.917 | test acc: 0.882\nround 5, normal\n5-th round\naverage train loss 7.64 | test loss 0.782 | test acc: 0.901\nround 6, normal\n6-th round\naverage train loss 8.33 | test loss 0.405 | test acc: 0.907\nround 7, delay\n7-th round\naverage train loss 8.84 | test loss 0.389 | test acc: 0.913\nround 8, delay recovery\n8-th round\naverage train loss 9.29 | test loss 0.681 | test acc: 0.921\nround 9, normal\n9-th round\naverage train loss 10 | test loss 0.608 | test acc: 0.927\n[0.400068, 0.248191, 0.741911, 0.779332, 0.420436, 0.352372, 0.92416, 1.307567, 1.356909, 3.305896, 0.127932, 0.284976, 0.707044, 0.675691, 0.019943, 0.890035, 3.391614, 1.523471, 1.250028, 1.351615, 0.107129, 0.485259, 0.805279, 0.745931, 0.254806, 0.300664, 1.27133, 0.579949, 0.788706, 0.161754, 0.19341, 0.596363, 0.881687, 0.773994, 0.320902, 0.051484, 0.802296, 1.790325, 1.822479, 0.626847, 0.013184, 0.525138, 0.867128, 0.710168, 0.238743, 0.696514, 2.814292, 4.043558, 4.61365, 1.112303, 0.31286, 0.739916, 0.932657, 0.707622, 0.014786, 0.976814, 4.020064, 3.250121, 1.420446, 0.045134, 0.605487, 0.908002, 1.036651, 0.853497, 0.343497, 0.169359, 0.738909, 0.127975, 0.017298, 0.136271, 0.635377, 0.910905, 1.080375, 1.012459, 0.703197, 0.25963, 0.087909, 0.112208, 0.159001, 0.267644, 0.385336, 0.726939, 0.996653, 1.027726, 0.815139, 0.484201, 0.266581, 0.17757, 0.129395, 0.034289, 0.107163, 0.305027, 0.76391, 0.858108, 0.621182, 0.150355, 0.028978, 0.036471, 0.497999, 1.226889]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 8)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs3 = run_latency_per_round(train_loader, test_loader, comm_matrix,\n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs3)\nplt.show()",
"round 0, normal\n0-th round\naverage train loss 2.28 | test loss 2.28 | test acc: 0.190\nround 1, normal\n1-th round\naverage train loss 4.45 | test loss 2.15 | test acc: 0.638\nround 2, normal\n2-th round\naverage train loss 5.96 | test loss 1.13 | test acc: 0.721\nround 3, delay\n3-th round\naverage train loss 6.9 | test loss 0.966 | test acc: 0.800\nround 4, delay recovery\n4-th round\naverage train loss 7.67 | test loss 1.46 | test acc: 0.852\nround 5, normal\n5-th round\naverage train loss 8.77 | test loss 1.34 | test acc: 0.884\nround 6, normal\n6-th round\naverage train loss 9.67 | test loss 0.585 | test acc: 0.888\nround 7, delay\n7-th round\naverage train loss 10.3 | test loss 0.607 | test acc: 0.899\nround 8, delay recovery\n8-th round\naverage train loss 10.8 | test loss 1.29 | test acc: 0.906\nround 9, normal\n9-th round\naverage train loss 11.9 | test loss 1.2 | test acc: 0.918\n[0.251846, 0.087644, 0.148927, 0.250933, 0.081334, 0.402571, 2.172259, 1.941908, 1.855172, 1.105616, 0.094505, 0.286124, 1.086066, 1.91763, 0.936902, 0.906534, 2.194871, 2.228275, 1.513755, 0.745556, 0.231703, 0.497533, 2.024809, 0.486196, 0.022605, 0.117201, 0.726907, 1.977683, 2.545194, 0.702441, 0.317728, 0.015709, 0.114562, 0.141152, 0.446992, 0.343499, 0.022409, 0.745477, 0.297876, 0.042177, 0.938342, 0.605609, 0.569442, 0.964883, 1.283604, 1.250283, 0.842862, 0.488992, 0.509198, 0.667019, 0.940428, 0.205738, 0.820305, 1.437108, 1.892635, 1.991928, 1.77149, 1.481803, 1.253405, 0.996028, 0.019955, 0.145531, 0.03935, 1.179638, 1.976608, 2.12206, 1.870644, 1.480296, 1.079686, 0.447983, 0.28776, 1.578218, 0.053975, 0.870979, 1.613811, 1.582819, 1.137534, 0.216949, 0.338446, 0.02233, 1.280459, 0.392899, 0.122473, 0.744229, 1.022689, 0.741715, 0.036213, 0.357788, 0.417182, 1.070529, 0.814955, 0.030728, 0.088139, 0.198401, 0.251297, 0.023961, 1.036363, 1.566152, 1.681593, 1.406641]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 16)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs4 = run_latency_per_round(train_loader, test_loader, comm_matrix,\n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs4)\nplt.show()",
"round 0, normal\n0-th round\naverage train loss 2.21 | test loss 2.19 | test acc: 0.520\nround 1, normal\n1-th round\naverage train loss 3.77 | test loss 1.2 | test acc: 0.688\nround 2, normal\n2-th round\naverage train loss 4.77 | test loss 0.696 | test acc: 0.822\nround 3, delay\n3-th round\naverage train loss 5.53 | test loss 1.2 | test acc: 0.870\nround 4, delay recovery\n4-th round\naverage train loss 6.3 | test loss 2.13 | test acc: 0.849\nround 5, normal\n5-th round\naverage train loss 7.89 | test loss 2.09 | test acc: 0.865\nround 6, normal\n6-th round\naverage train loss 9.14 | test loss 1.14 | test acc: 0.859\nround 7, delay\n7-th round\naverage train loss 9.95 | test loss 1.22 | test acc: 0.879\nround 8, delay recovery\n8-th round\naverage train loss 10.7 | test loss 2.14 | test acc: 0.863\nround 9, normal\n9-th round\naverage train loss 12.3 | test loss 2.09 | test acc: 0.883\n[0.282392, 0.055487, 0.029005, 0.299041, 0.499274, 0.011152, 0.410203, 0.820023, 1.769118, 0.584178, 1.317929, 0.739027, 0.007301, 0.320239, 0.184632, 0.248487, 0.157589, 1.393948, 1.882944, 0.685872, 0.193473, 0.033964, 0.141113, 0.14905, 0.121853, 0.046366, 0.133876, 1.159691, 0.280685, 0.022564, 0.01001, 0.006796, 0.201376, 0.257739, 0.30967, 0.424728, 0.025302, 0.245693, 0.031205, 0.069038, 0.149498, 0.081551, 0.204996, 0.206787, 0.391066, 0.456275, 0.053136, 0.077726, 0.085546, 0.05164, 0.042466, 0.076334, 0.102075, 0.170948, 0.347641, 0.106115, 0.354513, 0.016392, 0.006991, 0.069866, 0.020624, 0.024258, 0.155203, 0.346007, 0.009083, 0.026706, 0.014264, 0.008095, 0.015156, 0.132819, 0.080825, 0.055584, 0.360573, 0.127624, 0.271585, 0.571886, 0.019703, 0.008042, 0.056622, 0.036508, 0.011815, 0.085287, 0.39585, 0.070588, 0.082546, 0.26238, 0.016509, 0.216185, 0.013368, 0.008144, 0.140987, 0.160733, 0.234379, 0.299339, 0.030275, 0.272975, 0.097964, 0.050967, 0.155162, 0.007348]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 32)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs5 = run_latency_per_round(train_loader, test_loader, comm_matrix,\n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs5)\nplt.show()",
"round 0, normal\n0-th round\naverage train loss 2.28 | test loss 2.28 | test acc: 0.232\nround 1, normal\n1-th round\naverage train loss 4.45 | test loss 2.15 | test acc: 0.440\nround 2, normal\n2-th round\naverage train loss 6.04 | test loss 1.19 | test acc: 0.756\nround 3, delay\n3-th round\naverage train loss 6.99 | test loss 2.04 | test acc: 0.790\nround 4, delay recovery\n4-th round\naverage train loss 8.3 | test loss 2.29 | test acc: 0.369\nround 5, normal\n5-th round\naverage train loss 10.3 | test loss 2.27 | test acc: 0.421\nround 6, normal\n6-th round\naverage train loss 12.3 | test loss 2.15 | test acc: 0.752\nround 7, delay\n7-th round\naverage train loss 13.8 | test loss 2.2 | test acc: 0.713\nround 8, delay recovery\n8-th round\naverage train loss 15.5 | test loss 2.3 | test acc: 0.114\nround 9, normal\n9-th round\naverage train loss 17.6 | test loss 2.29 | test acc: 0.114\n[0.660707, 0.326132, 0.047158, 0.020113, 0.003394, 0.011825, 0.168548, 0.001875, 0.008823, 0.13211, 0.16779, 0.378947, 0.085754, 0.022582, 0.034458, 0.04733, 0.065897, 0.08022, 0.003107, 0.010329, 0.006681, 0.000663, 0.004267, 0.00598, 0.011498, 0.022395, 0.016344, 0.190872, 0.005349, 0.017338, 0.007809, 0.011153, 0.020505, 0.038868, 0.066449, 0.058475, 0.0055, 0.044786, 0.01835, 0.045365, 0.016534, 0.027366, 0.036723, 0.057116, 0.062996, 0.062446, 0.033259, 0.016132, 0.019622, 0.010985, 0.012621, 0.038668, 0.057257, 0.06948, 0.082328, 0.074925, 0.044438, 0.035719, 0.046741, 0.030659, 0.019188, 0.044496, 0.066472, 0.091687, 0.103176, 0.068061, 0.036321, 0.060353, 0.001576, 0.032774, 0.032916, 0.034936, 0.049802, 0.082693, 0.08798, 0.04514, 0.048557, 0.00285, 0.162847, 0.037874, 0.170731, 0.003513, 0.019161, 0.035796, 0.025121, 0.01914, 0.018681, 0.029974, 0.285416, 0.525728, 0.512485, 0.038619, 0.00116, 0.011135, 0.008457, 0.004216, 0.00072, 0.005539, 0.066927, 0.438816]\n"
],
[
"fig, ax = plt.subplots(1, figsize=(12, 9))\nax.set_ylim([0, 1])\nx = np.array(range(10))\nax.plot(x, accs, color=\"red\", label=\"base case\")\nax.plot(x, accs1, color=\"lime\", label=\"two delayed nodes\")\nax.plot(x, accs2, color=\"green\", label=\"four delayed nodes\")\nax.plot(x, accs3, color=\"purple\", label=\"eight delayed nodes\")\nax.plot(x, accs4, color=\"blue\", label=\"sixteen delayed nodes\")\nax.plot(x, accs5, color=\"cyan\", label=\"thirty-two delayed nodes\")\n\n\n\nplt.legend(loc=\"lower right\", title=\"Number of delayed nodes\")\nplt.title(\"Accuracy curve depending on number of delayed nodes with delays only on specific rounds\")\nplt.xlabel(\"Round\")\nplt.ylabel(\"Accuracy\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Latency on a few rounds with changing topology",
"_____no_output_____"
]
],
[
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 2)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs1_ = run_latency_per_round_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs1_)\nplt.show()",
"round 0, normal\nold topo: centralized, new topo: ring\n0-th round\naverage train loss 2.26 | test loss 2.26 | test acc: 0.325\nround 1, normal\nold topo: ring, new topo: ring\n1-th round\naverage train loss 4.14 | test loss 1.7 | test acc: 0.696\nround 2, normal\nold topo: ring, new topo: ring\n2-th round\naverage train loss 5.31 | test loss 0.809 | test acc: 0.792\nround 3, delay\nold topo: ring, new topo: centralized\n3-th round\naverage train loss 6.14 | test loss 0.593 | test acc: 0.853\nround 4, delay recovery\nold topo: centralized, new topo: ring\n4-th round\naverage train loss 6.81 | test loss 2.3 | test acc: 0.098\nround 5, normal\nold topo: ring, new topo: centralized\n5-th round\naverage train loss 9.11 | test loss 2.3 | test acc: 0.114\nround 6, normal\nold topo: centralized, new topo: grid\n6-th round\naverage train loss 11.4 | test loss 2.3 | test acc: 0.114\nround 7, delay\nold topo: grid, new topo: grid\n7-th round\naverage train loss 13.7 | test loss 2.3 | test acc: 0.114\nround 8, delay recovery\nold topo: grid, new topo: centralized\n8-th round\naverage train loss 16 | test loss 2.3 | test acc: 0.114\nround 9, normal\nold topo: centralized, new topo: grid\n9-th round\naverage train loss 18.3 | test loss 2.3 | test acc: 0.114\n[1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-06, 0.0, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 4)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs2_ = run_latency_per_round_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs2_)\nplt.show()",
"round 0, normal\nold topo: grid, new topo: grid\n0-th round\naverage train loss 2.27 | test loss 2.26 | test acc: 0.321\nround 1, normal\nold topo: grid, new topo: grid\n1-th round\naverage train loss 4.09 | test loss 1.65 | test acc: 0.687\nround 2, normal\nold topo: grid, new topo: grid\n2-th round\naverage train loss 5.24 | test loss 0.823 | test acc: 0.790\nround 3, delay\nold topo: grid, new topo: centralized\n3-th round\naverage train loss 6.04 | test loss 0.664 | test acc: 0.853\nround 4, delay recovery\nold topo: centralized, new topo: centralized\n4-th round\naverage train loss 6.72 | test loss 0.932 | test acc: 0.885\nround 5, normal\nold topo: centralized, new topo: centralized\n5-th round\naverage train loss 7.57 | test loss 0.759 | test acc: 0.902\nround 6, normal\nold topo: centralized, new topo: ring\n6-th round\naverage train loss 8.24 | test loss 0.387 | test acc: 0.904\nround 7, delay\nold topo: ring, new topo: centralized\n7-th round\naverage train loss 8.7 | test loss 0.365 | test acc: 0.917\nround 8, delay recovery\nold topo: centralized, new topo: ring\n8-th round\naverage train loss 9.17 | test loss 0.68 | test acc: 0.924\nround 9, normal\nold topo: ring, new topo: centralized\n9-th round\naverage train loss 9.89 | test loss 0.589 | test acc: 0.930\n[0.016754, 0.057926, 0.560776, 0.686774, 0.100794, 0.377529, 0.795988, 0.851647, 0.645382, 0.253166, 0.645667, 0.306313, 0.019768, 0.059977, 0.236184, 0.538215, 0.607715, 0.455648, 0.242023, 0.193421, 4.453769, 0.876014, 0.03937, 0.31118, 0.505417, 0.492108, 0.284147, 0.023834, 0.215593, 1.349203, 1.97997, 0.14285, 0.127155, 0.456944, 0.549302, 0.320355, 0.379033, 1.094663, 0.832379, 0.732639, 1.509091, 0.512902, 0.108941, 0.48426, 0.534154, 0.015361, 0.832638, 3.35989, 1.331445, 1.521674, 4.822291, 0.792009, 0.096634, 0.481324, 0.584034, 0.287995, 0.156219, 0.735232, 0.718252, 1.703677, 0.469001, 0.014314, 0.206553, 0.419489, 0.562975, 0.531762, 0.254996, 0.098406, 0.050685, 0.04337, 0.169992, 0.170019, 0.166479, 0.011878, 0.276584, 0.544935, 0.616455, 0.562374, 0.463403, 0.249427, 0.41128, 0.17167, 0.457891, 0.842978, 0.149842, 0.297606, 0.706681, 0.843217, 0.780479, 0.546791, 0.237463, 0.040528, 1.360317, 2.283296, 0.657152, 0.163251, 0.670009, 0.868838, 0.777517, 0.466219]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 8)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs3_ = run_latency_per_round_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs3_)\nplt.show()",
"round 0, normal\nold topo: centralized, new topo: ring\n0-th round\naverage train loss 2.29 | test loss 2.28 | test acc: 0.283\nround 1, normal\nold topo: ring, new topo: centralized\n1-th round\naverage train loss 4.48 | test loss 2.18 | test acc: 0.582\nround 2, normal\nold topo: centralized, new topo: grid\n2-th round\naverage train loss 6.01 | test loss 1.18 | test acc: 0.748\nround 3, delay\nold topo: grid, new topo: centralized\n3-th round\naverage train loss 6.94 | test loss 1.06 | test acc: 0.829\nround 4, delay recovery\nold topo: centralized, new topo: grid\n4-th round\naverage train loss 7.79 | test loss 2.3 | test acc: 0.114\nround 5, normal\nold topo: grid, new topo: grid\n5-th round\naverage train loss 10.1 | test loss 2.3 | test acc: 0.114\nround 6, normal\nold topo: grid, new topo: centralized\n6-th round\naverage train loss 12.4 | test loss 2.3 | test acc: 0.114\nround 7, delay\nold topo: centralized, new topo: grid\n7-th round\naverage train loss 14.7 | test loss 2.3 | test acc: 0.114\nround 8, delay recovery\nold topo: grid, new topo: centralized\n8-th round\naverage train loss 17 | test loss 2.3 | test acc: 0.114\nround 9, normal\nold topo: centralized, new topo: centralized\n9-th round\naverage train loss 19.3 | test loss 2.3 | test acc: 0.114\n[1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 16)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs4_ = run_latency_per_round_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs4_)\nplt.show()",
"round 0, normal\nold topo: centralized, new topo: centralized\n0-th round\naverage train loss 2.29 | test loss 2.28 | test acc: 0.244\nround 1, normal\nold topo: centralized, new topo: ring\n1-th round\naverage train loss 4.52 | test loss 2.22 | test acc: 0.440\nround 2, normal\nold topo: ring, new topo: centralized\n2-th round\naverage train loss 6.21 | test loss 1.48 | test acc: 0.705\nround 3, delay\nold topo: centralized, new topo: centralized\n3-th round\naverage train loss 7.28 | test loss 1.66 | test acc: 0.793\nround 4, delay recovery\nold topo: centralized, new topo: grid\n4-th round\naverage train loss 8.22 | test loss 2.3 | test acc: 0.114\nround 5, normal\nold topo: grid, new topo: ring\n5-th round\naverage train loss 10.5 | test loss 2.3 | test acc: 0.114\nround 6, normal\nold topo: ring, new topo: grid\n6-th round\naverage train loss 12.8 | test loss 2.3 | test acc: 0.114\nround 7, delay\nold topo: grid, new topo: ring\n7-th round\naverage train loss 15.1 | test loss 2.3 | test acc: 0.114\nround 8, delay recovery\nold topo: ring, new topo: centralized\n8-th round\naverage train loss 17.4 | test loss 2.3 | test acc: 0.114\nround 9, normal\nold topo: centralized, new topo: centralized\n9-th round\naverage train loss 19.7 | test loss 2.3 | test acc: 0.114\n[9e-06, 8e-06, 8e-06, 7e-06, 7e-06, 6e-06, 6e-06, 6e-06, 5e-06, 5e-06, 5e-06, 5e-06, 5e-06, 4e-06, 4e-06, 4e-06, 3e-06, 3e-06, 3e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 1e-06, 0.0, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 1e-06, 2e-06, 1e-06, 2e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"# IID case: all the clients have images of all the classes\n# Grid graph topology: each client is connected to exactly 4 neighbours\n\n# Hyperparameters\n\nnum_clients = 100\nnum_rounds = 10\nepochs = 1\nbatch_size = 32\nlatency_nodes = nodes_latency(num_clients, 32)\nlatency_rounds = np.array([3, 7])\n\n# Communication matrix\n\ncomm_matrix = create_mixing_matrix('grid', num_clients)\n\n# Creating decentralized datasets\n\ntrain_loader, test_loader = load_data(batch_size, num_clients)\n\n# Instantiate models and optimizers and run decentralized training\n\nglobal_model, client_models, accs5_ = run_latency_per_round_changing_topo(train_loader, test_loader, \n num_rounds, epochs, num_clients, latency_nodes, latency_rounds)\n\ncons = consensus(global_model, client_models)\nprint(cons)\n\naxes = plt.gca()\naxes.set_ylim([0,1])\nplt.plot(range(num_rounds), accs5_)\nplt.show()",
"round 0, normal\nold topo: centralized, new topo: ring\n0-th round\naverage train loss 2.28 | test loss 2.28 | test acc: 0.155\nround 1, normal\nold topo: ring, new topo: ring\n1-th round\naverage train loss 4.43 | test loss 2.11 | test acc: 0.655\nround 2, normal\nold topo: ring, new topo: ring\n2-th round\naverage train loss 5.81 | test loss 1.02 | test acc: 0.752\nround 3, delay\nold topo: ring, new topo: grid\n3-th round\naverage train loss 6.72 | test loss 2.09 | test acc: 0.830\nround 4, delay recovery\nold topo: grid, new topo: ring\n4-th round\naverage train loss 7.92 | test loss 2.3 | test acc: 0.114\nround 5, normal\nold topo: ring, new topo: ring\n5-th round\naverage train loss 10.2 | test loss 2.3 | test acc: 0.114\nround 6, normal\nold topo: ring, new topo: grid\n6-th round\naverage train loss 12.5 | test loss 2.3 | test acc: 0.114\nround 7, delay\nold topo: grid, new topo: centralized\n7-th round\naverage train loss 14.8 | test loss 2.3 | test acc: 0.114\nround 8, delay recovery\nold topo: centralized, new topo: grid\n8-th round\naverage train loss 17.1 | test loss 2.3 | test acc: 0.114\nround 9, normal\nold topo: grid, new topo: ring\n9-th round\naverage train loss 19.4 | test loss 2.3 | test acc: 0.114\n[2.4e-05, 2.2e-05, 2e-05, 1.8e-05, 1.6e-05, 1.5e-05, 1.4e-05, 1.3e-05, 1.1e-05, 1e-05, 9e-06, 9e-06, 8e-06, 7e-06, 6e-06, 6e-06, 5e-06, 4e-06, 4e-06, 3e-06, 3e-06, 2e-06, 2e-06, 2e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 4e-06, 4e-06, 4e-06, 4e-06, 4e-06, 4e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 3e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 2e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 1e-06, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
],
[
"fig, ax = plt.subplots(1, figsize=(12, 9))\nax.set_ylim([0, 1])\nx = np.array(range(10))\nax.plot(x, accs, color=\"red\", label=\"base case\")\nax.plot(x, accs1_, color=\"lime\", label=\"two delayed nodes\")\nax.plot(x, accs2_, color=\"green\", label=\"four delayed nodes\")\nax.plot(x, accs3_, color=\"purple\", label=\"eight delayed nodes\")\nax.plot(x, accs4_, color=\"blue\", label=\"sixteen delayed nodes\")\nax.plot(x, accs5_, color=\"cyan\", label=\"thirty-two delayed nodes\")\n\n\nplt.legend(loc=\"lower right\", title=\"Number of delayed nodes\")\nplt.title(\"Accuracy curve depending on number of delayed nodes with changing topology and delays only on specific rounds\")\nplt.xlabel(\"Round\")\nplt.ylabel(\"Accuracy\")\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb748c5ac3509272cda175a00972812cdf3fa675 | 3,079 | ipynb | Jupyter Notebook | pylondinium/notebooks/xleaflet.ipynb | jtpio/quantstack-talks | 092f93ddb9901cb614f428e13a0b1b1e3ffcc0ec | [
"BSD-3-Clause"
]
| 82 | 2017-04-14T20:18:55.000Z | 2021-12-25T23:38:52.000Z | pylondinium/notebooks/xleaflet.ipynb | jtpio/quantstack-talks | 092f93ddb9901cb614f428e13a0b1b1e3ffcc0ec | [
"BSD-3-Clause"
]
| 3 | 2017-04-07T18:37:21.000Z | 2020-07-11T09:37:53.000Z | pylondinium/notebooks/xleaflet.ipynb | jtpio/quantstack-talks | 092f93ddb9901cb614f428e13a0b1b1e3ffcc0ec | [
"BSD-3-Clause"
]
| 59 | 2017-04-07T11:16:56.000Z | 2022-03-25T14:48:55.000Z | 20.66443 | 85 | 0.497239 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb748e7cad8e129b86d5a1194e6e9e3865727eed | 412,065 | ipynb | Jupyter Notebook | glycompare/.ipynb_checkpoints/demo-checkpoint.ipynb | LewisLabUCSD/GlyCompare | e0e5fb16c7117e681f167b5ee4ca40978db7fad7 | [
"MIT"
]
| 7 | 2019-07-06T04:26:40.000Z | 2021-12-06T20:48:34.000Z | glycompare/.ipynb_checkpoints/demo-checkpoint.ipynb | LewisLabUCSD/GlyCompare | e0e5fb16c7117e681f167b5ee4ca40978db7fad7 | [
"MIT"
]
| 13 | 2019-07-07T06:43:08.000Z | 2022-01-03T22:01:17.000Z | glycompare/.ipynb_checkpoints/demo-checkpoint.ipynb | LewisLabUCSD/GlyCompare | e0e5fb16c7117e681f167b5ee4ca40978db7fad7 | [
"MIT"
]
| 3 | 2019-07-08T07:15:23.000Z | 2021-07-16T03:13:50.000Z | 176.171441 | 315,460 | 0.876029 | [
[
[
"# import customizing_motif_vec\nimport extract_motif\nimport motif_class\nimport __init__\nimport json_utility\nfrom importlib import reload\nreload(__init__)\nreload(extract_motif)\n# reload(customizing_motif_vec)\nreload(motif_class)\nimport plot_glycan_utilities\nreload(plot_glycan_utilities)\nimport matplotlib.pyplot as plt\nfrom glypy.io import glycoct\nfrom glypy.structure.glycan import fragment_to_substructure, Glycan\nimport glycan_io\nfrom glypy.structure.glycan_composition import GlycanComposition, FrozenGlycanComposition\n\n%matplotlib inline\n",
"_____no_output_____"
]
],
[
[
"A4FG4S4 = \"\"\"\nRES\n1b:x-dglc-HEX-1:5\n2s:n-acetyl\n3b:b-dglc-HEX-1:5\n4s:n-acetyl\n5b:a-dman-HEX-1:5\n6b:a-dman-HEX-1:5\n7b:b-dglc-HEX-1:5\n8s:n-acetyl\n9b:b-dgal-HEX-1:5\n10b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d\n11s:n-acetyl\n12b:b-dglc-HEX-1:5\n13s:n-acetyl\n14b:b-dgal-HEX-1:5\n15b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d\n16s:n-acetyl\n17b:a-dman-HEX-1:5\n18b:b-dglc-HEX-1:5\n19s:n-acetyl\n20b:b-dgal-HEX-1:5\n21b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d\n22s:n-acetyl\n23b:b-dglc-HEX-1:5\n24s:n-acetyl\n25b:b-dgal-HEX-1:5\n26b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d\n27s:n-acetyl\n28b:a-lgal-HEX-1:5|6:d\nLIN\n1:1d(2+1)2n\n2:1o(4+1)3d\n3:3d(2+1)4n\n4:3o(4+1)5d\n5:5o(3+1)6d\n6:6o(2+1)7d\n7:7d(2+1)8n\n8:7o(4+1)9d\n9:9o(3+2)10d\n10:10d(5+1)11n\n11:6o(4+1)12d\n12:12d(2+1)13n\n13:12o(4+1)14d\n14:14o(3+2)15d\n15:15d(5+1)16n\n16:5o(6+1)17d\n17:17o(2+1)18d\n18:18d(2+1)19n\n19:18o(4+1)20d\n20:20o(3+2)21d\n21:21d(5+1)22n\n22:17o(6+1)23d\n23:23d(2+1)24n\n24:23o(4+1)25d\n25:25o(3+2)26d\n26:26d(5+1)27n\n27:1o(6+1)28d\n\"\"\"\n",
"_____no_output_____"
],
[
"a_3350 = \"\"\"RES\n1b:x-dglc-HEX-1:5\n2b:x-lgal-HEX-1:5|6:d\n3b:x-dglc-HEX-1:5\n4b:x-dman-HEX-1:5\n5b:x-dman-HEX-1:5\n6b:x-dglc-HEX-1:5\n7b:x-dgal-HEX-1:5\n8b:x-dgro-dgal-NON-2:6|1:a|2:keto|3:d\n9s:n-acetyl\n10s:n-acetyl\n11b:x-dman-HEX-1:5\n12b:x-dglc-HEX-1:5\n13b:x-dgal-HEX-1:5\n14s:n-acetyl\n15b:x-dglc-HEX-1:5\n16b:x-dgal-HEX-1:5\n17s:n-acetyl\n18s:n-acetyl\n19s:n-acetyl\nLIN\n1:1o(-1+1)2d\n2:1o(-1+1)3d\n3:3o(-1+1)4d\n4:4o(-1+1)5d\n5:5o(-1+1)6d\n6:6o(-1+1)7d\n7:7o(-1+2)8d\n8:8d(5+1)9n\n9:6d(2+1)10n\n10:4o(-1+1)11d\n11:11o(-1+1)12d\n12:12o(-1+1)13d\n13:12d(2+1)14n\n14:11o(-1+1)15d\n15:15o(-1+1)16d\n16:15d(2+1)17n\n17:3d(2+1)18n\n18:1d(2+1)19n\n\"\"\"",
"_____no_output_____"
]
],
[
[
"undefined = \"\"\"RES\n1b:x-dglc-HEX-1:5\n2s:n-acetyl\n3b:b-dglc-HEX-1:5\n4s:n-acetyl\n5b:b-dman-HEX-1:5\n6b:a-dman-HEX-1:5\n7b:b-dglc-HEX-1:5\n8s:n-acetyl\n9b:a-dman-HEX-1:5\n10b:b-dglc-HEX-1:5\n11s:n-acetyl\n12b:b-dglc-HEX-1:5\n13s:n-acetyl\n14b:a-lgal-HEX-1:5|6:d\nLIN\n1:1d(2+1)2n\n2:1o(4+1)3d\n3:3d(2+1)4n\n4:3o(4+1)5d\n5:5o(3+1)6d\n6:6o(2+1)7d\n7:7d(2+1)8n\n8:5o(6+1)9d\n9:9o(2+1)10d\n10:10d(2+1)11n\n11:9o(6+1)12d\n12:12d(2+1)13n\n13:1o(6+1)14d\nUND\nUND1:100.0:100.0\nParentIDs:1|3|5|6|7|9|10|12|14\nSubtreeLinkageID1:o(4+1)d\nRES\n15b:b-dgal-HEX-1:5\n16b:a-lgal-HEX-1:5|6:d\n17b:a-dgal-HEX-1:5\n18s:n-acetyl\nLIN\n14:15o(2+1)16d\n15:15o(3+1)17d\n16:17d(2+1)18n\"\"\"\nund_glycan = glycoct.loads(undefined)\ntest1 = \"\"\"RES\n1b:x-dglc-HEX-1:5\n2s:n-acetyl\n3b:b-dglc-HEX-1:5\n4s:n-acetyl\n5b:a-dman-HEX-1:5\n6b:a-dman-HEX-1:5\n7b:b-dglc-HEX-1:5\n8s:n-acetyl\n9b:b-dglc-HEX-1:5\n10s:n-acetyl\n11b:a-dman-HEX-1:5\n12b:b-dglc-HEX-1:5\n13s:n-acetyl\n14b:a-lgal-HEX-1:5|6:d\nLIN\n1:1d(2+1)2n\n2:1o(4+1)3d\n3:3d(2+1)4n\n4:3o(4+1)5d\n5:5o(3+1)6d\n6:6o(2+1)7d\n7:7d(2+1)8n\n8:6o(4+1)9d\n9:9d(2+1)10n\n10:5o(6+1)11d\n11:11o(2+1)12d\n12:12d(2+1)13n\n13:1o(6+1)14d\nUND\nUND1:100.0:100.0\nParentIDs:1|3|5|6|7|9|11|12|14\nSubtreeLinkageID1:o(4+1)d\nRES\n15b:b-dgal-HEX-1:5\n\n\"\"\"\nglycan_test1 = glycoct.loads(test1)",
"_____no_output_____"
],
[
"reload(glycoct)\nreload(glycan_io)\nglycan_dict = glycan_io.load_glycan_obj_from_dir('/Users/apple/Desktop/NathanLab/CHO_Anders/GlycanSVG/')\nA4FG4S4 = glycoct.loads(str(glycan_dict['A4FG4S4']))",
"A3FG\nA3F\nA4FG4S4\nM9\nA1\nA2FG2S2\nA2\nA2G1\nA3FG3S3\nA4FG4\nM3\nM6\nA2FG1\nA2G2S2\nM2\nM3-F\nA1F\nM1\nM4\nA2F\nM5\n"
],
[
"glycan_dict['A4FG4S4']",
"_____no_output_____"
],
[
"temp_mono = A4FG4S4.root\n## recursion,\ntemp_mono.children()",
"_____no_output_____"
],
[
"GlycanComposition.from_glycan(A4FG4S4)",
"_____no_output_____"
],
[
"from glypy.structure import monosaccharide\nfrom glypy import monosaccharides\nfrom glypy.structure import glycan\n# (monosaccharides.GlcNAc)\nGlycanComposition.from_glycan(glycan.Glycan(monosaccharides.GlcNAc))",
"_____no_output_____"
],
[
"# # get \ndef drop_terminal(a_glycan):\n term_list =[]\n temp_mono = a_glycan.root\n def rec_drop_term(a_mono):\n# print(a_mono)\n temp_children = a_mono.children()\n return_list = []\n if temp_children:\n for pos, child in temp_children:\n temp_term = rec_term(child)\n# print(temp_term)\n return_list.extend(temp_term)\n return return_list\n else:\n# print(a_mono, temp_children)\n return [(a_mono] # a list of term\n temp_term = rec_term(temp_mono)\n return temp_term\n# # A4FG4S4.root\n# term_a4fg4s4=find_terminal(A4FG4S4)[4]\n# term_a4fg4s4.parents()",
"_____no_output_____"
],
[
"A4FG4S4 = glycoct.loads(str(glycan_dict['A4FG4S4']))\nfor i in list(A4FG4S4.leaves()):\n i.drop_monosaccharide(i.parents()[0][0])\n \n",
"_____no_output_____"
],
[
"_mono_list = list(A4FG4S4.leaves())\n_mono_list",
"_____no_output_____"
],
[
"for i in _mono_list:\n i.drop_monosaccharide(i.parents()[0][0])",
"_____no_output_____"
],
[
"plot_glycan_utilities.plot_glycan(A4FG4S4)",
"_____no_output_____"
],
[
"_mono_parents_list = [i.parents()[0][1] for i in _mono_list]\n_mono_parents_list\n#drop_monosaccharide(pos)\n",
"_____no_output_____"
],
[
"for _mpar in _mono_parents_list:\n if len(_mpar.children())==1:\n print(_mpar.children())\n _mpar.drop_monosaccharide(_mpar.children()[0][0])\n continue\n for _index, _mchild in _mpar.children():\n if _mchild in _mono_list:\n _mpar.drop_monosaccharide(_index)\n break",
"[(3, RES 1b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d 2s:n-acetyl LIN 1:1d(5+1)2n)]\n[(3, RES 1b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d 2s:n-acetyl LIN 1:1d(5+1)2n)]\n[(3, RES 1b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d 2s:n-acetyl LIN 1:1d(5+1)2n)]\n[(3, RES 1b:a-dgro-dgal-NON-2:6|1:a|2:keto|3:d 2s:n-acetyl LIN 1:1d(5+1)2n)]\n"
],
[
"A4FG4S4",
"_____no_output_____"
],
[
"ud_composition = GlycanComposition.from_glycan(ud_glycan)\nud_composition.serialize()",
"_____no_output_____"
],
[
"a = FrozenGlycanComposition.from_glycan(ud_glycan)",
"_____no_output_____"
]
],
[
[
"# extract_motif",
"_____no_output_____"
]
],
[
[
"# transform glycoct to Glycan obj\na_glycan = glycoct.loads(a_3350)",
"_____no_output_____"
],
[
"# extract_motif\nglycan_motif_dict = extract_motif.extract_motif(a_glycan)\nprint(glycan_motif_dict.keys())\nprint(glycan_motif_dict[1])\nprint(type(glycan_motif_dict[1][0]))",
"0 13 9.5367431640625e-07 3.434659004211426\ndict_keys([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n['RES\\n1b:x-lgal-HEX-1:5|6:d\\nLIN\\n', 'RES\\n1b:x-dgal-HEX-1:5\\nLIN\\n', 'RES\\n1b:x-dgal-HEX-1:5\\nLIN\\n', 'RES\\n1b:x-dgro-dgal-NON-2:6|1:a|2:keto|3:d\\n2s:n-acetyl\\nLIN\\n1:1d(5+1)2n\\n', 'RES\\n1b:x-dglc-HEX-1:5\\n2s:n-acetyl\\nLIN\\n1:1d(2+1)2n\\n', 'RES\\n1b:x-dglc-HEX-1:5\\n2s:n-acetyl\\nLIN\\n1:1d(2+1)2n\\n', 'RES\\n1b:x-dman-HEX-1:5\\nLIN\\n', 'RES\\n1b:x-dglc-HEX-1:5\\n2s:n-acetyl\\nLIN\\n1:1d(2+1)2n\\n', 'RES\\n1b:x-dglc-HEX-1:5\\n2s:n-acetyl\\nLIN\\n1:1d(2+1)2n\\n', 'RES\\n1b:x-dglc-HEX-1:5\\n2s:n-acetyl\\nLIN\\n1:1d(2+1)2n\\n', 'RES\\n1b:x-dgal-HEX-1:5\\nLIN\\n', 'RES\\n1b:x-dman-HEX-1:5\\nLIN\\n', 'RES\\n1b:x-dman-HEX-1:5\\nLIN\\n']\n<class 'str'>\n"
]
],
[
[
"# Plot",
"_____no_output_____"
]
],
[
[
"plot_glycan_utilities.plot_glycan(a_glycan)",
"_____no_output_____"
],
[
"plot_glycan_utilities.plot_glycan_list([a_glycan],['demo'])",
"_____no_output_____"
]
],
[
[
"# pipeline",
"_____no_output_____"
]
],
[
[
"# in gc_init: clarify the glycoct_dict_goto_extraction_addr\n# in gc_init: clarify the glytoucan_data_base_addr__\n# two files above are input data file for this pip \nextract_motif.get_motif_pip(22, prior=True)\n# it would be faster if you run the python directly",
"start parallel\n0 6 0.0 0.08706402778625488\nclosing poll\njoining pool\n1 10 9.5367431640625e-07 1.5671770572662354\n9 7 0.0 0.20027899742126465\n7 12 1.1920928955078125e-06 3.842092990875244\n5 12 9.5367431640625e-07 4.40812087059021\n12 8 9.5367431640625e-07 0.427156925201416\n10 12 0.0 4.673549175262451\n4 13 9.5367431640625e-07 7.675364971160889\n15 9 9.5367431640625e-07 0.8958439826965332\n14 11 9.5367431640625e-07 2.7697248458862305\n3 14 9.5367431640625e-07 12.078837156295776\n17 11 9.5367431640625e-07 2.897017002105713\n16 12 9.5367431640625e-07 4.6319260597229\n19 12 9.5367431640625e-07 5.887125015258789\n21 11 0.0 3.381891965866089\n22 6 0.0 0.20746588706970215\n23 12 9.5367431640625e-07 5.499340057373047\n20 14 9.5367431640625e-07 16.480144023895264\n24 11 9.5367431640625e-07 3.336660146713257\n25 10 0.0 1.9231481552124023\n2 16 1.1920928955078125e-06 34.89481782913208\n18 15 0.0 23.577799081802368\n29 7 9.5367431640625e-07 0.28688693046569824\n28 9 9.5367431640625e-07 1.2253930568695068\n30 11 0.0 3.8473398685455322\n32 8 0.0 0.6323990821838379\n13 16 9.5367431640625e-07 36.13741493225098\n34 10 9.5367431640625e-07 1.8789501190185547\n35 11 1.1920928955078125e-06 3.9453530311584473\n31 13 0.0 11.26827096939087\n26 14 9.5367431640625e-07 18.045707941055298\n27 14 9.5367431640625e-07 17.12822198867798\n36 10 0.0 2.0342798233032227\n11 17 0.0 52.85451912879944\n41 8 0.0 0.5552408695220947\n37 13 1.1920928955078125e-06 11.205185890197754\n43 10 0.0 2.1786420345306396\n44 9 0.0 1.3106610774993896\n40 14 0.0 16.88992214202881\n46 13 0.0 10.068217992782593\n45 16 9.5367431640625e-07 36.45813703536987\n39 17 0.0 60.1173369884491\n48 14 0.0 17.305004119873047\n33 18 9.5367431640625e-07 76.57050395011902\n51 7 9.5367431640625e-07 0.29700398445129395\n52 12 9.5367431640625e-07 7.137371063232422\n38 18 9.5367431640625e-07 88.7938129901886\n54 9 9.5367431640625e-07 0.9078881740570068\n49 16 9.5367431640625e-07 33.367377042770386\n55 13 0.0 9.578915119171143\n53 15 9.5367431640625e-07 25.86977195739746\n58 13 9.5367431640625e-07 8.92409896850586\n42 19 9.5367431640625e-07 111.92442607879639\n50 17 4.0531158447265625e-06 54.48628902435303\n61 10 9.5367431640625e-07 1.9070980548858643\n60 12 0.0 5.558928966522217\n63 11 9.5367431640625e-07 3.410633087158203\n57 16 0.0 40.851271867752075\n47 19 9.5367431640625e-07 118.1122670173645\n65 13 0.0 10.956500053405762\n67 8 9.5367431640625e-07 0.5605449676513672\n64 15 0.0 28.58658504486084\n66 14 1.1920928955078125e-06 13.917259931564331\n69 10 9.5367431640625e-07 2.0062341690063477\n8 21 0.0 214.55248498916626\n59 17 1.1920928955078125e-06 63.44219183921814\n70 14 9.5367431640625e-07 16.31237292289734\n72 15 3.0994415283203125e-06 25.68829894065857\n56 19 9.5367431640625e-07 106.70391011238098\n75 14 1.1920928955078125e-06 14.019502878189087\n68 18 0.0 75.87249493598938\n6 22 1.1920928955078125e-06 281.470312833786\n74 18 0.0 60.473663091659546\n62 20 0.0 121.64717602729797\n76 19 9.5367431640625e-07 68.21399593353271\n71 20 9.5367431640625e-07 110.12269401550293\n73 21 9.5367431640625e-07 109.56891989707947\nfinished pool\nsuccess_log\nstore duplicate\n"
],
[
"# check the gc_init as well\n# it would be faster if you run the python directly\ncustomizing_motif_vec.customizing_motif_vec_pip()",
"1 6\n2 9\n3 12\n4 16\n5 23\n6 32\n7 45\n8 59\n9 83\n10 111\n11 143\n12 173\n13 194\n14 201\n15 188\n16 157\n17 115\n18 71\n19 38\n20 15\n21 4\n1695\nget motif vec, the length is 1695\nstart processing G04483SK\nstart processing G03445UI\nstart processing 4490.1\nstart processing G54953LX\nstart processing 3055.1\nstart processing 2693.2\nstart processing 3661.1\nstart processing 2967.1\nstart processing G30460NZ\nstart processing 5486.1\nstart processing G52428MJ\nstart processing 1754.1\nstart processing G39813YP\nstart processing G39764AC\nstart processing 2646.1\nstart processing 2244.1\nstart processing 1417.1\nstart processing G76812VG\nstart processing G07568IR\nstart processing 2693.3\nstart processing 3416.2\nstart processing 3457.1\nstart processing G49604DB\nstart processing 3592.1\nstart processing 4851.1\nstart processing G40242TG\nstart processing 3416.1\nstart processing G00176HZ\nstart processing 3416.3\nstart processing 4675.1\nstart processing G24987DS\nstart processing G80858MF\nstart processing 4587.1\nstart processing G17689DH\nstart processing G54338PJ\nstart processing G79457WN\nstart processing 2605.2\nstart processing G37597FW\nstart processing G36191CD\nstart processing G10691MJ\nstart processing 3953.1\nstart processing 2693.1\nstart processing G05098FE\nstart processing G05203UQ\nstart processing G88966ZO\nstart processing 3865.1\nstart processing G56516KW\nstart processing G39439UR\nstart processing 3212.1\nstart processing G86696LV\nstart processing G39213VZ\nstart processing G90130AG\nstart processing G49721VX\nstart processing G09280JF\nstart processing 3143.1\nstart processing G07483YN\nstart processing G12398HZ\nstart processing G60415BS\nstart processing G58667NI\nstart processing 2605.1\nstart processing G88127MB\nstart processing 2401.1\nstart processing G16873YG\nstart processing G23295TF\nstart processing G80264ZA\nstart processing G85809SI\nstart processing G99891PR\nstart processing G80393PG\nstart processing G00536FZ\nstart processing 4041.1\nstart processing G79412GP\nstart processing G20924UR\nstart processing 4402.1\nstart processing G75308SV\nstart processing G10292TC\nstart processing 3055.2\nstart processing 5312.1\nclosing poll\njoining pool\nfinished 0\nfinished 1\nfinished 3\nfinished 8\nfinished 4\nfinished 5\nfinished 7\nfinished 11\nfinished 10\nfinished 13\nfinished 6\nfinished 12\nfinished 15\nfinished 14\nfinished 16\nfinished 19\nfinished 18\nfinished 17\nfinished 23\nfinished 21\nfinished 20\nfinished 25\nfinished 27\nfinished 26\nfinished 30\nfinished 28\nfinished 31\nfinished 33\nfinished 34\nfinished 35\nfinished 36\nfinished 38\nfinished 37\nfinished 39\nfinished 41\nfinished 32\nfinished 42\nfinished 43\nfinished 29\nfinished 44\nfinished 47\nfinished 48\nfinished 45\nfinished 40\nfinished 50\nfinished 52\nfinished 51\nfinished 22\nfinished 54\nfinished 46\nfinished 56\nfinished 57\nfinished 58\nfinished 49\nfinished 55\nfinished 59\nfinished 61\nfinished 60\nfinished 63\nfinished 2\nfinished 67\nfinished 64\nfinished 68\nfinished 66\nfinished 70\nfinished 71\nfinished 24\nfinished 62\nfinished 53\nfinished 65\nfinished 75\nfinished 73\nfinished 74\nfinished 9\nfinished 69\nfinished 72\nfinished 76\nconverting dict\n"
],
[
"# load motif vector and return edge_list\nmotif_dict = json_utility.load_json(\"/Users/apple/PycharmProjects/GlyCompare/intermediate_file/NBT_motif_dic_degree_list.json\")\nmotif_lib = motif_class.GlycanMotifLib(motif_dict)\ndep_tree, edge_list = motif_lib.motif_dependence_tree()",
"<class 'str'>\nstart motif_with core\nlen 5\nlen 6\nlen 7\nlen 8\nlen 9\nlen 10\nlen 11\nlen 12\nlen 13\nlen 14\nlen 15\nlen 16\nlen 17\nlen 18\nlen 19\nlen 20\nlen 21\nFinish the n-glycan match 716 motifs are matched to the n-glycan core\nstart building dependence_tree\n"
],
[
"edge_list",
"_____no_output_____"
],
[
"len(motif_lib.motif_vec)",
"_____no_output_____"
]
],
[
[
"## plot glycan mass",
"_____no_output_____"
]
],
[
[
"a = json_utility.load_json('/Users/apple/PycharmProjects/nbt_glycan_profile/intermediate_file/NBT_glycan_dict.json')",
"_____no_output_____"
],
[
"name_k = {}\nname_dict = {}\nlist_k = []\nlist_mass = []\n# fi.patch.set_facecolor('white')\nfor i in sorted(a.keys()):\n for k in a[i].keys():\n name_k[k] = a[i][k]\n name_dict[k] = i\n list_k.append(glycoct.loads(a[i][k]))\n list_mass.append(i)\n",
"_____no_output_____"
],
[
"len(list(name_k))",
"_____no_output_____"
],
[
"plot_glycan_utilities.plot_glycan_list(list_k, list_mass)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
cb748fef217e18037d4b654d9b0015968d3d84b0 | 3,737 | ipynb | Jupyter Notebook | pyspark-ml-crashcourse/notebooks/02 - PySpark WordCount Exercise.ipynb | Code360In/spark-code-examples | 181c9906d32571ba6138e63040edfcb4c74ef4bf | [
"MIT"
]
| 1 | 2021-12-13T15:41:48.000Z | 2021-12-13T15:41:48.000Z | pyspark-ml-crashcourse/notebooks/02 - PySpark WordCount Exercise.ipynb | Code360In/spark-code-examples | 181c9906d32571ba6138e63040edfcb4c74ef4bf | [
"MIT"
]
| 15 | 2021-09-12T15:06:13.000Z | 2022-03-31T19:02:08.000Z | pyspark-ml-crashcourse/notebooks/02 - PySpark WordCount Exercise.ipynb | Code360In/spark-code-examples | 181c9906d32571ba6138e63040edfcb4c74ef4bf | [
"MIT"
]
| 1 | 2022-01-29T00:37:52.000Z | 2022-01-29T00:37:52.000Z | 24.913333 | 182 | 0.578004 | [
[
[
"# Word Count Example\n\nSpark (as Hadoop) is a massively parallel system for counting words. Although DataFrames are not the perfect data structure for implementing a word count, it is still possible.",
"_____no_output_____"
]
],
[
[
"from pyspark.sql.functions import *",
"_____no_output_____"
],
[
"# Load text \"Alice in wonderland\"\ntext = spark.read.text(\"s3://dimajix-training/data/alice\")",
"_____no_output_____"
],
[
"# Inspect Schema of \"text\" DataFrame\n## YOUR CODE HERE",
"_____no_output_____"
],
[
"# Print first 10 entries of \"text\" DataFrame\n## YOUR CODE HERE",
"_____no_output_____"
]
],
[
[
"## Extracting words\nNow we a DataFrame with a single column. Each entry contains a line of the original text file. We need to extract the individual words in three steps:\n1. Split each line into words using the `split` function. This will result in a DataFrame with a single column, which contains a list of words\n2. Convert each list of words into individual records using the `explode` function\n3. Remove empty words using an appropriate `filter` expression",
"_____no_output_____"
]
],
[
[
"# 1. Using the split function, split each record into a list of words\nword_lists = text.select(split(text.value, ' ').alias(\"word_list\"))\n# 2. Using the explode function, convert each list into individual records\nwords = # YOUR CODE HERE\n# 3. Remove empty words\nnon_empty_words = # YOUR CODE HERE\n# Show first 10 entries\n# YOUR CODE HERE",
"_____no_output_____"
]
],
[
[
"## Counting Words\nNow that we have a DataFrame containing an individual word per record, we can count word frequencies using grouping and aggregation.\n1. Group by word\n2. Count the size of each group\n3. Sort by frequency (descinding)",
"_____no_output_____"
]
],
[
[
"# 1. & 2. Group by \"word\" column and count the size of each group\nresult = # YOUR CODE HERE\n\n# 3. Sort words by frequency (descending)\nsorted_result = # YOUR CODE HERE\n\n# Print first 10 entries (most frequent words)\n# YOUR CODE HERE",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb74a58f45f25c66e4a5b90404bdc349a366d78a | 150,677 | ipynb | Jupyter Notebook | diffusion.ipynb | pdebuyl-lab/colloidal_chemotaxis_companion | aa4dbc6e054275cb63771604c350ab60c5b5b0e6 | [
"BSD-3-Clause"
]
| null | null | null | diffusion.ipynb | pdebuyl-lab/colloidal_chemotaxis_companion | aa4dbc6e054275cb63771604c350ab60c5b5b0e6 | [
"BSD-3-Clause"
]
| null | null | null | diffusion.ipynb | pdebuyl-lab/colloidal_chemotaxis_companion | aa4dbc6e054275cb63771604c350ab60c5b5b0e6 | [
"BSD-3-Clause"
]
| null | null | null | 357.902613 | 67,776 | 0.922171 | [
[
[
"# Passive and active colloidal chemotaxis in a microfluidic channel: mesoscopic and stochastic models\n\n**Author:** Pierre de Buyl \n*Supplemental information to the article by L. Deprez and P. de Buyl*\n\nThis notebook reports the characterization of the diffusion coefficients for a rigid dimer\nconfined between plates.\n\nThe data originates from the RMPCDMD simulation program. Please read its documentation and the\npublished paper for meaningful use of this notebook.\n\nThe correlation functions are computed online in RMPCDMD and stored in the H5MD files. They are read here\nand integrated to obtain the diffusion coefficients. A time limit on the integral is set for all integrals,\nand displayed in the figures, to obtain the value of the plateau of the running integral for D.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport h5py\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import SubplotParams\nimport numpy as np\nfrom scipy.signal import fftconvolve\nfrom scipy.optimize import leastsq, curve_fit\nfrom scipy.integrate import simps, cumtrapz\nfrom glob import glob\n\nplt.rcParams['figure.figsize'] = (12, 6)\nplt.rcParams['figure.subplot.hspace'] = 0.25\nplt.rcParams['figure.subplot.wspace'] = 0.25\nplt.rcParams['figure.subplot.left'] = 0.17\nplt.rcParams['axes.labelsize'] = 16",
"_____no_output_____"
],
[
"def expfitfunc(t, f0, tau):\n \"\"\"Exponential fitting function\"\"\"\n return f0*np.exp(-t/tau)\n\ndef fitfunc(p, t):\n \"\"\"Linear fitting function\"\"\"\n return p[0] + p[1]*t\n\ndef errfunc(p, t, y):\n \"\"\"Error function for `fitfunc`\"\"\"\n return fitfunc(p, t) - y\n\ndef get_block_data(group, name, dim=3):\n \"\"\"Return the time and correlation function for the data\n read from RMPCDMD output files.\"\"\"\n block = group[name]['value'][:]\n count = group[name]['count'][:]\n block /= count.reshape((-1, 1, 1, 1))\n t_data = [np.array([0])]\n data = [block[0,:1,:,:].reshape((-1,dim))]\n dt = group[name]['time'][()]\n for i in range(block.shape[0]):\n t = dt*np.arange(block.shape[1])*block.shape[1]**i\n t_data.append(t[1:])\n data.append(block[i,1:,:,:].reshape((-1,dim)))\n\n return np.concatenate(t_data), np.concatenate(data)\n",
"_____no_output_____"
],
[
"# Collect simulation data\n\nruns = glob('cceq_*.h5')\n\nruns.sort()\n\nmsd_all = []\nvacf_all = []\ntvacf_all = []\npvacf_all = []\nwacf_all = []\n\nfor f in runs:\n a = h5py.File(f, 'r')\n group = a['block_correlators']\n\n msd_t, msd_data = get_block_data(group, 'mean_square_displacement')\n msd_all.append(msd_data)\n\n vacf_t, vacf_data = get_block_data(group, 'velocity_autocorrelation')\n vacf_all.append(vacf_data)\n\n do_pvacf = 'parallel_velocity_autocorrelation' in group\n if do_pvacf:\n pvacf_t, pvacf_data = get_block_data(group, 'parallel_velocity_autocorrelation')\n pvacf_all.append(pvacf_data)\n\n do_tvacf = 'transverse_velocity_autocorrelation' in group\n if do_tvacf:\n tvacf_t, tvacf_data = get_block_data(group, 'transverse_velocity_autocorrelation')\n tvacf_all.append(tvacf_data)\n\n do_wacf = 'planar_angular_velocity_autocorrelation' in group\n if do_wacf:\n wacf_t, w_data = get_block_data(group, 'planar_angular_velocity_autocorrelation', dim=1)\n wacf_all.append(w_data.flatten())\n \n a.close()\n\nmsd_all = np.array(msd_all)\nvacf_all = np.array(vacf_all)\npvacf_all = np.array(pvacf_all)\ntvacf_all = np.array(tvacf_all)\nwacf_all = np.array(wacf_all)",
"_____no_output_____"
]
],
[
[
"Below, we plot the mean-square displacement (MSD) of the dimer in cartesian coordinates.\nThere are thus three components. The z component saturates because of the confinement.\nThe x and y components result from a mixing of the parallel and transverse diffusion\ncoefficients.\nThe fit is for the long-time behaviour of the x-y MSD.",
"_____no_output_____"
]
],
[
[
"# Plot and fit the mean-squared displacement\n\nplt.ylabel(r'$\\langle (\\mathbf{r}(\\tau) - \\mathbf{r}(0))^2 \\rangle$')\n\nm = msd_all.mean(axis=0)\n\n# Plot all three components\nplt.plot(msd_t, m, marker='o')\n\n# Sum only xy components\nm = m[...,:2].sum(axis=-1)\n\n# Fit data to t>100\nmask = msd_t>100\nsolution, ierr = leastsq(errfunc, [0, 0.1], args=(msd_t[mask], m[mask]))\nintercept, D = solution\n\n# MSD = 2 d D t = 4 D t -> The coefficient of the linear fit must be divided by 4\n# as the diffusion in z is bounded by the confining plates.\nD = D/4\nplt.plot(msd_t, fitfunc((intercept, 2*D), msd_t))\nplt.xlabel(r'$\\tau$')\nplt.loglog()\n\n# Via the MSD, we can only access the sum of D_parallel and D_perp\nprint(\"D_parallel + D_perp = \", 2*D)\n",
"D_parallel + D_perp = 0.00346132554469\n"
]
],
[
[
"We use the velocity autocorrelation function (VACF) of the transverse and\nparallel components of the velocity.\nIntegrating those functions yields the transverse and parallel diffusion\ncoefficients.\nThe integration is stopped when it reaches a plateau. This is done by setting\na limit in time, that is highlighted by reference lines in the plots.\n\nWe proceed in the same fashion for the planar angle diffusion coefficient.",
"_____no_output_____"
]
],
[
[
"# Integrate the VACF\n\nlimit = 800\n\nparams = SubplotParams(hspace=0.08, wspace=0.15)\nplt.figure(figsize=(14,8), subplotpars=params)\n\n# Transverse VACF\n\nm = tvacf_all[...,:2].sum(axis=-1).mean(axis=0)\n\nax1 = plt.subplot(221)\n\nplt.plot(tvacf_t, m, marker='o')\nplt.axvline(limit)\nplt.xscale('log')\nplt.xticks([])\nplt.ylabel(r'Transv. VACF')\n\n# Integral of transverse VACF\n\nax1_int = plt.subplot(222)\n\nplt.plot(tvacf_t, cumtrapz(m, tvacf_t, initial=0))\nplt.axvline(limit)\nplt.xscale('log')\nplt.xticks([])\n\nidx = np.searchsorted(tvacf_t, limit)\nintegrated_Dt = simps(m[:idx], tvacf_t[:idx])\nplt.axhline(integrated_Dt)\n\nax1_int.yaxis.tick_right()\nax1_int.yaxis.set_label_position('right')\nplt.ylabel(r'Integral of transv. VACF')\nplt.ylim(-0.0002,0.0025)\n\n# Parallel VACF\n\nax2 = plt.subplot(223)\n\nm = pvacf_all[...,:2].sum(axis=-1).mean(axis=0)\nplt.plot(pvacf_t, m, marker='o')\nplt.axvline(limit)\nplt.xscale('log')\nplt.xlabel(r'$\\tau$')\nplt.ylabel(r'Parallel VACF')\n\n# Integral of parallel VACF\n\nax2_int = plt.subplot(224)\nplt.plot(pvacf_t, cumtrapz(m, pvacf_t, initial=0))\nplt.axvline(limit)\nplt.xscale('log')\nplt.xlabel(r'$\\tau$')\n\nidx = np.searchsorted(pvacf_t, limit)\nintegrated_Dp = simps(m[:idx], pvacf_t[:idx])\n\nplt.axhline(integrated_Dp)\nplt.ylim(-0.0002,0.0025)\n\nax2_int.yaxis.tick_right()\nax2_int.yaxis.set_label_position('right')\nplt.ylabel(r'Integral of parallel VACF')\n\nprint('Transverse D:', integrated_Dt)\nprint('Parallel D:', integrated_Dp)\nprint(\"Sum of the D's\", integrated_Dt+integrated_Dp)\n",
"Transverse D: 0.00154324462063\nParallel D: 0.00201316703627\nSum of the D's 0.0035564116569\n"
],
[
"plt.figure(figsize=(14,4), subplotpars=params)\n\nm = wacf_all.mean(axis=0)\ns = wacf_all.std(axis=0)\n\nax1 = plt.subplot(121)\nplt.xscale('log')\n\nplt.plot(wacf_t, m, marker='o')\nplt.axvline(limit)\nplt.xlim(.5, 1e4)\nplt.xlabel(r'$\\tau$')\nplt.ylabel(r'Orientational ACF')\n\nax2 = plt.subplot(122)\nplt.xscale('log')\nax2.yaxis.tick_right()\nax2.yaxis.set_label_position('right')\n\nplt.plot(wacf_t, cumtrapz(m, wacf_t, initial=0))\nplt.xlim(.5, 1e4)\nplt.ylim(-1e-6, 2e-4)\nplt.xlabel(r'$\\tau$')\nplt.ylabel(r'Integral of orientational ACF')\n\nlimit = 800\nidx = np.searchsorted(wacf_t, limit)\nplt.axvline(limit)\n\nD_integral = simps(m[:idx], wacf_t[:idx])\nprint('Integrated rotational diffusion coefficient', D_integral)\nplt.axhline(D_integral)\n\nplt.xlabel(r'$\\tau$')\n",
"Integrated rotational diffusion coefficient 0.000141341609226\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb74aa844600ba7c3cf6bfd04a04075dd2e074f1 | 192,345 | ipynb | Jupyter Notebook | Scipy-stats.ipynb | sinduff/MLS_Assign | 5070a0a524b412f66f9a17f5e7b329c49b496b50 | [
"MIT"
]
| null | null | null | Scipy-stats.ipynb | sinduff/MLS_Assign | 5070a0a524b412f66f9a17f5e7b329c49b496b50 | [
"MIT"
]
| null | null | null | Scipy-stats.ipynb | sinduff/MLS_Assign | 5070a0a524b412f66f9a17f5e7b329c49b496b50 | [
"MIT"
]
| null | null | null | 166.676776 | 92,952 | 0.861135 | [
[
[
"# Project for Machine Learning and Statistics - December 2021\n\n## Submitted by Sinéad Duffy, ID 10016151\n\n***\n\n## Notebook 2 - Scipy-stats.ipynb\n\n### Brief - write an overview of the SciPy.stats library, outline (using examples) the package and complete an example hypothesis using ANOVA\n\n***",
"_____no_output_____"
],
[
"\n\n# SciPy.Stats Library\nSciPy is a an extension of the NumPy language in Python, and gives users the opportunity to work with data in an environment similar to that of MatLab/ SciLab etc.$^3$ The package is organised into 15 subpackages dealing with specific mathmatical domains such as clustering, optimize, sparse and statistics. For the purpose of this notebook, the author will focus on the SciPy.Stats package.\n<br><br>\nSciPy.Stats contains algorithms outlining probability distributions, summary / frequency, correlation and statistical tests. Users will need to still include packages such as Pandas to format the data before applying an algorithm to it.\n<br><br>\nSciPy.Stats allows the user to complete t-tests (__ttest_1samp()__) as well as One-Way Anova (__f_oneway()__). T-tests allow the user to compare the statistical difference between two groups, whilst a one-way ANOVA between three or more groups. Completing a one-way ANOVA is outlined in the following paragraphs. \n\n***\n### What is a One-Way ANOVA\n<br>\nLaerd Statistics defines ANOVA as being short for one-way analysis of variance, and outlines that it is used to see if there are any statistically significant differences between 3 or more independent (unrelated) groups.$^9$\n<br><br>\nAn ANOVA will allow a person to understand the statistical differences (variances) through use of hypotheses. In this instance, an null hypothesis and an alternative hypothesis is formed, along with a research question to be answered.\n<br><br>\nThe research question in this instance is; What is the best video to show when informing a group of the public about a medical condition.\n<br><br>\nThe hypotheses relating to this question are;\n<br><br>\n<b>1 - Null Hypothesis</b> is that there is no difference between the subjects knowledge of the medical condition after watching the videos\n<br><br>\n<b>2 - Alternative Hypothesis</b> is that there is a difference between the groups based on their knowledge of the medical condition\n\n***\n\nLaerd Statistics$^2$ outline that for a sucessful ANOVA to be run, the data will need to pass 6 assumptions. The assumptions are;\n\n1 - <b>Dependent variable</b> is an interval or ratio along a continious scale<br>\n2 - <b>Independent Variable</b> should be made up of categorical groups<br>\n3 - The data has <b>independence of observations</b><br>\n4 - The data has <b>no significant outliers</b><br>\n5 - The dependent variable has <b>a normal distribution</b> for each category of the independent variable<br>\n6 - There is <b>homogeneity of variances</b> in the data<br>\n\nThe author has determined that the independent variable in this example is the categorical data which outlines the groups who have heard / not heard / or it is not relevant of the medical condition (i.e the column of Heardofcondition). The dependent variable is the first preference video of the shown to the public. The data has been gathered using a Likert Scale.",
"_____no_output_____"
]
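The overview above names `ttest_1samp()` alongside `f_oneway()`, but only the latter is demonstrated later in the notebook. The following is a minimal sketch of both calls on synthetic data; the sample sizes, group means and the hypothesised population mean of 100 are illustrative assumptions, not values from the video dataset.

```python
import numpy as np
import scipy.stats as ss

rng = np.random.default_rng(42)

# One-sample t-test: does one group's mean differ from a hypothesised value (here 100)?
group = rng.normal(loc=102, scale=10, size=30)
t_stat, t_p = ss.ttest_1samp(group, popmean=100)

# One-way ANOVA: do three or more independent groups share the same mean?
g1 = rng.normal(loc=100, scale=10, size=30)
g2 = rng.normal(loc=101, scale=10, size=30)
g3 = rng.normal(loc=99, scale=10, size=30)
f_stat, f_p = ss.f_oneway(g1, g2, g3)

print(f"t-test: statistic={t_stat:.3f}, p-value={t_p:.3f}")
print(f"ANOVA:  statistic={f_stat:.3f}, p-value={f_p:.3f}")
```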
],
[
[
"# import the standard pyton libraries\nimport pandas as pd\nimport numpy as np\n\n# import graphs library\nimport seaborn as sns\nimport matplotlib as plt\n\n# set style for graphs\nsns.set_style(\"white\")\n\n# Statsistical Libraries\nimport scipy.stats as ss\n\n# create tables\nfrom tabulate import tabulate\n",
"_____no_output_____"
]
],
[
[
"### Import and explore the dataset\n\nThe chosen dataset relates to informational videos relating to a prescribed medical condition. The dataset was sourced from the University of Sheffield. $^1$\n<br>\n\nThe attributes of the dataframe are;\n\n- Person, index value to the person who answered the survey\n- Gender, with binary values of 1= Male, 2 = Female\n- Heardofcondition questions if the respondent heard of the condition being discussed, the answers are 0 = N/A, 1 = Yes, 2 = No\n- Set gives the order of the groups the preference of the respondents in terms of videos watched, the replies are maked as\n - 1 = General Video A, \n - 2 = Medical video B, \n - 3 = Old video C, \n - 4 = Demo D\n - @1st-Favourite video\n - @2nd-2nd favourite\n - @3rd-3rd favourite\n - @4th-Least favourite\n- Combination displays the order that videos were seen in; the combination is shown as a series of numbers\n- General understandings of the videos. The ordinal Likert scale used was from 1, where the respondents strongly disagree to 5 where the respondents strongly agree\n - VideoAGenUnderstandingCONDITION wehere the video A is a general understanding\n - VideoBdoctorUnderstandingCONDITION where video B Doctors video B understanding\n - VideoCOldUnderstandingCONDITION where video C is the old understanding\n - DEMOUnderstandingCONDITION where video D demonstrates an understanding\n- TotalAGen-Overall score (video A)\n- TotalBdoc-Overall score (video B) \n- TotalCOld-Overall score (video C)\n- TotalDDEMO-Overall score (demo D)\n\nThe following sections outline explore the dataframe before completing the ANOVA analysis.",
"_____no_output_____"
]
],
[
[
"# import the dataframe to the notebook\ndf = pd.read_csv('https://www.sheffield.ac.uk/polopoly_fs/1.937213!/file/Video_R.csv')\n\n#display first 5 rows of the dataframe\ndf.head(5)",
"_____no_output_____"
],
[
"#show the main statistics associated with df\ndf.describe()",
"_____no_output_____"
]
],
[
[
"***\n### Preparing for the ANOVA\n\nThis section will look at the 6 assumptions that must be taken into account to run a true ANOVA.\n<br> <br> \nAs outlined above, 6 assumptions must be passed in order for the results of the ANOVA to be true. \n\n***",
"_____no_output_____"
],
[
"#### Assumption 1 - Dependent Variable\n\nThe dependent variable 'should be measured at the interval or ratio level (i.e., they are continuous).'$^2$\n<br> <br> \nIn this instance, the chosen dependent variable is the 1st preference video of each of the group. ",
"_____no_output_____"
]
],
[
[
"# Set a value for the dependent variable\n\ndependent = df['@1st']\n",
"_____no_output_____"
]
],
[
[
"***\n#### Assumption 2 - Independent Variable\n\nThe indepedent variable should consist of at least two independent categorical groups$^2$.\n<br> <br> \nFor this analysis, the chosen categorical variable with no overlap e.g.you have heard of the condition, you haven't heard of the condition, or the question is not appliciable to you.",
"_____no_output_____"
]
],
[
[
"#The independent variable \n\nindependent = df['Heardofcondition']\n",
"_____no_output_____"
]
],
[
[
"***\n#### Assumption 3 - Independence of Observation\n\nThis refers to the fact that there should be no relationship between the groups themselves $^2$.\n<br><br>\nThis dataset was collected to evaulate the best way of educating the public about a medical condition$^1$. \n<br><br>\nThe source does not call out any relationships between the groups of data. Data was collected using Likert style questions were answers were given along a scale.$^2$\n<br>",
"_____no_output_____"
],
[
"***\n#### Assumption 4 - No Significant Outliers\n\nLaerd Statistics outlines that the chosen variables should have no significant outliers in the data$^2$. \n<br><br>\nThe author will demonstrate this using boxplots. The dependent and independent variables are plotted together and seperately to identify any outliers.\n<br><br>\nAs can be clearly seen, there is no significant outliers identifed in the dataset.\n<br>",
"_____no_output_____"
]
],
[
[
"#plotting the dependent and independent variables\n\nsns.boxplot(x=dependent, y=independent)",
"_____no_output_____"
],
[
"#plotting the dependent variable\n\nsns.boxplot(x=dependent)",
"_____no_output_____"
],
[
"#plotting the independent variable\n\nsns.boxplot(y=independent)",
"_____no_output_____"
]
],
[
[
"***\n#### Assumption 5 - Normal distribution for each of the independent variable categories\n\nOne of the key assumptions is that the dependent variable should approximately follow a normal distribution for the different categories of the individual variable$^2$.\n<br><br>\nTo confirm if a normal distribution is true, a displot of the independent variables is plotted against the dependent varaible. The results show that the curves appear to largely follow a normal distribution.\n<br><br>\nFurther analysis can be completed using the Shapiro Wilks test as the sample in this instance is less than 50$^4$. \n<br><br>\nResults of the Shapiro Wilks test with a pvalue of greater that 0.05$^6$, indicate that the data is normally distributed. Where the value of p is less than 0.05, then the data is not normal, i.e. the data will deviate from a normal distribution.$^4$\n<br>",
"_____no_output_____"
]
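To make the 0.05 decision rule described above explicit, the small helper below turns each Shapiro-Wilk p-value into a normal / not-normal verdict. It is a sketch only and assumes the `ss`, `dependent` and `independent` names already defined earlier in this notebook.

```python
def shapiro_verdict(sample, alpha=0.05):
    """Return the Shapiro-Wilk p-value and whether normality is plausible at the given alpha."""
    stat, p = ss.shapiro(sample)
    return p, p > alpha  # p > alpha: no evidence against normality

for group in (0, 1, 2):
    p, looks_normal = shapiro_verdict(dependent[independent == group])
    print(f"group {group}: p-value = {p:.4f}, normality plausible: {looks_normal}")
```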
],
[
[
"sns.displot(x=dependent, hue=independent, kind=\"kde\")",
"_____no_output_____"
],
[
"# Shapiro Wilk test for Normalacy - 1\n# previous knowledge N/a\n\nshapiro_test1 = ss.shapiro(dependent[independent == 0])\n#shapiro_test1\n\nprint(\"The p-value of the Shapiro_Test1 is = {:.2}\".format(shapiro_test1.pvalue))",
"The p-value of the Shapiro_Test1 is = 0.011\n"
],
[
"# Shapiro Wilk test for Normalacy - 2\n# previous knowledge is yes\n\nshapiro_test2 = ss.shapiro(dependent[independent == 1])\n#shapiro_test2\n\nprint(\"The p-value of the Shapiro_Test2 is = {:.2}\".format(shapiro_test2.pvalue))",
"The p-value of the Shapiro_Test2 is = 0.025\n"
],
[
"# Shapiro Wilk test for Normalacy - 3\n# previous knowledge is no\n\nshapiro_test3 = ss.shapiro(dependent[independent == 2])\n#shapiro_test3\n\nprint(\"The p-value of the Shapiro_Test3 is = {:.2}\".format(shapiro_test3.pvalue))",
"The p-value of the Shapiro_Test3 is = 0.024\n"
]
],
[
[
"***\n#### Assumption 6 - Homogeneity of variances\n\nLaerd Statistics outlines that the 6th and final assumption to complete an Anova analysis is that there must be 'homogeneity of variances'.$^2$ This relates to the <i>t</i> and <i>F</i> statistics respectivly $^2$,$^4$ , and basically means that the variance of the different groups should be the same$^6$.\n\nLaerd Statistics$^2$ outline that the Levene’s test for homogeneity of variances will help determine if this is the case for the chosen dataset.\n\nThe pvalue result (the significant value) should be greater than 0.05 for the variances to be treated as equal$^7$.\n\nUsing the levene test, it is possible to say that the current dataset has equal values.",
"_____no_output_____"
]
],
[
[
"#test for variances - Levene\n\nss.levene(\n dependent[independent == 0], \n dependent[independent == 1], \n dependent[independent == 2])",
"_____no_output_____"
]
],
[
[
"***\n#### Review of the Assumptions\n\nIn order for the data to comply with the ANOVA standards, it must pass all of the 6 assumptions outlined above.\n<br>\nThe results of the analysis clearly show that Assumption 5 outlining the need for the data to follow a normal distribution curve initially is true. However further analysis using the Shapiro-Wilks tests shows that the data fails this test.\n<br><br>\nThe table below shows the results of the 3 tests runs for each of the categories of data. All the values are less than 0.05, therefore the data does not follow a normal distribution.\n<br><br>\nAssumption 5 is the only assumption to fail the ANOVA test. Laerd Statistics outline that the one-way ANOVA is a robust test and can accept data that does not fully follow the normal distribution$^10$. \n<br><br>\nOn that basis, the author has decided to proceed with the ANOVA test, and will complete post hoc analysis using Tukey's honestly significant difference (HSD) as outined by Laerd Statistics.\n<br>",
"_____no_output_____"
]
],
[
[
"# display the results of the Shapiro results\n\nshapiro_results = {'Test1': [shapiro_test1.pvalue], \n 'Test2': [shapiro_test2.pvalue], \n 'Test3':[shapiro_test3.pvalue]}\n\nprint(tabulate(shapiro_results, headers='keys', tablefmt='fancy_grid'))",
"╒═══════════╤═══════════╤═══════════╕\n│ Test1 │ Test2 │ Test3 │\n╞═══════════╪═══════════╪═══════════╡\n│ 0.0107576 │ 0.0249161 │ 0.0238568 │\n╘═══════════╧═══════════╧═══════════╛\n"
]
],
[
[
"***\n### Running the ANOVA\n\nA pvalue result of greater than 0.05 mens that there was no statistially difference between the groups, and therefore the null hypothesis can be rejected $^8$ \n\nA pvalue result of less than 0.05 determines that a statistical difference was found. This requires a posthoc test should be run. $^8$ \n\nA posthoc will allow the author to deterimine where the difference between the groups occurred\n<br> \n",
"_____no_output_____"
]
],
[
[
"ss.f_oneway(\n dependent[independent == 0], \n dependent[independent == 1], \n dependent[independent == 2])",
"_____no_output_____"
]
],
[
[
"---\n\n### Reporting the results of the ANOVA\n\nhttps://statistics.laerd.com/statistical-guides/one-way-anova-statistical-guide-3.php\n\n",
"_____no_output_____"
],
[
"The pvalue of the one-way ANOVA is 0.45 (see above). This means that no statistical difference was identifed between the groups, so the null hypothesis can be rejected i.e. there is no difference between the subjects knowledge of the medical condition after watching the videos.\n\nThe author does acknowledge that the test group of 20 individuals was quiet small.",
"_____no_output_____"
],
[
"***\n### Post hoc test\n\nAs outlined previously, the dataset failed the normality test in Assumption 5. One of the reasons for this could be the small size of the sample. As such, the Author has decided to undertake post hoc analsys. \n<br><br>\nLaerd Satistic's suggest using Tukey's honestly significant difference (HSD) in cases where assumption 6 was not violated. In the case of this notebook, Assumption 5 was not met, i.e. normal distribtuion was not found to be in place. Tukey's test (also known as the honestly significant difference (HSD) test) will help explain where the signifcant differences lie between the groups that form part of the analysis.$^11$\n",
"_____no_output_____"
]
],
[
[
"from statsmodels.stats.multicomp import pairwise_tukeyhsd\n\nm_comp = pairwise_tukeyhsd(endog=df['@1st'], groups=df['Heardofcondition'], alpha=0.05)\nprint(m_comp)",
"Multiple Comparison of Means - Tukey HSD, FWER=0.05\n===================================================\ngroup1 group2 meandiff p-adj lower upper reject\n---------------------------------------------------\n 0 1 -0.5 0.6594 -1.9834 0.9834 False\n 0 2 0.375 0.8476 -1.4418 2.1918 False\n 1 2 0.875 0.4511 -0.9418 2.6918 False\n---------------------------------------------------\n"
]
],
[
[
"Referencing back to the ANOVA, the pvalue was 0.45, which is in excess of 0.05 which finds that the groups are significantly different. The results of Tukey's Post Hoc analsyis show that;\n<br>\n1. the pvalue of differences between group 0 and group 1 was 0.6594\n2. the pvalue of differences between group 0 and group 2 was 0.8476\n3. the pvalue of differences between group 0 and group 1 was 0.4511\n\n\nAs all the values are in excess of the pvalue of the ANOVA, and also in excess of the 0.05, it is possible to say that there is a statistically significant difference between all the goups. ",
"_____no_output_____"
],
[
"***\n\n### Conclusion\n\n",
"_____no_output_____"
],
[
"In conclusion, it is possoble to say that there is no statistical difference between the groups who watched the video, and their previous knowledge of the subject.",
"_____no_output_____"
],
[
"***\n### References:\n\n1. University of Sheffield.ac.uk, Datasets for Teaching, https://www.sheffield.ac.uk/mash/statistics/datasets, accessed 01 December 2021\n2. Laerd Statistics, One-way ANOVA in SPSS Statistics, https://statistics.laerd.com/spss-tutorials/one-way-anova-using-spss-statistics.php , accessed 01 December 2021\n3. Scipy.org, Statistical functions (scipy.stats), https://docs.scipy.org/doc/scipy/reference/stats.html, accessed 01 December 2021\n4. , LaerdStatistics.com, Testing for Normality using SPSS Statistics, https://statistics.laerd.com/spss-tutorials/testing-for-normality-using-spss-statistics.php, accessed 29 December 2021\n5. Statistic Solutions.com, The Assumption of Homogeneity of Variance, https://www.statisticssolutions.com/the-assumption-of-homogeneity-of-variance/, accessed 29 December 2021\n6. TechnologyNetworks.com, One-Way vs Two-Way ANOVA: Differences, Assumptions and Hypotheses, https://www.technologynetworks.com/informatics/articles/one-way-vs-two-way-anova-definition-differences-assumptions-and-hypotheses-306553, accessed 29 December 2021\n7. LaerdStatistics.com, Independent t-test for two samples, https://statistics.laerd.com/statistical-guides/independent-t-test-statistical-guide.php, accessed 29 December 2021\n8. LaerdStatistics.com, One-way ANOVA (cont...), https://statistics.laerd.com/statistical-guides/one-way-anova-statistical-guide-4.php, accessed 29 December 2021\n9. LaerdStatistic.com, One-way ANOVA, https://statistics.laerd.com/statistical-guides/one-way-anova-statistical-guide.php, accessed 29 December 2021\n10. LaerdStatistic.com, One-way ANOVA (Contd.), https://statistics.laerd.com/statistical-guides/one-way-anova-statistical-guide-3.php , accessed 29 December 2021\n11. Statisticshowto.com, What is the Tukey Test / Honest Significant Difference? , https://www.statisticshowto.com/tukey-test-honest-significant-difference/, accessed 30 December 2021\n\n\n\n## End\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
cb74ac2b8aa266cdb85f9efe350fc92c24f8f190 | 12,873 | ipynb | Jupyter Notebook | Testing_Meta_Model.ipynb | aCStandke/Stacked-Ensemble-CBOW-Model | a91478c48ca8ddbd64da9e6a786d8734164bb76a | [
"MIT"
]
| null | null | null | Testing_Meta_Model.ipynb | aCStandke/Stacked-Ensemble-CBOW-Model | a91478c48ca8ddbd64da9e6a786d8734164bb76a | [
"MIT"
]
| null | null | null | Testing_Meta_Model.ipynb | aCStandke/Stacked-Ensemble-CBOW-Model | a91478c48ca8ddbd64da9e6a786d8734164bb76a | [
"MIT"
]
| null | null | null | 37.973451 | 138 | 0.50804 | [
[
[
"EPOCHS = 40\nLR = 3e-4 \nBATCH_SIZE_TWO = 1\nHIDDEN =20\nMEMBERS = 3\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torchinfo import summary\nimport re\nimport string\nimport torch.optim as optim\nfrom torchtext.legacy import data\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"def collate_batch(batch):\n label_list, text_list, length_list = [], [], []\n for (_text,_label, _len) in batch:\n label_list.append(_label)\n length_list.append(_len)\n tensor = torch.tensor(_text, dtype=torch.long)\n text_list.append(tensor)\n text_list = pad_sequence(text_list, batch_first=True)\n label_list = torch.tensor(label_list, dtype=torch.float)\n length_list = torch.tensor(length_list)\n return text_list,label_list, length_list\n\nclass VectorizeData(Dataset):\n def __init__(self, file):\n self.data = pd.read_pickle(file)\n \n def __len__(self):\n return self.data.shape[0]\n \n def __getitem__(self, idx):\n X = self.data.vector[idx]\n lens = self.data.lengths[idx]\n y = self.data.label[idx]\n return X,y,lens\n \ntesting = VectorizeData('predict_set.csv')\nprediction = DataLoader(testing, batch_size=BATCH_SIZE_TWO, shuffle=False, collate_fn=collate_batch)",
"_____no_output_____"
],
[
"'''loading the pretrained embedding weights'''\nweights=torch.load('CBOW_NEWS.pth')\npre_trained = nn.Embedding.from_pretrained(weights)\npre_trained.weight.requires_grad=False",
"_____no_output_____"
],
[
"def create_emb_layer(pre_trained):\n num_embeddings = pre_trained.num_embeddings\n embedding_dim = pre_trained.embedding_dim\n emb_layer = nn.Embedding.from_pretrained(pre_trained.weight.data, freeze=True)\n return emb_layer, embedding_dim\n\nclass StackedLSTMAtteionModel(nn.Module):\n def __init__(self, pre_trained,num_labels):\n super(StackedLSTMAtteionModel, self).__init__()\n self.n_class = num_labels\n self.embedding, self.embedding_dim = create_emb_layer(pre_trained)\n self.LSTM = nn.LSTM(self.embedding_dim, HIDDEN, num_layers=2,bidirectional=True,dropout=0.26,batch_first=True)\n self.label = nn.Linear(2*HIDDEN, self.n_class)\n self.act = nn.Sigmoid()\n \n def attention_net(self, Lstm_output, final_state):\n hidden = final_state\n output = Lstm_output[0]\n attn_weights = torch.matmul(output, hidden.transpose(1, 0))\n soft_attn_weights = F.softmax(attn_weights.transpose(1, 0), dim=1)\n new_hidden_state = torch.matmul(output.transpose(1,0), soft_attn_weights.transpose(1,0))\n return new_hidden_state.transpose(1, 0)\n \n def forward(self, x, text_len):\n embeds = self.embedding(x)\n pack = pack_padded_sequence(embeds, text_len, batch_first=True, enforce_sorted=False)\n output, (hidden, cell) = self.LSTM(pack)\n hidden = torch.cat((hidden[0,:, :], hidden[1,:, :]), dim=1)\n attn_output = self.attention_net(output, hidden)\n logits = self.label(attn_output)\n outputs = self.act(logits.view(-1))\n return outputs\n \n \nclass TwoLayerGRUAttModel(nn.Module):\n def __init__(self, pre_trained, HIDDEN, num_labels):\n super(TwoLayerGRUAttModel, self).__init__()\n self.n_class = num_labels\n self.embedding, self.embedding_dim = create_emb_layer(pre_trained)\n self.gru = nn.GRU(self.embedding_dim, hidden_size=HIDDEN, num_layers=2,batch_first=True, bidirectional=True, dropout=0.2)\n self.label = nn.Linear(2*HIDDEN, self.n_class)\n self.act = nn.Sigmoid()\n \n def attention_net(self, gru_output, final_state):\n hidden = final_state\n output = gru_output[0]\n attn_weights = torch.matmul(output, hidden.transpose(1, 0))\n soft_attn_weights = F.softmax(attn_weights.transpose(1, 0), dim=1)\n new_hidden_state = torch.matmul(output.transpose(1,0), soft_attn_weights.transpose(1,0))\n return new_hidden_state.transpose(1, 0)\n \n def forward(self, x, text_len):\n embeds = self.embedding(x)\n pack = pack_padded_sequence(embeds, text_len, batch_first=True, enforce_sorted=False)\n output, hidden = self.gru(pack)\n hidden = torch.cat((hidden[0,:, :], hidden[1,:, :]), dim=1)\n attn_output = self.attention_net(output, hidden)\n logits = self.label(attn_output)\n outputs = self.act(logits.view(-1))\n return outputs \n \nclass C_DNN(nn.Module):\n def __init__(self, pre_trained,num_labels):\n super(C_DNN, self).__init__()\n self.n_class = num_labels\n self.embedding, self.embedding_dim = create_emb_layer(pre_trained)\n self.conv1D = nn.Conv2d(1, 100, kernel_size=(3,16), padding=(1,0))\n self.label = nn.Linear(100, self.n_class)\n self.act = nn.Sigmoid()\n \n def forward(self, x):\n embeds = self.embedding(x)\n embeds = embeds.unsqueeze(1)\n conv1d = self.conv1D(embeds)\n relu = F.relu(conv1d).squeeze(3)\n maxpool = F.max_pool1d(input=relu, kernel_size=relu.size(2)).squeeze(2)\n fc = self.label(maxpool)\n sig = self.act(fc)\n return sig.squeeze(1)\n \nclass MetaLearner(nn.Module):\n def __init__(self, modelA, modelB, modelC):\n super(MetaLearner, self).__init__()\n self.modelA = modelA\n self.modelB = modelB\n self.modelC = modelC\n self.fc1 = nn.Linear(3, 2)\n self.fc2 = nn.Linear(2, 1)\n self.act = nn.Sigmoid()\n \n 
def forward(self, text, length):\n x1=self.modelA(text, length) \n x2=self.modelB(text,length)\n x3=self.modelC(text)\n x4 = torch.cat((x1.detach(),x2.detach(), x3.detach()), dim=0)\n x5 = F.relu(self.fc1(x4))\n output = self.act(self.fc2(x5))\n return output",
"_____no_output_____"
],
[
"def load_all_models(n_models):\n all_models = []\n for i in range(n_models):\n filename = \"models/model_\"+str(i+1)+'.pth'\n if filename == \"models/model_1.pth\": \n model_one = StackedLSTMAtteionModel(pre_trained, 1)\n model_one.load_state_dict(torch.load(filename))\n for param in model_one.parameters():\n param.requires_grad = False\n all_models.append(model_one)\n elif filename == \"models/model_2.pth\":\n model_two = TwoLayerGRUAttModel(pre_trained, HIDDEN, 1)\n model_two.load_state_dict(torch.load(filename))\n for param in model_two.parameters():\n param.requires_grad = False\n all_models.append(model_two)\n else:\n model = C_DNN(pre_trained=pre_trained, num_labels=1)\n model.load_state_dict(torch.load(filename))\n for param in model.parameters():\n param.requires_grad = False\n all_models.append(model)\n return all_models",
"_____no_output_____"
],
[
"'''Loading the meta_model'''\nfilename=\"models/model_metaLearner.pth\"\nmodels = load_all_models(MEMBERS)\nmeta_model = MetaLearner(models[0], models[1], models[2])\nmeta_model.load_state_dict(torch.load(filename))",
"_____no_output_____"
],
[
"summary(meta_model)",
"_____no_output_____"
],
[
"def binary_accuracy(dataloader, model):\n #round predictions to the closest integer\n correct = []\n model.eval()\n with torch.no_grad():\n for idx, (text,label,lengths) in enumerate(dataloader):\n rounded_preds = torch.round(model(text, lengths))\n correct.append((rounded_preds == label).float()) \n acc = sum(correct)/len(correct)\n return acc\n\nprint('Checking the results of test dataset.')\naccu_test = binary_accuracy(prediction, meta_model)\nprint(f'test accuracy: {accu_test.item():8.3f}')",
"Checking the results of test dataset.\ntest accuracy: 0.898\n"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb74c5d363b9cab2fcc37dfa511c34b31eadd244 | 6,277 | ipynb | Jupyter Notebook | party/training.ipynb | wcoder/nn-playground | b4050128d99f891c32732bbc4115221885322c25 | [
"MIT"
]
| null | null | null | party/training.ipynb | wcoder/nn-playground | b4050128d99f891c32732bbc4115221885322c25 | [
"MIT"
]
| null | null | null | party/training.ipynb | wcoder/nn-playground | b4050128d99f891c32732bbc4115221885322c25 | [
"MIT"
]
| null | null | null | 30.470874 | 642 | 0.533057 | [
[
[
"import numpy as np\nimport sys",
"_____no_output_____"
],
[
"class PartyNN(object):\n\n def __init__(self, learning_rate=0.1):\n self.weights_0_1 = np.random.normal(0.0, 2 ** -0.5, (2, 3))\n self.weights_1_2 = np.random.normal(0.0, 1, (1, 2))\n self.sigmoid_mapper = np.vectorize(self.sigmoid)\n self.learning_rate = np.array([learning_rate])\n\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n def predict(self, inputs): # len=3\n inputs_1 = np.dot(self.weights_0_1, inputs)\n outputs_1 = self.sigmoid_mapper(inputs_1)\n\n inputs_2 = np.dot(self.weights_1_2, outputs_1)\n outputs_2 = self.sigmoid_mapper(inputs_2)\n return outputs_2\n\n def train(self, inputs, expected_predict):\n inputs_1 = np.dot(self.weights_0_1, inputs)\n outputs_1 = self.sigmoid_mapper(inputs_1)\n\n inputs_2 = np.dot(self.weights_1_2, outputs_1)\n outputs_2 = self.sigmoid_mapper(inputs_2)\n actual_predict = outputs_2[0]\n\n error_layer_2 = np.array([actual_predict - expected_predict])\n gradient_layer_2 = actual_predict * (1 - actual_predict)\n weights_delta_layer_2 = error_layer_2 * gradient_layer_2\n self.weights_1_2 -= (np.dot(weights_delta_layer_2, outputs_1.reshape(1, len(outputs_1)))) * self.learning_rate\n\n error_layer_1 = weights_delta_layer_2 * self.weights_1_2\n gradient_layer_1 = outputs_1 * (1 - outputs_1)\n weights_delta_layer_1 = error_layer_1 * gradient_layer_1\n self.weights_0_1 -= np.dot(inputs.reshape(len(inputs), 1), weights_delta_layer_1).T * self.learning_rate",
"_____no_output_____"
],
[
"def mean_squared_error(y, Y):\n return np.mean((y - Y) ** 2)",
"_____no_output_____"
],
[
"train = [\n ([0, 0, 0], 0),\n ([0, 0, 1], 1),\n ([0, 1, 0], 0),\n ([0, 1, 1], 0),\n ([1, 0, 0], 1),\n ([1, 0, 1], 1),\n ([1, 1, 0], 0),\n ([1, 1, 1], 1),\n]",
"_____no_output_____"
],
[
"# to GPU, Parallel\n\nepochs = 5000\nlearning_rate = 0.05\n\nnetwork = PartyNN(learning_rate=learning_rate)\n\nfor e in range(epochs):\n inputs_ = []\n correct_predictions = []\n for input_stat, correct_predict in train:\n network.train(np.array(input_stat), correct_predict)\n inputs_.append(np.array(input_stat))\n correct_predictions.append(np.array(correct_predict))\n \n train_loss = mean_squared_error(network.predict(np.array(inputs_).T), np.array(correct_predictions))\n sys.stdout.write(\"\\rProgress: {}, Training loss: {}\".format(str(100 * e / float(epochs))[:4], str(train_loss)[:5]))",
"Progress: 99.9, Training loss: 0.004"
],
[
"for input_stat, correct_predict in train:\n predict = network.predict(np.array(input_stat))\n print(\"For input: {} the prediction is: {}:{}, expected: {}\".format(\n str(input_stat),\n str(predict),\n str(predict > .5),\n str(correct_predict == 1)))",
"For input: [0, 0, 0] the prediction is: [0.12968761]:[False], expected: False\nFor input: [0, 0, 1] the prediction is: [0.94530342]:[ True], expected: True\nFor input: [0, 1, 0] the prediction is: [0.00098543]:[False], expected: False\nFor input: [0, 1, 1] the prediction is: [0.04263897]:[False], expected: False\nFor input: [1, 0, 0] the prediction is: [0.9450802]:[ True], expected: True\nFor input: [1, 0, 1] the prediction is: [0.97274353]:[ True], expected: True\nFor input: [1, 1, 0] the prediction is: [0.04262131]:[False], expected: False\nFor input: [1, 1, 1] the prediction is: [0.92116731]:[ True], expected: True\n"
],
[
"network.weights_0_1",
"_____no_output_____"
],
[
"network.weights_1_2",
"_____no_output_____"
]
],
[
[
"[Resource](https://www.youtube.com/watch?v=HA-F6cZPvrg)",
"_____no_output_____"
]
]
]
| [
"code",
"markdown"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
]
|
cb74f30b3ac26eae0162c9d0bbcdf86524833772 | 14,289 | ipynb | Jupyter Notebook | docs/notebooks/api/data.ipynb | GloriaColmenares/pyblp | 0eac4d652736553455731f80f1b0f7884d76578e | [
"MIT"
]
| 1 | 2020-02-10T08:46:50.000Z | 2020-02-10T08:46:50.000Z | docs/notebooks/api/data.ipynb | GloriaColmenares/pyblp | 0eac4d652736553455731f80f1b0f7884d76578e | [
"MIT"
]
| null | null | null | docs/notebooks/api/data.ipynb | GloriaColmenares/pyblp | 0eac4d652736553455731f80f1b0f7884d76578e | [
"MIT"
]
| null | null | null | 32.328054 | 236 | 0.371125 | [
[
[
"# Loading Data Example",
"_____no_output_____"
]
],
[
[
"import pyblp\n\npyblp.__version__",
"_____no_output_____"
]
],
[
[
"Any number of functions can be used to load the example data into memory. In this example, we'll first use [NumPy](https://numpy.org/).",
"_____no_output_____"
]
],
[
[
"import numpy as np\nblp_product_data = np.recfromcsv(pyblp.data.BLP_PRODUCTS_LOCATION, encoding='utf-8')\nblp_agent_data = np.recfromcsv(pyblp.data.BLP_AGENTS_LOCATION, encoding='utf-8')",
"_____no_output_____"
]
],
[
[
"Record arrays can be cumbersome to manipulate. A more flexible alternative is the [pandas](https://pandas.pydata.org/) DataFrame. Unlike NumPy, pyblp does not directly depend on pandas, but it can be useful when manipulating data.",
"_____no_output_____"
]
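As a small illustration of why the record array can feel cumbersome, the sketch below compares the same column lookups on the NumPy record array and on a pandas DataFrame. It assumes the BLP product data contains columns such as `prices`, `shares` and `market_ids`, which is not verified here.

```python
import numpy as np
import pandas as pd
import pyblp

# NumPy record array: fields are accessed by attribute or by name,
# but grouping, joining and reshaping quickly become awkward.
products_rec = np.recfromcsv(pyblp.data.BLP_PRODUCTS_LOCATION, encoding='utf-8')
print(products_rec.prices[:5])
print(products_rec['shares'][:5])

# pandas DataFrame: the same lookups plus convenient chained operations.
products_df = pd.read_csv(pyblp.data.BLP_PRODUCTS_LOCATION)
print(products_df[['prices', 'shares']].head())
print(products_df.groupby('market_ids')['shares'].sum().head())
```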
],
[
[
"import pandas as pd\nblp_product_data = pd.read_csv(pyblp.data.BLP_PRODUCTS_LOCATION)\nblp_agent_data = pd.read_csv(pyblp.data.BLP_AGENTS_LOCATION)",
"_____no_output_____"
]
],
[
[
"Another benefit of DataFrame objects is that they display nicely in Jupyter notebooks.",
"_____no_output_____"
]
],
[
[
"blp_product_data.head()",
"_____no_output_____"
],
[
"blp_agent_data.head()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb74f454f50628d5cf62e3b254635ad394e955b8 | 158,663 | ipynb | Jupyter Notebook | COLAB_version_of_plt_refactor.ipynb | pokepokepokedex/Pokedex-DS-Quinn | 3b781fc7b3af697708fbec312a673a005261776c | [
"MIT"
]
| null | null | null | COLAB_version_of_plt_refactor.ipynb | pokepokepokedex/Pokedex-DS-Quinn | 3b781fc7b3af697708fbec312a673a005261776c | [
"MIT"
]
| 1 | 2021-06-01T23:36:28.000Z | 2021-06-01T23:36:28.000Z | COLAB_version_of_plt_refactor.ipynb | pokepokepokedex/pokedex-ds-quinn | 3b781fc7b3af697708fbec312a673a005261776c | [
"MIT"
]
| null | null | null | 248.688088 | 45,132 | 0.895691 | [
[
[
"import sqlite3 as sl\nimport pandas as pd # type: ignore\n\n\nCOLORS_by_TYPE = {\n 'fire': 'red',\n 'water': '#09E1FF',\n 'normal': '#1DFDA8',\n 'poison': '#B918FF',\n 'electric': 'yellow',\n 'ground': '#FF9C15',\n 'fairy': '#FF69B4',\n 'grass': '#34FF5C',\n 'bug': '#90EE38',\n 'psychic': '#B71ECF',\n 'rock': '#DCB883',\n 'fighting': '#FF3A17',\n 'ghost': '#6817ff',\n 'ice': '#52fffa',\n 'dragon': '#a533ff',\n 'dark': '#3D009C',\n 'flying': '#4da1ff',\n 'steel': '#bfbfbf'}\n\n\ndef clean_lite_6(datf: pd.DataFrame) -> pd.DataFrame:\n return (datf.fillna('')\n .assign(Legendary=[1 if x else 0 for x in datf.Legendary],\n Sp_Attack=datf['Sp. Atk'],\n Sp_Defense=datf['Sp. Def'],\n Type1=datf['Type 1'],\n Type2=datf['Type 2'])\n .drop(['Sp. Atk', 'Sp. Def', 'Type 1', 'Type 2'], axis=1)\n .rename(lambda s: s.lower() + '_g6', axis='columns')\n )\n\n\ndef clean_7(datf: pd.DataFrame) -> pd.DataFrame:\n '''we need to renamed `against_fight` to `against_fighting`'''\n return datf\n\n\ndf6 = pd.read_csv('https://raw.githubusercontent.com/pokepokepokedex/pokedex-ds-quinn/master/Pokemon.csv').pipe(clean_lite_6)\n\ndf7 = pd.read_csv('https://raw.githubusercontent.com/pokepokepokedex/pokedex-ds-quinn/master/pokemon_w7.csv').pipe(clean_7)\n\ndf = df7.merge(df6, how='outer', left_on='name', right_on='name' + '_g6')\n\n\n\n\n\n\nimport pandas as pd # type: ignore\nimport numpy as np # type: ignore\nfrom scipy.stats import norm # type: ignore\nimport altair as alt # type : ignore\nfrom typing import Optional\nfrom functools import reduce\nfrom itertools import chain\nVcat = lambda R,S: R & S\nOcat = lambda C,D: C + D\n\n#from models import df, COLORS_by_TYPE\n\n\ntypes = set(chain.from_iterable(df[['type1', 'type2']].values)) - {np.nan}\n\nordering = pd.DataFrame(np.ones((len(types), len(types))), columns=types, index=types)\n\n\n\nclass PokeDescribe: \n def __init__(self, datf: pd.DataFrame): \n self.TYPE_COLOR_MAPPING = COLORS_by_TYPE\n self.HEIGHT = 30\n self.WIDTH = 330\n self.xlim = (0, 180)\n self.stats = ['hp', 'attack', 'defense', \n 'sp_attack', 'sp_defense', 'speed']\n self.df = datf\n self.x = np.linspace(self.xlim[0], self.xlim[1], 1000)\n self.gaussians = {name: norm(loc=self.df[name].mean(), \n scale=self.df[name].std()) \n for name in self.stats}\n self.bells = pd.DataFrame({**{'x': self.x}, \n **{name: self.gaussians[name].pdf(self.x) \n for name in self.stats}})\n self.C = alt.Chart(self.bells, \n height=self.HEIGHT, \n width=self.WIDTH\n ).mark_line(color='white').encode(\n x=alt.X('x', title=None, axis=alt.Axis(labels=False)))\n self.charts = {name: self.C.encode(y=alt.Y(name, title=None, axis=alt.Axis(labels=False))) for name in self.stats}\n self.BellCurves = reduce(Vcat, [self.charts[name] for name in self.stats])\n\nclass PokeDescribeNAME(PokeDescribe): \n def __init__(self, datf: pd.DataFrame, Name: str): \n super().__init__(datf)\n self.PSI = 50\n self.pokename = Name\n self.typ = self.df[self.df.name==self.pokename].type1.values[0]\n self.typ_color = self.TYPE_COLOR_MAPPING[self.typ]\n self.y_max = 1.3 * max([max(ls) for ls in [self.gaussians[st].pdf(self.x) for st in self.stats]])\n self.y = pd.DataFrame({'y': np.linspace(0, self.y_max, self.PSI)})\n self.D = alt.Chart(self.y).mark_line(color=self.typ_color).encode(y=alt.Y('y', title=None))\n self.means = {st: self.df[self.df.name==self.pokename][st].mean() for st in self.stats}\n self.Dcharts = {st: self.D.encode(x=alt.value(self.means[st]))\n for st in self.stats}\n self.SHOW = reduce(Vcat, [self.charts[st] + self.Dcharts[st] \n 
for st in self.stats]\n ).configure_text(color='white', angle=90)\n\n\n\n# from gaussians import PokeDescribeNAME\n\n# from models import df, COLORS_by_TYPE\n\nfrom typing import List\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nraichu = PokeDescribeNAME(df, 'Charizard')",
"_____no_output_____"
],
[
"raichu.bells.head()",
"_____no_output_____"
],
[
"raichu.means",
"_____no_output_____"
],
[
"stats = ['hp', 'attack', 'defense', 'sp_attack', 'sp_defense', 'speed']\n\nfig, axes = plt.subplots(nrows=len(stats), ncols=1, sharex=True, constrained_layout=True)\n#axes.flatten()\n\n\ndef bell(sts: List[str]): \n for i in range(len(sts)): \n c = COLORS_by_TYPE[raichu.typ]\n axes[i].set_xlim(raichu.xlim)\n axes[i].set_ylim(0, raichu.y_max)\n axes[i].axvline(raichu.means[sts[i]], color=c)#, xmin=raichu.xlim[0], xmax=raichu.xlim[1])\n axes[i].plot(x=raichu.bells.x, y=raichu.bells[sts[i]], color=c)\n\n\n# for i in range(len(stats)): \n# (i)\nbell(stats)",
"_____no_output_____"
],
[
"stats = ['hp', 'attack', 'defense', 'sp_attack', 'sp_defense', 'speed']\nsts = stats\n\nc = COLORS_by_TYPE[raichu.typ]\n\nplt.figure(1)\n\nplt.subplot(611)\nplt.plot(raichu.bells.x, raichu.bells.hp, color=c)\nplt.axvline(raichu.means[sts[0]], color=c)\n\nplt.subplot(612)\nplt.plot(raichu.bells.x, raichu.bells.attack, color=c)\nplt.axvline(raichu.means[sts[1]], color=c)\n\nplt.subplot(613)\nplt.plot(raichu.bells.x, raichu.bells.defense, color=c)\nplt.axvline(raichu.means[sts[2]], color=c)\n\nplt.subplot(614)\nplt.plot(raichu.bells.x, raichu.bells.sp_attack, color=c)\nplt.axvline(raichu.means[sts[3]], color=c)\n\nplt.subplot(615)\nplt.plot(raichu.bells.x, raichu.bells.sp_defense, color=c)\nplt.axvline(raichu.means[sts[4]], color=c)\n\nplt.subplot(616)\nplt.plot(raichu.bells.x, raichu.bells.speed, color=c)\nplt.axvline(raichu.means[sts[5]], color=c)\n\n\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(nrows=6, ncols=1, constrained_layout=True, sharex=True)\n\n# for i in range(6): \n# ax[i].set_xlim(raichu.xlim)\n\nax[0].plot(x=raichu.bells.x, y=raichu.bells.hp, color='black')\nax[1].plot(x=raichu.bells.x, y=raichu.bells.attack, color='black')\nax[2].plot(x=raichu.bells.x, y=raichu.bells.defense, color='black')\nax[3].plot(x=raichu.bells.x, y=raichu.bells.sp_attack, color='black')\nax[4].plot(x=raichu.bells.x, y=raichu.bells.sp_defense, color='black')\nax[5].plot(x=raichu.bells.x, y=raichu.bells.speed, color='black')\n\n\nplt.show()",
"_____no_output_____"
],
[
"plt.plot(x=raichu.bells.x, y=raichu.bells.defense, color='black'); ",
"_____no_output_____"
],
[
"import seaborn as sns\n\nsns.line(x=raichu.bells.x, y=raichu.bells.defense, color=COLORS_by_TYPE[raichu.typ])\nsns.line(x=raichu.bells.x, y=raichu.bells.sp_attack, color=COLORS_by_TYPE[raichu.typ])",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef f(t):\n return np.exp(-t) * np.cos(2*np.pi*t)\n\nt1 = np.arange(0.0, 5.0, 0.1)\nt2 = np.arange(0.0, 5.0, 0.02)\n\nplt.figure(1)\nplt.subplot(211)\nplt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')\n\nplt.subplot(212)\nplt.plot(t2, np.cos(2*np.pi*t2), 'r--')\nplt.show()",
"_____no_output_____"
],
[
"plt.figure(1)\nplt.subplot(611)\nplt.plot(raichu.bells.x, raichu.bells.hp)\nplt.subplot(612)\nplt.plot(raichu.bells.x, raichu.bells.attack)\n\nplt.show()",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb74f47ffda66b2158239550390feb1212881d2b | 285,507 | ipynb | Jupyter Notebook | xlmroberta-weighted-layer-pooling-training.ipynb | Leo1998-Lu/chaii-Hindi-and-Tamil-Question-Answering | cb0bdf1c18d142dd361676f2fbc2863f45ce8150 | [
"MIT"
]
| null | null | null | xlmroberta-weighted-layer-pooling-training.ipynb | Leo1998-Lu/chaii-Hindi-and-Tamil-Question-Answering | cb0bdf1c18d142dd361676f2fbc2863f45ce8150 | [
"MIT"
]
| null | null | null | xlmroberta-weighted-layer-pooling-training.ipynb | Leo1998-Lu/chaii-Hindi-and-Tamil-Question-Answering | cb0bdf1c18d142dd361676f2fbc2863f45ce8150 | [
"MIT"
]
| null | null | null | 52.959933 | 218 | 0.535423 | [
[
[
"#### Import Dependencies",
"_____no_output_____"
]
],
[
[
"import os\nimport gc\ngc.enable()\nimport math\nimport json\nimport time\nimport random\nimport multiprocessing\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm, trange\nfrom sklearn import model_selection\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nimport torch.optim as optim\nfrom torch.utils.data import (\n Dataset, DataLoader,\n SequentialSampler, RandomSampler\n)\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n from apex import amp\n APEX_INSTALLED = True\nexcept ImportError:\n APEX_INSTALLED = False\n\nimport transformers\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModel,\n AutoTokenizer,\n get_cosine_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n logging,\n MODEL_FOR_QUESTION_ANSWERING_MAPPING,\n)\nlogging.set_verbosity_warning()\nlogging.set_verbosity_error()\n\ndef fix_all_seeds(seed):\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef optimal_num_of_loader_workers():\n num_cpus = multiprocessing.cpu_count()\n num_gpus = torch.cuda.device_count()\n optimal_value = min(num_cpus, num_gpus*4) if num_gpus else num_cpus - 1\n return optimal_value\n\nMODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)",
"_____no_output_____"
],
[
"tamil_xquad_tr = pd.read_csv('../input/google-translated-squad20-to-hindi-and-tamil/squad_ta.csv')\nhindi_xquad_tr = pd.read_csv('../input/google-translated-squad20-to-hindi-and-tamil/squad_hi.csv')\nhindi_xquad_tr.head()",
"_____no_output_____"
],
[
"import ast\nhindi_xquad_tr['answers'] = hindi_xquad_tr['answers'].apply(ast.literal_eval)\ntamil_xquad_tr['answers'] = tamil_xquad_tr['answers'].apply(ast.literal_eval)\n\ndef get_text(d):\n return d[0]['text']\ndef get_start(d):\n return d[0]['answer_start']\n\nhindi_xquad_tr['answer_text'] = hindi_xquad_tr['answers'].apply(get_text)\nhindi_xquad_tr['answer_start'] = hindi_xquad_tr['answers'].apply(get_start)\ntamil_xquad_tr['answer_text'] = tamil_xquad_tr['answers'].apply(get_text)\ntamil_xquad_tr['answer_start'] = tamil_xquad_tr['answers'].apply(get_start)\n\nhindi_xquad_tr['language'] = 'hindi'\ntamil_xquad_tr['language'] = 'tamil'\n\nhindi_xquad_tr.drop(['id','answers','c_id','is_in'], axis=1, inplace=True)\ntamil_xquad_tr.drop(['id','answers','c_id','is_in'], axis=1, inplace=True)\n\nhindi_xquad_tr = hindi_xquad_tr[hindi_xquad_tr['answer_start']!=-1]\ntamil_xquad_tr = tamil_xquad_tr[tamil_xquad_tr['answer_start']!=-1]\n\nhindi_xquad_tr = hindi_xquad_tr.sample(frac=0.03)\ntamil_xquad_tr = tamil_xquad_tr.sample(frac=0.05)\n\ntamil_xquad_tr.shape,hindi_xquad_tr.shape",
"_____no_output_____"
],
[
"tamil_xquad_tr",
"_____no_output_____"
],
[
"XQA_tamil_dev = pd.read_csv('../input/preprocessed-xqa-tamil/XQA_tamil_dev.csv')\nXQA_tamil_test = pd.read_csv('../input/preprocessed-xqa-tamil/XQA_tamil_test.csv')\nXQA_tamil_dev = XQA_tamil_dev[XQA_tamil_dev['answer_start']!=-1]\nXQA_tamil_test = XQA_tamil_test[XQA_tamil_test['answer_start']!=-1]\nXQA_tamil_dev = XQA_tamil_dev.sample(frac=0.5)\nXQA_tamil_test = XQA_tamil_test.sample(frac=0.5)\nXQA_tamil_dev.head()",
"_____no_output_____"
],
[
"XQA_tamil_test.shape,XQA_tamil_dev.shape",
"_____no_output_____"
]
],
[
[
"#### Training Configuration",
"_____no_output_____"
]
],
[
[
"class Config:\n # model\n model_type = 'xlm_roberta'\n model_name_or_path = '../input/xlm-roberta-squad2/deepset/xlm-roberta-large-squad2'\n config_name = '../input/xlm-roberta-squad2/deepset/xlm-roberta-large-squad2'\n fp16 = True if APEX_INSTALLED else False\n fp16_opt_level = \"O1\"\n gradient_accumulation_steps = 2\n\n # tokenizer\n tokenizer_name = '../input/xlm-roberta-squad2/deepset/xlm-roberta-large-squad2'\n max_seq_length = 400\n doc_stride = 135\n\n # train\n epochs = 1\n train_batch_size = 4\n eval_batch_size = 8\n\n # optimizer\n optimizer_type = 'AdamW'\n learning_rate = 1e-5\n weight_decay = 1e-2\n epsilon = 1e-8\n max_grad_norm = 1.0\n \n # scheduler\n decay_name = 'cosine-warmup'\n warmup_ratio = 0.1\n\n # logging\n logging_steps = 10\n\n # evaluate\n output_dir = 'output'\n seed = 43",
"_____no_output_____"
]
],
[
[
"#### Data Factory",
"_____no_output_____"
]
],
[
[
"train = pd.read_csv('../input/chaii-hindi-and-tamil-question-answering/train.csv')\ntest = pd.read_csv('../input/chaii-hindi-and-tamil-question-answering/test.csv')\nexternal_mlqa = pd.read_csv('../input/mlqa-hindi-processed/mlqa_hindi.csv')\nexternal_xquad = pd.read_csv('../input/mlqa-hindi-processed/xquad.csv')\nexternal_train = pd.concat([external_mlqa, external_xquad,XQA_tamil_dev,XQA_tamil_test,hindi_xquad_tr, tamil_xquad_tr])#\n\ndef create_folds(data, num_splits):\n data[\"kfold\"] = -1\n kf = model_selection.StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=43)\n for f, (t_, v_) in enumerate(kf.split(X=data, y=data['language'])):\n data.loc[v_, 'kfold'] = f\n return data\n\ntrain = create_folds(train, num_splits=5)\nexternal_train[\"kfold\"] = -1\nexternal_train['id'] = list(np.arange(1, len(external_train)+1))\ntrain = pd.concat([train, external_train]).reset_index(drop=True)\n\ndef convert_answers(row):\n return {'answer_start': [row[0]], 'text': [row[1]]}\n\ntrain['answers'] = train[['answer_start', 'answer_text']].apply(convert_answers, axis=1)",
"_____no_output_____"
],
[
"len(train)",
"_____no_output_____"
],
[
"train = train.drop_duplicates(subset=['context','question','answer_text','answer_start','language'])\nlen(train)",
"_____no_output_____"
]
],
[
[
"#### Convert Examples to Features (Preprocess)",
"_____no_output_____"
]
],
[
[
"def prepare_train_features(args, example, tokenizer):\n example[\"question\"] = example[\"question\"].lstrip()\n tokenized_example = tokenizer(\n example[\"question\"],\n example[\"context\"],\n truncation=\"only_second\",\n max_length=args.max_seq_length,\n stride=args.doc_stride,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n padding=\"max_length\",\n )\n\n sample_mapping = tokenized_example.pop(\"overflow_to_sample_mapping\")\n offset_mapping = tokenized_example.pop(\"offset_mapping\")\n\n features = []\n for i, offsets in enumerate(offset_mapping):\n feature = {}\n\n input_ids = tokenized_example[\"input_ids\"][i]\n attention_mask = tokenized_example[\"attention_mask\"][i]\n\n feature['input_ids'] = input_ids\n feature['attention_mask'] = attention_mask\n feature['offset_mapping'] = offsets\n\n cls_index = input_ids.index(tokenizer.cls_token_id)\n sequence_ids = tokenized_example.sequence_ids(i)\n \n ## for validation\n feature[\"example_id\"] = example['id'] \n feature['sequence_ids'] = [0 if i is None else i for i in tokenized_example.sequence_ids(i)] \n feature['context'] = example[\"context\"]\n feature['question'] = example[\"question\"]\n feature['hindi_tamil'] = 0 if example[\"language\"]=='hindi' else 1 \n ## \n\n sample_index = sample_mapping[i]\n answers = example[\"answers\"]\n\n if len(answers[\"answer_start\"]) == 0:\n feature[\"start_position\"] = cls_index\n feature[\"end_position\"] = cls_index\n else:\n start_char = answers[\"answer_start\"][0]\n end_char = start_char + len(answers[\"text\"][0])\n\n token_start_index = 0\n while sequence_ids[token_start_index] != 1:\n token_start_index += 1\n\n token_end_index = len(input_ids) - 1\n while sequence_ids[token_end_index] != 1:\n token_end_index -= 1\n\n if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):\n feature[\"start_position\"] = cls_index\n feature[\"end_position\"] = cls_index\n else:\n while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:\n token_start_index += 1\n feature[\"start_position\"] = token_start_index - 1\n while offsets[token_end_index][1] >= end_char:\n token_end_index -= 1\n feature[\"end_position\"] = token_end_index + 1\n\n features.append(feature)\n return features",
"_____no_output_____"
]
],
[
[
"#### Dataset Retriever",
"_____no_output_____"
]
],
[
[
"class DatasetRetriever(Dataset):\n def __init__(self, features, mode='train'):\n super(DatasetRetriever, self).__init__()\n self.features = features\n self.mode = mode\n \n def __len__(self):\n return len(self.features)\n \n def __getitem__(self, item): \n feature = self.features[item]\n if self.mode == 'train':\n return {\n 'input_ids':torch.tensor(feature['input_ids'], dtype=torch.long),\n 'attention_mask':torch.tensor(feature['attention_mask'], dtype=torch.long),\n 'offset_mapping':torch.tensor(feature['offset_mapping'], dtype=torch.long),\n 'start_position':torch.tensor(feature['start_position'], dtype=torch.long),\n 'end_position':torch.tensor(feature['end_position'], dtype=torch.long)\n }\n else:\n if self.mode == 'valid': \n return {\n 'input_ids':torch.tensor(feature['input_ids'], dtype=torch.long),\n 'attention_mask':torch.tensor(feature['attention_mask'], dtype=torch.long),\n 'offset_mapping':torch.tensor(feature['offset_mapping'], dtype=torch.long),\n 'sequence_ids':feature['sequence_ids'],\n 'start_position':torch.tensor(feature['start_position'], dtype=torch.long),\n 'end_position':torch.tensor(feature['end_position'], dtype=torch.long),\n 'example_id':feature['example_id'],\n 'context': feature['context'],\n }\n else:\n return {\n 'input_ids':torch.tensor(feature['input_ids'], dtype=torch.long),\n 'attention_mask':torch.tensor(feature['attention_mask'], dtype=torch.long),\n 'offset_mapping':feature['offset_mapping'],\n 'sequence_ids':feature['sequence_ids'],\n 'id':feature['example_id'],\n 'context': feature['context'],\n 'question': feature['question']\n }",
"_____no_output_____"
]
],
[
[
"#### Model",
"_____no_output_____"
]
],
[
[
"class WeightedLayerPooling(nn.Module):\n def __init__(self, num_hidden_layers, layer_start: int = 4, layer_weights=None):\n super(WeightedLayerPooling, self).__init__()\n self.layer_start = layer_start\n self.num_hidden_layers = num_hidden_layers\n self.layer_weights = layer_weights if layer_weights is not None \\\n else nn.Parameter(\n torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float)\n )\n\n def forward(self, all_hidden_states):\n all_layer_embedding = all_hidden_states[self.layer_start:, :, :, :]\n weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())\n weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()\n return weighted_average\n\n\nclass Model(nn.Module):\n def __init__(self, modelname_or_path, config, layer_start, layer_weights=None):\n super(Model, self).__init__()\n self.config = config\n config.update({\n \"hidden_dropout_prob\": 0.0,\n \"layer_norm_eps\": 1e-7,\n \"output_hidden_states\": True\n })\n self.xlm_roberta = AutoModel.from_pretrained(modelname_or_path, config=config)\n self.layer_start = layer_start\n self.pooling = WeightedLayerPooling(config.num_hidden_layers,\n layer_start=layer_start,\n layer_weights=None)\n self.layer_norm = nn.LayerNorm(config.hidden_size)\n self.dropout = torch.nn.Dropout(0.3)\n self.qa_output = torch.nn.Linear(config.hidden_size, 2)\n torch.nn.init.normal_(self.qa_output.weight, std=0.02)\n\n def forward(self, input_ids, attention_mask=None):\n outputs = self.xlm_roberta(input_ids, attention_mask=attention_mask)\n all_hidden_states = torch.stack(outputs.hidden_states)\n weighted_pooling_embeddings = self.layer_norm(self.pooling(all_hidden_states))\n #weighted_pooling_embeddings = weighted_pooling_embeddings[:, 0]\n\n norm_embeddings = self.dropout(weighted_pooling_embeddings)\n logits = self.qa_output(norm_embeddings)\n start_logits, end_logits = logits.split(1, dim=-1)\n\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n return start_logits, end_logits",
"_____no_output_____"
]
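To inspect the pooling step above in isolation, the sketch below runs `WeightedLayerPooling` on random tensors; the shapes are illustrative only and it assumes the class definition and the `torch` import from the cells above.

```python
import torch

# Fake hidden states from a 24-layer encoder plus the embedding output:
# shape (num_layers + 1, batch, seq_len, hidden)
num_hidden_layers, batch, seq_len, hidden = 24, 2, 8, 16
all_hidden_states = torch.randn(num_hidden_layers + 1, batch, seq_len, hidden)

pool = WeightedLayerPooling(num_hidden_layers, layer_start=12)
pooled = pool(all_hidden_states)

print(pooled.shape)              # torch.Size([2, 8, 16]) - one weighted average per token
print(pool.layer_weights.shape)  # torch.Size([13]) - one learnable weight per pooled layer
```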
],
[
[
"#### Loss",
"_____no_output_____"
]
],
[
[
"def loss_fn(preds, labels):\n start_preds, end_preds = preds\n start_labels, end_labels = labels\n start_loss = nn.CrossEntropyLoss(ignore_index=-1)(start_preds, start_labels)\n end_loss = nn.CrossEntropyLoss(ignore_index=-1)(end_preds, end_labels)\n total_loss = (start_loss + end_loss) / 2\n return total_loss",
"_____no_output_____"
]
],
[
[
"#### Grouped Layerwise Learning Rate Decay",
"_____no_output_____"
]
],
[
[
"def get_optimizer_grouped_parameters(args, model):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n group1=['layer.0.','layer.1.','layer.2.','layer.3.']\n group2=['layer.4.','layer.5.','layer.6.','layer.7.'] \n group3=['layer.8.','layer.9.','layer.10.','layer.11.']\n group_all=['layer.0.','layer.1.','layer.2.','layer.3.','layer.4.','layer.5.','layer.6.','layer.7.','layer.8.','layer.9.','layer.10.','layer.11.']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if not any(nd in n for nd in no_decay) and not any(nd in n for nd in group_all)],'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if not any(nd in n for nd in no_decay) and any(nd in n for nd in group1)],'weight_decay': args.weight_decay, 'lr': args.learning_rate/10},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if not any(nd in n for nd in no_decay) and any(nd in n for nd in group2)],'weight_decay': args.weight_decay, 'lr': args.learning_rate},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if not any(nd in n for nd in no_decay) and any(nd in n for nd in group3)],'weight_decay': args.weight_decay, 'lr': args.learning_rate*10},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if any(nd in n for nd in no_decay) and not any(nd in n for nd in group_all)],'weight_decay': 0.0},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if any(nd in n for nd in no_decay) and any(nd in n for nd in group1)],'weight_decay': 0.0, 'lr': args.learning_rate/10},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if any(nd in n for nd in no_decay) and any(nd in n for nd in group2)],'weight_decay': 0.0, 'lr': args.learning_rate},\n {'params': [p for n, p in model.xlm_roberta.named_parameters() if any(nd in n for nd in no_decay) and any(nd in n for nd in group3)],'weight_decay': 0.0, 'lr': args.learning_rate*10},\n {'params': [p for n, p in model.named_parameters() if args.model_type not in n], 'lr':args.learning_rate*40, \"weight_decay\": 0.0},\n ]\n return optimizer_grouped_parameters",
"_____no_output_____"
]
],
[
[
"#### Metric Logger",
"_____no_output_____"
]
],
[
[
"class AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = 0\n self.min = 1e5\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n if val > self.max:\n self.max = val\n if val < self.min:\n self.min = val",
"_____no_output_____"
]
],
[
[
"#### Utilities",
"_____no_output_____"
]
],
[
[
"def make_model(args):\n config = AutoConfig.from_pretrained(args.config_name)\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)\n model = Model(args.model_name_or_path, config=config,layer_start=12,layer_weights=None)\n #model = Model(args.model_name_or_path, config=config)\n return config, tokenizer, model\n\ndef make_optimizer(args, model):\n named_parameters = list(model.named_parameters()) \n\n roberta_parameters = named_parameters[:389] \n pooler_parameters = named_parameters[389:391] \n qa_parameters = named_parameters[391:]\n \n parameters = []\n \n # increase lr every k layer\n increase_lr_every_k_layer = 1\n lrs = np.linspace(1, 5, 24 // increase_lr_every_k_layer)\n for layer_num, (name, params) in enumerate(roberta_parameters):\n weight_decay = 0.0 if \"bias\" in name else 0.01\n splitted_name = name.split('.')\n lr = args.learning_rate #Config.lr\n if len(splitted_name) >= 4 and str.isdigit(splitted_name[3]):\n layer_num = int(splitted_name[3])\n lr = lrs[layer_num // increase_lr_every_k_layer] * lr\n\n parameters.append({\"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": lr})\n \n default_lr = 1e-3 #default LR for AdamW\n for layer_num, (name,params) in enumerate(qa_parameters):\n weight_decay = 0.0 if \"bias\" in name else 0.01\n parameters.append({\"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": default_lr})\n \n for layer_num, (name,params) in enumerate(pooler_parameters):\n weight_decay = 0.0 if \"bias\" in name else 0.01\n parameters.append({\"params\": params,\n \"weight_decay\": weight_decay,\n \"lr\": default_lr})\n\n return AdamW(parameters)\n\n\ndef make_scheduler(\n args, optimizer, \n num_warmup_steps, \n num_training_steps\n):\n if args.decay_name == \"cosine-warmup\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps\n )\n else:\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps\n )\n return scheduler \n\n\ndef make_loader(\n args, data, \n tokenizer, fold\n):\n train_set, valid_set = data[data['kfold']!=fold], data[data['kfold']==fold].reset_index(drop=True)\n \n train_features, valid_features = [[] for _ in range(2)]\n for i, row in train_set.iterrows():\n train_features += prepare_train_features(args, row, tokenizer)\n for i, row in valid_set.iterrows():\n valid_features += prepare_train_features(args, row, tokenizer)\n\n ## Weighted sampler\n hindi_tamil_count = [] \n for i, f in enumerate(train_features):\n hindi_tamil_count.append(train_features[i]['hindi_tamil']) \n class_sample_count = pd.Series(hindi_tamil_count).value_counts().values\n weight = 1. 
/ class_sample_count\n samples_weight = np.array([weight[t] for t in hindi_tamil_count]) \n samples_weight = torch.from_numpy(samples_weight)\n wsampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight))\n\n train_dataset = DatasetRetriever(train_features, mode=\"train\")\n valid_dataset = DatasetRetriever(valid_features, mode=\"valid\")\n print(f\"Num examples Train= {len(train_dataset)}, Num examples Valid={len(valid_dataset)}\")\n \n train_sampler = RandomSampler(train_dataset)\n valid_sampler = SequentialSampler(valid_dataset)\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=args.train_batch_size,\n sampler=train_sampler, #wsampler\n num_workers=optimal_num_of_loader_workers(),\n pin_memory=True,\n drop_last=False \n )\n\n valid_dataloader = DataLoader(\n valid_dataset,\n batch_size=args.eval_batch_size, \n sampler=valid_sampler,\n num_workers=optimal_num_of_loader_workers(),\n pin_memory=True, \n drop_last=False\n )\n\n return train_dataloader, valid_dataloader, valid_features, valid_set",
"_____no_output_____"
]
],
[
[
"#### Trainer",
"_____no_output_____"
]
],
[
[
"class Trainer:\n def __init__(\n self, model, tokenizer, \n optimizer, scheduler\n ):\n self.model = model\n self.tokenizer = tokenizer\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n def train(\n self, args, \n train_dataloader, \n epoch, result_dict\n ):\n count = 0\n losses = AverageMeter()\n \n self.model.zero_grad()\n self.model.train()\n \n fix_all_seeds(args.seed)\n \n for batch_idx, batch_data in enumerate(train_dataloader):\n input_ids, attention_mask, targets_start, targets_end = \\\n batch_data['input_ids'], batch_data['attention_mask'], \\\n batch_data['start_position'], batch_data['end_position']\n \n input_ids, attention_mask, targets_start, targets_end = \\\n input_ids.cuda(), attention_mask.cuda(), targets_start.cuda(), targets_end.cuda()\n\n outputs_start, outputs_end = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n )\n \n loss = loss_fn((outputs_start, outputs_end), (targets_start, targets_end))\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n count += input_ids.size(0)\n losses.update(loss.item(), input_ids.size(0))\n\n # if args.fp16:\n # torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), args.max_grad_norm)\n # else:\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)\n\n if batch_idx % args.gradient_accumulation_steps == 0 or batch_idx == len(train_dataloader) - 1:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n if (batch_idx % args.logging_steps == 0) or (batch_idx+1)==len(train_dataloader):\n _s = str(len(str(len(train_dataloader.sampler))))\n ret = [\n ('Epoch: {:0>2} [{: >' + _s + '}/{} ({: >3.0f}%)]').format(epoch, count, len(train_dataloader.sampler), 100 * count / len(train_dataloader.sampler)),\n 'Train Loss: {: >4.5f}'.format(losses.avg),\n ]\n print(', '.join(ret))\n\n result_dict['train_loss'].append(losses.avg)\n return result_dict",
"_____no_output_____"
]
],
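[
[
"# Sketch only (not part of the original notebook): checks the gradient-accumulation pattern\n# used in Trainer.train above -- accumulating loss / k over k equal micro-batches gives the\n# same gradient as a single pass over the full batch. Toy model and data are made up.\nimport torch\n\ntorch.manual_seed(0)\nx, y = torch.randn(8, 4), torch.randn(8, 1)\nmodel = torch.nn.Linear(4, 1)\n\n# reference: gradient of the mean loss over the full batch\nmodel.zero_grad()\ntorch.nn.functional.mse_loss(model(x), y).backward()\nfull_grad = model.weight.grad.clone()\n\n# accumulated: loss / k over each of k equal micro-batches\nk = 2\nmodel.zero_grad()\nfor xb, yb in zip(x.chunk(k), y.chunk(k)):\n    (torch.nn.functional.mse_loss(model(xb), yb) / k).backward()\nprint(torch.allclose(full_grad, model.weight.grad, atol=1e-6))  # expected: True",
"_____no_output_____"
]
],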
[
[
"#### Evaluator",
"_____no_output_____"
]
],
[
[
"class Evaluator:\n def __init__(self, model):\n self.model = model\n \n def save(self, result, output_dir):\n with open(f'{output_dir}/result_dict.json', 'w') as f:\n f.write(json.dumps(result, sort_keys=True, indent=4, ensure_ascii=False))\n\n def evaluate(self, valid_dataloader, epoch, result_dict):\n losses = AverageMeter()\n all_outputs_start, all_outputs_end = [], []\n for batch_idx, batch_data in enumerate(valid_dataloader):\n self.model = self.model.eval()\n input_ids, attention_mask, targets_start, targets_end = \\\n batch_data['input_ids'], batch_data['attention_mask'], \\\n batch_data['start_position'], batch_data['end_position']\n \n input_ids, attention_mask, targets_start, targets_end = \\\n input_ids.cuda(), attention_mask.cuda(), targets_start.cuda(), targets_end.cuda()\n \n with torch.no_grad(): \n outputs_start, outputs_end = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n )\n all_outputs_start.append(outputs_start.cpu().numpy().tolist())\n all_outputs_end.append(outputs_end.cpu().numpy().tolist())\n \n loss = loss_fn((outputs_start, outputs_end), (targets_start, targets_end))\n losses.update(loss.item(), input_ids.size(0))\n \n all_outputs_start = np.vstack(all_outputs_start)\n all_outputs_end = np.vstack(all_outputs_end)\n\n print('----Validation Results Summary----')\n print('Epoch: [{}] Valid Loss: {: >4.5f}'.format(epoch, losses.avg))\n result_dict['val_loss'].append(losses.avg) \n return result_dict, all_outputs_start, all_outputs_end\n \n ",
"_____no_output_____"
]
],
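[
[
"# Toy illustration (not part of the original notebook): Evaluator.evaluate above appends one\n# (batch_size, seq_len) block of logits per batch; np.vstack then stacks them into a single\n# (num_features, seq_len) array, the shape postprocess_qa_predictions expects. Shapes are made up.\nimport numpy as np\n\nper_batch = [np.zeros((8, 400)).tolist(), np.zeros((8, 400)).tolist(), np.zeros((3, 400)).tolist()]\nprint(np.vstack(per_batch).shape)  # expected: (19, 400)",
"_____no_output_____"
]
],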
[
[
"#### Initialize Training",
"_____no_output_____"
]
],
[
[
"def init_training(args, data, fold):\n fix_all_seeds(args.seed)\n \n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n \n # model\n model_config, tokenizer, model = make_model(args)\n if torch.cuda.device_count() >= 1:\n print('Model pushed to {} GPU(s), type {}.'.format(\n torch.cuda.device_count(), \n torch.cuda.get_device_name(0))\n )\n model = model.cuda() \n else:\n raise ValueError('CPU training is not supported')\n \n # data loaders\n train_dataloader, valid_dataloader, valid_features, valid_set = make_loader(args, data, tokenizer, fold)\n\n # optimizer\n optimizer = make_optimizer(args, model)\n\n # scheduler\n num_training_steps = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) * args.epochs\n if args.warmup_ratio > 0:\n num_warmup_steps = int(args.warmup_ratio * num_training_steps)\n else:\n num_warmup_steps = 0\n print(f\"Total Training Steps: {num_training_steps}, Total Warmup Steps: {num_warmup_steps}\")\n scheduler = make_scheduler(args, optimizer, num_warmup_steps, num_training_steps)\n\n # mixed precision training with NVIDIA Apex\n if args.fp16:\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n \n result_dict = {\n 'epoch':[], \n 'train_loss': [], \n 'val_loss' : [], \n 'best_val_loss': np.inf\n }\n\n return (\n model, model_config, tokenizer, optimizer, scheduler, \n train_dataloader, valid_dataloader, result_dict, valid_features, valid_set\n )",
"_____no_output_____"
]
],
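[
[
"# Worked example (not part of the original notebook) of the step arithmetic in init_training().\n# The values below are inferred from the fold-0 log (\"Num examples Train= 26073\",\n# \"Total Training Steps: 3260, Total Warmup Steps: 326\") -- they are assumptions, not values\n# read from Config.\nimport math\n\nnum_train_features = 26073\ntrain_batch_size, gradient_accumulation_steps, epochs, warmup_ratio = 4, 2, 1, 0.1\n\nbatches_per_epoch = math.ceil(num_train_features / train_batch_size)\nnum_training_steps = math.ceil(batches_per_epoch / gradient_accumulation_steps) * epochs\nnum_warmup_steps = int(warmup_ratio * num_training_steps)\nprint(num_training_steps, num_warmup_steps)  # expected: 3260 326",
"_____no_output_____"
]
],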
[
[
"#### Validation Jaccard",
"_____no_output_____"
]
],
[
[
"# Ref: https://www.kaggle.com/rhtsingh/chaii-qa-5-fold-xlmroberta-torch-infer\nimport collections\n\ndef postprocess_qa_predictions(examples, features1, raw_predictions, tokenizer, n_best_size = 20, max_answer_length = 30):\n features = features1\n all_start_logits, all_end_logits = raw_predictions\n \n example_id_to_index = {k: i for i, k in enumerate(examples[\"id\"])}\n features_per_example = collections.defaultdict(list)\n for i, feature in enumerate(features):\n features_per_example[example_id_to_index[feature[\"example_id\"]]].append(i)\n\n predictions = collections.OrderedDict()\n\n print(f\"Post-processing {len(examples)} example predictions split into {len(features)} features.\")\n\n for example_index, example in examples.iterrows():\n feature_indices = features_per_example[example_index]\n #print(example['id'],example_index,feature_indices)\n min_null_score = None\n valid_answers = []\n \n context = example[\"context\"]\n for feature_index in feature_indices:\n start_logits = all_start_logits[feature_index]\n end_logits = all_end_logits[feature_index]\n\n sequence_ids = features[feature_index][\"sequence_ids\"]\n context_index = 1\n\n offset_mapping = [\n (o if sequence_ids[k] == context_index else None)\n for k, o in enumerate(features[feature_index][\"offset_mapping\"])\n ] \n \n cls_index = features[feature_index][\"input_ids\"].index(tokenizer.cls_token_id)\n feature_null_score = start_logits[cls_index] + end_logits[cls_index]\n if min_null_score is None or min_null_score < feature_null_score:\n min_null_score = feature_null_score\n\n start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()\n end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()\n for start_index in start_indexes:\n for end_index in end_indexes:\n if (\n start_index >= len(offset_mapping)\n or end_index >= len(offset_mapping)\n or offset_mapping[start_index] is None\n or offset_mapping[end_index] is None\n ):\n continue\n # Don't consider answers with a length that is either < 0 or > max_answer_length.\n if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n continue\n\n start_char = offset_mapping[start_index][0]\n end_char = offset_mapping[end_index][1]\n valid_answers.append(\n {\n \"score\": start_logits[start_index] + end_logits[end_index],\n \"text\": context[start_char: end_char]\n }\n )\n \n if len(valid_answers) > 0:\n best_answer = sorted(valid_answers, key=lambda x: x[\"score\"], reverse=True)[0]\n else:\n best_answer = {\"text\": \"\", \"score\": 0.0}\n \n predictions[example[\"id\"]] = best_answer[\"text\"]\n \n \n return predictions",
"_____no_output_____"
],
[
"def jaccard(str1, str2): \n a = set(str1.lower().split()) \n b = set(str2.lower().split())\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))",
"_____no_output_____"
]
],
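[
[
"# Quick sanity check of the word-level jaccard() defined above (example strings are made up):\n# the token intersection has 3 words and the union has 5, so the score is 3 / 5 = 0.6.\nprint(jaccard(\"the quick brown fox\", \"the brown fox jumps\"))  # expected: 0.6",
"_____no_output_____"
]
],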
[
[
"#### Run",
"_____no_output_____"
]
],
[
[
"all_jacard_scores = []\n\ndef run(data, fold):\n args = Config()\n model, model_config, tokenizer, optimizer, scheduler, train_dataloader, \\\n valid_dataloader, result_dict, valid_features, valid_set = init_training(args, data, fold)\n \n trainer = Trainer(model, tokenizer, optimizer, scheduler)\n evaluator = Evaluator(model)\n\n train_time_list = []\n valid_time_list = []\n\n for epoch in range(args.epochs):\n result_dict['epoch'].append(epoch)\n\n # Train\n torch.cuda.synchronize()\n tic1 = time.time()\n result_dict = trainer.train(\n args, train_dataloader, \n epoch, result_dict\n )\n torch.cuda.synchronize()\n tic2 = time.time() \n train_time_list.append(tic2 - tic1)\n # Evaluate\n torch.cuda.synchronize()\n tic3 = time.time()\n result_dict, all_outputs_start, all_outputs_end = evaluator.evaluate(\n valid_dataloader, epoch, result_dict\n )\n torch.cuda.synchronize()\n# # Get valid jaccard score\n valid_features1 = valid_features.copy()\n valid_preds = postprocess_qa_predictions(valid_set, valid_features1, (all_outputs_start, all_outputs_end), tokenizer)\n valid_set['PredictionString'] = valid_set['id'].map(valid_preds)\n valid_set['jaccard'] = valid_set[['answer_text','PredictionString']].apply(lambda x: jaccard(x[0],x[1]), axis=1)\n print(\"valid jaccard: \",np.mean(valid_set.jaccard))\n all_jacard_scores.append(np.mean(valid_set.jaccard))\n \n tic4 = time.time() \n valid_time_list.append(tic4 - tic3)\n \n output_dir = os.path.join(args.output_dir, f\"checkpoint-fold-{fold}-epoch-{epoch}\")\n os.makedirs(output_dir, exist_ok=True)\n if result_dict['val_loss'][-1] < result_dict['best_val_loss']:\n print(\"{} Epoch, Best epoch was updated! Valid Loss: {: >4.5f}\".format(epoch, result_dict['val_loss'][-1]))\n result_dict[\"best_val_loss\"] = result_dict['val_loss'][-1] \n \n# os.makedirs(output_dir, exist_ok=True)\n torch.save(model.state_dict(), f\"{output_dir}/pytorch_model.bin\")\n model_config.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n print(f\"Saving model checkpoint to {output_dir}.\")\n \n print()\n\n evaluator.save(result_dict, output_dir)\n \n print(f\"Total Training Time: {np.sum(train_time_list)}secs, Average Training Time per Epoch: {np.mean(train_time_list)}secs.\")\n print(f\"Total Validation Time: {np.sum(valid_time_list)}secs, Average Validation Time per Epoch: {np.mean(valid_time_list)}secs.\")\n \n #del trainer, evaluator\n #del model, model_config, tokenizer\n #del optimizer, scheduler\n #del train_dataloader, valid_dataloader, result_dict",
"_____no_output_____"
],
[
"for fold in range(5):\n print();print()\n print('-'*50)\n print(f'FOLD: {fold}')\n print('-'*50)\n run(train, fold)\n\nprint(\"*\"*50)\nprint(\"Final jacard scores, 5-fold: \", np.round(all_jacard_scores,5))\nprint(\"Average jacard:\",np.mean(all_jacard_scores))\nprint(\"*\"*50)",
"\n\n--------------------------------------------------\nFOLD: 0\n--------------------------------------------------\nModel pushed to 1 GPU(s), type Tesla P100-PCIE-16GB.\nNum examples Train= 26073, Num examples Valid=2427\nTotal Training Steps: 3260, Total Warmup Steps: 326\nEpoch: 00 [ 4/26073 ( 0%)], Train Loss: 3.35908\nEpoch: 00 [ 44/26073 ( 0%)], Train Loss: 3.30513\nEpoch: 00 [ 84/26073 ( 0%)], Train Loss: 3.25556\nEpoch: 00 [ 124/26073 ( 0%)], Train Loss: 3.23333\nEpoch: 00 [ 164/26073 ( 1%)], Train Loss: 3.19503\nEpoch: 00 [ 204/26073 ( 1%)], Train Loss: 3.14787\nEpoch: 00 [ 244/26073 ( 1%)], Train Loss: 3.08589\nEpoch: 00 [ 284/26073 ( 1%)], Train Loss: 3.01475\nEpoch: 00 [ 324/26073 ( 1%)], Train Loss: 2.91228\nEpoch: 00 [ 364/26073 ( 1%)], Train Loss: 2.80831\nEpoch: 00 [ 404/26073 ( 2%)], Train Loss: 2.68667\nEpoch: 00 [ 444/26073 ( 2%)], Train Loss: 2.55810\nEpoch: 00 [ 484/26073 ( 2%)], Train Loss: 2.41252\nEpoch: 00 [ 524/26073 ( 2%)], Train Loss: 2.30834\nEpoch: 00 [ 564/26073 ( 2%)], Train Loss: 2.21533\nEpoch: 00 [ 604/26073 ( 2%)], Train Loss: 2.12583\nEpoch: 00 [ 644/26073 ( 2%)], Train Loss: 2.05236\nEpoch: 00 [ 684/26073 ( 3%)], Train Loss: 1.97124\nEpoch: 00 [ 724/26073 ( 3%)], Train Loss: 1.88809\nEpoch: 00 [ 764/26073 ( 3%)], Train Loss: 1.82719\nEpoch: 00 [ 804/26073 ( 3%)], Train Loss: 1.76538\nEpoch: 00 [ 844/26073 ( 3%)], Train Loss: 1.71356\nEpoch: 00 [ 884/26073 ( 3%)], Train Loss: 1.65420\nEpoch: 00 [ 924/26073 ( 4%)], Train Loss: 1.60511\nEpoch: 00 [ 964/26073 ( 4%)], Train Loss: 1.56635\nEpoch: 00 [ 1004/26073 ( 4%)], Train Loss: 1.52905\nEpoch: 00 [ 1044/26073 ( 4%)], Train Loss: 1.48521\nEpoch: 00 [ 1084/26073 ( 4%)], Train Loss: 1.44727\nEpoch: 00 [ 1124/26073 ( 4%)], Train Loss: 1.41592\nEpoch: 00 [ 1164/26073 ( 4%)], Train Loss: 1.37692\nEpoch: 00 [ 1204/26073 ( 5%)], Train Loss: 1.35768\nEpoch: 00 [ 1244/26073 ( 5%)], Train Loss: 1.33047\nEpoch: 00 [ 1284/26073 ( 5%)], Train Loss: 1.30552\nEpoch: 00 [ 1324/26073 ( 5%)], Train Loss: 1.27877\nEpoch: 00 [ 1364/26073 ( 5%)], Train Loss: 1.25854\nEpoch: 00 [ 1404/26073 ( 5%)], Train Loss: 1.23678\nEpoch: 00 [ 1444/26073 ( 6%)], Train Loss: 1.21446\nEpoch: 00 [ 1484/26073 ( 6%)], Train Loss: 1.19747\nEpoch: 00 [ 1524/26073 ( 6%)], Train Loss: 1.18180\nEpoch: 00 [ 1564/26073 ( 6%)], Train Loss: 1.16468\nEpoch: 00 [ 1604/26073 ( 6%)], Train Loss: 1.14619\nEpoch: 00 [ 1644/26073 ( 6%)], Train Loss: 1.12884\nEpoch: 00 [ 1684/26073 ( 6%)], Train Loss: 1.11286\nEpoch: 00 [ 1724/26073 ( 7%)], Train Loss: 1.10022\nEpoch: 00 [ 1764/26073 ( 7%)], Train Loss: 1.08863\nEpoch: 00 [ 1804/26073 ( 7%)], Train Loss: 1.07449\nEpoch: 00 [ 1844/26073 ( 7%)], Train Loss: 1.06419\nEpoch: 00 [ 1884/26073 ( 7%)], Train Loss: 1.05186\nEpoch: 00 [ 1924/26073 ( 7%)], Train Loss: 1.04309\nEpoch: 00 [ 1964/26073 ( 8%)], Train Loss: 1.03201\nEpoch: 00 [ 2004/26073 ( 8%)], Train Loss: 1.02239\nEpoch: 00 [ 2044/26073 ( 8%)], Train Loss: 1.01347\nEpoch: 00 [ 2084/26073 ( 8%)], Train Loss: 1.00284\nEpoch: 00 [ 2124/26073 ( 8%)], Train Loss: 0.99719\nEpoch: 00 [ 2164/26073 ( 8%)], Train Loss: 0.99057\nEpoch: 00 [ 2204/26073 ( 8%)], Train Loss: 0.98488\nEpoch: 00 [ 2244/26073 ( 9%)], Train Loss: 0.97834\nEpoch: 00 [ 2284/26073 ( 9%)], Train Loss: 0.97076\nEpoch: 00 [ 2324/26073 ( 9%)], Train Loss: 0.96524\nEpoch: 00 [ 2364/26073 ( 9%)], Train Loss: 0.95944\nEpoch: 00 [ 2404/26073 ( 9%)], Train Loss: 0.95129\nEpoch: 00 [ 2444/26073 ( 9%)], Train Loss: 0.94508\nEpoch: 00 [ 2484/26073 ( 10%)], Train Loss: 0.94091\nEpoch: 00 [ 2524/26073 ( 
10%)], Train Loss: 0.93558\nEpoch: 00 [ 2564/26073 ( 10%)], Train Loss: 0.92908\nEpoch: 00 [ 2604/26073 ( 10%)], Train Loss: 0.92448\nEpoch: 00 [ 2644/26073 ( 10%)], Train Loss: 0.92308\nEpoch: 00 [ 2684/26073 ( 10%)], Train Loss: 0.91729\nEpoch: 00 [ 2724/26073 ( 10%)], Train Loss: 0.91265\nEpoch: 00 [ 2764/26073 ( 11%)], Train Loss: 0.91270\nEpoch: 00 [ 2804/26073 ( 11%)], Train Loss: 0.90930\nEpoch: 00 [ 2844/26073 ( 11%)], Train Loss: 0.90498\nEpoch: 00 [ 2884/26073 ( 11%)], Train Loss: 0.89898\nEpoch: 00 [ 2924/26073 ( 11%)], Train Loss: 0.89575\nEpoch: 00 [ 2964/26073 ( 11%)], Train Loss: 0.89154\nEpoch: 00 [ 3004/26073 ( 12%)], Train Loss: 0.88844\nEpoch: 00 [ 3044/26073 ( 12%)], Train Loss: 0.88333\nEpoch: 00 [ 3084/26073 ( 12%)], Train Loss: 0.87910\nEpoch: 00 [ 3124/26073 ( 12%)], Train Loss: 0.87621\nEpoch: 00 [ 3164/26073 ( 12%)], Train Loss: 0.86971\nEpoch: 00 [ 3204/26073 ( 12%)], Train Loss: 0.86509\nEpoch: 00 [ 3244/26073 ( 12%)], Train Loss: 0.86006\nEpoch: 00 [ 3284/26073 ( 13%)], Train Loss: 0.85873\nEpoch: 00 [ 3324/26073 ( 13%)], Train Loss: 0.85402\nEpoch: 00 [ 3364/26073 ( 13%)], Train Loss: 0.84998\nEpoch: 00 [ 3404/26073 ( 13%)], Train Loss: 0.84518\nEpoch: 00 [ 3444/26073 ( 13%)], Train Loss: 0.84320\nEpoch: 00 [ 3484/26073 ( 13%)], Train Loss: 0.83798\nEpoch: 00 [ 3524/26073 ( 14%)], Train Loss: 0.83295\nEpoch: 00 [ 3564/26073 ( 14%)], Train Loss: 0.82791\nEpoch: 00 [ 3604/26073 ( 14%)], Train Loss: 0.82384\nEpoch: 00 [ 3644/26073 ( 14%)], Train Loss: 0.81981\nEpoch: 00 [ 3684/26073 ( 14%)], Train Loss: 0.81673\nEpoch: 00 [ 3724/26073 ( 14%)], Train Loss: 0.81495\nEpoch: 00 [ 3764/26073 ( 14%)], Train Loss: 0.81122\nEpoch: 00 [ 3804/26073 ( 15%)], Train Loss: 0.80940\nEpoch: 00 [ 3844/26073 ( 15%)], Train Loss: 0.80718\nEpoch: 00 [ 3884/26073 ( 15%)], Train Loss: 0.80215\nEpoch: 00 [ 3924/26073 ( 15%)], Train Loss: 0.79676\nEpoch: 00 [ 3964/26073 ( 15%)], Train Loss: 0.79364\nEpoch: 00 [ 4004/26073 ( 15%)], Train Loss: 0.79129\nEpoch: 00 [ 4044/26073 ( 16%)], Train Loss: 0.78790\nEpoch: 00 [ 4084/26073 ( 16%)], Train Loss: 0.78288\nEpoch: 00 [ 4124/26073 ( 16%)], Train Loss: 0.77956\nEpoch: 00 [ 4164/26073 ( 16%)], Train Loss: 0.77798\nEpoch: 00 [ 4204/26073 ( 16%)], Train Loss: 0.77502\nEpoch: 00 [ 4244/26073 ( 16%)], Train Loss: 0.77266\nEpoch: 00 [ 4284/26073 ( 16%)], Train Loss: 0.77258\nEpoch: 00 [ 4324/26073 ( 17%)], Train Loss: 0.76987\nEpoch: 00 [ 4364/26073 ( 17%)], Train Loss: 0.76724\nEpoch: 00 [ 4404/26073 ( 17%)], Train Loss: 0.76609\nEpoch: 00 [ 4444/26073 ( 17%)], Train Loss: 0.76455\nEpoch: 00 [ 4484/26073 ( 17%)], Train Loss: 0.76145\nEpoch: 00 [ 4524/26073 ( 17%)], Train Loss: 0.75883\nEpoch: 00 [ 4564/26073 ( 18%)], Train Loss: 0.75670\nEpoch: 00 [ 4604/26073 ( 18%)], Train Loss: 0.75393\nEpoch: 00 [ 4644/26073 ( 18%)], Train Loss: 0.75108\nEpoch: 00 [ 4684/26073 ( 18%)], Train Loss: 0.74763\nEpoch: 00 [ 4724/26073 ( 18%)], Train Loss: 0.74530\nEpoch: 00 [ 4764/26073 ( 18%)], Train Loss: 0.74485\nEpoch: 00 [ 4804/26073 ( 18%)], Train Loss: 0.74505\nEpoch: 00 [ 4844/26073 ( 19%)], Train Loss: 0.74269\nEpoch: 00 [ 4884/26073 ( 19%)], Train Loss: 0.74022\nEpoch: 00 [ 4924/26073 ( 19%)], Train Loss: 0.73854\nEpoch: 00 [ 4964/26073 ( 19%)], Train Loss: 0.73632\nEpoch: 00 [ 5004/26073 ( 19%)], Train Loss: 0.73384\nEpoch: 00 [ 5044/26073 ( 19%)], Train Loss: 0.73367\nEpoch: 00 [ 5084/26073 ( 19%)], Train Loss: 0.73148\nEpoch: 00 [ 5124/26073 ( 20%)], Train Loss: 0.72871\nEpoch: 00 [ 5164/26073 ( 20%)], Train Loss: 0.72734\nEpoch: 00 [ 5204/26073 ( 
20%)], Train Loss: 0.72502\nEpoch: 00 [ 5244/26073 ( 20%)], Train Loss: 0.72291\nEpoch: 00 [ 5284/26073 ( 20%)], Train Loss: 0.72201\nEpoch: 00 [ 5324/26073 ( 20%)], Train Loss: 0.72033\nEpoch: 00 [ 5364/26073 ( 21%)], Train Loss: 0.71942\nEpoch: 00 [ 5404/26073 ( 21%)], Train Loss: 0.71714\nEpoch: 00 [ 5444/26073 ( 21%)], Train Loss: 0.71459\nEpoch: 00 [ 5484/26073 ( 21%)], Train Loss: 0.71188\nEpoch: 00 [ 5524/26073 ( 21%)], Train Loss: 0.71170\nEpoch: 00 [ 5564/26073 ( 21%)], Train Loss: 0.71121\nEpoch: 00 [ 5604/26073 ( 21%)], Train Loss: 0.70818\nEpoch: 00 [ 5644/26073 ( 22%)], Train Loss: 0.70582\nEpoch: 00 [ 5684/26073 ( 22%)], Train Loss: 0.70485\nEpoch: 00 [ 5724/26073 ( 22%)], Train Loss: 0.70371\nEpoch: 00 [ 5764/26073 ( 22%)], Train Loss: 0.70069\nEpoch: 00 [ 5804/26073 ( 22%)], Train Loss: 0.69880\nEpoch: 00 [ 5844/26073 ( 22%)], Train Loss: 0.69677\nEpoch: 00 [ 5884/26073 ( 23%)], Train Loss: 0.69510\nEpoch: 00 [ 5924/26073 ( 23%)], Train Loss: 0.69330\nEpoch: 00 [ 5964/26073 ( 23%)], Train Loss: 0.69306\nEpoch: 00 [ 6004/26073 ( 23%)], Train Loss: 0.69055\nEpoch: 00 [ 6044/26073 ( 23%)], Train Loss: 0.68846\nEpoch: 00 [ 6084/26073 ( 23%)], Train Loss: 0.68653\nEpoch: 00 [ 6124/26073 ( 23%)], Train Loss: 0.68589\nEpoch: 00 [ 6164/26073 ( 24%)], Train Loss: 0.68516\nEpoch: 00 [ 6204/26073 ( 24%)], Train Loss: 0.68433\nEpoch: 00 [ 6244/26073 ( 24%)], Train Loss: 0.68277\nEpoch: 00 [ 6284/26073 ( 24%)], Train Loss: 0.68249\nEpoch: 00 [ 6324/26073 ( 24%)], Train Loss: 0.68107\nEpoch: 00 [ 6364/26073 ( 24%)], Train Loss: 0.68077\nEpoch: 00 [ 6404/26073 ( 25%)], Train Loss: 0.67981\nEpoch: 00 [ 6444/26073 ( 25%)], Train Loss: 0.67877\nEpoch: 00 [ 6484/26073 ( 25%)], Train Loss: 0.67826\nEpoch: 00 [ 6524/26073 ( 25%)], Train Loss: 0.67613\nEpoch: 00 [ 6564/26073 ( 25%)], Train Loss: 0.67529\nEpoch: 00 [ 6604/26073 ( 25%)], Train Loss: 0.67421\nEpoch: 00 [ 6644/26073 ( 25%)], Train Loss: 0.67326\nEpoch: 00 [ 6684/26073 ( 26%)], Train Loss: 0.67322\nEpoch: 00 [ 6724/26073 ( 26%)], Train Loss: 0.67086\nEpoch: 00 [ 6764/26073 ( 26%)], Train Loss: 0.67115\nEpoch: 00 [ 6804/26073 ( 26%)], Train Loss: 0.67030\nEpoch: 00 [ 6844/26073 ( 26%)], Train Loss: 0.66952\nEpoch: 00 [ 6884/26073 ( 26%)], Train Loss: 0.66796\nEpoch: 00 [ 6924/26073 ( 27%)], Train Loss: 0.66651\nEpoch: 00 [ 6964/26073 ( 27%)], Train Loss: 0.66719\nEpoch: 00 [ 7004/26073 ( 27%)], Train Loss: 0.66631\nEpoch: 00 [ 7044/26073 ( 27%)], Train Loss: 0.66468\nEpoch: 00 [ 7084/26073 ( 27%)], Train Loss: 0.66374\nEpoch: 00 [ 7124/26073 ( 27%)], Train Loss: 0.66180\nEpoch: 00 [ 7164/26073 ( 27%)], Train Loss: 0.66019\nEpoch: 00 [ 7204/26073 ( 28%)], Train Loss: 0.66155\nEpoch: 00 [ 7244/26073 ( 28%)], Train Loss: 0.66033\nEpoch: 00 [ 7284/26073 ( 28%)], Train Loss: 0.65956\nEpoch: 00 [ 7324/26073 ( 28%)], Train Loss: 0.65830\nEpoch: 00 [ 7364/26073 ( 28%)], Train Loss: 0.65768\nEpoch: 00 [ 7404/26073 ( 28%)], Train Loss: 0.65687\nEpoch: 00 [ 7444/26073 ( 29%)], Train Loss: 0.65492\nEpoch: 00 [ 7484/26073 ( 29%)], Train Loss: 0.65358\nEpoch: 00 [ 7524/26073 ( 29%)], Train Loss: 0.65126\nEpoch: 00 [ 7564/26073 ( 29%)], Train Loss: 0.65036\nEpoch: 00 [ 7604/26073 ( 29%)], Train Loss: 0.64865\nEpoch: 00 [ 7644/26073 ( 29%)], Train Loss: 0.64752\nEpoch: 00 [ 7684/26073 ( 29%)], Train Loss: 0.64496\nEpoch: 00 [ 7724/26073 ( 30%)], Train Loss: 0.64472\nEpoch: 00 [ 7764/26073 ( 30%)], Train Loss: 0.64379\nEpoch: 00 [ 7804/26073 ( 30%)], Train Loss: 0.64257\nEpoch: 00 [ 7844/26073 ( 30%)], Train Loss: 0.64211\nEpoch: 00 [ 7884/26073 ( 
30%)], Train Loss: 0.63990\nEpoch: 00 [ 7924/26073 ( 30%)], Train Loss: 0.63991\nEpoch: 00 [ 7964/26073 ( 31%)], Train Loss: 0.63889\nEpoch: 00 [ 8004/26073 ( 31%)], Train Loss: 0.63757\nEpoch: 00 [ 8044/26073 ( 31%)], Train Loss: 0.63755\nEpoch: 00 [ 8084/26073 ( 31%)], Train Loss: 0.63704\nEpoch: 00 [ 8124/26073 ( 31%)], Train Loss: 0.63633\nEpoch: 00 [ 8164/26073 ( 31%)], Train Loss: 0.63562\nEpoch: 00 [ 8204/26073 ( 31%)], Train Loss: 0.63392\nEpoch: 00 [ 8244/26073 ( 32%)], Train Loss: 0.63428\nEpoch: 00 [ 8284/26073 ( 32%)], Train Loss: 0.63364\nEpoch: 00 [ 8324/26073 ( 32%)], Train Loss: 0.63298\nEpoch: 00 [ 8364/26073 ( 32%)], Train Loss: 0.63262\nEpoch: 00 [ 8404/26073 ( 32%)], Train Loss: 0.63181\nEpoch: 00 [ 8444/26073 ( 32%)], Train Loss: 0.63002\nEpoch: 00 [ 8484/26073 ( 33%)], Train Loss: 0.62992\nEpoch: 00 [ 8524/26073 ( 33%)], Train Loss: 0.62938\nEpoch: 00 [ 8564/26073 ( 33%)], Train Loss: 0.62934\nEpoch: 00 [ 8604/26073 ( 33%)], Train Loss: 0.62848\nEpoch: 00 [ 8644/26073 ( 33%)], Train Loss: 0.62821\nEpoch: 00 [ 8684/26073 ( 33%)], Train Loss: 0.62741\nEpoch: 00 [ 8724/26073 ( 33%)], Train Loss: 0.62634\nEpoch: 00 [ 8764/26073 ( 34%)], Train Loss: 0.62576\nEpoch: 00 [ 8804/26073 ( 34%)], Train Loss: 0.62536\nEpoch: 00 [ 8844/26073 ( 34%)], Train Loss: 0.62454\nEpoch: 00 [ 8884/26073 ( 34%)], Train Loss: 0.62336\nEpoch: 00 [ 8924/26073 ( 34%)], Train Loss: 0.62428\nEpoch: 00 [ 8964/26073 ( 34%)], Train Loss: 0.62449\nEpoch: 00 [ 9004/26073 ( 35%)], Train Loss: 0.62471\nEpoch: 00 [ 9044/26073 ( 35%)], Train Loss: 0.62343\nEpoch: 00 [ 9084/26073 ( 35%)], Train Loss: 0.62263\nEpoch: 00 [ 9124/26073 ( 35%)], Train Loss: 0.62228\nEpoch: 00 [ 9164/26073 ( 35%)], Train Loss: 0.62123\nEpoch: 00 [ 9204/26073 ( 35%)], Train Loss: 0.62076\nEpoch: 00 [ 9244/26073 ( 35%)], Train Loss: 0.62027\nEpoch: 00 [ 9284/26073 ( 36%)], Train Loss: 0.62002\nEpoch: 00 [ 9324/26073 ( 36%)], Train Loss: 0.61863\nEpoch: 00 [ 9364/26073 ( 36%)], Train Loss: 0.61773\nEpoch: 00 [ 9404/26073 ( 36%)], Train Loss: 0.61629\nEpoch: 00 [ 9444/26073 ( 36%)], Train Loss: 0.61509\nEpoch: 00 [ 9484/26073 ( 36%)], Train Loss: 0.61429\nEpoch: 00 [ 9524/26073 ( 37%)], Train Loss: 0.61449\nEpoch: 00 [ 9564/26073 ( 37%)], Train Loss: 0.61401\nEpoch: 00 [ 9604/26073 ( 37%)], Train Loss: 0.61428\nEpoch: 00 [ 9644/26073 ( 37%)], Train Loss: 0.61306\nEpoch: 00 [ 9684/26073 ( 37%)], Train Loss: 0.61428\nEpoch: 00 [ 9724/26073 ( 37%)], Train Loss: 0.61331\nEpoch: 00 [ 9764/26073 ( 37%)], Train Loss: 0.61247\nEpoch: 00 [ 9804/26073 ( 38%)], Train Loss: 0.61198\nEpoch: 00 [ 9844/26073 ( 38%)], Train Loss: 0.61075\nEpoch: 00 [ 9884/26073 ( 38%)], Train Loss: 0.61047\nEpoch: 00 [ 9924/26073 ( 38%)], Train Loss: 0.60954\nEpoch: 00 [ 9964/26073 ( 38%)], Train Loss: 0.60879\nEpoch: 00 [10004/26073 ( 38%)], Train Loss: 0.60878\nEpoch: 00 [10044/26073 ( 39%)], Train Loss: 0.60823\nEpoch: 00 [10084/26073 ( 39%)], Train Loss: 0.60717\nEpoch: 00 [10124/26073 ( 39%)], Train Loss: 0.60682\nEpoch: 00 [10164/26073 ( 39%)], Train Loss: 0.60667\nEpoch: 00 [10204/26073 ( 39%)], Train Loss: 0.60555\nEpoch: 00 [10244/26073 ( 39%)], Train Loss: 0.60583\nEpoch: 00 [10284/26073 ( 39%)], Train Loss: 0.60618\nEpoch: 00 [10324/26073 ( 40%)], Train Loss: 0.60657\nEpoch: 00 [10364/26073 ( 40%)], Train Loss: 0.60557\nEpoch: 00 [10404/26073 ( 40%)], Train Loss: 0.60448\nEpoch: 00 [10444/26073 ( 40%)], Train Loss: 0.60391\nEpoch: 00 [10484/26073 ( 40%)], Train Loss: 0.60390\nEpoch: 00 [10524/26073 ( 40%)], Train Loss: 0.60263\nEpoch: 00 [10564/26073 ( 
41%)], Train Loss: 0.60287\nEpoch: 00 [10604/26073 ( 41%)], Train Loss: 0.60190\nEpoch: 00 [10644/26073 ( 41%)], Train Loss: 0.60122\nEpoch: 00 [10684/26073 ( 41%)], Train Loss: 0.59980\nEpoch: 00 [10724/26073 ( 41%)], Train Loss: 0.59888\nEpoch: 00 [10764/26073 ( 41%)], Train Loss: 0.59788\nEpoch: 00 [10804/26073 ( 41%)], Train Loss: 0.59652\nEpoch: 00 [10844/26073 ( 42%)], Train Loss: 0.59613\nEpoch: 00 [10884/26073 ( 42%)], Train Loss: 0.59520\nEpoch: 00 [10924/26073 ( 42%)], Train Loss: 0.59437\nEpoch: 00 [10964/26073 ( 42%)], Train Loss: 0.59374\nEpoch: 00 [11004/26073 ( 42%)], Train Loss: 0.59329\nEpoch: 00 [11044/26073 ( 42%)], Train Loss: 0.59291\nEpoch: 00 [11084/26073 ( 43%)], Train Loss: 0.59195\nEpoch: 00 [11124/26073 ( 43%)], Train Loss: 0.59111\nEpoch: 00 [11164/26073 ( 43%)], Train Loss: 0.59048\nEpoch: 00 [11204/26073 ( 43%)], Train Loss: 0.58981\nEpoch: 00 [11244/26073 ( 43%)], Train Loss: 0.58927\nEpoch: 00 [11284/26073 ( 43%)], Train Loss: 0.58861\nEpoch: 00 [11324/26073 ( 43%)], Train Loss: 0.58730\nEpoch: 00 [11364/26073 ( 44%)], Train Loss: 0.58697\nEpoch: 00 [11404/26073 ( 44%)], Train Loss: 0.58692\nEpoch: 00 [11444/26073 ( 44%)], Train Loss: 0.58575\nEpoch: 00 [11484/26073 ( 44%)], Train Loss: 0.58484\nEpoch: 00 [11524/26073 ( 44%)], Train Loss: 0.58408\nEpoch: 00 [11564/26073 ( 44%)], Train Loss: 0.58349\nEpoch: 00 [11604/26073 ( 45%)], Train Loss: 0.58293\nEpoch: 00 [11644/26073 ( 45%)], Train Loss: 0.58259\nEpoch: 00 [11684/26073 ( 45%)], Train Loss: 0.58117\nEpoch: 00 [11724/26073 ( 45%)], Train Loss: 0.58100\nEpoch: 00 [11764/26073 ( 45%)], Train Loss: 0.58006\nEpoch: 00 [11804/26073 ( 45%)], Train Loss: 0.57981\nEpoch: 00 [11844/26073 ( 45%)], Train Loss: 0.57910\nEpoch: 00 [11884/26073 ( 46%)], Train Loss: 0.57910\nEpoch: 00 [11924/26073 ( 46%)], Train Loss: 0.57882\nEpoch: 00 [11964/26073 ( 46%)], Train Loss: 0.57901\nEpoch: 00 [12004/26073 ( 46%)], Train Loss: 0.57859\nEpoch: 00 [12044/26073 ( 46%)], Train Loss: 0.57819\nEpoch: 00 [12084/26073 ( 46%)], Train Loss: 0.57745\nEpoch: 00 [12124/26073 ( 47%)], Train Loss: 0.57645\nEpoch: 00 [12164/26073 ( 47%)], Train Loss: 0.57606\nEpoch: 00 [12204/26073 ( 47%)], Train Loss: 0.57534\nEpoch: 00 [12244/26073 ( 47%)], Train Loss: 0.57481\nEpoch: 00 [12284/26073 ( 47%)], Train Loss: 0.57497\nEpoch: 00 [12324/26073 ( 47%)], Train Loss: 0.57409\nEpoch: 00 [12364/26073 ( 47%)], Train Loss: 0.57320\nEpoch: 00 [12404/26073 ( 48%)], Train Loss: 0.57329\nEpoch: 00 [12444/26073 ( 48%)], Train Loss: 0.57312\nEpoch: 00 [12484/26073 ( 48%)], Train Loss: 0.57218\nEpoch: 00 [12524/26073 ( 48%)], Train Loss: 0.57182\nEpoch: 00 [12564/26073 ( 48%)], Train Loss: 0.57100\nEpoch: 00 [12604/26073 ( 48%)], Train Loss: 0.57061\nEpoch: 00 [12644/26073 ( 48%)], Train Loss: 0.56985\nEpoch: 00 [12684/26073 ( 49%)], Train Loss: 0.57066\nEpoch: 00 [12724/26073 ( 49%)], Train Loss: 0.57019\nEpoch: 00 [12764/26073 ( 49%)], Train Loss: 0.57038\nEpoch: 00 [12804/26073 ( 49%)], Train Loss: 0.57011\nEpoch: 00 [12844/26073 ( 49%)], Train Loss: 0.57005\nEpoch: 00 [12884/26073 ( 49%)], Train Loss: 0.56930\nEpoch: 00 [12924/26073 ( 50%)], Train Loss: 0.56895\nEpoch: 00 [12964/26073 ( 50%)], Train Loss: 0.56871\nEpoch: 00 [13004/26073 ( 50%)], Train Loss: 0.56760\nEpoch: 00 [13044/26073 ( 50%)], Train Loss: 0.56688\nEpoch: 00 [13084/26073 ( 50%)], Train Loss: 0.56645\nEpoch: 00 [13124/26073 ( 50%)], Train Loss: 0.56634\nEpoch: 00 [13164/26073 ( 50%)], Train Loss: 0.56620\nEpoch: 00 [13204/26073 ( 51%)], Train Loss: 0.56540\nEpoch: 00 [13244/26073 ( 
51%)], Train Loss: 0.56478\nEpoch: 00 [13284/26073 ( 51%)], Train Loss: 0.56421\nEpoch: 00 [13324/26073 ( 51%)], Train Loss: 0.56386\nEpoch: 00 [13364/26073 ( 51%)], Train Loss: 0.56315\nEpoch: 00 [13404/26073 ( 51%)], Train Loss: 0.56200\nEpoch: 00 [13444/26073 ( 52%)], Train Loss: 0.56206\nEpoch: 00 [13484/26073 ( 52%)], Train Loss: 0.56137\nEpoch: 00 [13524/26073 ( 52%)], Train Loss: 0.56072\nEpoch: 00 [13564/26073 ( 52%)], Train Loss: 0.56013\nEpoch: 00 [13604/26073 ( 52%)], Train Loss: 0.55965\nEpoch: 00 [13644/26073 ( 52%)], Train Loss: 0.55920\nEpoch: 00 [13684/26073 ( 52%)], Train Loss: 0.55937\nEpoch: 00 [13724/26073 ( 53%)], Train Loss: 0.55868\nEpoch: 00 [13764/26073 ( 53%)], Train Loss: 0.55837\nEpoch: 00 [13804/26073 ( 53%)], Train Loss: 0.55758\nEpoch: 00 [13844/26073 ( 53%)], Train Loss: 0.55717\nEpoch: 00 [13884/26073 ( 53%)], Train Loss: 0.55629\nEpoch: 00 [13924/26073 ( 53%)], Train Loss: 0.55574\nEpoch: 00 [13964/26073 ( 54%)], Train Loss: 0.55572\nEpoch: 00 [14004/26073 ( 54%)], Train Loss: 0.55566\nEpoch: 00 [14044/26073 ( 54%)], Train Loss: 0.55531\nEpoch: 00 [14084/26073 ( 54%)], Train Loss: 0.55489\nEpoch: 00 [14124/26073 ( 54%)], Train Loss: 0.55479\nEpoch: 00 [14164/26073 ( 54%)], Train Loss: 0.55421\nEpoch: 00 [14204/26073 ( 54%)], Train Loss: 0.55384\nEpoch: 00 [14244/26073 ( 55%)], Train Loss: 0.55267\nEpoch: 00 [14284/26073 ( 55%)], Train Loss: 0.55235\nEpoch: 00 [14324/26073 ( 55%)], Train Loss: 0.55193\nEpoch: 00 [14364/26073 ( 55%)], Train Loss: 0.55127\nEpoch: 00 [14404/26073 ( 55%)], Train Loss: 0.55065\nEpoch: 00 [14444/26073 ( 55%)], Train Loss: 0.55007\nEpoch: 00 [14484/26073 ( 56%)], Train Loss: 0.54971\nEpoch: 00 [14524/26073 ( 56%)], Train Loss: 0.54940\nEpoch: 00 [14564/26073 ( 56%)], Train Loss: 0.54886\nEpoch: 00 [14604/26073 ( 56%)], Train Loss: 0.54877\nEpoch: 00 [14644/26073 ( 56%)], Train Loss: 0.54853\nEpoch: 00 [14684/26073 ( 56%)], Train Loss: 0.54794\nEpoch: 00 [14724/26073 ( 56%)], Train Loss: 0.54764\nEpoch: 00 [14764/26073 ( 57%)], Train Loss: 0.54702\nEpoch: 00 [14804/26073 ( 57%)], Train Loss: 0.54656\nEpoch: 00 [14844/26073 ( 57%)], Train Loss: 0.54651\nEpoch: 00 [14884/26073 ( 57%)], Train Loss: 0.54654\nEpoch: 00 [14924/26073 ( 57%)], Train Loss: 0.54621\nEpoch: 00 [14964/26073 ( 57%)], Train Loss: 0.54584\nEpoch: 00 [15004/26073 ( 58%)], Train Loss: 0.54539\nEpoch: 00 [15044/26073 ( 58%)], Train Loss: 0.54526\nEpoch: 00 [15084/26073 ( 58%)], Train Loss: 0.54545\nEpoch: 00 [15124/26073 ( 58%)], Train Loss: 0.54484\nEpoch: 00 [15164/26073 ( 58%)], Train Loss: 0.54460\nEpoch: 00 [15204/26073 ( 58%)], Train Loss: 0.54442\nEpoch: 00 [15244/26073 ( 58%)], Train Loss: 0.54414\nEpoch: 00 [15284/26073 ( 59%)], Train Loss: 0.54402\nEpoch: 00 [15324/26073 ( 59%)], Train Loss: 0.54379\nEpoch: 00 [15364/26073 ( 59%)], Train Loss: 0.54346\nEpoch: 00 [15404/26073 ( 59%)], Train Loss: 0.54298\nEpoch: 00 [15444/26073 ( 59%)], Train Loss: 0.54280\nEpoch: 00 [15484/26073 ( 59%)], Train Loss: 0.54265\nEpoch: 00 [15524/26073 ( 60%)], Train Loss: 0.54231\nEpoch: 00 [15564/26073 ( 60%)], Train Loss: 0.54208\nEpoch: 00 [15604/26073 ( 60%)], Train Loss: 0.54170\nEpoch: 00 [15644/26073 ( 60%)], Train Loss: 0.54148\nEpoch: 00 [15684/26073 ( 60%)], Train Loss: 0.54072\nEpoch: 00 [15724/26073 ( 60%)], Train Loss: 0.54046\nEpoch: 00 [15764/26073 ( 60%)], Train Loss: 0.53987\nEpoch: 00 [15804/26073 ( 61%)], Train Loss: 0.53971\nEpoch: 00 [15844/26073 ( 61%)], Train Loss: 0.53937\nEpoch: 00 [15884/26073 ( 61%)], Train Loss: 0.53928\nEpoch: 00 [15924/26073 ( 
61%)], Train Loss: 0.53890\nEpoch: 00 [15964/26073 ( 61%)], Train Loss: 0.53871\nEpoch: 00 [16004/26073 ( 61%)], Train Loss: 0.53818\nEpoch: 00 [16044/26073 ( 62%)], Train Loss: 0.53801\nEpoch: 00 [16084/26073 ( 62%)], Train Loss: 0.53752\nEpoch: 00 [16124/26073 ( 62%)], Train Loss: 0.53718\nEpoch: 00 [16164/26073 ( 62%)], Train Loss: 0.53704\nEpoch: 00 [16204/26073 ( 62%)], Train Loss: 0.53707\nEpoch: 00 [16244/26073 ( 62%)], Train Loss: 0.53669\nEpoch: 00 [16284/26073 ( 62%)], Train Loss: 0.53626\nEpoch: 00 [16324/26073 ( 63%)], Train Loss: 0.53620\nEpoch: 00 [16364/26073 ( 63%)], Train Loss: 0.53601\nEpoch: 00 [16404/26073 ( 63%)], Train Loss: 0.53559\nEpoch: 00 [16444/26073 ( 63%)], Train Loss: 0.53511\nEpoch: 00 [16484/26073 ( 63%)], Train Loss: 0.53507\nEpoch: 00 [16524/26073 ( 63%)], Train Loss: 0.53534\nEpoch: 00 [16564/26073 ( 64%)], Train Loss: 0.53501\nEpoch: 00 [16604/26073 ( 64%)], Train Loss: 0.53471\nEpoch: 00 [16644/26073 ( 64%)], Train Loss: 0.53438\nEpoch: 00 [16684/26073 ( 64%)], Train Loss: 0.53402\nEpoch: 00 [16724/26073 ( 64%)], Train Loss: 0.53357\nEpoch: 00 [16764/26073 ( 64%)], Train Loss: 0.53282\nEpoch: 00 [16804/26073 ( 64%)], Train Loss: 0.53281\nEpoch: 00 [16844/26073 ( 65%)], Train Loss: 0.53225\nEpoch: 00 [16884/26073 ( 65%)], Train Loss: 0.53145\nEpoch: 00 [16924/26073 ( 65%)], Train Loss: 0.53094\nEpoch: 00 [16964/26073 ( 65%)], Train Loss: 0.53060\nEpoch: 00 [17004/26073 ( 65%)], Train Loss: 0.52986\nEpoch: 00 [17044/26073 ( 65%)], Train Loss: 0.52912\nEpoch: 00 [17084/26073 ( 66%)], Train Loss: 0.52884\nEpoch: 00 [17124/26073 ( 66%)], Train Loss: 0.52839\nEpoch: 00 [17164/26073 ( 66%)], Train Loss: 0.52838\nEpoch: 00 [17204/26073 ( 66%)], Train Loss: 0.52795\nEpoch: 00 [17244/26073 ( 66%)], Train Loss: 0.52753\nEpoch: 00 [17284/26073 ( 66%)], Train Loss: 0.52692\nEpoch: 00 [17324/26073 ( 66%)], Train Loss: 0.52681\nEpoch: 00 [17364/26073 ( 67%)], Train Loss: 0.52631\nEpoch: 00 [17404/26073 ( 67%)], Train Loss: 0.52595\nEpoch: 00 [17444/26073 ( 67%)], Train Loss: 0.52577\nEpoch: 00 [17484/26073 ( 67%)], Train Loss: 0.52554\nEpoch: 00 [17524/26073 ( 67%)], Train Loss: 0.52531\nEpoch: 00 [17564/26073 ( 67%)], Train Loss: 0.52505\nEpoch: 00 [17604/26073 ( 68%)], Train Loss: 0.52434\nEpoch: 00 [17644/26073 ( 68%)], Train Loss: 0.52396\nEpoch: 00 [17684/26073 ( 68%)], Train Loss: 0.52357\nEpoch: 00 [17724/26073 ( 68%)], Train Loss: 0.52365\nEpoch: 00 [17764/26073 ( 68%)], Train Loss: 0.52340\nEpoch: 00 [17804/26073 ( 68%)], Train Loss: 0.52337\nEpoch: 00 [17844/26073 ( 68%)], Train Loss: 0.52288\nEpoch: 00 [17884/26073 ( 69%)], Train Loss: 0.52235\nEpoch: 00 [17924/26073 ( 69%)], Train Loss: 0.52204\nEpoch: 00 [17964/26073 ( 69%)], Train Loss: 0.52171\nEpoch: 00 [18004/26073 ( 69%)], Train Loss: 0.52156\nEpoch: 00 [18044/26073 ( 69%)], Train Loss: 0.52122\nEpoch: 00 [18084/26073 ( 69%)], Train Loss: 0.52078\nEpoch: 00 [18124/26073 ( 70%)], Train Loss: 0.52037\nEpoch: 00 [18164/26073 ( 70%)], Train Loss: 0.51966\nEpoch: 00 [18204/26073 ( 70%)], Train Loss: 0.51929\nEpoch: 00 [18244/26073 ( 70%)], Train Loss: 0.51882\nEpoch: 00 [18284/26073 ( 70%)], Train Loss: 0.51849\nEpoch: 00 [18324/26073 ( 70%)], Train Loss: 0.51812\nEpoch: 00 [18364/26073 ( 70%)], Train Loss: 0.51764\nEpoch: 00 [18404/26073 ( 71%)], Train Loss: 0.51742\nEpoch: 00 [18444/26073 ( 71%)], Train Loss: 0.51705\nEpoch: 00 [18484/26073 ( 71%)], Train Loss: 0.51712\nEpoch: 00 [18524/26073 ( 71%)], Train Loss: 0.51657\nEpoch: 00 [18564/26073 ( 71%)], Train Loss: 0.51651\nEpoch: 00 [18604/26073 ( 
71%)], Train Loss: 0.51630\nEpoch: 00 [18644/26073 ( 72%)], Train Loss: 0.51621\nEpoch: 00 [18684/26073 ( 72%)], Train Loss: 0.51584\nEpoch: 00 [18724/26073 ( 72%)], Train Loss: 0.51560\nEpoch: 00 [18764/26073 ( 72%)], Train Loss: 0.51538\nEpoch: 00 [18804/26073 ( 72%)], Train Loss: 0.51495\nEpoch: 00 [18844/26073 ( 72%)], Train Loss: 0.51463\nEpoch: 00 [18884/26073 ( 72%)], Train Loss: 0.51441\nEpoch: 00 [18924/26073 ( 73%)], Train Loss: 0.51382\nEpoch: 00 [18964/26073 ( 73%)], Train Loss: 0.51347\nEpoch: 00 [19004/26073 ( 73%)], Train Loss: 0.51317\nEpoch: 00 [19044/26073 ( 73%)], Train Loss: 0.51318\nEpoch: 00 [19084/26073 ( 73%)], Train Loss: 0.51281\nEpoch: 00 [19124/26073 ( 73%)], Train Loss: 0.51237\nEpoch: 00 [19164/26073 ( 74%)], Train Loss: 0.51171\nEpoch: 00 [19204/26073 ( 74%)], Train Loss: 0.51151\nEpoch: 00 [19244/26073 ( 74%)], Train Loss: 0.51125\nEpoch: 00 [19284/26073 ( 74%)], Train Loss: 0.51130\nEpoch: 00 [19324/26073 ( 74%)], Train Loss: 0.51146\nEpoch: 00 [19364/26073 ( 74%)], Train Loss: 0.51101\nEpoch: 00 [19404/26073 ( 74%)], Train Loss: 0.51104\nEpoch: 00 [19444/26073 ( 75%)], Train Loss: 0.51079\nEpoch: 00 [19484/26073 ( 75%)], Train Loss: 0.51094\nEpoch: 00 [19524/26073 ( 75%)], Train Loss: 0.51057\nEpoch: 00 [19564/26073 ( 75%)], Train Loss: 0.51045\nEpoch: 00 [19604/26073 ( 75%)], Train Loss: 0.51019\nEpoch: 00 [19644/26073 ( 75%)], Train Loss: 0.50973\nEpoch: 00 [19684/26073 ( 75%)], Train Loss: 0.50929\nEpoch: 00 [19724/26073 ( 76%)], Train Loss: 0.50874\nEpoch: 00 [19764/26073 ( 76%)], Train Loss: 0.50857\nEpoch: 00 [19804/26073 ( 76%)], Train Loss: 0.50824\nEpoch: 00 [19844/26073 ( 76%)], Train Loss: 0.50789\nEpoch: 00 [19884/26073 ( 76%)], Train Loss: 0.50739\nEpoch: 00 [19924/26073 ( 76%)], Train Loss: 0.50706\nEpoch: 00 [19964/26073 ( 77%)], Train Loss: 0.50681\nEpoch: 00 [20004/26073 ( 77%)], Train Loss: 0.50666\nEpoch: 00 [20044/26073 ( 77%)], Train Loss: 0.50632\nEpoch: 00 [20084/26073 ( 77%)], Train Loss: 0.50650\nEpoch: 00 [20124/26073 ( 77%)], Train Loss: 0.50630\nEpoch: 00 [20164/26073 ( 77%)], Train Loss: 0.50599\nEpoch: 00 [20204/26073 ( 77%)], Train Loss: 0.50560\nEpoch: 00 [20244/26073 ( 78%)], Train Loss: 0.50523\nEpoch: 00 [20284/26073 ( 78%)], Train Loss: 0.50476\nEpoch: 00 [20324/26073 ( 78%)], Train Loss: 0.50443\nEpoch: 00 [20364/26073 ( 78%)], Train Loss: 0.50373\nEpoch: 00 [20404/26073 ( 78%)], Train Loss: 0.50345\nEpoch: 00 [20444/26073 ( 78%)], Train Loss: 0.50365\nEpoch: 00 [20484/26073 ( 79%)], Train Loss: 0.50324\nEpoch: 00 [20524/26073 ( 79%)], Train Loss: 0.50324\nEpoch: 00 [20564/26073 ( 79%)], Train Loss: 0.50287\nEpoch: 00 [20604/26073 ( 79%)], Train Loss: 0.50257\nEpoch: 00 [20644/26073 ( 79%)], Train Loss: 0.50220\nEpoch: 00 [20684/26073 ( 79%)], Train Loss: 0.50203\nEpoch: 00 [20724/26073 ( 79%)], Train Loss: 0.50175\nEpoch: 00 [20764/26073 ( 80%)], Train Loss: 0.50150\nEpoch: 00 [20804/26073 ( 80%)], Train Loss: 0.50098\nEpoch: 00 [20844/26073 ( 80%)], Train Loss: 0.50045\nEpoch: 00 [20884/26073 ( 80%)], Train Loss: 0.50024\nEpoch: 00 [20924/26073 ( 80%)], Train Loss: 0.49969\nEpoch: 00 [20964/26073 ( 80%)], Train Loss: 0.49938\nEpoch: 00 [21004/26073 ( 81%)], Train Loss: 0.49900\nEpoch: 00 [21044/26073 ( 81%)], Train Loss: 0.49885\nEpoch: 00 [21084/26073 ( 81%)], Train Loss: 0.49854\nEpoch: 00 [21124/26073 ( 81%)], Train Loss: 0.49845\nEpoch: 00 [21164/26073 ( 81%)], Train Loss: 0.49811\nEpoch: 00 [21204/26073 ( 81%)], Train Loss: 0.49811\nEpoch: 00 [21244/26073 ( 81%)], Train Loss: 0.49781\nEpoch: 00 [21284/26073 ( 
82%)], Train Loss: 0.49750\nEpoch: 00 [21324/26073 ( 82%)], Train Loss: 0.49744\nEpoch: 00 [21364/26073 ( 82%)], Train Loss: 0.49693\nEpoch: 00 [21404/26073 ( 82%)], Train Loss: 0.49683\nEpoch: 00 [21444/26073 ( 82%)], Train Loss: 0.49651\nEpoch: 00 [21484/26073 ( 82%)], Train Loss: 0.49631\nEpoch: 00 [21524/26073 ( 83%)], Train Loss: 0.49598\nEpoch: 00 [21564/26073 ( 83%)], Train Loss: 0.49562\nEpoch: 00 [21604/26073 ( 83%)], Train Loss: 0.49545\nEpoch: 00 [21644/26073 ( 83%)], Train Loss: 0.49525\nEpoch: 00 [21684/26073 ( 83%)], Train Loss: 0.49485\nEpoch: 00 [21724/26073 ( 83%)], Train Loss: 0.49439\nEpoch: 00 [21764/26073 ( 83%)], Train Loss: 0.49427\nEpoch: 00 [21804/26073 ( 84%)], Train Loss: 0.49401\nEpoch: 00 [21844/26073 ( 84%)], Train Loss: 0.49373\nEpoch: 00 [21884/26073 ( 84%)], Train Loss: 0.49392\nEpoch: 00 [21924/26073 ( 84%)], Train Loss: 0.49371\nEpoch: 00 [21964/26073 ( 84%)], Train Loss: 0.49349\nEpoch: 00 [22004/26073 ( 84%)], Train Loss: 0.49312\nEpoch: 00 [22044/26073 ( 85%)], Train Loss: 0.49295\nEpoch: 00 [22084/26073 ( 85%)], Train Loss: 0.49242\nEpoch: 00 [22124/26073 ( 85%)], Train Loss: 0.49208\nEpoch: 00 [22164/26073 ( 85%)], Train Loss: 0.49167\nEpoch: 00 [22204/26073 ( 85%)], Train Loss: 0.49135\nEpoch: 00 [22244/26073 ( 85%)], Train Loss: 0.49104\nEpoch: 00 [22284/26073 ( 85%)], Train Loss: 0.49107\nEpoch: 00 [22324/26073 ( 86%)], Train Loss: 0.49069\nEpoch: 00 [22364/26073 ( 86%)], Train Loss: 0.49084\nEpoch: 00 [22404/26073 ( 86%)], Train Loss: 0.49044\nEpoch: 00 [22444/26073 ( 86%)], Train Loss: 0.49023\nEpoch: 00 [22484/26073 ( 86%)], Train Loss: 0.49003\nEpoch: 00 [22524/26073 ( 86%)], Train Loss: 0.48959\nEpoch: 00 [22564/26073 ( 87%)], Train Loss: 0.48973\nEpoch: 00 [22604/26073 ( 87%)], Train Loss: 0.48983\nEpoch: 00 [22644/26073 ( 87%)], Train Loss: 0.48970\nEpoch: 00 [22684/26073 ( 87%)], Train Loss: 0.48952\nEpoch: 00 [22724/26073 ( 87%)], Train Loss: 0.48961\nEpoch: 00 [22764/26073 ( 87%)], Train Loss: 0.48945\nEpoch: 00 [22804/26073 ( 87%)], Train Loss: 0.48920\nEpoch: 00 [22844/26073 ( 88%)], Train Loss: 0.48873\nEpoch: 00 [22884/26073 ( 88%)], Train Loss: 0.48844\nEpoch: 00 [22924/26073 ( 88%)], Train Loss: 0.48820\nEpoch: 00 [22964/26073 ( 88%)], Train Loss: 0.48798\nEpoch: 00 [23004/26073 ( 88%)], Train Loss: 0.48760\nEpoch: 00 [23044/26073 ( 88%)], Train Loss: 0.48723\nEpoch: 00 [23084/26073 ( 89%)], Train Loss: 0.48701\nEpoch: 00 [23124/26073 ( 89%)], Train Loss: 0.48685\nEpoch: 00 [23164/26073 ( 89%)], Train Loss: 0.48660\nEpoch: 00 [23204/26073 ( 89%)], Train Loss: 0.48624\nEpoch: 00 [23244/26073 ( 89%)], Train Loss: 0.48605\nEpoch: 00 [23284/26073 ( 89%)], Train Loss: 0.48579\nEpoch: 00 [23324/26073 ( 89%)], Train Loss: 0.48554\nEpoch: 00 [23364/26073 ( 90%)], Train Loss: 0.48516\nEpoch: 00 [23404/26073 ( 90%)], Train Loss: 0.48492\nEpoch: 00 [23444/26073 ( 90%)], Train Loss: 0.48450\nEpoch: 00 [23484/26073 ( 90%)], Train Loss: 0.48439\nEpoch: 00 [23524/26073 ( 90%)], Train Loss: 0.48418\nEpoch: 00 [23564/26073 ( 90%)], Train Loss: 0.48393\nEpoch: 00 [23604/26073 ( 91%)], Train Loss: 0.48409\nEpoch: 00 [23644/26073 ( 91%)], Train Loss: 0.48379\nEpoch: 00 [23684/26073 ( 91%)], Train Loss: 0.48335\nEpoch: 00 [23724/26073 ( 91%)], Train Loss: 0.48299\nEpoch: 00 [23764/26073 ( 91%)], Train Loss: 0.48288\nEpoch: 00 [23804/26073 ( 91%)], Train Loss: 0.48258\nEpoch: 00 [23844/26073 ( 91%)], Train Loss: 0.48235\nEpoch: 00 [23884/26073 ( 92%)], Train Loss: 0.48206\nEpoch: 00 [23924/26073 ( 92%)], Train Loss: 0.48176\nEpoch: 00 [23964/26073 ( 
92%)], Train Loss: 0.48137\nEpoch: 00 [24004/26073 ( 92%)], Train Loss: 0.48099\nEpoch: 00 [24044/26073 ( 92%)], Train Loss: 0.48083\nEpoch: 00 [24084/26073 ( 92%)], Train Loss: 0.48049\nEpoch: 00 [24124/26073 ( 93%)], Train Loss: 0.48012\nEpoch: 00 [24164/26073 ( 93%)], Train Loss: 0.47992\nEpoch: 00 [24204/26073 ( 93%)], Train Loss: 0.47967\nEpoch: 00 [24244/26073 ( 93%)], Train Loss: 0.47964\nEpoch: 00 [24284/26073 ( 93%)], Train Loss: 0.47950\nEpoch: 00 [24324/26073 ( 93%)], Train Loss: 0.47933\nEpoch: 00 [24364/26073 ( 93%)], Train Loss: 0.47914\nEpoch: 00 [24404/26073 ( 94%)], Train Loss: 0.47903\nEpoch: 00 [24444/26073 ( 94%)], Train Loss: 0.47870\nEpoch: 00 [24484/26073 ( 94%)], Train Loss: 0.47888\nEpoch: 00 [24524/26073 ( 94%)], Train Loss: 0.47908\nEpoch: 00 [24564/26073 ( 94%)], Train Loss: 0.47883\nEpoch: 00 [24604/26073 ( 94%)], Train Loss: 0.47855\nEpoch: 00 [24644/26073 ( 95%)], Train Loss: 0.47849\nEpoch: 00 [24684/26073 ( 95%)], Train Loss: 0.47824\nEpoch: 00 [24724/26073 ( 95%)], Train Loss: 0.47809\nEpoch: 00 [24764/26073 ( 95%)], Train Loss: 0.47771\nEpoch: 00 [24804/26073 ( 95%)], Train Loss: 0.47740\nEpoch: 00 [24844/26073 ( 95%)], Train Loss: 0.47736\nEpoch: 00 [24884/26073 ( 95%)], Train Loss: 0.47743\nEpoch: 00 [24924/26073 ( 96%)], Train Loss: 0.47769\nEpoch: 00 [24964/26073 ( 96%)], Train Loss: 0.47761\nEpoch: 00 [25004/26073 ( 96%)], Train Loss: 0.47744\nEpoch: 00 [25044/26073 ( 96%)], Train Loss: 0.47743\nEpoch: 00 [25084/26073 ( 96%)], Train Loss: 0.47742\nEpoch: 00 [25124/26073 ( 96%)], Train Loss: 0.47710\nEpoch: 00 [25164/26073 ( 97%)], Train Loss: 0.47695\nEpoch: 00 [25204/26073 ( 97%)], Train Loss: 0.47691\nEpoch: 00 [25244/26073 ( 97%)], Train Loss: 0.47643\nEpoch: 00 [25284/26073 ( 97%)], Train Loss: 0.47617\nEpoch: 00 [25324/26073 ( 97%)], Train Loss: 0.47613\nEpoch: 00 [25364/26073 ( 97%)], Train Loss: 0.47623\nEpoch: 00 [25404/26073 ( 97%)], Train Loss: 0.47606\nEpoch: 00 [25444/26073 ( 98%)], Train Loss: 0.47609\nEpoch: 00 [25484/26073 ( 98%)], Train Loss: 0.47577\nEpoch: 00 [25524/26073 ( 98%)], Train Loss: 0.47532\nEpoch: 00 [25564/26073 ( 98%)], Train Loss: 0.47538\nEpoch: 00 [25604/26073 ( 98%)], Train Loss: 0.47526\nEpoch: 00 [25644/26073 ( 98%)], Train Loss: 0.47502\nEpoch: 00 [25684/26073 ( 99%)], Train Loss: 0.47497\nEpoch: 00 [25724/26073 ( 99%)], Train Loss: 0.47462\nEpoch: 00 [25764/26073 ( 99%)], Train Loss: 0.47471\nEpoch: 00 [25804/26073 ( 99%)], Train Loss: 0.47438\nEpoch: 00 [25844/26073 ( 99%)], Train Loss: 0.47420\nEpoch: 00 [25884/26073 ( 99%)], Train Loss: 0.47384\nEpoch: 00 [25924/26073 ( 99%)], Train Loss: 0.47360\nEpoch: 00 [25964/26073 (100%)], Train Loss: 0.47357\nEpoch: 00 [26004/26073 (100%)], Train Loss: 0.47327\nEpoch: 00 [26044/26073 (100%)], Train Loss: 0.47312\nEpoch: 00 [26073/26073 (100%)], Train Loss: 0.47311\n----Validation Results Summary----\nEpoch: [0] Valid Loss: 0.27729\nPost-processing 223 example predictions split into 2427 features.\nvalid jaccard: 0.6752473485657342\n0 Epoch, Best epoch was updated! 
Valid Loss: 0.27729\nSaving model checkpoint to output/checkpoint-fold-0-epoch-0.\n\nTotal Training Time: 3833.1595861911774secs, Average Training Time per Epoch: 3833.1595861911774secs.\nTotal Validation Time: 115.24001145362854secs, Average Validation Time per Epoch: 115.24001145362854secs.\n\n\n--------------------------------------------------\nFOLD: 1\n--------------------------------------------------\nModel pushed to 1 GPU(s), type Tesla P100-PCIE-16GB.\nNum examples Train= 25439, Num examples Valid=3061\nTotal Training Steps: 3180, Total Warmup Steps: 318\nEpoch: 00 [ 4/25439 ( 0%)], Train Loss: 3.15411\nEpoch: 00 [ 44/25439 ( 0%)], Train Loss: 3.20302\nEpoch: 00 [ 84/25439 ( 0%)], Train Loss: 3.19916\nEpoch: 00 [ 124/25439 ( 0%)], Train Loss: 3.18911\nEpoch: 00 [ 164/25439 ( 1%)], Train Loss: 3.16002\nEpoch: 00 [ 204/25439 ( 1%)], Train Loss: 3.10645\nEpoch: 00 [ 244/25439 ( 1%)], Train Loss: 3.05624\nEpoch: 00 [ 284/25439 ( 1%)], Train Loss: 2.98766\nEpoch: 00 [ 324/25439 ( 1%)], Train Loss: 2.90448\nEpoch: 00 [ 364/25439 ( 1%)], Train Loss: 2.80073\nEpoch: 00 [ 404/25439 ( 2%)], Train Loss: 2.67665\nEpoch: 00 [ 444/25439 ( 2%)], Train Loss: 2.53886\nEpoch: 00 [ 484/25439 ( 2%)], Train Loss: 2.42816\nEpoch: 00 [ 524/25439 ( 2%)], Train Loss: 2.30220\nEpoch: 00 [ 564/25439 ( 2%)], Train Loss: 2.19973\nEpoch: 00 [ 604/25439 ( 2%)], Train Loss: 2.12679\nEpoch: 00 [ 644/25439 ( 3%)], Train Loss: 2.04092\nEpoch: 00 [ 684/25439 ( 3%)], Train Loss: 1.95386\nEpoch: 00 [ 724/25439 ( 3%)], Train Loss: 1.86787\nEpoch: 00 [ 764/25439 ( 3%)], Train Loss: 1.80201\nEpoch: 00 [ 804/25439 ( 3%)], Train Loss: 1.73988\nEpoch: 00 [ 844/25439 ( 3%)], Train Loss: 1.67932\nEpoch: 00 [ 884/25439 ( 3%)], Train Loss: 1.62474\nEpoch: 00 [ 924/25439 ( 4%)], Train Loss: 1.57775\nEpoch: 00 [ 964/25439 ( 4%)], Train Loss: 1.54731\nEpoch: 00 [ 1004/25439 ( 4%)], Train Loss: 1.50794\nEpoch: 00 [ 1044/25439 ( 4%)], Train Loss: 1.46992\nEpoch: 00 [ 1084/25439 ( 4%)], Train Loss: 1.43117\nEpoch: 00 [ 1124/25439 ( 4%)], Train Loss: 1.40174\nEpoch: 00 [ 1164/25439 ( 5%)], Train Loss: 1.37565\nEpoch: 00 [ 1204/25439 ( 5%)], Train Loss: 1.34591\nEpoch: 00 [ 1244/25439 ( 5%)], Train Loss: 1.32300\nEpoch: 00 [ 1284/25439 ( 5%)], Train Loss: 1.29869\nEpoch: 00 [ 1324/25439 ( 5%)], Train Loss: 1.27193\nEpoch: 00 [ 1364/25439 ( 5%)], Train Loss: 1.25091\nEpoch: 00 [ 1404/25439 ( 6%)], Train Loss: 1.23114\nEpoch: 00 [ 1444/25439 ( 6%)], Train Loss: 1.20540\nEpoch: 00 [ 1484/25439 ( 6%)], Train Loss: 1.18753\nEpoch: 00 [ 1524/25439 ( 6%)], Train Loss: 1.16763\nEpoch: 00 [ 1564/25439 ( 6%)], Train Loss: 1.15308\nEpoch: 00 [ 1604/25439 ( 6%)], Train Loss: 1.13884\nEpoch: 00 [ 1644/25439 ( 6%)], Train Loss: 1.12708\nEpoch: 00 [ 1684/25439 ( 7%)], Train Loss: 1.11807\nEpoch: 00 [ 1724/25439 ( 7%)], Train Loss: 1.10589\nEpoch: 00 [ 1764/25439 ( 7%)], Train Loss: 1.09684\nEpoch: 00 [ 1804/25439 ( 7%)], Train Loss: 1.08424\nEpoch: 00 [ 1844/25439 ( 7%)], Train Loss: 1.07184\nEpoch: 00 [ 1884/25439 ( 7%)], Train Loss: 1.05833\nEpoch: 00 [ 1924/25439 ( 8%)], Train Loss: 1.05004\nEpoch: 00 [ 1964/25439 ( 8%)], Train Loss: 1.03948\nEpoch: 00 [ 2004/25439 ( 8%)], Train Loss: 1.03153\nEpoch: 00 [ 2044/25439 ( 8%)], Train Loss: 1.01993\nEpoch: 00 [ 2084/25439 ( 8%)], Train Loss: 1.01133\nEpoch: 00 [ 2124/25439 ( 8%)], Train Loss: 1.00247\nEpoch: 00 [ 2164/25439 ( 9%)], Train Loss: 0.99538\nEpoch: 00 [ 2204/25439 ( 9%)], Train Loss: 0.98494\nEpoch: 00 [ 2244/25439 ( 9%)], Train Loss: 0.97402\nEpoch: 00 [ 2284/25439 ( 9%)], Train Loss: 
0.96749\nEpoch: 00 [ 2324/25439 ( 9%)], Train Loss: 0.95629\nEpoch: 00 [ 2364/25439 ( 9%)], Train Loss: 0.94887\nEpoch: 00 [ 2404/25439 ( 9%)], Train Loss: 0.94281\nEpoch: 00 [ 2444/25439 ( 10%)], Train Loss: 0.93817\nEpoch: 00 [ 2484/25439 ( 10%)], Train Loss: 0.92987\nEpoch: 00 [ 2524/25439 ( 10%)], Train Loss: 0.92360\nEpoch: 00 [ 2564/25439 ( 10%)], Train Loss: 0.91858\nEpoch: 00 [ 2604/25439 ( 10%)], Train Loss: 0.91324\nEpoch: 00 [ 2644/25439 ( 10%)], Train Loss: 0.90741\nEpoch: 00 [ 2684/25439 ( 11%)], Train Loss: 0.90294\nEpoch: 00 [ 2724/25439 ( 11%)], Train Loss: 0.90086\nEpoch: 00 [ 2764/25439 ( 11%)], Train Loss: 0.89945\nEpoch: 00 [ 2804/25439 ( 11%)], Train Loss: 0.89644\nEpoch: 00 [ 2844/25439 ( 11%)], Train Loss: 0.89047\nEpoch: 00 [ 2884/25439 ( 11%)], Train Loss: 0.88620\nEpoch: 00 [ 2924/25439 ( 11%)], Train Loss: 0.88195\nEpoch: 00 [ 2964/25439 ( 12%)], Train Loss: 0.87860\nEpoch: 00 [ 3004/25439 ( 12%)], Train Loss: 0.87669\nEpoch: 00 [ 3044/25439 ( 12%)], Train Loss: 0.87170\nEpoch: 00 [ 3084/25439 ( 12%)], Train Loss: 0.86484\nEpoch: 00 [ 3124/25439 ( 12%)], Train Loss: 0.85817\nEpoch: 00 [ 3164/25439 ( 12%)], Train Loss: 0.85194\nEpoch: 00 [ 3204/25439 ( 13%)], Train Loss: 0.85049\nEpoch: 00 [ 3244/25439 ( 13%)], Train Loss: 0.84697\nEpoch: 00 [ 3284/25439 ( 13%)], Train Loss: 0.84365\nEpoch: 00 [ 3324/25439 ( 13%)], Train Loss: 0.84022\nEpoch: 00 [ 3364/25439 ( 13%)], Train Loss: 0.83816\nEpoch: 00 [ 3404/25439 ( 13%)], Train Loss: 0.83460\nEpoch: 00 [ 3444/25439 ( 14%)], Train Loss: 0.83054\nEpoch: 00 [ 3484/25439 ( 14%)], Train Loss: 0.82893\nEpoch: 00 [ 3524/25439 ( 14%)], Train Loss: 0.82446\nEpoch: 00 [ 3564/25439 ( 14%)], Train Loss: 0.82097\nEpoch: 00 [ 3604/25439 ( 14%)], Train Loss: 0.81596\nEpoch: 00 [ 3644/25439 ( 14%)], Train Loss: 0.81511\nEpoch: 00 [ 3684/25439 ( 14%)], Train Loss: 0.81102\nEpoch: 00 [ 3724/25439 ( 15%)], Train Loss: 0.80638\nEpoch: 00 [ 3764/25439 ( 15%)], Train Loss: 0.80253\nEpoch: 00 [ 3804/25439 ( 15%)], Train Loss: 0.79870\nEpoch: 00 [ 3844/25439 ( 15%)], Train Loss: 0.79470\nEpoch: 00 [ 3884/25439 ( 15%)], Train Loss: 0.78962\nEpoch: 00 [ 3924/25439 ( 15%)], Train Loss: 0.78569\nEpoch: 00 [ 3964/25439 ( 16%)], Train Loss: 0.78336\nEpoch: 00 [ 4004/25439 ( 16%)], Train Loss: 0.78179\nEpoch: 00 [ 4044/25439 ( 16%)], Train Loss: 0.77834\nEpoch: 00 [ 4084/25439 ( 16%)], Train Loss: 0.77564\nEpoch: 00 [ 4124/25439 ( 16%)], Train Loss: 0.77160\nEpoch: 00 [ 4164/25439 ( 16%)], Train Loss: 0.77013\nEpoch: 00 [ 4204/25439 ( 17%)], Train Loss: 0.76697\nEpoch: 00 [ 4244/25439 ( 17%)], Train Loss: 0.76460\nEpoch: 00 [ 4284/25439 ( 17%)], Train Loss: 0.76228\nEpoch: 00 [ 4324/25439 ( 17%)], Train Loss: 0.75997\nEpoch: 00 [ 4364/25439 ( 17%)], Train Loss: 0.75583\nEpoch: 00 [ 4404/25439 ( 17%)], Train Loss: 0.75235\nEpoch: 00 [ 4444/25439 ( 17%)], Train Loss: 0.75157\nEpoch: 00 [ 4484/25439 ( 18%)], Train Loss: 0.74856\nEpoch: 00 [ 4524/25439 ( 18%)], Train Loss: 0.74847\nEpoch: 00 [ 4564/25439 ( 18%)], Train Loss: 0.74790\nEpoch: 00 [ 4604/25439 ( 18%)], Train Loss: 0.74584\nEpoch: 00 [ 4644/25439 ( 18%)], Train Loss: 0.74386\nEpoch: 00 [ 4684/25439 ( 18%)], Train Loss: 0.74208\nEpoch: 00 [ 4724/25439 ( 19%)], Train Loss: 0.73839\nEpoch: 00 [ 4764/25439 ( 19%)], Train Loss: 0.73578\nEpoch: 00 [ 4804/25439 ( 19%)], Train Loss: 0.73446\nEpoch: 00 [ 4844/25439 ( 19%)], Train Loss: 0.73330\nEpoch: 00 [ 4884/25439 ( 19%)], Train Loss: 0.73094\nEpoch: 00 [ 4924/25439 ( 19%)], Train Loss: 0.72947\nEpoch: 00 [ 4964/25439 ( 20%)], Train Loss: 
0.72699
[... per-step training loss log for fold 1 condensed to ~10% milestones ...]
Epoch: 00 [ 5004/25439 ( 20%)], Train Loss: 0.72517
Epoch: 00 [ 7644/25439 ( 30%)], Train Loss: 0.65775
Epoch: 00 [10324/25439 ( 41%)], Train Loss: 0.61274
Epoch: 00 [13004/25439 ( 51%)], Train Loss: 0.57791
Epoch: 00 [15684/25439 ( 62%)], Train Loss: 0.55179
Epoch: 00 [18364/25439 ( 72%)], Train Loss: 0.53312
Epoch: 00 [21044/25439 ( 83%)], Train Loss: 0.51170
Epoch: 00 [23724/25439 ( 93%)], Train Loss: 0.49341
Epoch: 00 [25439/25439 (100%)], Train Loss: 0.48499
----Validation Results Summary----
Epoch: [0] Valid Loss: 0.20509
Post-processing 223 example predictions split into 3061 features.
valid jaccard: 0.709434903661361
0 Epoch, Best epoch was updated! Valid Loss: 0.20509
Saving model checkpoint to output/checkpoint-fold-1-epoch-0.

Total Training Time: 3741.11159992218secs, Average Training Time per Epoch: 3741.11159992218secs.
Total Validation Time: 146.3530113697052secs, Average Validation Time per Epoch: 146.3530113697052secs.
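The `valid jaccard` figure reported after each fold is a word-level Jaccard similarity averaged over the validation predictions. The metric code itself is not part of this output, so the snippet below is only a sketch of the commonly used formulation; the function names (`word_jaccard`, `mean_jaccard`) and the empty-string handling are assumptions, not the notebook's actual implementation.

```python
def word_jaccard(pred: str, truth: str) -> float:
    # Word-level Jaccard: |intersection| / |union| of the two token sets.
    a, b = set(pred.lower().split()), set(truth.lower().split())
    if not a and not b:
        return 1.0  # assumption: two empty strings count as a perfect match
    inter = len(a & b)
    return inter / (len(a) + len(b) - inter)


def mean_jaccard(preds, truths) -> float:
    # Average over the validation examples (e.g. the 3061 features above).
    scores = [word_jaccard(p, t) for p, t in zip(preds, truths)]
    return sum(scores) / len(scores)
```

Under this reading, fold 1's score of about 0.709 means the predicted and reference token sets share roughly 71% of their combined tokens on average.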
--------------------------------------------------
FOLD: 2
--------------------------------------------------
Model pushed to 1 GPU(s), type Tesla P100-PCIE-16GB.
Num examples Train= 25676, Num examples Valid=2824
Total Training Steps: 3210, Total Warmup Steps: 321
Epoch: 00 [    4/25676 (  0%)], Train Loss: 3.55252
[... per-step training loss log for fold 2 condensed to ~10% milestones ...]
Epoch: 00 [ 2284/25676 (  9%)], Train Loss: 0.96562
Epoch: 00 [ 4964/25676 ( 19%)], Train Loss: 0.74789
Epoch: 00 [ 7644/25676 ( 30%)], Train Loss: 0.65701
Epoch: 00 [10324/25676 ( 40%)], Train Loss: 0.61367
Epoch: 00 [13004/25676 ( 51%)], Train Loss: 0.57440
Epoch: 00 [15684/25676 ( 61%)], Train Loss: 0.54592
Epoch: 00 [18364/25676 ( 72%)], Train Loss: 0.52569
Epoch: 00 [21044/25676 ( 82%)], Train Loss: 0.50774
Epoch: 00 [23724/25676 ( 92%)], Train Loss: 0.49497
Epoch: 00 [25676/25676 (100%)], Train Loss: 0.48541
----Validation Results Summary----
Epoch: [0] Valid Loss: 0.22268
Post-processing 223 example predictions split into 2824 features.
valid jaccard: 0.6857569933803119
0 Epoch, Best epoch was updated! Valid Loss: 0.22268
Saving model checkpoint to output/checkpoint-fold-2-epoch-0.

Total Training Time: 3775.903554201126secs, Average Training Time per Epoch: 3775.903554201126secs.
Total Validation Time: 134.03500247001648secs, Average Validation Time per Epoch: 134.03500247001648secs.
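Each fold reports `Total Warmup Steps` equal to roughly 10% of `Total Training Steps` (321/3210 for fold 2, 319/3195 for fold 3), which matches the usual linear warm-up-then-decay learning-rate pattern. The scheduler code is not shown in this output, so the sketch below is only one common way such a schedule is built in PyTorch; treat the function name and the decay shape as assumptions rather than the notebook's actual configuration.

```python
from torch.optim.lr_scheduler import LambdaLR


def linear_warmup_then_decay(optimizer, num_warmup_steps: int, num_training_steps: int) -> LambdaLR:
    # Scale the LR linearly from 0 up to its base value over the warmup steps,
    # then decay it linearly back to 0 over the remaining training steps.
    def lr_lambda(step: int) -> float:
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        remaining = num_training_steps - step
        return max(0.0, remaining / max(1, num_training_steps - num_warmup_steps))

    return LambdaLR(optimizer, lr_lambda)


# e.g. fold 2 above: 3210 total steps with 321 warmup steps
# scheduler = linear_warmup_then_decay(optimizer, 321, 3210)
```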
--------------------------------------------------
FOLD: 3
--------------------------------------------------
Model pushed to 1 GPU(s), type Tesla P100-PCIE-16GB.
Num examples Train= 25556, Num examples Valid=2944
Total Training Steps: 3195, Total Warmup Steps: 319
Epoch: 00 [    4/25556 (  0%)], Train Loss: 3.22703
[... per-step training loss log for fold 3 condensed to ~10% milestones ...]
Epoch: 00 [ 2284/25556 (  9%)], Train Loss: 0.95303
Epoch: 00 [ 4964/25556 ( 19%)], Train Loss: 0.72617
Epoch: 00 [ 7644/25556 ( 30%)], Train Loss: 
0.65367\nEpoch: 00 [ 7684/25556 ( 30%)], Train Loss: 0.65213\nEpoch: 00 [ 7724/25556 ( 30%)], Train Loss: 0.65220\nEpoch: 00 [ 7764/25556 ( 30%)], Train Loss: 0.65113\nEpoch: 00 [ 7804/25556 ( 31%)], Train Loss: 0.64992\nEpoch: 00 [ 7844/25556 ( 31%)], Train Loss: 0.64953\nEpoch: 00 [ 7884/25556 ( 31%)], Train Loss: 0.64744\nEpoch: 00 [ 7924/25556 ( 31%)], Train Loss: 0.64543\nEpoch: 00 [ 7964/25556 ( 31%)], Train Loss: 0.64449\nEpoch: 00 [ 8004/25556 ( 31%)], Train Loss: 0.64333\nEpoch: 00 [ 8044/25556 ( 31%)], Train Loss: 0.64215\nEpoch: 00 [ 8084/25556 ( 32%)], Train Loss: 0.64196\nEpoch: 00 [ 8124/25556 ( 32%)], Train Loss: 0.64164\nEpoch: 00 [ 8164/25556 ( 32%)], Train Loss: 0.64158\nEpoch: 00 [ 8204/25556 ( 32%)], Train Loss: 0.64174\nEpoch: 00 [ 8244/25556 ( 32%)], Train Loss: 0.64060\nEpoch: 00 [ 8284/25556 ( 32%)], Train Loss: 0.63950\nEpoch: 00 [ 8324/25556 ( 33%)], Train Loss: 0.63894\nEpoch: 00 [ 8364/25556 ( 33%)], Train Loss: 0.63867\nEpoch: 00 [ 8404/25556 ( 33%)], Train Loss: 0.63719\nEpoch: 00 [ 8444/25556 ( 33%)], Train Loss: 0.63622\nEpoch: 00 [ 8484/25556 ( 33%)], Train Loss: 0.63547\nEpoch: 00 [ 8524/25556 ( 33%)], Train Loss: 0.63458\nEpoch: 00 [ 8564/25556 ( 34%)], Train Loss: 0.63351\nEpoch: 00 [ 8604/25556 ( 34%)], Train Loss: 0.63341\nEpoch: 00 [ 8644/25556 ( 34%)], Train Loss: 0.63271\nEpoch: 00 [ 8684/25556 ( 34%)], Train Loss: 0.63193\nEpoch: 00 [ 8724/25556 ( 34%)], Train Loss: 0.63116\nEpoch: 00 [ 8764/25556 ( 34%)], Train Loss: 0.63079\nEpoch: 00 [ 8804/25556 ( 34%)], Train Loss: 0.63054\nEpoch: 00 [ 8844/25556 ( 35%)], Train Loss: 0.63001\nEpoch: 00 [ 8884/25556 ( 35%)], Train Loss: 0.62889\nEpoch: 00 [ 8924/25556 ( 35%)], Train Loss: 0.62875\nEpoch: 00 [ 8964/25556 ( 35%)], Train Loss: 0.62956\nEpoch: 00 [ 9004/25556 ( 35%)], Train Loss: 0.62827\nEpoch: 00 [ 9044/25556 ( 35%)], Train Loss: 0.62831\nEpoch: 00 [ 9084/25556 ( 36%)], Train Loss: 0.62855\nEpoch: 00 [ 9124/25556 ( 36%)], Train Loss: 0.62768\nEpoch: 00 [ 9164/25556 ( 36%)], Train Loss: 0.62757\nEpoch: 00 [ 9204/25556 ( 36%)], Train Loss: 0.62676\nEpoch: 00 [ 9244/25556 ( 36%)], Train Loss: 0.62571\nEpoch: 00 [ 9284/25556 ( 36%)], Train Loss: 0.62508\nEpoch: 00 [ 9324/25556 ( 36%)], Train Loss: 0.62483\nEpoch: 00 [ 9364/25556 ( 37%)], Train Loss: 0.62523\nEpoch: 00 [ 9404/25556 ( 37%)], Train Loss: 0.62366\nEpoch: 00 [ 9444/25556 ( 37%)], Train Loss: 0.62420\nEpoch: 00 [ 9484/25556 ( 37%)], Train Loss: 0.62383\nEpoch: 00 [ 9524/25556 ( 37%)], Train Loss: 0.62369\nEpoch: 00 [ 9564/25556 ( 37%)], Train Loss: 0.62325\nEpoch: 00 [ 9604/25556 ( 38%)], Train Loss: 0.62238\nEpoch: 00 [ 9644/25556 ( 38%)], Train Loss: 0.62125\nEpoch: 00 [ 9684/25556 ( 38%)], Train Loss: 0.62104\nEpoch: 00 [ 9724/25556 ( 38%)], Train Loss: 0.61995\nEpoch: 00 [ 9764/25556 ( 38%)], Train Loss: 0.61949\nEpoch: 00 [ 9804/25556 ( 38%)], Train Loss: 0.61840\nEpoch: 00 [ 9844/25556 ( 39%)], Train Loss: 0.61746\nEpoch: 00 [ 9884/25556 ( 39%)], Train Loss: 0.61675\nEpoch: 00 [ 9924/25556 ( 39%)], Train Loss: 0.61616\nEpoch: 00 [ 9964/25556 ( 39%)], Train Loss: 0.61584\nEpoch: 00 [10004/25556 ( 39%)], Train Loss: 0.61654\nEpoch: 00 [10044/25556 ( 39%)], Train Loss: 0.61566\nEpoch: 00 [10084/25556 ( 39%)], Train Loss: 0.61460\nEpoch: 00 [10124/25556 ( 40%)], Train Loss: 0.61374\nEpoch: 00 [10164/25556 ( 40%)], Train Loss: 0.61292\nEpoch: 00 [10204/25556 ( 40%)], Train Loss: 0.61203\nEpoch: 00 [10244/25556 ( 40%)], Train Loss: 0.61116\nEpoch: 00 [10284/25556 ( 40%)], Train Loss: 0.61140\nEpoch: 00 [10324/25556 ( 40%)], Train Loss: 
0.61056\nEpoch: 00 [10364/25556 ( 41%)], Train Loss: 0.61006\nEpoch: 00 [10404/25556 ( 41%)], Train Loss: 0.60944\nEpoch: 00 [10444/25556 ( 41%)], Train Loss: 0.60913\nEpoch: 00 [10484/25556 ( 41%)], Train Loss: 0.60824\nEpoch: 00 [10524/25556 ( 41%)], Train Loss: 0.60709\nEpoch: 00 [10564/25556 ( 41%)], Train Loss: 0.60628\nEpoch: 00 [10604/25556 ( 41%)], Train Loss: 0.60649\nEpoch: 00 [10644/25556 ( 42%)], Train Loss: 0.60650\nEpoch: 00 [10684/25556 ( 42%)], Train Loss: 0.60542\nEpoch: 00 [10724/25556 ( 42%)], Train Loss: 0.60592\nEpoch: 00 [10764/25556 ( 42%)], Train Loss: 0.60500\nEpoch: 00 [10804/25556 ( 42%)], Train Loss: 0.60440\nEpoch: 00 [10844/25556 ( 42%)], Train Loss: 0.60383\nEpoch: 00 [10884/25556 ( 43%)], Train Loss: 0.60319\nEpoch: 00 [10924/25556 ( 43%)], Train Loss: 0.60244\nEpoch: 00 [10964/25556 ( 43%)], Train Loss: 0.60196\nEpoch: 00 [11004/25556 ( 43%)], Train Loss: 0.60187\nEpoch: 00 [11044/25556 ( 43%)], Train Loss: 0.60111\nEpoch: 00 [11084/25556 ( 43%)], Train Loss: 0.60004\nEpoch: 00 [11124/25556 ( 44%)], Train Loss: 0.59960\nEpoch: 00 [11164/25556 ( 44%)], Train Loss: 0.59941\nEpoch: 00 [11204/25556 ( 44%)], Train Loss: 0.59862\nEpoch: 00 [11244/25556 ( 44%)], Train Loss: 0.59783\nEpoch: 00 [11284/25556 ( 44%)], Train Loss: 0.59664\nEpoch: 00 [11324/25556 ( 44%)], Train Loss: 0.59564\nEpoch: 00 [11364/25556 ( 44%)], Train Loss: 0.59469\nEpoch: 00 [11404/25556 ( 45%)], Train Loss: 0.59482\nEpoch: 00 [11444/25556 ( 45%)], Train Loss: 0.59455\nEpoch: 00 [11484/25556 ( 45%)], Train Loss: 0.59403\nEpoch: 00 [11524/25556 ( 45%)], Train Loss: 0.59345\nEpoch: 00 [11564/25556 ( 45%)], Train Loss: 0.59264\nEpoch: 00 [11604/25556 ( 45%)], Train Loss: 0.59309\nEpoch: 00 [11644/25556 ( 46%)], Train Loss: 0.59276\nEpoch: 00 [11684/25556 ( 46%)], Train Loss: 0.59218\nEpoch: 00 [11724/25556 ( 46%)], Train Loss: 0.59178\nEpoch: 00 [11764/25556 ( 46%)], Train Loss: 0.59099\nEpoch: 00 [11804/25556 ( 46%)], Train Loss: 0.59037\nEpoch: 00 [11844/25556 ( 46%)], Train Loss: 0.58987\nEpoch: 00 [11884/25556 ( 47%)], Train Loss: 0.58941\nEpoch: 00 [11924/25556 ( 47%)], Train Loss: 0.58854\nEpoch: 00 [11964/25556 ( 47%)], Train Loss: 0.58811\nEpoch: 00 [12004/25556 ( 47%)], Train Loss: 0.58747\nEpoch: 00 [12044/25556 ( 47%)], Train Loss: 0.58661\nEpoch: 00 [12084/25556 ( 47%)], Train Loss: 0.58659\nEpoch: 00 [12124/25556 ( 47%)], Train Loss: 0.58620\nEpoch: 00 [12164/25556 ( 48%)], Train Loss: 0.58554\nEpoch: 00 [12204/25556 ( 48%)], Train Loss: 0.58532\nEpoch: 00 [12244/25556 ( 48%)], Train Loss: 0.58515\nEpoch: 00 [12284/25556 ( 48%)], Train Loss: 0.58495\nEpoch: 00 [12324/25556 ( 48%)], Train Loss: 0.58464\nEpoch: 00 [12364/25556 ( 48%)], Train Loss: 0.58467\nEpoch: 00 [12404/25556 ( 49%)], Train Loss: 0.58424\nEpoch: 00 [12444/25556 ( 49%)], Train Loss: 0.58380\nEpoch: 00 [12484/25556 ( 49%)], Train Loss: 0.58288\nEpoch: 00 [12524/25556 ( 49%)], Train Loss: 0.58246\nEpoch: 00 [12564/25556 ( 49%)], Train Loss: 0.58184\nEpoch: 00 [12604/25556 ( 49%)], Train Loss: 0.58063\nEpoch: 00 [12644/25556 ( 49%)], Train Loss: 0.58036\nEpoch: 00 [12684/25556 ( 50%)], Train Loss: 0.58036\nEpoch: 00 [12724/25556 ( 50%)], Train Loss: 0.58033\nEpoch: 00 [12764/25556 ( 50%)], Train Loss: 0.58028\nEpoch: 00 [12804/25556 ( 50%)], Train Loss: 0.57985\nEpoch: 00 [12844/25556 ( 50%)], Train Loss: 0.57979\nEpoch: 00 [12884/25556 ( 50%)], Train Loss: 0.58006\nEpoch: 00 [12924/25556 ( 51%)], Train Loss: 0.57957\nEpoch: 00 [12964/25556 ( 51%)], Train Loss: 0.57924\nEpoch: 00 [13004/25556 ( 51%)], Train Loss: 
0.57860\nEpoch: 00 [13044/25556 ( 51%)], Train Loss: 0.57814\nEpoch: 00 [13084/25556 ( 51%)], Train Loss: 0.57814\nEpoch: 00 [13124/25556 ( 51%)], Train Loss: 0.57749\nEpoch: 00 [13164/25556 ( 52%)], Train Loss: 0.57737\nEpoch: 00 [13204/25556 ( 52%)], Train Loss: 0.57698\nEpoch: 00 [13244/25556 ( 52%)], Train Loss: 0.57590\nEpoch: 00 [13284/25556 ( 52%)], Train Loss: 0.57477\nEpoch: 00 [13324/25556 ( 52%)], Train Loss: 0.57451\nEpoch: 00 [13364/25556 ( 52%)], Train Loss: 0.57380\nEpoch: 00 [13404/25556 ( 52%)], Train Loss: 0.57313\nEpoch: 00 [13444/25556 ( 53%)], Train Loss: 0.57259\nEpoch: 00 [13484/25556 ( 53%)], Train Loss: 0.57188\nEpoch: 00 [13524/25556 ( 53%)], Train Loss: 0.57153\nEpoch: 00 [13564/25556 ( 53%)], Train Loss: 0.57112\nEpoch: 00 [13604/25556 ( 53%)], Train Loss: 0.57066\nEpoch: 00 [13644/25556 ( 53%)], Train Loss: 0.57002\nEpoch: 00 [13684/25556 ( 54%)], Train Loss: 0.56931\nEpoch: 00 [13724/25556 ( 54%)], Train Loss: 0.56890\nEpoch: 00 [13764/25556 ( 54%)], Train Loss: 0.56805\nEpoch: 00 [13804/25556 ( 54%)], Train Loss: 0.56729\nEpoch: 00 [13844/25556 ( 54%)], Train Loss: 0.56670\nEpoch: 00 [13884/25556 ( 54%)], Train Loss: 0.56631\nEpoch: 00 [13924/25556 ( 54%)], Train Loss: 0.56591\nEpoch: 00 [13964/25556 ( 55%)], Train Loss: 0.56526\nEpoch: 00 [14004/25556 ( 55%)], Train Loss: 0.56540\nEpoch: 00 [14044/25556 ( 55%)], Train Loss: 0.56513\nEpoch: 00 [14084/25556 ( 55%)], Train Loss: 0.56448\nEpoch: 00 [14124/25556 ( 55%)], Train Loss: 0.56378\nEpoch: 00 [14164/25556 ( 55%)], Train Loss: 0.56353\nEpoch: 00 [14204/25556 ( 56%)], Train Loss: 0.56344\nEpoch: 00 [14244/25556 ( 56%)], Train Loss: 0.56288\nEpoch: 00 [14284/25556 ( 56%)], Train Loss: 0.56260\nEpoch: 00 [14324/25556 ( 56%)], Train Loss: 0.56203\nEpoch: 00 [14364/25556 ( 56%)], Train Loss: 0.56114\nEpoch: 00 [14404/25556 ( 56%)], Train Loss: 0.56138\nEpoch: 00 [14444/25556 ( 57%)], Train Loss: 0.56125\nEpoch: 00 [14484/25556 ( 57%)], Train Loss: 0.56125\nEpoch: 00 [14524/25556 ( 57%)], Train Loss: 0.56085\nEpoch: 00 [14564/25556 ( 57%)], Train Loss: 0.56042\nEpoch: 00 [14604/25556 ( 57%)], Train Loss: 0.55991\nEpoch: 00 [14644/25556 ( 57%)], Train Loss: 0.55977\nEpoch: 00 [14684/25556 ( 57%)], Train Loss: 0.55903\nEpoch: 00 [14724/25556 ( 58%)], Train Loss: 0.55884\nEpoch: 00 [14764/25556 ( 58%)], Train Loss: 0.55886\nEpoch: 00 [14804/25556 ( 58%)], Train Loss: 0.55848\nEpoch: 00 [14844/25556 ( 58%)], Train Loss: 0.55816\nEpoch: 00 [14884/25556 ( 58%)], Train Loss: 0.55761\nEpoch: 00 [14924/25556 ( 58%)], Train Loss: 0.55721\nEpoch: 00 [14964/25556 ( 59%)], Train Loss: 0.55743\nEpoch: 00 [15004/25556 ( 59%)], Train Loss: 0.55727\nEpoch: 00 [15044/25556 ( 59%)], Train Loss: 0.55664\nEpoch: 00 [15084/25556 ( 59%)], Train Loss: 0.55625\nEpoch: 00 [15124/25556 ( 59%)], Train Loss: 0.55559\nEpoch: 00 [15164/25556 ( 59%)], Train Loss: 0.55477\nEpoch: 00 [15204/25556 ( 59%)], Train Loss: 0.55417\nEpoch: 00 [15244/25556 ( 60%)], Train Loss: 0.55402\nEpoch: 00 [15284/25556 ( 60%)], Train Loss: 0.55348\nEpoch: 00 [15324/25556 ( 60%)], Train Loss: 0.55322\nEpoch: 00 [15364/25556 ( 60%)], Train Loss: 0.55250\nEpoch: 00 [15404/25556 ( 60%)], Train Loss: 0.55247\nEpoch: 00 [15444/25556 ( 60%)], Train Loss: 0.55196\nEpoch: 00 [15484/25556 ( 61%)], Train Loss: 0.55154\nEpoch: 00 [15524/25556 ( 61%)], Train Loss: 0.55077\nEpoch: 00 [15564/25556 ( 61%)], Train Loss: 0.55057\nEpoch: 00 [15604/25556 ( 61%)], Train Loss: 0.55065\nEpoch: 00 [15644/25556 ( 61%)], Train Loss: 0.55065\nEpoch: 00 [15684/25556 ( 61%)], Train Loss: 
0.55050\nEpoch: 00 [15724/25556 ( 62%)], Train Loss: 0.54970\nEpoch: 00 [15764/25556 ( 62%)], Train Loss: 0.54887\nEpoch: 00 [15804/25556 ( 62%)], Train Loss: 0.54813\nEpoch: 00 [15844/25556 ( 62%)], Train Loss: 0.54796\nEpoch: 00 [15884/25556 ( 62%)], Train Loss: 0.54743\nEpoch: 00 [15924/25556 ( 62%)], Train Loss: 0.54744\nEpoch: 00 [15964/25556 ( 62%)], Train Loss: 0.54732\nEpoch: 00 [16004/25556 ( 63%)], Train Loss: 0.54691\nEpoch: 00 [16044/25556 ( 63%)], Train Loss: 0.54650\nEpoch: 00 [16084/25556 ( 63%)], Train Loss: 0.54626\nEpoch: 00 [16124/25556 ( 63%)], Train Loss: 0.54629\nEpoch: 00 [16164/25556 ( 63%)], Train Loss: 0.54600\nEpoch: 00 [16204/25556 ( 63%)], Train Loss: 0.54547\nEpoch: 00 [16244/25556 ( 64%)], Train Loss: 0.54540\nEpoch: 00 [16284/25556 ( 64%)], Train Loss: 0.54510\nEpoch: 00 [16324/25556 ( 64%)], Train Loss: 0.54476\nEpoch: 00 [16364/25556 ( 64%)], Train Loss: 0.54457\nEpoch: 00 [16404/25556 ( 64%)], Train Loss: 0.54397\nEpoch: 00 [16444/25556 ( 64%)], Train Loss: 0.54383\nEpoch: 00 [16484/25556 ( 65%)], Train Loss: 0.54383\nEpoch: 00 [16524/25556 ( 65%)], Train Loss: 0.54334\nEpoch: 00 [16564/25556 ( 65%)], Train Loss: 0.54338\nEpoch: 00 [16604/25556 ( 65%)], Train Loss: 0.54311\nEpoch: 00 [16644/25556 ( 65%)], Train Loss: 0.54259\nEpoch: 00 [16684/25556 ( 65%)], Train Loss: 0.54276\nEpoch: 00 [16724/25556 ( 65%)], Train Loss: 0.54283\nEpoch: 00 [16764/25556 ( 66%)], Train Loss: 0.54239\nEpoch: 00 [16804/25556 ( 66%)], Train Loss: 0.54181\nEpoch: 00 [16844/25556 ( 66%)], Train Loss: 0.54135\nEpoch: 00 [16884/25556 ( 66%)], Train Loss: 0.54057\nEpoch: 00 [16924/25556 ( 66%)], Train Loss: 0.54008\nEpoch: 00 [16964/25556 ( 66%)], Train Loss: 0.54002\nEpoch: 00 [17004/25556 ( 67%)], Train Loss: 0.53966\nEpoch: 00 [17044/25556 ( 67%)], Train Loss: 0.53908\nEpoch: 00 [17084/25556 ( 67%)], Train Loss: 0.53894\nEpoch: 00 [17124/25556 ( 67%)], Train Loss: 0.53827\nEpoch: 00 [17164/25556 ( 67%)], Train Loss: 0.53770\nEpoch: 00 [17204/25556 ( 67%)], Train Loss: 0.53778\nEpoch: 00 [17244/25556 ( 67%)], Train Loss: 0.53766\nEpoch: 00 [17284/25556 ( 68%)], Train Loss: 0.53733\nEpoch: 00 [17324/25556 ( 68%)], Train Loss: 0.53687\nEpoch: 00 [17364/25556 ( 68%)], Train Loss: 0.53641\nEpoch: 00 [17404/25556 ( 68%)], Train Loss: 0.53582\nEpoch: 00 [17444/25556 ( 68%)], Train Loss: 0.53632\nEpoch: 00 [17484/25556 ( 68%)], Train Loss: 0.53584\nEpoch: 00 [17524/25556 ( 69%)], Train Loss: 0.53545\nEpoch: 00 [17564/25556 ( 69%)], Train Loss: 0.53576\nEpoch: 00 [17604/25556 ( 69%)], Train Loss: 0.53522\nEpoch: 00 [17644/25556 ( 69%)], Train Loss: 0.53495\nEpoch: 00 [17684/25556 ( 69%)], Train Loss: 0.53454\nEpoch: 00 [17724/25556 ( 69%)], Train Loss: 0.53395\nEpoch: 00 [17764/25556 ( 70%)], Train Loss: 0.53340\nEpoch: 00 [17804/25556 ( 70%)], Train Loss: 0.53318\nEpoch: 00 [17844/25556 ( 70%)], Train Loss: 0.53306\nEpoch: 00 [17884/25556 ( 70%)], Train Loss: 0.53304\nEpoch: 00 [17924/25556 ( 70%)], Train Loss: 0.53284\nEpoch: 00 [17964/25556 ( 70%)], Train Loss: 0.53222\nEpoch: 00 [18004/25556 ( 70%)], Train Loss: 0.53173\nEpoch: 00 [18044/25556 ( 71%)], Train Loss: 0.53149\nEpoch: 00 [18084/25556 ( 71%)], Train Loss: 0.53100\nEpoch: 00 [18124/25556 ( 71%)], Train Loss: 0.53089\nEpoch: 00 [18164/25556 ( 71%)], Train Loss: 0.53115\nEpoch: 00 [18204/25556 ( 71%)], Train Loss: 0.53105\nEpoch: 00 [18244/25556 ( 71%)], Train Loss: 0.53079\nEpoch: 00 [18284/25556 ( 72%)], Train Loss: 0.53063\nEpoch: 00 [18324/25556 ( 72%)], Train Loss: 0.53065\nEpoch: 00 [18364/25556 ( 72%)], Train Loss: 
0.53041\nEpoch: 00 [18404/25556 ( 72%)], Train Loss: 0.53015\nEpoch: 00 [18444/25556 ( 72%)], Train Loss: 0.52991\nEpoch: 00 [18484/25556 ( 72%)], Train Loss: 0.52958\nEpoch: 00 [18524/25556 ( 72%)], Train Loss: 0.52918\nEpoch: 00 [18564/25556 ( 73%)], Train Loss: 0.52871\nEpoch: 00 [18604/25556 ( 73%)], Train Loss: 0.52838\nEpoch: 00 [18644/25556 ( 73%)], Train Loss: 0.52823\nEpoch: 00 [18684/25556 ( 73%)], Train Loss: 0.52777\nEpoch: 00 [18724/25556 ( 73%)], Train Loss: 0.52707\nEpoch: 00 [18764/25556 ( 73%)], Train Loss: 0.52685\nEpoch: 00 [18804/25556 ( 74%)], Train Loss: 0.52644\nEpoch: 00 [18844/25556 ( 74%)], Train Loss: 0.52592\nEpoch: 00 [18884/25556 ( 74%)], Train Loss: 0.52592\nEpoch: 00 [18924/25556 ( 74%)], Train Loss: 0.52572\nEpoch: 00 [18964/25556 ( 74%)], Train Loss: 0.52535\nEpoch: 00 [19004/25556 ( 74%)], Train Loss: 0.52473\nEpoch: 00 [19044/25556 ( 75%)], Train Loss: 0.52409\nEpoch: 00 [19084/25556 ( 75%)], Train Loss: 0.52392\nEpoch: 00 [19124/25556 ( 75%)], Train Loss: 0.52362\nEpoch: 00 [19164/25556 ( 75%)], Train Loss: 0.52336\nEpoch: 00 [19204/25556 ( 75%)], Train Loss: 0.52312\nEpoch: 00 [19244/25556 ( 75%)], Train Loss: 0.52263\nEpoch: 00 [19284/25556 ( 75%)], Train Loss: 0.52229\nEpoch: 00 [19324/25556 ( 76%)], Train Loss: 0.52195\nEpoch: 00 [19364/25556 ( 76%)], Train Loss: 0.52192\nEpoch: 00 [19404/25556 ( 76%)], Train Loss: 0.52197\nEpoch: 00 [19444/25556 ( 76%)], Train Loss: 0.52183\nEpoch: 00 [19484/25556 ( 76%)], Train Loss: 0.52163\nEpoch: 00 [19524/25556 ( 76%)], Train Loss: 0.52131\nEpoch: 00 [19564/25556 ( 77%)], Train Loss: 0.52116\nEpoch: 00 [19604/25556 ( 77%)], Train Loss: 0.52092\nEpoch: 00 [19644/25556 ( 77%)], Train Loss: 0.52045\nEpoch: 00 [19684/25556 ( 77%)], Train Loss: 0.52035\nEpoch: 00 [19724/25556 ( 77%)], Train Loss: 0.51986\nEpoch: 00 [19764/25556 ( 77%)], Train Loss: 0.51937\nEpoch: 00 [19804/25556 ( 77%)], Train Loss: 0.51923\nEpoch: 00 [19844/25556 ( 78%)], Train Loss: 0.51890\nEpoch: 00 [19884/25556 ( 78%)], Train Loss: 0.51863\nEpoch: 00 [19924/25556 ( 78%)], Train Loss: 0.51823\nEpoch: 00 [19964/25556 ( 78%)], Train Loss: 0.51815\nEpoch: 00 [20004/25556 ( 78%)], Train Loss: 0.51769\nEpoch: 00 [20044/25556 ( 78%)], Train Loss: 0.51759\nEpoch: 00 [20084/25556 ( 79%)], Train Loss: 0.51709\nEpoch: 00 [20124/25556 ( 79%)], Train Loss: 0.51656\nEpoch: 00 [20164/25556 ( 79%)], Train Loss: 0.51647\nEpoch: 00 [20204/25556 ( 79%)], Train Loss: 0.51608\nEpoch: 00 [20244/25556 ( 79%)], Train Loss: 0.51580\nEpoch: 00 [20284/25556 ( 79%)], Train Loss: 0.51551\nEpoch: 00 [20324/25556 ( 80%)], Train Loss: 0.51496\nEpoch: 00 [20364/25556 ( 80%)], Train Loss: 0.51458\nEpoch: 00 [20404/25556 ( 80%)], Train Loss: 0.51450\nEpoch: 00 [20444/25556 ( 80%)], Train Loss: 0.51432\nEpoch: 00 [20484/25556 ( 80%)], Train Loss: 0.51428\nEpoch: 00 [20524/25556 ( 80%)], Train Loss: 0.51390\nEpoch: 00 [20564/25556 ( 80%)], Train Loss: 0.51370\nEpoch: 00 [20604/25556 ( 81%)], Train Loss: 0.51362\nEpoch: 00 [20644/25556 ( 81%)], Train Loss: 0.51348\nEpoch: 00 [20684/25556 ( 81%)], Train Loss: 0.51331\nEpoch: 00 [20724/25556 ( 81%)], Train Loss: 0.51309\nEpoch: 00 [20764/25556 ( 81%)], Train Loss: 0.51306\nEpoch: 00 [20804/25556 ( 81%)], Train Loss: 0.51263\nEpoch: 00 [20844/25556 ( 82%)], Train Loss: 0.51230\nEpoch: 00 [20884/25556 ( 82%)], Train Loss: 0.51180\nEpoch: 00 [20924/25556 ( 82%)], Train Loss: 0.51157\nEpoch: 00 [20964/25556 ( 82%)], Train Loss: 0.51161\nEpoch: 00 [21004/25556 ( 82%)], Train Loss: 0.51139\nEpoch: 00 [21044/25556 ( 82%)], Train Loss: 
0.51082\nEpoch: 00 [21084/25556 ( 83%)], Train Loss: 0.51053\nEpoch: 00 [21124/25556 ( 83%)], Train Loss: 0.51025\nEpoch: 00 [21164/25556 ( 83%)], Train Loss: 0.50986\nEpoch: 00 [21204/25556 ( 83%)], Train Loss: 0.51035\nEpoch: 00 [21244/25556 ( 83%)], Train Loss: 0.51010\nEpoch: 00 [21284/25556 ( 83%)], Train Loss: 0.51007\nEpoch: 00 [21324/25556 ( 83%)], Train Loss: 0.50983\nEpoch: 00 [21364/25556 ( 84%)], Train Loss: 0.50960\nEpoch: 00 [21404/25556 ( 84%)], Train Loss: 0.50904\nEpoch: 00 [21444/25556 ( 84%)], Train Loss: 0.50879\nEpoch: 00 [21484/25556 ( 84%)], Train Loss: 0.50833\nEpoch: 00 [21524/25556 ( 84%)], Train Loss: 0.50810\nEpoch: 00 [21564/25556 ( 84%)], Train Loss: 0.50781\nEpoch: 00 [21604/25556 ( 85%)], Train Loss: 0.50801\nEpoch: 00 [21644/25556 ( 85%)], Train Loss: 0.50782\nEpoch: 00 [21684/25556 ( 85%)], Train Loss: 0.50780\nEpoch: 00 [21724/25556 ( 85%)], Train Loss: 0.50724\nEpoch: 00 [21764/25556 ( 85%)], Train Loss: 0.50678\nEpoch: 00 [21804/25556 ( 85%)], Train Loss: 0.50676\nEpoch: 00 [21844/25556 ( 85%)], Train Loss: 0.50662\nEpoch: 00 [21884/25556 ( 86%)], Train Loss: 0.50599\nEpoch: 00 [21924/25556 ( 86%)], Train Loss: 0.50569\nEpoch: 00 [21964/25556 ( 86%)], Train Loss: 0.50529\nEpoch: 00 [22004/25556 ( 86%)], Train Loss: 0.50517\nEpoch: 00 [22044/25556 ( 86%)], Train Loss: 0.50512\nEpoch: 00 [22084/25556 ( 86%)], Train Loss: 0.50458\nEpoch: 00 [22124/25556 ( 87%)], Train Loss: 0.50457\nEpoch: 00 [22164/25556 ( 87%)], Train Loss: 0.50437\nEpoch: 00 [22204/25556 ( 87%)], Train Loss: 0.50407\nEpoch: 00 [22244/25556 ( 87%)], Train Loss: 0.50401\nEpoch: 00 [22284/25556 ( 87%)], Train Loss: 0.50425\nEpoch: 00 [22324/25556 ( 87%)], Train Loss: 0.50404\nEpoch: 00 [22364/25556 ( 88%)], Train Loss: 0.50366\nEpoch: 00 [22404/25556 ( 88%)], Train Loss: 0.50325\nEpoch: 00 [22444/25556 ( 88%)], Train Loss: 0.50299\nEpoch: 00 [22484/25556 ( 88%)], Train Loss: 0.50285\nEpoch: 00 [22524/25556 ( 88%)], Train Loss: 0.50234\nEpoch: 00 [22564/25556 ( 88%)], Train Loss: 0.50192\nEpoch: 00 [22604/25556 ( 88%)], Train Loss: 0.50133\nEpoch: 00 [22644/25556 ( 89%)], Train Loss: 0.50138\nEpoch: 00 [22684/25556 ( 89%)], Train Loss: 0.50106\nEpoch: 00 [22724/25556 ( 89%)], Train Loss: 0.50109\nEpoch: 00 [22764/25556 ( 89%)], Train Loss: 0.50091\nEpoch: 00 [22804/25556 ( 89%)], Train Loss: 0.50075\nEpoch: 00 [22844/25556 ( 89%)], Train Loss: 0.50052\nEpoch: 00 [22884/25556 ( 90%)], Train Loss: 0.50031\nEpoch: 00 [22924/25556 ( 90%)], Train Loss: 0.50016\nEpoch: 00 [22964/25556 ( 90%)], Train Loss: 0.49985\nEpoch: 00 [23004/25556 ( 90%)], Train Loss: 0.49995\nEpoch: 00 [23044/25556 ( 90%)], Train Loss: 0.49968\nEpoch: 00 [23084/25556 ( 90%)], Train Loss: 0.49928\nEpoch: 00 [23124/25556 ( 90%)], Train Loss: 0.49890\nEpoch: 00 [23164/25556 ( 91%)], Train Loss: 0.49882\nEpoch: 00 [23204/25556 ( 91%)], Train Loss: 0.49866\nEpoch: 00 [23244/25556 ( 91%)], Train Loss: 0.49883\nEpoch: 00 [23284/25556 ( 91%)], Train Loss: 0.49834\nEpoch: 00 [23324/25556 ( 91%)], Train Loss: 0.49820\nEpoch: 00 [23364/25556 ( 91%)], Train Loss: 0.49780\nEpoch: 00 [23404/25556 ( 92%)], Train Loss: 0.49753\nEpoch: 00 [23444/25556 ( 92%)], Train Loss: 0.49738\nEpoch: 00 [23484/25556 ( 92%)], Train Loss: 0.49686\nEpoch: 00 [23524/25556 ( 92%)], Train Loss: 0.49653\nEpoch: 00 [23564/25556 ( 92%)], Train Loss: 0.49631\nEpoch: 00 [23604/25556 ( 92%)], Train Loss: 0.49627\nEpoch: 00 [23644/25556 ( 93%)], Train Loss: 0.49608\nEpoch: 00 [23684/25556 ( 93%)], Train Loss: 0.49564\nEpoch: 00 [23724/25556 ( 93%)], Train Loss: 
0.49533\nEpoch: 00 [23764/25556 ( 93%)], Train Loss: 0.49526\nEpoch: 00 [23804/25556 ( 93%)], Train Loss: 0.49548\nEpoch: 00 [23844/25556 ( 93%)], Train Loss: 0.49543\nEpoch: 00 [23884/25556 ( 93%)], Train Loss: 0.49545\nEpoch: 00 [23924/25556 ( 94%)], Train Loss: 0.49534\nEpoch: 00 [23964/25556 ( 94%)], Train Loss: 0.49530\nEpoch: 00 [24004/25556 ( 94%)], Train Loss: 0.49500\nEpoch: 00 [24044/25556 ( 94%)], Train Loss: 0.49502\nEpoch: 00 [24084/25556 ( 94%)], Train Loss: 0.49479\nEpoch: 00 [24124/25556 ( 94%)], Train Loss: 0.49465\nEpoch: 00 [24164/25556 ( 95%)], Train Loss: 0.49437\nEpoch: 00 [24204/25556 ( 95%)], Train Loss: 0.49430\nEpoch: 00 [24244/25556 ( 95%)], Train Loss: 0.49389\nEpoch: 00 [24284/25556 ( 95%)], Train Loss: 0.49358\nEpoch: 00 [24324/25556 ( 95%)], Train Loss: 0.49321\nEpoch: 00 [24364/25556 ( 95%)], Train Loss: 0.49299\nEpoch: 00 [24404/25556 ( 95%)], Train Loss: 0.49275\nEpoch: 00 [24444/25556 ( 96%)], Train Loss: 0.49251\nEpoch: 00 [24484/25556 ( 96%)], Train Loss: 0.49254\nEpoch: 00 [24524/25556 ( 96%)], Train Loss: 0.49248\nEpoch: 00 [24564/25556 ( 96%)], Train Loss: 0.49221\nEpoch: 00 [24604/25556 ( 96%)], Train Loss: 0.49206\nEpoch: 00 [24644/25556 ( 96%)], Train Loss: 0.49173\nEpoch: 00 [24684/25556 ( 97%)], Train Loss: 0.49162\nEpoch: 00 [24724/25556 ( 97%)], Train Loss: 0.49127\nEpoch: 00 [24764/25556 ( 97%)], Train Loss: 0.49101\nEpoch: 00 [24804/25556 ( 97%)], Train Loss: 0.49099\nEpoch: 00 [24844/25556 ( 97%)], Train Loss: 0.49095\nEpoch: 00 [24884/25556 ( 97%)], Train Loss: 0.49049\nEpoch: 00 [24924/25556 ( 98%)], Train Loss: 0.49010\nEpoch: 00 [24964/25556 ( 98%)], Train Loss: 0.48980\nEpoch: 00 [25004/25556 ( 98%)], Train Loss: 0.48935\nEpoch: 00 [25044/25556 ( 98%)], Train Loss: 0.48908\nEpoch: 00 [25084/25556 ( 98%)], Train Loss: 0.48903\nEpoch: 00 [25124/25556 ( 98%)], Train Loss: 0.48884\nEpoch: 00 [25164/25556 ( 98%)], Train Loss: 0.48871\nEpoch: 00 [25204/25556 ( 99%)], Train Loss: 0.48866\nEpoch: 00 [25244/25556 ( 99%)], Train Loss: 0.48821\nEpoch: 00 [25284/25556 ( 99%)], Train Loss: 0.48809\nEpoch: 00 [25324/25556 ( 99%)], Train Loss: 0.48785\nEpoch: 00 [25364/25556 ( 99%)], Train Loss: 0.48764\nEpoch: 00 [25404/25556 ( 99%)], Train Loss: 0.48759\nEpoch: 00 [25444/25556 (100%)], Train Loss: 0.48759\nEpoch: 00 [25484/25556 (100%)], Train Loss: 0.48733\nEpoch: 00 [25524/25556 (100%)], Train Loss: 0.48709\nEpoch: 00 [25556/25556 (100%)], Train Loss: 0.48720\n----Validation Results Summary----\nEpoch: [0] Valid Loss: 0.21004\nPost-processing 223 example predictions split into 2944 features.\nvalid jaccard: 0.6792707666026053\n0 Epoch, Best epoch was updated! 
Valid Loss: 0.21004\nSaving model checkpoint to output/checkpoint-fold-3-epoch-0.\n\nTotal Training Time: 3759.957547903061secs, Average Training Time per Epoch: 3759.957547903061secs.\nTotal Validation Time: 141.084636926651secs, Average Validation Time per Epoch: 141.084636926651secs.\n\n\n--------------------------------------------------\nFOLD: 4\n--------------------------------------------------\nModel pushed to 1 GPU(s), type Tesla P100-PCIE-16GB.\nNum examples Train= 25568, Num examples Valid=2932\nTotal Training Steps: 3196, Total Warmup Steps: 319\nEpoch: 00 [ 4/25568 ( 0%)], Train Loss: 3.40332\nEpoch: 00 [ 44/25568 ( 0%)], Train Loss: 3.26017\nEpoch: 00 [ 84/25568 ( 0%)], Train Loss: 3.26337\nEpoch: 00 [ 124/25568 ( 0%)], Train Loss: 3.23496\nEpoch: 00 [ 164/25568 ( 1%)], Train Loss: 3.19414\nEpoch: 00 [ 204/25568 ( 1%)], Train Loss: 3.14809\nEpoch: 00 [ 244/25568 ( 1%)], Train Loss: 3.08096\nEpoch: 00 [ 284/25568 ( 1%)], Train Loss: 2.99897\nEpoch: 00 [ 324/25568 ( 1%)], Train Loss: 2.92017\nEpoch: 00 [ 364/25568 ( 1%)], Train Loss: 2.81433\nEpoch: 00 [ 404/25568 ( 2%)], Train Loss: 2.68277\nEpoch: 00 [ 444/25568 ( 2%)], Train Loss: 2.56041\nEpoch: 00 [ 484/25568 ( 2%)], Train Loss: 2.41492\nEpoch: 00 [ 524/25568 ( 2%)], Train Loss: 2.28793\nEpoch: 00 [ 564/25568 ( 2%)], Train Loss: 2.18793\nEpoch: 00 [ 604/25568 ( 2%)], Train Loss: 2.09279\nEpoch: 00 [ 644/25568 ( 3%)], Train Loss: 1.99546\nEpoch: 00 [ 684/25568 ( 3%)], Train Loss: 1.90980\nEpoch: 00 [ 724/25568 ( 3%)], Train Loss: 1.82842\nEpoch: 00 [ 764/25568 ( 3%)], Train Loss: 1.77230\nEpoch: 00 [ 804/25568 ( 3%)], Train Loss: 1.71666\nEpoch: 00 [ 844/25568 ( 3%)], Train Loss: 1.65501\nEpoch: 00 [ 884/25568 ( 3%)], Train Loss: 1.60776\nEpoch: 00 [ 924/25568 ( 4%)], Train Loss: 1.56171\nEpoch: 00 [ 964/25568 ( 4%)], Train Loss: 1.52044\nEpoch: 00 [ 1004/25568 ( 4%)], Train Loss: 1.47808\nEpoch: 00 [ 1044/25568 ( 4%)], Train Loss: 1.45143\nEpoch: 00 [ 1084/25568 ( 4%)], Train Loss: 1.42627\nEpoch: 00 [ 1124/25568 ( 4%)], Train Loss: 1.39472\nEpoch: 00 [ 1164/25568 ( 5%)], Train Loss: 1.37144\nEpoch: 00 [ 1204/25568 ( 5%)], Train Loss: 1.35142\nEpoch: 00 [ 1244/25568 ( 5%)], Train Loss: 1.32860\nEpoch: 00 [ 1284/25568 ( 5%)], Train Loss: 1.30219\nEpoch: 00 [ 1324/25568 ( 5%)], Train Loss: 1.27996\nEpoch: 00 [ 1364/25568 ( 5%)], Train Loss: 1.26045\nEpoch: 00 [ 1404/25568 ( 5%)], Train Loss: 1.23748\nEpoch: 00 [ 1444/25568 ( 6%)], Train Loss: 1.21720\nEpoch: 00 [ 1484/25568 ( 6%)], Train Loss: 1.19902\nEpoch: 00 [ 1524/25568 ( 6%)], Train Loss: 1.18442\nEpoch: 00 [ 1564/25568 ( 6%)], Train Loss: 1.16761\nEpoch: 00 [ 1604/25568 ( 6%)], Train Loss: 1.15270\nEpoch: 00 [ 1644/25568 ( 6%)], Train Loss: 1.13600\nEpoch: 00 [ 1684/25568 ( 7%)], Train Loss: 1.11886\nEpoch: 00 [ 1724/25568 ( 7%)], Train Loss: 1.10006\nEpoch: 00 [ 1764/25568 ( 7%)], Train Loss: 1.08491\nEpoch: 00 [ 1804/25568 ( 7%)], Train Loss: 1.07232\nEpoch: 00 [ 1844/25568 ( 7%)], Train Loss: 1.05973\nEpoch: 00 [ 1884/25568 ( 7%)], Train Loss: 1.05255\nEpoch: 00 [ 1924/25568 ( 8%)], Train Loss: 1.04338\nEpoch: 00 [ 1964/25568 ( 8%)], Train Loss: 1.03472\nEpoch: 00 [ 2004/25568 ( 8%)], Train Loss: 1.02527\nEpoch: 00 [ 2044/25568 ( 8%)], Train Loss: 1.01675\nEpoch: 00 [ 2084/25568 ( 8%)], Train Loss: 1.00708\nEpoch: 00 [ 2124/25568 ( 8%)], Train Loss: 0.99984\nEpoch: 00 [ 2164/25568 ( 8%)], Train Loss: 0.99444\nEpoch: 00 [ 2204/25568 ( 9%)], Train Loss: 0.98531\nEpoch: 00 [ 2244/25568 ( 9%)], Train Loss: 0.97716\nEpoch: 00 [ 2284/25568 ( 9%)], Train Loss: 
0.97270\nEpoch: 00 [ 2324/25568 ( 9%)], Train Loss: 0.96411\nEpoch: 00 [ 2364/25568 ( 9%)], Train Loss: 0.95841\nEpoch: 00 [ 2404/25568 ( 9%)], Train Loss: 0.95431\nEpoch: 00 [ 2444/25568 ( 10%)], Train Loss: 0.94440\nEpoch: 00 [ 2484/25568 ( 10%)], Train Loss: 0.93735\nEpoch: 00 [ 2524/25568 ( 10%)], Train Loss: 0.93092\nEpoch: 00 [ 2564/25568 ( 10%)], Train Loss: 0.92445\nEpoch: 00 [ 2604/25568 ( 10%)], Train Loss: 0.91931\nEpoch: 00 [ 2644/25568 ( 10%)], Train Loss: 0.91483\nEpoch: 00 [ 2684/25568 ( 10%)], Train Loss: 0.91267\nEpoch: 00 [ 2724/25568 ( 11%)], Train Loss: 0.90739\nEpoch: 00 [ 2764/25568 ( 11%)], Train Loss: 0.90134\nEpoch: 00 [ 2804/25568 ( 11%)], Train Loss: 0.89625\nEpoch: 00 [ 2844/25568 ( 11%)], Train Loss: 0.89524\nEpoch: 00 [ 2884/25568 ( 11%)], Train Loss: 0.88949\nEpoch: 00 [ 2924/25568 ( 11%)], Train Loss: 0.88180\nEpoch: 00 [ 2964/25568 ( 12%)], Train Loss: 0.87647\nEpoch: 00 [ 3004/25568 ( 12%)], Train Loss: 0.87558\nEpoch: 00 [ 3044/25568 ( 12%)], Train Loss: 0.87193\nEpoch: 00 [ 3084/25568 ( 12%)], Train Loss: 0.86783\nEpoch: 00 [ 3124/25568 ( 12%)], Train Loss: 0.86296\nEpoch: 00 [ 3164/25568 ( 12%)], Train Loss: 0.85955\nEpoch: 00 [ 3204/25568 ( 13%)], Train Loss: 0.85394\nEpoch: 00 [ 3244/25568 ( 13%)], Train Loss: 0.84770\nEpoch: 00 [ 3284/25568 ( 13%)], Train Loss: 0.84407\nEpoch: 00 [ 3324/25568 ( 13%)], Train Loss: 0.84045\nEpoch: 00 [ 3364/25568 ( 13%)], Train Loss: 0.83717\nEpoch: 00 [ 3404/25568 ( 13%)], Train Loss: 0.83200\nEpoch: 00 [ 3444/25568 ( 13%)], Train Loss: 0.83076\nEpoch: 00 [ 3484/25568 ( 14%)], Train Loss: 0.82636\nEpoch: 00 [ 3524/25568 ( 14%)], Train Loss: 0.82366\nEpoch: 00 [ 3564/25568 ( 14%)], Train Loss: 0.82135\nEpoch: 00 [ 3604/25568 ( 14%)], Train Loss: 0.82003\nEpoch: 00 [ 3644/25568 ( 14%)], Train Loss: 0.81762\nEpoch: 00 [ 3684/25568 ( 14%)], Train Loss: 0.81605\nEpoch: 00 [ 3724/25568 ( 15%)], Train Loss: 0.81490\nEpoch: 00 [ 3764/25568 ( 15%)], Train Loss: 0.81407\nEpoch: 00 [ 3804/25568 ( 15%)], Train Loss: 0.81020\nEpoch: 00 [ 3844/25568 ( 15%)], Train Loss: 0.81078\nEpoch: 00 [ 3884/25568 ( 15%)], Train Loss: 0.80736\nEpoch: 00 [ 3924/25568 ( 15%)], Train Loss: 0.80741\nEpoch: 00 [ 3964/25568 ( 16%)], Train Loss: 0.80443\nEpoch: 00 [ 4004/25568 ( 16%)], Train Loss: 0.80186\nEpoch: 00 [ 4044/25568 ( 16%)], Train Loss: 0.79800\nEpoch: 00 [ 4084/25568 ( 16%)], Train Loss: 0.79349\nEpoch: 00 [ 4124/25568 ( 16%)], Train Loss: 0.78988\nEpoch: 00 [ 4164/25568 ( 16%)], Train Loss: 0.78798\nEpoch: 00 [ 4204/25568 ( 16%)], Train Loss: 0.78559\nEpoch: 00 [ 4244/25568 ( 17%)], Train Loss: 0.78311\nEpoch: 00 [ 4284/25568 ( 17%)], Train Loss: 0.78125\nEpoch: 00 [ 4324/25568 ( 17%)], Train Loss: 0.78073\nEpoch: 00 [ 4364/25568 ( 17%)], Train Loss: 0.77701\nEpoch: 00 [ 4404/25568 ( 17%)], Train Loss: 0.77623\nEpoch: 00 [ 4444/25568 ( 17%)], Train Loss: 0.77371\nEpoch: 00 [ 4484/25568 ( 18%)], Train Loss: 0.77299\nEpoch: 00 [ 4524/25568 ( 18%)], Train Loss: 0.77206\nEpoch: 00 [ 4564/25568 ( 18%)], Train Loss: 0.76947\nEpoch: 00 [ 4604/25568 ( 18%)], Train Loss: 0.76629\nEpoch: 00 [ 4644/25568 ( 18%)], Train Loss: 0.76343\nEpoch: 00 [ 4684/25568 ( 18%)], Train Loss: 0.76054\nEpoch: 00 [ 4724/25568 ( 18%)], Train Loss: 0.75988\nEpoch: 00 [ 4764/25568 ( 19%)], Train Loss: 0.75783\nEpoch: 00 [ 4804/25568 ( 19%)], Train Loss: 0.75628\nEpoch: 00 [ 4844/25568 ( 19%)], Train Loss: 0.75574\nEpoch: 00 [ 4884/25568 ( 19%)], Train Loss: 0.75278\nEpoch: 00 [ 4924/25568 ( 19%)], Train Loss: 0.74945\nEpoch: 00 [ 4964/25568 ( 19%)], Train Loss: 
0.74692\nEpoch: 00 [ 5004/25568 ( 20%)], Train Loss: 0.74886\nEpoch: 00 [ 5044/25568 ( 20%)], Train Loss: 0.74882\nEpoch: 00 [ 5084/25568 ( 20%)], Train Loss: 0.74885\nEpoch: 00 [ 5124/25568 ( 20%)], Train Loss: 0.74648\nEpoch: 00 [ 5164/25568 ( 20%)], Train Loss: 0.74348\nEpoch: 00 [ 5204/25568 ( 20%)], Train Loss: 0.74134\nEpoch: 00 [ 5244/25568 ( 21%)], Train Loss: 0.74058\nEpoch: 00 [ 5284/25568 ( 21%)], Train Loss: 0.73877\nEpoch: 00 [ 5324/25568 ( 21%)], Train Loss: 0.73802\nEpoch: 00 [ 5364/25568 ( 21%)], Train Loss: 0.73497\nEpoch: 00 [ 5404/25568 ( 21%)], Train Loss: 0.73311\nEpoch: 00 [ 5444/25568 ( 21%)], Train Loss: 0.73055\nEpoch: 00 [ 5484/25568 ( 21%)], Train Loss: 0.72993\nEpoch: 00 [ 5524/25568 ( 22%)], Train Loss: 0.72891\nEpoch: 00 [ 5564/25568 ( 22%)], Train Loss: 0.72775\nEpoch: 00 [ 5604/25568 ( 22%)], Train Loss: 0.72658\nEpoch: 00 [ 5644/25568 ( 22%)], Train Loss: 0.72577\nEpoch: 00 [ 5684/25568 ( 22%)], Train Loss: 0.72370\nEpoch: 00 [ 5724/25568 ( 22%)], Train Loss: 0.72190\nEpoch: 00 [ 5764/25568 ( 23%)], Train Loss: 0.72071\nEpoch: 00 [ 5804/25568 ( 23%)], Train Loss: 0.71783\nEpoch: 00 [ 5844/25568 ( 23%)], Train Loss: 0.71595\nEpoch: 00 [ 5884/25568 ( 23%)], Train Loss: 0.71431\nEpoch: 00 [ 5924/25568 ( 23%)], Train Loss: 0.71298\nEpoch: 00 [ 5964/25568 ( 23%)], Train Loss: 0.71108\nEpoch: 00 [ 6004/25568 ( 23%)], Train Loss: 0.70891\nEpoch: 00 [ 6044/25568 ( 24%)], Train Loss: 0.70770\nEpoch: 00 [ 6084/25568 ( 24%)], Train Loss: 0.70568\nEpoch: 00 [ 6124/25568 ( 24%)], Train Loss: 0.70563\nEpoch: 00 [ 6164/25568 ( 24%)], Train Loss: 0.70462\nEpoch: 00 [ 6204/25568 ( 24%)], Train Loss: 0.70453\nEpoch: 00 [ 6244/25568 ( 24%)], Train Loss: 0.70283\nEpoch: 00 [ 6284/25568 ( 25%)], Train Loss: 0.70195\nEpoch: 00 [ 6324/25568 ( 25%)], Train Loss: 0.70153\nEpoch: 00 [ 6364/25568 ( 25%)], Train Loss: 0.69983\nEpoch: 00 [ 6404/25568 ( 25%)], Train Loss: 0.69830\nEpoch: 00 [ 6444/25568 ( 25%)], Train Loss: 0.69846\nEpoch: 00 [ 6484/25568 ( 25%)], Train Loss: 0.69735\nEpoch: 00 [ 6524/25568 ( 26%)], Train Loss: 0.69571\nEpoch: 00 [ 6564/25568 ( 26%)], Train Loss: 0.69397\nEpoch: 00 [ 6604/25568 ( 26%)], Train Loss: 0.69198\nEpoch: 00 [ 6644/25568 ( 26%)], Train Loss: 0.68959\nEpoch: 00 [ 6684/25568 ( 26%)], Train Loss: 0.69146\nEpoch: 00 [ 6724/25568 ( 26%)], Train Loss: 0.69148\nEpoch: 00 [ 6764/25568 ( 26%)], Train Loss: 0.68926\nEpoch: 00 [ 6804/25568 ( 27%)], Train Loss: 0.68813\nEpoch: 00 [ 6844/25568 ( 27%)], Train Loss: 0.68724\nEpoch: 00 [ 6884/25568 ( 27%)], Train Loss: 0.68577\nEpoch: 00 [ 6924/25568 ( 27%)], Train Loss: 0.68385\nEpoch: 00 [ 6964/25568 ( 27%)], Train Loss: 0.68369\nEpoch: 00 [ 7004/25568 ( 27%)], Train Loss: 0.68285\nEpoch: 00 [ 7044/25568 ( 28%)], Train Loss: 0.68281\nEpoch: 00 [ 7084/25568 ( 28%)], Train Loss: 0.68185\nEpoch: 00 [ 7124/25568 ( 28%)], Train Loss: 0.68002\nEpoch: 00 [ 7164/25568 ( 28%)], Train Loss: 0.67911\nEpoch: 00 [ 7204/25568 ( 28%)], Train Loss: 0.67714\nEpoch: 00 [ 7244/25568 ( 28%)], Train Loss: 0.67578\nEpoch: 00 [ 7284/25568 ( 28%)], Train Loss: 0.67569\nEpoch: 00 [ 7324/25568 ( 29%)], Train Loss: 0.67622\nEpoch: 00 [ 7364/25568 ( 29%)], Train Loss: 0.67501\nEpoch: 00 [ 7404/25568 ( 29%)], Train Loss: 0.67329\nEpoch: 00 [ 7444/25568 ( 29%)], Train Loss: 0.67215\nEpoch: 00 [ 7484/25568 ( 29%)], Train Loss: 0.67207\nEpoch: 00 [ 7524/25568 ( 29%)], Train Loss: 0.67077\nEpoch: 00 [ 7564/25568 ( 30%)], Train Loss: 0.66925\nEpoch: 00 [ 7604/25568 ( 30%)], Train Loss: 0.66797\nEpoch: 00 [ 7644/25568 ( 30%)], Train Loss: 
0.66776\nEpoch: 00 [ 7684/25568 ( 30%)], Train Loss: 0.66703\nEpoch: 00 [ 7724/25568 ( 30%)], Train Loss: 0.66520\nEpoch: 00 [ 7764/25568 ( 30%)], Train Loss: 0.66497\nEpoch: 00 [ 7804/25568 ( 31%)], Train Loss: 0.66416\nEpoch: 00 [ 7844/25568 ( 31%)], Train Loss: 0.66349\nEpoch: 00 [ 7884/25568 ( 31%)], Train Loss: 0.66242\nEpoch: 00 [ 7924/25568 ( 31%)], Train Loss: 0.66221\nEpoch: 00 [ 7964/25568 ( 31%)], Train Loss: 0.66219\nEpoch: 00 [ 8004/25568 ( 31%)], Train Loss: 0.66204\nEpoch: 00 [ 8044/25568 ( 31%)], Train Loss: 0.66060\nEpoch: 00 [ 8084/25568 ( 32%)], Train Loss: 0.65950\nEpoch: 00 [ 8124/25568 ( 32%)], Train Loss: 0.65808\nEpoch: 00 [ 8164/25568 ( 32%)], Train Loss: 0.65742\nEpoch: 00 [ 8204/25568 ( 32%)], Train Loss: 0.65653\nEpoch: 00 [ 8244/25568 ( 32%)], Train Loss: 0.65689\nEpoch: 00 [ 8284/25568 ( 32%)], Train Loss: 0.65602\nEpoch: 00 [ 8324/25568 ( 33%)], Train Loss: 0.65404\nEpoch: 00 [ 8364/25568 ( 33%)], Train Loss: 0.65313\nEpoch: 00 [ 8404/25568 ( 33%)], Train Loss: 0.65182\nEpoch: 00 [ 8444/25568 ( 33%)], Train Loss: 0.65115\nEpoch: 00 [ 8484/25568 ( 33%)], Train Loss: 0.64967\nEpoch: 00 [ 8524/25568 ( 33%)], Train Loss: 0.64938\nEpoch: 00 [ 8564/25568 ( 33%)], Train Loss: 0.64796\nEpoch: 00 [ 8604/25568 ( 34%)], Train Loss: 0.64861\nEpoch: 00 [ 8644/25568 ( 34%)], Train Loss: 0.64731\nEpoch: 00 [ 8684/25568 ( 34%)], Train Loss: 0.64614\nEpoch: 00 [ 8724/25568 ( 34%)], Train Loss: 0.64535\nEpoch: 00 [ 8764/25568 ( 34%)], Train Loss: 0.64610\nEpoch: 00 [ 8804/25568 ( 34%)], Train Loss: 0.64674\nEpoch: 00 [ 8844/25568 ( 35%)], Train Loss: 0.64606\nEpoch: 00 [ 8884/25568 ( 35%)], Train Loss: 0.64607\nEpoch: 00 [ 8924/25568 ( 35%)], Train Loss: 0.64610\nEpoch: 00 [ 8964/25568 ( 35%)], Train Loss: 0.64536\nEpoch: 00 [ 9004/25568 ( 35%)], Train Loss: 0.64509\nEpoch: 00 [ 9044/25568 ( 35%)], Train Loss: 0.64462\nEpoch: 00 [ 9084/25568 ( 36%)], Train Loss: 0.64347\nEpoch: 00 [ 9124/25568 ( 36%)], Train Loss: 0.64245\nEpoch: 00 [ 9164/25568 ( 36%)], Train Loss: 0.64092\nEpoch: 00 [ 9204/25568 ( 36%)], Train Loss: 0.64049\nEpoch: 00 [ 9244/25568 ( 36%)], Train Loss: 0.63946\nEpoch: 00 [ 9284/25568 ( 36%)], Train Loss: 0.63847\nEpoch: 00 [ 9324/25568 ( 36%)], Train Loss: 0.63688\nEpoch: 00 [ 9364/25568 ( 37%)], Train Loss: 0.63539\nEpoch: 00 [ 9404/25568 ( 37%)], Train Loss: 0.63416\nEpoch: 00 [ 9444/25568 ( 37%)], Train Loss: 0.63402\nEpoch: 00 [ 9484/25568 ( 37%)], Train Loss: 0.63334\nEpoch: 00 [ 9524/25568 ( 37%)], Train Loss: 0.63208\nEpoch: 00 [ 9564/25568 ( 37%)], Train Loss: 0.63156\nEpoch: 00 [ 9604/25568 ( 38%)], Train Loss: 0.63018\nEpoch: 00 [ 9644/25568 ( 38%)], Train Loss: 0.63001\nEpoch: 00 [ 9684/25568 ( 38%)], Train Loss: 0.62977\nEpoch: 00 [ 9724/25568 ( 38%)], Train Loss: 0.62867\nEpoch: 00 [ 9764/25568 ( 38%)], Train Loss: 0.62822\nEpoch: 00 [ 9804/25568 ( 38%)], Train Loss: 0.62823\nEpoch: 00 [ 9844/25568 ( 39%)], Train Loss: 0.62758\nEpoch: 00 [ 9884/25568 ( 39%)], Train Loss: 0.62732\nEpoch: 00 [ 9924/25568 ( 39%)], Train Loss: 0.62659\nEpoch: 00 [ 9964/25568 ( 39%)], Train Loss: 0.62599\nEpoch: 00 [10004/25568 ( 39%)], Train Loss: 0.62534\nEpoch: 00 [10044/25568 ( 39%)], Train Loss: 0.62516\nEpoch: 00 [10084/25568 ( 39%)], Train Loss: 0.62481\nEpoch: 00 [10124/25568 ( 40%)], Train Loss: 0.62447\nEpoch: 00 [10164/25568 ( 40%)], Train Loss: 0.62327\nEpoch: 00 [10204/25568 ( 40%)], Train Loss: 0.62272\nEpoch: 00 [10244/25568 ( 40%)], Train Loss: 0.62217\nEpoch: 00 [10284/25568 ( 40%)], Train Loss: 0.62128\nEpoch: 00 [10324/25568 ( 40%)], Train Loss: 
0.62131\nEpoch: 00 [10364/25568 ( 41%)], Train Loss: 0.62075\nEpoch: 00 [10404/25568 ( 41%)], Train Loss: 0.62066\nEpoch: 00 [10444/25568 ( 41%)], Train Loss: 0.62015\nEpoch: 00 [10484/25568 ( 41%)], Train Loss: 0.61975\nEpoch: 00 [10524/25568 ( 41%)], Train Loss: 0.61875\nEpoch: 00 [10564/25568 ( 41%)], Train Loss: 0.61751\nEpoch: 00 [10604/25568 ( 41%)], Train Loss: 0.61724\nEpoch: 00 [10644/25568 ( 42%)], Train Loss: 0.61662\nEpoch: 00 [10684/25568 ( 42%)], Train Loss: 0.61555\nEpoch: 00 [10724/25568 ( 42%)], Train Loss: 0.61488\nEpoch: 00 [10764/25568 ( 42%)], Train Loss: 0.61433\nEpoch: 00 [10804/25568 ( 42%)], Train Loss: 0.61396\nEpoch: 00 [10844/25568 ( 42%)], Train Loss: 0.61245\nEpoch: 00 [10884/25568 ( 43%)], Train Loss: 0.61125\nEpoch: 00 [10924/25568 ( 43%)], Train Loss: 0.61085\nEpoch: 00 [10964/25568 ( 43%)], Train Loss: 0.61013\nEpoch: 00 [11004/25568 ( 43%)], Train Loss: 0.61005\nEpoch: 00 [11044/25568 ( 43%)], Train Loss: 0.60936\nEpoch: 00 [11084/25568 ( 43%)], Train Loss: 0.60868\nEpoch: 00 [11124/25568 ( 44%)], Train Loss: 0.60866\nEpoch: 00 [11164/25568 ( 44%)], Train Loss: 0.60795\nEpoch: 00 [11204/25568 ( 44%)], Train Loss: 0.60701\nEpoch: 00 [11244/25568 ( 44%)], Train Loss: 0.60684\nEpoch: 00 [11284/25568 ( 44%)], Train Loss: 0.60585\nEpoch: 00 [11324/25568 ( 44%)], Train Loss: 0.60467\nEpoch: 00 [11364/25568 ( 44%)], Train Loss: 0.60359\nEpoch: 00 [11404/25568 ( 45%)], Train Loss: 0.60380\nEpoch: 00 [11444/25568 ( 45%)], Train Loss: 0.60294\nEpoch: 00 [11484/25568 ( 45%)], Train Loss: 0.60299\nEpoch: 00 [11524/25568 ( 45%)], Train Loss: 0.60208\nEpoch: 00 [11564/25568 ( 45%)], Train Loss: 0.60107\nEpoch: 00 [11604/25568 ( 45%)], Train Loss: 0.59998\nEpoch: 00 [11644/25568 ( 46%)], Train Loss: 0.59938\nEpoch: 00 [11684/25568 ( 46%)], Train Loss: 0.59848\nEpoch: 00 [11724/25568 ( 46%)], Train Loss: 0.59777\nEpoch: 00 [11764/25568 ( 46%)], Train Loss: 0.59767\nEpoch: 00 [11804/25568 ( 46%)], Train Loss: 0.59687\nEpoch: 00 [11844/25568 ( 46%)], Train Loss: 0.59605\nEpoch: 00 [11884/25568 ( 46%)], Train Loss: 0.59519\nEpoch: 00 [11924/25568 ( 47%)], Train Loss: 0.59514\nEpoch: 00 [11964/25568 ( 47%)], Train Loss: 0.59446\nEpoch: 00 [12004/25568 ( 47%)], Train Loss: 0.59451\nEpoch: 00 [12044/25568 ( 47%)], Train Loss: 0.59397\nEpoch: 00 [12084/25568 ( 47%)], Train Loss: 0.59350\nEpoch: 00 [12124/25568 ( 47%)], Train Loss: 0.59330\nEpoch: 00 [12164/25568 ( 48%)], Train Loss: 0.59229\nEpoch: 00 [12204/25568 ( 48%)], Train Loss: 0.59220\nEpoch: 00 [12244/25568 ( 48%)], Train Loss: 0.59192\nEpoch: 00 [12284/25568 ( 48%)], Train Loss: 0.59138\nEpoch: 00 [12324/25568 ( 48%)], Train Loss: 0.59070\nEpoch: 00 [12364/25568 ( 48%)], Train Loss: 0.59000\nEpoch: 00 [12404/25568 ( 49%)], Train Loss: 0.58974\nEpoch: 00 [12444/25568 ( 49%)], Train Loss: 0.58953\nEpoch: 00 [12484/25568 ( 49%)], Train Loss: 0.58881\nEpoch: 00 [12524/25568 ( 49%)], Train Loss: 0.58814\nEpoch: 00 [12564/25568 ( 49%)], Train Loss: 0.58743\nEpoch: 00 [12604/25568 ( 49%)], Train Loss: 0.58631\nEpoch: 00 [12644/25568 ( 49%)], Train Loss: 0.58582\nEpoch: 00 [12684/25568 ( 50%)], Train Loss: 0.58512\nEpoch: 00 [12724/25568 ( 50%)], Train Loss: 0.58469\nEpoch: 00 [12764/25568 ( 50%)], Train Loss: 0.58461\nEpoch: 00 [12804/25568 ( 50%)], Train Loss: 0.58401\nEpoch: 00 [12844/25568 ( 50%)], Train Loss: 0.58366\nEpoch: 00 [12884/25568 ( 50%)], Train Loss: 0.58337\nEpoch: 00 [12924/25568 ( 51%)], Train Loss: 0.58276\nEpoch: 00 [12964/25568 ( 51%)], Train Loss: 0.58227\nEpoch: 00 [13004/25568 ( 51%)], Train Loss: 
0.58194\nEpoch: 00 [13044/25568 ( 51%)], Train Loss: 0.58151\nEpoch: 00 [13084/25568 ( 51%)], Train Loss: 0.58082\nEpoch: 00 [13124/25568 ( 51%)], Train Loss: 0.58035\nEpoch: 00 [13164/25568 ( 51%)], Train Loss: 0.58047\nEpoch: 00 [13204/25568 ( 52%)], Train Loss: 0.57977\nEpoch: 00 [13244/25568 ( 52%)], Train Loss: 0.57888\nEpoch: 00 [13284/25568 ( 52%)], Train Loss: 0.57791\nEpoch: 00 [13324/25568 ( 52%)], Train Loss: 0.57715\nEpoch: 00 [13364/25568 ( 52%)], Train Loss: 0.57649\nEpoch: 00 [13404/25568 ( 52%)], Train Loss: 0.57561\nEpoch: 00 [13444/25568 ( 53%)], Train Loss: 0.57475\nEpoch: 00 [13484/25568 ( 53%)], Train Loss: 0.57497\nEpoch: 00 [13524/25568 ( 53%)], Train Loss: 0.57453\nEpoch: 00 [13564/25568 ( 53%)], Train Loss: 0.57401\nEpoch: 00 [13604/25568 ( 53%)], Train Loss: 0.57384\nEpoch: 00 [13644/25568 ( 53%)], Train Loss: 0.57295\nEpoch: 00 [13684/25568 ( 54%)], Train Loss: 0.57234\nEpoch: 00 [13724/25568 ( 54%)], Train Loss: 0.57235\nEpoch: 00 [13764/25568 ( 54%)], Train Loss: 0.57120\nEpoch: 00 [13804/25568 ( 54%)], Train Loss: 0.57136\nEpoch: 00 [13844/25568 ( 54%)], Train Loss: 0.57103\nEpoch: 00 [13884/25568 ( 54%)], Train Loss: 0.57032\nEpoch: 00 [13924/25568 ( 54%)], Train Loss: 0.56945\nEpoch: 00 [13964/25568 ( 55%)], Train Loss: 0.56892\nEpoch: 00 [14004/25568 ( 55%)], Train Loss: 0.56846\nEpoch: 00 [14044/25568 ( 55%)], Train Loss: 0.56786\nEpoch: 00 [14084/25568 ( 55%)], Train Loss: 0.56676\nEpoch: 00 [14124/25568 ( 55%)], Train Loss: 0.56709\nEpoch: 00 [14164/25568 ( 55%)], Train Loss: 0.56695\nEpoch: 00 [14204/25568 ( 56%)], Train Loss: 0.56658\nEpoch: 00 [14244/25568 ( 56%)], Train Loss: 0.56683\nEpoch: 00 [14284/25568 ( 56%)], Train Loss: 0.56671\nEpoch: 00 [14324/25568 ( 56%)], Train Loss: 0.56585\nEpoch: 00 [14364/25568 ( 56%)], Train Loss: 0.56507\nEpoch: 00 [14404/25568 ( 56%)], Train Loss: 0.56429\nEpoch: 00 [14444/25568 ( 56%)], Train Loss: 0.56429\nEpoch: 00 [14484/25568 ( 57%)], Train Loss: 0.56389\nEpoch: 00 [14524/25568 ( 57%)], Train Loss: 0.56336\nEpoch: 00 [14564/25568 ( 57%)], Train Loss: 0.56346\nEpoch: 00 [14604/25568 ( 57%)], Train Loss: 0.56343\nEpoch: 00 [14644/25568 ( 57%)], Train Loss: 0.56304\nEpoch: 00 [14684/25568 ( 57%)], Train Loss: 0.56311\nEpoch: 00 [14724/25568 ( 58%)], Train Loss: 0.56331\nEpoch: 00 [14764/25568 ( 58%)], Train Loss: 0.56224\nEpoch: 00 [14804/25568 ( 58%)], Train Loss: 0.56205\nEpoch: 00 [14844/25568 ( 58%)], Train Loss: 0.56189\nEpoch: 00 [14884/25568 ( 58%)], Train Loss: 0.56152\nEpoch: 00 [14924/25568 ( 58%)], Train Loss: 0.56092\nEpoch: 00 [14964/25568 ( 59%)], Train Loss: 0.56065\nEpoch: 00 [15004/25568 ( 59%)], Train Loss: 0.55986\nEpoch: 00 [15044/25568 ( 59%)], Train Loss: 0.55930\nEpoch: 00 [15084/25568 ( 59%)], Train Loss: 0.55855\nEpoch: 00 [15124/25568 ( 59%)], Train Loss: 0.55757\nEpoch: 00 [15164/25568 ( 59%)], Train Loss: 0.55762\nEpoch: 00 [15204/25568 ( 59%)], Train Loss: 0.55748\nEpoch: 00 [15244/25568 ( 60%)], Train Loss: 0.55666\nEpoch: 00 [15284/25568 ( 60%)], Train Loss: 0.55650\nEpoch: 00 [15324/25568 ( 60%)], Train Loss: 0.55662\nEpoch: 00 [15364/25568 ( 60%)], Train Loss: 0.55584\nEpoch: 00 [15404/25568 ( 60%)], Train Loss: 0.55600\nEpoch: 00 [15444/25568 ( 60%)], Train Loss: 0.55537\nEpoch: 00 [15484/25568 ( 61%)], Train Loss: 0.55505\nEpoch: 00 [15524/25568 ( 61%)], Train Loss: 0.55465\nEpoch: 00 [15564/25568 ( 61%)], Train Loss: 0.55397\nEpoch: 00 [15604/25568 ( 61%)], Train Loss: 0.55364\nEpoch: 00 [15644/25568 ( 61%)], Train Loss: 0.55322\nEpoch: 00 [15684/25568 ( 61%)], Train Loss: 
0.55291\nEpoch: 00 [15724/25568 ( 61%)], Train Loss: 0.55191\nEpoch: 00 [15764/25568 ( 62%)], Train Loss: 0.55141\nEpoch: 00 [15804/25568 ( 62%)], Train Loss: 0.55082\nEpoch: 00 [15844/25568 ( 62%)], Train Loss: 0.55053\nEpoch: 00 [15884/25568 ( 62%)], Train Loss: 0.55025\nEpoch: 00 [15924/25568 ( 62%)], Train Loss: 0.54940\nEpoch: 00 [15964/25568 ( 62%)], Train Loss: 0.54880\nEpoch: 00 [16004/25568 ( 63%)], Train Loss: 0.54805\nEpoch: 00 [16044/25568 ( 63%)], Train Loss: 0.54805\nEpoch: 00 [16084/25568 ( 63%)], Train Loss: 0.54787\nEpoch: 00 [16124/25568 ( 63%)], Train Loss: 0.54754\nEpoch: 00 [16164/25568 ( 63%)], Train Loss: 0.54692\nEpoch: 00 [16204/25568 ( 63%)], Train Loss: 0.54662\nEpoch: 00 [16244/25568 ( 64%)], Train Loss: 0.54605\nEpoch: 00 [16284/25568 ( 64%)], Train Loss: 0.54629\nEpoch: 00 [16324/25568 ( 64%)], Train Loss: 0.54600\nEpoch: 00 [16364/25568 ( 64%)], Train Loss: 0.54568\nEpoch: 00 [16404/25568 ( 64%)], Train Loss: 0.54516\nEpoch: 00 [16444/25568 ( 64%)], Train Loss: 0.54450\nEpoch: 00 [16484/25568 ( 64%)], Train Loss: 0.54434\nEpoch: 00 [16524/25568 ( 65%)], Train Loss: 0.54351\nEpoch: 00 [16564/25568 ( 65%)], Train Loss: 0.54281\nEpoch: 00 [16604/25568 ( 65%)], Train Loss: 0.54299\nEpoch: 00 [16644/25568 ( 65%)], Train Loss: 0.54281\nEpoch: 00 [16684/25568 ( 65%)], Train Loss: 0.54255\nEpoch: 00 [16724/25568 ( 65%)], Train Loss: 0.54240\nEpoch: 00 [16764/25568 ( 66%)], Train Loss: 0.54165\nEpoch: 00 [16804/25568 ( 66%)], Train Loss: 0.54122\nEpoch: 00 [16844/25568 ( 66%)], Train Loss: 0.54077\nEpoch: 00 [16884/25568 ( 66%)], Train Loss: 0.54066\nEpoch: 00 [16924/25568 ( 66%)], Train Loss: 0.54049\nEpoch: 00 [16964/25568 ( 66%)], Train Loss: 0.53980\nEpoch: 00 [17004/25568 ( 67%)], Train Loss: 0.54038\nEpoch: 00 [17044/25568 ( 67%)], Train Loss: 0.54020\nEpoch: 00 [17084/25568 ( 67%)], Train Loss: 0.53999\nEpoch: 00 [17124/25568 ( 67%)], Train Loss: 0.53957\nEpoch: 00 [17164/25568 ( 67%)], Train Loss: 0.53925\nEpoch: 00 [17204/25568 ( 67%)], Train Loss: 0.53873\nEpoch: 00 [17244/25568 ( 67%)], Train Loss: 0.53858\nEpoch: 00 [17284/25568 ( 68%)], Train Loss: 0.53840\nEpoch: 00 [17324/25568 ( 68%)], Train Loss: 0.53808\nEpoch: 00 [17364/25568 ( 68%)], Train Loss: 0.53784\nEpoch: 00 [17404/25568 ( 68%)], Train Loss: 0.53752\nEpoch: 00 [17444/25568 ( 68%)], Train Loss: 0.53688\nEpoch: 00 [17484/25568 ( 68%)], Train Loss: 0.53604\nEpoch: 00 [17524/25568 ( 69%)], Train Loss: 0.53604\nEpoch: 00 [17564/25568 ( 69%)], Train Loss: 0.53584\nEpoch: 00 [17604/25568 ( 69%)], Train Loss: 0.53608\nEpoch: 00 [17644/25568 ( 69%)], Train Loss: 0.53607\nEpoch: 00 [17684/25568 ( 69%)], Train Loss: 0.53601\nEpoch: 00 [17724/25568 ( 69%)], Train Loss: 0.53579\nEpoch: 00 [17764/25568 ( 69%)], Train Loss: 0.53511\nEpoch: 00 [17804/25568 ( 70%)], Train Loss: 0.53499\nEpoch: 00 [17844/25568 ( 70%)], Train Loss: 0.53475\nEpoch: 00 [17884/25568 ( 70%)], Train Loss: 0.53427\nEpoch: 00 [17924/25568 ( 70%)], Train Loss: 0.53402\nEpoch: 00 [17964/25568 ( 70%)], Train Loss: 0.53405\nEpoch: 00 [18004/25568 ( 70%)], Train Loss: 0.53355\nEpoch: 00 [18044/25568 ( 71%)], Train Loss: 0.53344\nEpoch: 00 [18084/25568 ( 71%)], Train Loss: 0.53311\nEpoch: 00 [18124/25568 ( 71%)], Train Loss: 0.53251\nEpoch: 00 [18164/25568 ( 71%)], Train Loss: 0.53276\nEpoch: 00 [18204/25568 ( 71%)], Train Loss: 0.53219\nEpoch: 00 [18244/25568 ( 71%)], Train Loss: 0.53186\nEpoch: 00 [18284/25568 ( 72%)], Train Loss: 0.53137\nEpoch: 00 [18324/25568 ( 72%)], Train Loss: 0.53130\nEpoch: 00 [18364/25568 ( 72%)], Train Loss: 
0.53141\nEpoch: 00 [18404/25568 ( 72%)], Train Loss: 0.53126\nEpoch: 00 [18444/25568 ( 72%)], Train Loss: 0.53080\nEpoch: 00 [18484/25568 ( 72%)], Train Loss: 0.53078\nEpoch: 00 [18524/25568 ( 72%)], Train Loss: 0.53048\nEpoch: 00 [18564/25568 ( 73%)], Train Loss: 0.52993\nEpoch: 00 [18604/25568 ( 73%)], Train Loss: 0.52948\nEpoch: 00 [18644/25568 ( 73%)], Train Loss: 0.52910\nEpoch: 00 [18684/25568 ( 73%)], Train Loss: 0.52891\nEpoch: 00 [18724/25568 ( 73%)], Train Loss: 0.52868\nEpoch: 00 [18764/25568 ( 73%)], Train Loss: 0.52836\nEpoch: 00 [18804/25568 ( 74%)], Train Loss: 0.52816\nEpoch: 00 [18844/25568 ( 74%)], Train Loss: 0.52798\nEpoch: 00 [18884/25568 ( 74%)], Train Loss: 0.52757\nEpoch: 00 [18924/25568 ( 74%)], Train Loss: 0.52757\nEpoch: 00 [18964/25568 ( 74%)], Train Loss: 0.52705\nEpoch: 00 [19004/25568 ( 74%)], Train Loss: 0.52685\nEpoch: 00 [19044/25568 ( 74%)], Train Loss: 0.52677\nEpoch: 00 [19084/25568 ( 75%)], Train Loss: 0.52660\nEpoch: 00 [19124/25568 ( 75%)], Train Loss: 0.52626\nEpoch: 00 [19164/25568 ( 75%)], Train Loss: 0.52606\nEpoch: 00 [19204/25568 ( 75%)], Train Loss: 0.52595\nEpoch: 00 [19244/25568 ( 75%)], Train Loss: 0.52551\nEpoch: 00 [19284/25568 ( 75%)], Train Loss: 0.52540\nEpoch: 00 [19324/25568 ( 76%)], Train Loss: 0.52520\nEpoch: 00 [19364/25568 ( 76%)], Train Loss: 0.52445\nEpoch: 00 [19404/25568 ( 76%)], Train Loss: 0.52435\nEpoch: 00 [19444/25568 ( 76%)], Train Loss: 0.52398\nEpoch: 00 [19484/25568 ( 76%)], Train Loss: 0.52382\nEpoch: 00 [19524/25568 ( 76%)], Train Loss: 0.52357\nEpoch: 00 [19564/25568 ( 77%)], Train Loss: 0.52309\nEpoch: 00 [19604/25568 ( 77%)], Train Loss: 0.52264\nEpoch: 00 [19644/25568 ( 77%)], Train Loss: 0.52241\nEpoch: 00 [19684/25568 ( 77%)], Train Loss: 0.52195\nEpoch: 00 [19724/25568 ( 77%)], Train Loss: 0.52187\nEpoch: 00 [19764/25568 ( 77%)], Train Loss: 0.52162\nEpoch: 00 [19804/25568 ( 77%)], Train Loss: 0.52116\nEpoch: 00 [19844/25568 ( 78%)], Train Loss: 0.52117\nEpoch: 00 [19884/25568 ( 78%)], Train Loss: 0.52128\nEpoch: 00 [19924/25568 ( 78%)], Train Loss: 0.52148\nEpoch: 00 [19964/25568 ( 78%)], Train Loss: 0.52133\nEpoch: 00 [20004/25568 ( 78%)], Train Loss: 0.52088\nEpoch: 00 [20044/25568 ( 78%)], Train Loss: 0.52058\nEpoch: 00 [20084/25568 ( 79%)], Train Loss: 0.52058\nEpoch: 00 [20124/25568 ( 79%)], Train Loss: 0.52030\nEpoch: 00 [20164/25568 ( 79%)], Train Loss: 0.52006\nEpoch: 00 [20204/25568 ( 79%)], Train Loss: 0.51975\nEpoch: 00 [20244/25568 ( 79%)], Train Loss: 0.51944\nEpoch: 00 [20284/25568 ( 79%)], Train Loss: 0.51932\nEpoch: 00 [20324/25568 ( 79%)], Train Loss: 0.51882\nEpoch: 00 [20364/25568 ( 80%)], Train Loss: 0.51858\nEpoch: 00 [20404/25568 ( 80%)], Train Loss: 0.51838\nEpoch: 00 [20444/25568 ( 80%)], Train Loss: 0.51798\nEpoch: 00 [20484/25568 ( 80%)], Train Loss: 0.51760\nEpoch: 00 [20524/25568 ( 80%)], Train Loss: 0.51708\nEpoch: 00 [20564/25568 ( 80%)], Train Loss: 0.51667\nEpoch: 00 [20604/25568 ( 81%)], Train Loss: 0.51646\nEpoch: 00 [20644/25568 ( 81%)], Train Loss: 0.51632\nEpoch: 00 [20684/25568 ( 81%)], Train Loss: 0.51631\nEpoch: 00 [20724/25568 ( 81%)], Train Loss: 0.51577\nEpoch: 00 [20764/25568 ( 81%)], Train Loss: 0.51560\nEpoch: 00 [20804/25568 ( 81%)], Train Loss: 0.51517\nEpoch: 00 [20844/25568 ( 82%)], Train Loss: 0.51527\nEpoch: 00 [20884/25568 ( 82%)], Train Loss: 0.51495\nEpoch: 00 [20924/25568 ( 82%)], Train Loss: 0.51435\nEpoch: 00 [20964/25568 ( 82%)], Train Loss: 0.51413\nEpoch: 00 [21004/25568 ( 82%)], Train Loss: 0.51356\nEpoch: 00 [21044/25568 ( 82%)], Train Loss: 
0.51317\nEpoch: 00 [21084/25568 ( 82%)], Train Loss: 0.51268\nEpoch: 00 [21124/25568 ( 83%)], Train Loss: 0.51232\nEpoch: 00 [21164/25568 ( 83%)], Train Loss: 0.51177\nEpoch: 00 [21204/25568 ( 83%)], Train Loss: 0.51147\nEpoch: 00 [21244/25568 ( 83%)], Train Loss: 0.51144\nEpoch: 00 [21284/25568 ( 83%)], Train Loss: 0.51122\nEpoch: 00 [21324/25568 ( 83%)], Train Loss: 0.51095\nEpoch: 00 [21364/25568 ( 84%)], Train Loss: 0.51058\nEpoch: 00 [21404/25568 ( 84%)], Train Loss: 0.50987\nEpoch: 00 [21444/25568 ( 84%)], Train Loss: 0.50945\nEpoch: 00 [21484/25568 ( 84%)], Train Loss: 0.50925\nEpoch: 00 [21524/25568 ( 84%)], Train Loss: 0.50895\nEpoch: 00 [21564/25568 ( 84%)], Train Loss: 0.50878\nEpoch: 00 [21604/25568 ( 84%)], Train Loss: 0.50841\nEpoch: 00 [21644/25568 ( 85%)], Train Loss: 0.50788\nEpoch: 00 [21684/25568 ( 85%)], Train Loss: 0.50737\nEpoch: 00 [21724/25568 ( 85%)], Train Loss: 0.50694\nEpoch: 00 [21764/25568 ( 85%)], Train Loss: 0.50668\nEpoch: 00 [21804/25568 ( 85%)], Train Loss: 0.50618\nEpoch: 00 [21844/25568 ( 85%)], Train Loss: 0.50583\nEpoch: 00 [21884/25568 ( 86%)], Train Loss: 0.50599\nEpoch: 00 [21924/25568 ( 86%)], Train Loss: 0.50579\nEpoch: 00 [21964/25568 ( 86%)], Train Loss: 0.50558\nEpoch: 00 [22004/25568 ( 86%)], Train Loss: 0.50568\nEpoch: 00 [22044/25568 ( 86%)], Train Loss: 0.50534\nEpoch: 00 [22084/25568 ( 86%)], Train Loss: 0.50519\nEpoch: 00 [22124/25568 ( 87%)], Train Loss: 0.50482\nEpoch: 00 [22164/25568 ( 87%)], Train Loss: 0.50460\nEpoch: 00 [22204/25568 ( 87%)], Train Loss: 0.50431\nEpoch: 00 [22244/25568 ( 87%)], Train Loss: 0.50419\nEpoch: 00 [22284/25568 ( 87%)], Train Loss: 0.50399\nEpoch: 00 [22324/25568 ( 87%)], Train Loss: 0.50415\nEpoch: 00 [22364/25568 ( 87%)], Train Loss: 0.50359\nEpoch: 00 [22404/25568 ( 88%)], Train Loss: 0.50327\nEpoch: 00 [22444/25568 ( 88%)], Train Loss: 0.50296\nEpoch: 00 [22484/25568 ( 88%)], Train Loss: 0.50265\nEpoch: 00 [22524/25568 ( 88%)], Train Loss: 0.50262\nEpoch: 00 [22564/25568 ( 88%)], Train Loss: 0.50238\nEpoch: 00 [22604/25568 ( 88%)], Train Loss: 0.50258\nEpoch: 00 [22644/25568 ( 89%)], Train Loss: 0.50226\nEpoch: 00 [22684/25568 ( 89%)], Train Loss: 0.50180\nEpoch: 00 [22724/25568 ( 89%)], Train Loss: 0.50147\nEpoch: 00 [22764/25568 ( 89%)], Train Loss: 0.50162\nEpoch: 00 [22804/25568 ( 89%)], Train Loss: 0.50125\nEpoch: 00 [22844/25568 ( 89%)], Train Loss: 0.50093\nEpoch: 00 [22884/25568 ( 90%)], Train Loss: 0.50053\nEpoch: 00 [22924/25568 ( 90%)], Train Loss: 0.50037\nEpoch: 00 [22964/25568 ( 90%)], Train Loss: 0.50062\nEpoch: 00 [23004/25568 ( 90%)], Train Loss: 0.50029\nEpoch: 00 [23044/25568 ( 90%)], Train Loss: 0.49987\nEpoch: 00 [23084/25568 ( 90%)], Train Loss: 0.49953\nEpoch: 00 [23124/25568 ( 90%)], Train Loss: 0.49909\nEpoch: 00 [23164/25568 ( 91%)], Train Loss: 0.49897\nEpoch: 00 [23204/25568 ( 91%)], Train Loss: 0.49864\nEpoch: 00 [23244/25568 ( 91%)], Train Loss: 0.49841\nEpoch: 00 [23284/25568 ( 91%)], Train Loss: 0.49814\nEpoch: 00 [23324/25568 ( 91%)], Train Loss: 0.49784\nEpoch: 00 [23364/25568 ( 91%)], Train Loss: 0.49771\nEpoch: 00 [23404/25568 ( 92%)], Train Loss: 0.49742\nEpoch: 00 [23444/25568 ( 92%)], Train Loss: 0.49720\nEpoch: 00 [23484/25568 ( 92%)], Train Loss: 0.49695\nEpoch: 00 [23524/25568 ( 92%)], Train Loss: 0.49650\nEpoch: 00 [23564/25568 ( 92%)], Train Loss: 0.49624\nEpoch: 00 [23604/25568 ( 92%)], Train Loss: 0.49568\nEpoch: 00 [23644/25568 ( 92%)], Train Loss: 0.49522\nEpoch: 00 [23684/25568 ( 93%)], Train Loss: 0.49536\nEpoch: 00 [23724/25568 ( 93%)], Train Loss: 
0.49491\nEpoch: 00 [23764/25568 ( 93%)], Train Loss: 0.49464\nEpoch: 00 [23804/25568 ( 93%)], Train Loss: 0.49455\nEpoch: 00 [23844/25568 ( 93%)], Train Loss: 0.49439\nEpoch: 00 [23884/25568 ( 93%)], Train Loss: 0.49386\nEpoch: 00 [23924/25568 ( 94%)], Train Loss: 0.49347\nEpoch: 00 [23964/25568 ( 94%)], Train Loss: 0.49317\nEpoch: 00 [24004/25568 ( 94%)], Train Loss: 0.49276\nEpoch: 00 [24044/25568 ( 94%)], Train Loss: 0.49256\nEpoch: 00 [24084/25568 ( 94%)], Train Loss: 0.49223\nEpoch: 00 [24124/25568 ( 94%)], Train Loss: 0.49190\nEpoch: 00 [24164/25568 ( 95%)], Train Loss: 0.49161\nEpoch: 00 [24204/25568 ( 95%)], Train Loss: 0.49120\nEpoch: 00 [24244/25568 ( 95%)], Train Loss: 0.49086\nEpoch: 00 [24284/25568 ( 95%)], Train Loss: 0.49075\nEpoch: 00 [24324/25568 ( 95%)], Train Loss: 0.49046\nEpoch: 00 [24364/25568 ( 95%)], Train Loss: 0.49024\nEpoch: 00 [24404/25568 ( 95%)], Train Loss: 0.49006\nEpoch: 00 [24444/25568 ( 96%)], Train Loss: 0.48999\nEpoch: 00 [24484/25568 ( 96%)], Train Loss: 0.48998\nEpoch: 00 [24524/25568 ( 96%)], Train Loss: 0.48964\nEpoch: 00 [24564/25568 ( 96%)], Train Loss: 0.48929\nEpoch: 00 [24604/25568 ( 96%)], Train Loss: 0.48914\nEpoch: 00 [24644/25568 ( 96%)], Train Loss: 0.48877\nEpoch: 00 [24684/25568 ( 97%)], Train Loss: 0.48858\nEpoch: 00 [24724/25568 ( 97%)], Train Loss: 0.48847\nEpoch: 00 [24764/25568 ( 97%)], Train Loss: 0.48827\nEpoch: 00 [24804/25568 ( 97%)], Train Loss: 0.48835\nEpoch: 00 [24844/25568 ( 97%)], Train Loss: 0.48812\nEpoch: 00 [24884/25568 ( 97%)], Train Loss: 0.48782\nEpoch: 00 [24924/25568 ( 97%)], Train Loss: 0.48742\nEpoch: 00 [24964/25568 ( 98%)], Train Loss: 0.48749\nEpoch: 00 [25004/25568 ( 98%)], Train Loss: 0.48763\nEpoch: 00 [25044/25568 ( 98%)], Train Loss: 0.48741\nEpoch: 00 [25084/25568 ( 98%)], Train Loss: 0.48744\nEpoch: 00 [25124/25568 ( 98%)], Train Loss: 0.48732\nEpoch: 00 [25164/25568 ( 98%)], Train Loss: 0.48700\nEpoch: 00 [25204/25568 ( 99%)], Train Loss: 0.48666\nEpoch: 00 [25244/25568 ( 99%)], Train Loss: 0.48639\nEpoch: 00 [25284/25568 ( 99%)], Train Loss: 0.48594\nEpoch: 00 [25324/25568 ( 99%)], Train Loss: 0.48551\nEpoch: 00 [25364/25568 ( 99%)], Train Loss: 0.48536\nEpoch: 00 [25404/25568 ( 99%)], Train Loss: 0.48531\nEpoch: 00 [25444/25568 (100%)], Train Loss: 0.48514\nEpoch: 00 [25484/25568 (100%)], Train Loss: 0.48518\nEpoch: 00 [25524/25568 (100%)], Train Loss: 0.48487\nEpoch: 00 [25564/25568 (100%)], Train Loss: 0.48476\nEpoch: 00 [25568/25568 (100%)], Train Loss: 0.48478\n----Validation Results Summary----\nEpoch: [0] Valid Loss: 0.22463\nPost-processing 222 example predictions split into 2932 features.\nvalid jaccard: 0.6816324441324442\n0 Epoch, Best epoch was updated! Valid Loss: 0.22463\nSaving model checkpoint to output/checkpoint-fold-4-epoch-0.\n\nTotal Training Time: 3764.234701395035secs, Average Training Time per Epoch: 3764.234701395035secs.\nTotal Validation Time: 140.0582571029663secs, Average Validation Time per Epoch: 140.0582571029663secs.\n**************************************************\nFinal jacard scores, 5-fold: [0.67525 0.70943 0.68576 0.67927 0.68163]\nAverage jacard: 0.6862684912684913\n**************************************************\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb75089208109c4b10d541c7b1feb5fa1ace19da | 662,914 | ipynb | Jupyter Notebook | notebooks/MatplotLib lessons 2.ipynb | TolaAbiodun/2020-Pandas-tutorial_notes | dcea061fd0117fb8b56fb2470ecdef9d0b40f734 | [
"MIT"
]
| 1 | 2021-11-20T08:48:10.000Z | 2021-11-20T08:48:10.000Z | notebooks/MatplotLib lessons 2.ipynb | TolaAbiodun/2020-Pandas-tutorial_notes | dcea061fd0117fb8b56fb2470ecdef9d0b40f734 | [
"MIT"
]
| null | null | null | notebooks/MatplotLib lessons 2.ipynb | TolaAbiodun/2020-Pandas-tutorial_notes | dcea061fd0117fb8b56fb2470ecdef9d0b40f734 | [
"MIT"
]
| 3 | 2021-05-05T18:01:55.000Z | 2021-11-07T09:24:57.000Z | 662.914 | 135,328 | 0.948274 | [
[
[
"import matplotlib.pyplot as plt\nfrom matplotlib import style\nimport numpy as np\n%matplotlib inline",
"_____no_output_____"
],
[
"style.use('ggplot')",
"_____no_output_____"
],
[
"x = [20,30,50]\ny = [ 10,50,13]\n\nx2 = [4,10,47,]\ny2= [56,4,30]\n\nplt.plot(x, y, 'r', label='line one', linewidth=5)\nplt.plot(x2, y2, 'c', label ='line two', linewidth=5)\n\nplt.title('Interactive plot')\nplt.xlabel('X axis')\nplt.ylabel('Y axis')\n\nplt.legend()\n\n#plt.grid(True, color='k')\n\nplt.show()\n",
"_____no_output_____"
],
[
"#BAR GRAPH\n\nplt.bar([1,4,5,3,2],[4,7,8,10,11], label='Type 1')\nplt.bar([9,7,6,8,10],[3,6,9,11,15], label = 'Type 2', color='k')\nplt.legend()\nplt.xlabel('Bar Number')\nplt.ylabel('Bar Height')\nplt.title('Bar Graph')\nplt.show()",
"_____no_output_____"
]
],
[
[
"HISTOGRAM",
"_____no_output_____"
]
],
[
[
"#Bar plots have cateogrical variables while histogram has quantitative variables\n\npopulation_ages = [22,34,45,78,23,65,47,98,70,56,54,87,23,54,31,35,\n 64,76,87,80,60,73,47,63,79,52,75,64,51,46,83,62,36,74,63]\n\nfrom numpy.random import seed\nfrom numpy.random import randint\n\nseed(1)\n#generate some random integers\npopulation_ages_2 = randint(10,50,40)\n#print(population_ages_2)\n \n\nbins = [20,30,40,50,60,70,80,90,100]\n\nplt.hist(population_ages, bins, histtype='bar', color = 'm', rwidth = 0.5)\nplt.hist(population_ages_2, bins, histtype='bar', color = 'c', rwidth = 0.5)\n\nplt.xlabel('X asis')\nplt.ylabel('Y axis')\n\nplt.title('Histogram')\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
]
],
[
[
"AREA PLOT AND STACK PLOT",
"_____no_output_____"
]
],
[
[
"days = randint(1,5,5) \n\nseed(0)\n\nsleeping = randint(10,30,5)\neating = randint(40,60,5)\nworking = randint(70,100,5)\nplaying = randint(100,150,5)\n\nplt.plot([],[], color = 'm', label = 'sleeping', linewidth = 5)\nplt.plot([],[], color = 'c', label = 'eating', linewidth = 5)\nplt.plot([],[], color = 'r', label = 'working', linewidth = 5)\nplt.plot([],[], color = 'k', label = 'playing', linewidth = 5)\n\nplt.stackplot(days, sleeping, eating, working, playing, colors = ['m','c','r','k'])\nplt.legend()",
"_____no_output_____"
]
],
[
[
"PIE CHART",
"_____no_output_____"
]
],
[
[
"seed(0)\nslices = randint(20,100,5)\n\nactivities = ['balling','playing','sleeping','praying','eating']\n\ncols = ['c','m','r','b','y']\n\nplt.pie(slices,\n labels = activities,\n startangle = 90,\n shadow = True,\n colors = cols,\n autopct = '%.1f%%', #formats the percentage of the data given\n explode=(0,0.2,0,0,0.1)) #this is to explode the chart and takes positional argument\n\nplt.title('Pie Chart')\nplt.show()",
"_____no_output_____"
],
[
"#working with Multiple Plots\n\ndef f(t):\n return np.exp(-t) * np.cos(2*np.pi*t)\n\nt1 = np.arange(0.0,5.0,0.1)\nt2 = np.arange(0.0,6.0,0.4)\n\nplt.subplot(211)\nplt.plot(t1, f(t1),'bo',\n t2, f(t2))\n\nplt.subplot(212)\nplt.plot(t1, np.cos(2*np.pi*t1), color = 'k')\n\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"FURTHER PLOTTING IN MATPLOTLIB/PYLAB",
"_____no_output_____"
]
],
[
[
"from matplotlib import pylab\npylab.__version__",
"_____no_output_____"
],
[
"import numpy as np\nx = np.linspace(0,10,25)\ny = x*x+2\nprint()\nprint(x)\nprint()\nprint(y)\n#print(np.array([x,y]).reshape(25,2)) # to join the array together",
"\n[ 0. 0.41666667 0.83333333 1.25 1.66666667 2.08333333\n 2.5 2.91666667 3.33333333 3.75 4.16666667 4.58333333\n 5. 5.41666667 5.83333333 6.25 6.66666667 7.08333333\n 7.5 7.91666667 8.33333333 8.75 9.16666667 9.58333333\n 10. ]\n\n[ 2. 2.17361111 2.69444444 3.5625 4.77777778\n 6.34027778 8.25 10.50694444 13.11111111 16.0625\n 19.36111111 23.00694444 27. 31.34027778 36.02777778\n 41.0625 46.44444444 52.17361111 58.25 64.67361111\n 71.44444444 78.5625 86.02777778 93.84027778 102. ]\n"
],
[
"pylab.plot(x,y, 'r') #'r' stands for red",
"_____no_output_____"
],
[
"#drawing a subgraph\npylab.subplot(1,2,1) #rows, columns and indexes\npylab.plot(x,y, 'b--') \n\npylab.subplot(1,2,2)\npylab.plot(y,x, 'g*-')",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use('ggplot')\n",
"_____no_output_____"
],
[
"fig = plt.figure()\n\nax = fig.add_axes([0.5,0.1,0.8,0.8]) #this controls the left,bottom,width and height of the canvas\n\nax.plot(x,y, 'r')",
"_____no_output_____"
],
[
"#we can also draw subgraphs\n\nfig, ax = plt.subplots(nrows=1, ncols=2)\nfor ax in ax:\n ax.plot(x,y, 'r')",
"_____no_output_____"
],
[
"#we can drw a picture or a graph inside of another graph\nfig = plt.figure()\nax1 = fig.add_axes([0.5,0.1,0.8,0.8]) #Big axes\nax2 = fig.add_axes([0.6,0.5,0.35,0.3]) #small canvas\n\nax1.plot(x,y,'r')\nax2.plot(y,x, 'g')\n",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(dpi=100)\nax.set_xlabel('X-axis')\nax.set_ylabel('Y-axis')\nax.set_title('tutorial plots')\n\n#ax.plot(x,y, 'r')\n\nax.plot(x,x**2)\nax.plot(x, x**3)\n\n\n#ax.legend(['label 1', 'label 2'])\nax.legend(['y = x**2', 'y = x**3'], loc=2) #plotting the legend",
"_____no_output_____"
],
[
"#you can also set other properties such as line color, transparency and more\n\nfig, ax = plt.subplots(dpi=100)\nax.plot(x, x**2, 'r', alpha=0.5) #alpha sets the line colour transparency\nax.plot(x, x+2, alpha=.5)\nax.plot(x, x+3, alpha=.5)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(dpi=100)\n#line width\nax.plot(x, x+1, 'b', lw=0.5 ) \nax.plot(x, x+2, 'b', lw=1.5)\nax.plot(x, x+3, 'b', lw=3)\nax.plot(x, x+4, 'b', lw=3.5)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(dpi=100)\nax.plot(x, x+1, 'b', lw=0.5, linestyle='-') \nax.plot(x, x+2, 'b', lw=1.5, linestyle='-.')\nax.plot(x, x+3, 'b', lw=3, linestyle=':')\nax.plot(x, x+4, 'b', lw=3.5, linestyle='-')",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(dpi=100)\nax.plot(x, x+1, 'b', lw=0.5 , marker='o', markersize=5, markerfacecolor='r') \nax.plot(x, x+2, 'b', lw=1.5, marker='+')\nax.plot(x, x+3, 'b', lw=3, marker='s')\nax.plot(x, x+4, 'b', lw=3.5, marker='1', markersize=10)",
"_____no_output_____"
]
],
[
[
"LIMITING OUR DATA",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1,2, figsize=(10,5))\nax[0].plot(x,x**2, x,x**3, lw=3)\n#ax[0].grid(True) this applies if we are not using ggplot\nax[1].plot(x,x**2, x,x**3, lw=3)\n\n#we set the x and y limit on the second plot\nax[1].set_ylim([0,60])\nax[1].set_xlim([2,5])",
"_____no_output_____"
]
],
[
[
"Other 2_d Graphs",
"_____no_output_____"
]
],
[
[
"n = np.array([0,1,2,3,4,5])\n\nfig, ax = plt.subplots(1,4, figsize=(16,5)) \nax[0].set_title('scatter')\nax[0].scatter(x, x + 0.25*np.random.randn(len(x)))\n\nax[1].set_title('step plot')\nax[1].step(n, n**2, lw=2, color='b')\n\nax[2].set_title('Bar')\nax[2].bar(n, n**2, align='center', color ='g', alpha=0.5)\n\nax[3].set_title('fill between')\nax[3].fill_between(x, x**2, x**3, color ='g', alpha=0.5)\n\nplt.show()",
"_____no_output_____"
],
[
"#Draw a Histogram '''Very important''\n\nx = np.random.randn(10000)\nfig, ax = plt.subplots(1,2, figsize=(12,4))\nax[0].set_title('Histogram')\nax[0].hist(x, color='g', alpha=0.8)\n\nax[1].set_title('Cumulative detailed histogram')\nax[1].hist(x, cumulative=True, bins=9)\n\nplt.show()",
"_____no_output_____"
],
[
"#draw a contour map\n\n#lets create some data where X and Y are coordinates and Z is the depth or height\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\n\ndelta = 0.0075\nx = np.arange(-3, 3, delta)\ny = np.arange(-2, 2, delta)\n\nX, Y = np.meshgrid(x,y)\nZ1 = np.exp(-X**2 - Y**2)\nZ2 = np.exp(-(-X - 1)**2 - (Y - 1)**2)\nZ = (Z1 - Z2)*2",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(dpi=100)\nCS = ax.contour(X,Y,Z) #CS is contour surface\nax.clabel(CS, inline=1, fontsize=10)\nax.set_title('Contour Map')",
"_____no_output_____"
]
],
[
[
"3 D MAPS",
"_____no_output_____"
]
],
[
[
"from mpl_toolkits.mplot3d.axes3d import Axes3D\n\nfig = plt.figure(figsize=(14,6), dpi=100)\n\n#Specify the 3D graphics to draw with projection='3d'\n\nax = fig.add_subplot(1,2,1, projection='3d')\nax.plot_surface(X, Y, Z, rstride=10, cstride=10, lw=0, color='c')\n\n\n",
"_____no_output_____"
],
[
"#write a program to create a pie chart of the popularity of programming languages\n\npopularity = [200,334,890,290,679,300,980] #No of users of programming languages\n\nprog_lang = ['Java', 'C#', 'C++', 'CSS', 'Java Script', 'Python', 'R']\n\nfig = plt.figure(figsize=(14,6), dpi=100)\n\nplt.pie(popularity,\n shadow = True, \n autopct= '%.f%%', startangle = 180,\n explode=[0,0,0,0,0,0,0.1],\n labels = prog_lang)\n\nplt.title('Popularity of Programming languages')\n\nplt.show()\n\n\n\n",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb75089967f100eaa49bf5a2cb637d8b03550bed | 3,677 | ipynb | Jupyter Notebook | Poll.ipynb | motorcitydre/Python-Homework | 522aa71bc189f5d37447ea927baf6861237bcb9a | [
"MIT"
]
| null | null | null | Poll.ipynb | motorcitydre/Python-Homework | 522aa71bc189f5d37447ea927baf6861237bcb9a | [
"MIT"
]
| null | null | null | Poll.ipynb | motorcitydre/Python-Homework | 522aa71bc189f5d37447ea927baf6861237bcb9a | [
"MIT"
]
| null | null | null | 24.677852 | 171 | 0.492521 | [
[
[
"#import items\nimport os\nimport csv",
"_____no_output_____"
],
[
"#Load files\ncsvpath = os.path.join(\"Resources\", \"election_data.csv\")\npathout = os.path.join(\"Resources\", \"Election Analysis\")",
"_____no_output_____"
],
[
"# Read the csv and convert it into a list of dictionaries\nwith open(file_to_load) as emp_data:\n reader = csv.DictReader(emp_data)",
"_____no_output_____"
],
[
"#Variables I need to use\nvotes = 0\nwinner_votes = 0\ntotal_candidates = 0\ngreatest_votes = [\"\", o]\ncandidate_options = []\ncandidate_votes{}",
"_____no_output_____"
],
[
"#read the data\nwith open(csvpath) as election_data:\n reader = csv.DictReader(election_data)",
"_____no_output_____"
],
[
"#Create loop to do work within to find the finish products\n for row in reader:\n votes = votes + 1\n total_candidates = row[\"Candidate\"]\n\n if row[\"Candiate not in candiate_options\"]:\n candidate_options.append(row[\"Candidate\"])\n candidate_votes[row[\"Candidate\"]] = 1\n\n else \n\n candidate_votes[row[\"Candidate\"]] = candidates_votes[row[\"Candidate\"]] + 1",
"_____no_output_____"
],
[
"#print information\n print()\n print()\n print()\n print(\"Election Results\")\n print(\"-------------------------\")\n print(\"Total Votes \" + str(votes))\n print(\"-------------------------\")",
"_____no_output_____"
],
[
"#results\n for candidate in candidate_votes:\n print(candidate + \" \" + str(round(((candidate_votes[candidate]/votes)*100))) + \"%\" + \" (\" + str(candidate_votes[candidate]) + \")\") \n candidate_results = (candidate + \" \" + str(round(((candidate_votes[candidate]/votes)*100))) + \"%\" + \" (\" + str(candidate_votes[candidate]) + \")\") \n \ncandidate_votes\n\nwinner = sorted(candidate_votes.items(), key=itemgetter(1), reverse=True)",
"_____no_output_____"
],
[
"#results\nprint(\"-------------------------\")\nprint(\"Winner: \" + str(winner[0]))\nprint(\"-------------------------\")",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb7526ee50a047b003e081d45f34c0fa8463c6ec | 9,566 | ipynb | Jupyter Notebook | novice/sql/04-calc.ipynb | UCL/2014-11-05-UCL | 0e9ae92e266f205ee1610d14d0e257b6f066803a | [
"CC-BY-3.0"
]
| null | null | null | novice/sql/04-calc.ipynb | UCL/2014-11-05-UCL | 0e9ae92e266f205ee1610d14d0e257b6f066803a | [
"CC-BY-3.0"
]
| null | null | null | novice/sql/04-calc.ipynb | UCL/2014-11-05-UCL | 0e9ae92e266f205ee1610d14d0e257b6f066803a | [
"CC-BY-3.0"
]
| null | null | null | 29.433846 | 151 | 0.494355 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb75465cd897ead81ddb05dc1b9d5d0232ba7ca7 | 215,996 | ipynb | Jupyter Notebook | Models/Neural Networks/Hourly_RNN_win_2.ipynb | juweins/Time-Series-Forecasting-Using-Recurrent-Neural-Networks | f2f689188b4339a206b7cfe6578e81619462f171 | [
"MIT"
]
| null | null | null | Models/Neural Networks/Hourly_RNN_win_2.ipynb | juweins/Time-Series-Forecasting-Using-Recurrent-Neural-Networks | f2f689188b4339a206b7cfe6578e81619462f171 | [
"MIT"
]
| null | null | null | Models/Neural Networks/Hourly_RNN_win_2.ipynb | juweins/Time-Series-Forecasting-Using-Recurrent-Neural-Networks | f2f689188b4339a206b7cfe6578e81619462f171 | [
"MIT"
]
| null | null | null | 226.648478 | 75,580 | 0.903753 | [
[
[
"# Gas Price Prediction with Recurrent Neural Networks (Hourly, Window 2)\n\nThis notebook contains the generic RNN model used in the thesis project. The experiment includes two extracted datasets of a predefined gas station. The first dataset contains the daily maximum prices, while the other contains data of hourly granularity. The datasets are tested with basic recurrent, LSTM and GRU neurons. Extensive grid-search has been performed to build a set of different models. The following excerpt displays the different settings. <br> <br> \n\n**Excerpt of experiment settings (Thesis):**\n> [...] For this reason, the **hidden layer size (4, 8 or 12 neurons)** and the window size were modifed in a systematic way. Modifying the number of hidden neurons helps in determining a suitable architecture of the network. Different **window sizes (1, 2, 3, 7)** represent the number of input neurons. Furthermore, the various window sizes should test the dataset for different long or short-term dependencies. [...] The number of **training iterations** were **50, 100 and 200** for the daily dataset and **10, 25, 50** for the hourly dataset.\n\n<br> \nMain parts of this notebook are adapted from **[2]**.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn.metrics as metrics\nfrom sklearn.preprocessing import StandardScaler\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Model Building",
"_____no_output_____"
],
[
"## 1. Data Loading\n\nIn the following example, a preprocessed gas price dataset is used. The data contains the hourly prices in the period from 06-2014 - 05-2016. The dataset has been extracted from **[1]**.",
"_____no_output_____"
]
],
[
[
"gas_price = pd.read_csv(\"../Data/First_station_hour.csv\", sep=\";\")",
"_____no_output_____"
],
[
"gas_price.head()",
"_____no_output_____"
],
[
"data_to_use = gas_price['e10'].values",
"_____no_output_____"
]
],
[
[
"## 2. Data Preprocessing\nAt this step, the input data gets scaled. Scaling supports model training. The following figure shows the scaled data.\nIn addtition to that, the dataset gets windowed. With sliding windows, squences of different length can be fed to the network for network tuning. Predictions are made based on theses sequences. By appling windowing, the original dataset gets shorten by a few observations.",
"_____no_output_____"
],
[
"#### Step 2.1: Data Scaling",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler()",
"_____no_output_____"
],
[
"scaled_data = scaler.fit_transform(data_to_use.reshape(-1, 1))",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,7), frameon=False, edgecolor='blue')\nplt.title('Scaled gas prices from July 2014 to May 2016')\nplt.xlabel('Hours')\nplt.ylabel('Scaled prices')\nplt.plot(scaled_data, label='Price data (hourly)')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Step 2.2: Windowing the dataset\nIn order to modify the widow size, the window_size parameter must be changed here **and** in the hyperparameter section. (see Step 3)",
"_____no_output_____"
]
],
[
[
"def window_data(data, window_size):\n \n X = []\n y = []\n \n i = 0\n while (i + window_size) <= len(data) - 1:\n X.append(data[i:i+window_size])\n y.append(data[i+window_size])\n \n i += 1\n assert len(X) == len(y)\n return X, y",
"_____no_output_____"
],
[
"X, y = window_data(scaled_data, 2)",
"_____no_output_____"
]
],
[
[
"#### Step 2.3 Train/test splitting\nThe windowed dataset gets divided into 80/20 (%)",
"_____no_output_____"
]
],
[
[
"split = int(len(X) * 0.8)\nX_train = np.array(X[:split])\ny_train = np.array(y[:split])\n\nX_test = np.array(X[split:])\ny_test = np.array(y[split:])",
"_____no_output_____"
],
[
"print(\"X_train size: {}\".format(len(X_train)))\nprint(\"y_train size: {}\".format(len(X_test)))\nprint(\"X_test size: {}\".format(len(y_train)))\nprint(\"y_test size: {}\".format(len(y_test)))",
"X_train size: 13296\ny_train size: 3324\nX_test size: 13296\ny_test size: 3324\n"
],
[
"print(\"X_train size: {}\".format(X_train.shape))\nprint(\"y_train size: {}\".format(y_train.shape))\nprint(\"X_test size: {}\".format(X_test.shape))\nprint(\"y_test size: {}\".format(y_test.shape))",
"X_train size: (13296, 2, 1)\ny_train size: (13296, 1)\nX_test size: (3324, 2, 1)\ny_test size: (3324, 1)\n"
]
],
[
[
"## 3. Network Definition",
"_____no_output_____"
],
[
"#### Hyperparameter definition",
"_____no_output_____"
]
],
[
[
"#Hyperparameters to change\nwindow_size = 2 \nhidden_layer_size = 4\nepochs = 10\nnumber_of_layers = 1\n\n#Fixed Hyperparameters\nbatch_size = 7\ngradient_clip_margin = 4\nlearning_rate = 0.001\nnumber_of_classes = 1\n",
"_____no_output_____"
]
],
[
[
"#### Output layer\nFor comparison of various networks, weight initialization is fixed. Therefore, the seed has been set to 2222.",
"_____no_output_____"
]
],
[
[
"def output_layer(lstm_output, in_size, out_size):\n \n x = lstm_output[:, -1, :]\n weights = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.05, seed=2222), name='output_layer_weights')\n bias = tf.Variable(tf.zeros([out_size]), name='output_layer_bias')\n output = tf.matmul(x, weights) + bias\n return output",
"_____no_output_____"
]
],
[
[
"#### Loss and optimization\nIn this function, the gradients are computed, adam optimizer and gradient clipping are getting applied. Furthermore, the loss function gets minimized.",
"_____no_output_____"
]
],
[
[
"def opt_loss(logits, targets, learning_rate, grad_clip_margin):\n \n losses = []\n for i in range(targets.get_shape()[0]):\n losses.append([(tf.pow(logits[i] - targets[i], 2))])\n \n loss = tf.losses.mean_squared_error(targets, logits)\n \n #Clipping the gradients\n gradients = tf.gradients(loss, tf.trainable_variables())\n clipper_, _ = tf.clip_by_global_norm(gradients, grad_clip_margin)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_optimizer = optimizer.apply_gradients(zip(gradients, tf.trainable_variables()))\n return loss, train_optimizer",
"_____no_output_____"
]
],
[
[
"#### Build network\nAt this point, the entire network (computation graph) is built. To completely exclude randomness, the random seed gets also fixed on graph level. <br> <br>",
"_____no_output_____"
]
],
[
[
"tf.reset_default_graph()\ntf.set_random_seed(1111)\n\n#Inputs\ninputs = tf.placeholder(tf.float32, [batch_size, window_size, 1], name='input_data')\ntargets = tf.placeholder(tf.float32, [batch_size, 1], name='targets')\ndrop_rate = tf.placeholder(tf.float32, name='drop_rate')\n\n#Build Network\n#\n# Replace the following signature [GRUcell()] with:\n#\n# -> LSTMCell() for LSTM\n# -> BasicRNNCell() for RNN\n# -> GRUCell() for GRU\n# \n# The arguments remain unchanged.\n\nlstm_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_layer_size, activation=tf.nn.elu) \nlstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm_cell,input_keep_prob=drop_rate)\ncell = tf.nn.rnn_cell.MultiRNNCell([lstm_dropout] * number_of_layers)\n\ninit_state = cell.zero_state(batch_size, tf.float32)\n\n\noutputs, states = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state)\n\nlogits = output_layer(outputs, hidden_layer_size, number_of_classes)\n\nloss, opt = opt_loss(logits, targets, learning_rate, gradient_clip_margin)",
"_____no_output_____"
]
],
[
[
"## 4. Network Training",
"_____no_output_____"
]
],
[
[
"session = tf.Session()",
"_____no_output_____"
],
[
"session.run(tf.global_variables_initializer())",
"_____no_output_____"
]
],
[
[
"#### 4.1. Model training\nAfter building the tf-graph, it is now possible to train the network. In order to do that, the computation graph gets called by session_run(). Placeholder are fed with the feed_dict argument.",
"_____no_output_____"
]
],
[
[
"step=0\n\n#global lists to save run-time statistics (loss and predictions)\nscores_per_epoch = []\nloss_per_epoch = []\n\nfor i in range(epochs):\n trained_scores = []\n epoch_loss = []\n ii = 0\n\n while(ii + batch_size) <= len(X_train):\n \n X_batch = X_train[ii:ii+batch_size]\n y_batch = y_train[ii:ii+batch_size]\n \n o, c, _ = session.run([logits, loss, opt], feed_dict={inputs:X_batch, targets:y_batch, drop_rate:1})\n \n epoch_loss.append(c)\n trained_scores.append(o)\n ii += batch_size\n step += 1\n #Add current statistics to global list \n scores_per_epoch.append(trained_scores)\n loss_per_epoch.append(np.mean(epoch_loss))\n \n \n if (i % 5) == 0:\n with session.as_default():\n \n print('Epoch {}/{}'.format(i, epochs), ' Current loss: {}'.format(np.mean(epoch_loss)))\n ",
"Epoch 0/10 Current loss: 0.2023065835237503\nEpoch 5/10 Current loss: 0.0673823431134224\n"
]
],
[
[
"#### 4.2. Plot of training score (loss per epoch)\nAs despicted in the figure, the network rapidly learns within the first few epochs. After that, the training performance is low.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16, 7))\nplt.plot(loss_per_epoch, label='Original data')",
"_____no_output_____"
]
],
[
[
"#### 4.3. Collect training data\nIn this loop, the most recent predictions on the training set are collected for later use.",
"_____no_output_____"
]
],
[
[
"sup =[]\nfor i in range(len(trained_scores)):\n for j in range(len(trained_scores[i])):\n sup.append(trained_scores[i][j])",
"_____no_output_____"
]
],
[
[
"#### 4.4. Model Test\nIn the cell below, the model validation process is performed. It is possible to predict short-term price movements with the help of rolling forecasts. By doing so, test data is fed to the trained model. The forecast results (based on batch and window size) get stored in the list tests.",
"_____no_output_____"
]
],
[
[
"tests = []\ni = 0\nwhile i+batch_size <= len(X_test):\n o = session.run([logits], feed_dict={inputs:X_test[i:i+batch_size], drop_rate:1.0}) #, model.drop_rate:1\n i += batch_size\n tests.append(o)",
"_____no_output_____"
]
],
[
[
"#### 4.5. Remove duplicate entries\nThe list tests contains multiple predictions for one observation due to batched mode. To get rid of these, the following operation is performed.",
"_____no_output_____"
]
],
[
[
"tests_new = []\nfor i in range(len(tests)):\n for j in range(len(tests[i][0])):\n tests_new.append(tests[i][0][j])",
"_____no_output_____"
]
],
[
[
"#### 4.6 Postprocess predictions\nThe cleansed list tests_new must be processed in order to plot the results correctly. For this reason, the predictions get inserted at the right index.",
"_____no_output_____"
]
],
[
[
"pos = (len(X_train))\nsize = len(X)-(batch_size-window_size+5)\ntest_results = []\nfor i in range(size):\n if i >= pos:\n test_results.append(tests_new[i-pos])\n else:\n test_results.append(None)",
"_____no_output_____"
]
],
[
[
"#### 4.7. Plot (scaled) results\nThe blue line shows the original data. The network seems to make good predictions. Due to batched mode, the last observations cannot be predicted.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16, 9))\nplt.title('Hourly Dataset')\nplt.xlabel('Pedicted Hours')\nplt.ylabel('Scaled Gas Prices')\nplt.plot(scaled_data, label='Original data')\nplt.plot(sup, label='Training data')\nplt.plot(test_results, label='Testing data')\nplt.legend()\nplt.show()\n\n",
"_____no_output_____"
]
],
[
[
"## 5. Analysis of Results",
"_____no_output_____"
]
],
[
[
"pred_rescaled = scaler.inverse_transform(tests_new, copy=None)",
"_____no_output_____"
],
[
"pred_rescaled_round = pred_rescaled.round()",
"_____no_output_____"
]
],
[
[
"#### 5.1. Plot rescaled results\nThe plot reveals that the network is able to predict the hourly data with deviations. The graphs further display that the network might have problems with the prediction of minima and maxima. Interestingly, the predictions show a spike into the right direction, but get adjusted after one step. Apart from that, rounding the values did not increase the accuracy.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16, 7))\nplt.plot(data_to_use[len(X_train)+1:(len(X_train)+37)], label='Original data', linestyle='-', color='k')\nplt.plot(pred_rescaled[:36], label='Test data')\nplt.plot(pred_rescaled_round[:36], label='Test data (round)')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Metrics\n\nThe following metrics are used:\n+ Mean Absolute Error **(MAE)**\n+ Mean Squared Error **(MSE)**\n+ Root Mean Squared Error **(RMSE)**\n+ Mean Abs. Percentage Error **(MAPE)**$*$\n+ Mean Percentage Error **(MPE)**\n\n*$*$ Function **mean_absolute_percentage_error** has been adapted from **[3]**. **mean_percentage_error** is based on this.*",
"_____no_output_____"
]
],
[
[
"def mean_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean( y_true != y_pred ) *100",
"_____no_output_____"
],
[
"def mean_absolute_percentage_error(y_true, y_pred): \n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100",
"_____no_output_____"
],
[
"def print_metrics(prediction, rnd):\n \n start = len(X_train)+1\n end = len(X)-5\n \n local_data = data_to_use[start:end] #Test-Set, extracted\n \n if rnd == True: #if rounded data is investigated\n \n #cast to int does not harm data as it is already rounded when passed to function; \n #cast is not necessary, but performed for completeness.\n prediction = prediction.astype(np.int64) \n prediction = prediction.reshape(local_data.shape)\n \n else:\n prediction = prediction.reshape(local_data.shape)\n local_data = local_data.astype(np.float64)\n \n mae = metrics.mean_absolute_error(local_data, prediction)#data_to_use[start:end]\n mse = metrics.mean_squared_error(local_data, prediction)\n msle = metrics.mean_squared_log_error(local_data, prediction)\n mpe = mean_percentage_error(local_data, prediction)\n mape = mean_absolute_percentage_error(local_data, prediction)\n \n print(\"Mean Absolute Error: \", mae, sep=\"\\t\\t\")\n print(\"Mean Squared Error: \", mse, sep=\"\\t\\t\")\n print(\"Root Mean Squared Error: \", np.sqrt(mse), sep=\"\\t\")\n print(\"Mean Abs. Percentage Error: \", mape, sep=\"\\t\")\n print(\"Mean Percentage Error: \", mpe, sep=\"\\t\\t\") ",
"_____no_output_____"
]
],
[
[
"#### 5.2. Print metrics",
"_____no_output_____"
]
],
[
[
"print_metrics(pred_rescaled, False)",
"Mean Absolute Error: \t\t2.000386316852443\nMean Squared Error: \t\t5.968028248621651\nRoot Mean Squared Error: \t2.442954819193685\nMean Abs. Percentage Error: \t1.6108583178522557\nMean Percentage Error: \t\t100.0\n"
],
[
"print_metrics(pred_rescaled_round, True)",
"Mean Absolute Error: \t\t2.0572634116937913\nMean Squared Error: \t\t6.382760699216395\nRoot Mean Squared Error: \t2.526412614601264\nMean Abs. Percentage Error: \t1.6536954263778734\nMean Percentage Error: \t\t88.33634719710669\n"
],
[
"session.close()",
"_____no_output_____"
]
],
[
[
"##### Dataset:",
"_____no_output_____"
],
[
"[1] Martin Kurz. *Historische Preisdaten*. 2017. Retrieved from https://creativecommons.tankerkoenig.de/ and licensed under CC-BY 4.0.",
"_____no_output_____"
],
[
"##### References:",
"_____no_output_____"
],
[
"[2] Luka Anicin. *tensorflow_lstm.ipynb*. 2017. URL: https://github.com/lucko515/tesla-stocks-prediction/blob/master/lstm_from_scratch_tensorflow.ipynb. *(visited on 02/28/2018)*",
"_____no_output_____"
],
[
"[3] Antonín Hoskovec. *Mean absolute percentage error (MAPE) in Scikit-learn*. 2017. URL: https://stats.stackexchange.com/questions/58391/mean-absolute-percentage-error-mape-in-scikit-learn/294069#294069 (visited on 02/28/2018)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
cb7555321db652368b67ea58bda3a93696ae7bcf | 221,495 | ipynb | Jupyter Notebook | Svenskt Kvinnobiografiskt lexikon part 5.ipynb | salgo60/open-data-examples | 05a16a92c53117ff23a330a3fa5914a33b19ff6a | [
"MIT"
]
| 5 | 2019-05-30T13:10:32.000Z | 2021-06-30T06:04:29.000Z | Svenskt Kvinnobiografiskt lexikon part 5.ipynb | salgo60/open-data-examples | 05a16a92c53117ff23a330a3fa5914a33b19ff6a | [
"MIT"
]
| null | null | null | Svenskt Kvinnobiografiskt lexikon part 5.ipynb | salgo60/open-data-examples | 05a16a92c53117ff23a330a3fa5914a33b19ff6a | [
"MIT"
]
| null | null | null | 96.596162 | 243 | 0.74827 | [
[
[
"# Svenskt Kvinnobiografiskt lexikon part 5\nversion part 5 - 0.1 \n\nCheck SKBL women if Alvin has an authority for the women\n\n\n* this [Jupyter Notebook](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%205.ipynb) \n * [part 1](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon.ipynb) check Wikidata and SKBL\n * [part 2](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%202.ipynb) more queries etc.\n * [part 4](https://github.com/salgo60/open-data-examples/blob/master/Svenskt%20Kvinnobiografiskt%20lexikon%20part%204.ipynb) get archives\n",
"_____no_output_____"
],
[
"# Wikidata\nget SKBL women not connected to Alvin",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\nnow = datetime.now()\nprint(\"Last run: \", datetime.now())",
"_____no_output_____"
],
[
"# pip install sparqlwrapper\n# https://rdflib.github.io/sparqlwrapper/\n\nimport sys,json\nimport pandas as pd \n\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\nendpoint_url = \"https://query.wikidata.org/sparql\"\n\nquerySKBLAlvin = \"\"\"SELECT ?item (REPLACE(STR(?item), \".*Q\", \"Q\") AS ?wid) ?SKBL (URI(CONCAT(\"https://www.alvin-portal.org/alvin/resultList.jsf?query=\", ENCODE_FOR_URI(?itemLabel), \"&searchType=PERSON\")) AS ?Alvin) WHERE {\n ?item wdt:P4963 ?id.\n OPTIONAL { ?item wdt:P569 ?birth. }\n MINUS { ?item wdt:P6821 ?value. }\n BIND(URI(CONCAT(\"https://www.skbl.se/sv/artikel/\", ?id)) AS ?SKBL)\n SERVICE wikibase:label {\n bd:serviceParam wikibase:language \"sv\".\n ?item rdfs:label ?itemLabel.\n }\n}\nORDER BY (?itemLabel)\"\"\"\n\ndef get_sparql_dataframe(endpoint_url, query):\n \"\"\"\n Helper function to convert SPARQL results into a Pandas data frame.\n \"\"\"\n user_agent = \"salgo60/%s.%s\" % (sys.version_info[0], sys.version_info[1])\n \n sparql = SPARQLWrapper(endpoint_url, agent=user_agent)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)\n\nSKBLmissingAlvin = get_sparql_dataframe(endpoint_url, querySKBLAlvin )\n",
"_____no_output_____"
],
[
"SKBLmissingAlvin.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 991 entries, 0 to 990\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 item 991 non-null object\n 1 wid 991 non-null object\n 2 SKBL 991 non-null object\n 3 Alvin 991 non-null object\ndtypes: object(4)\nmemory usage: 31.1+ KB\n"
],
[
"import csv \nimport urllib3, json\nhttp = urllib3.PoolManager() \n\n\nlistNewItems =[]\nfor index,row in SKBLmissingAlvin.iterrows():\n url = row[\"Alvin\"] \n print(url)\n r = http.request('GET', url) \n print(len(r.data),url)\n #listNewItems.append(new_item) \n#print (len(listNewItems) ,\" antal poster\")\n",
"https://www.alvin-portal.org/alvin/resultList.jsf?query=Aase%20Nordmo%20L%C3%B8vberg&searchType=PERSON\n84756 https://www.alvin-portal.org/alvin/resultList.jsf?query=Aase%20Nordmo%20L%C3%B8vberg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20Nordl%C3%B6f&searchType=PERSON\n84708 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20Nordl%C3%B6f&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20%C3%96sterberg&searchType=PERSON\n84708 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20%C3%96sterberg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20%C3%96stlund&searchType=PERSON\n84699 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agda%20%C3%96stlund&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20Arvidson&searchType=PERSON\n84683 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20Arvidson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20Bergendorff&searchType=PERSON\n84704 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20Bergendorff&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20B%C3%B6rjesson&searchType=PERSON\n99643 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agnes%20B%C3%B6rjesson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Agneta%20Wrede&searchType=PERSON\n84686 https://www.alvin-portal.org/alvin/resultList.jsf?query=Agneta%20Wrede&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Aida%20T%C3%B6rnell&searchType=PERSON\n84702 https://www.alvin-portal.org/alvin/resultList.jsf?query=Aida%20T%C3%B6rnell&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Berg&searchType=PERSON\n84671 https://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Berg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Cederblom&searchType=PERSON\n84689 https://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Cederblom&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Wifalk&searchType=PERSON\n84674 https://www.alvin-portal.org/alvin/resultList.jsf?query=Aina%20Wifalk&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alfa%20Olsson&searchType=PERSON\n84677 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alfa%20Olsson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alice%20Bonthron&searchType=PERSON\n84692 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alice%20Bonthron&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alice%20Trolle-Wachtmeister&searchType=PERSON\n84767 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alice%20Trolle-Wachtmeister&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alina%20J%C3%A4gerstedt&searchType=PERSON\n84720 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alina%20J%C3%A4gerstedt&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Abrahamsson&searchType=PERSON\n84713 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Abrahamsson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Andersson&searchType=PERSON\n98767 
https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Andersson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Braathen&searchType=PERSON\n84692 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Braathen&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Detthow&searchType=PERSON\n84683 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Detthow&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Haag&searchType=PERSON\n84668 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Haag&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Johansson&searchType=PERSON\n94661 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Johansson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Norsell&searchType=PERSON\n84677 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Norsell&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Petri&searchType=PERSON\n84665 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20Petri&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20S%C3%B6derhjelm&searchType=PERSON\n95112 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alma%20S%C3%B6derhjelm&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Alva%20Nordberg&searchType=PERSON\n84686 https://www.alvin-portal.org/alvin/resultList.jsf?query=Alva%20Nordberg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Eriksson&searchType=PERSON\n84695 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Eriksson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20J%C3%B6nsson&searchType=PERSON\n84726 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20J%C3%B6nsson&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Lindegren&searchType=PERSON\n95089 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Lindegren&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Lundeberg&searchType=PERSON\n84710 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Lundeberg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Planck&searchType=PERSON\n84683 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Planck&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Redec&searchType=PERSON\n84692 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amalia%20Redec&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Adler&searchType=PERSON\n84680 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Adler&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Hammarlund&searchType=PERSON\n84710 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Hammarlund&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Horney&searchType=PERSON\n84695 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Horney&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Peralta&searchType=PERSON\n84701 
https://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Peralta&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Sidvall&searchType=PERSON\n84704 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amanda%20Sidvall&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amelie%20von%20Braun&searchType=PERSON\n84707 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amelie%20von%20Braun&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amelina%20Mathilda%20Sterky&searchType=PERSON\n84740 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amelina%20Mathilda%20Sterky&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Amy%20Segerstedt&searchType=PERSON\n84701 https://www.alvin-portal.org/alvin/resultList.jsf?query=Amy%20Segerstedt&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Andrea%20Eneroth&searchType=PERSON\n84704 https://www.alvin-portal.org/alvin/resultList.jsf?query=Andrea%20Eneroth&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Anette%20Hasselgren&searchType=PERSON\n84719 https://www.alvin-portal.org/alvin/resultList.jsf?query=Anette%20Hasselgren&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Anita%20Bj%C3%B6rk&searchType=PERSON\n84708 https://www.alvin-portal.org/alvin/resultList.jsf?query=Anita%20Bj%C3%B6rk&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Anita%20Ekberg&searchType=PERSON\n84677 https://www.alvin-portal.org/alvin/resultList.jsf?query=Anita%20Ekberg&searchType=PERSON\nhttps://www.alvin-portal.org/alvin/resultList.jsf?query=Ann%20Elefalk&searchType=PERSON\n"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
cb75561f60e5b519d3cf45fd7fe23603bdfbe8c5 | 60,639 | ipynb | Jupyter Notebook | part_one.ipynb | projet-ML/infectious_disease_modelling | c1049482b2e117a06adae8afeb82a5b48f6296bc | [
"MIT"
]
| 36 | 2020-07-11T20:38:29.000Z | 2022-01-24T09:59:53.000Z | part_one.ipynb | projet-ML/infectious_disease_modelling | c1049482b2e117a06adae8afeb82a5b48f6296bc | [
"MIT"
]
| 2 | 2020-07-11T01:22:32.000Z | 2022-02-18T13:00:24.000Z | part_one.ipynb | projet-ML/infectious_disease_modelling | c1049482b2e117a06adae8afeb82a5b48f6296bc | [
"MIT"
]
| 37 | 2020-04-12T15:25:54.000Z | 2020-06-19T22:21:29.000Z | 295.8 | 27,518 | 0.776678 | [
[
[
"<a href=\"https://colab.research.google.com/github/hf2000510/infectious_disease_modelling/blob/master/part_one.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline \n!pip install mpld3\nimport mpld3\nmpld3.enable_notebook()",
"_____no_output_____"
],
[
"def deriv(y, t, N, beta, gamma):\n S, I, R = y\n dSdt = -beta * S * I / N\n dIdt = beta * S * I / N - gamma * I\n dRdt = gamma * I\n return dSdt, dIdt, dRdt",
"_____no_output_____"
],
[
"N = 1000\nbeta = 1.0 # infected person infects 1 other person per day\nD = 4.0 # infections lasts four days\ngamma = 1.0 / D\n\nS0, I0, R0 = 999, 1, 0 # initial conditions: one infected, rest susceptible",
"_____no_output_____"
],
[
"t = np.linspace(0, 49, 50) # Grid of time points (in days)\ny0 = S0, I0, R0 # Initial conditions vector\n\n# Integrate the SIR equations over the time grid, t.\nret = odeint(deriv, y0, t, args=(N, beta, gamma))\nS, I, R = ret.T",
"_____no_output_____"
],
[
"def plotsir(t, S, I, R):\n f, ax = plt.subplots(1,1,figsize=(10,4))\n ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')\n ax.plot(t, I, 'y', alpha=0.7, linewidth=2, label='Infected')\n ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')\n\n ax.set_xlabel('Time (days)')\n\n ax.yaxis.set_tick_params(length=0)\n ax.xaxis.set_tick_params(length=0)\n ax.grid(b=True, which='major', c='w', lw=2, ls='-')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n for spine in ('top', 'right', 'bottom', 'left'):\n ax.spines[spine].set_visible(False)\n plt.show();",
"_____no_output_____"
],
[
"plotsir(t, S, I, R)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb755986f592eca0acf8634bb8b5ae78558ba92b | 44,585 | ipynb | Jupyter Notebook | doc/src/11-elektromagnetism/11-elektromagnetism.ipynb | physics-chalmers/ffm234 | b37a744e50604ba0956724883714ea3d87929f81 | [
"CC0-1.0"
]
| null | null | null | doc/src/11-elektromagnetism/11-elektromagnetism.ipynb | physics-chalmers/ffm234 | b37a744e50604ba0956724883714ea3d87929f81 | [
"CC0-1.0"
]
| null | null | null | doc/src/11-elektromagnetism/11-elektromagnetism.ipynb | physics-chalmers/ffm234 | b37a744e50604ba0956724883714ea3d87929f81 | [
"CC0-1.0"
]
| 2 | 2020-08-06T06:03:59.000Z | 2020-11-03T13:36:07.000Z | 27.970514 | 397 | 0.496086 | [
[
[
"empty"
]
]
]
| [
"empty"
]
| [
[
"empty"
]
]
|
cb756842c60a5588df41e8a34ed7102e1ddfb2f2 | 2,312 | ipynb | Jupyter Notebook | optimization_course/practice/09_decompositions.ipynb | ivanoleinik/interactive-visualization | 66438a8be1acc2e6119f31de41c84db76030ecfc | [
"MIT"
]
| 10 | 2021-06-14T09:02:21.000Z | 2022-01-24T14:20:50.000Z | optimization_course/practice/09_decompositions.ipynb | ivanoleinik/interactive-visualization | 66438a8be1acc2e6119f31de41c84db76030ecfc | [
"MIT"
]
| null | null | null | optimization_course/practice/09_decompositions.ipynb | ivanoleinik/interactive-visualization | 66438a8be1acc2e6119f31de41c84db76030ecfc | [
"MIT"
]
| 7 | 2019-07-23T09:29:49.000Z | 2021-01-15T14:58:26.000Z | 22.019048 | 109 | 0.50346 | [
[
[
"# Задача #1: Минимизация среднеквадратичного отклонения\nДан набор точек $y_1, \\ldots, y_m\\in\\mathbb{R}^n$, нужно минимизировать\n$$\n\\frac{1}{2}\\sum_{i=1}^m\\|x-y_i\\|^2.\n$$\nЭта игрушечная задача с теоретичеким решением\n$$\nx^*=\\frac{1}{m}\\sum_{i=1}^my_i\n$$",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"def ADMM(Y, iters = 100):\n \"\"\"\n Вычисляет миинимум sum(||x-y_i||^2) методом ADMM с использованием консенсусной декомпозиции\n \n Args:\n Y: ndarray(m, n) \n \n Returns:\n [z_0, ..., z_iters] -- последовательность вспомогательных переменных z\n \"\"\"\npasspass",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"m = 50\nn = 10\nY = np.random.rand(m, n)\n\nvectors = ADMM(Y)\n#print(vectors)\n#print(np.mean(Y, axis=0))\nfig, ax = plt.subplots(1, 1, figsize=(10, 10))\nax.plot([i for i in range(len(vectors))], [np.linalg.norm(z - np.mean(Y, axis=0)) for z in vectors])\nax.set_yscale('log')\nplt.close(fig)\nfig",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
]
|
cb7569e761367ed5be35097c2d46214c29011da3 | 44,065 | ipynb | Jupyter Notebook | doc/source/methods/TreeSHAP.ipynb | zjzh/alibi | e696906681e836fe6b801ab606b0987599d028d9 | [
"Apache-2.0"
]
| 1,570 | 2019-05-03T06:43:03.000Z | 2022-03-31T02:49:34.000Z | doc/source/methods/TreeSHAP.ipynb | zjzh/alibi | e696906681e836fe6b801ab606b0987599d028d9 | [
"Apache-2.0"
]
| 511 | 2019-05-02T16:36:15.000Z | 2022-03-31T08:09:43.000Z | doc/source/methods/TreeSHAP.ipynb | zjzh/alibi | e696906681e836fe6b801ab606b0987599d028d9 | [
"Apache-2.0"
]
| 190 | 2019-05-02T13:41:38.000Z | 2022-03-14T21:18:56.000Z | 48.744469 | 1,290 | 0.648451 | [
[
[
"[[source]](../api/alibi.explainers.shap_wrappers.rst)",
"_____no_output_____"
],
[
"# Tree SHAP",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\nNote\n \nTo enable SHAP support, you may need to run:\n```bash\npip install alibi[shap]\n```\n\n</div>",
"_____no_output_____"
],
[
"## Overview",
"_____no_output_____"
],
[
"The tree SHAP (**SH**apley **A**dditive ex**P**lanations) algorithm is based on the paper [From local explanations to global understanding with explainable AI for trees](https://www.nature.com/articles/s42256-019-0138-9) by Lundberg et al. and builds on the open source [shap library](https://github.com/slundberg/shap) from the paper's first author.\n\n\nThe algorithm provides human interpretable explanations suitable for regression and classification of models with tree structure applied to tabular data. This method is a member of the *additive feature attribution methods* class; feature attribution refers to the fact that the change of an outcome to be explained (e.g., a class probability in a classification problem) with respect to a *baseline* (e.g., average prediction probability for that class in the training set) can be attributed in different proportions to the model input features. \n\nA simple illustration of the explanation process is shown in Figure 1. Here we see depicted a tree-based model which takes as an input features such as `Age`, `BMI` or `Blood pressure` and outputs `Mortality risk score`, a continuous value. Let's assume that we aim to explain the difference between and observed outcome and no risk, corresponding to a base value of `0.0`. Using the Tree SHAP algorithm, we attribute the `4.0` difference to the input features. Because the sum of the attribute values equals `output - base value`, this method is _additive_. We can see for example that the `Sex` feature contributes negatively to this prediction whereas the remainder of the features have a positive contribution (i.e., increase the mortality risk). For explaining this particular data point, the `Blood Pressure` feature seems to have the largest effect, and corresponds to an increase in the mortality risk. See our example on how to perform explanations with this algorithm and visualise the results using the `shap` library visualisations [here](../examples/interventional_tree_shap_adult_xgb.ipynb) and [here](../examples/path_dependent_tree_shap_adult_xgb.ipynb).",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"Figure 1: Cartoon ilustration of explanation models with Tree SHAP.\n\nImage Credit: Scott Lundberg (see source [here](https://www.nature.com/articles/s42256-019-0138-9))",
"_____no_output_____"
],
[
"## Usage",
"_____no_output_____"
],
[
"In order to compute the shap values , the following arguments can optionally be set when calling the `explain` method:\n\n- `interactions`: set to `True` to decompose the shap value of every feature for every example into a main effect and interaction effects \n\n- `approximate`: set to `True` to calculate an approximation to shap values (see our [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb))\n\n- `check_additivity`: if the explainer is initialised with `model_output = raw` and this option is `True` the explainer checks that the sum of the shap values is equal to model output - expected value\n\n- `tree_limit`: it an `int` is passed, an ensemble formed of only `tree_limit` trees is explained\n\nIf the dataset contains categorical variables that have been encoded before being passed to the explainer and a single shap value is desired for each categorical variable, the the following options should be specified:\n\n\n- `summarise_result`: set to `True`\n\n- `cat_var_start_idx`: a sequence of integers containing the column indices where categorical variables start. If the feature matrix contains a categorical feature starting at index 0 and one at index 10, then `cat_var_start_idx=[0, 10]`\n\n- `cat_vars_enc_dim`: a list containing the dimension of the encoded categorical variables. The number of columns specified in this list is summed for each categorical variable starting with the corresponding index in `cat_var_start_idx`. So if `cat_var_start_idx=[0, 10]` and `cat_vars_enc_dim=[3, 5]`, then the columns with indices `0, 1` and `2` and `10, 11, 12, 13` and `14` will be combined to return one shap value for each categorical variable, as opposed to `3` and `5`.\n",
"_____no_output_____"
],
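[
"As a minimal sketch of how the categorical-variable options above fit together (an illustration only; it assumes an already fitted explainer called `explainer` and a feature matrix `X` whose one-hot encoded categorical variables start at columns 0 and 10 with 3 and 5 levels respectively):\n\n```python\nexplanation = explainer.explain(\n    X,\n    summarise_result=True,\n    cat_var_start_idx=[0, 10],  # first column of each encoded categorical variable\n    cat_vars_enc_dim=[3, 5],    # number of encoded columns per categorical variable\n)\n```\n\nWith these settings a single shap value is returned for each categorical variable rather than one per encoded column.",
"_____no_output_____"
],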
[
"### Path-dependent feature perturbation algorithm",
"_____no_output_____"
],
[
"#### Initialiastion and fit",
"_____no_output_____"
],
[
"The explainer is initialised with the following agruments:\n\n- a model, which could be an `sklearn`, `xgboost`, `catboost` or `lightgbm` model. Note that some of the models in these packages or models trained with specific objectives may not be supported. In particular, passing raw strings as categorical levels for `catboost` and `lightgbm` is not supported\n\n- `model_output` should always default to `raw` for this algorithm\n\n- optionally, set `task` to `'classification'` or `'regression'` to indicate the type of prediction the model makes. If set to `regression` the `prediction` field of the response is empty\n\n- optionally, a list of feature names via `feature_names`. This is used to provide information about feature importances in the response\n\n- optionally, a dictionary, `category_names`, that maps the columns of the categorical variables to a list of strings representing the names of the categories. This may be used for visualisation in the future.\n",
"_____no_output_____"
],
[
"```python \nfrom alibi.explainers import TreeShap\n\nexplainer = TreeShap(\n model, \n feature_names=['size', 'age'], \n categorical_names={0: ['S', 'M', 'L', 'XL', 'XXL']}\n)\n```\n\nFor this algorithm, fit is called with no arguments:\n\n```python\nexplainer.fit()\n```",
"_____no_output_____"
],
[
"#### Explanation",
"_____no_output_____"
],
[
"To explain an instance `X`, we simply pass it to the explain method:\n\n```python \nexplanation = explainer.explain(X)\n```",
"_____no_output_____"
],
[
"The returned explanation object has the following fields:\n\n* `explanation.meta`: \n\n```python\n{'name': 'TreeShap',\n 'type': ['whitebox'],\n 'task': 'classification',\n 'explanations': ['local', 'global'],\n 'params': {'summarise_background': False, 'algorithm': 'tree_path_dependent' ,'kwargs': {}}\n}\n```\n\nThis field contains metadata such as the explainer name and type as well as the type of explanations this method can generate. In this case, the `params` attribute shows the Tree SHAP variant that will be used to explain the model in the `algorithm` attribute.\n\n* `explanation.data`:\n\n```python\ndata={'shap_values': [\n array([[ 5.0661433e-01, 2.7620478e-02],\n [-4.1725192e+00, 4.4859368e-03],\n [ 4.1338313e-01, -5.5618007e-02]],\n dtype=float32)\n ],\n 'shap_interaction_values': [array([], dtype=float64)],\n 'expected_value': array([-0.06472124]),\n 'model_output': 'raw',\n 'categorical_names': {0: ['S', 'M', 'L', 'XL', 'XXL']},\n 'feature_names': ['size', 'age'],\n 'raw': {\n 'raw_prediction': array([-0.73818872, -8.8434663 , -3.24204564]),\n 'loss': [],\n 'prediction': array([0, 0, 0]),\n 'instances': array([[0, 23],\n [4, 55],\n [2, 43]]),\n 'labels': array([], dtype=float64),\n 'importances': {\n '0': {\n 'ranked_effect': array([1.6975055 , 1.3598266], dtype=float32),\n 'names': [\n 'size',\n 'age',\n ]\n },\n 'aggregated': {\n 'ranked_effect': array([1.6975055 , 1.3598266], dtype=float32),\n 'names': [\n 'size',\n 'age',\n ]\n }\n }\n }\n}\n```",
"_____no_output_____"
],
[
"This field contains:\n\n* `shap_values`: a list of length equal to the number of model outputs, where each entry is an array of dimension samples x features of shap values. For the example above , 3 instances with 2 features has been explained so the shap values for each class are of dimension 3 x 2\n\n* `shap_interaction_values`: an empty list since this `interactions` was set to `False` in the explain call\n\n* `expected_value`: an array containing expected value for each model output \n\n* `model_output`: `raw` indicates that the model raw output was explained, the only option for the path dependent algorithm\n\n* `feature_names`: a list with the feature names\n\n* `categorical_names`: a mapping of the categorical variables (represented by indices in the shap_values columns) to the description of the category\n\n* `raw`: this field contains:\n\n * `raw_prediction`: a samples x n_outputs array of predictions for each instance to be explained. \n\n * `prediction`: an array containing the index of the maximum value in the `raw_prediction` array\n\n * `instances`: a samples x n_features array of instances which have been explained\n \n * `labels`: an array containing the labels for the instances to be explained \n\n * `importances`: a dictionary where each entry is a dictionary containing the sorted average magnitude of the shap value (ranked_effect) along with a list of feature names corresponding to the re-ordered shap values (names). There are n_outputs + 1 keys, corresponding to n_outputs and the aggregated output (obtained by summing all the arrays in shap_values)\n\n \nPlease see our examples on how to visualise these outputs using the shap library visualisations library visualisations [here](../examples/interventional_tree_shap_adult_xgb.ipynb) and [here](../examples/path_dependent_tree_shap_adult_xgb.ipynb).",
"_____no_output_____"
],
[
"#### Shapley interaction values",
"_____no_output_____"
],
[
"##### Initialisation and fit ",
"_____no_output_____"
],
[
"Shapley interaction values can only be calculated using the path-dependent feature perturbation algorithm in this release, so no arguments are passed to the `fit` method:\n\n```python \nexplainer = TreeShap(\n model, \n model_output='raw',\n)\n\nexplainer.fit()\n```",
"_____no_output_____"
],
[
"##### Explanation",
"_____no_output_____"
],
[
"To obtain the Shapley interaction values, the `explain` method is called with the option `interactions=True`:\n\n```python\nexplanation = explainer.explain(X, interactions=True)\n```\n\nThe explanation contains a list with the shap interaction values for each model output in the `shap_interaction_values` field of the `data` property.",
"_____no_output_____"
],
[
"### Interventional feature perturbation algorithm",
"_____no_output_____"
],
[
"#### Explaining model output",
"_____no_output_____"
],
[
"##### Initialiastion and fit",
"_____no_output_____"
],
[
"```python\nexplainer = TreeShap(\n model, \n model_output='raw',\n)\n\nexplainer.fit(X_reference)\n```",
"_____no_output_____"
],
[
"Model output can be set to `model_output='probability'` to explain models which return probabilities. Note that this requires the model to be trained with specific objectives. Please the footnote to our path-dependent feature perturbation [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb) for an example of how to set the model training objective in order to explain probability outputs.",
"_____no_output_____"
],
[
"##### Explanation",
"_____no_output_____"
],
[
"To explain instances in `X`, the explainer is called as follows:",
"_____no_output_____"
],
[
"```python\nexplanation = explainer.explain(X)\n```",
"_____no_output_____"
],
[
"#### Explaining loss functions",
"_____no_output_____"
],
[
"##### Initialisation and fit ",
"_____no_output_____"
],
[
"To explain loss function, the following configuration and fit steps are necessary:\n\n```python \nexplainer = TreeShap(\n model, \n model_output='log_loss',\n)\n\nexplainer.fit(X_reference)\n```\n\nOnly square loss regression objectives and cross-entropy classification objectives are supported in this release.",
"_____no_output_____"
],
[
"##### Explanation",
"_____no_output_____"
],
[
"Note that the labels need to be passed to the `explain` method in order to obtain the explanation:\n\n```python\nexplanation = explainer.explain(X, y)\n```",
"_____no_output_____"
],
[
"### Miscellaneous",
"_____no_output_____"
],
[
"\n#### Runtime considerations",
"_____no_output_____"
],
[
"##### Adjusting the size of the reference dataset ",
"_____no_output_____"
],
[
"The algorithm automatically warns the user if a background dataset size of more than `1000` samples is passed. If the runtime of an explanation with the original dataset is too large, then the algorithm can automatically subsample the background dataset during the `fit` step. This can be achieve by specifying the fit step as \n\n```python\nexplainer.fit(\n X_reference,\n summarise_background=True,\n n_background_samples=300,\n)\n```\n\nor \n```python\nexplainer.fit(\n X_reference,\n summarise_background='auto'\n)\n```\n\nThe `auto` option will select `1000` examples, whereas using the boolean argument allows the user to directly control the size of the reference set. If categorical variables are specified, the algorithm uses subsampling of the data. Otherwise, a kmeans clustering algorithm is used to select the background dataset.\n\nAs describe above, the explanations are performed with respect to the expected output over this dataset so the shap values will be affected by the dataset selection. We recommend experimenting with various ways to choose the background dataset before deploying explanations.",
"_____no_output_____"
],
[
"## Theoretical overview",
"_____no_output_____"
],
[
"\nRecall that, for a model $f$, the Kernel SHAP algorithm [[1]](#References) explains a certain outcome with respect to a chosen reference (or an expected value) by estimating the shap values of each feature $i$ from $\\{1, ..., M\\}$, as follows:\n\n- enumerate all subsets $S$ of the set $F \\setminus \\{i\\}$ \n\n- for each $S \\subseteq F \\setminus \\{i\\}$, compute the contribution of feature $i$ as $C(i|S) = f(S \\cup \\{i\\}) - f(S)$\n\n- compute the shap value according to\n\n\\begin{equation}\\tag{1}\n\\phi_i := \\frac{1}{M} \\sum \\limits_{{S \\subseteq F \\setminus \\{i\\}}} \\frac{1}{\\binom{M - 1}{|S|}} C(i|S). \n\\end{equation}\n\n\nSince most models do not accept arbitrary patterns of missing values at inference time, $f(S)$ needs to be approximated. The original formulation of the Kernel Shap algorithm [[1]](#References) proposes to compute $f(S)$ as the _observational conditional expectation_\n\n\\begin{equation}\\tag{2}\nf(S) := \\mathbb{E}\\left[f(\\mathbf{x}_{S}, \\mathbf{X}_{\\bar{S}} | \\mathbf{X}_S = \\mathbf{x}_S) \\right]\n\\end{equation}\n\nwhere the expectation is taken over a *background dataset*, $\\mathcal{D}$, after conditioning. Computing this expectation involves drawing sufficiently many samples from $\\mathbf{X}_{\\bar{S}}$ for every sample from $\\mathbf{X}_S$, which is expensive. Instead, $(2)$ is approximated by \n\n$$\nf(S) := \\mathbb{E} \\left[f(\\mathbf{x}_{S}, \\mathbf{X}_{\\bar{S}})\\right]\n$$\n\nwhere features in a subset $S$ are fixed and features in $\\bar{S}$ are sampled from the background dataset. This quantity is referred to as _marginal_ or *interventional conditional expectation*, to emphasise that setting features in $S$ to the values $\\mathbf{x}_{S}$ can be viewed as an intervention on the instance to be explained.\n\nAs described in [[2]](#References), if estimating impact of a feature $i$ on the function value by $\\mathbb{E} \\left[ f | X_i = x_i \\right]$, one should bear in mind that observing $X_i = x_i$ changes the distribution of the features $X_{j \\neq i}$ if these variables are correlated. Hence, if the conditional expectation if used to estimate $f(S)$, the Shapley values might not be accurate since they also depend on the remaining variables, effect which becomes important if there are strong correlations amongst the independent variables. Furthermore, the authors show that estimating $f(S)$ using the conditional expectation violates the *sensitivity principle*, according to which the Shapley value of a redundant variable should be 0. On the other hand, the intervention breaks the dependencies, ensuring that the sensitivity holds. One potential drawback of this method is that setting a subset of values to certain values without regard to the values of the features in the complement (i.e., $\\bar{S}$) can generate instances that are outside the training data distribution, which will affect the model prediction and hence the contributions.\n\nThe following sections detail how these methods work and how, unlike Kernel SHAP, compute the exact shap values in polynomial time. The algorithm estimating contributions using interventional expectations is presented, with the remaining sections being dedicated to presenting an approximate algorithm for evaluating the interventional expectation that does not require a background dataset and Shapley interaction values.\n\n<a id='source_1'></a>\n\n",
"_____no_output_____"
],
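[
"To make equation $(1)$ concrete, here is a small brute-force sketch (an illustration added for this overview, not part of the alibi API): it enumerates every subset $S$ explicitly and uses a single reference sample `r` to fill in the missing features, i.e. the interventional estimate of $f(S)$ described above.\n\n```python\nfrom itertools import combinations\nfrom math import comb\nimport numpy as np\n\ndef brute_force_shap(f, x, r):\n    \"\"\"Shapley values of f at x w.r.t. a single reference r, per equation (1).\"\"\"\n    M = len(x)\n\n    def f_S(S):\n        # hybrid sample: features in S come from x, the rest from the reference r\n        hybrid = np.array([x[j] if j in S else r[j] for j in range(M)])\n        return f(hybrid)\n\n    phi = np.zeros(M)\n    for i in range(M):\n        others = [j for j in range(M) if j != i]\n        for size in range(M):\n            for S in combinations(others, size):\n                weight = 1.0 / (M * comb(M - 1, size))\n                phi[i] += weight * (f_S(set(S) | {i}) - f_S(set(S)))\n    return phi\n\n# sanity check on a linear model: phi_j should equal w_j * (x_j - r_j)\nw = np.array([1.0, -2.0, 0.5])\nf = lambda z: float(w @ z)\nprint(brute_force_shap(f, x=np.array([1.0, 1.0, 1.0]), r=np.zeros(3)))\n```\n\nThis costs $O(M2^M)$ evaluations per reference sample, which is exactly the blow-up that the tree-specific algorithms below avoid.",
"_____no_output_____"
],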
[
"### Interventional feature perturbation\n<a id='interventional'></a>\n",
"_____no_output_____"
],
[
"The interventional feature perturbation algorithm provides an efficient way to calculate the expectation $f(S) := \\mathbb{E} \\left[f(\\mathbf{x}_{S}, \\mathbf{X}_{\\bar{S}})\\right]$ for all possible subsets $S$, and to combine these values according to equation $(1)$ in order to obtain the Shapley value. Intuitively, one can proceed as follows:\n\n- choose a background sample $r \\in \\mathcal{D}$\n\n- for each feature $i$, enumerate all subsets $S \\subseteq F \\setminus \\{i\\}$\n\n- for each such subset, $S$, compute $f(S)$ by traversing the tree with a _hybrid sample_ where the features in $\\bar{S}$ are replaced by their corresponding values in $r$\n\n- combine results according to equation $(1)$\n\nIf $R$ samples from the background distribution are used, then the complexity of this algorithm is $O(RM2^M)$ since we perform $2^M$ enumerations for each of the $M$ features, $R$ times. The key insight into this algorithm is that multiple hybrid samples will end up traversing identical paths and that this can be avoided if the shap values' calculation is reformulated as a summation over the paths in the tree (see [[4]](#References) for a proof):\n\n$$\n\\phi_i = \\sum_{P}\\phi_{i}^P\n$$\n\nwhere the summation is over paths $P$ in the tree descending from $i$. The value and sign of the contribution of each path descending through a node depends on whether the split from the node is due to a foreground or a background feature, as explained in the practical example below.\n\n<a id='source_4'></a>\n",
"_____no_output_____"
],
[
"#### Computing contributions with interventional Tree SHAP: a practical example.",
"_____no_output_____"
],
[
"\nFigure 2: Ilustration of the feature contribution and expected value estimation process using interventional perturbation Tree SHAP. The positive and the negative contributions of a node are represented in <span style=\"color:green\">green</span> and <span style=\"color:red\">red</span>, respectively.",
"_____no_output_____"
],
[
"In the figure above, the paths followed due the instance to be explained $x$ are coloured in red, paths followed due to the background sample in red, and common paths in yellow.\n\nThe instance to be explained is perturbed using a reference sample by the values of the features $F1$, $F3$ and $F5$ in $x$ with the corresponding values in $r$. This process gives the name of the algorithm since following the paths indicated by the background sample is akin to intervening on the instance to be explained with features from the background sample. Therefore, one defines the set $F$ in the previous section as $F = \\{ j: x_{j} \\neq r_{j}\\}$ for this case. Note that these are the only features for which one can estimate a contribution given this background sample; the same path is followed for features $F2$ and $F4$ for both the original and the perturbed sample, so these features do not contribute to explain the difference between the observed outcome ($v_6$) and the outcome that would have been observed if the tree had been traversed according to the reference $(v_{10})$.\n\n\nConsidering the structure of the tree for the given $x$ and $r$ together with equation $(1)$ reveals that the left subtree can be traversed to compute the negative terms in the summation whereas the right subtree will provide positive terms. This is because the nodes in the left subtree can only be reached if $F1$ takes the value from the background sample, that is, only $F1$ is missing. Because $F2$ and $F4$ do not contribute to explaining $f(x) - f(r)$, the negative contribution of the left subtree will be equal to the negative contribution of node $8$. This node sums two negative components: one when the downstream feature $F5$ is also missing (corresponding to evaluating $f$ at $S = \\varnothing$) and one when $F5$ is present (corresponding to evaluating $f$ at $S=\\{F5\\}$). These negative values are weighted according to the combinatorial factor in equation $(1)$. By a similar reasoning, the nodes in the right subtree are reached only if $F1$ is present and they provide the positive terms for the shap value computation. Note that the combinatorial factor in $(1)$ should be evaluated with $|S| \\gets |S| - 1$ for positive contributions since $|S|$ is increased by $1$ because of the feature whose contribution is calculated is present in the right subtree. \n\nA similar reasoning is applied to compute the contributions of the downstream nodes. For example, to estimate the contribution of $F5$, one considers a set $S = \\varnothing$ and observes the value of node $10$, and weighs that with the combinatorial factor from equation $(1)$ where $M-1 = 1$ and $|S|=0$ (because there are no features present on the path) and a positive contribution from node $9$ weighted by the same combinatorial factor (because $S = \\{F5\\}$ so $|S| - 1 = 0$). 
\n\nTo summarise, the efficient algorithm relies on the following key ideas:\n\n- each node in the tree is assigned a positive contribution reflecting membership of the splitting feature in a subset $S$ and a negative contribution to indicate the feature is missing ($i\\in \\bar{S}$)\n\n- the positive and negative contributions of a node can be computed by summing the positive and negative contributions of the children nodes, in keeping with the fact that the Shapley value can be computed by summing a contribution from each path the feature is on \n\n- to compute the contribution of a feature at a node, one adds a positive contribution from the node reached by splitting on the feature from the instance to be explained and a negative contribution from the node reached by splitting on the feature in the background sample\n\n- features for which the instance to be explained and the reference follow the same path are assigned $0$ contribution.\n",
"_____no_output_____"
],
[
"#### Explaining loss functions",
"_____no_output_____"
],
[
"One advantage of the interventional approach is that it allows to approximately transform the shap values to account for nonlinear transformation of outputs, such as the loss function. Recall that given $\\phi_i, ..., \\phi_M$ the local accuracy property guarantees that given $\\phi_0 = \\mathbb{E}[f(x)]$\n\n\\begin{equation}\\tag{3}\nf(x) = \\phi_0 + \\sum \\limits_{i=1}^M \\phi_i.\n\\end{equation}\n\nHence, in order to account for the effect of the nonlinear transformation $h$, one has to find the functions $g_0, ..., g_M$ such that\n\n\\begin{equation}\\tag{4}\nh(f(x)) = g(\\phi_0) + \\sum \\limits_{i=1}^M g_i(\\phi_i)\n\\end{equation}\n\nFor simplicity, let $y=h(x)$. Then using a first-order Taylor series expansion around $\\mathbb{E}[y]$ one obtains \n\n\\begin{equation}\\tag{5}\nh(y) \\approx h(\\mathbb{E}[y]) + \\frac{\\partial h(y) }{\\partial y} \\Bigr|_{y=\\mathbb{E}[y]}(y - \\mathbb{E}[y]).\n\\end{equation}\n\nSubstituting $(3)$ in $(5)$ and comparing coefficients with $(4)$ yields\n\n\\begin{equation*}\n\\begin{split}\ng_0 & \\approx h(\\mathbb{E}[y]) \\\\\ng_i &\\approx \\phi_i \\frac{\\partial h(y) }{\\partial y} \\Bigr|_{y=\\mathbb{E}[y]} .\n\\end{split}\n\\end{equation*}\n\nHence, an approximate correction is given by simply scaling the shap values using the gradient of the nonlinear function. Note that in practice one may take the Taylor series expansion at a reference point $r$ from the background dataset and average over the entire background dataset to compute the scaling factor. This introduces an additional source of noise since $h(\\mathbb{E}[y]) = \\mathbb{E}[h(y)]$ only when $h$ is linear.",
"_____no_output_____"
],
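[
"A small numerical sketch of this correction (illustrative only; the function and variable names here are assumptions, not part of alibi):\n\n```python\nimport numpy as np\n\ndef rescale_shap(phi, expected_raw, h, h_grad):\n    \"\"\"First-order rescaling of raw-output shap values through a nonlinearity h.\"\"\"\n    g0 = h(expected_raw)                    # g_0 ~ h(E[y])\n    return g0, phi * h_grad(expected_raw)   # g_i ~ phi_i * dh/dy evaluated at E[y]\n\n# e.g. mapping raw margins to probabilities through a sigmoid\nsigmoid = lambda y: 1.0 / (1.0 + np.exp(-y))\nsigmoid_grad = lambda y: sigmoid(y) * (1.0 - sigmoid(y))\nphi = np.array([[0.5, -1.2, 0.1]])  # hypothetical raw shap values for one instance\ng0, g = rescale_shap(phi, expected_raw=-0.1, h=sigmoid, h_grad=sigmoid_grad)\n```\n\nSince the expansion is first order, the correction is only approximate unless $h$ is linear.",
"_____no_output_____"
],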
[
"#### Computational complexity",
"_____no_output_____"
],
[
"For a single foreground and background sample and a single tree, the algorithm runs in $O(LD)$ time. Thus, using $R$ background samples and a model containing $T$ trees, yields a complexity of $O(TRLD)$.",
"_____no_output_____"
],
[
"### Path dependent feature perturbation\n<a id='path_dependent'></a>",
"_____no_output_____"
],
[
"Another way to approximate equation $(2)$ to compute $f(S)$ given an instance $x$ and a set of missing features $\\bar{S}$ is to recursively follow the decision path through the tree and:\n\n- return the node value if a split on a feature $i \\in S$ is performed\n\n- take a weighted average of the values returned by children if $i \\in \\bar{S}$, where the weighing factor is equal to the proportion of training examples flowing down each branch. This proportion is a property of each node, sometimes referred to as _weight_ or _cover_ and measures how important is that node with regard to classifying the training data.\n\nTherefore, in the path-dependent perturbation method, we compute the expectations with respect to the training data distribution by weighting the leaf values according to the proportion of the training examples that flow to that leaf.\n\nTo avoid repeating the above recursion $M2^M$ times, one first notices that for a single decision tree, applying a perturbation would result in the sample ending up in a different leaf. Therefore, following each path from the root to a leaf in the tree is equivalent to perturbing subsets of features of varying cardinalities. Consequently, each leaf will contain a certain proportion of all possible subsets $S \\subseteq F$. Therefore, to compute the shap values, the following quantities are computed at each leaf, *for every feature $i$ on the path leading to that leaf*:\n\n- the proportion of subsets $S$ at the leaf that contain $i$ and the proportion of subsets $S$ that do not contain $i$\n\n- for each cardinality, the proportion of the sets of that cardinality contained at the leaf. Tracking each cardinality as opposed to a single count of subsets falling into a given leaf is necessary since it allows to apply the weighting factor in equation (1), which depends on the subset size, $|S|$.\n\nThis intuition can be summarised as follows:\n\\begin{equation}\\tag{6}\n\\phi_i := \\sum \\limits_{j=1}^L \\sum \\limits_{P \\in {S_j}} \\frac {w(|P|, j)}{ M_j {\\binom{M_j - 1}{|P|}}} (p_o^{i,j} - p_z^{i, j}) v_j\n\\end{equation}\n\nwhere $S_j$ is the set of present feature subsets at leaf $j$, $M_j$ is the length of the path and $w(|P|, j)$ is the proportion of all subsets of cardinality $P$ at leaf $j$, $p_o^{i, j}$ and $p_z^{i, j}$ represent the fractions of subsets that contain or do not contain feature $i$ respectively.",
"_____no_output_____"
],
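[
"A minimal sketch of the recursion described above (illustrative only; the `Node` structure below is a hypothetical stand-in, not alibi's or any tree library's internal representation):\n\n```python\nfrom dataclasses import dataclass\nfrom typing import Optional\n\n@dataclass\nclass Node:\n    cover: float                   # number of training rows reaching the node\n    value: float = 0.0             # leaf value (used only for leaves)\n    feature: Optional[int] = None  # split feature index, None for a leaf\n    threshold: float = 0.0\n    left: Optional['Node'] = None\n    right: Optional['Node'] = None\n\ndef f_S(node, x, S):\n    \"\"\"Path-dependent estimate of f(S) for instance x with present features S.\"\"\"\n    if node.feature is None:  # leaf\n        return node.value\n    if node.feature in S:     # feature present: follow the instance's split\n        child = node.left if x[node.feature] <= node.threshold else node.right\n        return f_S(child, x, S)\n    # feature missing: cover-weighted average over both children\n    total = node.left.cover + node.right.cover\n    return (node.left.cover * f_S(node.left, x, S)\n            + node.right.cover * f_S(node.right, x, S)) / total\n```\n\nThe Tree SHAP algorithm never calls such a routine $2^M$ times; it gathers the same quantities for all subsets in a single traversal, as summarised by equation $(6)$ above.",
"_____no_output_____"
],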
[
"#### Computational complexity",
"_____no_output_____"
],
[
"Using the above quantities, one can compute the _contribution_ of each leaf to the Shapley value of every feature. This algorithm has complexity $O(TLD^2)$ for an ensemble of trees where $L$ is the number of leaves, $T$ the number of trees in the ensemble and $D$ the maximum tree depth. If the tree is balanced, then $D=\\log L$ and the complexity of our algorithm is $O(TL\\log^2L)$",
"_____no_output_____"
],
[
"#### Expected value for the path-dependent perturbation algorithm",
"_____no_output_____"
],
[
"Note that although a background dataset is not provided, the expected value is computed using the node cover information, stored at each node. The computation proceeds recursively, starting at the root. The contribution of a node to the expected value of the tree is a function of the expected values of the children and is computed as follows:\n\n$$\nc_j = \\frac{c_{r(j)}r_{r(j)} + c_{l(j)}r_{l(j)}}{r_j}\n$$\n\nwhere $j$ denotes the node index, $c_j$ denotes the node expected value, $r_j$ is the cover of the $j$th node and $r(j)$ and $l(j)$ represent the indices of the right and left children, respectively. The expected value used by the tree is simply $c_{root}$. Note that for tree ensembles, the expected values of the ensemble members is weighted according to the tree weight and the weighted expected values of all trees are summed to obtain a single value.\n\nThe cover depends on the objective function and the model chosen. For example, in a gradient boosted tree trained with squared loss objective, $r_j$ is simply the number of training examples flowing through $j$. For an arbitrary objective, this is the sum of the Hessian of the loss function evaluated at each point flowing through $j$, as explained [here](../examples/xgboost_model_fitting_adult.ipynb). ",
"_____no_output_____"
],
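[
"Reusing the hypothetical `Node` structure from the sketch above, the recursion for $c_{root}$ could be written as (again purely illustrative):\n\n```python\ndef expected_value(node):\n    \"\"\"Cover-weighted recursion for the expected value of a single tree.\"\"\"\n    if node.feature is None:  # leaf\n        return node.value\n    return (node.left.cover * expected_value(node.left)\n            + node.right.cover * expected_value(node.right)) / node.cover\n```\n\nFor an ensemble, each tree's $c_{root}$ would then be combined according to the tree weights, as described above.",
"_____no_output_____"
],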
[
"### Shapley interaction values",
"_____no_output_____"
],
[
"While the Shapley values provide a solution to the problem of allocating a function variation to the input features, in practice it might be of interest to understand how the importance of a feature depends on the other features. The Shapley interaction values can solve this problem, by allocating the change in the function amongst the individual features (*main effects*) and all pairs of features (*interaction effects*). Thus, they are defined as \n\n\\begin{equation}\\tag{7}\n\\Phi_{i, j}(f, x) = \\sum_{S \\subseteq {F \\setminus \\{i, j\\}}} \\frac{1}{2|S| {\\binom{M-1}{|S| - 1}}} \\nabla_{ij}(f, x, S), \\; i \\neq j\n\\end{equation}\n\nand\n\n\\begin{equation}\\tag{8}\n\\nabla_{ij}(f, x, S) = \\underbrace{f_{x}(S \\cup \\{i, j\\}) - f_x(S \\cup \\{j\\})}_{j \\; present} - \\underbrace{[f_x(S \\cup \\{i\\}) - f_x(S)]}_{j \\; not \\; present}. \n\\end{equation}\n\nTherefore, the interaction of features $i$ and $j$ can be computed by taking the difference between the shap values of $i$ when $j$ is present and when $j$ is not present. The main effects are defined as \n\n$$\n\\Phi_{i,i}(f, x) = \\phi_i(f, x) - \\sum_{i \\neq j} \\Phi_{i, j}(f, x),\n$$\n\nSetting $\\Phi_{0, 0} = f_x(\\varnothing)$ yields the local accuracy property for Shapley interaction values:\n\n$$f(x) = \\sum \\limits_{i=0}^M \\sum \\limits_{j=0}^M \\Phi_{i, j}.(f, x) $$.\n\nThe interaction is split equally between feature $i$ and $j$, which is why the division by two appears in equation $(7)$. The total interaction effect is defined as $\\Phi_{i, j}(f, x) + \\Phi_{j, i}(f,x)$.",
"_____no_output_____"
],
[
"#### Computational complexity",
"_____no_output_____"
],
[
"According to equation $(8)$, the interaction values can be computed by applying either the interventional or path-dependent feature perturbation algorithm twice: once by fixing the value of feature $j$ to $x_j$ and computing the shapley value for feature $i$ in this configuration, and once by fixing $x_j$ to a \"missing\" value and performing the same computation. Thus, the interaction values can be computed in $O(TMLD^2)$ with the path-dependent perturbation algorithm and $O(TMLDR)$ with the interventional feature perturbation algorithm.\n",
"_____no_output_____"
],
[
"### Comparison to other methods",
"_____no_output_____"
],
[
"Tree-based models are widely used in areas where model interpretability is of interest because node-level statistics gathered from the training data can be used to provide insights into the behaviour of the model across the training dataset, providing a _global explanation_ technique. As shown in our [example](../examples/path_dependent_tree_shap_adult_xgb.ipynb), considering different statistics gives rise to different importance rankings. As discussed in [[1]](#References) and [[3]](#References), depending on the statistic chosen, feature importances derived from trees are not *consistent*, meaning that a model where a feature is known to have a bigger impact might fail to have a larger importance. As such, feature importances cannot be compared across models. In contrast, both the path-dependent and interventional perturbation algorithms tackle this limitation.\n\nIn contrast to feature importances derived from tree statistics, the Tree SHAP algorithms can also provide local explanations, allowing the identification of features that are globally \"not important\", but can affect specific outcomes significantly, as might be the case in healthcare applications. Additionally, it provides a means to succinctly summarise the effect magnitude and direction (positive or negative) across potentially large samples. Finally, as shown in [[1]](#References) (see [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf), p. 26), averaging the instance-level shap values importance to derive a global score for each feature can result in improvements in feature selection tasks. \n\nAnother method to derive instance-level explanations for tree-based model has been proposed by Sabaas [here](https://github.com/andosa/treeinterpreter). This feature attribution method is similar in spirit to Shapley value, but does not account for the effect of variable order as explained [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf) (pp. 10-11) as well as not satisfying consistency ([[3]](#References)).\n\nFinally, both Tree SHAP algorithms exploit model structure to provide exact Shapley values computation albeit using different estimates for the effect of missing features, achieving explanations in low-order polynomial time. The KernelShap method relies on post-hoc (black-box) function modelling and approximations to approximate the same quantities and given enough samples has been shown to to the exact values (see experiments [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs42256-019-0138-9/MediaObjects/42256_2019_138_MOESM1_ESM.pdf) and our [example](../examples/interventional_tree_shap_adult_xgb.ipynb)). Our Kernel SHAP [documentation](KernelSHAP.ipynb) provides comparisons of feature attribution methods based on Shapley values with other algorithms such as LIME and [anchors](Anchors.ipynb). \n\n<a id='source_3'></a>\n",
"_____no_output_____"
],
[
"## References",
"_____no_output_____"
],
[
"<a id='References'></a>\n\n[[1]](#source_1) Lundberg, S.M. and Lee, S.I., 2017. A unified approach to interpreting model predictions. In Advances in neural information processing systems (pp. 4765-4774).\n\n[[2]](#source_2) Janzing, D., Minorics, L. and Blöbaum, P., 2019. Feature relevance quantification in explainable AI: A causality problem. arXiv preprint arXiv:1910.13413.\n\n[[3]](#source_3) Lundberg, S.M., Erion, G.G. and Lee, S.I., 2018. Consistent individualized feature attribution for tree ensembles. arXiv preprint arXiv:1802.03888.\n\n[[4]](#source_4) Chen, H., Lundberg, S.M. and Lee, S.I., 2018. Understanding Shapley value explanation algorithms for trees. Under review for publication in Distill, draft available [here](https://hughchen.github.io/its_blog/index.html).",
"_____no_output_____"
],
[
"## Examples",
"_____no_output_____"
],
[
"### Path-dependent Feature Perturbation Tree SHAP ",
"_____no_output_____"
],
[
"[Explaing tree models with path-dependent feature perturbation Tree SHAP](../examples/path_dependent_tree_shap_adult_xgb.ipynb)\n\n",
"_____no_output_____"
],
[
"### Interventional Feature Perturbation Tree SHAP",
"_____no_output_____"
],
[
"[Explaing tree models with path-dependent feature perturbation Tree SHAP](../examples/interventional_tree_shap_adult_xgb.ipynb)",
"_____no_output_____"
]
]
]
| [
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
]
|
cb75760553dca4f3a5974b1ccdfc388e2822f2f6 | 27,190 | ipynb | Jupyter Notebook | quickstart/IntroNotebooks/4. Using PyTorch through ONNX.ipynb | boh-inspur/TensorRT | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | [
"Apache-2.0"
]
| 1 | 2021-04-11T15:07:44.000Z | 2021-04-11T15:07:44.000Z | quickstart/IntroNotebooks/4. Using PyTorch through ONNX.ipynb | em-et/TensorRT | e724d31ab84626ca334b4284703b5048eb698c98 | [
"Apache-2.0"
]
| null | null | null | quickstart/IntroNotebooks/4. Using PyTorch through ONNX.ipynb | em-et/TensorRT | e724d31ab84626ca334b4284703b5048eb698c98 | [
"Apache-2.0"
]
| null | null | null | 45.468227 | 402 | 0.603126 | [
[
[
"# Using PyTorch with TensorRT through ONNX:",
"_____no_output_____"
],
[
"TensorRT is a great way to take a trained PyTorch model and optimize it to run more efficiently during inference on an NVIDIA GPU.\n\nOne approach to convert a PyTorch model to TensorRT is to export a PyTorch model to ONNX (an open format exchange for deep learning models) and then convert into a TensorRT engine. Essentially, we will follow this path to convert and deploy our model:\n\n\n\nBoth TensorFlow and PyTorch models can be exported to ONNX, as well as many other frameworks. This allows models created using either framework to flow into common downstream pipelines.\n\nTo get started, let's take a well-known computer vision model and follow five key steps to deploy it to the TensorRT Python runtime:\n\n1. __What format should I save my model in?__\n2. __What batch size(s) am I running inference at?__\n3. __What precision am I running inference at?__\n4. __What TensorRT path am I using to convert my model?__\n5. __What runtime am I targeting?__",
"_____no_output_____"
],
[
"## 1. What format should I save my model in?",
"_____no_output_____"
],
[
"We are going to use ResNet50, a widely used CNN architecture first described in <a href=https://arxiv.org/abs/1512.03385>this paper</a>.\n\nLet's start by loading dependencies and downloading the model:",
"_____no_output_____"
]
],
[
[
"import torchvision.models as models\nimport torch\nimport torch.onnx\n\n# load the pretrained model\nresnet50 = models.resnet50(pretrained=True, progress=False)",
"_____no_output_____"
]
],
[
[
"Next, we will select our batch size and export the model:",
"_____no_output_____"
]
],
[
[
"# set up a dummy input tensor and export the model to ONNX\nBATCH_SIZE = 32\ndummy_input=torch.randn(BATCH_SIZE, 3, 224, 224)\ntorch.onnx.export(resnet50, dummy_input, \"resnet50_pytorch.onnx\", verbose=False)",
"_____no_output_____"
]
],
[
[
"Note that we are picking a BATCH_SIZE of 4 in this example.",
"_____no_output_____"
],
[
"Let's use a benchmarking function included in this guide to time this model:",
"_____no_output_____"
]
],
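[
[
"The `benchmark` helper imported below ships with the guide as a separate local module and its source is not shown in this notebook. A rough stand-in with similar behaviour might look like this (an assumption for illustration, not the actual module):\n\n```python\nimport time\nimport torch\n\ndef benchmark(model, input_shape=(1, 3, 224, 224), nruns=1000):\n    input_data = torch.randn(input_shape, device='cuda')\n    print('Warm up ...')\n    with torch.no_grad():\n        for _ in range(10):\n            _ = model(input_data)\n    torch.cuda.synchronize()\n    print('Start timing ...')\n    timings = []\n    with torch.no_grad():\n        for _ in range(nruns):\n            start = time.time()\n            features = model(input_data)\n            torch.cuda.synchronize()\n            timings.append(time.time() - start)\n    print('Input shape:', input_data.size())\n    print('Output features size:', features.size())\n    print('Average batch time: %.2f ms' % (1000 * sum(timings) / len(timings)))\n```",
"_____no_output_____"
]
],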
[
[
"from benchmark import benchmark\n\nresnet50.to(\"cuda\").eval()\nbenchmark(resnet50)",
"Warm up ...\nStart timing ...\nIteration 1000/1000, ave batch time 10.19 ms\nInput shape: torch.Size([1, 3, 224, 224])\nOutput features size: torch.Size([1, 1000])\nAverage batch time: 10.19 ms\n"
]
],
[
[
"Now, let's restart our Jupyter Kernel so PyTorch doesn't collide with TensorRT: ",
"_____no_output_____"
]
],
[
[
"import os\n\nos._exit(0) # Shut down all kernels so TRT doesn't fight with PyTorch for GPU memory",
"_____no_output_____"
]
],
[
[
"## 2. What batch size(s) am I running inference at?\n\nWe are going to run with a fixed batch size of 4 for this example. Note that above we set BATCH_SIZE to 4 when saving our model to ONNX. We need to create another dummy batch of the same size (this time it will need to be in our target precision) to test out our engine.\n\nFirst, as before, we will set our BATCH_SIZE to 4. Note that our trtexec command above includes the '--explicitBatch' flag to signal to TensorRT that we will be using a fixed batch size at runtime.",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 32",
"_____no_output_____"
]
],
[
[
"Importantly, by default TensorRT will use the input precision you give the runtime as the default precision for the rest of the network. So before we create our new dummy batch, we also need to choose a precision as in the next section:",
"_____no_output_____"
],
[
"## 3. What precision am I running inference at?\n\nRemember that lower precisions than FP32 tend to run faster. There are two common reduced precision modes - FP16 and INT8. Graphics cards that are designed to do inference well often have an affinity for one of these two types. This guide was developed on an NVIDIA V100, which favors FP16, so we will use that here by default. INT8 is a more complicated process that requires a calibration step.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nUSE_FP16 = True\n\ntarget_dtype = np.float16 if USE_FP16 else np.float32\ndummy_input_batch = np.zeros((BATCH_SIZE, 224, 224, 3), dtype = np.float32) ",
"_____no_output_____"
]
],
[
[
"## 4. What TensorRT path am I using to convert my model?",
"_____no_output_____"
],
[
"We can use trtexec, a command line tool for working with TensorRT, in order to convert an ONNX model originally from PyTorch to an engine file.\n\nLet's make sure we have TensorRT installed (this comes with trtexec):",
"_____no_output_____"
]
],
[
[
"import tensorrt",
"_____no_output_____"
]
],
[
[
"To convert the model we saved in the previous step, we need to point to the ONNX file, give trtexec a name to save the engine as, and last specify that we want to use a fixed batch size instead of a dynamic one.",
"_____no_output_____"
]
],
[
[
"# step out of Python for a moment to convert the ONNX model to a TRT engine using trtexec\nif USE_FP16:\n !trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch --fp16\nelse:\n !trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch",
"&&&& RUNNING TensorRT.trtexec # trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch --fp16\n[01/30/2021-02:11:40] [I] === Model Options ===\n[01/30/2021-02:11:40] [I] Format: ONNX\n[01/30/2021-02:11:40] [I] Model: resnet50_pytorch.onnx\n[01/30/2021-02:11:40] [I] Output:\n[01/30/2021-02:11:40] [I] === Build Options ===\n[01/30/2021-02:11:40] [I] Max batch: explicit\n[01/30/2021-02:11:40] [I] Workspace: 16 MiB\n[01/30/2021-02:11:40] [I] minTiming: 1\n[01/30/2021-02:11:40] [I] avgTiming: 8\n[01/30/2021-02:11:40] [I] Precision: FP32+FP16\n[01/30/2021-02:11:40] [I] Calibration: \n[01/30/2021-02:11:40] [I] Refit: Disabled\n[01/30/2021-02:11:40] [I] Safe mode: Disabled\n[01/30/2021-02:11:40] [I] Save engine: resnet_engine_pytorch.trt\n[01/30/2021-02:11:40] [I] Load engine: \n[01/30/2021-02:11:40] [I] Builder Cache: Enabled\n[01/30/2021-02:11:40] [I] NVTX verbosity: 0\n[01/30/2021-02:11:40] [I] Tactic sources: Using default tactic sources\n[01/30/2021-02:11:40] [I] Input(s)s format: fp32:CHW\n[01/30/2021-02:11:40] [I] Output(s)s format: fp32:CHW\n[01/30/2021-02:11:40] [I] Input build shapes: model\n[01/30/2021-02:11:40] [I] Input calibration shapes: model\n[01/30/2021-02:11:40] [I] === System Options ===\n[01/30/2021-02:11:40] [I] Device: 0\n[01/30/2021-02:11:40] [I] DLACore: \n[01/30/2021-02:11:40] [I] Plugins:\n[01/30/2021-02:11:40] [I] === Inference Options ===\n[01/30/2021-02:11:40] [I] Batch: Explicit\n[01/30/2021-02:11:40] [I] Input inference shapes: model\n[01/30/2021-02:11:40] [I] Iterations: 10\n[01/30/2021-02:11:40] [I] Duration: 3s (+ 200ms warm up)\n[01/30/2021-02:11:40] [I] Sleep time: 0ms\n[01/30/2021-02:11:40] [I] Streams: 1\n[01/30/2021-02:11:40] [I] ExposeDMA: Disabled\n[01/30/2021-02:11:40] [I] Data transfers: Enabled\n[01/30/2021-02:11:40] [I] Spin-wait: Disabled\n[01/30/2021-02:11:40] [I] Multithreading: Disabled\n[01/30/2021-02:11:40] [I] CUDA Graph: Disabled\n[01/30/2021-02:11:40] [I] Separate profiling: Disabled\n[01/30/2021-02:11:40] [I] Skip inference: Disabled\n[01/30/2021-02:11:40] [I] Inputs:\n[01/30/2021-02:11:40] [I] === Reporting Options ===\n[01/30/2021-02:11:40] [I] Verbose: Disabled\n[01/30/2021-02:11:40] [I] Averages: 10 inferences\n[01/30/2021-02:11:40] [I] Percentile: 99\n[01/30/2021-02:11:40] [I] Dump refittable layers:Disabled\n[01/30/2021-02:11:40] [I] Dump output: Disabled\n[01/30/2021-02:11:40] [I] Profile: Disabled\n[01/30/2021-02:11:40] [I] Export timing to JSON file: \n[01/30/2021-02:11:40] [I] Export output to JSON file: \n[01/30/2021-02:11:40] [I] Export profile to JSON file: \n[01/30/2021-02:11:40] [I] \n[01/30/2021-02:11:40] [I] === Device Information ===\n[01/30/2021-02:11:40] [I] Selected Device: Tesla V100-DGXS-16GB\n[01/30/2021-02:11:40] [I] Compute Capability: 7.0\n[01/30/2021-02:11:40] [I] SMs: 80\n[01/30/2021-02:11:40] [I] Compute Clock Rate: 1.53 GHz\n[01/30/2021-02:11:40] [I] Device Global Memory: 16155 MiB\n[01/30/2021-02:11:40] [I] Shared Memory per SM: 96 KiB\n[01/30/2021-02:11:40] [I] Memory Bus Width: 4096 bits (ECC enabled)\n[01/30/2021-02:11:40] [I] Memory Clock Rate: 0.877 GHz\n[01/30/2021-02:11:40] [I] \n----------------------------------------------------------------\nInput filename: resnet50_pytorch.onnx\nONNX IR version: 0.0.6\nOpset version: 9\nProducer name: pytorch\nProducer version: 1.8\nDomain: \nModel version: 0\nDoc string: \n----------------------------------------------------------------\n[01/30/2021-02:11:57] [I] [TRT] Some tactics do not have sufficient workspace memory to run. 
Increasing workspace size may increase performance, please check verbose output.\n[01/30/2021-02:12:38] [I] [TRT] Detected 1 inputs and 1 output network tensors.\n[01/30/2021-02:12:38] [I] Engine built in 58.341 sec.\n[01/30/2021-02:12:39] [I] Starting inference\n[01/30/2021-02:12:42] [I] Warmup completed 0 queries over 200 ms\n[01/30/2021-02:12:42] [I] Timing trace has 0 queries over 3.01474 s\n[01/30/2021-02:12:42] [I] Trace averages of 10 runs:\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.54261 ms - Host latency: 7.12485 ms (end to end 11.0132 ms, enqueue 0.490388 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.54179 ms - Host latency: 7.13058 ms (end to end 10.3497 ms, enqueue 0.473805 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.49775 ms - Host latency: 7.08051 ms (end to end 10.7457 ms, enqueue 0.473593 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.48351 ms - Host latency: 7.0662 ms (end to end 10.6246 ms, enqueue 0.477765 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.48168 ms - Host latency: 7.07059 ms (end to end 10.6304 ms, enqueue 0.511209 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.46939 ms - Host latency: 7.05303 ms (end to end 10.8701 ms, enqueue 0.476138 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.46038 ms - Host latency: 7.04825 ms (end to end 10.844 ms, enqueue 0.510028 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45701 ms - Host latency: 7.04156 ms (end to end 10.4863 ms, enqueue 0.481628 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.46315 ms - Host latency: 7.04678 ms (end to end 10.8546 ms, enqueue 0.486493 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45135 ms - Host latency: 7.03293 ms (end to end 10.8368 ms, enqueue 0.451886 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45332 ms - Host latency: 7.03381 ms (end to end 10.4568 ms, enqueue 0.448792 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.47145 ms - Host latency: 7.05214 ms (end to end 10.8879 ms, enqueue 0.450238 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.46673 ms - Host latency: 7.0481 ms (end to end 10.8669 ms, enqueue 0.458594 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4571 ms - Host latency: 7.03876 ms (end to end 10.8491 ms, enqueue 0.45224 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44923 ms - Host latency: 7.03019 ms (end to end 10.8304 ms, enqueue 0.456989 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45555 ms - Host latency: 7.04651 ms (end to end 10.8335 ms, enqueue 0.452844 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.43733 ms - Host latency: 7.13279 ms (end to end 10.8064 ms, enqueue 0.456653 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4527 ms - Host latency: 7.19491 ms (end to end 10.8319 ms, enqueue 0.471423 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45814 ms - Host latency: 7.21376 ms (end to end 10.8433 ms, enqueue 0.478052 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45199 ms - Host latency: 7.20497 ms (end to end 10.8342 ms, enqueue 0.491235 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45476 ms - Host latency: 7.20305 ms (end to end 10.835 ms, enqueue 0.481665 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45189 ms - Host 
latency: 7.20339 ms (end to end 10.8307 ms, enqueue 0.483386 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45106 ms - Host latency: 7.19846 ms (end to end 10.8319 ms, enqueue 0.509949 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.43886 ms - Host latency: 7.18525 ms (end to end 10.7492 ms, enqueue 0.416125 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.43108 ms - Host latency: 7.16941 ms (end to end 10.8011 ms, enqueue 0.404297 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44432 ms - Host latency: 7.19043 ms (end to end 10.82 ms, enqueue 0.413928 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44503 ms - Host latency: 7.19503 ms (end to end 10.8254 ms, enqueue 0.411731 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45649 ms - Host latency: 7.20958 ms (end to end 10.8403 ms, enqueue 0.412561 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44401 ms - Host latency: 7.19244 ms (end to end 10.8213 ms, enqueue 0.412598 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44102 ms - Host latency: 7.18861 ms (end to end 10.8133 ms, enqueue 0.436987 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45006 ms - Host latency: 7.20289 ms (end to end 10.833 ms, enqueue 0.439331 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4575 ms - Host latency: 7.20284 ms (end to end 10.8427 ms, enqueue 0.436646 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45487 ms - Host latency: 7.20845 ms (end to end 10.8413 ms, enqueue 0.439319 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4395 ms - Host latency: 7.1884 ms (end to end 10.8078 ms, enqueue 0.43324 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.48342 ms - Host latency: 7.23835 ms (end to end 10.8946 ms, enqueue 0.439404 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45503 ms - Host latency: 7.19844 ms (end to end 10.8375 ms, enqueue 0.440918 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.43992 ms - Host latency: 7.1866 ms (end to end 10.8096 ms, enqueue 0.442407 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45095 ms - Host latency: 7.19233 ms (end to end 10.8367 ms, enqueue 0.435132 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4522 ms - Host latency: 7.20203 ms (end to end 10.8335 ms, enqueue 0.436938 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44805 ms - Host latency: 7.19414 ms (end to end 10.8289 ms, enqueue 0.434473 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44307 ms - Host latency: 7.19053 ms (end to end 10.8143 ms, enqueue 0.439136 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45256 ms - Host latency: 7.20137 ms (end to end 10.8323 ms, enqueue 0.440991 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45581 ms - Host latency: 7.20334 ms (end to end 10.8416 ms, enqueue 0.464575 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44614 ms - Host latency: 7.19006 ms (end to end 10.8206 ms, enqueue 0.4677 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45139 ms - Host latency: 7.20149 ms (end to end 10.8334 ms, enqueue 0.46626 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44536 ms - Host latency: 7.19265 ms (end to end 10.817 ms, enqueue 0.465039 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs 
- GPU latency: 5.44517 ms - Host latency: 7.19331 ms (end to end 10.8181 ms, enqueue 0.470776 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45247 ms - Host latency: 7.20457 ms (end to end 10.8336 ms, enqueue 0.47019 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45178 ms - Host latency: 7.20559 ms (end to end 10.83 ms, enqueue 0.464844 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44983 ms - Host latency: 7.19641 ms (end to end 10.827 ms, enqueue 0.470581 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.45164 ms - Host latency: 7.19951 ms (end to end 10.834 ms, enqueue 0.469531 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.4571 ms - Host latency: 7.20801 ms (end to end 10.8466 ms, enqueue 0.462158 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44883 ms - Host latency: 7.20139 ms (end to end 10.8204 ms, enqueue 0.473437 ms)\n[01/30/2021-02:12:42] [I] Average on 10 runs - GPU latency: 5.44988 ms - Host latency: 7.19773 ms (end to end 10.8268 ms, enqueue 0.466772 ms)\n[01/30/2021-02:12:42] [I] Host Latency\n[01/30/2021-02:12:42] [I] min: 6.96747 ms (end to end 7.06885 ms)\n[01/30/2021-02:12:42] [I] max: 7.6875 ms (end to end 11.3145 ms)\n[01/30/2021-02:12:42] [I] mean: 7.15645 ms (end to end 10.8042 ms)\n[01/30/2021-02:12:42] [I] median: 7.18567 ms (end to end 10.8351 ms)\n[01/30/2021-02:12:42] [I] percentile: 7.24304 ms at 99% (end to end 11.0522 ms at 99%)\n[01/30/2021-02:12:42] [I] throughput: 0 qps\n[01/30/2021-02:12:42] [I] walltime: 3.01474 s\n[01/30/2021-02:12:42] [I] Enqueue Time\n[01/30/2021-02:12:42] [I] min: 0.349854 ms\n[01/30/2021-02:12:42] [I] max: 0.832886 ms\n[01/30/2021-02:12:42] [I] median: 0.460693 ms\n[01/30/2021-02:12:42] [I] GPU Compute\n[01/30/2021-02:12:42] [I] min: 5.38318 ms\n[01/30/2021-02:12:42] [I] max: 5.94751 ms\n[01/30/2021-02:12:42] [I] mean: 5.45704 ms\n[01/30/2021-02:12:42] [I] median: 5.45483 ms\n[01/30/2021-02:12:42] [I] percentile: 5.5603 ms at 99%\n[01/30/2021-02:12:42] [I] total compute time: 2.97955 s\n&&&& PASSED TensorRT.trtexec # trtexec --onnx=resnet50_pytorch.onnx --saveEngine=resnet_engine_pytorch.trt --explicitBatch --fp16\n"
]
],
[
[
"This will save our model as 'resnet_engine.trt'.",
"_____no_output_____"
],
[
"## 5. What TensorRT runtime am I targeting?\n\nNow, we have a converted our model to a TensorRT engine. Great! That means we are ready to load it into the native Python TensorRT runtime. This runtime strikes a balance between the ease of use of the high level Python APIs used in frameworks and the fast, low level C++ runtimes available in TensorRT.",
"_____no_output_____"
]
],
[
[
"import tensorrt as trt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\n\nf = open(\"resnet_engine_pytorch.trt\", \"rb\")\nruntime = trt.Runtime(trt.Logger(trt.Logger.WARNING)) \n\nengine = runtime.deserialize_cuda_engine(f.read())\ncontext = engine.create_execution_context()",
"_____no_output_____"
]
],
[
[
"Now allocate input and output memory, give TRT pointers (bindings) to it:",
"_____no_output_____"
]
],
[
[
"# need to set input and output precisions to FP16 to fully enable it\noutput = np.empty([BATCH_SIZE, 1000], dtype = target_dtype) \n\n# allocate device memory\nd_input = cuda.mem_alloc(1 * dummy_input_batch.nbytes)\nd_output = cuda.mem_alloc(1 * output.nbytes)\n\nbindings = [int(d_input), int(d_output)]\n\nstream = cuda.Stream()",
"_____no_output_____"
]
],
[
[
"Next, set up the prediction function.\n\nThis involves a copy from CPU RAM to GPU VRAM, executing the model, then copying the results back from GPU VRAM to CPU RAM:",
"_____no_output_____"
]
],
[
[
"def predict(batch): # result gets copied into output\n # transfer input data to device\n cuda.memcpy_htod_async(d_input, batch, stream)\n # execute model\n context.execute_async_v2(bindings, stream.handle, None)\n # transfer predictions back\n cuda.memcpy_dtoh_async(output, d_output, stream)\n # syncronize threads\n stream.synchronize()\n \n return output",
"_____no_output_____"
]
],
[
[
"Finally, let's time the function!\n\nNote that we're going to include the extra CPU-GPU copy time in this evaluation, so it won't be directly comparable with our TRTorch model performance as it also includes additional overhead.",
"_____no_output_____"
]
],
[
[
"print(\"Warming up...\")\n\npredict(dummy_input_batch)\n\nprint(\"Done warming up!\")",
"Warming up...\nDone warming up!\n"
],
[
"%%timeit\n\npred = predict(dummy_input_batch)",
"7.15 ms ± 4.73 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
],
[
[
"However, even with the CPU-GPU copy, this is still faster than our raw PyTorch model!",
"_____no_output_____"
],
[
"## Next Steps:",
"_____no_output_____"
],
[
"<h4> Profiling </h4>\n\nThis is a great next step for further optimizing and debugging models you are working on productionizing\n\nYou can find it here: https://docs.nvidia.com/deeplearning/tensorrt/best-practices/index.html\n\n<h4> TRT Dev Docs </h4>\n\nMain documentation page for the ONNX, layer builder, C++, and legacy APIs\n\nYou can find it here: https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html\n\n<h4> TRT OSS GitHub </h4>\n\nContains OSS TRT components, sample applications, and plugin examples\n\nYou can find it here: https://github.com/NVIDIA/TensorRT\n\n\n#### TRT Supported Layers:\n\nhttps://github.com/NVIDIA/TensorRT/tree/master/samples/opensource/samplePlugin\n\n#### TRT ONNX Plugin Example:\n\nhttps://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#layers-precision-matrix",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
]
|
cb757c709c99c883ebf9a31153648ec4c32a19e2 | 127,904 | ipynb | Jupyter Notebook | community/quantiles-w-custom-loss-func.ipynb | chrisgschon/M5Forecasting-Uncertainty | 4e009b2ea67a5f7f7564d985fb8be09e35eb7d4d | [
"MIT"
]
| null | null | null | community/quantiles-w-custom-loss-func.ipynb | chrisgschon/M5Forecasting-Uncertainty | 4e009b2ea67a5f7f7564d985fb8be09e35eb7d4d | [
"MIT"
]
| null | null | null | community/quantiles-w-custom-loss-func.ipynb | chrisgschon/M5Forecasting-Uncertainty | 4e009b2ea67a5f7f7564d985fb8be09e35eb7d4d | [
"MIT"
]
| null | null | null | 34.116831 | 1,739 | 0.551437 | [
[
[
"Added custom loss function base on @kyakvolev 's work. Credit to the author.",
"_____no_output_____"
],
[
"The forum post is here: https://www.kaggle.com/c/m5-forecasting-uncertainty/discussion/139515",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.auto import tqdm as tqdm\n\nfrom ipywidgets import widgets, interactive, interact\nimport ipywidgets as widgets\nfrom IPython.display import display\n\nimport os\nfor dirname, _, filenames in os.walk('data/raw'):\n for filename in filenames:\n print(os.path.join(dirname, filename))",
"data/raw/calendar.csv\ndata/raw/sell_prices.csv\ndata/raw/sales_train_validation.csv\ndata/raw/sample_submission.csv\n"
]
],
[
[
"## Reading data",
"_____no_output_____"
]
],
[
[
"train_sales = pd.read_csv('data/raw/sales_train_validation.csv')\ncalendar_df = pd.read_csv('data/raw/calendar.csv')\nsubmission_file = pd.read_csv('data/raw/sales_train_validation.csv')\nsell_prices = pd.read_csv('data/raw/sample_submission.csv')",
"_____no_output_____"
]
],
[
[
"## Variables to help with aggregation",
"_____no_output_____"
]
],
[
[
"total = ['Total']\ntrain_sales['Total'] = 'Total'\ntrain_sales['state_cat'] = train_sales.state_id + \"_\" + train_sales.cat_id\ntrain_sales['state_dept'] = train_sales.state_id + \"_\" + train_sales.dept_id\ntrain_sales['store_cat'] = train_sales.store_id + \"_\" + train_sales.cat_id\ntrain_sales['store_dept'] = train_sales.store_id + \"_\" + train_sales.dept_id\ntrain_sales['state_item'] = train_sales.state_id + \"_\" + train_sales.item_id\ntrain_sales['item_store'] = train_sales.item_id + \"_\" + train_sales.store_id",
"_____no_output_____"
],
[
"val_eval = ['validation', 'evaluation']\n\n# creating lists for different aggregation levels\ntotal = ['Total']\nstates = ['CA', 'TX', 'WI']\nnum_stores = [('CA',4), ('TX',3), ('WI',3)]\nstores = [x[0] + \"_\" + str(y + 1) for x in num_stores for y in range(x[1])]\ncats = ['FOODS', 'HOBBIES', 'HOUSEHOLD']\nnum_depts = [('FOODS',3), ('HOBBIES',2), ('HOUSEHOLD',2)]\ndepts = [x[0] + \"_\" + str(y + 1) for x in num_depts for y in range(x[1])]\nstate_cats = [state + \"_\" + cat for state in states for cat in cats]\nstate_depts = [state + \"_\" + dept for state in states for dept in depts]\nstore_cats = [store + \"_\" + cat for store in stores for cat in cats]\nstore_depts = [store + \"_\" + dept for store in stores for dept in depts]\nprods = list(train_sales.item_id.unique())\nprod_state = [prod + \"_\" + state for prod in prods for state in states]\nprod_store = [prod + \"_\" + store for prod in prods for store in stores]",
"_____no_output_____"
],
[
"print(\"Departments: \", depts)\nprint(\"Categories by state: \", state_cats)",
"Departments: ['FOODS_1', 'FOODS_2', 'FOODS_3', 'HOBBIES_1', 'HOBBIES_2', 'HOUSEHOLD_1', 'HOUSEHOLD_2']\nCategories by state: ['CA_FOODS', 'CA_HOBBIES', 'CA_HOUSEHOLD', 'TX_FOODS', 'TX_HOBBIES', 'TX_HOUSEHOLD', 'WI_FOODS', 'WI_HOBBIES', 'WI_HOUSEHOLD']\n"
],
[
"quants = ['0.005', '0.025', '0.165', '0.250', '0.500', '0.750', '0.835', '0.975', '0.995']\ndays = range(1, 1913 + 1)\ntime_series_columns = [f'd_{i}' for i in days]",
"_____no_output_____"
]
],
[
[
"## Getting aggregated sales",
"_____no_output_____"
]
],
[
[
"def CreateSales(name_list, group):\n '''\n This function returns a dataframe (sales) on the aggregation level given by name list and group\n '''\n rows_ve = [(name + \"_X_\" + str(q) + \"_\" + ve, str(q)) for name in name_list for q in quants for ve in val_eval]\n sales = train_sales.groupby(group)[time_series_columns].sum() #would not be necessary for lowest level\n return sales",
"_____no_output_____"
],
[
"total = ['Total']\ntrain_sales['Total'] = 'Total'\ntrain_sales['state_cat'] = train_sales.state_id + \"_\" + train_sales.cat_id\ntrain_sales['state_dept'] = train_sales.state_id + \"_\" + train_sales.dept_id\ntrain_sales['store_cat'] = train_sales.store_id + \"_\" + train_sales.cat_id\ntrain_sales['store_dept'] = train_sales.store_id + \"_\" + train_sales.dept_id\ntrain_sales['state_item'] = train_sales.state_id + \"_\" + train_sales.item_id\ntrain_sales['item_store'] = train_sales.item_id + \"_\" + train_sales.store_id",
"_____no_output_____"
],
[
"#example usage of CreateSales\nsales_by_state_cats = CreateSales(state_cats, 'state_cat')\nsales_by_state_cats",
"_____no_output_____"
]
],
[
[
"## Getting quantiles adjusted by day-of-week",
"_____no_output_____"
]
],
[
[
"def CreateQuantileDict(name_list = stores, group = 'store_id' ,X = False):\n '''\n This function writes creates sales data on given aggregation level, and then writes predictions to the global dictionary my_dict\n '''\n sales = CreateSales(name_list, group)\n sales = sales.iloc[:, 2:] #starting from d_3 because it is a monday, needed to make daily_factors work\n sales_quants = pd.DataFrame(index = sales.index)\n for q in quants:\n sales_quants[q] = np.quantile(sales, float(q), axis = 1)\n full_mean = pd.DataFrame(np.mean(sales, axis = 1))\n daily_means = pd.DataFrame(index = sales.index)\n for i in range(7):\n daily_means[str(i)] = np.mean(sales.iloc[:, i::7], axis = 1)\n daily_factors = daily_means / np.array(full_mean)\n\n daily_factors = pd.concat([daily_factors, daily_factors, daily_factors, daily_factors], axis = 1)\n daily_factors_np = np.array(daily_factors)\n\n factor_df = pd.DataFrame(daily_factors_np, columns = submission_file.columns[1:])\n factor_df.index = daily_factors.index\n\n for i,x in enumerate(tqdm(sales_quants.index)):\n for q in quants:\n v = sales_quants.loc[x, q] * np.array(factor_df.loc[x, :])\n if X:\n my_dict[x + \"_X_\" + q + \"_validation\"] = v\n my_dict[x + \"_X_\" + q + \"_evaluation\"] = v\n else:\n my_dict[x + \"_\" + q + \"_validation\"] = v\n my_dict[x + \"_\" + q + \"_evaluation\"] = v",
"_____no_output_____"
],
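[
"# Toy illustration (not part of the pipeline) of the day-of-week adjustment used in CreateQuantileDict:\n# per-weekday means divided by the overall mean give 7 factors that scale a flat quantile up on\n# high-sale days and down on low-sale days. The numbers below are made up purely for illustration.\ntoy = pd.Series([10, 12, 11, 9, 13, 20, 22] * 4)  # 4 weeks of weekend-heavy sales\ntoy_daily_means = pd.Series([toy[i::7].mean() for i in range(7)])\ntoy_factors = toy_daily_means / toy.mean()\ntoy_factors",
"_____no_output_____"
],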
[
"my_dict = {}\n#adding prediction to my_dict on all 12 aggregation levels\nCreateQuantileDict(total, 'Total', X=True) #1\nCreateQuantileDict(states, 'state_id', X=True) #2\nCreateQuantileDict(stores, 'store_id', X=True) #3\nCreateQuantileDict(cats, 'cat_id', X=True) #4\nCreateQuantileDict(depts, 'dept_id', X=True) #5\nCreateQuantileDict(state_cats, 'state_cat') #6\nCreateQuantileDict(state_depts, 'state_dept') #7\nCreateQuantileDict(store_cats, 'store_cat') #8\nCreateQuantileDict(store_depts, 'store_dept') #9\nCreateQuantileDict(prods, 'item_id', X=True) #10\nCreateQuantileDict(prod_state, 'state_item') #11\nCreateQuantileDict(prod_store, 'item_store') #12",
"_____no_output_____"
],
[
"total",
"_____no_output_____"
]
],
[
[
"## Creating valid prediction df from my_dict",
"_____no_output_____"
]
],
[
[
"pred_df = pd.DataFrame(my_dict)\npred_df = pred_df.transpose()\npred_df_reset = pred_df.reset_index()\nfinal_pred = pd.merge(pd.DataFrame(submission_file.id), pred_df_reset, left_on = 'id', right_on = 'index')\ndel final_pred['index']\nfinal_pred = final_pred.rename(columns={0: 'F1', 1: 'F2', 2: 'F3', 3: 'F4', 4: 'F5', 5: 'F6', 6: 'F7', 7: 'F8', 8: 'F9',\n 9: 'F10', 10: 'F11', 11: 'F12', 12: 'F13', 13: 'F14', 14: 'F15', 15: 'F16',\n 16: 'F17', 17: 'F18', 18: 'F19', 19: 'F20', 20: 'F21', 21: 'F22', \n 22: 'F23', 23: 'F24', 24: 'F25', 25: 'F26', 26: 'F27', 27: 'F28'})\nfinal_pred = final_pred.fillna(0)",
"_____no_output_____"
],
[
"for i in range(1,29):\n final_pred['F'+str(i)] *= 1.170\nfinal_pred.to_csv('return_of_the_blend.csv', index=False)",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
]
|
cb75902e0aa3170cb02b2984725a5f061cd45dca | 1,397 | ipynb | Jupyter Notebook | src/pipeline/FirebaseTest.ipynb | HalmonLui/hack-the-north-2019 | 0f44774b3404c651cc2a1667d2d8cafaba56587b | [
"MIT"
]
| null | null | null | src/pipeline/FirebaseTest.ipynb | HalmonLui/hack-the-north-2019 | 0f44774b3404c651cc2a1667d2d8cafaba56587b | [
"MIT"
]
| null | null | null | src/pipeline/FirebaseTest.ipynb | HalmonLui/hack-the-north-2019 | 0f44774b3404c651cc2a1667d2d8cafaba56587b | [
"MIT"
]
| 1 | 2020-04-24T23:33:18.000Z | 2020-04-24T23:33:18.000Z | 21.166667 | 82 | 0.549034 | [
[
[
"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n# Use a service account\ncred = credentials.Certificate('./serviceAccount.json')\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()",
"_____no_output_____"
],
[
"doc_ref = db.collection(u'users')\ndoc_ref.add({u'name': u'test', u'added': u'just now'})",
"_____no_output_____"
]
]
]
| [
"code"
]
| [
[
"code",
"code"
]
]
|
cb7591287212c604e56b592cf6d8d663bdece595 | 212,839 | ipynb | Jupyter Notebook | notebooks/nflDataOrg.ipynb | arw13/Fantasy-Football-Analysis | 17a502066c1a468ce421f913564613cd806f51f5 | [
"MIT"
]
| null | null | null | notebooks/nflDataOrg.ipynb | arw13/Fantasy-Football-Analysis | 17a502066c1a468ce421f913564613cd806f51f5 | [
"MIT"
]
| null | null | null | notebooks/nflDataOrg.ipynb | arw13/Fantasy-Football-Analysis | 17a502066c1a468ce421f913564613cd806f51f5 | [
"MIT"
]
| null | null | null | 36.049966 | 124 | 0.234393 | [
[
[
"# Analysis of NFL csv data for analysis",
"_____no_output_____"
]
],
[
[
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom math import pi\n# import seaborn as sns\n# import matplotlib as plt",
"_____no_output_____"
],
[
"data_dir = '../seasonData/'",
"_____no_output_____"
]
],
[
[
"use 2009 data as a test set",
"_____no_output_____"
]
],
[
[
"df_2009 = pd.read_csv(data_dir+'season2009.csv')\ndf_2009.head()",
"_____no_output_____"
]
],
[
[
"remove defensive players",
"_____no_output_____"
]
],
[
[
"df_2009 = df_2009[df_2009.defense_ast.isnull()]\n# df_2009.head()",
"_____no_output_____"
]
],
[
[
"Remove punters",
"_____no_output_____"
]
],
[
[
"df_2009 = df_2009[df_2009.punting_avg.isnull()]",
"_____no_output_____"
]
],
[
[
"remove kicking stats",
"_____no_output_____"
]
],
[
[
"df_2009 = df_2009[df_2009.kicking_xpb.isnull()]",
"_____no_output_____"
]
],
[
[
"remove defensive columns",
"_____no_output_____"
]
],
[
[
"list(df_2009)",
"_____no_output_____"
],
[
"df_2009 = df_2009.fillna(0)",
"_____no_output_____"
]
],
[
[
"Decided to remove kicking because it could confuse the model?",
"_____no_output_____"
]
],
[
[
"df_2009 = df_2009.drop(labels=['home', 'pos','defense_ast', 'defense_ffum', 'defense_int', 'defense_sk', \n 'defense_tkl','punting_avg', 'punting_i20', 'punting_lng', 'punting_pts', \n 'punting_yds', 'puntret_avg', 'puntret_lng', 'puntret_ret','kickret_avg', \n 'kickret_lng', 'kickret_ret', 'rushing_twopta', 'receiving_twopta','kicking_fga',\n 'kicking_fgm', 'kicking_fgyds', 'kicking_totpfg', 'kicking_xpa', 'kicking_xpb',\n 'kicking_xpmade', 'kicking_xpmissed', 'kicking_xptot'], axis =1)\n",
"_____no_output_____"
]
],
[
[
"One hot encoding for players team",
"_____no_output_____"
]
],
[
[
"df_2009 = pd.get_dummies(df_2009, columns=[\"team\"])",
"_____no_output_____"
],
[
"df_2009.shape",
"_____no_output_____"
]
],
[
[
"Calculate score for each year",
"_____no_output_____"
]
],
[
[
"### Column names used for scoring \n\n'fumbles_lost', 'fumbles_rcv', 'fumbles_tot', 'fumbles_trcv', 'fumbles_yds', 'kicking_fga', 'kicking_fgm', \n 'kicking_fgyds', 'kicking_totpfg', 'kicking_xpa', 'kicking_xpb', 'kicking_xpmade', 'kicking_xpmissed',\n 'kicking_xptot', 'kickret_tds', 'passing_att', 'passing_cmp', 'passing_ints', 'passing_tds',\n 'passing_twopta','passing_twoptm', 'passing_yds', 'puntret_tds', 'receiving_lng', 'receiving_rec', \n 'receiving_tds', 'receiving_twopta', 'receiving_twoptm', 'receiving_yds', 'rushing_att', 'rushing_lng',\n 'rushing_tds','rushing_twopta', 'rushing_twoptm', 'rushing_yds'",
"_____no_output_____"
]
],
[
[
"df_2009.fillna(0)",
"_____no_output_____"
],
[
"# passing yds\npoints = df_2009['passing_yds'].apply(lambda x: x/25)\n# passing TDs\npoints = points + df_2009['passing_tds'].apply(lambda x: x*4)\n# interceptions\npoints = points + df_2009['passing_ints'].apply(lambda x: x*-1)\n# rushing yards\npoints = points + df_2009['rushing_yds'].apply(lambda x: x/10)\n# rushing TDs\npoints = points + df_2009['rushing_tds'].apply(lambda x: x*6)\n# receiving yards\npoints = points + df_2009['receiving_yds'].apply(lambda x: x/10)\n# receiving tds\npoints = points + df_2009['receiving_tds'].apply(lambda x: x*6)\n# return tds\npoints = points + df_2009['kickret_tds'].apply(lambda x: x*10) + df_2009['puntret_tds'].apply(lambda x: x*10)\n# 2 pt convs\npoints = points + df_2009['receiving_twoptm'].apply(lambda x: x*2) + df_2009['rushing_twoptm'].apply(lambda x: x*2)\n# Fumbles lost \npoints = points + df_2009['fumbles_lost'].apply(lambda x: x*-2)",
"_____no_output_____"
],
[
"df_2009['FtsyPts'] = points",
"_____no_output_____"
],
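[
"# Quick check of the new fantasy-points column (a sketch): season totals per player,\n# assuming the 'id' column identifies a player across game rows.\nseason_totals = df_2009.groupby('id')['FtsyPts'].sum().sort_values(ascending=False)\nseason_totals.head(10)",
"_____no_output_____"
],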
[
"df_2009.sort_values(by='id')\n",
"_____no_output_____"
],
[
"df_2010 = pd.read_csv('../sortedSeasonData/sortedSeason2010.csv')",
"_____no_output_____"
]
],
[
[
"Sort tags and see if they match",
"_____no_output_____"
]
],
[
[
"df_2010.sort_values(by='id')",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
]
|
cb75a2e0d6e9838de4c09f26e41c14c6467d0885 | 24,069 | ipynb | Jupyter Notebook | Week_3/week_3.ipynb | MeganStalker/Advanced_Practical_Chemistry_Year_3 | f4dc048890d59822d1f4041c73e7dcb4aaadb5e7 | [
"MIT"
]
| null | null | null | Week_3/week_3.ipynb | MeganStalker/Advanced_Practical_Chemistry_Year_3 | f4dc048890d59822d1f4041c73e7dcb4aaadb5e7 | [
"MIT"
]
| null | null | null | Week_3/week_3.ipynb | MeganStalker/Advanced_Practical_Chemistry_Year_3 | f4dc048890d59822d1f4041c73e7dcb4aaadb5e7 | [
"MIT"
]
| 1 | 2020-10-08T09:54:46.000Z | 2020-10-08T09:54:46.000Z | 42.827402 | 461 | 0.636877 | [
[
[
"# Week 3 \n## Introduction to Solid State ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport subprocess\nfrom polypy.read import History\nfrom polypy.msd import MSD\nfrom polypy import plotting\n\ndef get_diffusion(file, atom):\n \n with open(file) as f:\n y = False\n for line in f:\n if str(\"atom D \") in line:\n y = True\n if y == True and str(atom) in line:\n d = line.split()\n break\n return d",
"_____no_output_____"
]
],
[
[
"# Background#\n\n\nNow that you are familiar with molecular dynamics, you are now going to use it to tackle some real world problems. \n\nThe transport properties of a material determine many properties that are utilised for modern technological applications. For example, solid oxide fuel cell (SOFCs), which are an alternative to batteries, materials are dependent on the movement of charge carriers through the solid electrolyte. Another example are nuclear fuel materials which oxidise and fall apart - this corrosive behaviour is dependent on the diffusion of oxygen into the lattice. \n\nDue to the importance of the transport properties of these materials, scientists and engineers spend large amounts of their time tring to optomise these properties using different stoichiometries, introducing defects and by using different syntheisis techniques.",
"_____no_output_____"
],
[
"# Aim and Objectives #\n\nThe **Aim** of the next **five weeks** is to **investigate** the transport properties of a simple fluorite material - CaF$_2$. \n\nThe **first objective** is to **investigate** how the transport properties of CaF$_2$ are affected by temperature\n\nThe **second objective** is to **investigate** how the transport properties of CaF$_2$ are affected by structural defects (Schottky and Frenkel)\n\nThe **third objective** is to **investigate** how the transport properties of CaF$_2$ are affected by chemcial dopants (e.g. different cations)\n\nA rough breakdown looks as follows:\n\n**Week 3** \n- Molecular dynamics simulations of stoichiomteric CaF$_2$\n\n\n**Week 4**\n- Molecular dynamics simulations of CaF$_2$ containing Schottky defects \n\n\n**Week 5**\n- Molecular dynamics simulations of CaF$_2$ containing Frenkel defects \n\n\n**Week 6**\n- Molecular dynamics simulations of CaF$_2$ containing various dopants \n\n\n**Week 7**\n- Molecular dynamics simulations of CaF$_2$ containing various dopants \n \n\nBy these **five weeks** you will be able to:\n\n- **Perform** molecular dynamics simulations at different temperatures\n- **Manipulate** the input files\n- **Adjust** the ensemble for the simulation\n- **Examine** the volume and energy of different simulations\n- **Apply** VMD to visualize the simulation cell and evaluate radial distribution - coefficients\n\nThe **Aim** of this **week** (week 3) is to **investigate** the temperature-dependence of the transport properties of a simple fluorite material CaF$_2$ using molecular dynamics (MD). \n\nThe **first objective** is to **familiarise** yourself with the molecular simulation software package <code>DL_POLY</code>\n\nThe **second objective** is to **complete** a tutorial which demonstrates how to calculate diffusion coefficients\n\nThe **third objective** is to is to **complete** a tutorial which demonstrates how to **calculate** the activation energy barrier of F diffusion ",
"_____no_output_____"
],
[
"## Introduction to DL_POLY\n\n<code>DL_POLY</code> is a molecular dynamics (MD) program maintained by Daresbury laboratories. In contrast to <code>pylj</code>, <code>DL_POLY</code> is a three-dimensional MD code that is used worldwide by computational scientists for molecular simulation, but it should be noted that the theory is exactly the same and any understanding gained from <code>pylj</code> is completely applicable to <code>DL_POLY</code>. \n\nFor the next five weeks you will use <code>DL_POLY</code> to run short MD simulations on CaF$_2$. You first need to understand the input files required for <code>DL_POLY</code>.\n\n<code>**CONTROL**</code>\n\nThis is the file that contains all of the simulation parameters, e.g. simulation temperature, pressure, number of steps etc.\n\n<code>**CONFIG**</code>\n\nThis is the file that contains the structure - i.e. the atomic coordinates of each atom.\n\n<code>**FIELD**</code> \n\nThis is the file that contains the force field or potential model e.g. Lennard-Jones. ",
"_____no_output_____"
],
[
"# Exercise 1: Setting Up an MD Simulation#\n\nFirst, we will use <code>METADISE</code> to produce <code>DL_POLY</code> input files.\n\nContained within the folder <code>Input/</code> you will find a file called <code>input.txt</code>. \n\nThis is the main file that you will interact with over the next five weeks and is the input file for <code>METADISE</code> which generates the 3 <code>DL_POLY</code> input files: <code>FIELD</code>, <code>CONTROL</code> and <code>CONFIG</code>. \n\nEssentially it is easier to meddle with <code>input.txt</code> than it is to meddle with the 3 <code>DL_POLY</code> files everytime you want to change something. \n\nTo run <code>METADISE</code> we will use the <code>subprocess</code> <code>python</code> module. \n\nTo use <code>subprocess</code> - specify what program you want to run and the file that you want to run it in, you will need to ensure the file path is correct. \n\nTo **generate** the 3 <code>DL_POLY</code> input files: <code>FIELD</code>, <code>CONTROL</code> and <code>CONFIG</code>, **run** the cell below:\n\n#### It is essential that the codes that were downloaded from [here](https://people.bath.ac.uk/chsscp/teach/adv.bho/progs.zip) are in the Codes/ folder in the parent directory, or this following cell will crash. ",
"_____no_output_____"
]
],
[
[
"subprocess.call('../Codes/metadise.exe', cwd='Input/')\nos.rename('Input/control_o0001.dlp', 'Input/CONTROL')\nos.rename('Input/config__o0001.dlp', 'Input/CONFIG')\nos.rename('Input/field___o0001.dlp', 'Input/FIELD')",
"_____no_output_____"
]
],
[
[
"Now you should have a <code>CONFIG</code>, <code>CONTROL</code> and <code>FIELD</code> file within the <code>Input/</code> directory. \n\nIn theory you could just call the <code>DL_POLY</code> program in this directory and your simulation would run. \n\nHowever, we need to tweak the <code>CONTROL</code> file in order to set up our desired simulation. \n\n1. **Make** a new subdirectory in the <code>week 3</code> directory named <code>\"Example/\"</code> and copy <code>CONFIG</code>, <code>CONTROL</code> and <code>FIELD</code> to that subdirectory. \n\n2. Now **edit** the <code>CONTROL</code> file to change the following: \n\n<code>Temperature 300 ---> Temperature 1500 \nSteps 5001 ---> Steps 40000 \nensemble nve ---> ensemble npt hoover 0.1 0.5 \ntrajectory nstraj= 1 istraj= 250 keytrj=0 ---> trajectory nstraj= 0 istraj= 100 keytrj=0</code> \n\n3.Now your simulation is ready, **check** the structure before you run the simulation. You can view the <code>CONFIG</code> file in three dimensions using the VESTA program \n\nIt is always good to **check** your structure before (<code>CONFIG</code>) and after (<code>REVCON</code>) the simulation. \nYou can view the <code>CONFIG</code> and <code>REVCON</code> files in three dimensions using the <code>VESTA</code> program. <code>VESTA</code> can generate nice pictures which will look very good in a lab report. \n\n<center>\n <br>\n <img src=\"./figures/vesta.png\\\" width=\\\"400px\\\">\n <i>Figure 1. Fluorite CaF$_2$ unit cell visualised in VESTA.</i>\n <br>\n</center>",
"_____no_output_____"
],
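[
"# Optional helper (a sketch, not part of the original instructions): apply the CONTROL edits\n# listed above from Python instead of a text editor. It assumes CONFIG, CONTROL and FIELD have\n# already been copied into Example/ and that the exact strings below appear in that CONTROL\n# file - open the file afterwards to check the changes were made.\nreplacements = {\n    'Temperature 300': 'Temperature 1500',\n    'Steps 5001': 'Steps 40000',\n    'ensemble nve': 'ensemble npt hoover 0.1 0.5',\n    'trajectory nstraj= 1 istraj= 250 keytrj=0': 'trajectory nstraj= 0 istraj= 100 keytrj=0',\n}\nwith open('Example/CONTROL') as f:\n    control = f.read()\nfor old, new in replacements.items():\n    control = control.replace(old, new)\nwith open('Example/CONTROL', 'w') as f:\n    f.write(control)",
"_____no_output_____"
],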
[
"# Exercise 2: Running an MD Simulation\n\nNow we have <code>DL_POLY</code> input files, we will run an MD simulation using <code>DL_POLY</code>.\n\n1. **Run** <code>DL_POLY</code> from within the notebook use the command below \n\nKeep in mind that this simulation will take 20 or so minutes so be patient. \n\nIf you are not comfortable with running things through this notebook then you can copy and paste the <code>dlpoly_classic.exe</code> executable into the Example/ sub directory and then **double click** the <code>.exe</code> file",
"_____no_output_____"
]
],
[
[
"subprocess.call('../Codes/dlpoly_classic.exe', cwd='Example/')",
"_____no_output_____"
]
],
[
[
"# Exercise 3: Inspecting an MD Simulation\n\nNow we have run an MD simulation using <code>DL_POLY</code> we can analyse the data using the <code>VESTA</code>\n\n\nOnce <code>DL_POLY</code> has completed you will find several files relating to your simulaton. \n\n<code> **HISTORY** </code>\n\nThis file contains the configuration of your system at each step during the simulation, known as a _trajectory_. You can view this as a movie using <code>VMD</code> \n\n<code> **STATIS** </code>\n\nContains the statistics at each step of the simulation.\n\n<code> **OUTPUT** </code>\n\nContains various properties of the simulation. \n\n<code> **REVCON** </code> \n\nThis is the configuration at the end of the simulation. Can be viewed in <code>VESTA</code>. **Check** to see how it has changed, compare it to the <code>CONFIG</code> file. \n",
"_____no_output_____"
],
[
"# Exercise 4: Analysing the Diffusion Properties\n\n\nNow we have inspected the final structure from the simulation, we can calculate the diffusion coefficient.\n\n\n## Mean Squared Displacements - Calculating Diffusion Coefficients\n\nAs we have seen molecules in liquds, gases and solids do not stay in the same place and move constantly. Think about a drop of dye in a glass of water, as time passes the dye distributes throughout the water. This process is called diffusion and is common throughout nature. \n\nUsing the dye as an example, the motion of a dye molecule is not simple. As it moves it is jostled by collisions with other molecules, preventing it from moving in a straight path. If the path is examined in close detail, it will be seen to be a good approximation to a _random walk_. \n\nIn mathmatics, a random walk is a series of steps, each taken in a random direction. This was analysed by Albert Einstein in a study of _Brownian motion_ and he showed that the mean square of the distance travelled by a particle following a random walk is proportional to the time elapsed, as given by: \n\\begin{align}\n\\Big \\langle r^2 \\big \\rangle & = 6 D_t + C \n\\end{align}\n\nwhere $\\Big \\langle r^2 \\big \\rangle$ is the mean squared distance, t is time, D is the diffusion rate and C is a constant. \n\n## What is the Mean Squared Displacement?\n\nGoing back to the example of the dye in water, lets assume for the sake of simplicity that we are in one dimension. Each step can either be forwards or backwards and we cannot predict which.\nFrom a given starting position, what distance is our dye molecule likely to travel after 1000 steps? This can be determined simply by adding together the steps, taking into account the fact that steps backwards subtract from the total, while steps forward add to the total. Since both forward and backward steps are equally probable, we come to the surprising conclusion that the probable distance travelled sums up to zero.\n\nBy adding the square of the distance we will always be adding positive numbers to our total which now increases linearly with time. Based upon equation 1 it should now be clear that a plot of $\\Big \\langle r^2 \\big \\rangle$ vs time with produce a line, the gradient of which is equal to 6D. Giving us direct access to the diffusion coefficient of the system. \n\nLets try explore this with an example. \n\n1. **Run** a short <code>DL_POLY</code> simulation on the input files provided.",
"_____no_output_____"
],
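[
"# Illustrative aside (not required for the exercise): a quick 1D random-walk simulation showing\n# that the mean squared displacement grows linearly with the number of steps, which is the\n# relationship the MSD analysis below relies on.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrng = np.random.default_rng(0)\nsteps = rng.choice([-1, 1], size=(1000, 500))   # 1000 independent walkers, 500 steps each\npositions = np.cumsum(steps, axis=1)\nmsd = np.mean(positions**2, axis=0)\n\nplt.plot(msd)\nplt.xlabel('Number of steps (time)')\nplt.ylabel('Mean squared displacement')\nplt.show()",
"_____no_output_____"
],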
[
"You will run a small MSD program called <code>MSD.py</code> to analyse your simulation results.\n\nFirst you need to **read** in the data. The <code>HISTORY</code> file contains a list of the atomic coordiantes held by the atoms during the simulation. \n\n2.**Run** the cell below to read the <code>HISTORY</code> file into the <code>Jupyter Notebook</code>",
"_____no_output_____"
]
],
[
[
"## Provide the path to the simulation and the atom that you want data for.\ndata = History(\"Example/HISTORY\", \"F\")",
"_____no_output_____"
]
],
[
[
"<code>data</code> is a class object containing information about the trajectory. \nMore information can be found here https://polypy.readthedocs.io/en/latest/reading_data.html and here https://github.com/symmy596/Polypy/blob/master/polypy/read.py .\n\nThe next step is to calculate the MSD. \n\n3.**Run** the cell below to calculate the MSD of the chosen atom throughout the course of the simulation",
"_____no_output_____"
]
],
[
[
"# Run the MSD calculation\n\nf_msd = MSD(data.trajectory, sweeps=2)\n\noutput = f_msd.msd()",
"_____no_output_____"
]
],
[
[
"The MSD calculation function returns an object with imformation about the MSD calculation. \n\nMore information and a full tutorial on this functionality can be found here https://polypy.readthedocs.io/en/latest/msd_tutorial.html\n\n4.**Run** the cell below to give plots of the MSD which have a nice linear relationship. ",
"_____no_output_____"
]
],
[
[
"ax = plotting.msd_plot(output)\nplt.show()",
"_____no_output_____"
],
[
"print(\"Three Dimensional Diffusion Coefficient\", output.xyz_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in X\", output.x_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in Y\", output.y_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in Z\", output.z_diffusion_coefficient())",
"_____no_output_____"
]
],
[
[
"# Exercise 5: The Effect of Simulation Length\n\nNow we have calculated the diffusion coefficient, we can investigate the influence of simulation length on the diffusion coefficient.\n\nIt is important to consider the length of your simulation (the number of steps). \n\n1. **Create** a new folder called <code>\"Example_2/\"</code>\n2. **Copy** the <code>CONFIG</code>, <code>FIELD</code> and <code>CONTROL</code> files from your previous simulation \n3. **Change** the number of steps to 10000\n4. **Rerun** the simulation by **running** the cell below",
"_____no_output_____"
]
],
[
[
"subprocess.call('../Codes/dlpoly_classic.exe', cwd='Example_2/')",
"_____no_output_____"
]
],
[
[
"5.**Run** the cell below to calculate and plot the MSD of the chosen atom throughout the course of the simulation",
"_____no_output_____"
]
],
[
[
"data = History(\"Example_2/HISTORY\", \"F\")\n\n# Run the MSD calculation\nf_msd = MSD(data.trajectory, sweeps=2)\n\noutput = f_msd.msd()\nax = plotting.msd_plot(output)\nplt.show()",
"_____no_output_____"
],
[
"print(\"Three Dimensional Diffusion Coefficient\", output.xyz_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in X\", output.x_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in Y\", output.y_diffusion_coefficient())\nprint(\"One Dimensional Diffusion Coefficient in Z\", output.z_diffusion_coefficient())",
"_____no_output_____"
]
],
[
[
"You will hopefully see that your MSD plot has become considerably less linear. This shows that your simulation has not run long enough and your results will be unrealiable. \n\nYou will hopefully also see a change to the value of your diffusion coefficient. \n**The length of your simulation is something that you should keep in mind for the next 5 weeks.** ",
"_____no_output_____"
],
[
"# Exercise 6: Calculating the Activation Energy",
"_____no_output_____"
],
[
"Now we have investigated the influence of simulation length on the diffusion coefficient, we can calculate the activation energy for F diffusion by applying the Arrhenius equation. \n\nTo apply the Arrhensius equation, diffusion coefficients from a range of temperatures are required. \n\nCommon sense and chemical intuition suggest that the higher the temperature, the faster a given chemical reaction will proceed. Quantitatively, this relationship between the rate a reaction proceeds and the temperature is determined by the Arrhenius Equation. \n\nAt higher temperatures, the probability that two molecules will collide is higher. This higher collision rate results in a higher kinetic energy, which has an effect on the activation energy of the reaction. The activation energy is the amount of energy required to ensure that a reaction happens. \n \n\\begin{align}\nk = A * e^{(-Ea / RT)}\n\\end{align}\n \nwhere k is the rate coefficient, A is a constant, Ea is the activation energy, R is the universal gas constant, and T is the temperature (in kelvin).",
"_____no_output_____"
],
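[
"# Minimal sketch of the Arrhenius analysis you will carry out in Exercise 7 (assumptions flagged\n# below). It assumes you have collected fluorine diffusion coefficients D at a set of temperatures\n# T in kelvin; the placeholder arrays must be replaced with your own values before it does anything.\nimport numpy as np\n\nT = np.array([])   # your simulation temperatures in K\nD = np.array([])   # your fluorine diffusion coefficients at those temperatures\n\nif len(T) > 0:\n    # D = D0 * exp(-Ea / RT), so ln D plotted against 1/T is a straight line with slope -Ea/R\n    slope, intercept = np.polyfit(1.0 / T, np.log(D), 1)\n    Ea = -slope * 8.314  # J/mol\n    print('Activation energy: %.1f kJ/mol' % (Ea / 1000))",
"_____no_output_____"
],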
[
"# Exercise 7: Putting it All Together\n\n\nUsing what you have learned through the tutorials above, your task this week is to calculate the activation energy of F diffusion in CaF$_2$. \n\n1. You will need to **select** a temperature range and carry out simulations at different temperatures within that range. \n\n#### Questions to answer:\n\n- In what temperature range is CaF$_2$ completely solid i.e. no diffusion?\n- In what range is fluorine essentially liquid i.e. fluorine diffusion with no calcium diffusion?\n- What is the melting temperature of CaF$_2$?\n- Plot an Arrhenius plot and determine the activation energies in temperature range - You will need to rearange the equation. \n\n\nYou are encouraged to split the work up within your group and to learn how to view the simulation \"movie\" using VMD (Ask a demonstrator). VMD is a fantastic program that allows you to visualise your simulation, included below is a video showing a short snippet of an MD simulation of CaF$_2$. A single F atom has been highlighted to show that diffusion is occuring. ",
"_____no_output_____"
]
],
[
[
"%%HTML\n<div align=\"middle\">\n<video width=\"80%\" controls>\n <source src=\"./figures/VMD_example.mp4\" type=\"video/mp4\">\n</video></div>",
"_____no_output_____"
]
],
[
[
"Furthermore, VMD can also be used to generate images showing the entire trajectory of the simulation, e.g.\n\n\n<center>\n <br>\n <img src=\"./figures/CaF2.png\\\" width=\\\"400px\\\">\n <i>Figure 2. A figure showing all positions occupied by F during an MD simulation at 1500 K. F positions are shown in orange and Ca atoms are shown in green.</i>\n <br>\n</center>\n ",
"_____no_output_____"
],
[
"To save you time you can use the function declared at the start of this notebook to pull out a diffusion coefficient directly from the simulation output file. <code>MSD.py</code> is a small code to allow visualisation of the MSD plot but it is not neccesary every time you want the diffusion coefficient. \n\nIt is up to you how you organise/create your directories but it is reccomended that you start a new notebook. \n\nUse the commands/functions used in this notebook to: \n1. **Generate** your input files\n2. **Run** <code>DL_POLY</code>\n3. **Extract** the diffusion coefficient of F diffusion \n\nThen write your own code to:\n\n4. **Generate** an Arrhenius plot \n5. **Calculate** the activation energies of F diffusion\n\nIf you finish early then feel free to start the week 4 exercises. ",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
]
| [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
]
|
cb75a3c5c444d9ea98f247775c66bd483f4d0618 | 188,486 | ipynb | Jupyter Notebook | notebooks/archive/RadarIntergration_DefecateWeight0924.ipynb | JiajunSong629/download-project | 83e31b3db18b83add4c7293a3375adb61b0c59c3 | [
"MIT"
]
| null | null | null | notebooks/archive/RadarIntergration_DefecateWeight0924.ipynb | JiajunSong629/download-project | 83e31b3db18b83add4c7293a3375adb61b0c59c3 | [
"MIT"
]
| null | null | null | notebooks/archive/RadarIntergration_DefecateWeight0924.ipynb | JiajunSong629/download-project | 83e31b3db18b83add4c7293a3375adb61b0c59c3 | [
"MIT"
]
| null | null | null | 287.764885 | 75,556 | 0.915856 | [
[
[
"import pywt\nimport numpy as np\nimport pandas as pa\nimport sqlite3, os\nfrom skimage.restoration import denoise_wavelet\nimport matplotlib.pyplot as plt\nimport warnings\nimport ruptures as rpt\nfrom scipy.signal import savgol_filter, medfilt\nimport numpy as np\nimport pylab as pl\nfrom scipy.signal import hilbert\nfrom scipy import signal\n\n%matplotlib inline\nwarnings.filterwarnings(\"ignore\")\n\nDATA_PATH = \"data\"",
"_____no_output_____"
],
[
"ActualWeight = pa.read_excel(\"Seq2Seq/Actual_Weight_Urine_Stool_1736_1745.xlsx\")\nActualWeight['Total Weight (g)'] = ActualWeight.iloc[:, 1:].sum(axis = 1)\nActualWeight",
"_____no_output_____"
]
],
[
[
"# Defecation",
"_____no_output_____"
]
],
[
[
"def GetSensor(use_i,sensor_i):\n sql_s = \"SELECT timestamp_ms, value FROM data WHERE data_capture_id={} AND sensor_id={}\".format(use_i,sensor_i)\n conn = sqlite3.connect('data/toilet.db')\n cursor = conn.execute(sql_s)\n time_measurements = []\n distance_measurements = []\n for row in cursor:\n time_measurements.append(row[0])\n distance_measurements.append(row[1])\n #endfor\n data_t = (time_measurements,distance_measurements)\n return data_t\n#enddef\n\ndef cleanSensors(sensor1_t_l,sensor1_y_l,sensor2_t_l,sensor2_y_l):\n\n # get min / max of time-series\n #sensor1_t_l = data_d[1][0]\n #sensor2_t_l = data_d[2][0]\n #sensor1_y_l = data_d[1][1]\n #sensor2_y_l = data_d[2][1]\n min_t = min(min(sensor1_t_l),min(sensor2_t_l))\n max_t = max(max(sensor1_t_l),max(sensor2_t_l))\n\n # setup partitions\n step_t = 500\n min_floor_t = int(np.floor(min_t/step_t)*step_t)\n max_ceil_t = int(np.ceil(max_t/step_t)*step_t)\n \n step1_d = {}\n step2_d = {}\n for i in range(min_floor_t,max_ceil_t+step_t,step_t):\n step1_d[i] = []\n step2_d[i] = []\n #endfor\n\n # step through both and assign values to each partition\n for i in range(len(sensor1_t_l)):\n interval_t = int(np.floor(sensor1_t_l[i]/step_t)*step_t)\n step1_d[interval_t].append(sensor1_y_l[i])\n #endfor\n for i in range(len(sensor2_t_l)):\n interval_t = int(np.floor(sensor2_t_l[i]/step_t)*step_t)\n step2_d[interval_t].append(sensor2_y_l[i])\n #endfor\n\n # step through each partition and either take averages or set to nan\n clean1_d = {}\n for i in step1_d.keys():\n if(len(step1_d[i]) > 0):\n clean1_d[i] = np.mean(step1_d[i])\n #endfor\n clean1_sz = pa.Series(clean1_d)\n\n clean2_d = {}\n for i in step2_d.keys():\n if(len(step2_d[i]) > 0):\n clean2_d[i] = np.mean(step2_d[i])\n #endfor\n clean2_sz = pa.Series(clean2_d)\n \n return clean1_sz, clean2_sz\n\ndef GetTotalWeight(data_capture_id):\n data_d = {}\n data_d[2] = GetSensor(data_capture_id, 2) # seat scale\n data_d[3] = GetSensor(data_capture_id, 3) # foot scale\n \n #t0 = data_d[2][0][0]\n \n clean1_sz, clean2_sz = cleanSensors(data_d[2][0],data_d[2][1],data_d[3][0],data_d[3][1])\n seatScale_sz = clean1_sz/1000\n footScale_sz = clean2_sz/1000\n sumScale_sz = seatScale_sz + footScale_sz\n #sumScaleFiltered_sz = pd.Series(signal.medfilt(sumScale_sz, 11))\n \n sumScale_sz.index = (sumScale_sz.index - sumScale_sz.index[0])/1000\n #x_ix = sumScale_sz.index\n return sumScale_sz\n\ndef GetRadarSum(data_capture_id):\n\n data_fn = 'data/data_frames/data_capture_{}/radar_data.txt'.format(data_capture_id)\n data_f = open(data_fn,'rt')\n line_s = data_f.read()\n data_l = eval(line_s)\n\n # save array of images\n t0_sz = pa.Series(data_l[0]['data'])\n data_d = {}\n for j in range(len(data_l)):\n t = data_l[j]['timestamp_ms']\n j_sz = pa.Series(data_l[j]['data'][0])\n data_d[t] = j_sz\n #endfor\n data_df = pa.DataFrame(data_d)\n \n area_d = {}\n floor_i = 50\n ceil_i = 200\n for i in data_df.columns:\n sq_sz = (data_df[i])**2\n area_d[i] = sum(sq_sz.iloc[floor_i:ceil_i])\n #endfor\n area_sz = pa.Series(area_d)\n area_sz = area_sz / 1e9\n area_sz = area_sz - area_sz.median()\n t0 = data_l[0]['timestamp_ms']\n area_sz.index = (area_sz.index-t0)/1000 #\n \n return area_sz",
"_____no_output_____"
],
[
"def ApplyEnvelope(sz):\n analytic_signal = hilbert(sz)\n env_sz = pa.Series(np.abs(analytic_signal))\n env_sz.index = sz.index\n return env_sz\n\ndef GetValuesAboveThreshold(sz, threshold):\n return sz > threshold\n\ndef GetValuesBelowThreshold(sz, threshold):\n return sz < threshold\n\ndef ApplyMedianFilter(sz, window_size):\n filt_sz = pa.Series(signal.medfilt(sz, window_size))\n filt_sz.index = sz.index\n return filt_sz\n\ndef GetStartEndTimesOfBooleanSz(sz):\n ts = sz.index\n start_end_times = []\n \n i = 0\n while i < len(sz):\n if sz.values[i] == True:\n j = i\n while (j < len(sz)-1) and (sz.values[j+1] == True):\n j += 1\n start_end_times.append([ts[i], ts[j]])\n i = j + 1\n else:\n i += 1\n\n return start_end_times\n\ndef GetWeightChange(weight_sz, start_time, end_time):\n start_idx = (pa.Series(weight_sz.index) > start_time).idxmax() - 1\n end_idx = (pa.Series(weight_sz.index) > end_time).idxmax()\n #print(\"Weight at start time: {}\".format(weight_sz.iloc[start_idx]))\n #print(\"Weight at end time: {}\".format(weight_sz.iloc[end_idx]))\n return weight_sz.iloc[start_idx] - weight_sz.iloc[end_idx]\n\ndef GetWeightChangeMinMax(weight_sz, start_time, end_time):\n #start_idx = (pa.Series(weight_sz.index) > start_time).idxmax() - 1\n end_idx = (pa.Series(weight_sz.index) > end_time).idxmax()\n weight_sz_start_end = weight_sz[(weight_sz.index > start_time) & (weight_sz.index < end_time)]\n return max(weight_sz_start_end) - weight_sz.iloc[end_idx]\n\ndef RightExtendBooleanTrueValues(sz, extension_time):\n temp_sz = sz.copy()\n i = 1\n while i < len(temp_sz):\n if((temp_sz.values[i-1] == True) and (temp_sz.values[i] == False)):\n extension_end_time = temp_sz.index[i] + extension_time\n while (i < len(temp_sz)) and (temp_sz.index[i] < extension_end_time):\n temp_sz.values[i] = True\n i += 1\n i += 1\n return temp_sz\n\ndef LeftExtendBooleanTrueValues(sz, extension_time):\n temp_sz = sz.copy()\n i = len(temp_sz) - 2\n while i >= 0:\n if((temp_sz.values[i] == False) and (temp_sz.values[i+1] == True)):\n extension_end_time = temp_sz.index[i] - extension_time\n while (i >= 0) and (temp_sz.index[i] > extension_end_time):\n temp_sz.values[i] = True\n i -= 1\n i -= 1\n return temp_sz",
"_____no_output_____"
],
[
"def GetDefecationWeightLoss(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time):\n defecation_start_end_times = GetDefecationStartEndTimes(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time)\n defecation_weight_loss = 0\n \n total_weight_sz = GetTotalWeight(DATA_CAPTURE_ID)\n total_weight_filt_sz = ApplyMedianFilter(total_weight_sz, filter_window_size)\n \n for start_end in defecation_start_end_times:\n weight_loss = GetWeightChange(total_weight_filt_sz, start_end[0], start_end[1])\n if weight_loss > 0:\n defecation_weight_loss += weight_loss\n #print(\"Between {} and {}, weight loss:{}\\n\".format(start_end[0], start_end[1], weight_loss))\n return defecation_weight_loss\n\ndef GetDefecationStartEndTimes(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time):\n radar_sum_sz = GetRadarSum(DATA_CAPTURE_ID)\n radar_sum_env_sz = ApplyEnvelope(radar_sum_sz)\n radar_sum_env_filt_sz = ApplyMedianFilter(radar_sum_env_sz, filter_window_size)\n radar_sum_filt_sz = ApplyMedianFilter(radar_sum_sz, filter_window_size)\n radar_vals_above_threshold = GetValuesAboveThreshold(radar_sum_filt_sz, threshold)\n radar_vals_above_threshold = RightExtendBooleanTrueValues(radar_vals_above_threshold, extension_time)\n radar_vals_above_threshold = LeftExtendBooleanTrueValues(radar_vals_above_threshold, extension_time)\n defecation_start_end_times = GetStartEndTimesOfBooleanSz(radar_vals_above_threshold)\n \n return defecation_start_end_times",
"_____no_output_____"
],
[
"def PlotDefecationWeightRadar(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time):\n start_end_times = GetDefecationStartEndTimes(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time)\n \n total_weight_loss = GetDefecationWeightLoss(DATA_CAPTURE_ID, filter_window_size, threshold, extension_time)\n print(\"Predicted total: {}\".format(total_weight_loss))\n if DATA_CAPTURE_ID in ActualWeight.ID:\n print(\"Actual total: {}\".format(ActualWeight[ActualWeight.ID == DATA_CAPTURE_ID].iloc[:, 2].values[0]/1000))\n \n radar_sum_sz = GetRadarSum(DATA_CAPTURE_ID)\n total_weight_sz = GetTotalWeight(DATA_CAPTURE_ID)\n total_weight_filt_sz = ApplyMedianFilter(total_weight_sz, 5)\n \n fig, ax = plt.subplots(3, 1, figsize = (10, 6))\n ax[0].plot(total_weight_sz)\n ax[1].plot(total_weight_filt_sz)\n ax[2].plot(radar_sum_sz)\n \n ax[0].set_ylim(total_weight_sz.median()-0.5, total_weight_sz.median()+0.5)\n ax[1].set_ylim(total_weight_sz.median()-0.5, total_weight_sz.median()+0.5)\n \n for i in range(3):\n for start_end_time in start_end_times:\n ax[i].axvspan(start_end_time[0], start_end_time[1], alpha=0.5, color='orange')\n \n plt.show()",
"_____no_output_____"
],
[
"PlotDefecationWeightRadar(1763, 5, .15, 1.83)",
"Predicted total: 0.07450000000000045\n"
],
[
"PlotDefecationWeightRadar(1767, 5, .15, 1.83)",
"Predicted total: 0.18549999999999045\n"
]
],
[
[
"# Area under Radar vs Defecation Weight",
"_____no_output_____"
]
],
[
[
"from scipy.integrate import simps, trapz\nfrom scipy.stats.stats import pearsonr",
"_____no_output_____"
],
[
"def GetAreaUnderRadar(DATA_CAPTURE_ID):\n radar_sum_sz = GetRadarSum(DATA_CAPTURE_ID)\n x = np.array(radar_sum_sz.index)\n f = radar_sum_sz.values\n \n return simps(f, x), trapz(f, x)",
"_____no_output_____"
],
[
"data_captures = np.arange(1736, 1746)\nactual_defecate_weights = []\narea_under_radar = []\n\nfor DATA_CAPTURE_ID in data_captures:\n row = ActualWeight[ActualWeight.ID == DATA_CAPTURE_ID]\n actual_defecate_weights.append(row.iloc[:, 2].values[0] / 1000)\n area_under_radar.append(GetAreaUnderRadar(DATA_CAPTURE_ID))",
"_____no_output_____"
],
[
"area_under_radar = np.array(area_under_radar)\nactual_defecate_weights = np.array(actual_defecate_weights)",
"_____no_output_____"
],
[
"data_captures[further]",
"_____no_output_____"
],
[
"x, y = actual_defecate_weights, area_under_radar[:, 1]\nm, b = np.polyfit(x, y, 1)\nyhat = m * x + b\nfurther = np.abs(yhat - y).argsort()[-3:][::-1]\ncorr = pearsonr(x, y)\n\nplt.figure(figsize = (10, 6))\nplt.plot(x, y, '.')\nplt.plot(x[further], y[further], 'o')\nplt.plot(x, m * x + b)\nplt.xlabel(\"Actual Defecate Weights\")\nplt.ylabel(\"Area under Radar\")\nplt.title(\"$y = {:.2f}x + {:.2f}$, Cor = {:.3f}\".format(m, b, corr[0]));",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb75a5d7ca7ad6cd4aac58228bd354eb02e393d1 | 490,024 | ipynb | Jupyter Notebook | tests/j_notebooks/plotly iplot examples.ipynb | KarrLab/wc_sim | 5b0ee03c3d19193fa67a3797d4258b753e6bc576 | [
"MIT"
]
| 8 | 2018-03-27T21:35:25.000Z | 2022-01-18T08:32:20.000Z | tests/j_notebooks/plotly iplot examples.ipynb | KarrLab/wc_sim | 5b0ee03c3d19193fa67a3797d4258b753e6bc576 | [
"MIT"
]
| 114 | 2018-02-27T14:14:39.000Z | 2020-12-30T15:06:51.000Z | tests/j_notebooks/plotly iplot examples.ipynb | KarrLab/wc_sim | 5b0ee03c3d19193fa67a3797d4258b753e6bc576 | [
"MIT"
]
| 2 | 2019-04-05T16:17:28.000Z | 2020-05-17T12:55:20.000Z | 31.488498 | 54,220 | 0.400858 | [
[
[
"## Test",
"_____no_output_____"
]
],
[
[
"!pip install plotly cufflinks",
"Defaulting to user installation because normal site-packages is not writeable\nCollecting plotly\n Downloading plotly-4.14.1-py2.py3-none-any.whl (13.2 MB)\n\u001b[K |████████████████████████████████| 13.2 MB 1.4 MB/s eta 0:00:01 |█████████████████▋ | 7.3 MB 5.0 MB/s eta 0:00:02�██████▏| 12.8 MB 1.4 MB/s eta 0:00:01\n\u001b[?25hCollecting cufflinks\n Downloading cufflinks-0.17.3.tar.gz (81 kB)\n\u001b[K |████████████████████████████████| 81 kB 11.4 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: numpy>=1.9.2 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (1.14.3)\nRequirement already satisfied: pandas>=0.19.2 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (0.23.0)\nRequirement already satisfied: six>=1.9.0 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (1.11.0)\nCollecting colorlover>=0.2.1\n Downloading colorlover-0.3.0-py3-none-any.whl (8.9 kB)\nRequirement already satisfied: setuptools>=34.4.1 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (39.1.0)\nRequirement already satisfied: ipython>=5.3.0 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (6.4.0)\nRequirement already satisfied: ipywidgets>=7.0.0 in /anaconda3/lib/python3.6/site-packages (from cufflinks) (7.2.1)\nCollecting retrying>=1.3.3\n Downloading retrying-1.3.3.tar.gz (10 kB)\nRequirement already satisfied: pickleshare in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (0.7.4)\nRequirement already satisfied: simplegeneric>0.8 in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (0.8.1)\nRequirement already satisfied: pexpect in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (4.5.0)\nRequirement already satisfied: decorator in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (4.3.0)\nRequirement already satisfied: pygments in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (2.2.0)\nRequirement already satisfied: backcall in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (0.1.0)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.15 in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (1.0.15)\nRequirement already satisfied: appnope in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (0.1.0)\nRequirement already satisfied: traitlets>=4.2 in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (4.3.2)\nRequirement already satisfied: jedi>=0.10 in /anaconda3/lib/python3.6/site-packages (from ipython>=5.3.0->cufflinks) (0.12.0)\nRequirement already satisfied: ipykernel>=4.5.1 in /anaconda3/lib/python3.6/site-packages (from ipywidgets>=7.0.0->cufflinks) (4.8.2)\nRequirement already satisfied: nbformat>=4.2.0 in /anaconda3/lib/python3.6/site-packages (from ipywidgets>=7.0.0->cufflinks) (4.4.0)\nRequirement already satisfied: widgetsnbextension~=3.2.0 in /anaconda3/lib/python3.6/site-packages (from ipywidgets>=7.0.0->cufflinks) (3.2.1)\nRequirement already satisfied: jupyter_client in /anaconda3/lib/python3.6/site-packages (from ipykernel>=4.5.1->ipywidgets>=7.0.0->cufflinks) (5.2.3)\nRequirement already satisfied: tornado>=4.0 in /anaconda3/lib/python3.6/site-packages (from ipykernel>=4.5.1->ipywidgets>=7.0.0->cufflinks) (5.0.2)\nRequirement already satisfied: parso>=0.2.0 in /anaconda3/lib/python3.6/site-packages (from jedi>=0.10->ipython>=5.3.0->cufflinks) (0.2.0)\nRequirement already satisfied: ipython_genutils in 
/anaconda3/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets>=7.0.0->cufflinks) (0.2.0)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /anaconda3/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets>=7.0.0->cufflinks) (2.6.0)\nRequirement already satisfied: jupyter_core in /anaconda3/lib/python3.6/site-packages (from nbformat>=4.2.0->ipywidgets>=7.0.0->cufflinks) (4.4.0)\nRequirement already satisfied: python-dateutil>=2.5.0 in /anaconda3/lib/python3.6/site-packages (from pandas>=0.19.2->cufflinks) (2.7.3)\nRequirement already satisfied: pytz>=2011k in /anaconda3/lib/python3.6/site-packages (from pandas>=0.19.2->cufflinks) (2018.4)\nRequirement already satisfied: wcwidth in /anaconda3/lib/python3.6/site-packages (from prompt-toolkit<2.0.0,>=1.0.15->ipython>=5.3.0->cufflinks) (0.1.7)\nRequirement already satisfied: notebook>=4.4.1 in /anaconda3/lib/python3.6/site-packages (from widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (5.5.0)\nRequirement already satisfied: terminado>=0.8.1 in /anaconda3/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (0.8.1)\nRequirement already satisfied: nbconvert in /anaconda3/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (5.3.1)\nRequirement already satisfied: pyzmq>=17 in /anaconda3/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (17.0.0)\nRequirement already satisfied: jinja2 in /anaconda3/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (2.10)\nRequirement already satisfied: Send2Trash in /anaconda3/lib/python3.6/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (1.5.0)\nRequirement already satisfied: MarkupSafe>=0.23 in /anaconda3/lib/python3.6/site-packages (from jinja2->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (1.0)\nRequirement already satisfied: mistune>=0.7.4 in /anaconda3/lib/python3.6/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (0.8.3)\nRequirement already satisfied: entrypoints>=0.2.2 in /anaconda3/lib/python3.6/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (0.2.3)\nRequirement already satisfied: bleach in /anaconda3/lib/python3.6/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (2.1.3)\nRequirement already satisfied: pandocfilters>=1.4.1 in /anaconda3/lib/python3.6/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (1.4.2)\nRequirement already satisfied: testpath in /anaconda3/lib/python3.6/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (0.3.1)\nRequirement already satisfied: html5lib!=1.0b1,!=1.0b2,!=1.0b3,!=1.0b4,!=1.0b5,!=1.0b6,!=1.0b7,!=1.0b8,>=0.99999999pre in /anaconda3/lib/python3.6/site-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (1.0.1)\nRequirement already satisfied: webencodings in /anaconda3/lib/python3.6/site-packages (from html5lib!=1.0b1,!=1.0b2,!=1.0b3,!=1.0b4,!=1.0b5,!=1.0b6,!=1.0b7,!=1.0b8,>=0.99999999pre->bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.2.0->ipywidgets>=7.0.0->cufflinks) (0.5.1)\nRequirement already 
satisfied: ptyprocess>=0.5 in /anaconda3/lib/python3.6/site-packages (from pexpect->ipython>=5.3.0->cufflinks) (0.5.2)\nBuilding wheels for collected packages: cufflinks, retrying\n Building wheel for cufflinks (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for cufflinks: filename=cufflinks-0.17.3-py3-none-any.whl size=67927 sha256=83adf8c5d55c811c65617202e0962cd685fdf1e573579f855bbb09b2ec91a7e4\n Stored in directory: /Users/arthur_at_sinai/Library/Caches/pip/wheels/1c/db/ce/70cf35eb5a61b9ea3d34434072a8821dddc4f21eb5127e5415\n Building wheel for retrying (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for retrying: filename=retrying-1.3.3-py3-none-any.whl size=6236 sha256=2aaca2bb89012b32e8448447f203974fb8f47f39fd911a3b7aeb985f9bf90c0c\n Stored in directory: /Users/arthur_at_sinai/Library/Caches/pip/wheels/ac/cb/8a/b27bf6323e2f4c462dcbf77d70b7c5e7868a7fbe12871770cf\nSuccessfully built cufflinks retrying\nInstalling collected packages: retrying, plotly, colorlover, cufflinks\nSuccessfully installed colorlover-0.3.0 cufflinks-0.17.3 plotly-4.14.1 retrying-1.3.3\n\u001b[33mWARNING: You are using pip version 20.3.2; however, version 20.3.3 is available.\nYou should consider upgrading via the '/anaconda3/bin/python -m pip install --upgrade pip' command.\u001b[0m\n"
],
[
"# from https://stackabuse.com/using-plotly-library-for-interactive-data-visualization-in-python/\nimport pandas as pd\nimport numpy as np\n%matplotlib inline\n\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\ninit_notebook_mode(connected=True)\n\nimport cufflinks as cf\ncf.go_offline()\n",
"_____no_output_____"
],
[
"8 * 24",
"_____no_output_____"
],
[
"# from https://stackabuse.com/using-plotly-library-for-interactive-data-visualization-in-python/\nimport seaborn as sns\ndataset = sns.load_dataset('tips')\ndataset.head()\n",
"_____no_output_____"
],
[
"dataset2 = dataset[[\"total_bill\", \"tip\", \"size\"]]\ndataset2.plot()\n",
"_____no_output_____"
],
[
"dataset2.iplot()\n",
"_____no_output_____"
],
[
"dataset.iplot(kind='bar', x=['time', 'sex'],y='total_bill')",
"_____no_output_____"
],
[
"dataset.mean().iplot(kind='bar')",
"_____no_output_____"
],
[
"dataset.mean().iplot(kind='barh')",
"_____no_output_____"
],
[
"dataset.iplot(kind='scatter', x='total_bill', y='tip', mode='markers',\n title='Scatter plot', xTitle='Bill ($)', yTitle='Tip ($)')",
"_____no_output_____"
],
[
"dataset.iplot(kind='scatter', x='total_bill', y='tip', mode='markers',\n # log y axis\n layout=dict(xaxis=dict(title='Bill ($)'),\n yaxis=dict(type='log',\n title='Tip ($)'),\n title='Scatter plot'))",
"_____no_output_____"
],
[
"dataset2.iplot(kind='box')",
"_____no_output_____"
],
[
"dataset2.scatter_matrix()\n",
"_____no_output_____"
]
]
]
| [
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb75b6ab2822c4270d1309fa11a76d11db2ebdd8 | 206,829 | ipynb | Jupyter Notebook | benchmark/5.percent_replicating_negcon.ipynb | jump-cellpainting/neurips-cpjump1 | ed0f918e5920021a3c6f9a8c2d63cf22c2067039 | [
"CC0-1.0",
"BSD-3-Clause"
]
| 5 | 2021-06-15T17:47:54.000Z | 2021-12-15T10:23:55.000Z | benchmark/5.percent_replicating_negcon.ipynb | jump-cellpainting/neurips-cpjump1 | ed0f918e5920021a3c6f9a8c2d63cf22c2067039 | [
"CC0-1.0",
"BSD-3-Clause"
]
| 7 | 2021-07-09T08:27:04.000Z | 2021-11-17T03:09:40.000Z | benchmark/5.percent_replicating_negcon.ipynb | jump-cellpainting/neurips-cpjump1 | ed0f918e5920021a3c6f9a8c2d63cf22c2067039 | [
"CC0-1.0",
"BSD-3-Clause"
]
| 4 | 2021-09-28T16:40:38.000Z | 2021-12-13T13:00:22.000Z | 713.203448 | 148,755 | 0.946555 | [
[
[
"import pandas as pd\nimport utils\nimport matplotlib.pyplot as plt\nimport random\nimport plotly.express as px\nimport numpy as np\n\nrandom.seed(9000)\n\nplt.style.use(\"seaborn-ticks\")\nplt.rcParams[\"image.cmap\"] = \"Set1\"\nplt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set1.colors)\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"In this notebook the Percent Replicating score for DMSO at each position is computed for the following U2OS 48h time point compound plates\n1. Whole plate normalized CP profiles\n2. Spherized CP profiles\n3. Spherized DL profiles\n\nThe following are the steps taken\n1. Whole plate normalized CP profiles, Spherized CP profiles and Spherized DL profiles from the 48h Compound experiment are read and the replicates plates merged into a single dataframe.\n2. All the non-negative control wells are removed.\n3. DMSO wells in the same position are considered replicates while DMSO wells in different positions are considered non-replicates.\n4. The signal distribution, which is the median pairwise replicate correlation, is computed for each replicate.\n5. The null distribution, which is the median pairwise correlation of non-replicates, is computed for 1000 combinations of non-replicates.\n6. Percent Replicating is computed as the percentage of the signal distribution that is the greater than the 95th percentile of null distribution\n7. The signal and noise distributions and the Percent Replicating values are plotted and the table of Percent Replicating is printed.",
"_____no_output_____"
]
],
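A minimal sketch of the Percent Replicating score described in the cell above, assuming plain numpy arrays for the signal (replicate) and null (non-replicate) correlation distributions; the notebook's own utils.percent_score helper may differ in detail, and the inputs below are placeholders, not real data.

import numpy as np

def percent_replicating(replicate_corrs, null_corrs, percentile=95):
    # Threshold is the 95th percentile of the null (non-replicate) distribution
    threshold = np.percentile(null_corrs, percentile)
    # Score is the percentage of replicate correlations that beat the threshold
    above = np.sum(np.asarray(replicate_corrs) > threshold)
    return 100.0 * above / len(replicate_corrs), threshold

# Placeholder inputs standing in for corr_between_replicates / corr_between_non_replicates output
signal = np.random.beta(5, 2, size=200)
null = np.random.beta(2, 5, size=10000)
score, value_95 = percent_replicating(signal, null)
print(f"Percent Replicating: {score:.1f} (95th percentile of null = {value_95:.3f})")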
[
[
"n_samples = 10000\nn_replicates = 4\n\ncorr_replicating_df = pd.DataFrame()\ngroup_by_feature = 'Metadata_Well'\nperturbation = \"compound\"\ncell = \"U2OS\"\ntime = \"48\"\n\nexperiment_df = (\n pd.read_csv('output/experiment-metadata.tsv', sep='\\t')\n .query('Batch==\"2020_11_04_CPJUMP1\" or Batch==\"2020_11_04_CPJUMP1_DL\"')\n .query('Perturbation==@perturbation')\n .query('Cell_type==@cell')\n .query('Time==@time')\n)\n\nbatches = {\n \"2020_11_04_CPJUMP1\": {\n \"normalized\": \"normalized.csv.gz\",\n \"spherized\": \"spherized.csv.gz\"\n },\n \"2020_11_04_CPJUMP1_DL\": {\n \"spherized\": \"spherized.csv.gz\"\n }\n}",
"_____no_output_____"
],
[
"for batch in experiment_df.Batch.unique():\n for type in batches[batch]:\n filename = batches[batch][type]\n batch_df = experiment_df.query('Batch==@batch')\n data_df = pd.DataFrame()\n for plate in experiment_df.Assay_Plate_Barcode.unique():\n plate_df = utils.load_data(batch, plate, filename)\n data_df = utils.concat_profiles(data_df, plate_df)\n\n data_df = data_df.query('Metadata_control_type==\"negcon\"')\n\n metadata_df = utils.get_metadata(data_df)\n features_df = utils.get_featuredata(data_df).replace(np.inf, np.nan).dropna(axis=1, how=\"any\")\n data_df = pd.concat([metadata_df, features_df], axis=1)\n\n replicating_corr = list(utils.corr_between_replicates(data_df, group_by_feature)) # signal distribution\n null_replicating = list(utils.corr_between_non_replicates(data_df, n_samples=n_samples, n_replicates=n_replicates, metadata_compound_name = group_by_feature)) # null distribution\n\n prop_95_replicating, value_95_replicating = utils.percent_score(null_replicating,\n replicating_corr,\n how='right')\n\n if batch == \"2020_11_04_CPJUMP1\":\n features = 'CellProfiler'\n else:\n features = 'DeepProfiler'\n\n corr_replicating_df = corr_replicating_df.append({'Description':f'{features}_{type}',\n 'Modality':f'{perturbation}',\n 'Cell':f'{cell}',\n 'time':f'{time}',\n 'Replicating':replicating_corr,\n 'Null_Replicating':null_replicating,\n 'Percent_Replicating':'%.1f'%prop_95_replicating,\n 'Value_95':value_95_replicating}, ignore_index=True)",
"_____no_output_____"
],
[
"print(corr_replicating_df[['Description', 'Percent_Replicating']].to_markdown(index=False))",
"| Description | Percent_Replicating |\n|:------------------------|----------------------:|\n| CellProfiler_normalized | 42.2 |\n| CellProfiler_spherized | 26.6 |\n| DeepProfiler_spherized | 31.2 |\n"
],
[
"utils.distribution_plot(df=corr_replicating_df, output_file=\"5.percent_replicating.png\", metric=\"Percent Replicating\")",
"_____no_output_____"
],
[
"corr_replicating_df['Percent_Replicating'] = corr_replicating_df['Percent_Replicating'].astype(float)\n\ncorr_replicating_df.loc[(corr_replicating_df.Modality=='compound') & (corr_replicating_df.time=='48'), 'time'] = 'long'\n\nplot_corr_replicating_df = (\n corr_replicating_df.rename(columns={'Modality':'Perturbation'})\n .drop(columns=['Null_Replicating','Value_95','Replicating'])\n)",
"_____no_output_____"
],
[
"fig = px.bar(data_frame=plot_corr_replicating_df,\n x='Description',\n y='Percent_Replicating',\n facet_row='time',\n facet_col='Cell')\nfig.update_layout(title='Percent Replicating vs. Perturbation - U2OS 48h Compound plates',\n xaxis=dict(title='Feature set'),\n yaxis=dict(title='Percent Replicating'),\n yaxis3=dict(title='Percent Replicating'))\nfig.show(\"png\")\nfig.write_image(f'figures/5.percent_replicating_facet.png', width=640, height=480, scale=2)",
"_____no_output_____"
],
[
"print(plot_corr_replicating_df[['Description','Perturbation','time', 'Cell' ,'Percent_Replicating']].to_markdown(index=False))\n",
"| Description | Perturbation | time | Cell | Percent_Replicating |\n|:------------------------|:---------------|:-------|:-------|----------------------:|\n| CellProfiler_normalized | compound | long | U2OS | 42.2 |\n| CellProfiler_spherized | compound | long | U2OS | 26.6 |\n| DeepProfiler_spherized | compound | long | U2OS | 31.2 |\n"
]
]
]
| [
"code",
"markdown",
"code"
]
| [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb75bf805a2a820e054b6b86a33eb7a79f4aef2d | 62,536 | ipynb | Jupyter Notebook | Section 2.ipynb | archielv/Getting-Started-with-Machine-Learning-in-Python- | c3bec7169c89b1a89da316b3dab4d1315fce92f7 | [
"MIT"
]
| 2 | 2019-07-15T21:19:22.000Z | 2021-10-04T04:52:54.000Z | Section 2.ipynb | archielv/Getting-Started-with-Machine-Learning-in-Python- | c3bec7169c89b1a89da316b3dab4d1315fce92f7 | [
"MIT"
]
| null | null | null | Section 2.ipynb | archielv/Getting-Started-with-Machine-Learning-in-Python- | c3bec7169c89b1a89da316b3dab4d1315fce92f7 | [
"MIT"
]
| 6 | 2018-09-27T00:56:03.000Z | 2020-04-24T10:00:14.000Z | 29.581835 | 223 | 0.324981 | [
[
[
"# Load the dataset\nfrom sklearn.datasets import load_boston # there are a few datasets built into ML library scikit-learn\nboston_house_prices_data = load_boston() # let's put this dataset into a variable",
"_____no_output_____"
],
[
"print(boston_house_prices_data.DESCR)",
"Boston House Prices dataset\n===========================\n\nNotes\n------\nData Set Characteristics: \n\n :Number of Instances: 506 \n\n :Number of Attributes: 13 numeric/categorical predictive\n \n :Median Value (attribute 14) is usually the target\n\n :Attribute Information (in order):\n - CRIM per capita crime rate by town\n - ZN proportion of residential land zoned for lots over 25,000 sq.ft.\n - INDUS proportion of non-retail business acres per town\n - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n - NOX nitric oxides concentration (parts per 10 million)\n - RM average number of rooms per dwelling\n - AGE proportion of owner-occupied units built prior to 1940\n - DIS weighted distances to five Boston employment centres\n - RAD index of accessibility to radial highways\n - TAX full-value property-tax rate per $10,000\n - PTRATIO pupil-teacher ratio by town\n - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n - LSTAT % lower status of the population\n - MEDV Median value of owner-occupied homes in $1000's\n\n :Missing Attribute Values: None\n\n :Creator: Harrison, D. and Rubinfeld, D.L.\n\nThis is a copy of UCI ML housing dataset.\nhttp://archive.ics.uci.edu/ml/datasets/Housing\n\n\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\n\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\nprices and the demand for clean air', J. Environ. Economics & Management,\nvol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics\n...', Wiley, 1980. N.B. Various transformations are used in the table on\npages 244-261 of the latter.\n\nThe Boston house-price data has been used in many machine learning papers that address regression\nproblems. \n \n**References**\n\n - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.\n - many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)\n\n"
]
],
[
[
"**Dealing with missing values**",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"# number of rooms across 5 data points\npd.Series([1, 2, 3, 1, 2])",
"_____no_output_____"
],
[
"pd.Series([1, np.nan, 3, 1, None])",
"_____no_output_____"
],
[
"num_rooms = pd.Series([1, np.nan, 3, 1, None])\nnum_rooms.isnull()",
"_____no_output_____"
],
[
"num_rooms[num_rooms.notnull()]",
"_____no_output_____"
],
[
"num_rooms.dropna()",
"_____no_output_____"
],
[
"df = pd.DataFrame(\n [[1, np.nan, 2],\n [2, 300, 5],\n [1, np.nan, np.nan]]\n)\ndf",
"_____no_output_____"
],
[
"df.isnull()",
"_____no_output_____"
],
[
"df.dropna()",
"_____no_output_____"
],
[
"means = df.mean(axis=0)",
"_____no_output_____"
],
[
"means",
"_____no_output_____"
],
[
"df.fillna(means)",
"_____no_output_____"
]
],
[
[
"# Standardization and Normalization",
"_____no_output_____"
]
],
[
[
"data = [\n [0, 0], [0, 0],\n [1, 1], [1, 1]\n]\ndata",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"StandardScaler().fit_transform(data)",
"_____no_output_____"
],
[
"from sklearn import preprocessing",
"_____no_output_____"
],
[
"preprocessing.normalize(data, norm='l2')",
"_____no_output_____"
]
],
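For reference, a small numpy-only sketch that reproduces the two transforms demonstrated above by hand: StandardScaler z-scores each column ((x - mean) / std), while preprocessing.normalize with norm='l2' rescales each row to unit Euclidean length. This is illustrative only, not a replacement for the scikit-learn calls.

import numpy as np

data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=float)

# Column-wise standardization: subtract each column's mean, divide by its (population) std
standardized = (data - data.mean(axis=0)) / data.std(axis=0)

# Row-wise L2 normalization: divide each row by its Euclidean norm (zero rows stay zero)
row_norms = np.linalg.norm(data, axis=1, keepdims=True)
normalized = np.divide(data, row_norms, out=np.zeros_like(data), where=row_norms != 0)

print(standardized)   # matches StandardScaler().fit_transform(data)
print(normalized)     # matches preprocessing.normalize(data, norm='l2')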
[
[
"# Eliminating duplicate entries",
"_____no_output_____"
]
],
[
[
"# Load the dataset\nfrom sklearn.datasets import load_boston # there are a few datasets built into ML library scikit-learn\nboston_house_prices_data = load_boston() # let's put this dataset into a variable",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"boston_house_prices_data.data",
"_____no_output_____"
],
[
"X = pd.DataFrame(boston_house_prices_data.data)",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"X[X.duplicated()]",
"_____no_output_____"
],
[
"X = X.append(X.iloc[0,:], ignore_index=True)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"X[X.duplicated()]",
"_____no_output_____"
],
[
"X = X.drop_duplicates()",
"_____no_output_____"
],
[
"X[X.duplicated()]",
"_____no_output_____"
]
]
]
| [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
]
|
cb75ca5909ebd940a36db38afa33ece34a5e60fa | 983,017 | ipynb | Jupyter Notebook | .ipynb_checkpoints/law_eda-checkpoint.ipynb | Adishah3103/Legal-AI | d7b7bb2a6e766519b227c7e5b4e82ab8f153b699 | [
"MIT"
]
| 3 | 2018-12-19T06:09:51.000Z | 2019-08-21T06:22:24.000Z | .ipynb_checkpoints/law_eda-checkpoint.ipynb | Adishah3103/Legal-AI | d7b7bb2a6e766519b227c7e5b4e82ab8f153b699 | [
"MIT"
]
| 15 | 2020-09-04T15:00:15.000Z | 2022-03-11T23:36:46.000Z | .ipynb_checkpoints/law_eda-checkpoint.ipynb | Aditya-shahh/Legal-AI | d7b7bb2a6e766519b227c7e5b4e82ab8f153b699 | [
"MIT"
]
| 4 | 2018-12-15T14:45:19.000Z | 2020-09-08T14:46:46.000Z | 31.935837 | 28,793 | 0.494801 | [
[
[
"# CaseLaw dataset to assist with Law-Research - EDA\n---\n<dl>\n <dt>Acquiring the dataset</dt>\n <dd>We initially use dataset of all cases in USA to be able to train it and as a proof of concept.</dd>\n <dd>The dataset is available in XML format, which we will put in mongodb or firebase format based on how unstructured the dataset is.</dd>\n <dd>dataset url: (https://case.law/)\n</dd>\n\n <dt>Research</dt>\n <dd>We are looking into <em>NLP</em>, <em>LSTM</em> and <em>Sentiment Analysis</em>.</dd>\n</dl>",
"_____no_output_____"
]
],
[
[
"import jsonlines\nfrom pymongo import MongoClient",
"_____no_output_____"
],
[
"# client = MongoClient()\nclient = MongoClient()\ndb = client.legal_ai\ncases = db.cases",
"_____no_output_____"
],
[
"some_date = '1820-01'",
"_____no_output_____"
],
[
"print(int(some_date[0:4])<1950)",
"True\n"
],
[
"id_saved = []\nwith jsonlines.open('../data.jsonl') as reader:\n for obj in reader:\n if int(obj['decision_date'][0:4])>1950:\n case_id = cases.insert_one(obj).inserted_id\n id_saved.append(case_id)",
"_____no_output_____"
],
[
"len(id_saved)",
"_____no_output_____"
]
],
[
[
"## Testing out Similarity Mechanism\n---\n### Setup\n- Test PyDictionary to build keywords\n- Construct a mechanism, to extract keywords, and store in a searchable manner.\n---\n### Search\n- Build keywords out of your search\n- Search among dataset keywords\n- Nearest dates, highest weight, highest precidence shows up\n- Pagination scroll, continues the search.",
"_____no_output_____"
]
],
[
[
"# NLTK\n",
"_____no_output_____"
]
],
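Since the plan above is keyword based and the previous cell only notes NLTK, here is a rough sketch of how keywords could be extracted and matched. The tokenizer, stopword filtering, and the naive overlap score are illustrative assumptions, not the project's final mechanism; it requires nltk plus its 'punkt' and 'stopwords' data.

import string
from collections import Counter

import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

nltk.download('punkt', quiet=True)      # one-time downloads; no-ops if already present
nltk.download('stopwords', quiet=True)

STOP = set(stopwords.words('english')) | set(string.punctuation)

def extract_keywords(text, top_k=25):
    # Lowercase, tokenize, drop stopwords/punctuation/non-alphabetic tokens, keep frequent terms
    tokens = [t.lower() for t in word_tokenize(text) if t.isalpha() and t.lower() not in STOP]
    return Counter(tokens).most_common(top_k)

def overlap_score(query_keywords, case_keywords):
    # Crude similarity: number of shared keyword terms (frequencies ignored)
    return len({w for w, _ in query_keywords} & {w for w, _ in case_keywords})

# Hypothetical usage against the 'summary' field built later in this notebook:
# query_kw = extract_keywords("assault with intent to kill self defense")
# ranked = sorted(cases.find(), reverse=True,
#                 key=lambda c: overlap_score(query_kw, extract_keywords(c.get('summary', ''))))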
[
[
"## Transforming dataset\n---\n### Extract the first data and study it\n- Identify the key elements that need to be transformed & list them\n- Build a mechanism to transform for one datapoint.\n---\n### Perform for entire dataset\n- Run a loop and apply the same changes for every datapoints.",
"_____no_output_____"
]
],
[
[
"# Extracting the first element\nfirst_case = cases.find_one()",
"_____no_output_____"
],
[
"import xml.etree.ElementTree as ET\nroot = ET.fromstring(first_case['casebody']['data'])",
"_____no_output_____"
],
[
"root",
"_____no_output_____"
]
],
[
[
"# Getting the case body cleaned into a seperate field on db\n> ",
"_____no_output_____"
]
],
[
[
"summary=''\nfor child in root:\n for sub_child in child:\n if 'footnotemark' in sub_child.tag[sub_child.tag.index(\"}\")+1:] or 'author' in sub_child.tag[sub_child.tag.index(\"}\")+1:]:\n continue\n summary+=sub_child.text + \"\\n\"",
"_____no_output_____"
],
[
"print(summary)",
"The executive secretary of the State Game and Fish, Commission declined to authorize payment of $105 to four persons who claimed to have killed seven wolves over six months old, as evidenced by certificate of the Boone County Court. See Act 183 of 1949. The statute is entitled “An Act to authorize . . . counties ... to pay bounties for the killing of wolves and to provide that the State . . . shall pay an equal sum as a bounty, and for other purposes.” The emergency clause is a finding that farmers are suffering irreparable damage ‘£ from wolves destroying cattle and other live stock.” The measure received two-thirds of the votes of all members elected to each branch of the General Assembly.\nThe complaint is a petition for mandamus alleging that Boone county has paid bounties on the basis of $20 for each wolf killed; but, § 2 of Act 183, the amount payable by the State cannot exceed $15.\nThe answer, among other defenses, asserts that T. H. McAmis as Secretary of the Game and Fish Commission, Sec. 7, Amendment No. 35 to the Constitution, is not permitted to disburse the fund unless the Commission expressly directs payment; that none of the scalps for which a bounty is claimed is that of a wolf, hence the public has been defrauded by the substitution of dog scalps; and further, Act 183 is void because it is in conflict with Amendment 35 in that appropriations by the General Assembly may only be made for purposes enumerated in the Amendment or coming within its scope by necessary implication.\nThe Chancellor held that the certificate issued by the County Judge was conclusive and rejected expert witnesses who would have testified that the scalps came from dogs. One of the witnesses had spent 22 years as an employe of the TJ. S. Fish and Wild Life Service, trapping wolves in ten states. His reasons for asserting that the scalps in question were spurious were set out in detail for the purpose of making up a record.\nPower of the Legislature to change an initiated constitutional amendment is presented by the plea that Act 183 received two-thirds of the votes of each branch of the lawmaking body. The Act, by § 4, appropriates from the Game Protection Fund $6,000 for each year of the biennium ending June 30, 1951, to pay bounties as set out in § 2.\nIn W. R. Wrape Stave Company v. Arkansas State Game and Fish Commission, 215 Ark. 229, 219 S. W. 2d 948, we said that the underlying purpose of Amendment 35 was to vest in the Commission power to control, manage, restore, conserve, and regulate the State’s bird, fish, game, and wild life resources, and that funds arising from all sources were to be spent by the Commission for the purposes mentioned. Section 8 of the Amendment contains this additional language: “All moneys shall be deposited in the Game Protection Fund with the State Treasurer, and such moneys as are necessary, including an emergency fund, shall be appropriated by the Legislature at each legislative session for the use of the Game and Fish Commission as hereto set forth.”\nWe said in the Wrape Stave case that “money received from sources mentioned in the Amendment is not available — even with legislative approval — for any uses other than those expressed or necessarily implied.”\nAppellant is correct in saying that the General Assembly cannot disburse Game and Fish funds. 
It should, as the Amendment contemplates, make appropriations to carry into effect the will of the people who adopted the instrument as a part of our Constitution; but in doing this the fundamental intent must be kept in sight.\nThis brings us to a consideration of the legislative right to amend, repeal, or otherwise change an initiated constitutional amendment. Under ‘ ‘ General Provisions, ’ ’ Amendment No. 7, “measure’.’ is defined as including any bill, law, resolution, ordinance, charter, constitutional amendment, or legislative proposal or enactment of any character. Neither the Governor nor any Mayor shall have power to veto measures initiated by or referred to the people. The following paragraph then appears: “No measure approved by a vote of the people shall be amended or repealed by the General Assembly or by any City Council, except upon a yea and nay vote on roll call of two-thirds of all members elected to each house of the General Assembly, or of the City Council, as the case may be.” Another provision prohibits the General Assembly from submitting “measures” to the people “except a proposed constitutional amendment or amendments as provided for in this Constitution. ’ ’\nSome of the cases in which reference is made to Amendment No. 7, and the construction given it where issues differed from those before us, are shown in the footnote.\nThe word “measure” or “measures” appears in each of the eight paragraphs under General Provisions. There are no numbered sections other than § 1. It will be observed that the definition of “measure” is that it shall include constitutional amendment[s] “or legislative proposals of any character.” If the language should be literally construed, then a constitutional amendment applicable to Little Rock alone, or to any other city, could be repealed by a vote of two-thirds of the members elected to the city council. By § 22 of Art. 19 of the Constitution, the General Assembly may submit constitutional amendments — not exceeding three at a session— for approval or rejection by the people. These amendments are spoken of as “measures,” hence if the definition in Amendment and Repeal is applied throughout, then any or all of the more than forty amendments to the constitution could be repealed by the required vote of the legislature. The result would be that initiated Acts and constitutional amendments would stand on about the same footing.\nIt is inconceivable that in defining constitutional amendment as a measure the purpose was to invest the General Assembly with power (a) to repeal a constitutional amendment, or (b) with authority to amend an amendment — a power that could be exercised to such an extent that the entire meaning of a constitutional provision achieved through amendment could be changed by legislative action.\nThe clear intent of the Initiative and Beferendum Amendment was to give the people enlarged legislative and constitutional powers. Certainly if the purpose had been to take away fundamental security then enjoyed or to be acquired under the Amendment, the right of two-thirds of those elected to the General Assembly to treat amendments as though they had been referred to it would have been expressed in more emphatic terms.\nThe decree is reversed and the cause dismissed.\n[The decision in this case was reached in December, 1950, and the result concurred in by all of the judges, two of whom are. not now on the court. Mr. Justice Ward and Mr. 
Justice Bobinson, therefore, did not participate in a determination of the issues, or in consideration of the opinion].\n \n\n"
]
],
[
[
"# Do the same for all the files now!",
"_____no_output_____"
]
],
[
[
"all_cases = cases.find()",
"_____no_output_____"
],
[
"all_cases.count()",
"/Users/flynsequeira/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: count is deprecated. Use Collection.count_documents instead.\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"check_one = True\nfor each_case in all_cases:\n root = ET.fromstring(each_case['casebody']['data'])\n summary=''\n for child in root:\n for sub_child in child:\n if 'footnotemark' in sub_child.tag[sub_child.tag.index(\"}\")+1:] or 'author' in sub_child.tag[sub_child.tag.index(\"}\")+1:]:\n continue\n summary+=sub_child.text + \"\\n\"\n myquery = { \"_id\": each_case['_id'] }\n newvalues = { \"$set\": { \"summary\": summary } }\n cases.update_one(myquery, newvalues)",
"checking for one\nadded to case check: 5c1549de08ef9543808eabd0\n{'_id': ObjectId('5c1549de08ef9543808eabd0'), 'id': 1611964, 'name': 'Keith v. State', 'name_abbreviation': 'Keith v. State', 'decision_date': '1951-01-15', 'docket_number': '4641', 'first_page': '174', 'last_page': '181', 'citations': [{'type': 'official', 'cite': '218 Ark. 174'}, {'type': 'parallel', 'cite': '235 S.W.2d 539'}], 'volume': {'volume_number': '218'}, 'reporter': {'full_name': 'Arkansas Reports'}, 'court': {'id': 8808, 'name': 'Arkansas Supreme Court', 'name_abbreviation': 'Ark.', 'jurisdiction_url': None, 'slug': 'ark'}, 'jurisdiction': {'id': 34, 'slug': 'ark', 'name': 'Ark.', 'name_long': 'Arkansas', 'whitelisted': True}, 'casebody': {'data': '<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>\\n<casebody xmlns=\"http://nrs.harvard.edu/urn-3:HLS.Libr.US_Case_Law.Schema.Case_Body:v1\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" firstpage=\"174\" lastpage=\"181\"> <docketnumber id=\"b198-5\" pgmap=\"198\">4641</docketnumber> <citation id=\"Aq_\" pgmap=\"198\">235 S. W. 2d 539</citation> <parties id=\"b198-4\" pgmap=\"198\">Keith v. State.</parties> <decisiondate id=\"b198-6\" pgmap=\"198\">Opinion delivered January 15, 1951.</decisiondate> <judges id=\"b205-7\" pgmap=\"205\">Robinson, J., dissents.</judges> <attorneys id=\"b199-8\" pgmap=\"199\">L. Weems Tmssell, for appellant.</attorneys> <attorneys id=\"b199-9\" pgmap=\"199\">Ike Murry, Attorney General and Jeff Duty, Assistant Attorney General, for appellee.</attorneys> <opinion type=\"majority\"> <author id=\"b199-10\" pgmap=\"199\">Minor W. Mill wee, Justice.</author> <p id=\"AaX\" pgmap=\"199\">The defendant, Josie Bell Keith, was tided and convicted under an indictment charging her with assault with intent to kill Sadie Hughes, the punishment being fixed by the jury at five years in the penitentiary. Defendant has appealed, and the first five assignments in the motion for new trial challenge the sufficiency of the evidence to support the verdict and judgment. The defendant and prosecuting witness, Sadie Hughes, are negro women residing at Sparkman, Arkansas.</p> <p id=\"b199-11\" pgmap=\"199(158) 200(38)\">Sadie Hughes testified that shortly before noon on June 28, 1949, she was walking along the road from her home to the business section of Sparkman when she noticed defendant’s automobile parked in the road ahead. Defendant was seated in the car and the two women spoke to each other as Sadie walked by. Defendant then started her car, drove past the prosecuting witness, and again stopped across the road in front of Sadie. Defendant then jumped out of the car and fired a pistol twice at Sadie who ran a short distance into the front yard and through the front door of the home of Lizzie \"Wright. Defendant went into the Wright house through the back door and chased Sadie from room to room onto the back porch where Sadie grabbed hold of either the gun, or defendant’s arm, and they both fell from the porch to the ground. In the ensuing scuffle on the ground, defendant fired the gun again, the bullet grazing the side of Sadie’s head. According to Sadie, the defendant then said: “Don’t wipe the G-- d— blood off your face; if I had another bullet, I’d finish you up.”</p> <p id=\"b200-5\" pgmap=\"200\">The testimony of Sadie relative to the shooting in front of the Lizzie Wright home was corroborated by the testimony of Dorothy Jean Daniels, a 1-6-year-old girl. 
Dorothy testified that she was sitting on Lizzie Wright’s front porch and that defendant fired the first shot before she alighted from her car and then fired the second shot as Sadie was running toward the Wright home. The third shot was fired as witness was going around to the back of the house. When she reached the back yard, defendant was standing beside Sadie with the pistol in her hand and Sadie’s face was bleeding. Shortly thereafter Maggie Cowan, an elderly woman, came up and persuaded defendant to leave.</p> <p id=\"b200-6\" pgmap=\"200\">In opposition to the State’s testimony, defendant offered several witnesses who testified that they heard only the one shot fired in the Wrig’ht back yard. They also testified that in the altercation in the back-yard defendant struck Sadie with the pistol after the latter struck her with a stick and that the gun was discharged in the ensuing scuffle.</p> <p id=\"b200-7\" pgmap=\"200\">Maggie Cowan, a witness for the State, who lived a block and a half away, also testified that she heard only one shot and “didn’t pay any attention to just what it was” until informed of the trouble. On cross-examination she described Sadie’s wound as follows: “I judged it was a shot. It didn’t seem to be a lick that she had struck her. It seemed to be a glance, I just judged, where a bullet had struck. It didn’t cut the width of it except it cut a gash.”</p> <p id=\"b200-8\" pgmap=\"200\">In rebuttal the State offered proof tending to show that some of the defense witnesses were not present at the scene of the shooting.</p> <p id=\"b200-9\" pgmap=\"200(29) 201(101)\">Defendant interposed the plea of self defense. Although she did not testify in the case, there was evidence by both the State and defendant that the shooting was motivated by defendant’s belief that the prosecuting witness had been having illicit relations with defendant’s husband. The prosecuting witness is a married woman with four children. She strenuously denied having any improper relations with defendant’s husband. The husband of Sadie Hughes testified that defendant came to his barber shop\\' twice before the shooting and told him that she had heard that her husband was having illicit relations with Sadie. This witness stated that he had never seen anything that would cause him to be suspicious of his wife and told defendant he, “didn’t think there was anything to it.”</p> <p id=\"b201-3\" pgmap=\"201\">We have repeatedly held that in order to constitute the crime of assault with intent to kill a specific intent to take the life of the person.assaulted must be shown, and the evidence\\' must be such as to warrant a conviction for murder if death had resulted from the assault. Allen v. State, 117 Ark. 432, 174 S. W. 1179. Appellant earnestly contends that the evidence here does not measure up to these requirements. It is argued that the testimony of Sadie Hughes and other witnesses for the State is exaggerated, unreasonable and unworthy of belief and our attention is directed to certain inconsistencies in such testimony and the version of the shooting given by defense witnesses. The jury were the judges of the credibility of the various witnesses and we must consider the evidence in the light most favorable to the State. 
As thus viewed, we hold it ample to sustain the verdict.</p> <p id=\"b201-4\" pgmap=\"201(107) 202(97)\">The most serious question in the case arises in connection with the sixth assignment in the motion for new trial which alleges error in the court’s ruling on the State’s objection to certain testimony of the witness, Benjamin Daniels. This witness testified that about a month before the shooting Sadie Hughes told him to tell defendant that she, Sadie, was having improper relations with the defendant’s husband; that she had an automatic and was ready any time. The prosecuting attorney objected to that part of the testimony concerning improper relations between Sadie and defendant’s husband. In sustaining the objection the court ruled as ■ follows: “Ladies and gentlemen of the jury, the only purpose for which this testimony could be given would be to go to the credibility of the prosecuting witness when she testified she had had no relationship with the husband of the defendant and for that purpose you may consider it but not as a defense to this alleged crime.” Daniels further testified that when Sadie Hughes asked him to deliver the message to defendant, he informed her that he was not a news carrier. There is no showing that defendant ever received information of the alleged conversation.</p> <p id=\"b202-5\" pgmap=\"202\">In Flowers v. State, 152 Ark. 295, 238 S. W. 37, the accused testified in detail about information that he had received óf improper relations between his wife and the person assaulted shortly before the assault. In affirming the conviction for assault with intent to kill the court said: “The fact of intimacy between appellant’s wife and the assaulted person and appellant’s receiving information thereof did not constitute a justification in law for the assault (Fisher v. State, 149 Ark. 48, 231 S. W. 181), but those facts were proper for the consideration of the jury in mitigation of the offense and also in determining whether or not the assault was made upon a sudden heat of passion and upon apparently sufficient provocation. Appellant was therefore entitled to an instruction on those subjects, and, if he had asked for it in proper form, the court should have given an instruction telling the jury that they should consider these facts and the circumstances under which appellant received the information, the length of time before the assault and the circumstances under which he made the assault, in determining whether the passion under which he acted at the time Avas suddenly aroused, and whether the provocation was apparently sufficient to make the passion irresistible. ’ ’</p> <p id=\"b202-6\" pgmap=\"202(58) 203(18)\">It is noted that the rule stated is predicated upon a showing that the accused had received information of the illicit relations between his wife and the person assaulted. There is an absence of proof in the instant case that defendant was ever infprmed of the alleged conversation between Sadie Hughes and Benjamin Daniels and no passion could be aroused or provocation furnished by a statement which was never communicated to the defendant.</p> <p id=\"b203-5\" pgmap=\"203\">In some jurisdictions evidence of improper conduct by a deceased toward defendant’s wife has been held admissible in homicide cases, even though uncommunicated to the defendant, in support of a plea of self-defense where such evidence tends to shed light as to who was the aggressor. See Anno. 44 A. L. R 860. 
The record shows that the testimony objected to in the instant case was not offered for that purpose, and the fact that defendant voluntarily entered into the difficulty with the prosecuting witness and was the aggressor throughout seems to be undisputed. Moreover, other evidence was admitted without objection tending to show that defendant did receive information from other sources relative to alleged improper relations between her husband and Sadie Hughes, and the jury was told in Instruction No. 10 that it could not convict defendant of assault with intent to kill if she acted under a sudden heat of passion caused by a provocation apparently sufficient to make such passion irresistible. Since it was not shown that defendant was ever informed of the conversation between Sadie Hughes and Benjamin Daniels, we think the court properly limited the jury’s consideration of such testimony.</p> <p id=\"b203-6\" pgmap=\"203\">Assignments Nos. 7, 8, 9 and 11 of the motion for new trial allege improper influence upon and misconduct of the jury which resulted in defendant’s not receiving a fair trial. In the absence of anything in the record to support these assignments of error, they will not be considered. Conley v. State, 180 Ark. 278, 21 S. W. 2d 176.</p> <p id=\"b203-7\" pgmap=\"203(84) 204(30)\">In assignments 10 and 12 error is alleged in the court’s refusal to declare a mistrial when the prosecuting attorney asked Benjamin Daniels on cross-examination: “Q. Isn’t it a fact that all the colored people are afraid of Josie Bell Keith?” The court promptly sustained defendant’s objection to the unanswered question, told the jury that the question was improper and further asked that each juror raise his hand if he could and would disregard the question. The record reflects that each juror raised Ms hand. The action of the trial court removed any prejudice resulting* from the unanswered question. Jutson and Winters v. State, 213 Ark. 193, 209 S. W. 2d 681.</p> <p id=\"b204-5\" pgmap=\"204\">Assignment No. 13 is that the court erred in holding Sterling Hughes, a 10-year-old boy, not qualified to testify. In the course of the examination of the boy by counsel and the court, questions were asked and answers given as shown below.<footnotemark>1</footnotemark> In Crosby v. State, 93 Ark. 156, 124 S. W. 781, it was held that the trial court was in error in holding that a 10-year-old boy was competent to testify under an examination very similar to that disclosed here. The common law rule prevails in this state as to the competency of witnesses in criminal cases. This rule is that a witness of any age may testify if, upon examination by the court, the witness appears to have sufficient intelligence to comprehend the nature and obligation of an oath and understands that there may be punishment for false swearing. Durham v. State, 179 Ark. 507, 16 S. W. 2d 991.</p> <p id=\"b204-6\" pgmap=\"204(75) 205(156)\">Another well settled rule is that the question of competency is left to the sound discretion of the trial judge and in the absence of clear abuse of the judicial discretion exercised, it is not reviewable upon appeal. Yother v. State, 167 Ark. 492, 268 S. W. 861. In Payne v. State, 177 Ark. 413, 6 S. W. 2d 832, the court quoted with approval the following* language of Justice Brewer, speaking for the court, in Wheeler v. United States, 159 U. S. 523, 16 S. Ct. 93, 40 L. Ed. 
244: “The decision of this question rests primarily with the trial judge, who sees the proposed witness, notices his manner, his apparent possession or lack of intelligence, and may resort to any examination which will tend to disclose his capacity and intelligence as well as his understanding of the obligations of an oath. As many of these matters cannot be photographed into the record, the decision of the trial judge will not be disturbed on review, unless from that which is preserved it is clear that it was erroneous.” See, also, Hudson v. State, 207 Ark. 18, 179 S. W. 2d 165. We cannot say that there was an abuse of discretion on the part of the trial judge in holding that the Hughes boy did not comprehend the sanctity and obligations of an oath.</p> <p id=\"b205-5\" pgmap=\"205\">The last assignment of error is that the court erred in giving instructions 2 to 14, inclusive. There was only a general objection to each of the instructions which were given on the court’s own motion. We have carefully examined the instructions and find that they correctly declare the law as generally given in such cases. None of the instructions are inherently erroneous and we do not review the ruling of the trial court unless a specific objection is made to such instructions. Tong v. State, 169 Ark. 708, 276 S. W. 1004.</p> <p id=\"b205-6\" pgmap=\"205\">Other alleged errors are argued in the brief which were not brought forward in the motion for new trial and no objections were made to the ruling of the court on such matters at the trial. We find no prejudicial error, and the judgment is affirmed.</p> <footnote label=\"1\"> <p id=\"b204-7\" pgmap=\"204\"> Q. What happens to boys and girls that don’t tell the truth? A. They tell stories. Q. What happens to them if they tell stories? A. I don’t know. Q. What does the Sunday School teacher tell you happens to them if they tell lies or stories? A. I don’t know. Q. Do you know it is wrong to tell a story? A. Yes, sir. Q. Can you tell this jury the truth about what happened over there near your home about a year ago? A. Yes, sir. Q. Do you remember about it and can you tell them? A. Yes, sir. By the Court: Q. Boy, what would happen to you if you were to tell a story, would you be punished in any way if you should tell a story? A. Yes, sir. Q. Who would punish you? A. Whipping. Q. Would you get any other punishment besides that? A. I don’t know. By the Court: I don’t believe he understands the solemnity of an oath. By Mr. Trussell: Q. When I asked you if you went to Sunday School do they teach you anything about God in Sunday School? A. Yes, sir. Q. What do they tell you in Sunday School will happen to bad boys that do wrong and tell lies? A. Go to jail. Q. Do they go anywhere else? A. Yes, sir. Q. Where do they go, did they tell you anything about that? A. No, sir.</p> </footnote> </opinion> </casebody> ', 'status': 'ok'}, 'summary': 'The defendant, Josie Bell Keith, was tided and convicted under an indictment charging her with assault with intent to kill Sadie Hughes, the punishment being fixed by the jury at five years in the penitentiary. Defendant has appealed, and the first five assignments in the motion for new trial challenge the sufficiency of the evidence to support the verdict and judgment. 
The defendant and prosecuting witness, Sadie Hughes, are negro women residing at Sparkman, Arkansas.\\nSadie Hughes testified that shortly before noon on June 28, 1949, she was walking along the road from her home to the business section of Sparkman when she noticed defendant’s automobile parked in the road ahead. Defendant was seated in the car and the two women spoke to each other as Sadie walked by. Defendant then started her car, drove past the prosecuting witness, and again stopped across the road in front of Sadie. Defendant then jumped out of the car and fired a pistol twice at Sadie who ran a short distance into the front yard and through the front door of the home of Lizzie \"Wright. Defendant went into the Wright house through the back door and chased Sadie from room to room onto the back porch where Sadie grabbed hold of either the gun, or defendant’s arm, and they both fell from the porch to the ground. In the ensuing scuffle on the ground, defendant fired the gun again, the bullet grazing the side of Sadie’s head. According to Sadie, the defendant then said: “Don’t wipe the G-- d— blood off your face; if I had another bullet, I’d finish you up.”\\nThe testimony of Sadie relative to the shooting in front of the Lizzie Wright home was corroborated by the testimony of Dorothy Jean Daniels, a 1-6-year-old girl. Dorothy testified that she was sitting on Lizzie Wright’s front porch and that defendant fired the first shot before she alighted from her car and then fired the second shot as Sadie was running toward the Wright home. The third shot was fired as witness was going around to the back of the house. When she reached the back yard, defendant was standing beside Sadie with the pistol in her hand and Sadie’s face was bleeding. Shortly thereafter Maggie Cowan, an elderly woman, came up and persuaded defendant to leave.\\nIn opposition to the State’s testimony, defendant offered several witnesses who testified that they heard only the one shot fired in the Wrig’ht back yard. They also testified that in the altercation in the back-yard defendant struck Sadie with the pistol after the latter struck her with a stick and that the gun was discharged in the ensuing scuffle.\\nMaggie Cowan, a witness for the State, who lived a block and a half away, also testified that she heard only one shot and “didn’t pay any attention to just what it was” until informed of the trouble. On cross-examination she described Sadie’s wound as follows: “I judged it was a shot. It didn’t seem to be a lick that she had struck her. It seemed to be a glance, I just judged, where a bullet had struck. It didn’t cut the width of it except it cut a gash.”\\nIn rebuttal the State offered proof tending to show that some of the defense witnesses were not present at the scene of the shooting.\\nDefendant interposed the plea of self defense. Although she did not testify in the case, there was evidence by both the State and defendant that the shooting was motivated by defendant’s belief that the prosecuting witness had been having illicit relations with defendant’s husband. The prosecuting witness is a married woman with four children. She strenuously denied having any improper relations with defendant’s husband. The husband of Sadie Hughes testified that defendant came to his barber shop\\' twice before the shooting and told him that she had heard that her husband was having illicit relations with Sadie. 
This witness stated that he had never seen anything that would cause him to be suspicious of his wife and told defendant he, “didn’t think there was anything to it.”\\nWe have repeatedly held that in order to constitute the crime of assault with intent to kill a specific intent to take the life of the person.assaulted must be shown, and the evidence\\' must be such as to warrant a conviction for murder if death had resulted from the assault. Allen v. State, 117 Ark. 432, 174 S. W. 1179. Appellant earnestly contends that the evidence here does not measure up to these requirements. It is argued that the testimony of Sadie Hughes and other witnesses for the State is exaggerated, unreasonable and unworthy of belief and our attention is directed to certain inconsistencies in such testimony and the version of the shooting given by defense witnesses. The jury were the judges of the credibility of the various witnesses and we must consider the evidence in the light most favorable to the State. As thus viewed, we hold it ample to sustain the verdict.\\nThe most serious question in the case arises in connection with the sixth assignment in the motion for new trial which alleges error in the court’s ruling on the State’s objection to certain testimony of the witness, Benjamin Daniels. This witness testified that about a month before the shooting Sadie Hughes told him to tell defendant that she, Sadie, was having improper relations with the defendant’s husband; that she had an automatic and was ready any time. The prosecuting attorney objected to that part of the testimony concerning improper relations between Sadie and defendant’s husband. In sustaining the objection the court ruled as ■ follows: “Ladies and gentlemen of the jury, the only purpose for which this testimony could be given would be to go to the credibility of the prosecuting witness when she testified she had had no relationship with the husband of the defendant and for that purpose you may consider it but not as a defense to this alleged crime.” Daniels further testified that when Sadie Hughes asked him to deliver the message to defendant, he informed her that he was not a news carrier. There is no showing that defendant ever received information of the alleged conversation.\\nIn Flowers v. State, 152 Ark. 295, 238 S. W. 37, the accused testified in detail about information that he had received óf improper relations between his wife and the person assaulted shortly before the assault. In affirming the conviction for assault with intent to kill the court said: “The fact of intimacy between appellant’s wife and the assaulted person and appellant’s receiving information thereof did not constitute a justification in law for the assault (Fisher v. State, 149 Ark. 48, 231 S. W. 181), but those facts were proper for the consideration of the jury in mitigation of the offense and also in determining whether or not the assault was made upon a sudden heat of passion and upon apparently sufficient provocation. 
Appellant was therefore entitled to an instruction on those subjects, and, if he had asked for it in proper form, the court should have given an instruction telling the jury that they should consider these facts and the circumstances under which appellant received the information, the length of time before the assault and the circumstances under which he made the assault, in determining whether the passion under which he acted at the time Avas suddenly aroused, and whether the provocation was apparently sufficient to make the passion irresistible. ’ ’\\nIt is noted that the rule stated is predicated upon a showing that the accused had received information of the illicit relations between his wife and the person assaulted. There is an absence of proof in the instant case that defendant was ever infprmed of the alleged conversation between Sadie Hughes and Benjamin Daniels and no passion could be aroused or provocation furnished by a statement which was never communicated to the defendant.\\nIn some jurisdictions evidence of improper conduct by a deceased toward defendant’s wife has been held admissible in homicide cases, even though uncommunicated to the defendant, in support of a plea of self-defense where such evidence tends to shed light as to who was the aggressor. See Anno. 44 A. L. R 860. The record shows that the testimony objected to in the instant case was not offered for that purpose, and the fact that defendant voluntarily entered into the difficulty with the prosecuting witness and was the aggressor throughout seems to be undisputed. Moreover, other evidence was admitted without objection tending to show that defendant did receive information from other sources relative to alleged improper relations between her husband and Sadie Hughes, and the jury was told in Instruction No. 10 that it could not convict defendant of assault with intent to kill if she acted under a sudden heat of passion caused by a provocation apparently sufficient to make such passion irresistible. Since it was not shown that defendant was ever informed of the conversation between Sadie Hughes and Benjamin Daniels, we think the court properly limited the jury’s consideration of such testimony.\\nAssignments Nos. 7, 8, 9 and 11 of the motion for new trial allege improper influence upon and misconduct of the jury which resulted in defendant’s not receiving a fair trial. In the absence of anything in the record to support these assignments of error, they will not be considered. Conley v. State, 180 Ark. 278, 21 S. W. 2d 176.\\nIn assignments 10 and 12 error is alleged in the court’s refusal to declare a mistrial when the prosecuting attorney asked Benjamin Daniels on cross-examination: “Q. Isn’t it a fact that all the colored people are afraid of Josie Bell Keith?” The court promptly sustained defendant’s objection to the unanswered question, told the jury that the question was improper and further asked that each juror raise his hand if he could and would disregard the question. The record reflects that each juror raised Ms hand. The action of the trial court removed any prejudice resulting* from the unanswered question. Jutson and Winters v. State, 213 Ark. 193, 209 S. W. 2d 681.\\nAssignment No. 13 is that the court erred in holding Sterling Hughes, a 10-year-old boy, not qualified to testify. 
In the course of the examination of the boy by counsel and the court, questions were asked and answers given as shown below.\\nAnother well settled rule is that the question of competency is left to the sound discretion of the trial judge and in the absence of clear abuse of the judicial discretion exercised, it is not reviewable upon appeal. Yother v. State, 167 Ark. 492, 268 S. W. 861. In Payne v. State, 177 Ark. 413, 6 S. W. 2d 832, the court quoted with approval the following* language of Justice Brewer, speaking for the court, in Wheeler v. United States, 159 U. S. 523, 16 S. Ct. 93, 40 L. Ed. 244: “The decision of this question rests primarily with the trial judge, who sees the proposed witness, notices his manner, his apparent possession or lack of intelligence, and may resort to any examination which will tend to disclose his capacity and intelligence as well as his understanding of the obligations of an oath. As many of these matters cannot be photographed into the record, the decision of the trial judge will not be disturbed on review, unless from that which is preserved it is clear that it was erroneous.” See, also, Hudson v. State, 207 Ark. 18, 179 S. W. 2d 165. We cannot say that there was an abuse of discretion on the part of the trial judge in holding that the Hughes boy did not comprehend the sanctity and obligations of an oath.\\nThe last assignment of error is that the court erred in giving instructions 2 to 14, inclusive. There was only a general objection to each of the instructions which were given on the court’s own motion. We have carefully examined the instructions and find that they correctly declare the law as generally given in such cases. None of the instructions are inherently erroneous and we do not review the ruling of the trial court unless a specific objection is made to such instructions. Tong v. State, 169 Ark. 708, 276 S. W. 1004.\\nOther alleged errors are argued in the brief which were not brought forward in the motion for new trial and no objections were made to the ruling of the court on such matters at the trial. We find no prejudicial error, and the judgment is affirmed.\\n \\n'}\n"
]
],
[
[
"# Change Decision Date to mongodb date format",
"_____no_output_____"
]
],
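The decision_date strings come at mixed granularity (full timestamp, date, year-month, or year only), which is why the cell below falls back through several formats with nested try/except blocks. An equivalent, more compact helper (an illustrative sketch, not the notebook's code) loops over candidate formats and returns None when nothing matches, so unparseable documents can be skipped instead of silently reusing the previous loop iteration's value.

import datetime

DATE_FORMATS = ("%Y-%m-%d %H:%M:%S", "%Y-%m-%d", "%Y-%m", "%Y")

def parse_decision_date(raw):
    # Try each known format in turn; return a datetime or None if none match
    for fmt in DATE_FORMATS:
        try:
            return datetime.datetime.strptime(raw, fmt)
        except ValueError:
            continue
    return None

# Hypothetical usage mirroring the update loop below:
# for each_case in cases.find():
#     parsed = parse_decision_date(each_case['decision_date'])
#     if parsed is not None:
#         cases.update_one({"_id": each_case["_id"]}, {"$set": {"decision_date": parsed}})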
[
[
"import datetime\n",
"_____no_output_____"
],
[
"count = 0\nfor each_case in all_cases:\n try:\n decision_date = datetime.datetime.strptime(each_case['decision_date'], \"%Y-%m-%d %H:%M:%S\")\n except:\n try:\n decision_date = datetime.datetime.strptime(each_case['decision_date'], \"%Y-%m-%d\")\n except:\n try:\n decision_date = datetime.datetime.strptime(each_case['decision_date'], \"%Y-%m\")\n except:\n try:\n decision_date = datetime.datetime.strptime(each_case['decision_date'], \"%Y\")\n except:\n pass\n myquery = { \"_id\": each_case['_id'] }\n newvalues = { \"$set\": { \"decision_date\": decision_date } }\n cases.update_one(myquery, newvalues)",
"1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 
00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 
00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 
00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 
00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n1994-12-05 00:00:00\n"
]
],
[
[
"# Elastic Search",
"_____no_output_____"
]
],
[
[
"import elasticsearch\nfrom datetime import date\ndate = date(2000, 1, 1)",
"_____no_output_____"
],
[
"cases.find({ \"decision_date\":{\"$gte\":date}})",
"_____no_output_____"
],
[
"# Take only the latest cases\nall_cases = cases.find({ \"decision_date\":{\"$gte\":date}})\nall_cases.count()",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|
cb75d8e76db18d0beb1308368c0aec765cba945a | 789,819 | ipynb | Jupyter Notebook | notebooks/MonteCarloHOWTO.ipynb | willclarkson/astroStatsHOWTOsDearborn | b2c8753cc3e00bfdb695320045b2a328bcfaacce | [
"MIT"
]
| null | null | null | notebooks/MonteCarloHOWTO.ipynb | willclarkson/astroStatsHOWTOsDearborn | b2c8753cc3e00bfdb695320045b2a328bcfaacce | [
"MIT"
]
| null | null | null | notebooks/MonteCarloHOWTO.ipynb | willclarkson/astroStatsHOWTOsDearborn | b2c8753cc3e00bfdb695320045b2a328bcfaacce | [
"MIT"
]
| null | null | null | 300.083207 | 81,352 | 0.926949 | [
[
[
"## HOWTO estimate parameter-errors using Monte Carlo - an example with python",
"_____no_output_____"
],
[
"Will Clarkson, Sat March 8th 2014\n\nUPDATED Sun March 14th 2021 with more recent system version and a few other minor style updates (now runs on python 3 and should be backwards-compatible to python 2.7). \n\nI have started the process of updating this series of HOWTOs to run on more recent systems than what I had access to in 2014 (and also with improvements to the code, e.g. \"N\" --> \"np\" for numpy). I make no promises that this will be regularly updated. The new versions will be curated at this public github repository (MIT license): https://github.com/willclarkson/astroStatsHOWTOsDearborn",
"_____no_output_____"
],
[
"*(Note: This is the first really in-depth HOWTO I've put up at Dearborn, and it contains a number of other useful points about data analysis with python (e.g. how to fit a curve to data, how to annotate plots...). Even if you know Monte Carlo backwards and forwards, you may find the syntax examples below useful. As with all python \"Notebooks,\" you should be able to reproduce everything here just by pasting the commands into your IPython interpreter.*)",
"_____no_output_____"
],
[
"A few summary points (since this HOWTO has become fairly long:\n\n[1]. Monte Carlo is quick (to code), flexible, and easy to do - the working code examples below are only a few lines long.\n\n[2]. It produces empirical error estimates on your fitted parameters, no matter how complicated the relationships of the parameters to the data;\n\n[3]. The estimated range of parameters depends on the details of the simulation, so make sure it's as realistic as you can make it;\n\n[4]. There are some interesting subtleties in how you report the error estimates; the best way to report them is to show not only a one- or two-sided range, or (better) the full covariance matrix, but (best) to provide the complete distribution of parameter-estimates over your simulations.\n\n[5]. You can use Monte Carlo to realistically investigate how to improve your experiment to allow parameter-uncertainties sufficiently small to be scientifically useful.",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"One of the most important pieces of model-fitting is to determine the \"uncertainty\" in the value of some parameter in the model. You might have fit some value of your model parameter to data, and it may even go through most of the datapoints and be consistent with your prior expectation on the parameter-values. But unless you know what range of values of this parameter are consistent with the data, you really don't know if your model fits at all. \n\nSo, how do we know what range of values of a parameter are consistent with the data?",
"_____no_output_____"
],
[
"### Background",
"_____no_output_____"
],
[
"In the material below I'm skirting round some very deep and interesting ideas in order to show a practical way to determine this range. However a few points are in order to clarify what I'm talking about here. \n\nIn what follows, I'll denote the \"error on the parameter\" to mean \"the spread in the parameter values that are consistent with the data.\" This is a somewhat vague definition (see any standard text for more detail). In the context of empirical estimators on the parameter-spread, one might report the \"68 percent confidence interval\" to mean \"the range of fitted values we would obtain 68% of the time\" when the true value is at or near our best-fit value. Since we can always draw likelihood contours centered on any point in the distribution, we can tighten this up a bit by requiring the range to be \"centered\" on the most likely value, in the sense that the trials are ordered in increasing order of likelihood and the middle set returned as the range. This is also formally a bit vague, but good enough for our purposes here. It's how we know what we'd get \"X% of the time\" that is the important part. \n\nIt's common in the literature to condense (massively) the information contained in the deviations from the \"best-fit\" value to report the \"1-sigma\" range, often reported as $a \\pm s$ where $s$ is the \"1-sigma\" range. In most cases this means the range of values that bound 68 percent of the measured values under a large number of experiments (or simulations). Formally, this practice throws away most of the information the reader might want to know: even under gaussian measurement errors the posterior distribution of the best-fit parameter can be highly asymmetric and non-gaussian. Simply reporting one number throws away the true distribution and is not good practice. It's also (less) common to report a two-sided error, like: $a^{+s}_{-r}$, where $s$ is a measure of the standard deviation of the part of the distribution above the best-fit, and $r$ below it (there is a factor-two to think about here; if the distribution were symmetric, you'd want $x^{+s}_{-s}$ to denote $x\\pm s$ not $x \\pm 2s$...). This usually implicitly approximates the upper part to half a gaussian and the lower part to a gaussian with a different standard deviation. This still may not be a good approximation to the true distribution of best-fit parameters. However in many cases this may be sufficient (say, when you are reporting positions and their errors for ten thousand stars in a catalog and don't want to plot the full posterior for each one - although even here you can provide the graphs electronically.)\n\nI will also use a rather classical definition of what I mean by \"consistent with data\" and \"best-fit.\" When finding the model-parameters that best fit the data, we can maximize the probability of getting the measured data values given the model and our choice of best-fit parameter values. If the parameter-values are a good fit, then the deviation between observed data and model predictions is small; if they are a bad-fit, then this deviation is large. This \"maximum-likelihood\" way of thinking is from some points of view backwards - we are maximizing the probability that the model match the data (the \"Bayesian\" approach) by maximizing the probability that the data match the model. In many practical cases the two approaches give similar values and ranges, and the one approach can be tweaked to approach the other. 
(For more, see any standard text on data analysis.)\n\nWe make the assumptions that:\n\n[1] our model f(x) really does describe the behavior we are measuring:\n\n[2] any deviations between the perfect underlying pattern predicted by f(x) and those we measure y(x), are due only to measurement error that we can parameterise. (This assumption can be relaxed, but in this HOWTO I preserve it to keep things simple). A common choice of this parameterization is a Gaussian - under this parameterization then curve-fitting by minimising the chi-square statistic is formally identical to maximizing the likelihood of (data given model).\n\nIf we were to conduct a large (~infinite?) number of identical experiments, then, the \"true\" parameters of our model f(x) would not change, but those are inaccessible to us - the parameters that best describe the data would change a little between experiments because we don't measure exactly the underlying behaviour. **The range of best-fit values returned across the set of experiments is then a reasonable estimate for the range in the parameter-values allowed by the data.**",
"_____no_output_____"
],
[
"### Error-estimates from Monte Carlo",
"_____no_output_____"
],
[
"Since we cannot do an infinite number of repeat-experiments (sometimes we cannot even do one if the behaviour we measure is transient), we need another way to predict what range of parameter values would be returned if we could do them. \n\nOne way is the formal error-estimate: - *IF* the measurement errors all follow the same distribution, and if they are \"small enough,\" then you can use standard error-propagation to take the measurement error and propagate it through to get a formal prediction on the error of the parameter. *BUT* there's no guarantee that this will work in all cases, for at least three obvious reasons. \n\n(i) You can think of simple models in which the Taylor series approximation behind standard error-propagation may become pathological (to think about: what is the formal variance of the Lorentz distribution, for example? How well might error-propagation work for, say, 1/x near x=0?). Or, \n\n(ii) your real-life science example may include a model whose error propagation is quite nasty formally. Or, \n\n(iii) for various real-world reasons you might be using a model-fitting scenario that breaks the chain of error-propagation in some way (might be e.g. numerical approximations in there if you're near a singularity in the model, or you might have something apparently innocuous like $|x|$ in the model).\n\nWhat you need in the real world, is a method that will empirically find the range of parameters that fit the model to some level of \"confidence\" without actually doing ten thousand re-runs of the experiment to determine this range.\n\n",
"_____no_output_____"
],
[
"This is what Monte Carlo does in this context$^1$: ** *simulate a large number of fake datasets and find the best-fit parameters using exactly the same method that you're using to fit your real data.* ** The range of returned parameters under these fake experiments is then a reasonable approximation to the true underlying error in the best-fit parameters.\n\nEven here there are important subtleties. The uncertainty in the best-fit parameter (i.e., the range of parameters consistent with the data) can depend strongly on the truth-value of the parameter - which is unknown. The formally correct procedure in these cases is to find the distribution of returned values under a range of truth-values, and use an ordering principle in the likelihood to find the range of recovered values when the truth-value is allowed to vary. The famous (to Physicists!) paper by Feldman & Cousins illustrates how to properly do this (link below). \n\nFeldman & Cousins 1997: A Unified Approach to the Classical Statistical Analysis of Small Signals\nhttp://arxiv.org/abs/physics/9711021\n\nIn many cases, however, you can assume the range of consistent-values does not change much with the truth-value (or verify that this is so through simulation), and simulate your fake experiments using the same truth-value for each trial. The range of best-fit values when this truth-model is \"experimentally\" sampled is then a reasonable estimate for the uncertainty on the parameter-value. This is what we do in this HOWTO.\n\n$^1$(I say \"in this context\" to distinguish error-estimates by Monte Carlo from Monte Carlo integration).",
"_____no_output_____"
],
[
"### Contexts in which you might see Monte Carlo error-estimates",
"_____no_output_____"
],
[
"Before (finally) moving on to the example with code, it's worth listing a few of the contexts in which you might see this. Any decent modern textbook will have lots more (e.g. Wall & Jenkins, Practical Statistics for Astronomers has a good view from 40,000 feet). Typical cases:\n\n[1]: Well-understood model, error distribution understood, want parameter errors (the case in this HOWTO); \n\n[2]: Well-understood model, error distribution understood, want to know what signal-strength you might mistakenly ascribe to data that doesn't actually contain a signal (\"detection limits\");\n\n[3]: Well-understood model, error distribution not well-behaved or well-understood (in this case use bootstrap resampling; more about this in a future HOWTO);\n\n[4]: Well-understood model, error distribution understood, we have information from some other measurements that constrain one or more of the relevant parameters (i.e. Bayesian framework: Markov Chain Monte Carlo is a good choice here);\n\n[5]: Well-understood model, error distribution not understood, high-dimensional parameter space (Markov Chain Monte Carlo again)\n\nAs I hope to show in a future HOWTO, Markov Chain Monte Carlo in some sense is a superset of the techniques I describe here, as it allows these methods to be extended to prior information.\n\nIf the model is NOT well-understood, or there are a few good choices for the models that parameterise the observed variation, then we are in the range of model-comparison, or alternatively non-parametric comparisons. Those are also outside the scope of this HOWTO.",
"_____no_output_____"
],
[
"## A practical example: 1/t decay with only six measurements",
"_____no_output_____"
],
[
"With that material aside, here's a practical example. First we generate a \"measured\" dataset that has been perturbed from the \"truth\" parameters (this corresponds to our experiment). Then we fit this dataset to estimate the value of the power-law index by which y(x) decays over time. Then we use Monte-Carlo to estimate the uncertainty in this best-fit value.",
"_____no_output_____"
],
[
"First we import a few modules we'll need. NOTE: if you enter the lines below into your python command-line (all but [8]) in order, you should be able to reproduce all the steps I'm doing here. ",
"_____no_output_____"
]
],
[
[
"import pylab as P\nimport numpy as np\nfrom scipy import optimize\n",
"_____no_output_____"
]
],
[
[
"(The following line is needed in the ipython notebook: you wouldn't need to type this from the python prompt)",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### \"Experimental\" data",
"_____no_output_____"
],
[
"First we do the experiment - here we simulate the data from 1/t decay. I use uniform error for simplicity of exposition, but there's no reason we could not make things more realistic later on. Let's suppose we have a small-ish number of datapoints:",
"_____no_output_____"
]
],
[
[
"xMeas = np.random.uniform(0.5,3.0,size=6)\nyTrue = 1.5/xMeas\nsError = 0.1\nyMeas = yTrue + np.random.normal(scale=sError, size=np.size(yTrue))\n",
"_____no_output_____"
]
],
[
[
"Let's plot this to see how our experiment looked:",
"_____no_output_____"
]
],
[
[
"P.errorbar(xMeas,yMeas,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\nP.xlabel('\"Time\"')\nP.ylabel('Measured value')\nP.xlim(0.4,3.0)",
"_____no_output_____"
]
],
[
[
"### Fitting our experimental data",
"_____no_output_____"
],
[
"Now we fit this data with our model. For this example, I'll assume that for whatever reason we've decided to use scipy's \"curve_fit\", which is pretty robust (although does not include measurement error in its fitting). No matter - the Monte Carlo will tell us what range of parameters come out under our chosen fitter.\n\nFirst we define the function to fit to this data. We want to have enough free parameters to actually capture the behavior we think is going on, but not introduce redundant parameters. We also want to furnish the fitter with an initial guess, which I'll call \"vGuess\" below:",
"_____no_output_____"
]
],
[
[
"def f_decay(x,a,b):\n return a*x**(b)\n",
"_____no_output_____"
]
],
[
[
"We need to supply the fitter with an initial guess of the parameters. Since we'll be using the same guess for our Monte Carlo below, I'll define this as a separate element here. I'll also make the initial guess obviously \"wrong\" - i.e. assuming a quadratic when the underlying behavior is 1/t - to see what happens.",
"_____no_output_____"
]
],
[
[
"vGuess = [2.0,-2.0]",
"_____no_output_____"
]
],
[
[
"Now we run the fitter. Like many of scipy's optimization routines, the fitter needs to know (i) what function to use, (ii) the data to fit, and finally (iii) an initial guess of the parameteres. curve_fit happens to return the best-fit parameters as the first of two return-values. So we need to send those two returned values into two new variables - \"vPars\" will hold the returned parameters-fit.",
"_____no_output_____"
]
],
[
[
"vPars, aCova = optimize.curve_fit(f_decay, xMeas, yMeas, vGuess)",
"_____no_output_____"
]
],
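[
[
"Before looking at the best-fit values themselves, note that the second object returned above, aCova, is curve_fit's own formal covariance estimate for the parameters. Purely as an illustration (this is an added sketch, not part of the original analysis), we can peek at the formal 1-sigma errors it implies, to compare later against the empirical Monte Carlo estimates:",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch: formal 1-sigma parameter errors implied by the\n# covariance matrix returned by curve_fit (square roots of its diagonal).\n# The Monte Carlo below is the main error estimate in this HOWTO.\nprint(np.sqrt(np.diag(aCova)))",
"_____no_output_____"
]
],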
[
[
"Let's take a look at those best-fit parameters:",
"_____no_output_____"
]
],
[
[
"print(vPars)\n",
"[ 1.49525025 -0.9674022 ]\n"
]
],
[
[
"That's not too bad - the \"Truth\" values were y(x) = 1.5/x and we have found y(x) = 1.46/x^(1.13). Let's take a look at what this model looks like over the data:",
"_____no_output_____"
]
],
[
[
"xFine = np.linspace(0.4,3.0,100)\nP.errorbar(xMeas,yMeas,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\nP.plot(xFine, f_decay(xFine,*vPars), 'g-', lw=1) # Fitted parameters\nP.plot(xFine, f_decay(xFine,1.5,-1.0), 'r--', lw=1) # Parameters used to generate data\nP.title('Fitted curve (green) and \"truth\" curve (red dashed)')",
"_____no_output_____"
]
],
[
[
"Visually, this isn't **too** horrendous. At this point we might be tempted to claim that \"obviously\" our data shows y(x) = constant/$x^{0.97}$ since that model goes through the points.\n\nBut what range of parameter-values are consistent with a dataset like this?",
"_____no_output_____"
],
[
"### Monte Carlo - allowing observing times to vary",
"_____no_output_____"
],
[
"What we do next depends on what level we think our hypothetical experiments might differ from each other. I'll make the assumption here that the times of measurement between x=0.5 and x=3.0 were random. In that case, we would need to include this variation of measurement-time in our simulations in order to report the range of values another experimenter might find if they used a similar setup. So, we will generate a large number of datasets, re-fit the parameter values where the measurement-times are also not under our experimenter's control, and then find the range of parameters that match the data. \n\nWe need to set up a few things first: The number of trials and the combined set of best-fit parameters, for all the model parameters (initially empty). So:",
"_____no_output_____"
]
],
[
[
"nTrials = 4000\naFitPars = np.array([])",
"_____no_output_____"
]
],
[
[
"Now we actually do the simulations. Each time we need to generate the data as well as fit it. \n\n(There is one syntax complication: we cannot stack a 1d vector onto an empty array in python, so there is an if/then for the FitPars array: if it's empty, copy the latest round of fitted parameters into it, if not then stack the latest round of fitted parameters onto what we have so far.)",
"_____no_output_____"
]
],
[
[
"for iTrial in range(nTrials):\n xTrial = np.random.uniform(0.5,3.0,size=np.size(xMeas))\n yGen = 1.5/xTrial\n yTrial = yGen + np.random.normal(scale=sError,size=np.size(yGen))\n \n # We use a try/except clause to catch pathologies\n try:\n vTrial, aCova = optimize.curve_fit(f_decay,xTrial,yTrial,vGuess)\n except:\n dumdum=1\n continue # This moves us to the next loop without stacking.\n \n #here follows the syntax for stacking the trial onto the running sample:\n if np.size(aFitPars) < 1:\n aFitPars=np.copy(vTrial)\n else:\n aFitPars = np.vstack(( aFitPars, vTrial ))\n ",
"_____no_output_____"
]
],
[
[
"A couple points to note in the above chunk: \n\n(i) All those np.size() calls are to ensure that the various arrays are consistent with the size of the measured data. We could equally well have typed \"6\" in most of those places, but then we'd have to change it each time a new experiment was done with different numbers of datapoints. Also, \n\n(ii) Your fitting routine might sometimes not work. A more sophisticated analysis would catch these errors: here I'm just using python's \"try/except\" clause to gracefully ignore the bad cases. (If you're finding that more than a percent or so of cases are breaking, you may want to double-check whether your model has too few or too many parameters for the data). Finally:\n\n(iii) In this example, I am starting with an empty aFitPars array and then stacking on the fit-values only if the fitting routine ran without failing. The \"continue\" statement stops the routine from dumbly stacking on the last fit-value if the fit failed. I do things this way so that the fitpars array is always the correct size to match the number of correctly-run trials.\n\nHaving done all that, let's look at the size of the set of trials:",
"_____no_output_____"
]
],
[
[
"np.shape(aFitPars)",
"_____no_output_____"
]
],
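[
[
"As a quick check on point (ii) above, here is a minimal added sketch (using the nTrials and aFitPars variables already defined) that reports what fraction of the trials actually returned a fit:",
"_____no_output_____"
]
],
[
[
"# Minimal check: fraction of Monte Carlo trials that returned a fit.\n# (Trials where curve_fit raised an exception were skipped in the loop above.)\nnGood = np.shape(aFitPars)[0]\nprint('Successful fits: %i of %i trials (%.1f percent)' % (nGood, nTrials, 100.0*nGood/nTrials))",
"_____no_output_____"
]
],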
[
[
"This shows that all our 4000 trials were successful, which isn't too bad. Now, let's look at the distribution of powers of x that came out of the fit:",
"_____no_output_____"
]
],
[
[
"print(np.median(aFitPars[:,1]))\nprint(np.std(aFitPars[:,1]))",
"-0.9981492043446679\n0.13778476093391592\n"
]
],
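[
[
"As discussed in the introduction, a single standard deviation can hide asymmetry in the distribution of fitted values. Here is a minimal added sketch (assuming the aFitPars array built above) that reports a two-sided 68 percent range about the median of the power-law index instead:",
"_____no_output_____"
]
],
[
[
"# Two-sided 68 percent range for the fitted power-law index b, taken directly\n# from the Monte Carlo samples via the 16th, 50th and 84th percentiles.\nbLo, bMed, bHi = np.percentile(aFitPars[:,1], [16.0, 50.0, 84.0])\nprint('b = %.3f (+%.3f / -%.3f)' % (bMed, bHi-bMed, bMed-bLo))",
"_____no_output_____"
]
],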
[
[
"Let's take a graphical look at this parameter. We'll use matplotlib's \"hist\" feature to generate and plot the distribution for convenience, but there are other better tools you'll likely come across.",
"_____no_output_____"
]
],
[
[
"P.hist(aFitPars[:,1],bins=50)\nP.xlabel('Power-law index b')\nP.ylabel('N(b)')\n\nprint(np.std(aFitPars[:,1]))\n",
"0.13778476093391592\n"
]
],
[
[
"We see that the standard deviation of our fitted parameter is pretty high - our measurement of (constant/$x^{1.13}$) is more accurately (constant/$x^{0.97 ~ \\pm ~0.138}$). This is consistent with 1/x within the range of values we have recovered.\n\nNotice also that our 1D distribution looks nice and gaussian. But is the situation really this simple? Let's look at both power-law components together:",
"_____no_output_____"
]
],
[
[
"P.scatter(aFitPars[:,0], aFitPars[:,1], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')",
"_____no_output_____"
]
],
[
[
"Here follows a little bit of matplotlib syntax to show this in a slightly more visually appealing way:",
"_____no_output_____"
]
],
[
[
"from scipy.stats import kde\nx,y=aFitPars.T\n\n# Use a kernel density estimator to produce local-counts in this space, and grid them to plot.\nk = kde.gaussian_kde(aFitPars.T)\nnbins=200\nxi, yi = np.mgrid[x.min():x.max():nbins*1j, y.min():y.max():nbins*1j]\nzi = k(np.vstack([xi.flatten(), yi.flatten()]))\n\n# Show the density\nP.pcolormesh(xi, yi, zi.reshape(xi.shape), zorder=3)\nP.colorbar()\n\n# Show the datapoints on top of this, and also the contours. \"zorder\" sets the vertical order in the plot.\nP.scatter(aFitPars[:,0], aFitPars[:,1], c='w', s=2, zorder=15, edgecolor='none',alpha=0.75)\nP.contour(xi,yi,zi.reshape(xi.shape), zorder=25, colors='0.25')\n\n\nP.ylim(-1.45,-0.55)\nP.xlim(1.25,1.80)\nP.xlabel('Power-law normalization a')\nP.ylabel('Power-law index b')\n",
"_____no_output_____"
]
],
[
[
"Even in our simple two-parameter problem the results are quite interesting. For example, the correlation between parameters appears to switch sign the farther from the center of the cloud we go - perhaps indicating different regimes depending on the clustering of measurement-times.\n\n",
"_____no_output_____"
],
[
"### Were our observing times special?",
"_____no_output_____"
],
[
"Now suppose instead that we had good reason to make measurements at the times (x-values) that we did. Perhaps a realistic estimate for the errors should not allow the measurement times to vary.\n\nLet's try another Monte-Carlo, this time asking what parameter values we recover if we make identical experiments at the same times as our real data, but still subject to experimental error at those times:",
"_____no_output_____"
]
],
[
[
"aFitSameTimes=np.array([])\nfor iTrial in range(nTrials):\n yGen = 1.5/xMeas # Same measured times this time!\n yTrial = yGen + np.random.normal(scale=sError,size=np.size(yGen))\n \n # We use a try/except clause to catch pathologies\n try:\n vTrial, aCova = optimize.curve_fit(f_decay,xMeas,yTrial,vGuess)\n except:\n dumdum=1\n continue # This moves us to the next loop without stacking.\n \n #here follows the syntax for stacking the trial onto the running sample:\n if np.size(aFitSameTimes) < 1:\n aFitSameTimes=np.copy(vTrial)\n else:\n aFitSameTimes = np.vstack(( aFitSameTimes, vTrial ))",
"_____no_output_____"
],
[
"np.shape(aFitSameTimes)",
"_____no_output_____"
]
],
[
[
"Let's look at the spread in recovered values as we did before:",
"_____no_output_____"
]
],
[
[
"P.hist(aFitSameTimes[:,0],bins=50, alpha=0.5,color='r')\nP.xlabel('Power-law index b')\nP.ylabel('N(c)')\nP.title('Same measurement times each trial')",
"_____no_output_____"
],
[
"print(np.median(aFitSameTimes[:,1]))\nprint(np.std(aFitSameTimes[:,1]))\n",
"-0.9988972317956054\n0.06646615314299915\n"
]
],
[
[
"Let's look at those parameters plotted against each other as we did before.",
"_____no_output_____"
]
],
[
[
"P.scatter(aFitSameTimes[:,0], aFitSameTimes[:,1],c='r', s=36, edgecolor='k', alpha=0.5)\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Same measurement times each trial')\n\n# Set the same axis-ranges as above for visual comparison\n#P.xlim(1.30, 1.70)\n#P.ylim(-1.4,-0.6)",
"_____no_output_____"
]
],
[
[
"As we might expect, the measurements are still correlated, but the distribution is tighter. Let's take a look at the two sets of parameters on top of each other:",
"_____no_output_____"
]
],
[
[
"# the alpha values below are transparency values for plots.\nP.scatter(aFitSameTimes[:,0], aFitSameTimes[:,1],c='r', s=9, edgecolor='none', zorder=25, alpha=0.5)\nP.scatter(aFitPars[:,0], aFitPars[:,1],c='b', s=9, edgecolor='none', zorder=5)\n\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Random observing times (blue) and frozen times (red)')",
"_____no_output_____"
]
],
[
[
"Or we can generate our contours and compare the two sets visually:",
"_____no_output_____"
]
],
[
[
"xS,yS=aFitSameTimes.T\nkS = kde.gaussian_kde(aFitSameTimes.T)\nnbins=50\nxiS, yiS = np.mgrid[xS.min():xS.max():nbins*1j, yS.min():yS.max():nbins*1j]\nziS = kS(np.vstack([xiS.flatten(), yiS.flatten()]))\n\n# Now let's plot this over the previous (xi,yi,zi) case:\nP.contour(xi,yi,zi.reshape(xi.shape),colors='b',lw=2, zorder=5, alpha=0.75, linestyles='dashed', label='random times')\nP.contour(xiS, yiS, ziS.reshape(xiS.shape), colors='r', zorder=15, alpha=1.0, label='times frozen')\n\nP.xlim(1.0,2.0)\nP.ylim(-1.5,-0.50)\nP.title('Random-times (blue dashed) and constant-times (red) compared', fontsize=10)\n\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')",
"_____no_output_____"
]
],
[
[
"That these two sets are not terribly different (but not identical!) indicates that the particular experiment (xMeas, yMeas at the beginning) didn't happen to pick a hugely fortuitous set of observing \"times\" (i.e. x-values), although it looks like the values that were picked were generally a bit better than any random set of six observing times.",
"_____no_output_____"
],
[
"### Discussion",
"_____no_output_____"
],
[
"So, which value for the spread of the power-law index \"b\" should we use in our hypothetical publication?\n\nThat depends on which of the scenarios simulated you believe to be the most honest representation of the differences between predicted and actual data one would encounter in real life. What you CANNOT do is just pick the scenario that gives the smallest range just because you want to report the smaller error!\n\nIt's usually best to be as upfront as possible about what your errors mean. This is where in your paper you would report not just the range, but also under what circumstances this was estimated. If you assumed your measurement times were constant when making the monte carlo, then say so - and you should also justify in the paper why you made this assumption. In this simple case above, the differences between assuming any set of random times (blue) and the exact times (red) is not very large, but you still want the reader to understand as much as possible about your data.\n\nIn most cases - even the simple toy problem here - you should really go one better, and give the reader not just the range of values consistent with your data, but the full likelihood function of the fitted parameters. This is usually hard to parameterise but easy to show - just show the graph of the recovered parameters (any of the example graphs above would be good)!\n\nNotice also that in the case of the toy problem here, even a two-parameter model with a very simple form has led to real covariance between the fitted parameters under our monte carlo experiments. Under this situation, what would the 1-sigma variation in one of the parameters mean? \n",
"_____no_output_____"
],
[
"In a situation like this, you can easily report not just the standard deviation (or its square, the variance) but instead the *Covariance* of the parameters. The diagonal elements are the variance of each parameter, while the off-diagonals indicate the covariance between each pair of parameters. In python, this is easy:",
"_____no_output_____"
]
],
[
[
"aCovFit = np.cov(np.transpose(aFitSameTimes))",
"_____no_output_____"
]
],
[
[
"Looking at the resulting covariance matrix, we see that - like our graphs above suggest - the two parameters do indeed vary together:",
"_____no_output_____"
]
],
[
[
"print(aCovFit)\n",
"[[0.00220256 0.00190299]\n [0.00190299 0.00441885]]\n"
],
[
"print(np.std(aFitPars[:,0]))\nprint(np.sqrt(aCovFit[0,0]))\n",
"0.13556475825680783\n0.04693146004038742\n"
]
],
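[
[
"For a like-for-like comparison, here is a minimal added check on the frozen-times set alone, showing that the square root of a diagonal element of aCovFit reproduces the standard deviation of the same parameter over the same trials:",
"_____no_output_____"
]
],
[
[
"# Like-for-like check on the frozen-times trials: the square root of a\n# diagonal covariance element matches (almost exactly - np.cov uses ddof=1)\n# the standard deviation of that parameter over the same set of trials.\nprint(np.std(aFitSameTimes[:,0]))\nprint(np.sqrt(aCovFit[0,0]))",
"_____no_output_____"
]
],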
[
[
"That difference between the diagonal element and the standard deviation of the fitted parameter \"a\" is small but significant! It means there is a nonzero covariance. We can get a little bit more insight by computing the normalized covariance (the correlation). We see that the off-diagonal terms are about 61 percent of the diagonal terms (expressed as variance not standard deviation). ",
"_____no_output_____"
]
],
[
[
"np.corrcoef(np.transpose(aFitSameTimes))\n",
"_____no_output_____"
]
],
[
[
"If you're more familiar with the standard deviation rather than the variance, you might take the square root to get a visual handle on how large this correlation is, remembering to use the absolute value in case of negative off-diagonal terms (which we'd get in the case of variables anti-correlated with each other). I have not seen this done much, but you might find it more intuitive. Your mileage may vary.",
"_____no_output_____"
]
],
[
[
"np.sqrt(np.abs(np.corrcoef(np.transpose(aFitSameTimes))))",
"_____no_output_____"
]
],
[
[
"The above has been a quick introduction into what monte carlo is, how it works, and how to do it in python. \n\nFor more on the ways to report the ranges when two parameters vary against each other, take a look at any standard text on data analysis in the sciences. Bevington & Robson has a good discussion at about the right level, Numerical Recipes also has some interesting advice.",
"_____no_output_____"
],
[
"# A more interesting example: powerlaw plus constant background",
"_____no_output_____"
],
[
"Now we move on to a more \"realistic\" example: there is a power-law decay above some unknown constant background, which we include in our model. As we will see, this leads to significant deviations from the bivariate gaussian-like posterior distrbutions we saw above, because with only a few datapoints it is statistically difficult to determine which of the background, normalization should account for this offset level. \n\n(Note that we could flip this around and say that if we KNOW that the two-parameter model does fit our data, then throwing in a third parameter significantly complicates the range of consistent values.)",
"_____no_output_____"
],
[
"We begin as before, this time with the background term included, and assuming our experimenter has been able to take just a few more datapoints. We'll define our slightly more complex function and use the same function to generate the \"experimental\" data, the \"truth\" values and the monte-carlo simulations. ",
"_____no_output_____"
]
],
[
[
"def f_expt(x,a,b,c):\n return a*x**(b)+c",
"_____no_output_____"
],
[
"nData=14\nsError=0.1\nxMeas=np.random.uniform(0.5,5.0,size=nData)\nyTrue=f_expt(xMeas,1.5,-1.0,0.5)\nyMeas = yTrue + np.random.normal(scale=sError, size=np.size(yTrue))\nP.errorbar(xMeas,yMeas,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\n\n# Some syntax to make the plot a bit clearer\nP.xlim(0.4,5.0)\nP.ylim(0.0,3.0)\nP.title('Experimental plus background')\nP.xlabel('Time')\nP.ylabel('Measured value')\n\n# Plot the total model and the constant background\nxFine=np.linspace(0.4,5.0,100)\nP.plot([np.min(xFine),np.max(xFine)], [0.5,0.5],'r-.')\nP.plot(xFine,f_expt(xFine,1.5,-1.0,0.5), 'r--')\n",
"_____no_output_____"
]
],
[
[
"As before, we'll fit our new model to this data with background. We'll assume an optimistic guess with lower than true background:",
"_____no_output_____"
]
],
[
[
"vGuess=[2.0,-2.0,0.2]",
"_____no_output_____"
],
[
"vPars, aCova = optimize.curve_fit(f_expt, xMeas, yMeas, vGuess)",
"_____no_output_____"
],
[
"print(vPars)",
"[ 1.4563466 -0.9404785 0.46574376]\n"
]
],
[
[
"This time the parameters are quite a bit different than input: the \"truth\" values were [1.5, -1.0, 0.5]. \n\nBut is this really so \"bad?\" How do we know? Let's view this graphically, plotting the fitted parameters (green) over the generated parameters (red dashed):",
"_____no_output_____"
]
],
[
[
"P.errorbar(xMeas,yMeas,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\nP.plot(xFine,f_expt(xFine,1.5,-1.0,0.5), 'r--')\nP.plot(xFine,f_expt(xFine,*vPars), 'g-')\n\n# Same labels as before:\nP.xlim(0.4,5.0)\nP.ylim(0.0,3.0)\nP.title('Power law plus background')\nP.xlabel('Time')\nP.ylabel('Measured value')",
"_____no_output_____"
]
],
[
[
"We see that, even though the fitted parameters are different from the generated parameters by quite a bit more than in the two-parameter case, the two sets of best-fit parameters produce quite similar curves. This is an indication that our experimental setup might not be sufficient to distinguish the parameters of our model. \n\nPressing on with this, what range of parameters are consistent with the data we do have? Let's use Monte Carlo to find out. Once again, we initialise our set of fit parameters:",
"_____no_output_____"
]
],
[
[
"nTrials = 4000\naFitExpt = np.array([])",
"_____no_output_____"
],
[
"for iTrial in range(nTrials):\n xTrial = np.random.uniform(0.5,5.0,size=np.size(xMeas))\n yGen = f_expt(xTrial,1.5,-1.0,0.5)\n yTrial = yGen + np.random.normal(scale=sError,size=np.size(yGen))\n \n # We use a try/except clause to catch pathologies\n try:\n vTrial, aCova = optimize.curve_fit(f_expt,xTrial,yTrial,vGuess)\n except:\n dumdum=1\n continue # This moves us to the next loop without stacking.\n \n #here follows the syntax for stacking the trial onto the running sample:\n if np.size(aFitExpt) < 1:\n aFitExpt=np.copy(vTrial)\n else:\n aFitExpt = np.vstack(( aFitExpt, vTrial ))",
"_____no_output_____"
]
],
[
[
"Since our model is now more complex given the data, let's see what fraction of trials were successful:",
"_____no_output_____"
]
],
[
[
"np.shape(aFitExpt)",
"_____no_output_____"
]
],
[
[
"As we might have expected, a small fraction (about 1-2 percent) of the trials failed. The \"try/except\" clause above handled this gracefully. So - let's take a look at the distribution of parameters under these simulations:",
"_____no_output_____"
]
],
[
[
"P.scatter(aFitExpt[:,0], aFitExpt[:,1], c=aFitExpt[:,2], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Three-parameter model')\nP.colorbar(label='Background component c')",
"_____no_output_____"
]
],
[
[
"We see that the distribution of fitted parameters is completely different from the two-parameter case above. Let's zoom in:",
"_____no_output_____"
]
],
[
[
"P.scatter(aFitExpt[:,0], aFitExpt[:,1], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Three-parameter model - zoomed in')\nP.xlim(0,5)\nP.ylim(-2,0)",
"_____no_output_____"
]
],
[
[
"Let's see what range of parameters comes out of those simulations. Note a couple of things: \n\n(i) two of the histograms below have a log10 scale due to the very long tails of the distributions;\n\n(ii) We have set limits on those histograms. This is a little dangerous in practice - we don't want to throw away samples when computing the range - but those limits were set after examining the full range (we also don't want to include the really pathological cases like the very bottom-right datapoint in the scatterplot two figures up). So:",
"_____no_output_____"
]
],
[
[
"P.hist(aFitExpt[:,0],bins=250,alpha=0.5,range=[-10,50], log=True)\nP.xlabel('Power-law normalization a')\nP.ylabel('N(a)')",
"_____no_output_____"
],
[
"P.hist(aFitExpt[:,1],bins=150,alpha=0.5)\nP.xlabel('Power-law index b')\nP.ylabel('N(b)')",
"_____no_output_____"
],
[
"P.hist(aFitExpt[:,2],bins=250,alpha=0.5,log=True, range=[-10,3])\nP.xlabel('Constant background c')\nP.ylabel('N(c)')",
"_____no_output_____"
]
],
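[
[
"Because the histograms above were drawn with hard range limits, it is worth checking how many trials actually fall outside those plotting windows. A minimal added sketch, using the same limits that were passed to the histogram calls:",
"_____no_output_____"
]
],
[
[
"# Fraction of trials falling outside the plotted histogram windows above.\n# (The range limits here are the same ones passed to P.hist.)\nfOutA = np.mean((aFitExpt[:,0] < -10.0) | (aFitExpt[:,0] > 50.0))\nfOutC = np.mean((aFitExpt[:,2] < -10.0) | (aFitExpt[:,2] > 3.0))\nprint('Fraction outside plotted range: a %.3f, c %.3f' % (fOutA, fOutC))",
"_____no_output_____"
]
],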
[
[
"Compared to the two-parameter case, the range of allowed power-law indices is considerable! \n\nWhat about the co-variance of the background and the power-law normalization?",
"_____no_output_____"
]
],
[
[
"P.scatter(aFitExpt[:,0], aFitExpt[:,2], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Constant-background c')\nP.title('Three-parameter model - zoomed in')\nP.xlim(0,6)\nP.ylim(-5,2)",
"_____no_output_____"
]
],
[
[
"What can we conclude with behavior like this? At least three things are going on here. Under random time-sampling within the (0.5-5.0) range:\n\n[1]. The fitter we've used here, curve_fit, does not always do a good job fitting given the 3-parameter and the model. Ideally we should be able to fold in other information we might have (e.g. 1/x^3 or steeper might be unphysical). There are (simple!) methods for including these outside constraints, but they're beyond the scope of this HOWTO.\n\n[2]. Even though we have 14 datapoints and 3 model-parameters (so formally 11 degrees of freedom), the range of the data is not sufficient to distinguish the constant background from the power-law normalisation. Our model is too complicated for the data.\n\n[3]. Notice: even with gaussian errors, the distributions of posterior values for the best-fit parameters are not nice well-behaved gaussians! ",
"_____no_output_____"
],
[
"### Making progress in sub-optimal situations",
"_____no_output_____"
],
[
"Let's try asking a restricted set of simulations as before: assuming the experimenter is able to spread their experiments over time (thus avoiding bunching up of measurements in some cases), what happens then?",
"_____no_output_____"
]
],
[
[
"aStandard=np.array([])\n\n# suppose we believe the \"true\" values really are 1.5, -1.0, 0.5\nyTrue=f_expt(xMeas,1.5,-1.0,0.5)\nfor iTrial in range(nTrials):\n \n # Note that the errors are the only source of variation here!\n yTrial = yTrue + np.random.normal(scale=sError,size=np.size(yTrue))\n \n # We use a try/except clause to catch pathologies\n try:\n vTrial, aCova = optimize.curve_fit(f_expt,xMeas,yTrial,vGuess)\n except:\n dumdum=1\n continue # This moves us to the next loop without stacking.\n \n #here follows the syntax for stacking the trial onto the running sample:\n if np.size(aStandard) < 1:\n aStandard=np.copy(vTrial)\n else:\n aStandard = np.vstack(( aStandard, vTrial ))",
"_____no_output_____"
],
[
"np.shape(aStandard)\n",
"_____no_output_____"
],
[
"P.scatter(aStandard[:,0], aStandard[:,1], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Three-parameter model, measurement times frozen')",
"_____no_output_____"
],
[
"np.shape(aStandard[:,0:2])",
"_____no_output_____"
]
],
[
[
"We'll apply the same incantations in matplotlib to see what this distribution now looks like:",
"_____no_output_____"
]
],
[
[
"from scipy.stats import kde\nxS,yS=aStandard[:,0:2].T\nkS = kde.gaussian_kde(aStandard[:,0:2].T)\nnbins=250\nxiS, yiS = np.mgrid[xS.min():xS.max():nbins*1j, yS.min():yS.max():nbins*1j]\nziS = kS(np.vstack([xiS.flatten(), yiS.flatten()]))\n\nP.pcolormesh(xiS, yiS, ziS.reshape(xiS.shape), zorder=3)\nP.colorbar()\n\n# Show the datapoints on top of this, and also the contours. \"zorder\" sets the vertical order in the plot.\nP.scatter(aStandard[:,0], aStandard[:,1], c='w', s=2, zorder=15, edgecolor='none',alpha=0.5)\nP.contour(xiS,yiS,ziS.reshape(xiS.shape), zorder=25, colors='0.25')\nP.xlabel('Power-law normalization a')\nP.ylabel('Power-law index b')\n\nP.xlim(0.8,4)\n",
"_____no_output_____"
]
],
[
[
"Again - complex, but more well-behaved. This time the parameter-values and ranges are the following:",
"_____no_output_____"
]
],
[
[
"print( \"Median of best-fit parameters:\", np.median(aStandard, axis=0) )",
"Median of best-fit parameters: [ 1.60161655 -1.03806104 0.51450306]\n"
],
[
"print(\"Covariance matrix:\")\nprint( np.cov(np.transpose(aStandard)) )\nprint( \"1-parameter deviations:\", np.std(aStandard, axis=0) )\n",
"Covariance matrix:\n[[ 6.54313464 0.53726724 -6.67738485]\n [ 0.53726724 0.38323566 -0.66401557]\n [-6.67738485 -0.66401557 6.86205094]]\n1-parameter deviations: [2.55761553 0.61897811 2.61920386]\n"
]
],
[
[
"Interestingly, the median values returned *when we sample at the times we did* are similar to the truth values we used to simulate the data, but the scatters are difficult to interpret when stated as simple standard deviations!",
"_____no_output_____"
]
],
[
[
"P.hist(aStandard[:,0],bins=250,alpha=0.5)\nP.title('Power-law normalization')\nP.xlabel('Power-law normalization a')",
"_____no_output_____"
],
[
"P.hist(aStandard[:,1],bins=50,alpha=0.5)\nP.title('Power-law index b')\nP.xlabel('Power-law index b')",
"_____no_output_____"
],
[
"P.hist(aStandard[:,2],bins=150,alpha=0.5)\nP.title('Background c')\nP.xlabel('Background c')",
"_____no_output_____"
]
],
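[
[
"One reason the simple standard deviations above are hard to interpret is that the fitted parameters are strongly correlated. A minimal added sketch, normalizing the covariance matrix printed earlier into a correlation matrix (from the printed covariance, the correlation between the normalization a and the background c works out close to -1, reflecting the degeneracy discussed above):",
"_____no_output_____"
]
],
[
[
"# Normalized covariance (correlation) matrix for the three fitted parameters\n# in the frozen-times trials. Off-diagonal entries near +/-1 indicate a\n# strong degeneracy between the corresponding parameters.\nprint(np.corrcoef(np.transpose(aStandard)))",
"_____no_output_____"
]
],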
[
[
"### Discussion - signal plus unknown background",
"_____no_output_____"
],
[
"While the frozen-time example above produces more \"well-behaved\" results, does it do a better job of representing the parameter-error one would actually encounter? \n\nAgain, that depends on the situation. In some fraction of trials, the uniform random number generator used to make the fake measurement trials, will sometimes produce all 14 measurements at one end of the time interval, in which case the relationship between the \"truth\" value and the best-fit could change. It might be that your hypothetical experimenter would never let this happen. Or, it might be that this is quite realistic - if, say, you're a ground-based astronomer and weather can very much cause your observations to be bunched up in time (if the only gap in the clouds were near the beginning of the inverse-t decay here).\n\nMy view is that it's up to the experimenter to be careful to communicate what they're actually doing, and give the reader as much information as possible to enable them to understand what was actually done, and thus how to interpret the results.",
"_____no_output_____"
],
[
"In the general case, it's usually better to err on the side of caution and allow \"things to go wrong\" in the monte carlo trials. In this case we would conclude that perhaps we got lucky in our experimental data, and any given example of an experiment with ony 14 datapoints from time 0.5-5.0 could return any value within the wider range we found above. What to do?",
"_____no_output_____"
],
[
"### Using Monte-Carlo to design better experiments",
"_____no_output_____"
],
[
"We can use our simulator to find out what happens if we had just a bit more data, or a long enough time-baseline to actually see the background separate from the power-law - this can be crucial when designing future experiments to really tie down the parameter-values we want. \n\nIn our example, let's suppose we were able to take many more points (35 compared to 14) over a just slightly longer time-baseline (interval 0.5-7 compared to 0.5-5):",
"_____no_output_____"
]
],
[
[
"xExtend = np.random.uniform(0.5,7.0,size=35)\nyGenera = f_expt(xExtend,1.5,-1.0,0.5)\nyMeasur = yGenera + np.random.normal(scale=sError,size=np.size(yGenera))\nP.errorbar(xExtend,yMeasur,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\n",
"_____no_output_____"
],
[
"vExten, aExten = optimize.curve_fit(f_expt, xExtend, yMeasur, [2.0,-2.0,0.2])",
"_____no_output_____"
],
[
"print(vExten)",
"[ 1.47477859 -0.99706574 0.50679487]\n"
]
],
[
[
"Let's see how our best-fit parameters compare to the data and to the \"truth\" parameters:",
"_____no_output_____"
]
],
[
[
"P.errorbar(xExtend,yMeasur,yerr=sError,lw=0,elinewidth=1,ecolor='b', fmt='ko',markersize=2)\nP.xlabel('X')\nP.ylabel('Hypothetical data')\n\nxFine=np.linspace(0.5,7.0,100)\nP.plot(xFine,f_expt(xFine,1.5,-1.0,0.5), 'r--')\nP.plot(xFine,f_expt(xFine,*vExten), 'g-')\n\n# Show the \"truth\" background level for comparison with our planned experimental data\nP.plot([0.0,7.0],[0.5,0.5],'r-.')",
"_____no_output_____"
]
],
[
[
"Now with our better dataset, let's see what happens when we try to recover parameter-ranges on this, without any assumptions on the specific times of the measurements:",
"_____no_output_____"
]
],
[
[
"aExtend=np.array([])\nfor iTrial in range(nTrials):\n xTrial = np.random.uniform(0.5,5.0,size=np.size(xExtend))\n yGen = f_expt(xTrial,1.61,-0.97,0.42)\n yTrial = yGen + np.random.normal(scale=sError,size=np.size(yGen))\n \n # We use a try/except clause to catch pathologies\n try:\n vTrial, aCova = optimize.curve_fit(f_expt,xTrial,yTrial,vGuess)\n except:\n dumdum=1\n continue # This moves us to the next loop without stacking.\n \n #here follows the syntax for stacking the trial onto the running sample:\n if np.size(aExtend) < 1:\n aExtend=np.copy(vTrial)\n else:\n aExtend = np.vstack(( aExtend, vTrial ))",
"_____no_output_____"
],
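[
"# For contrast with the randomized-time trials above: a minimal sketch of the frozen-time variant,\n# which reuses the actual measurement times xExtend in every trial so that only the noise is re-drawn.\n# Assumes f_expt, xExtend, sError, vGuess and nTrials are in scope as above.\naFrozen = np.array([])\nfor iTrial in range(nTrials):\n    yGen = f_expt(xExtend, 1.61, -0.97, 0.42)\n    yTrial = yGen + np.random.normal(scale=sError, size=np.size(yGen))\n    try:\n        vTrial, aCova = optimize.curve_fit(f_expt, xExtend, yTrial, vGuess)\n    except Exception:\n        continue\n    if np.size(aFrozen) < 1:\n        aFrozen = np.copy(vTrial)\n    else:\n        aFrozen = np.vstack(( aFrozen, vTrial ))",
"_____no_output_____"
],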
[
"P.scatter(aExtend[:,0],aExtend[:,1], alpha=0.5, s=9, edgecolor='none')\nP.xlabel('Normalization of power-law a')\nP.ylabel('Power-law index b')\nP.title('Three-parameter model, better data, no assumption on measurement times')",
"_____no_output_____"
],
[
"xS,yS=aExtend[:,0:2].T\nkS = kde.gaussian_kde(aExtend[:,0:2].T)\nnbins=150\nxiS, yiS = np.mgrid[xS.min():xS.max():nbins*1j, yS.min():yS.max():nbins*1j]\nziS = kS(np.vstack([xiS.flatten(), yiS.flatten()]))\n\nP.pcolormesh(xiS, yiS, ziS.reshape(xiS.shape), zorder=3)\nP.colorbar()\n\n# Show the datapoints on top of this, and also the contours. \"zorder\" sets the vertical order in the plot.\nP.scatter(aExtend[:,0], aExtend[:,1], c='w', s=2, zorder=15, edgecolor='none',alpha=0.75)\nP.contour(xiS,yiS,ziS.reshape(xiS.shape), zorder=25, colors='0.25')\n\nP.xlim(1.0,4.0)\n#P.ylim(-1.6,-0.45)\n\nP.xlabel('Power-law normalization a')\nP.ylabel('Power-law index b')",
"_____no_output_____"
],
[
"xS,yS=aExtend[:,1:3].T\nkS = kde.gaussian_kde(aExtend[:,1:3].T)\nnbins=150\nxiS, yiS = np.mgrid[xS.min():xS.max():nbins*1j, yS.min():yS.max():nbins*1j]\nziS = kS(np.vstack([xiS.flatten(), yiS.flatten()]))\n\nP.pcolormesh(xiS, yiS, ziS.reshape(xiS.shape), zorder=3)\nP.colorbar()\n\n# Show the datapoints on top of this, and also the contours. \"zorder\" sets the vertical order in the plot.\nP.scatter(aExtend[:,1], aExtend[:,2], c='w', s=2, zorder=15, edgecolor='none',alpha=0.75)\nP.contour(xiS,yiS,ziS.reshape(xiS.shape), zorder=25, colors='0.25')\n\n#P.xlim(1.21,2.5)\nP.ylim(-1.0,1.0)\n\nP.xlabel('Power-law index b')\nP.ylabel('Constant background c')",
"_____no_output_____"
]
],
[
[
"This is already much better-behaved than both previous versions. \n\nThis illustrates another use of monte carlo - to find out how to make our experiment sufficient to set the constraints we want to set.",
"_____no_output_____"
],
[
"### Actually reporting the range of returned parameters",
"_____no_output_____"
],
[
"Finishing off, let's decide on the range of parameter values to report. Since there are three parameters beyond the experimenter's control, it makes sense to report the range of one at a time, when all three are varying. This is just the projection of our cloud of points onto the parameter-space we want.\n\n(Technique note: quite a lot of the code below is repeated. In practice, you would write a method to do these plots and then just call the method each time you wanted to use it.)\n\nWe'll also calculate the two-sided limits from these distributions. We'll start with the 68% limits (\"1-sigma\") for our hypothetical \"Extended\" dataset:",
"_____no_output_____"
]
],
[
[
"nBins=200\nP.hist(aExtend[:,0],bins=nBins,alpha=0.5, color='g')\nP.xlim(1,3)\nP.xlabel('Power-law normalization a')\n\n# We use the median of the distribution as a decent estimate for \n# our best-fit value. Let's choose a \"1-sigma\" limit, i.e. the limits\n# that enclose 68% of the points between the median and the upper and lower \n# bounds:\nMed = np.median(aExtend[:,0])\ngHi = np.where(aExtend[:,0] >= np.median(aExtend[:,0]))[0]\ngLo = np.where(aExtend[:,0] < np.median(aExtend[:,0]))[0]\n\n# This trick does the limit-setting - try to see how it works:\nsLim = 0.68\nvSortLo=np.sort(aExtend[gLo,0])\nvSortHi=np.sort(aExtend[gHi,0])\n\nNormLo = vSortLo[np.int((1.0-sLim)*np.size(vSortLo))]\nNormHi = vSortHi[np.int(sLim *np.size(vSortHi))]\n\n## Let's take a look - how do those limits look on the histogram?\nfor quant, ls in zip([Med, NormLo, NormHi],['-', '--', '--']):\n P.axvline(quant, color='k', ls=ls, lw=1)\n\n# Print the limits:\nprint(\"INFO: Lower and upper 68 percent ranges are: %.3f %.3f\" % (Med-NormLo, NormHi-Med) )\n",
"INFO: Lower and upper 68 percent ranges are: 0.122 0.146\n"
],
[
"nBins=50\nP.hist(aExtend[:,1],bins=nBins,alpha=0.5)\nP.xlabel('Power-law index b')\n\n# We use the median of the distribution as a decent estimate for \n# our best-fit value. Let's choose a \"1-sigma\" limit, i.e. the limits\n# that enclose 68% of the points between the median and the upper and lower \n# bounds:\nMed = np.median(aExtend[:,1])\ngHi = np.where(aExtend[:,1] >= np.median(aExtend[:,1]))[0]\ngLo = np.where(aExtend[:,1] < np.median(aExtend[:,1]))[0]\n\n# This trick does the limit-setting - try to see how it works:\nvSortLo=np.sort(aExtend[gLo,1])\nvSortHi=np.sort(aExtend[gHi,1])\n\nsLim = 0.68\nNormLo = vSortLo[np.int((1.0-sLim)*np.size(vSortLo))]\nNormHi = vSortHi[np.int(sLim *np.size(vSortHi))]\n\n## Let's take a look - how do those limits look on the histogram?\nfor quant, ls in zip([Med, NormLo, NormHi],['-', '--', '--']):\n P.axvline(quant, color='k', ls=ls, lw=1)\n\n# Print the limits:\nprint(\"INFO: Lower and upper %i percent limits are: %.3f %.3f\" % (sLim*100, Med-NormLo, NormHi-Med) )\n",
"INFO: Lower and upper 68 percent limits are: 0.109 0.109\n"
]
],
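[
[
"# As the technique note above suggests, the limit-finding logic in the previous two cells can be\n# wrapped in a small helper and called once per parameter column. A minimal sketch (the name\n# two_sided_limits is arbitrary, not from any library):\ndef two_sided_limits(vSample, sLim=0.68):\n    # Return the median plus the lower and upper bounds that enclose a fraction sLim\n    # of the points on each side of the median, mirroring the cells above.\n    vMed = np.median(vSample)\n    vLo = np.sort(vSample[vSample < vMed])\n    vHi = np.sort(vSample[vSample >= vMed])\n    lowBound = vLo[int((1.0 - sLim) * np.size(vLo))]\n    hiBound = vHi[int(sLim * np.size(vHi))]\n    return vMed, lowBound, hiBound\n\n# Example call, reproducing the 68 percent range on the power-law normalization a:\nmedA, loA, hiA = two_sided_limits(aExtend[:,0], 0.68)\nprint('68 percent range on a: -%.3f +%.3f' % (medA - loA, hiA - medA))",
"_____no_output_____"
]
],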
[
[
"Just for interest, let's try a wider limit on the power-law normalization; how asymmetric does the distribution become once we get farther from the median?",
"_____no_output_____"
]
],
[
[
"sLim=0.99\nnBins=200\nP.hist(aExtend[:,0],bins=nBins,alpha=0.5, color='g')\nP.xlim(1,3)\nP.xlabel('Power-law normalization a')\n\n# Let's find the values at the lower- and upper- \"sLim\" bounds:\nMed = np.median(aExtend[:,0])\ngHi = np.where(aExtend[:,0] >= np.median(aExtend[:,0]))[0]\ngLo = np.where(aExtend[:,0] < np.median(aExtend[:,0]))[0]\n\nvSortLo=np.sort(aExtend[gLo,0])\nvSortHi=np.sort(aExtend[gHi,0])\n\nNormLo = vSortLo[np.int((1.0-sLim)*np.size(vSortLo))]\nNormHi = vSortHi[np.int(sLim *np.size(vSortHi))]\n\n## Let's take a look - how do those limits look on the histogram?\nfor quant, ls in zip([Med, NormLo, NormHi],['-', '--', '--']):\n P.axvline(quant, color='k', ls=ls, lw=1)\n\n\n# Do some annotations on the plot with these limits:\nP.annotate('%i percent limits' % (sLim*100), (0.6,0.9), xycoords='axes fraction')\nP.title('Parameter: <%.3f> -%.3f +%.3f' % (Med, Med-NormLo, NormHi-Med))\n\n\n#Print the limits:\nprint(\"INFO: Lower and upper %i percent ranges are: %.3f %.3f\" % (sLim*100,Med-NormLo, NormHi-Med) )\n",
"INFO: Lower and upper 99 percent ranges are: 0.286 0.510\n"
]
],
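[
[
"# A quick check of the point discussed in the next cell: compare the fraction of trials with\n# a >= 2.2 against what a gaussian of width ~0.131 (the symmetric 68 percent range) centred on\n# the median would predict. A rough sketch, assuming aExtend from above:\nfrom scipy.stats import norm\n\nmedA = np.median(aExtend[:,0])\nsigSymm = 0.131\nfracMC = np.sum(aExtend[:,0] >= 2.2) / float(np.size(aExtend[:,0]))\nfracGauss = 1.0 - norm.cdf(2.2, loc=medA, scale=sigSymm)\nprint('Fraction of trials with a >= 2.2: %.4f' % fracMC)\nprint('Gaussian prediction with sigma %.3f: %.2e' % (sigSymm, fracGauss))",
"_____no_output_____"
]
],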
[
[
"We see the not-so-hidden dangers of reporting and interpreting just a symmetric 1-sigma limit. Even though our measurement errors were gaussian in all cases - and known - the posterior distribution of recovered parameters is (i) not gaussian, (ii) is asymmetric, and (iii) gets more asymmetric the more extreme we make our confidence level (e.g. 99% versus 68%). \n\nIf you have a single 68% range reported (which would be about 0.131), say, how does the likelihood of measuring a=2.2 under this model compare to the actual likelihood of getting this value? Beware of claiming signals only 2 or 3 sigma from the median without first checking the actual distribution of recovered parameters! ",
"_____no_output_____"
],
[
"Just for completeness, let's try this on our 14-point data from above, whose monte carlo output we put into aFitExpt earlier. We'll use a log-scale on the histogram to show the long tail of the normalization constant:",
"_____no_output_____"
]
],
[
[
"sLim=0.99\nnBins=400\nP.hist(aFitExpt[:,0],bins=nBins,alpha=0.5, color='g',range=[0,50], log=True)\nP.xlim(0,10)\nP.xlabel('Power-law normalization a')\n\n# Let's find the values at the lower- and upper- \"sLim\" bounds:\nMed = np.median(aFitExpt[:,0])\ngHi = np.where(aFitExpt[:,0] >= Med)[0]\ngLo = np.where(aFitExpt[:,0] < Med)[0]\n\nvSortLo=np.sort(aFitExpt[gLo,0])\nvSortHi=np.sort(aFitExpt[gHi,0])\n\nNormLo = vSortLo[np.int((1.0-sLim)*np.size(vSortLo))]\nNormHi = vSortHi[np.int(sLim *np.size(vSortHi))]\n\n## Let's take a look - how do those limits look on the histogram?\nP.axvline(Med, color='k', ls='-', lw=1)\nP.axvline(NormLo, color='k', ls='--', lw=1)\nP.axvline(NormHi, color='k', ls='--', lw=1)\n\n#P.plot([Med, Med],[1,1000.0], 'k-', lw=2)\n#P.plot([NormLo, NormLo],[1,1000.0], 'k--', lw=2)\n#P.plot([NormHi, NormHi],[1,1000.0], 'k--', lw=2)\n\n# Do some annotations on the plot with these limits:\nP.annotate('%i percent limits' % (sLim*100), (0.6,0.9), xycoords='axes fraction')\nP.title('Parameter: <%.3f> - %.3f +%.3f' % (Med, Med-NormLo, NormHi-Med))\n\n\n#Print the limits:\nprint(\"INFO: Lower and upper %i percent ranges are: %.3f %.3f\" % (sLim*100,Med-NormLo, NormHi-Med) )\n",
"INFO: Lower and upper 99 percent ranges are: 0.475 3.141\n"
]
],
[
[
"Within the context of \"designing a better experiment,\" notice the improvement in the range when using more data over a wider time-baseline over the version with the fewer datapoints just plotted in the above panel; the range of parameters allowed by the data is much narrower when more data over a wider time interval is added. Monte Carlo allows us to quantify the improvement.",
"_____no_output_____"
],
[
"Something else is worth noticing here: the coverage of the high-a regime in our simulations is not very good in the long-tail to high positive \"a.\" \n\nIf you want to explore confidence limits in the >90% or so regime, you are likely to need a larger number of simulations just to get good statistics towards the corners of the distribution. Whether you want to do this depends on your use-case and how important the wings of the distribution are likely to be to your hypothetical reader who is trying to reproduce your results.",
"_____no_output_____"
]
],
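[
[
"# A quick way to gauge how well the simulations cover the tails, as discussed above: count how many\n# trials actually populate the region beyond the 99th percentile of a - those few points are what set\n# the upper 99 percent limit, so it stays noisy unless nTrials is large. Assumes aFitExpt from above.\nnKept = np.shape(aFitExpt)[0]\nq99 = np.percentile(aFitExpt[:,0], 99.0)\nnTail = np.sum(aFitExpt[:,0] >= q99)\nprint('Trials kept: %i; trials beyond the 99th percentile of a: %i' % (nKept, nTail))",
"_____no_output_____"
]
],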
[
[
"nBins=50\nsLim=0.99\nP.hist(aExtend[:,1],bins=nBins,alpha=0.5)\nP.xlabel('Power-law index b')\nP.ylabel('N(b)')\n\n# We use the median of the distribution as a decent estimate for \n# our best-fit value. Let's choose a \"1-sigma\" limit, i.e. the limits\n# that enclose 68% of the points between the median and the upper and lower \n# bounds:\nMed = np.median(aExtend[:,1])\ngHi = np.where(aExtend[:,1] >= np.median(aExtend[:,1]))[0]\ngLo = np.where(aExtend[:,1] < np.median(aExtend[:,1]))[0]\n\n# This trick does the limit-setting - try to see how it works:\nvSortLo=np.sort(aExtend[gLo,1])\nvSortHi=np.sort(aExtend[gHi,1])\n\nNormLo = vSortLo[np.int((1.0-sLim)*np.size(vSortLo))]\nNormHi = vSortHi[np.int(sLim *np.size(vSortHi))]\n\n## Let's take a look - how do those limits look on the histogram?\nfor quant, ls in zip([Med, NormLo, NormHi],['-', '--', '--']):\n P.axvline(quant, color='k', ls=ls, lw=1)\n#P.plot([Med, Med],[1,500.0], 'k-', lw=2)\n#P.plot([NormLo, NormLo],[1,500.0], 'k--', lw=2)\n#P.plot([NormHi, NormHi],[1,500.0], 'k--', lw=2)\n\n# Print the limits:\nprint(\"INFO: Lower and upper %i percent ranges are: %.3f %.3f\" % (sLim*100, Med-NormLo, NormHi-Med) )\n",
"INFO: Lower and upper 99 percent ranges are: 0.361 0.333\n"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
]
|
cb75e3edf2480c7442535dd0ffa4e15744d81054 | 54,041 | ipynb | Jupyter Notebook | reports/fine_tune_en-ru_marian-nmt_model/train_by_multi_gpu_function.ipynb | eleldar/Translator | 33e41e545d63c2319cdf74284230f6ca70a3e9e7 | [
"MIT"
]
| null | null | null | reports/fine_tune_en-ru_marian-nmt_model/train_by_multi_gpu_function.ipynb | eleldar/Translator | 33e41e545d63c2319cdf74284230f6ca70a3e9e7 | [
"MIT"
]
| null | null | null | reports/fine_tune_en-ru_marian-nmt_model/train_by_multi_gpu_function.ipynb | eleldar/Translator | 33e41e545d63c2319cdf74284230f6ca70a3e9e7 | [
"MIT"
]
| null | null | null | 33.839073 | 172 | 0.489684 | [
[
[
"# Fine tuning Marian-NMT en-ru model",
"_____no_output_____"
],
[
"## Установка зависимостей",
"_____no_output_____"
]
],
[
[
"!pip install datasets transformers[sentencepiece]\n!pip install sacrebleu\n!pip install accelerate\n!pip install openpyxl\n!apt install git-lfs\n!pip install matplotlib",
"_____no_output_____"
],
[
"# загрузим репозиторий; нужен для предобработки\n!git clone https://github.com/eleldar/Translator.git",
"_____no_output_____"
],
[
"# загрузка исходной модели\n!git clone https://huggingface.co/Helsinki-NLP/opus-mt-en-ru && ls opus-mt-en-ru",
"Cloning into 'opus-mt-en-ru'...\nremote: Enumerating objects: 58, done.\u001b[K\nremote: Counting objects: 100% (58/58), done.\u001b[K\nremote: Compressing objects: 100% (26/26), done.\u001b[K\nremote: Total 58 (delta 30), reused 58 (delta 30), pack-reused 0\u001b[K\nUnpacking objects: 100% (58/58), done.\nFiltering content: 100% (2/2), 829.76 MiB | 20.66 MiB/s, done.\n\u001b[H\u001b[2JREADME.md pytorch_model.bin\tsource.spm tokenizer_config.json\nconfig.json rust_model.ot\ttarget.spm vocab.json\n"
]
],
[
[
"## Настройка git",
"_____no_output_____"
]
],
[
[
"!git config --global user.email \"[email protected]\"\n!git config --global user.name \"eleldar\"\n!git config --global credential.helper store",
"_____no_output_____"
]
],
[
[
"## Импортирование зависимостей",
"_____no_output_____"
]
],
[
[
"import os\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom tqdm.auto import tqdm\nfrom torch.utils.data import DataLoader\nfrom time import gmtime, strftime\nfrom huggingface_hub import Repository\nfrom accelerate import Accelerator, notebook_launcher\nimport datasets\nfrom datasets import (\n Dataset, DatasetDict, load_dataset, load_metric,\n concatenate_datasets, interleave_datasets\n)\nimport transformers\nfrom transformers import (\n AdamW, AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq,\n get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, \n get_linear_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, \n get_polynomial_decay_schedule_with_warmup\n)",
"_____no_output_____"
]
],
[
[
"## Загрузка данных",
"_____no_output_____"
],
[
"### Общий корпус длинных предложений",
"_____no_output_____"
]
],
[
[
"normal_url = 'https://github.com/eleldar/Translator/blob/master/test_dataset/flores101_dataset/101_languages.xlsx?raw=true'\nnormal_df = pd.read_excel(normal_url)[[\"eng\", \"rus\"]].rename(columns={\"eng\": \"en\", \"rus\": \"ru\"})\nnormal_df",
"_____no_output_____"
]
],
[
[
"### Общий корпус кратких предложений",
"_____no_output_____"
]
],
[
[
"short_url = 'https://github.com/eleldar/Translator/blob/master/test_dataset/normal.xlsx?raw=true'\nshort_df = pd.read_excel(short_url).rename(columns={\"en_sent\": \"en\", \"ru_sent\": \"ru\"})\nshort_df",
"_____no_output_____"
]
],
[
[
"### Предметный корпус",
"_____no_output_____"
]
],
[
[
"subject_url = 'https://github.com/eleldar/Translator/blob/master/test_dataset/corrected_vocab.xlsx?raw=true'\nsubject_df = pd.read_excel(subject_url).drop(columns=['en_keys', 'ru_keys']).rename(columns={\"en_sent\": \"en\", \"ru_sent\": \"ru\"})\nsubject_df",
"_____no_output_____"
]
],
[
[
"### Тестовый корпус (из модели)",
"_____no_output_____"
]
],
[
[
"test_url = 'https://github.com/eleldar/Translator/blob/master/test_dataset/test_opus_en-ru_dataset.xlsx?raw=true'\ntest_df = pd.read_excel(test_url).drop(columns=['Unnamed: 0'])\ntest_df",
"_____no_output_____"
]
],
[
[
"## Предобработка данных\n\n> Требуется замена символов юникода, т.к. встроенный токенизатор этого не выполняет",
"_____no_output_____"
]
],
[
[
"os.getcwd()",
"_____no_output_____"
],
[
"# переключим на каталог с импортируемыми модулями\nos.chdir('/mnt/home/Translator/OpenAPI/')",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"!ls",
"README.md api\t\t main.py requirements.txt\r\n__pycache__ documented_endpoints models venv\r\n"
],
[
"# импортировали\nfrom api.tools.preprocess import get_commands, preprocess_text",
"_____no_output_____"
],
[
"# словарь команд для предобработки на основе файла с расширением направления перевода и checkpoints\ncheckpoints = {'en-ru', 'ar-ru', 'ru-ar', 'ru-en', 'en-ar', 'ar-en'}\ncommands = get_commands(checkpoints)\nlist(commands['en-ru'])[:5], list(commands['ru-en'])[:5]",
"_____no_output_____"
],
[
"# замена спецсимволов\n# normalisation = lambda text: 1 # preprocess_text(commands['en-ru'], text['en_sent']) if direct in commands else text['en_sent']\ndef normalisation(text):\n text['en'] = preprocess_text(commands['en-ru'], text['en'])\n text['ru'] = preprocess_text(commands['ru-en'], text['ru'])\n return text",
"_____no_output_____"
],
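[
"# The real preprocess_text lives in the cloned Translator repo, so its exact behaviour is not shown\n# here. As a rough, hypothetical stand-in, such a step might simply apply a character-replacement\n# mapping before tokenization (the mapping below is made up purely for illustration):\ndef preprocess_text_sketch(replacements, text):\n    # replacements: dict mapping unwanted unicode characters to plain substitutes\n    for src, dst in replacements.items():\n        text = text.replace(src, dst)\n    return text\n\nprint(preprocess_text_sketch({'—': '-', '…': '...'}, 'Unicode dash — and ellipsis…'))",
"_____no_output_____"
],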
[
"# вернули рабочую директорию\nos.chdir('/mnt/home')",
"_____no_output_____"
]
],
[
[
"## Сборка наборов данных",
"_____no_output_____"
],
[
"### Создадим объекты Dataset",
"_____no_output_____"
]
],
[
[
"# Общий корпус длинных предложений\n# normal_df\nnormal_dataset = Dataset.from_pandas(normal_df)\nnormal_dataset = normal_dataset.map(normalisation)\nnormal_dataset",
"_____no_output_____"
],
[
"# Общий корпус кратких предложений\n# short_df\nshort_dataset = Dataset.from_pandas(short_df)\nshort_dataset = short_dataset.map(normalisation)\nshort_dataset",
"_____no_output_____"
],
[
"# Предметный корпус\n# subject_df\nsubject_dataset = Dataset.from_pandas(subject_df).shuffle()\nsubject_dataset = subject_dataset.map(normalisation)\nsubject_dataset",
"_____no_output_____"
],
[
"# Тестовый корпус\n# test_df\ntest_dataset = Dataset.from_pandas(test_df)\ntest_dataset = test_dataset.map(normalisation)\ntest_dataset",
"_____no_output_____"
]
],
[
[
"### Объединим обучающую часть предметного и тестовые набора",
"_____no_output_____"
]
],
[
[
"# целевой \"словарь\"\nsplit_datasets = DatasetDict()",
"_____no_output_____"
],
[
"split_datasets['normal'] = normal_dataset\nsplit_datasets['short'] = short_dataset\nsplit_datasets",
"_____no_output_____"
],
[
"sub_train_and_test = subject_dataset.train_test_split(test_size=0.2)\nsub_train_and_test",
"_____no_output_____"
],
[
"tmp = test_dataset.train_test_split(test_size=0.166)\ntmp",
"_____no_output_____"
],
[
"split_datasets['train'] = interleave_datasets(\n [sub_train_and_test['train'], tmp['test']]\n).shuffle()\nsplit_datasets['validation'] = sub_train_and_test.pop(\"test\")\nsplit_datasets",
"_____no_output_____"
],
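[
"# A tiny illustration of the difference between interleave_datasets (alternates examples from the\n# sources and, by default, stops once the smallest source is exhausted) and concatenate_datasets\n# (simply appends one source after the other, as the commented-out variant below does):\ntoy_a = Dataset.from_dict({'x': [1, 2, 3]})\ntoy_b = Dataset.from_dict({'x': [10, 20, 30]})\nprint(interleave_datasets([toy_a, toy_b])['x'])\nprint(concatenate_datasets([toy_a, toy_b])['x'])",
"_____no_output_____"
],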
[
"## Расскоментровать для использования всего модельного датасета для обучения; также в функции приедется изменить методы оценки\n# split_datasets['train'] = concatenate_datasets(\n# [sub_train_and_test['train'], test_dataset]\n# ).shuffle()\n# split_datasets['validation'] = sub_train_and_test.pop(\"test\")\n# split_datasets",
"_____no_output_____"
],
[
"split_datasets['test'] = tmp['train']\nsplit_datasets",
"_____no_output_____"
]
],
[
[
"## Функция обучения\n\n> Не поддерживает использование в качестве метода либо в цикл (проверено эмпирическим путем), т.к. используется параллельное использование нескольких GPU",
"_____no_output_____"
]
],
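[
[
"# For context before the full training function below: the multi-GPU pattern there boils down to\n# accelerate's notebook_launcher, which spawns one process per GPU and runs the given function in\n# each of them. A minimal sketch (the launcher call is commented out; uncomment on a multi-GPU host):\nfrom accelerate import Accelerator, notebook_launcher\n\ndef _hello():\n    accelerator = Accelerator()\n    print(f'process {accelerator.process_index} of {accelerator.num_processes} on {accelerator.device}')\n\n# notebook_launcher(_hello, num_processes=4)",
"_____no_output_____"
]
],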
[
[
"lr_schedulers = ['get_constant_schedule', 'get_constant_schedule_with_warmup',\n 'get_cosine_schedule_with_warmup', 'get_cosine_with_hard_restarts_schedule_with_warmup',\n 'get_linear_schedule_with_warmup', 'get_polynomial_decay_schedule_with_warmup',\n 'torch_optim_lr_scheduler_one_cycle_lr']\n\nhyperparameters = {\n \"learning_rate\": 1e-6,\n \"num_epochs\": 2,\n \"train_batch_size\": 8,\n \"eval_batch_size\": 32, \n \"model_checkpoint\": \"opus-mt-en-ru\",\n \"max_input_length\": 128,\n \"max_target_length\": 128,\n \"max_generate_length\": 128, \n \"output_dir\": f'experiences/fine_tuned_en_ru_model_{strftime(\"%Y-%m-%d_%H-%M-%S\", gmtime())}',\n \"file_scores\": 'scores.txt',\n \"scheduler\": lr_schedulers[0], # настраиваемый параметр\n}\n\ntokenizer = AutoTokenizer.from_pretrained(hyperparameters[\"model_checkpoint\"], return_tensors=\"pt\")\n\ndef preprocess_function(examples, hyperparameters=hyperparameters, tokenizer=tokenizer):\n '''Получение IDs'''\n model_inputs = tokenizer(examples[\"en\"], max_length=hyperparameters['max_input_length'], truncation=True)\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(examples[\"ru\"], max_length=hyperparameters['max_target_length'], truncation=True)\n model_inputs[\"labels\"] = labels[\"input_ids\"] # присвоили исходному языку IDs целевого языка\n return model_inputs\n\n\ndef postprocess(predictions, labels, tokenizer=tokenizer):\n '''Получение текста из IDs'''\n predictions = predictions.cpu().numpy()\n labels = labels.cpu().numpy()\n # Декодированные токены из IDs, спрогнозированные моделью\n decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)\n # Замена -100 в метках, так как их нельзя декодировать.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n # Декодированные метки токены из IDs, являющиеся эталонным переводом\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n # Пост-обрабработка, т.к. 
для прогноза нужен список, а для эталона список списков\n decoded_preds = [pred.strip() for pred in decoded_preds]\n decoded_labels = [[label.strip()] for label in decoded_labels]\n return decoded_preds, decoded_labels\n\n\ndef evaluate(model, accelerator, examples, epoch='base', note=\"sub\", hyperparameters=hyperparameters):\n '''Оценка'''\n metric = load_metric(\"sacrebleu\")\n model.eval()\n for batch in tqdm(examples):\n with torch.no_grad():\n generated_tokens = accelerator.unwrap_model(model).generate(\n batch[\"input_ids\"],\n attention_mask=batch[\"attention_mask\"],\n max_length=hyperparameters[\"max_generate_length\"],\n )\n labels = batch[\"labels\"]\n # Необходимое выравнивание для заполнения прогнозов и меток для метода accelerator.gather()\n generated_tokens = accelerator.pad_across_processes(\n generated_tokens, dim=1, pad_index=tokenizer.pad_token_id\n )\n labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)\n predictions_gathered = accelerator.gather(generated_tokens)\n labels_gathered = accelerator.gather(labels)\n # подготовка данных для оценки\n decoded_preds, decoded_labels = postprocess(predictions_gathered, labels_gathered)\n # примечение пакетной метрики\n metric.add_batch(predictions=decoded_preds, references=decoded_labels)\n results = metric.compute()\n response = f\"{note}_score for {epoch} epoch: {results['score']}\\n\"\n with open(f\"{hyperparameters['output_dir']}_{hyperparameters['scheduler']}/{hyperparameters['file_scores']}\", 'a') as file:\n file.write(response)\n print(f\"{note}_score for epoch {epoch}, BLEU score: {results['score']:.2f}\")\n \n\ndef get_image(hyperparameters):\n '''Создание и сохранение графика'''\n with open(f\"{hyperparameters['output_dir']}_{hyperparameters['scheduler']}/{hyperparameters['file_scores']}\") as f:\n score = f.readlines()\n sub = [float(i.strip().split(': ')[1]) for i in score[0::4][0::4]]\n normal = [float(i.strip().split(': ')[1]) for i in score[0::4][1::4]]\n short = [float(i.strip().split(': ')[1]) for i in score[0::4][2::4]]\n test = [float(i.strip().split(': ')[1]) for i in score[0::4][3::4]]\n X = [i for i in range(hyperparameters[\"num_epochs\"] + 1)]\n Y = [i for i in range(0, 61)]\n score_df = pd.DataFrame({'Предметный': sub, 'Обычные': normal, 'Короткие': short, 'Модельные': test})\n mx_sub = max(sub)\n inx = sub.index(mx_sub)\n modscore = test[inx]\n img = score_df.plot(xticks=X, yticks=Y, style='^', figsize=(15,12));\n img.axvline(inx, color='grey')\n img.legend(loc='lower left')\n img.set_xlabel(\"Epochs\")\n img.set_ylabel(\"BLEU\")\n img.annotate(f'sub {mx_sub:.2f}', xy=(inx, mx_sub), xytext=(inx, mx_sub),\n arrowprops=dict(facecolor='blue', shrink=0.05),\n )\n img.annotate(f'mod {modscore:.2f}', xy=(inx, modscore), xytext=(inx, modscore),\n arrowprops=dict(facecolor='red', shrink=0.05),\n )\n img.annotate(f\"{hyperparameters['scheduler'].upper()} by LR:{hyperparameters['learning_rate']}\", xy=(0, 58), xytext=(0, 58))\n directory = f\"{hyperparameters['output_dir']}_{hyperparameters['scheduler']}\"\n img.get_figure().savefig(f\"{directory}/maxsub-{mx_sub:.2f}_mod-{modscore:.2f}_epoch-{inx}_{hyperparameters['scheduler']}.png\") \n \ndef training_function(hyperparameters, tokenized_datasets, tokenizer):\n directory = f'{hyperparameters[\"output_dir\"]}_{hyperparameters[\"scheduler\"]}'\n try:\n repo = Repository(directory, clone_from='eleldar/train')\n except Exception as e:\n pass\n if not 
os.path.isfile(f\"{hyperparameters['output_dir']}/{hyperparameters['file_scores']}_{hyperparameters['scheduler']}\"):\n with open(f\"{hyperparameters['output_dir']}_{hyperparameters['scheduler']}/{hyperparameters['file_scores']}\", 'w') as file: # файл для складывания оценок\n file.write('')\n with open(f\"{hyperparameters['output_dir']}_{hyperparameters['scheduler']}/.gitignore\", 'w') as file:\n file.write(\"*.png\\n\")\n accelerator = Accelerator()\n if accelerator.is_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n \n model = AutoModelForSeq2SeqLM.from_pretrained(hyperparameters[\"model_checkpoint\"])\n data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)\n \n tokenized_datasets.set_format(\"torch\")\n train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, \n collate_fn=data_collator, batch_size=hyperparameters['train_batch_size'])\n eval_dataloader = DataLoader(tokenized_datasets[\"validation\"], shuffle=False,\n collate_fn=data_collator, batch_size=hyperparameters['eval_batch_size'])\n normal_dataloader = DataLoader(tokenized_datasets[\"normal\"], shuffle=False,\n collate_fn=data_collator, batch_size=hyperparameters['eval_batch_size'])\n short_dataloader = DataLoader(tokenized_datasets[\"short\"], shuffle=False,\n collate_fn=data_collator, batch_size=hyperparameters['eval_batch_size'])\n test_dataloader = DataLoader(tokenized_datasets[\"test\"], shuffle=False,\n collate_fn=data_collator, batch_size=hyperparameters['eval_batch_size'])\n\n optimizer = AdamW(model.parameters(), lr=hyperparameters[\"learning_rate\"]) \n model, optimizer, train_dataloader, eval_dataloader, normal_dataloader, short_dataloader, test_dataloader = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, normal_dataloader, short_dataloader, test_dataloader\n )\n num_epochs = hyperparameters[\"num_epochs\"]\n lr_schedulers = {'get_constant_schedule': get_constant_schedule(\n optimizer=optimizer\n ),\n 'get_constant_schedule_with_warmup': get_constant_schedule_with_warmup(\n optimizer=optimizer, num_warmup_steps=100\n ),\n 'get_cosine_schedule_with_warmup': get_cosine_schedule_with_warmup(\n optimizer=optimizer, num_warmup_steps=100, \n num_training_steps=len(train_dataloader) * num_epochs,\n num_cycles=0.5\n ),\n 'get_cosine_with_hard_restarts_schedule_with_warmup': get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer=optimizer, num_warmup_steps=100,\n num_training_steps=len(train_dataloader) * num_epochs,\n num_cycles=1\n ),\n 'get_linear_schedule_with_warmup': get_linear_schedule_with_warmup(\n optimizer=optimizer, num_warmup_steps=100,\n num_training_steps=len(train_dataloader) * num_epochs,\n ),\n 'get_polynomial_decay_schedule_with_warmup': get_polynomial_decay_schedule_with_warmup(\n optimizer=optimizer, num_warmup_steps=100,\n num_training_steps=len(train_dataloader) * num_epochs,\n lr_end=1e-7, power=1.0\n ),\n 'torch_optim_lr_scheduler_one_cycle_lr': torch.optim.lr_scheduler.OneCycleLR(\n optimizer=optimizer, max_lr=1e-5, pct_start=1 / (num_epochs),\n total_steps=len(train_dataloader) * num_epochs + 10, div_factor=1e+3, final_div_factor=1e+4,\n anneal_strategy='cos'\n )\n }\n lr_scheduler = lr_schedulers[hyperparameters['scheduler']]\n\n # оценка перед обучением\n evaluate(model, accelerator, eval_dataloader, note=\"sub\")\n evaluate(model, accelerator, normal_dataloader, 
note=\"normal\")\n evaluate(model, accelerator, short_dataloader, note=\"short\")\n evaluate(model, accelerator, test_dataloader, note=\"test\")\n \n try:\n repo.git_add(\".\")\n repo.git_commit(commit_message=\"base and gitignore\")\n except Exception as e:\n pass\n\n # обучение \n progress_bar = tqdm(range(num_epochs * len(train_dataloader)), disable=not accelerator.is_main_process)\n for epoch in range(1, num_epochs + 1):\n model.train()\n for batch in train_dataloader:\n outputs = model(**batch)\n loss = outputs.loss\n accelerator.backward(loss)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n \n # оценка в момент обучения\n evaluate(model, accelerator, eval_dataloader, epoch=epoch, note=\"sub\")\n evaluate(model, accelerator, normal_dataloader, epoch=epoch, note=\"normal\")\n evaluate(model, accelerator, short_dataloader, epoch=epoch, note=\"short\")\n evaluate(model, accelerator, test_dataloader, epoch=epoch, note=\"test\") \n \n # Сохранение и обновление\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n unwrapped_model.save_pretrained(directory, save_function=accelerator.save)\n if accelerator.is_main_process:\n tokenizer.save_pretrained(directory)\n try: \n repo.git_add(\".\") \n repo.git_commit(commit_message=f\"Training in progress epoch {epoch}\")\n except Exception as e:\n pass\n get_image(hyperparameters)\n \ndef decorator(function, *args):\n '''для добавление аргументов в функцию для обучения'''\n def wrapper():\n return function(*args)\n return wrapper\n\ntokenized_datasets = split_datasets.map(\n preprocess_function,\n batched=True,\n remove_columns=split_datasets[\"train\"].column_names,\n) \ntraining_function = decorator(training_function, hyperparameters, \n tokenized_datasets, tokenizer\n )\nnotebook_launcher(training_function, num_processes=4)",
"_____no_output_____"
]
],
[
[
"# Использование",
"_____no_output_____"
]
],
[
[
"split_datasets['validation'][60]",
"_____no_output_____"
],
[
"# до\nfrom transformers import pipeline\n\nmodel_checkpoint = \"Helsinki-NLP/opus-mt-en-ru\"\ntranslator = pipeline(\"translation\", model=model_checkpoint)\ntranslator(\"Companies need to buy routers to direct data traffic and connect to the internet.\")",
"_____no_output_____"
],
[
"# после\nfrom transformers import pipeline\n\nmodel_checkpoint = hyperparameters['output_dir']\ntranslator = pipeline(\"translation\", model=model_checkpoint)\ntranslator(\"Companies need to buy routers to direct data traffic and connect to the internet.\")",
"_____no_output_____"
]
]
]
| [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
]
| [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
]
|