Dataset schema: column name, type, and value range. For `stringlengths` columns the range refers to the string length; for numeric columns it is the range of values.

| Column | Type | Values |
| --- | --- | --- |
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 6 to 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6 to 260 |
| max_stars_repo_name | stringlengths | 6 to 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 |
| max_issues_repo_path | stringlengths | 6 to 260 |
| max_issues_repo_name | stringlengths | 6 to 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 |
| max_forks_repo_path | stringlengths | 6 to 260 |
| max_forks_repo_name | stringlengths | 6 to 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 |
| avg_line_length | float64 | 2 to 1.04M |
| max_line_length | int64 | 2 to 11.2M |
| alphanum_fraction | float64 | 0 to 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
Record 1:

| Field | Value |
| --- | --- |
| hexsha | ec552f6043221b76ba6d4cd02458f0e9b715d139 |
| size | 238,931 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | code/anitapolis/RTP_anomaly.ipynb |
| max_stars_repo_name | pinga-lab/magnetic-radial-inversion |
| max_stars_repo_head_hexsha | ac7e04a143ddc29eb4ded78671a5382a2869d5d8 |
| max_stars_repo_licenses | [ "BSD-3-Clause" ] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2022-03-15T11:35:41.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-15T11:35:41.000Z |
| max_issues_repo_path | code/anitapolis/RTP_anomaly.ipynb |
| max_issues_repo_name | pinga-lab/magnetic-radial-inversion |
| max_issues_repo_head_hexsha | ac7e04a143ddc29eb4ded78671a5382a2869d5d8 |
| max_issues_repo_licenses | [ "BSD-3-Clause" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | code/anitapolis/RTP_anomaly.ipynb |
| max_forks_repo_name | pinga-lab/magnetic-radial-inversion |
| max_forks_repo_head_hexsha | ac7e04a143ddc29eb4ded78671a5382a2869d5d8 |
| max_forks_repo_licenses | [ "BSD-3-Clause" ] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2022-03-01T02:14:31.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-01T02:14:31.000Z |
| avg_line_length | 411.95 |
| max_line_length | 112,528 |
| alphanum_fraction | 0.940795 |

The `cells`, `cell_types`, and `cell_type_groups` payload for this record follows as raw JSON:
[ [ [ "# RTP anomaly", "_____no_output_____" ], [ "This code calculates the RTP anomaly of the simple model data.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport cPickle as pickle\nimport os\nimport pandas as pd\n\nfrom fatiando import utils\nfrom fatiando.gravmag import polyprism\nfrom fatiando.mesher import PolygonalPrism\nfrom fatiando.vis import mpl, myv\nfrom matplotlib import colors, ticker, cm\nfrom IPython.display import Image as img\nfrom matplotlib.mlab import normpdf", "/home/leonardo/anaconda2/lib/python2.7/site-packages/fatiando/vis/mpl.py:76: UserWarning: This module will be removed in v0.6. We recommend the use of matplotlib.pyplot module directly. Some of the fatiando specific functions will remain.\n \"specific functions will remain.\")\n/home/leonardo/anaconda2/lib/python2.7/site-packages/fatiando/vis/myv.py:51: UserWarning: This module will be removed in v0.7.\n warnings.warn(\"This module will be removed in v0.7.\")\n" ] ], [ [ "### Auxiliary functions", "_____no_output_____" ] ], [ [ "import sys\nsys.path.insert(0, '../../code')\n\nimport mag_polyprism_functions as mfun\nimport mag_sphere_functions as sf", "_____no_output_____" ], [ "# importing the pickle file of results\nresultpath = 'l2-tfa-inversion/multiple-43485/'\n\nwith open(resultpath+'inversion.pickle') as w:\n inversion = pickle.load(w)", "_____no_output_____" ], [ "z0 = inversion['z0']\nintensity = inversion['intensity']\nz0_plot, int_plot = np.meshgrid(z0, intensity)\nz0_plot = z0_plot.ravel()\nint_plot = int_plot.ravel()\nn = z0.size\nm = intensity.size\nN = inversion['x'].size\ny = inversion['y']\nx = inversion['x']\nobs = inversion['observed_data']\ninitial = inversion['results'][9][2][0][0]\nsolutions = [[0,3],[1,3]]\nx0 = 0.\ny0 = 0.", "_____no_output_____" ], [ "inversion['results'][0][1][-1]", "_____no_output_____" ], [ "# building the matrix of the goal function\ngamma_matrix = mfun.goal_matrix(n, m, inversion['results'])", "_____no_output_____" ] ], [ [ "# Input", "_____no_output_____" ] ], [ [ "data = pd.read_csv('anitapolis_decimated_tfa_residual_cut.txt', skipinitialspace=True, delim_whitespace=True)", "_____no_output_____" ], [ "data['GPSALT'] = - data['GPSALT'] + 800", "_____no_output_____" ], [ "mask = (data['GPSALT'].get_values()<0.)\ndata = data[mask]", "_____no_output_____" ], [ "xp = data['X'].get_values()\nyp = data['Y'].get_values()\nzp = data['GPSALT'].get_values()\nalt = data['ALTURA'].get_values()\ndobs = data['mag_res'].get_values()", "_____no_output_____" ] ], [ [ "## Calculating the RTP anomaly", "_____no_output_____" ] ], [ [ "zc = np.zeros_like(xp) + 800\n\ninc, dec = [-37.05, -18.17]\nincs = -21\ndecs = -11\n\nA = sf.sm_tf_sphere(xp, yp, zp, xp, yp, zc, inc, dec, incs, decs)", "_____no_output_____" ], [ "mu = 1.e-2*np.trace(np.dot(A.T, A))/xp.size\nlp = np.linalg.solve(np.dot(A.T,A) + mu*np.identity(xp.size), np.dot(A.T,dobs))", "_____no_output_____" ], [ "dp = np.dot(A, lp)", "_____no_output_____" ], [ "plt.figure(figsize=(6,5))\n\nplt.title('Layer momentum', fontsize=20)\nplt.tricontourf(yp, xp, lp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12)\nplt.xlabel('$y$(km)', fontsize=18)\nplt.ylabel('$x$(km)', fontsize=18)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=13)\nclb.ax.set_title('A/m')\nmpl.m2km()\n\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(19,5))\n\nplt.subplot(131)\nplt.title('Observed TFA', fontsize=20)\nplt.tricontourf(yp, xp, dobs, 20, 
cmap='RdBu_r').ax.tick_params(labelsize=12)\nplt.xlabel('$y$(km)', fontsize=18)\nplt.ylabel('$x$(km)', fontsize=18)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=13)\nclb.ax.set_title('nT')\nmpl.m2km()\n\nplt.subplot(132)\nplt.title('Predicted TFA', fontsize=20)\nplt.tricontourf(yp, xp, dp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12)\nplt.xlabel('$y$(km)', fontsize=18)\nplt.ylabel('$x$(km)', fontsize=18)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=13)\nclb.ax.set_title('nT')\nmpl.m2km()\n\nplt.subplot(133)\nplt.title('Residual TFA', fontsize=20)\nplt.tricontourf(yp, xp, dobs - dp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12)\nplt.xlabel('$y$(km)', fontsize=18)\nplt.ylabel('$x$(km)', fontsize=18)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=13)\nclb.ax.set_title('nT')\nmpl.m2km()\n\nplt.show()", "_____no_output_____" ], [ "G = sf.sm_tf_sphere(xp, yp, zp, xp, yp, zc, 90., 0., 90., 0.)", "_____no_output_____" ], [ "rtp = np.dot(G, lp)", "_____no_output_____" ], [ "plt.figure(figsize=(6,5))\n\nx0 = 6921000.\ny0 = 688000.\n\nax = plt.subplot(111)\n#plt.title('Predicted RTP', fontsize=20)\ncircle1 = plt.Circle((y0, x0), 700, color='r', linewidth=3., fill=False)\nplt.tricontourf(yp, xp, rtp, 20, cmap='RdBu_r',\n vmin=-np.max(rtp), vmax=np.max(rtp)).ax.tick_params(labelsize=14)\nplt.plot(yp, xp, '.k', markersize=.8)\nplt.xlabel('$y$(km)', fontsize=14)\nplt.ylabel('$x$(km)', fontsize=14)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=14)\nclb.ax.set_title('nT', pad=-290, fontsize=14)\nax.add_artist(circle1)\nmpl.m2km()\n#plt.savefig('../../manuscript/anitapolis_rtp.png', dpi=300, bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "filename = '../../manuscript/figures/anitapolis_rtp.png'", "_____no_output_____" ], [ "np.min(gamma_matrix)", "_____no_output_____" ], [ "np.argmin(gamma_matrix)", "_____no_output_____" ], [ "inversion['regularization']", "_____no_output_____" ], [ "plt.figure(figsize=(12,10))\n\nax1 = plt.subplot(221)\n#plt.title('Observed TFA', fontsize=20)\nplt.tricontour(yp, xp, dobs, 20, linewidths=0.2, colors='k')\nplt.tricontourf(yp, xp, dobs, 20, cmap='RdBu_r', vmax=np.max(dobs), vmin=-np.max(dobs)).ax.tick_params(labelsize=14)\nplt.plot(yp, xp, '.k', markersize=.1)\nplt.xlabel('$y$(km)', fontsize=14)\nplt.ylabel('$x$(km)', fontsize=14)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=14)\nclb.ax.set_title('nT', pad=-264, fontsize=14)\ncircle1 = plt.Circle((y0, x0), 700, color='r', linewidth=1., fill=False)\nax1.add_artist(circle1)\nax1.text(np.min(yp)-2000, np.max(xp)+800, '(a)', fontsize=20)\nmpl.m2km()\n\nax3 = plt.subplot(222)\n#plt.title('Residual TFA', fontsize=20)\nplt.plot(yp, xp, '.k', markersize=.1)\nplt.tricontour(yp, xp, rtp, 20, linewidths=0.2, colors='k')\nplt.tricontourf(yp, xp, rtp, 20, cmap='RdBu_r', vmax=np.max(rtp), vmin=-np.max(rtp)).ax.tick_params(labelsize=14)\nplt.xlabel('$y$(km)', fontsize=14)\nplt.ylabel('$x$(km)', fontsize=14)\nclb = plt.colorbar(pad=0.025, aspect=40, shrink=1)\nclb.ax.tick_params(labelsize=14)\nclb.ax.set_title('nT', pad=-264, fontsize=14)\ncircle1 = plt.Circle((y0, x0), 700, color='r', linewidth=1., fill=False)\nax3.add_artist(circle1)\nax3.text(np.min(yp)-2000, np.max(xp)+800, '(b)', fontsize=20)\nmpl.m2km()\n\nax2 = plt.subplot(2,2,3)\nw = 3\nimg = ax2.imshow(gamma_matrix, vmin=np.min(gamma_matrix),\n vmax=1000, 
origin='lower',extent=[0,w,0,w])\nclb = plt.colorbar(img, pad=0.012, shrink=1)\nclb.ax.set_title('nT$^2$', pad=-265, fontsize=14)\nclb.ax.tick_params(labelsize=14)\nimg.axes.tick_params(labelsize=14)\nplt.ylabel('$z_0 (m)$', fontsize=14)\nplt.xlabel('$m_0 (A/m)$', fontsize=14)\nax2.text(-0.6, 3.1, '(c)', fontsize=20)\n#plt.plot((2.*truevalues[1]+1.)*w/(2.*m), (2.*truevalues[0]+1.)*w/(2.*n), '^r', markersize=10)\ncolors = ['Dw', 'Dm']\nfor s, c in zip(solutions, colors):\n plt.plot((2.*s[1]+1.)*w/(2.*m), (2.*s[0]+1.)*w/(2.*n), c, markersize=12)\nx_label_list = []\ny_label_list = []\nfor xl, yl in zip(intensity,z0):\n x_label_list.append(str(xl)[:-2])\n y_label_list.append(str(yl)[:-2])\nax2.set_xticks(np.linspace(w/(2.*n), w - w/(2.*n), n))\nax2.set_yticks(np.linspace(w/(2.*m), w - w/(2.*m), m))\nax2.set_xticklabels(x_label_list)\nax2.set_yticklabels(y_label_list)\n# Minor ticks\nax2.set_xticks(np.linspace(0, w, n+1), minor=True)\nax2.set_yticks(np.linspace(0, w, m+1), minor=True)\nax2.grid(which='minor', color='k', linewidth=1.5)\n\nplt.subplots_adjust(wspace=.3)\n\nplt.savefig(filename, dpi=300, bbob_inches='tight')\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
Record 2:

| Field | Value |
| --- | --- |
| hexsha | ec553c89d4c894e8d23209d429ce94b40cdb6ab9 |
| size | 201,623 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | R_1.ipynb |
| max_stars_repo_name | OliverZijia/-pravega-for-AI- |
| max_stars_repo_head_hexsha | 0731d72f4ffadacfe64f5c8073869bad2e8ea361 |
| max_stars_repo_licenses | [ "MIT" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | R_1.ipynb |
| max_issues_repo_name | OliverZijia/-pravega-for-AI- |
| max_issues_repo_head_hexsha | 0731d72f4ffadacfe64f5c8073869bad2e8ea361 |
| max_issues_repo_licenses | [ "MIT" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | R_1.ipynb |
| max_forks_repo_name | OliverZijia/-pravega-for-AI- |
| max_forks_repo_head_hexsha | 0731d72f4ffadacfe64f5c8073869bad2e8ea361 |
| max_forks_repo_licenses | [ "MIT" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 85.542215 |
| max_line_length | 114,392 |
| alphanum_fraction | 0.781096 |

The `cells`, `cell_types`, and `cell_type_groups` payload for this record follows as raw JSON:
[ [ [ "import torch\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torchvision\nfrom torchvision import datasets, models, transforms", "_____no_output_____" ], [ "import os\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\nclass InnovaDataset(object):\n def __init__(self, root, transforms):\n self.root = root\n self.transforms = transforms\n # load all image files, sorting them to\n # ensure that they are aligned\n list_all = list(sorted(os.listdir(os.path.join(root))))\n self.imgs = list(i for i in list_all if \".jpg\" in i)\n self.masks = list(i for i in list_all if \".json\" in i)\n\n def get_json_record_path(self, ix):\n # return os.path.join(self.path, 'record_'+str(ix).zfill(6)+'.json') #fill zeros\n return os.path.join(self.root, 'record_' + str(ix) + '.json') # don't fill zeros\n \n def make_record_paths_absolute(self, record_dict):\n d = {}\n for k, v in record_dict.items():\n if type(v) == str: # filename\n if '.' in v:\n v = os.path.join(self.root, v)\n d[k] = v\n\n return d\n\n def get_json_record(self, ix):\n path = self.get_json_record_path(ix)\n try:\n with open(path, 'r') as fp:\n json_data = json.load(fp)\n except UnicodeDecodeError:\n raise Exception('bad record: %d. You may want to run `python manage.py check --fix`' % ix)\n except FileNotFoundError:\n raise\n except:\n logger.error('Unexpected error: {}'.format(sys.exc_info()[0]))\n raise\n\n record_dict = self.make_record_paths_absolute(json_data)\n return record_dict\n\n def get_record(self, ix):\n json_data = self.get_json_record(ix) # 返回的应该是字典对象\n# data = self.read_record(json_data)\n return json_data\n\n def read_record(self, record_dict):\n data = {}\n for key, val in record_dict.items():\n typ = self.get_input_type(key)\n\n # load objects that were saved as separate files\n if typ == 'image_array':\n img = Image.open((val))\n val = np.array(img)\n\n data[key] = val\n return data\n\n def __getitem__(self, idx):\n # load images ad masks\n img_path = os.path.join(self.root, self.imgs[idx])\n# mask_path = os.path.join(self.root, \"PedMasks\", self.masks[idx])\n img = Image.open(img_path).convert(\"RGB\")\n# img = np.array(img,np.float32,copy=False)\n # note that we haven't converted the mask to RGB,\n # because each color corresponds to a different instance\n # with 0 being background\n mask = self.get_record(idx)\n # convert the PIL Image into a numpy array\n label = torch.tensor(mask['user/angle'])+1\n \n# img = label\n img = np.array(img)\n return img, label\n\n def __len__(self):\n return len(self.imgs)", "_____no_output_____" ], [ "ls", "LICENSE \u001b[0m\u001b[01;34mdata\u001b[0m/ requirements.txt\nREADME.md \u001b[01;34mdatasets\u001b[0m/ \u001b[01;34mresults\u001b[0m/\nR_1.ipynb \u001b[01;34mdocs\u001b[0m/ test_train_distilled_image.py\n\u001b[01;34m__pycache__\u001b[0m/ log.txt tox.ini\nbase_options.py main.py train_distilled_image.py\nbasics.py \u001b[01;34mnetworks\u001b[0m/ \u001b[01;34mutils\u001b[0m/\n" ], [ "# This Part is just for visualization of our dataset\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# functions to show an image\n\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n# npimg = img.numpy()\n# plt.imshow(np.transpose(img, (1, 2, 0)))\n plt.show()\n \n \n \ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])", "_____no_output_____" ], [ "# get some random training images\nroot=\"./tub_2026_huanshan/\"\ndataset = InnovaDataset(root, transform)\ndata_loader = 
torch.utils.data.DataLoader(\n dataset, batch_size=12, shuffle=True, num_workers=1)\ndataiter = iter(data_loader)\nimages, labels = dataiter.next()\nlabels = torch.nn.functional.one_hot(labels, num_classes=3)\nprint(labels)\nprint(images.shape)\nplt.imshow(torch.squeeze(images[0], 0))\n# show images\n# print labels\n# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))", "tensor([[0, 1, 0],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [1, 0, 0]])\ntorch.Size([12, 240, 440, 3])\n" ], [ "root=\"../tub_2026_huanshan/\"\nlist_all = list(sorted(os.listdir(os.path.join(root))))\nlist_1 = list(i for i in list_all if \".jpg\" in i)\nlist_2 = list(i for i in list_all if \".json\" in i)", "_____no_output_____" ], [ "list_1[1::]", "_____no_output_____" ], [ "list_2[1::]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
Record 3:

| Field | Value |
| --- | --- |
| hexsha | ec55406a3d591a1d5464e681930609d5c5472c3b |
| size | 30,685 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | jupyter_notebooks/9_0_round_4_annotations_out.ipynb |
| max_stars_repo_name | charlieccarey/rdoc |
| max_stars_repo_head_hexsha | 2e857f29e128f893706d042d583eec698c0bc56a |
| max_stars_repo_licenses | [ "CC-BY-4.0" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | jupyter_notebooks/9_0_round_4_annotations_out.ipynb |
| max_issues_repo_name | charlieccarey/rdoc |
| max_issues_repo_head_hexsha | 2e857f29e128f893706d042d583eec698c0bc56a |
| max_issues_repo_licenses | [ "CC-BY-4.0" ] |
| max_issues_count | 5 |
| max_issues_repo_issues_event_min_datetime | 2016-05-07T04:42:06.000Z |
| max_issues_repo_issues_event_max_datetime | 2018-04-19T01:08:38.000Z |
| max_forks_repo_path | jupyter_notebooks/9_0_round_4_annotations_out.ipynb |
| max_forks_repo_name | charlieccarey/rdoc |
| max_forks_repo_head_hexsha | 2e857f29e128f893706d042d583eec698c0bc56a |
| max_forks_repo_licenses | [ "CC-BY-4.0" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 39.902471 |
| max_line_length | 1,414 |
| alphanum_fraction | 0.590582 |

The `cells`, `cell_types`, and `cell_type_groups` payload for this record follows as raw JSON:
[ [ [ "# Select and distribute additional Arousal articles for phrase annotation", "_____no_output_____" ], [ "## Goal is deeper annotation.\n- Doubling or more our previous annotation for arousal.\n- Phrase based annotation so we can be most flexible and assess at higher levels.\n\n## Secondary goal, more cross-validation.\n- 1/2 of new articles will be annotated by all.\n- 1/2 of new articles will be annotated by only 1 annotator.", "_____no_output_____" ] ], [ [ "dest_dir = '/Users/ccarey/Documents/Projects/NAMI/rdoc/pdfs/20160122_rdoc_project'\n%mkdir {dest_dir}\n%cd {dest_dir}", "_____no_output_____" ] ], [ [ "### Get a list of all our previous used pubmed ids.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport glob\nimport os\nimport re\nprev_annotated_pdfs_dir = '/Users/ccarey/Documents/Projects/NAMI/rdoc/pdfs/all_pdfs_annotated_pmid_names/*.pdf'\npdfs = glob.glob(prev_annotated_pdfs_dir)", "_____no_output_____" ], [ "pdfs = [os.path.basename(pdf) for pdf in pdfs]", "_____no_output_____" ] ], [ [ "#### Crosscheck article counts in previous annotations.\nShould find this many minus the 3 marked as 'NA' i.e. 3 documents did not have a pubmed id.\n(171 - 3) = 168", "_____no_output_____" ] ], [ [ "%ls $prev_annotated_pdfs_dir | wc -l", "_____no_output_____" ], [ "pattern = '([0-9]{8,8})'\np = re.compile(pattern)\ntot = len([p.search(pdf).group() for pdf in pdfs if p.search(pdf)])\npmids = set([p.search(pdf).group() for pdf in pdfs if p.search(pdf)])\npmids = list(pmids)\npmids.sort()\nuniq = len(pmids)\n# print('So... {} pubmed_ids were found from total of {} previously annotated '\n# 'or marked irrelevant / misclassified'.format(uniq, tot))\n# print(pmids)", "_____no_output_____" ] ], [ [ "So... 73 pubmed_ids were found from total of 168 previously annotated or marked irrelevant / misclassified\n\n['20685988', '20695690', '20815182', '21319926', '21531705', '21613467', '21699821', '21849230', '21957257', '22379238', '22379245', '22438994', '22447249', '22575329', '23074247', '23083918', '23088207', '23143607', '23452958', '23503620', '23558179', '23622762', '23646134', '23647728', '23709163', '23744445', '23904684', '23928891', '23954763', '23957953', '24023823', '24045586', '24101292', '24116095', '24231418', '24285346', '24293773', '24333377', '24333745', '24359877', '24376698', '24388670', '24470693', '24511281', '24725811', '24740391', '24770625', '24804717', '24806675', '24870123', '24933724', '24980898', '25017671', '25036160', '25126029', '25126038', '25136085', '25142564', '25154749', '25160677', '25197810', '25258728', '25261920', '25280468', '25348131', '25734385', '25740534', '25773639', '25774613', '25788679', '25834059', '25898427', '25913552']", "_____no_output_____" ], [ "#### Crosscheck articles counts in existing medic database.\nMay be necessary to restart postgres server.", "_____no_output_____" ] ], [ [ "# !pg_ctl -D /usr/local/var/postgres -l /usr/local/var/postgres/server.log start\nmedic_pmids = !medic --format tsv write ALL 2>/dev/null | cut -f 1 | uniq | sort\nprint(set(pmids).difference(set(in_medic)))\nprint(set(medic_pmids).difference(set(pmids)))", "_____no_output_____" ] ], [ [ "From previous notes in 'fetch_pdfs_batch3_2015.10.12.py':\n - \\#\\# caught output as this, but seems incorrect as it had AR04 '23941878' instead of '25258728'\n\nArticle 23941878 is an arousal document that did not go out in previous rounds due to a mislabeling issue. 
(25258728 did go out and we corrected the naming)", "_____no_output_____" ] ], [ [ "prev_pmids = pmids\nnew_pmids = ['23941878']", "_____no_output_____" ], [ "#from __future__ import print_function\nfrom Bio import Entrez\nfrom subprocess import check_call\nfrom shutil import copy2\n#import glob\nimport time\nimport imp\nimport os\nurl2p = imp.load_source('Url2PubmedPmcPdf', '/Users/ccarey/Documents/Projects/NAMI/rdoc/scripts/Url2PubmedPmcPdf.py')\nEntrez.email = \"[email protected]\"\n\ndef narrow_id_list(found_ids, omit_ids):\n found_but_omit = list(set(found_ids) & set(omit_ids))\n found_and_keep = list(set(found_ids) - set(omit_ids))\n print('Removed this many ids: {}'.format(len(found_but_omit)))\n return(found_and_keep)\n\ndef pubmed_central_search_to_pubmed_id(search_string, retmax=20):\n # verify how many records match\n handle = Entrez.egquery(term=search_string)\n record = Entrez.read(handle)\n # maybe useful if we are dealing with 100s of ids and don't want to overwhelm server?\n for row in record[\"eGQueryResult\"]:\n if row[\"DbName\"] == \"pubmed\":\n print(row[\"Count\"])\n # fetch the ids for those records\n handle = Entrez.esearch(db=\"pubmed\", retmax=retmax, term=search_string)\n record = Entrez.read(handle)\n pubmed_ids = record[\"IdList\"]\n return(pubmed_ids)\n\ndef fetch_pdfs(pubmed_ids, stub_name):\n u = url2p.Url2PubmedPmcPdf(pubmed_ids)\n urls = u.get_urls()\n found = []\n for url in urls:\n if url['url'] is not None:\n cmd = 'curl -L {} -o {}.pdf'.format(url['url'], stub_name + url['pubmed'])\n # print(cmd)\n check_call(cmd, shell = True)\n time.sleep(10)\n found.append(url['pubmed'])\n return(found)\n\n# def copy_pdf_append_initials(initials):\n# pdfs = glob.glob('*.pdf')\n# for i in initials:\n# os.mkdir(i)\n# for p in pdfs:\n# pi = p.replace('.pdf', '_' + i + '.pdf')\n# copy2(p, os.path.join(i, pi))\n\ndef search_and_summarize(search_name, query, omit_ids):\n ids = pubmed_central_search_to_pubmed_id(query, retmax=1000000)\n new_ids = narrow_id_list(ids, omit_ids)\n print('{} search of pubmed found {} ids of which {} are new'.format(search_name, len(ids), len(new_ids)))\n return(new_ids)", "_____no_output_____" ] ], [ [ "## Might be limiting our results too much by using pubmed. Pubmed central seems to have more free full text results.\n\nFor example, modifying the searches slightly for pubmed central vs. pubmed.\n\nAR00_pmc = (\"arousal\"[MeSH Terms] OR \"arousal\"[All Fields]) AND (\"free full text\"[sb] AND \"2010/06/24\"[PubDate] : \"2015/06/22\"[PubDate] AND \"humans\"[MeSH Terms])\n\nAR00_pubmed ='(\"arousal\"[MeSH Terms] OR \"arousal\"[All Fields]) AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2015/06/22\"[PDat] AND \"humans\"[MeSH Terms])'\n\npmc :\n- 13656 \n- some are sponsored in readable format but have to click through to publisher to get pdf.\n\npubmed : \n- 6503 \n- all pdf.", "_____no_output_____" ], [ "### Desire to send out more articles. 
30 in common to all, 30 unique to all\n - 30 common to all + 30 unique per annotator * 4 annotators\n - 150 articles total desired.", "_____no_output_____" ] ], [ [ "AR00 = '(\"arousal\"[MeSH Terms] OR \"arousal\"[All Fields]) AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2015/06/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR01 = '\"arousal\"[All Fields] AND ((\"arousal\"[MeSH Terms] OR \"arousal\"[All Fields]) AND (\"self report\"[MeSH Terms] OR (\"self\"[All Fields] AND \"report\"[All Fields]) OR \"self report\"[All Fields]) AND (\"weights and measures\"[MeSH Terms] OR (\"weights\"[All Fields] AND \"measures\"[All Fields]) OR \"weights and measures\"[All Fields] OR \"scales\"[All Fields])) AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR02 = '\"arousal\"[All Fields] AND ((\"self-assessment\"[MeSH Terms] OR \"self-assessment\"[All Fields] OR (\"self\"[All Fields] AND \"assessment\"[All Fields]) OR \"self assessment\"[All Fields]) AND (\"manikins\"[MeSH Terms] OR \"manikins\"[All Fields] OR \"mannequin\"[All Fields])) AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR03 = '\"arousal\"[All Fields] AND \"EEG\"[All Fields] AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR04 = '\"arousal\"[All Fields] AND \"psychomotor vigilance\"[All Fields] AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR05 = '\"arousal\"[All Fields] AND \"startle\"[All Fields] AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND \"humans\"[MeSH Terms])'\nAR05_plus = '\"arousal\"[All Fields] AND \"startle\"[All Fields] AND (\"loattrfree full text\"[sb] AND \"2010/06/24\"[PDat] : \"2016/01/22\"[PDat] AND (\"humans\"[MeSH Terms] OR \"humans\"[All Fields] OR \"human\"[All Fields]))'\n\n# AR00: there would be a lot. Use if not enough of others.\n# AR01: some used already. USE.\n# AR02: 0 found in original search, don't use.\n# AR03: some used already. USE.\n# AR04: some used already. USE.\n# AR05: some used already. 
USE.\n\n# already sent out for review under batches 2 or 3 (labels 02_ or 03_): prev_pmids\n\nAR00_ids = search_and_summarize(search_name='AR00', query=AR00, omit_ids=prev_pmids)\nprint()\nAR01_ids = search_and_summarize(search_name='AR01', query=AR01, omit_ids=prev_pmids)\nprint()\nAR02_ids = search_and_summarize(search_name='AR02', query=AR02, omit_ids=prev_pmids)\nprint()\nAR03_ids = search_and_summarize(search_name='AR03', query=AR03, omit_ids=prev_pmids)\nprint()\nAR04_ids = search_and_summarize(search_name='AR04', query=AR04, omit_ids=prev_pmids)\nprint()\nAR05_ids = search_and_summarize(search_name='AR05', query=AR05, omit_ids=prev_pmids)\nprint()", "_____no_output_____" ], [ "import collections\n\n# total found across all searches\nids = [AR01_ids, AR02_ids, AR03_ids, AR04_ids, AR05_ids]\nids = [j for i in ids for j in i]\nprint('total new pmids found across all searches : {}'.format(len(ids)))\n\nids = [AR01_ids, AR02_ids, AR03_ids, AR04_ids, AR05_ids]\nids = [j for i in ids for j in i]\ndups = [item for item, count in collections.Counter(ids).items() if count > 1]\nprint('duplicate pmids found across all searches : {}'.format(dups))\n\nids = [AR01_ids, AR02_ids, AR03_ids, AR04_ids, AR05_ids]\nids = [set(id_list) for id_list in ids]\nids = set.union(*ids)\nprint('unique new pmids found across all searches : {}'.format(len(ids)))", "_____no_output_____" ] ], [ [ "### Found nearly 150 new articles, add a few more from general search.\n\n- total new pmids found across all searches : 144\n- duplicate pmids found across all searches : ['20408942']\n- unique new pmids found across all searches : 143\n\n### Find 7 additional ids to get total 150 new ids.", "_____no_output_____" ] ], [ [ "AR00_specific_ids = narrow_id_list(AR00_ids, omit_ids=ids)", "Removed this many ids: 143\n" ] ], [ [ "### Fetch the pdfs for each search term.", "_____no_output_____" ], [ "Note, we should be here: '/Users/ccarey/Documents/Projects/NAMI/rdoc/pdfs/20160122_rdoc_project'", "_____no_output_____" ] ], [ [ "%pwd", "_____no_output_____" ], [ "# 4th batch, Arousal, subterm...\n# Note: There were no ids for AR02.\n\nbatch = '04'\n\nfetch_pdfs(AR01_ids, batch + '_AR01_')\nfetch_pdfs(AR03_ids, batch + '_AR03_')\nfetch_pdfs(AR04_ids, batch + '_AR04_')\nfetch_pdfs(AR05_ids, batch + '_AR05_')", "_____no_output_____" ], [ "# Extend to a few more generic results to round out\n# to ~150 and account for some that will be missing.\nfetch_pdfs(AR00_specific_ids[0:40], batch + '_AR00_')", "_____no_output_____" ], [ "all_found = glob.glob( './*.pdf')\nlen(all_found)", "_____no_output_____" ], [ "import collections\n\ndef report_found(search_ids, glob_pattern, id_pattern):\n '''Report the pmids found within the search set of files.'''\n files = glob.glob(glob_pattern)\n p = re.compile('[0-9]{8,8}')\n found = [p.search(pdf).group() for pdf in files if p.search(pdf)]\n not_found = set(search_ids).difference(found)\n print()\n #print('{} searched = {}'.format(len(search_ids), search_ids))\n #print('{} found = {}'.format(len(found), found))\n print('{} {} searched.'.format(len(search_ids), glob_pattern))\n print('{} {} found.'.format(len(found), glob_pattern))\n print('{} {} not_found = {}'.format(len(not_found), glob_pattern, not_found))\n return(found)\n\nall_found_d = {}\nall_found_d['AR00'] = report_found(AR00_specific_ids[0:40], glob_pattern='04_AR00_*', id_pattern='[0-9]{8,8}')\nall_found_d['AR01'] = report_found(AR01_ids, glob_pattern='04_AR01_*', id_pattern='[0-9]{8,8}')\nall_found_d['AR03'] = report_found(AR03_ids, 
glob_pattern='04_AR03_*', id_pattern='[0-9]{8,8}')\nall_found_d['AR04'] = report_found(AR04_ids, glob_pattern='04_AR04_*', id_pattern='[0-9]{8,8}')\nall_found_d['AR05'] = report_found(AR05_ids, glob_pattern='04_AR05_*', id_pattern='[0-9]{8,8}')\n\nprint()\nprint('------- Reporting Found: ----------')\nprint()\nprint('{} Total found'.format(sum([len(v) for v in all_found_d.values()])))\nprint()\nall_found_d = collections.OrderedDict(sorted(all_found_d.items()))\nfor k,v in all_found_d.iteritems():\n print('{}: {}'.format(k, v))", "_____no_output_____" ] ], [ [ "### Reported found or not found for batch 4\n 40 04_AR00_* searched.\n 32 04_AR00_* found.\n 8 04_AR00_* not_found = set(['22715192', '25109588', '22608701', '21304250', '22998863', '21697711', '26035194', '24330893'])\n\n 2 04_AR01_* searched.\n 1 04_AR01_* found.\n 1 04_AR01_* not_found = set(['22044631'])\n\n 99 04_AR03_* searched.\n 86 04_AR03_* found.\n 13 04_AR03_* not_found = set(['24718047', '24920020', '25972166', '24552365', '20729099', '23440753', '22068747', '25794203', '24177246', '20408942', '24169072', '24469936', '21051491'])\n\n 17 04_AR04_* searched.\n 12 04_AR04_* found.\n 5 04_AR04_* not_found = set(['21564364', '20408942', '24905752', '22436093', '23171222'])\n\n 26 04_AR05_* searched.\n 25 04_AR05_* found.\n 1 04_AR05_* not_found = set(['22387928'])\n\n ------- Reporting Found: ----------\n \n 156 Total found\n\n AR00: ['20410871', '20532489', '21050743', '21278378', '21278907', '22004270', '22048839', '22146934', '22169884', '22233352', '22378876', '22414937', '22474609', '22505867', '22686386', '22715197', '22805501', '22832959', '22968207', '23190433', '23299717', '23504052', '23555220', '23770566', '23934417', '24126129', '24466064', '24512610', '24651580', '24933663', '24964082', '25521352']\n AR01: ['22664396']\n AR03: ['20450941', '20525011', '20584721', '20615239', '20620104', '20634711', '20663220', '20842164', '20857862', '21077571', '21118712', '21120131', '21179552', '21203376', '21206465', '21276977', '21280045', '21335015', '21397252', '21419826', '21426626', '21675365', '21731598', '21816115', '21854953', '21886801', '21909371', '21954087', '22043127', '22119526', '22131608', '22163262', '22215928', '22314045', '22377810', '22379239', '22467988', '22496862', '22665872', '22986355', '22998925', '23055094', '23060019', '23127585', '23193115', '23326604', '23643925', '23707592', '23731439', '23786695', '23810448', '23899724', '23929944', '23940642', '23990240', '23997704', '24015304', '24125792', '24127147', '24214921', '24235891', '24252875', '24259275', '24260331', '24266644', '24412227', '24426818', '24453310', '24457211', '24505292', '24529045', '24571111', '24618591', '24705497', '24899756', '24930577', '25024660', '25051268', '25061837', '25348125', '25424865', '25456277', '25581922', '25756280', '25759762', '25883640']\n AR04: ['20955866', '21677894', '21826029', '22035386', '22215925', '22239924', '22294809', '22470524', '22959616', '23941878', '25142762', '25325584']\n AR05: ['20049632', '20497902', '20661292', '20939652', '21034683', '21259270', '21376761', '21392554', '21440905', '21463060', '21477924', '21550590', '21623612', '21626350', '21898707', '22088577', '22285891', '22286850', '22315106', '22776995', '22911829', '24156344', '25036222', '25107317', '25749431']", "_____no_output_____" ], [ "#### 156 pubmed ids were uniquely found and downloaded successfully.\nRandomly choose 32 to assign to everyone. 
And from the remaining 124 (=156-31) assign 31 different ones to each of 4 people.\n\nNote: '23941878' which we missed previously is now covered in set AR04.", "_____no_output_____" ] ], [ [ "import random\nfiles = glob.glob('04_AR*.pdf')\nidxs = range(len(files))\nrandom.shuffle(idxs)\n\neveryone = [files[idx] for idx in idxs[0:32]]\nmk = [files[idx] for idx in idxs[32:63]]\njl = [files[idx] for idx in idxs[63:94]]\ntc = [files[idx] for idx in idxs[94:125]]\ncc = [files[idx] for idx in idxs[125:156]]", "_____no_output_____" ], [ "def copy_to_annotator(files, annotator, suffix):\n %mkdir {annotator}\n %mkdir {annotator}/annotated\n %mkdir {annotator}/irrelevant\n for f in files:\n f_dest = f.split('.')[0] + suffix\n %cp {f} ./{annotator}/{f_dest}\n\ncopy_to_annotator(mk, 'mk', '_mk.pdf')\ncopy_to_annotator(everyone, 'mk', '_mk.pdf')\n\ncopy_to_annotator(jl, 'jl', '_jl.pdf')\ncopy_to_annotator(everyone, 'jl', '_jl.pdf')\n\ncopy_to_annotator(mk, 'tc', '_tc.pdf')\ncopy_to_annotator(everyone, 'tc', '_tc.pdf')\n\ncopy_to_annotator(mk, 'cc', '_cc.pdf')\ncopy_to_annotator(everyone, 'cc', '_cc.pdf')\n", "mkdir: mk: File exists\nmkdir: mk: File exists\nmkdir: mk/annotated: File exists\nmkdir: mk/irrelevant: File exists\nmkdir: jl: File exists\nmkdir: jl: File exists\nmkdir: jl/annotated: File exists\nmkdir: jl/irrelevant: File exists\nmkdir: tc: File exists\nmkdir: tc: File exists\nmkdir: tc/annotated: File exists\nmkdir: tc/irrelevant: File exists\nmkdir: cc: File exists\nmkdir: cc: File exists\nmkdir: cc/annotated: File exists\nmkdir: cc/irrelevant: File exists\n" ], [ "for f in mk:\n f_dest = f.split('.')[0] + '_mk.pdf'\n %cp {f} ./mk/{f_dest}\n\nfor f in jl:\n f_dest = f.split('.')[0] + '_jl.pdf'\n %cp {f} ./jl/{f_dest}\n \nfor f in tc:\n f_dest = f.split('.')[0] + '_tc.pdf'\n %cp {f} ./tc/{f_dest}\n\nfor f in cc:\n f_dest = f.split('.')[0] + '_cc.pdf'\n %cp {f} ./tc/{f_dest}", "_____no_output_____" ] ], [ [ "## Add the new abstracts to our medic database\nInsert fails if any are already in db.\nUpdate will overwrite previous records.", "_____no_output_____" ], [ "#### Previous set for our records:", "_____no_output_____" ] ], [ [ "%cd /Users/ccarey/Documents/Projects/NAMI/rdoc/tasks/task_data_temp\nprev = !medic --format tsv write ALL 2> /dev/null | cut -f 1\nprint(prev)", "/Users/ccarey/Documents/Projects/NAMI/rdoc/tasks/task_data_temp\n['20695690', '21699821', '21849230', '22379238', '22438994', '23074247', '23088207', '23143607', '23452958', '23622762', '23709163', '23904684', '23941878', '24725811', '24770625', '24806675', '24980898', '25017671', '25258728', '25348131', '23928891', '23744445', '25774613', '25773639', '25740534', '24870123', '24740391', '21957257', '21613467', '22447249', '21319926', '23503620', '21531705', '24023823', '24231418', '23558179', '25734385', '23647728', '23083918', '22575329', '22379245', '25197810', '25136085', '25126038', '25142564', '25126029', '24470693', '24333377', '24293773', '24116095', '24101292', '24045586', '23954763', '23646134', '20815182', '25898427', '24285346', '25036160', '24511281', '23957953', '25280468', '24388670', '25154749', '24933724', '24359877', '24333745', '24804717', '25834059', '25788679', '25261920', '25160677', '25913552', '24376698', '20685988']\n" ], [ "all_found_d = {'AR00': ['20410871', '20532489', '21050743', '21278378', '21278907', '22004270', '22048839', '22146934', '22169884', '22233352', '22378876', '22414937', '22474609', '22505867', '22686386', '22715197', '22805501', '22832959', '22968207', '23190433', '23299717', 
'23504052', '23555220', '23770566', '23934417', '24126129', '24466064', '24512610', '24651580', '24933663', '24964082', '25521352'],\n'AR01': ['22664396'],\n'AR03': ['20450941', '20525011', '20584721', '20615239', '20620104', '20634711', '20663220', '20842164', '20857862', '21077571', '21118712', '21120131', '21179552', '21203376', '21206465', '21276977', '21280045', '21335015', '21397252', '21419826', '21426626', '21675365', '21731598', '21816115', '21854953', '21886801', '21909371', '21954087', '22043127', '22119526', '22131608', '22163262', '22215928', '22314045', '22377810', '22379239', '22467988', '22496862', '22665872', '22986355', '22998925', '23055094', '23060019', '23127585', '23193115', '23326604', '23643925', '23707592', '23731439', '23786695', '23810448', '23899724', '23929944', '23940642', '23990240', '23997704', '24015304', '24125792', '24127147', '24214921', '24235891', '24252875', '24259275', '24260331', '24266644', '24412227', '24426818', '24453310', '24457211', '24505292', '24529045', '24571111', '24618591', '24705497', '24899756', '24930577', '25024660', '25051268', '25061837', '25348125', '25424865', '25456277', '25581922', '25756280', '25759762', '25883640'],\n'AR04': ['20955866', '21677894', '21826029', '22035386', '22215925', '22239924', '22294809', '22470524', '22959616', '23941878', '25142762', '25325584'],\n'AR05': ['20049632', '20497902', '20661292', '20939652', '21034683', '21259270', '21376761', '21392554', '21440905', '21463060', '21477924', '21550590', '21623612', '21626350', '21898707', '22088577', '22285891', '22286850', '22315106', '22776995', '22911829', '24156344', '25036222', '25107317', '25749431']}", "_____no_output_____" ], [ "update_list = [pmid for subset in all_found_d.values() for pmid in subset]\nlen(update_list)", "_____no_output_____" ], [ "cmd = ' '.join(update_list)\nprint(cmd)\n!medic update {cmd} 2> /dev/null\nprint()\n!medic --format tsv write ALL 2> /dev/null | cut -f 1 | wc -l", "20049632 20497902 20661292 20939652 21034683 21259270 21376761 21392554 21440905 21463060 21477924 21550590 21623612 21626350 21898707 22088577 22285891 22286850 22315106 22776995 22911829 24156344 25036222 25107317 25749431 20955866 21677894 21826029 22035386 22215925 22239924 22294809 22470524 22959616 23941878 25142762 25325584 22664396 20410871 20532489 21050743 21278378 21278907 22004270 22048839 22146934 22169884 22233352 22378876 22414937 22474609 22505867 22686386 22715197 22805501 22832959 22968207 23190433 23299717 23504052 23555220 23770566 23934417 24126129 24466064 24512610 24651580 24933663 24964082 25521352 20450941 20525011 20584721 20615239 20620104 20634711 20663220 20842164 20857862 21077571 21118712 21120131 21179552 21203376 21206465 21276977 21280045 21335015 21397252 21419826 21426626 21675365 21731598 21816115 21854953 21886801 21909371 21954087 22043127 22119526 22131608 22163262 22215928 22314045 22377810 22379239 22467988 22496862 22665872 22986355 22998925 23055094 23060019 23127585 23193115 23326604 23643925 23707592 23731439 23786695 23810448 23899724 23929944 23940642 23990240 23997704 24015304 24125792 24127147 24214921 24235891 24252875 24259275 24260331 24266644 24412227 24426818 24453310 24457211 24505292 24529045 24571111 24618591 24705497 24899756 24930577 25024660 25051268 25061837 25348125 25424865 25456277 25581922 25756280 25759762 25883640\n 229\n" ] ], [ [ "229 pubmed ids in database", "_____no_output_____" ], [ "## write the new pmids to file 
batch_04_AR\n/Users/ccarey/Documents/Projects/NAMI/rdoc/tasks/task_data_temp/batch_04_AR\n", "_____no_output_____" ], [ "# Need new copies of:\n\n'04_AR04_23941878.pdf'\n'04_AR03_23193115.pdf'", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
Record 4:

| Field | Value |
| --- | --- |
| hexsha | ec554b7ee2614b2c56f9c9b959c76aed679f4a76 |
| size | 403,595 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | Investigate_a_Dataset.ipynb |
| max_stars_repo_name | Bidhulata/Movies |
| max_stars_repo_head_hexsha | 9df4bb22e41e5fd1167a40f647e91475e0f61d8d |
| max_stars_repo_licenses | [ "MIT" ] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | Investigate_a_Dataset.ipynb |
| max_issues_repo_name | Bidhulata/Movies |
| max_issues_repo_head_hexsha | 9df4bb22e41e5fd1167a40f647e91475e0f61d8d |
| max_issues_repo_licenses | [ "MIT" ] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | Investigate_a_Dataset.ipynb |
| max_forks_repo_name | Bidhulata/Movies |
| max_forks_repo_head_hexsha | 9df4bb22e41e5fd1167a40f647e91475e0f61d8d |
| max_forks_repo_licenses | [ "MIT" ] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 71.839623 |
| max_line_length | 38,892 |
| alphanum_fraction | 0.730361 |

The `cells`, `cell_types`, and `cell_type_groups` payload for this record follows as raw JSON:
[ [ [ "# Project: Investigate TMDB movie data\n\n## Table of Contents\n<ul>\n<li><a href=\"#intro\">Introduction</a></li>\n<li><a href=\"#wrangling\">Data Wrangling</a></li>\n<li><a href=\"#eda\">Exploratory Data Analysis</a></li>\n<li><a href=\"#conclusions\">Conclusions</a></li>\n</ul>", "_____no_output_____" ], [ "<a id='intro'></a>\n## Introduction", "_____no_output_____" ], [ "For this Data Analyst project, I selected the TMDb movie dataset from kaggle to investigate. According to kaggle introduction [page](https://www.kaggle.com/tmdb/tmdb-movie-metadata), the data contains information that are provided from The Movie Database (TMDb). It collects 5000+ movies and their rating and basic move information, including user ratings and revenue data.\n\n### The potiental problem that can be discussed in the dataset:\nAccroding Kaggle data overview, the dataset provides some metrics that measure how successful these movies are. These metrics include popularity, revenue and vote average. It also contains some basic information corresponding to the movie like cast, director, keywords, runtime, genres, etc. Any of the basic information can be a key to a success movie. More specificly, these factors can be classified to two categrories as follows:\n#### Metrics for Evaluating the Success Movie\n- popularity\n- revenue\n- vote average score\n\n#### Potential Key to Affect the Success of a Movie\n- Budget\n- Cast\n- Director\n- Tagline\n- Keywords\n- Runtime\n- Genres\n- Production Companies\n- Release Date\n- Vote Average", "_____no_output_____" ], [ "Since the dataset is featured with the rating of movies as mentioned above, it contains plentiful information for exploring the properties that are associated with successful movies, which can be defined by high popularity, high revenue and high rating score movies. Besides, the dataset also contains the movie released year, so it also can let us to explore the trend in these movie metrics. Therefore, the qestions I am going to explore are including three parts:\n\n**Research Part 1: General Explore**\n - Question 1: Popularity Over Years\n - Question 2: The distribution of revenue in different popularity levels in recent five years.\n - Question 3: The distribution of revenue in different score rating levels in recent five years.\n \n**Research Part 2 : Find the Properties are Associated with Successful Movies**\n - Question 1: What kinds of properties are associated with movies that have high popularity?\n - Question 2: What kinds of properties are associated with movies that have high voting score?\n\n**Research Part 3 Top Keywords and Genres Trends by Generation**\n - Question 1: Number of movie released year by year\n - Question 2: Keywords Trends by Generation\n - Question 3: Genres Trends by Generation", "_____no_output_____" ], [ "<a id='wrangling'></a>\n## Data Wrangling\n\n### General Dataset Properties ", "_____no_output_____" ], [ "**First, let's look what the dataset looks like for preceeding to investigate.**", "_____no_output_____" ] ], [ [ "# Import statements for all of the packages that I plan to use.\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom collections import Counter\n% matplotlib inline", "_____no_output_____" ], [ "# Load the data and print out a few lines. 
Perform operations to inspect data\n# types and look for instances of missing or possibly errant data.\ndf = pd.read_csv('tmdb-movies (1).csv')\ndf.head(1)", "_____no_output_____" ] ], [ [ "**Then, find the basic information of the dataset.**", "_____no_output_____" ] ], [ [ "#see the column info and null values in the dataset\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10866 entries, 0 to 10865\nData columns (total 21 columns):\nid 10866 non-null int64\nimdb_id 10856 non-null object\npopularity 10866 non-null float64\nbudget 10866 non-null int64\nrevenue 10866 non-null int64\noriginal_title 10866 non-null object\ncast 10790 non-null object\nhomepage 2936 non-null object\ndirector 10822 non-null object\ntagline 8042 non-null object\nkeywords 9373 non-null object\noverview 10862 non-null object\nruntime 10866 non-null int64\ngenres 10843 non-null object\nproduction_companies 9836 non-null object\nrelease_date 10866 non-null object\nvote_count 10866 non-null int64\nvote_average 10866 non-null float64\nrelease_year 10866 non-null int64\nbudget_adj 10866 non-null float64\nrevenue_adj 10866 non-null float64\ndtypes: float64(4), int64(6), object(11)\nmemory usage: 1.7+ MB\n" ] ], [ [ "From the table above, there are totally 10866 entries and total 21 columns. And there exists some null value in the cast, director, overview and genres columns. But some columns are with **a lot of null value rows** like `homepage`, `tagline`, `keywords` and `production_companies`, especially the **`homepage`** and **`tagline`** columns are even not necessary for answering the question, so I decide to drop both of the columns on the stage.", "_____no_output_____" ], [ "Let's see some descriptive statistics for the data set.", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "As the table shown above, we can find outliers in `popularity` data, but according to the [forum](https://www.themoviedb.org/talk/5141d424760ee34da71431b0), the popularity score is measured by number of favourites and number of watched list etc, since it has no upperbond, I decided to retain the original data. Also, there are a lot of **zero number** in `budget` and `revenue` data, so is `runtime`. Didn't these movies be released? Look at the data in `release_year column`, I find all movies in the dataset are released becauce **the minimum value is 1960 and there is no null value for it**. So I assume the zero values in the `budget` and `revenue` are missing data. But under the risk that these zero values may be just small values, I preceed to take a look for some zero data content to decide whether it is just a missing value or small value.", "_____no_output_____" ], [ "Let's take a look at some zero `budget` and `revenue` data.", "_____no_output_____" ] ], [ [ "#filter the zero budget data\ndf_budget_zero = df.query('budget == 0')\n# choice the first three randomly\ndf_budget_zero.head(3)", "_____no_output_____" ], [ "#filter the zero revenue data\ndf_revenue_zero = df.query('revenue == 0')\n# choice the first three randomly\ndf_revenue_zero.head(3)", "_____no_output_____" ] ], [ [ "Among the `budget` data in zero values, I randomly choose _Mr. Holmes_ and google search it. And I found it's [Wikipedia](https://en.wikipedia.org/wiki/Mr._Holmes#cite_note-2) page and there is **definitely a budget record**. Further more, I also find the same result for `revenue` data in zero value. So **I assume the zero value in revenue and budget column are missing**. 
Maybe I had better drop them out or set them as null values. Since if I include these quantification number in dateset, It will affect some statistics and the visualiation result in those question.\n\nTo decide whether to drop them out or set them as null values, I count the number of the zero values in the two columns.", "_____no_output_____" ] ], [ [ "#count zero values in budget data using groupby\ndf_budget_0count = df.groupby('budget').count()['id']\ndf_budget_0count.head(2)", "_____no_output_____" ] ], [ [ "I count the zero value in the `budget` cloumn and there are 5696 rows in zero value. In case I drop too many raw data to keep the data integrity, I decide to retain these rows and replace zero values with null values.", "_____no_output_____" ], [ "So does the `revenue` column.", "_____no_output_____" ] ], [ [ "#count zero values in revenue data using groupby\ndf_revenue_0count = df.groupby('revenue').count()['id']\ndf_revenue_0count.head(2)", "_____no_output_____" ] ], [ [ "It contains 6016 rows in zero values, so I also dicide to keep these rows and replace zero values with null values.", "_____no_output_____" ], [ "Finally, let's investigate the `runtime` column to decide whether drop zero or just replace it with null value.", "_____no_output_____" ] ], [ [ "#count zero values in runtime data using groupby\ndf_runtime_0count = df.groupby('runtime').count()['id']\ndf_runtime_0count.head(2)", "_____no_output_____" ] ], [ [ "It's just has a small number of zero value rows in runtime column, so I decide to drop them.", "_____no_output_____" ], [ "### Cleaning Decision Summary\n1. Drop unnecessary columns for answering those questions : `homepage`, `tagline`, `imdb_id`, `overview`,`budget_adj`, `revenue_adj`.\n2. Drop duplicates.\n3. Drop null values columns that with small quantity of nulls : `cast`, `director`, and `genres`.\n4. Replace zero values with null values in the `budget` and `revenue` column.\n5. 
Drop zero values columns that with small quantity of zeros : `runtime`.", "_____no_output_____" ], [ "### Data Cleaning ", "_____no_output_____" ], [ "**First, according to the previous decision, let's drop unncessary columns : `imdb_id`, `homepage`, `tagline`, `overview`.**", "_____no_output_____" ] ], [ [ "# After discussing the structure of the data and any problems that need to be\n# cleaned, perform those cleaning steps in the second part of this section.\n# Drop extraneous columns\ncol = ['imdb_id', 'homepage', 'tagline', 'overview', 'budget_adj', 'revenue_adj']\ndf.drop(col, axis=1, inplace=True)", "_____no_output_____" ], [ "# see if these columns are dropped.\ndf.head(1)", "_____no_output_____" ] ], [ [ "** Drop the duplicates.**", "_____no_output_____" ] ], [ [ "#Drop the duplicates\ndf.drop_duplicates(inplace=True)", "_____no_output_____" ] ], [ [ "**Then, drop the null values in `cast`, `director`, `genres` columns.**", "_____no_output_____" ] ], [ [ "#drop the null values in cast, director, genres columns\ncal2 = ['cast', 'director', 'genres']\ndf.dropna(subset = cal2, how='any', inplace=True)", "_____no_output_____" ], [ "# see if nulls are dropped.\ndf.isnull().sum()", "_____no_output_____" ] ], [ [ "**Then, replace zero values with null values in the budget and revenue column.**", "_____no_output_____" ] ], [ [ "#replace zero values with null values in the budget and revenue column.\ndf['budget'] = df['budget'].replace(0, np.NaN)\ndf['revenue'] = df['revenue'].replace(0, np.NaN)\n# see if nulls are added in budget and revenue columns\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 10731 entries, 0 to 10865\nData columns (total 15 columns):\nid 10731 non-null int64\npopularity 10731 non-null float64\nbudget 5153 non-null float64\nrevenue 4843 non-null float64\noriginal_title 10731 non-null object\ncast 10731 non-null object\ndirector 10731 non-null object\nkeywords 9306 non-null object\nruntime 10731 non-null int64\ngenres 10731 non-null object\nproduction_companies 9772 non-null object\nrelease_date 10731 non-null object\nvote_count 10731 non-null int64\nvote_average 10731 non-null float64\nrelease_year 10731 non-null int64\ndtypes: float64(4), int64(4), object(7)\nmemory usage: 1.3+ MB\n" ] ], [ [ "**Finally, drop columns with small quantity of zero values : runtime.**", "_____no_output_____" ] ], [ [ "# directly filter the runtime data with nonzero value\ndf.query('runtime != 0', inplace=True)\n#check\ndf.query('runtime == 0')", "_____no_output_____" ] ], [ [ "### Cleaning Result Summary", "_____no_output_____" ], [ "<b>From the table bellow, we can see that the data in each column are almost clear without too many null values. And my clearning goal is also to keep the data integrity from the original one. Although there are some null values in `keywords` and `production companies` columns, it is still useful for analysis, and in fact the number of their null values are not very huge, so I just kept both of them. 
The data now with 10703 entries and 17 columns.</b>", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 10703 entries, 0 to 10865\nData columns (total 15 columns):\nid 10703 non-null int64\npopularity 10703 non-null float64\nbudget 5150 non-null float64\nrevenue 4843 non-null float64\noriginal_title 10703 non-null object\ncast 10703 non-null object\ndirector 10703 non-null object\nkeywords 9293 non-null object\nruntime 10703 non-null int64\ngenres 10703 non-null object\nproduction_companies 9759 non-null object\nrelease_date 10703 non-null object\nvote_count 10703 non-null int64\nvote_average 10703 non-null float64\nrelease_year 10703 non-null int64\ndtypes: float64(4), int64(4), object(7)\nmemory usage: 1.3+ MB\n" ] ], [ [ "<b>And from the table bellow, after transfer all zero values to null values in `budget` and `revenue` data, we can see that both the distribution of budget and revenue are much better, without too concentrate on the zero value or small values. And after deleting the zero values of runtime, we can see the minimum value of runtime is more reasonable.</b>", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "<a id='eda'></a>\n## Exploratory Data Analysis", "_____no_output_____" ], [ "**<a href=\"#r1\">Research Part 1: General Explore</a>**\n - <a href=\"#r1q1\">Question 1: Popularity Over Years.</a>\n - <a href=\"#r1q2\">Question 2: The distribution of popularity in different revenue levels in recent five years. </a>\n - <a href=\"#r1q3\">Question 3: The distribution of score rating in different revenue levels in recent five years.</a>\n \n**<a href=\"#r2\">Research Part 2 : Find the Properties are Associated with Successful Movies</a>**\n - <a href=\"#r2q1\">Question 1: What kinds of properties are associated with movies that have high popularity?</a>\n - <a href=\"#r2q2\">Question 2: What kinds of properties are associated with movies that have high voting score?</a>\n \n**<a href=\"#r3\">*Research Part 3 Top Keywords and Genres Trends by Generation</a>**\n - <a href=\"#r3q1\">Question 1: Number of movie released year by year.</a>\n - <a href=\"#r3q2\">Question 2: Keywords Trends by Generation.</a>\n - <a href=\"#r3q3\">Question 3: Genres Trends by Generation.</a>", "_____no_output_____" ], [ "<a id='r1'></a>\n## Research Part 1: General Explore", "_____no_output_____" ], [ "<a id='r1q1'></a>\n### Question 1: Popularity Over Years", "_____no_output_____" ], [ "To explore this question, let's take a look of the dataset.", "_____no_output_____" ] ], [ [ "df.head(2)", "_____no_output_____" ] ], [ [ "To analysis the question, I computed the mean of popularity in each year, and then plot lines to see the trends. Moreever, since the popularity has no upper bound, in case the mean of popularity is affected by the outlier, I also compute the median for analysising this question. 
", "_____no_output_____" ] ], [ [ "# compute the mean for popularity\np_mean = df.groupby('release_year').mean()['popularity']\np_mean.tail()", "_____no_output_____" ], [ "# compute the median for popularity\np_median = df.groupby('release_year').median()['popularity']\np_median.tail()", "_____no_output_____" ] ], [ [ "We can see that the median data for popularity is more smoother.\n\nNow, let's visualize it.", "_____no_output_____" ] ], [ [ "# build the index location for x-axis\nindex_mean = p_mean.index\nindex_median = p_median.index", "_____no_output_____" ], [ "#set style\nsns.set_style('whitegrid')\n#set x, y axis data\n#x1, y1 for mean data; x2, y2 for median data\nx1, y1 = index_mean, p_mean\nx2, y2 = index_median, p_median\n#set size\nplt.figure(figsize=(9, 4))\n#plot line chart for mean and median\nplt.plot(x1, y1, color = 'g', label = 'mean')\nplt.plot(x2, y2, color = 'r', label = 'median')\n#set title and labels\nplt.title('Popularity Over Years')\nplt.xlabel('Year')\nplt.ylabel('Popularity');\n#set legend\nplt.legend(loc='upper left')", "_____no_output_____" ] ], [ [ "From the figure above, we can see that the trend of popularity mean is upward year to year, and the peak is in the 2015, while the trend of popularity median is slightly smoother in recent years. We still can conclude that on average, popularity over years is going up in recent years. The trend is reasonable due to the eaiser access of movie information nowadays. Moreover, in the Internet age, people can easily search and gether movie information, even watch the content through different sources. Maybe it is such the backgroud that boost the movie popularity metrics.", "_____no_output_____" ], [ "<a id='r1q2'></a>\n### Question 2: The distribution of popularity in different revenue levels in recent five years. ", "_____no_output_____" ], [ "The movies popularity is growing up in recently years, but how about the popularity in different revenue levels? will popularity be more higher in high revenue level? In this research I don't dicuss the revenue trend since it is affected by many factors like inflation. Although the database contains the adjusted data but I just want the analysis be more simple. Moreever, if I find out the movie revenue trend is growing up, it still can't infer that the trend up is related to popularity just by looking the revenue trend line chart year by yaer.\n\nHence, it leads me that what to find out the distribution of popularity look like in terms of different revenue levels. Which means I can see the what popularity with which revenue levels. Dou to the revenue data contains wide range, to be more specific, I divided the revenue data into five levels: Low', 'Medium', 'Moderately High', 'High' according to their quartile. 
Also I choose the recent five years data to dicuss in order to focus on the current data feature.", "_____no_output_____" ], [ "**For the further usage of the level-diveded procedure with quartile, I build a `cut_into_quantile` function to divided data into four levels according to their quartile: 'Low', 'Medium', 'Moderately High', 'High'.**", "_____no_output_____" ], [ "**The cut_into_quantile function- general use.**", "_____no_output_____" ] ], [ [ "# quartile function\ndef cut_into_quantile(dfname ,column_name):\n# find quartile, max and min values\n min_value = dfname[column_name].min()\n first_quantile = dfname[column_name].describe()[4]\n second_quantile = dfname[column_name].describe()[5]\n third_quantile = dfname[column_name].describe()[6]\n max_value = dfname[column_name].max()\n# Bin edges that will be used to \"cut\" the data into groups\n bin_edges = [ min_value, first_quantile, second_quantile, third_quantile, max_value]\n# Labels for the four budget level groups\n bin_names = [ 'Low', 'Medium', 'Moderately High', 'High'] \n# Creates budget_levels column\n name = '{}_levels'.format(column_name)\n dfname[name] = pd.cut(dfname[column_name], bin_edges, labels=bin_names, include_lowest = True)\n return dfname", "_____no_output_____" ] ], [ [ "**Since I want to explore the data by year to year in the question, so to avoide the different level affecting among each year's revenue, I divide revenue levels by with each year's revenue quartile .**", "_____no_output_____" ] ], [ [ "#choose the recent five years \ndfyear =[2011,2012,2013,2014,2015]\n#creat a empty dataframe,df_q2\ndf_q2 = pd.DataFrame()\n\n#for each year, do the following procedure\nfor year in dfyear:\n dfn = df.query('release_year == \"%s\"' % year) # first filter dataframe with the selected year \n dfn2 = cut_into_quantile(dfn,'revenue') #apply the cut_into_quantile with the selected frame, store it to dfn2 \n df_q2 = df_q2.append(dfn2) #append dfn2 to df_q2\ndf_q2.info()", "/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:15: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n from ipykernel import kernelapp as app\n" ] ], [ [ "Now we can see we create a `revenue_levels` column with the same rows with `revenue`.", "_____no_output_____" ], [ "Then use the dataset to explore the popularity in each level each year.", "_____no_output_____" ] ], [ [ "# group the dataframe we created above with each revenue levels in each year, find the popularity meadian\ndfq2_summary = df_q2.groupby(['release_year','revenue_levels']).median()\ndfq2_summary.tail(8)", "_____no_output_____" ] ], [ [ "**Then plot a bar chart.**", "_____no_output_____" ] ], [ [ "# Setting the positions and width for the bars\npos = list(range(len(dfq2_summary.query('revenue_levels ==\"Low\"'))))\nwidth = 0.2 \n\n# Plotting the bars\nfig, ax = plt.subplots(figsize=(10,5))\n\n# Create a bar with Low data, in position pos,\nplt.bar(pos, \n #using 'Low' data,\n dfq2_summary.query('revenue_levels ==\"Low\"')['popularity'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#EE3224', \n # with label Low\n label= 'Low') \n\n# Create a bar with Medium data,\n# in position pos + some width buffer,\nplt.bar([p + width for p in pos], \n #using Medium data,\n dfq2_summary.query('revenue_levels 
==\"Medium\"')['popularity'],\n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#F78F1E', \n # with label Medium\n label='Medium') \n\n# Create a bar with Moderately High data,\n# in position pos + some width buffer,\nplt.bar([p + width*2 for p in pos], \n #using Moderately High data,\n dfq2_summary.query('revenue_levels ==\"Moderately High\"')['popularity'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#FFC222', \n # with label Moderately High\n label='Moderately High') \n\n# Create a bar with High data,\n# in position pos + some width buffer,\nplt.bar([p + width*3 for p in pos], \n #using High data,\n dfq2_summary.query('revenue_levels ==\"High\"')['popularity'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#4fb427', \n # with label High\n label='High')\n\n# Set the y axis label\nax.set_ylabel('popularity')\n\n# Set the chart's title\nax.set_title('Popularity in Different Revenue Levels in Recent Five Years')\n\n# Set the position of the x ticks\nax.set_xticks([p + 1.5 * width for p in pos])\n\n# Set the labels for the x ticks\nax.set_xticklabels([2011,2012,2013,2014,2015])\n\n# Adding the legend and showing the plot\nplt.legend( loc='upper left')\nplt.grid()\nplt.show()", "_____no_output_____" ] ], [ [ "**We can see that movies with higher revenue level are with higher popularity in recent five years.**\n\nWe can see that revenue level has postive relation with popularity. The result is reasonable since it makes me think of if movie producer wants to make high revenue movies, the first thing they always is **to promote it and make it popular.** So according the result from the previous question, I infer that a high revenue movie is always with a higher popularity than movies with lower revenue levels. So if we define success of a movie is it's revenue, one property it has is the high popularity.\n\n**But what about the score rating distribution in different revenue levels of movies? Does high revenue level movie has the property of high score rating?** Let's explore on the next question.", "_____no_output_____" ], [ "<a id='r1q3'></a>\n### Question 3: The distribution of revenue in different score rating levels in recent five years. 
", "_____no_output_____" ], [ "Use the same procedure on Question 2 to explore this question.", "_____no_output_____" ] ], [ [ "# group the dataframe we created above with each revenue levels in each year, find the vote_average mean\ndfq2_summary = df_q2.groupby(['release_year','revenue_levels']).mean()\ndfq2_summary.tail(4)", "_____no_output_____" ] ], [ [ "Plot the bar chart.", "_____no_output_____" ] ], [ [ "# Setting the positions and width for the bars\npos = list(range(len(dfq2_summary.query('revenue_levels ==\"Low\"'))))\nwidth = 0.2 \n\n# Plotting the bars\nfig, ax = plt.subplots(figsize=(12,3))\n\n# Create a bar with Low data, in position pos,\nplt.bar(pos, \n #using 'Low' data,\n dfq2_summary.query('revenue_levels ==\"Low\"')['vote_average'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#EE3224', \n # with label Low\n label= 'Low') \n\n# Create a bar with Medium data,\n# in position pos + some width buffer,\nplt.bar([p + width for p in pos], \n #using Medium data,\n dfq2_summary.query('revenue_levels ==\"Medium\"')['vote_average'],\n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#F78F1E', \n # with label Medium\n label='Medium') \n\n# Create a bar with Moderately High data,\n# in position pos + some width buffer,\nplt.bar([p + width*2 for p in pos], \n #using Moderately High data,\n dfq2_summary.query('revenue_levels ==\"Moderately High\"')['vote_average'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#FFC222', \n # with label Moderately High\n label='Moderately High') \n\n# Create a bar with High data,\n# in position pos + some width buffer,\nplt.bar([p + width*3 for p in pos], \n #using High data,\n dfq2_summary.query('revenue_levels ==\"High\"')['vote_average'], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color='#4fb427', \n # with label High\n label='High')\n\n# Set the y axis label\nax.set_ylabel('vote average')\n\n# Set the chart's title\nax.set_title('Vote Average Score in Different Revenue Levels in Recent Five Years')\n\n# Set the position of the x ticks\nax.set_xticks([p + 1.5 * width for p in pos])\n\n# Set the labels for the x ticks\nax.set_xticklabels([2011,2012,2013,2014,2015])\n\n#set y-axis height\nplt.ylim(3, 10)\n\n# Adding the legend and showing the plot\nplt.legend(loc='upper left')\nplt.grid()\nplt.show()", "_____no_output_____" ] ], [ [ "From the chart above, we can see that there is no big difference of movie rating between each revenue level. So it can be concluded that the high revenue movies don't have the significant high score rating.", "_____no_output_____" ], [ "## Part 1 Question Explore Summary", "_____no_output_____" ], [ "<b>\n1. Movie popularity trend is growing from 1960, I infer that it is with the background that nowadays movie information and rating system are more accessible by Internet with different channels.\n2. Movies with higher revenue level are with higher popularity in recent five years. In other words, a high revenue movie always with a higher popularity. So on the next part, I will explore: `What's properties that are associated with high popularity movies?` \n3. Movies with higher revenue level don't have the significant high score rating than other revenue levels in recent five years. 
So in the next part, I will explore: `What properties are associated with high rating movies?`\n</b>\n", "_____no_output_____" ], [ "<a id='r2'></a>\n## Research Part 2: Find the Properties Associated with Successful Movies\n- <b>Question 1: What kinds of properties are associated with movies that have high popularity?</b>\n 1. What budget levels are associated with movies that have high popularity?\n 2. What runtime levels are associated with movies that have high popularity on average?\n 3. What casts, directors, keywords, genres and production companies are associated with high popularity?\n \n\n- <b>Question 2: What kinds of properties are associated with movies that have high voting score?</b>\n 1. What budget levels are associated with movies that have high voting score?\n 2. What runtime levels are associated with movies that have high voting score?\n 3. What directors, keywords and genres are associated with voting score?\n ", "_____no_output_____" ], [ "### Function and research sample prepare", "_____no_output_____" ], [ "<b>\nIn the dataset, the potential properties associated with movies are `runtime`, `budget`, `cast`, `director`, `keywords`, `genres` and `production companies`. These data include two types: quantitative data and categorical data. Both `runtime` and `budget` are quantitative; the others are categorical.\n\n- For quantitative data, since the data is numeric, I can divide it into levels and look for properties across the whole range of movie success. I chose to use the whole dataset and divided runtime and budget into four levels according to their quartiles: 'Low', 'Medium', 'Moderately High', 'High', over the whole time range. Then I found out which runtime and budget levels are associated with a higher degree of movie popularity/voting score.\n\n\n- For categorical data, which are `cast`, `director`, `keywords` and `genres`, it is not necessary (and also difficult) to discuss the whole range of movie success, so I just focus on high popularity or high rating: I filter the top 100 popular / high voting score movies *in each year*, and then count the number of occurrences in every category every year to find their properties. Furthermore, in case the most frequent occurrences also appear in the worst popular / lowest voting score movies, I also filter the worst 100 movies in every year and then compare the result to the top 100's. If the top frequent occurrences also appear in the worst movies, I will describe these factors as properties associated with top movies as well as worst movies. Besides, these data contain pipe (|) characters, so first I have to split them.\n</b>", "_____no_output_____" ], [ "### <b>A. Function Prepare -- Build a level-divide function and a split-string function.</b>\n\n**A) The cut_into_quantile function - general use.**\n\n The function is the same one I used in the Part 1 questions.
So I just past it again below.", "_____no_output_____" ] ], [ [ "# quartile function\ndef cut_into_quantile(dfname ,column_name):\n# find quartile, max and min values\n min_value = dfname[column_name].min()\n first_quantile = dfname[column_name].describe()[4]\n second_quantile = dfname[column_name].describe()[5]\n third_quantile = dfname[column_name].describe()[6]\n max_value = dfname[column_name].max()\n# Bin edges that will be used to \"cut\" the data into groups\n bin_edges = [ min_value, first_quantile, second_quantile, third_quantile, max_value]\n# Labels for the four budget level groups\n bin_names = [ 'Low', 'Medium', 'Moderately High', 'High'] \n# Creates budget_levels column\n name = '{}_levels'.format(column_name)\n dfname[name] = pd.cut(dfname[column_name], bin_edges, labels=bin_names, include_lowest = True)\n return dfname", "_____no_output_____" ] ], [ [ "**B) Split pipe (|) characters and then count their number of appeared times, then find the top three factor.**", "_____no_output_____" ] ], [ [ "# split pipe characters and count their number of appeared times\n#argument:dataframe_col is the target dataframe&column; num is the number of the top factor\ndef find_top(dataframe_col, num=3):\n # split the characters in the input column \n #and make it to a list\n alist = dataframe_col.str.cat(sep='|').split('|')\n #transfer it to a dataframe\n new = pd.DataFrame({'top' :alist})\n #count their number of appeared times and\n #choose the top3\n top = new['top'].value_counts().head(num)\n return top", "_____no_output_____" ] ], [ [ "### <b>B. Sample prepare-- Filter Top 100 and Worst 100 movies in each year as the research sample.<b/>", "_____no_output_____" ], [ "<b>A) Select Top 100 popular movies in every year.</b>", "_____no_output_____" ] ], [ [ "# Select Top 100 popular movies.\n# fisrt sort it by release year ascending and popularity descending\ndf_top_p = df.sort_values(['release_year','popularity'], ascending=[True, False])\n#group by year and choose the top 100 high\ndf_top_p = df_top_p.groupby('release_year').head(100).reset_index(drop=True)\n#check, it must start from 1960, and with high popularity to low\ndf_top_p.head(2)", "_____no_output_____" ] ], [ [ "<b>B) Select Top 100 high revenue movies in every year.</b>", "_____no_output_____" ] ], [ [ "# Select Top 100 high revenue movies.\n# fisrt sort it by release year ascending and revenue descending\ndf_top_r = df.sort_values(['release_year','revenue'], ascending=[True, False])\n#group by year and choose the top 100 high\ndf_top_r = df_top_r.groupby('release_year').head(100).reset_index(drop=True)\n#check, it must start from 1960, and with high revenue to low\ndf_top_r.head(2)", "_____no_output_____" ] ], [ [ "<b>C) Select Top 100 high score rating movies in every year.</b>", "_____no_output_____" ] ], [ [ "# Select Top 100 high scorer ating movies.\n# fisrt sort it by release year ascending and high scorer ating descending\ndf_top_s = df.sort_values(['release_year','vote_average'], ascending=[True, False])\n#group by year and choose the top 100 high\ndf_top_s = df_top_s.groupby('release_year').head(100).reset_index(drop=True)\n#check, it must start from 1960, and with high scorer ating to low\ndf_top_s.head(2)", "_____no_output_____" ] ], [ [ "<b>D) To compare to results, I also create three subdataset for the last 100 movies.</b>", "_____no_output_____" ] ], [ [ "# the last 100 popular movies in every year\ndf_low_p = df.sort_values(['release_year','popularity'], ascending=[True, True])\ndf_low_p = 
df_low_p.groupby('release_year').head(100).reset_index(drop=True)\n# the last 100 high revenue movies in every year\ndf_low_r = df.sort_values(['release_year','revenue'], ascending=[True, True])\ndf_low_r = df_low_r.groupby('release_year').head(100).reset_index(drop=True)\n# the last 100 score rating movies in every year\ndf_low_s = df.sort_values(['release_year','vote_average'], ascending=[True, True])\ndf_low_s = df_low_s.groupby('release_year').head(100).reset_index(drop=True)", "_____no_output_____" ] ], [ [ "<a id='r2q1'></a>\n## Question 1: What kinds of properties are associated with movies that have high popularity?\n\n<b>\n1. What's the budget level movie are associated with movies that have high popularity?\n2. What's the runtime level are associated with movies that have high popularity on average?\n3. What's casts, directors, keywords, genres and production companies are associated with high popularity?\n</b>", "_____no_output_____" ], [ "## 1.1 What's the budget level movie are associated with movies that have high popularity?", "_____no_output_____" ], [ "First, divided budget data into four levels with it's quartile: 'Low', 'Medium', 'Moderately High', 'High' and create a level column.", "_____no_output_____" ] ], [ [ "# use cut_into_quantile function to build a level column\ndf = cut_into_quantile(df,'budget')\ndf.head(1)", "_____no_output_____" ] ], [ [ "From the table above, I built a `budget_levels` columns.", "_____no_output_____" ] ], [ [ "# Find the mean and median popularity of each level with groupby\nresult_mean = df.groupby('budget_levels')['popularity'].mean()\nresult_mean ", "_____no_output_____" ], [ "result_median = df.groupby('budget_levels')['popularity'].median()\nresult_median", "_____no_output_____" ] ], [ [ "Let's visualize it.", "_____no_output_____" ] ], [ [ "# the x locations for the groups\nind = np.arange(len(result_mean)) \n# the width of the bars\nwidth = 0.5 \nind", "_____no_output_____" ], [ "# plot bars\n#set style\nsns.set_style('darkgrid')\nbars = plt.bar(ind, result_mean, width, color='g', alpha=.7, label='mean')\n\n# title and labels\nplt.ylabel('popularity')\nplt.xlabel('budget levels')\nplt.title('Popularity with Budget Levels')\nlocations = ind # xtick locations,345...\nlabels = result_median.index \nplt.xticks(locations, labels)\n# legend\nplt.legend() ", "_____no_output_____" ] ], [ [ "**From the figure above, we can see that movies with higher popularity are with higher budget level.** The result is reasonable since movies with higher popularity may has a higher promoting advertising cost. And with the high promotion level people always have more probability to get know these movies. 
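As a quick sanity check on the level-based comparison (an addition, not part of the original analysis), the raw association between budget and popularity can also be measured directly; Spearman rank correlation is used here because both variables are heavily skewed. This assumes the `df` DataFrame built above.

```python
# Hedged sketch: rank correlation between budget and popularity.
# Assumes `df` from above; rows with missing budget are dropped for this check only.
subset = df[['budget', 'popularity']].dropna()
corr = subset['budget'].corr(subset['popularity'], method='spearman')
print('Spearman correlation between budget and popularity: {:.3f}'.format(corr))
```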
", "_____no_output_____" ], [ "## 1.2 What's the runtime level are associated with movies that have high popularity on average?", "_____no_output_____" ], [ "Divided runtime data into four levels with it's quartile: 'Short', 'Medium', 'Moderately Long', 'Long'.", "_____no_output_____" ] ], [ [ "df = cut_into_quantile(df,'runtime')\ndf.head(1)", "_____no_output_____" ], [ "# Find the mean popularity of each level with groupby\nresult_mean = df.groupby('runtime_levels')['popularity'].mean()\nresult_mean", "_____no_output_____" ], [ "# Find the median popularity of each level with groupby\nresult_median = df.groupby('runtime_levels')['popularity'].median()\nresult_median", "_____no_output_____" ] ], [ [ "Let's visualize it.", "_____no_output_____" ] ], [ [ "ind = np.arange(len(result_median)) # the x locations for the groups\nwidth = 0.5 # the width of the bars", "_____no_output_____" ], [ "# plot bars\nbars = plt.bar(ind, result_median, width, color='#1ea2bc', alpha=.7, label='median')\n\n# title and labels\nplt.ylabel('popularity')\nplt.xlabel('runtime levels')\nplt.title('Popularity with Runtime Levels')\nlocations = ind # xtick locations,345...\nlabels = result_median.index \nplt.xticks(locations, labels)\n# legend\nplt.legend() ", "_____no_output_____" ] ], [ [ "**We can see that the higher popularity movies has longer run time.**", "_____no_output_____" ], [ "## 1.3 What's casts, directors, keywords, genres and production companies are associated with high popularity?", "_____no_output_____" ], [ "**First, choose the dataset-df_top_p. It is the dataframe about top 100 popular movies in each year.**", "_____no_output_____" ] ], [ [ "df_top_p.head(2)", "_____no_output_____" ] ], [ [ "**Then, find the three highest occurrences in each category among the top 100 popular movies. 
And store the result table into variables in order to create a summary table.**", "_____no_output_____" ] ], [ [ "# find top three cast\na = find_top(df_top_p.cast)\n# find top three director\nb = find_top(df_top_p.director)\n# find top three keywords\nc = find_top(df_top_p.keywords)\n# find top three genres\nd = find_top(df_top_p.genres)\n# find top three production companies\ne = find_top(df_top_p.production_companies)", "_____no_output_____" ] ], [ [ "**Use the result above to create a summary table.**", "_____no_output_____" ] ], [ [ "#Use the result above to create a summary dataframe.\ndf_popular = pd.DataFrame({'popular_cast': a.index, 'popular_director': b.index, 'popular_keywords': c.index, 'popular_genres': d.index, 'popular_producer': e.index})\ndf_popular", "_____no_output_____" ] ], [ [ "**Finally, find the three highest occurrences in each category among the 100 unpopular movies.**", "_____no_output_____" ] ], [ [ "# call the dataset wiht the 100 unpopular movies in each year\ndf_low_p.head(2)", "_____no_output_____" ], [ "# find top three cast among the among the 100 unpopular movies\nna = find_top(df_low_p.cast)\n# find top three director among the among the 100 unpopular movies\nnb = find_top(df_low_p.director)\n# find top three keywords among the among the 100 unpopular movies\nnc = find_top(df_low_p.keywords)\n# find top three genres among the among the 100 unpopular movies\nnd = find_top(df_low_p.genres)\n# find top three production companiess among the among the 100 unpopular movies\nne = find_top(df_low_p.production_companies)", "_____no_output_____" ], [ "df_unpopular = pd.DataFrame({'unpopular_cast': na.index, 'unpopular_director': nb.index, 'unpopular_keywords': nc.index, 'unpopular_genres': nd.index, 'unpopular_producer': ne.index})\ndf_unpopular", "_____no_output_____" ] ], [ [ "**Now, we get the two table that list the properties occurred the most among the top 100 popular movies each year, among the top 100 unpopular movies each year respectively.**\n\n**Now we can campare the two tables and find out What's casts, directors, keywords, genres and production companies are associated with high popularity.**", "_____no_output_____" ] ], [ [ "# compare\ndf_popular", "_____no_output_____" ] ], [ [ "From the tabbles above, we can find that cast *Michael Caine* is appeared in both popular and unpopular movies; director *Woody Allen* and *Clint Eastwood* are appeared in both popular and unpopular movies; all three genres *Drama*, *Comedy*, *Thriller* are appeared in both popular and unpopular movies; *sex* is appeared in both popular and unpopular movies; all three producer *Universal Pictures*, *Warner Bros*, *Paramount Pictures* are appeared in both popular and unpopular movies. The summary are as follows:\n\n\n- **Cast associated with high popularity movies**: `Robert De Niro` and `Bruce Willis`. It's really reasonable because I have seen a lot of promoted movies content which are performed by them in my country. On average I think they do have the huge popularity in past years!\n- **Director associated with high popularity movies**: `Steven Spielberg`. It's no doubt that he got the first place since he has won so many awards and honors for his high quality and popular work!\n- **Both of the most popular and unpopular movies are associated three mainly genres: Drama, Comedy, and Thriller.** I just can infer that these genres are common in the movie industry.\n- **Keywords associated with high popularity movies**: `based on novel` and `dystopia`. 
It' also no doubt it comes out the result. Especially the based on novel movies, since nowadays tons of movies are made based on novel like Harry Potter, The Hunger Games etc, and they were also famous in my country.\n- **Producer associated with high popularity movies and unpopularity movies**: `Warner Bros.`, `Universal Pictures` and `Paramount Pictures`. The three giants of movie indusry did produce such a various quality movies!\n", "_____no_output_____" ], [ "<a id='r2q2'></a>\n### Question 2: What kinds of properties are associated with movies that have high voting score?\n\n<b>\n1. What's the budget level are associated with movies that have high voting score?\n2. What's the runtime level are associated with movies that have high voting score?\n3. What's the directors, keywords, genres are associated with voting score?\n</b>", "_____no_output_____" ], [ "Use the same procedure with Research 2, Question 1 to answer these questions.", "_____no_output_____" ], [ "## 2.1 What's the budget level are associated with movies that have high voting score?", "_____no_output_____" ], [ "**First, use the dataframe with `budget level` I have created in the previous question. Then find the mean and median of `vote_average` group by different budget level.**", "_____no_output_____" ] ], [ [ "# Find the mean and median voting score of each level with groupby\nresult_mean = df.groupby('budget_levels')['vote_average'].mean()\nresult_mean", "_____no_output_____" ], [ "result_median = df.groupby('budget_levels')['vote_average'].median()\nresult_median", "_____no_output_____" ] ], [ [ "**Let's use the mean table above to visualize it.**", "_____no_output_____" ] ], [ [ "# plot bars\n#set style\nsns.set_style('darkgrid')\nind = np.arange(len(result_mean)) # the x locations for the groups\nwidth = 0.5 # the width of the bars\n\n# plot bars\nplt.subplots(figsize=(8, 6))\nbars = plt.bar(ind, result_median, width, color='y', alpha=.7, label='mean')\n\n# title and labels\nplt.ylabel('rating')\nplt.xlabel('budget levels')\nplt.title('Rating with Budget Levels')\nlocations = ind # xtick locations,345...\nlabels = result_median.index \nplt.xticks(locations, labels)\n# legend\nplt.legend( loc='upper left') ", "_____no_output_____" ] ], [ [ "We can see that there is no big difference in average voting score at different budget levels. **So from the result, maybe high budget of a movie is not necessary to a good quality of movie!**", "_____no_output_____" ], [ "## 2.2 What's the runtime level are associated with movies that have high voting score?", "_____no_output_____" ], [ "**First, use the dataframe with `runtime level` I have created in the previous question. 
Then find the mean and median of `vote_average` group by different runtime level.**", "_____no_output_____" ] ], [ [ "# Find the mean popularity of each level with groupby\nresult_mean = df.groupby('runtime_levels')['vote_average'].mean()\nresult_mean", "_____no_output_____" ], [ "result_median = df.groupby('runtime_levels')['vote_average'].median()\nresult_median", "_____no_output_____" ] ], [ [ "**Let's visualize it.**", "_____no_output_____" ] ], [ [ "sns.set_style('darkgrid')\nind = np.arange(len(result_mean)) # the x locations for the groups\nwidth = 0.5 # the width of the bars\n\n# plot bars\nbars = plt.bar(ind, result_median, width, color='g', alpha=.7, label='mean')\n\n# title and labels\nplt.ylabel('rating')\nplt.xlabel('runtime levels')\nplt.title('Rating with Runtime Levels')\nlocations = ind # xtick locations,345...\nlabels = result_median.index \nplt.xticks(locations, labels)\n# legend\nplt.legend() ", "_____no_output_____" ] ], [ [ "**We can see that there is no big difference in average voting score in different runtime levels. So from the result, maybe long runtime of a movie is not necessary to a good quality of movie!**", "_____no_output_____" ], [ "## 2.3 What's the directors, keywords, genres are associated with voting score?", "_____no_output_____" ], [ "**First, choose the dataset-df_top_s. It is the dateframe about top 100 high voting score movies in each year.**", "_____no_output_____" ] ], [ [ "df_top_s.head(2)", "_____no_output_____" ] ], [ [ "**Then, find the three highest occurrences in each category among the top 100 high voting score movies. And store the result table into variables in order to create a summary table.**", "_____no_output_____" ] ], [ [ "# find top three director\na = find_top(df_top_s.director)\n# find top three keywords\nb = find_top(df_top_s.keywords)\n# find top three genres\nc = find_top(df_top_s.genres)", "_____no_output_____" ] ], [ [ "**Use the result above to create a summary table.**", "_____no_output_____" ] ], [ [ "#create a summary dataframe.\ndf_high_score = pd.DataFrame({'high_score_director': a.index, 'high_score_keywords': b.index, 'high_score_genres': c.index})\ndf_high_score", "_____no_output_____" ] ], [ [ "**Finally, find the three highest occurrences in each category of the worst 100 rating score movies.**", "_____no_output_____" ] ], [ [ "# call the dataset wiht the 100 low rating movies in each year\ndf_low_s.head(2)", "_____no_output_____" ], [ "# find top three director among the among the 100 low rating movies\nna = find_top(df_low_s.director)\n# find top three keywords among the among the 100 low rating movies\nnb = find_top(df_low_s.keywords)\n# find top three genres among the among the 100 low rating movies\nnc = find_top(df_low_s.genres)", "_____no_output_____" ] ], [ [ "Use the result above to create a summary table.", "_____no_output_____" ] ], [ [ "df_low_score = pd.DataFrame({'low_score_director': na.index, 'low_score_keywords': nb.index, 'low_score_genres': nc.index})\ndf_low_score", "_____no_output_____" ], [ "# compare\ndf_high_score", "_____no_output_____" ] ], [ [ "<b>\nAfter summing up both tables above, we can find that:\n1. Martin Scorsese and Clint Eastwood have made top quality movies on average over the past years from 1960.\n2. The top quality movies have the keywords with *based on novel* and *woman director* over the past years from 1960. The *based on novel* keyword are also with the top popular movies, but the result of woman director amazed me! 
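To make the comparison between the high-score and low-score tables a bit more systematic, one option (sketched here as an addition, not part of the original notebook) is to keep only the entries that are frequent among the top movies but absent from the bottom ones. The cut-off of 10 entries per side is an arbitrary choice.

```python
# Hedged sketch: keywords frequent among high-score movies but not among low-score ones.
# Assumes find_top, df_top_s and df_low_s are defined as above; the top-10 cut-off is illustrative.
top_keywords = set(find_top(df_top_s.keywords, 10).index)
low_keywords = set(find_top(df_low_s.keywords, 10).index)
print(top_keywords - low_keywords)
```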
\n </b>", "_____no_output_____" ], [ "## Part 2 Question Explore Summary", "_____no_output_____" ], [ "1. For the properties are associated with high popularity movies, they are **high budget levels** and **longer run time**. And `cast` associated with high popularity movies are **Robert De Niro** and **Bruce Willis**; `director` associated with high popularity movies are **Steven Spielberg**; `genres` associated with high popularity movies are **drama**, **comedy**, and **thriller** but they also appeared in the most unpopular movies; `keywords` associated with high popularity movies are based on **novel** and **dystopia**; `producer` associated with high popularity movies are **Warner Bros.**, **Universal Pictures** and **Paramount Pictures**, but they are also appeared in the most unpopular movies.\n\n2. Each level in both `runtime` and `budget` don't have obvious different high rating score. In other words, the low budget level or the low budget may still have a high rating. And **Martin Scorsese** and **Clint Eastwood** have made top quality movies on average over the past years from 1960; the top quality movies have the `keywords` with **based on novel** and **woman director** over the past years from 1960.", "_____no_output_____" ], [ "<a id='r3'></a>\n## Research Part 3 Top Keywords and Genres Trends by Generation", "_____no_output_____" ], [ "<b>\n- Question 1: Number of movie released year by year\n- Question 2: Keywords Trends by Generation\n- Question 3: Genres Trends by Generation\n </b>", "_____no_output_____" ], [ "<b>\nIn question 1, I am going to find out the number of movie released year by year.\n \nIn question 2 and 3, I am going to find out what's the keyword and genre appeared most by generation? To do this:\n- Step one: group the dataframe into five generations: 1960s, 1970s, 1980s, 1990s and 2000s\n- Step two: use the `find_top` function to count out the most appeared keyword and genre in each generation dataframe.\n </b>", "_____no_output_____" ], [ "<a id='r3q1'></a>\n## Question 1: Number of movie released year by year", "_____no_output_____" ], [ "First, use group by release year and count the number of movie released in each year.", "_____no_output_____" ] ], [ [ "movie_count = df.groupby('release_year').count()['id']\nmovie_count.head()", "_____no_output_____" ] ], [ [ "Then visualize the result.", "_____no_output_____" ] ], [ [ "#set style\nsns.set_style('darkgrid')\n#set x, y axis data\n# x is movie release year\nx = movie_count.index\n# y is number of movie released\ny = movie_count\n#set size\nplt.figure(figsize=(10, 5))\n#plot line chart \nplt.plot(x, y, color = 'g', label = 'mean')\n#set title and labels\nplt.title('Number of Movie Released year by year')\nplt.xlabel('Year')\nplt.ylabel('Number of Movie Released');", "_____no_output_____" ] ], [ [ "We can see that the number of movie released are increasing year by year. And the it is the accelerated growth since the curve is concave upward. 
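To put a number behind the "accelerated growth" reading (an added check, not in the original notebook), the year-over-year change in the number of releases can be computed directly from `movie_count`:

```python
# Hedged sketch: year-over-year change in the number of released movies.
# Assumes `movie_count` (the per-year counts computed above) is available.
yoy_change = movie_count.diff()
print(yoy_change.tail(10))  # mostly positive and growing increments would support acceleration
```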
", "_____no_output_____" ], [ "<a id='r3q2'></a>\n## Question 2: Keywords Trends by Generation", "_____no_output_____" ], [ "First, sort the movie release year list to group the dataframe into generation.", "_____no_output_____" ] ], [ [ "# sort the movie release year list.\ndfyear= df.release_year.unique()\ndfyear= np.sort(dfyear)\ndfyear", "_____no_output_____" ] ], [ [ "Then, build the generation catagory of 1960s, 1970s, 1980s, 1990s and 2000s.", "_____no_output_____" ] ], [ [ "# year list of 1960s\ny1960s =dfyear[:10]\n# year list of 1970s\ny1970s =dfyear[10:20]\n# year list of 1980s\ny1980s =dfyear[20:30]\n# year list of 1990s\ny1990s = dfyear[30:40]\n# year list of afer 2000\ny2000 = dfyear[40:]", "_____no_output_____" ] ], [ [ "**Then for each generation dataframe, use the find_top to find out the most appeared keywords, then combine this result to a new datafram.**", "_____no_output_____" ] ], [ [ "# year list of each generation\ntimes = [y1960s, y1970s, y1980s, y1990s, y2000]\n#generation name\nnames = ['1960s', '1970s', '1980s', '1990s', 'after2000']\n#creat a empty dataframe,df_r3\ndf_r3 = pd.DataFrame()\nindex = 0\n#for each generation, do the following procedure\nfor s in times:\n # first filter dataframe with the selected generation, and store it to dfn\n dfn = df[df.release_year.isin(s)] \n #apply the find_top function with the selected frame, using the result create a dataframe, store it to dfn2 \n dfn2 = pd.DataFrame({'year' :names[index],'top': find_top(dfn.keywords,1)})\n #append dfn2 to df_q2\n df_r3 = df_r3.append(dfn2)\n index +=1\ndf_r3", "_____no_output_____" ] ], [ [ "Now, we get the keywords of most filmed movies in each generation. We can see that in 1960s and 1970s, the top keywords was *based on novel*, which means movies with the keyword based on novel are released most according the dataset. In 1980s, the top keyword was *nudity*, what a special trend! In 1990s, *independent film* became the top keyword. And after 2000, the movie with the feature *woman director* were released most. 
It's sounds great!", "_____no_output_____" ], [ "Now let's visualize the result.", "_____no_output_____" ] ], [ [ "# Setting the positions\ngeneration = ['1960s', '1970s', '1980s', '1990s', 'after2000']\nkeywords = df_r3.index\ny_pos = np.arange(len(generation))\nfig, ax = plt.subplots()\n# Setting y1: the keywords number\ny1 = df_r3.top\n# Setting y2 again to present the right-side y axis labels\ny2 = df_r3.top\n#plot the bar\nax.barh(y_pos,y1, color = '#007482')\n#set the left side y axis ticks position\nax.set_yticks(y_pos)\n#set the left side y axis tick label\nax.set_yticklabels(keywords)\n#set left side y axis label\nax.set_ylabel('keywords')\n\n#create another axis to present the right-side y axis labels\nax2 = ax.twinx()\n#plot the bar\nax2.barh(y_pos,y2, color = '#27a5b4')\n#set the right side y axis ticks position\nax2.set_yticks(y_pos)\n#set the right side y axis tick label\nax2.set_yticklabels(generation)\n#set right side y axis label\nax2.set_ylabel('generation')\n#set title\nax.set_title('Keywords Trends by Generation')", "_____no_output_____" ] ], [ [ "One more thing, we can see that the number of the keywords appeared changes from 16 to 347 by generation, and it is resonable since the trend is consistent with the number of movie released.", "_____no_output_____" ], [ "<a id='r3q3'></a>\n## Question 3: Genres Trends by Generation", "_____no_output_____" ], [ "Use the same procedure as Question 2, first use the `find_top` to find out the most appeared genres, then combine this result to a new datafram.", "_____no_output_____" ] ], [ [ "# year list of each generation\ntimes = [y1960s, y1970s, y1980s, y1990s, y2000]\n#generation name\nnames = ['1960s', '1970s', '1980s', '1990s', 'after2000']\n#creat a empty dataframe,df_r3\ndf_r3 = pd.DataFrame()\nindex = 0\n#for each generation, do the following procedure\nfor s in times:\n # first filter dataframe with the selected generation, and store it to dfn\n dfn = df[df.release_year.isin(s)] \n #apply the find_top function with the selected frame, using the result create a dataframe, store it to dfn2 \n dfn2 = pd.DataFrame({'year' :names[index],'top': find_top(dfn.genres,1)})\n #append dfn2 to df_q2\n df_r3 = df_r3.append(dfn2)\n index +=1\ndf_r3", "_____no_output_____" ] ], [ [ "Visualize the result.", "_____no_output_____" ] ], [ [ "# Setting the positions\ngeneration = ['1960s', '1970s', '1980s', '1990s', 'after2000']\ngenres = df_r3.index\ny_pos = np.arange(len(generation))\nfig, ax = plt.subplots()\n# Setting y1: the genre number\ny1 = df_r3.top\n# Setting y2 again to present the right-side y axis labels\ny2 = df_r3.top\n#plot the bar\nax.barh(y_pos,y1, color = '#007482')\n#set the left side y axis ticks position\nax.set_yticks(y_pos)\n#set the left side y axis tick label\nax.set_yticklabels(genres)\n#set left side y axis label\nax.set_ylabel('genres')\n\n#create another axis to present the right-side y axis labels\nax2 = ax.twinx()\n#plot the bar\nax2.barh(y_pos,y2, color = '#27b466')\n#set the right side y axis ticks position\nax2.set_yticks(y_pos)\n#set the right side y axis tick label\nax2.set_yticklabels(generation)\n#set right side y axis label\nax2.set_ylabel('generation')\n#set title\nax.set_title('Genres Trends by Generation')", "_____no_output_____" ] ], [ [ "We can see that the genre `Drama` are the most filmed in almost all generation. Only the 1980s are dominated by the `comedy` type. ", "_____no_output_____" ], [ "## Part 3 Question Explore Summary", "_____no_output_____" ], [ "1. 
The number of movie released are increasing year by year. And the it is in the accelerated growth trend.\n2. In 1960s and 1970s, the top keywords was `based on novel`, which means movies with the keyword based on novel are released most according the dataset. In 1980s, the top keyword was `nudity`. In 1990s, `independent film` became the top keyword. And after 2000, the movie with the feature `woman director` were released most.\n3. The genre `Drama` are the most filmed in almost all generation. Only the 1980s are dominated by the `comedy` type.", "_____no_output_____" ], [ "<a id='conclusions'></a>\n## Conclusions\n\nThe goal in the research is primary to explore three parts questions:\n\n** Part one: General Explore**\n \n At part one, I explored some general questions. The result turned out that the movie popularity trend is growing from 1960 on average. Moreever, I focused on the movies which are with high revenue. I found movies with higher revenue level are with higher popularity in recent five years on average. Besides, movies with higher revenue level don't have the significant high score rating in recent five years. And this results made me want to learn more: What's properties that are associated with high popularity movies? What's properties that are associated with high high voting score?\n \n** Part two: Find the Properties are Associated with Successful Movies**\n \n At this part, I first found out the properties that are associated with high popularity movies. They were with high budget levels and longer run time. And cast associated with high popularity movies are Robert De Niro and Bruce Willis; director associated with high popularity movies are Steven Spielberg; genres associated with high popularity movies are drama, comedy, and thriller but they also appeared in the most unpopular movies; keywords associated with high popularity movies are based on novel and dystopia; producer associated with high popularity movies are Warner Bros., Universal Pictures and Paramount Pictures, but they are also appeared in the most unpopular movies.\n \n And the I found out the properties that are associated with high high voting score. Each level in both runtime and budget don't have obvious different high rating score. In other words, the low budget level or the low budget may still have a high rating. And Martin Scorsese and Clint Eastwood have made top quality movies on average over the past years from 1960; the top quality movies have the keywords with based on novel and woman director over the past years from 1960.\n \n \n**Part three: Top Keywords and Genres Trends by Generation**\n \n In this part, I explored the number of movie released trend year by year. Then explored the keywords and genres trends, with group the dataframe into five generations: 1960s, 1970s, 1980s, 1990s and 2000s.\n \n The number of movie released are increasing year by year. And the it is in the accelerated growth trend. Besides, In 1960s and 1970s, the top keywords was based on novel, in 1980s, the top keyword was nudity. In 1990s, independent film became the top keyword. And after 2000, the movie with the feature woman director were released most.\n Further more, the genre Drama are the most filmed in almost all generation. Only the 1980s are dominated by the comedy type.\n\nTo sum up, I did find a lot of interesting information among the dataset, just hope that I can dig more! But there are still some limitations.", "_____no_output_____" ], [ "## Limitation\n1. 
Data quality: although I assume the zero values in the revenue and budget columns are missing values, there are still a lot of unreasonably small/big values in both columns. Also, the metrics for rating and popularity are not clearly defined, and their basis may change year by year.\n2. Although `popularity` doesn't have an upper bound, it has a high probability of containing outliers. I chose to retain the data to keep its originality, but there may still be reasons to take this into account.\n3. Units of the revenue and budget columns: I am not sure whether the budgets and revenues are all in US dollars.\n4. The inflation effect: I used the revenue and budget data to explore, but I didn't use the adjusted data, although the dataset provides data adjusted to the year 2010.\n5. In research one, although I discussed the distribution of popularity in different revenue levels in recent five years, I cut the revenue levels based only on quartiles. I didn't look at the whole revenue distribution first, so there is a risk that the high revenue level still covers a wide range, which may affect the final result. Besides, in this part I only discussed data from the recent five years; other years may show a different distribution.\n6. In research two, I discussed the properties associated with successful movies. The success I defined here is high popularity and high voting score. I didn't look for the properties of high revenue, since I just assumed that the high revenue level comes with higher popularity (which I found in research one), so I left out finding the properties of high revenue movies. But there must be other factors associated with high revenue movies.\n7. In research two, I discussed the budget level and runtime level properties, but I cut both of them based on whole-period quartiles rather than year by year. Also, cutting them into four levels based on quartiles is still rough.\n8. For the categorical data, I split the entries and counted them one by one. But there may be an effect when these words combine: for example, the keyword `based on novel` is popular, but the combination that truly makes a movie successful may be `based on novel` & `adventure`.\n9. I didn't take the number of votes into consideration, so the rating score may be biased when the vote count is small.", "_____no_output_____" ] ], [ [ "from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb'])", "_____no_output_____" ] ] ]
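Regarding the inflation limitation above, a follow-up could repeat the revenue-level split with the dataset's inflation-adjusted figures. This is only a sketch: the file name `tmdb-movies.csv` and the column names `budget_adj` / `revenue_adj` are assumptions about the raw CSV (those columns were not kept in the cleaned `df`), so they may need to be adjusted.

```python
# Hedged sketch: revenue levels based on inflation-adjusted revenue (2010 dollars).
# File name and the 'revenue_adj' column name are assumptions about the raw TMDb CSV.
raw = pd.read_csv('tmdb-movies.csv')
raw = raw[raw['revenue_adj'] > 0]
raw['revenue_adj_levels'] = pd.qcut(raw['revenue_adj'], 4,
                                    labels=['Low', 'Medium', 'Moderately High', 'High'])
print(raw.groupby('revenue_adj_levels')['popularity'].median())
```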
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
ec5552a33525060f623aeff3cce2bff520e9affa
111,371
ipynb
Jupyter Notebook
Crime_Education_corr.ipynb
HusseinAndia/crime-education-correlation-USA
414b8e4812647addddfedc8ec6575094c00213cf
[ "MIT" ]
null
null
null
Crime_Education_corr.ipynb
HusseinAndia/crime-education-correlation-USA
414b8e4812647addddfedc8ec6575094c00213cf
[ "MIT" ]
null
null
null
Crime_Education_corr.ipynb
HusseinAndia/crime-education-correlation-USA
414b8e4812647addddfedc8ec6575094c00213cf
[ "MIT" ]
null
null
null
261.434272
99,440
0.902003
[ [ [ "import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker", "_____no_output_____" ], [ "#H:\\\\Dataset\\\\crime-rates\\\\\n#H:\\\\Dataset\\\\usa-education-budget-analysis\\\\data\\\\\n#import 2 files \n#First one is crime : has all crimes in USA 1975-2015 and their classifications\n#Second one is edu_budget : has USA education budget, GDP and ratio between them for 1976-2016\ncrime = pd.read_csv('crime-rates.csv')\nedu_budget = pd.read_csv('data.csv')", "_____no_output_____" ], [ "#The file contains all crimes collected for each year in the end after all cities\n#We pull all crimes for United States, rename the columns and slicing the years\nall_us_crimes = crime[crime['agency_jurisdiction'] == 'United States']\nall_us_crimes = all_us_crimes[[ 'report_year', 'agency_jurisdiction', 'violent_crimes']]\nall_us_crimes.columns = ['year', 'country', 'all_crimes']\nall_us_crimes = all_us_crimes.reset_index().iloc[1:,].drop('index', axis=1)\n", "_____no_output_____" ], [ "edu_budget = edu_budget[['YEAR', 'BUDGET_ON_EDUCATION']]\nedu_budget.columns = ['year', 'edu_spending']\nedu_budget = edu_budget.iloc[:-1,]\n\n\n#all_us_crimes\nedu_budget", "_____no_output_____" ], [ "fig, ax1 = plt.subplots(figsize=(20, 10))\nax1.set_xlabel('Year', fontsize=20)\nax1.set_ylabel('Total Crimes in US', color='g', fontsize=20)\nax1.plot(all_us_crimes.year, all_us_crimes.all_crimes, 'o-g')\nax1.tick_params(axis='y', labelsize=15, labelcolor='g')#Control tick's labelsize or labelcolor\nax1.tick_params(axis='x', labelsize=15)\nax1.yaxis.set_major_formatter(ticker.EngFormatter())#Format axis tick labels from number to thousands or Millions.\n\nax2 = ax1.twinx() \n\nax2.set_ylabel('Education spending\\n (in millions of dollars)', color='r', fontsize=20) \nax2.plot(edu_budget.year, edu_budget.edu_spending, 'o-r')\nax2.tick_params(axis='y', labelsize=15, labelcolor='r')\nax2.yaxis.set_major_formatter(ticker.EngFormatter())\nfig.tight_layout()\nplt.show()", "_____no_output_____" ], [ "np.corrcoef(edu_budget['edu_spending'], all_us_crimes['all_crimes'])", "_____no_output_____" ] ] ]
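A defensive variation on the correlation cell above (an addition, not in the notebook): merging the two tables on `year` before correlating guarantees the crime and education-spending values are paired by the same year, instead of relying on both frames happening to have the same length and order.

```python
# Hedged sketch: align the crime and education-spending series on 'year' before correlating.
# Assumes all_us_crimes and edu_budget are built exactly as in the cells above.
merged = pd.merge(all_us_crimes, edu_budget, on='year', how='inner')
print(np.corrcoef(merged['edu_spending'], merged['all_crimes']))
```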
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ec55577025c9808e3f5284285f52ce8b5e0d08ab
742,571
ipynb
Jupyter Notebook
1_15_Computer_Vision_Exercises/SobelGradientDirection.ipynb
Alyxion/Udacity_SelfDrivingCarEngineerNd
7da27ec7ee86fc65d07c9e1b316088be6975f2d3
[ "MIT" ]
7
2018-12-27T00:12:50.000Z
2022-03-29T13:13:42.000Z
1_15_Computer_Vision_Exercises/SobelGradientDirection.ipynb
Alyxion/Udacity_SelfDrivingCarEngineerNd
7da27ec7ee86fc65d07c9e1b316088be6975f2d3
[ "MIT" ]
null
null
null
1_15_Computer_Vision_Exercises/SobelGradientDirection.ipynb
Alyxion/Udacity_SelfDrivingCarEngineerNd
7da27ec7ee86fc65d07c9e1b316088be6975f2d3
[ "MIT" ]
8
2018-10-03T16:46:39.000Z
2021-01-11T18:29:42.000Z
7,140.105769
739,496
0.96096
[ [ [ "# Detection of line direction\n\nThe task here is to detect vertical lines by calculating the arctan of the sobel derivatives.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\n%matplotlib inline\n\n# Read in an image\nimage = mpimg.imread('data/signs_vehicles_xygrad.png')\n\n# Define a function that applies Sobel x and y, \n# then computes the direction of the gradient\n# and applies a threshold.\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the x and y gradients\n abs_x = np.abs(sobel_x)\n abs_y = np.abs(sobel_y)\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n sob_dir = np.arctan2(abs_y, abs_x)\n # 5) Create a binary mask where direction thresholds are met \n # 6) Return this mask as your binary_output image\n binary_output = np.zeros_like(sob_dir)\n binary_output[(sob_dir>=thresh[0]) & (sob_dir<=thresh[1])] = 1\n return binary_output\n \n# Run the function\ndir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))\n# Plot the result\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\nf.tight_layout()\nax1.imshow(image)\nax1.set_title('Original Image', fontsize=50)\nax2.imshow(dir_binary, cmap='gray')\nax2.set_title('Thresholded Grad. Dir.', fontsize=50)\nplt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)", "_____no_output_____" ] ] ]
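The direction threshold on its own also responds to weak, noisy gradients. A common refinement (added here as a sketch, not part of the original exercise) is to AND it with a gradient-magnitude threshold so that only strong, near-vertical edges survive; the threshold values below are illustrative.

```python
# Hedged sketch: combine the direction threshold with a gradient-magnitude threshold.
# Assumes `image` and `dir_threshold` from the cell above; threshold values are illustrative.
def mag_threshold(img, sobel_kernel=3, thresh=(30, 100)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(sobel_x**2 + sobel_y**2)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    binary_output = np.zeros_like(scaled)
    binary_output[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary_output

mag_binary = mag_threshold(image, sobel_kernel=15, thresh=(30, 100))
dir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
combined = np.zeros_like(dir_binary)
combined[(mag_binary == 1) & (dir_binary == 1)] = 1
plt.imshow(combined, cmap='gray')
```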
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
ec555bef15f61d387c079d87c37dfe1cf3e37785
5,810
ipynb
Jupyter Notebook
source/examples/basics/gog/geom_crossbar.ipynb
JetBrains/lets-plot-docs
73583bce5308d34b341d9f8a7249ccb34a95f504
[ "MIT" ]
2
2021-06-02T10:24:24.000Z
2021-11-08T09:50:22.000Z
source/examples/basics/gog/geom_crossbar.ipynb
JetBrains/lets-plot-docs
73583bce5308d34b341d9f8a7249ccb34a95f504
[ "MIT" ]
13
2021-05-25T19:49:50.000Z
2022-03-22T12:30:29.000Z
source/examples/basics/gog/geom_crossbar.ipynb
JetBrains/lets-plot-docs
73583bce5308d34b341d9f8a7249ccb34a95f504
[ "MIT" ]
4
2021-01-19T12:26:21.000Z
2022-03-19T07:47:52.000Z
32.458101
138
0.446127
[ [ [ "# geom_crossbar()", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nfrom lets_plot import *\nLetsPlot.setup_html()", "_____no_output_____" ], [ "df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')\nclass_df = df.groupby('class').hwy.agg(['min', 'median', 'max']).reset_index()", "_____no_output_____" ], [ "ggplot(class_df, aes(x='class')) + geom_crossbar(aes(ymin='min', middle='median', ymax='max'))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
ec5563bcaec63b320896f81b7cfcae9e2d1a734f
26,453
ipynb
Jupyter Notebook
student-projects/fall-2020/GGWP-Identify-Toxic-Behavior-in-Gaming/WebAppDeployment/ModelServer - Test.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
28
2020-06-15T23:53:36.000Z
2022-03-19T09:27:02.000Z
student-projects/fall-2020/GGWP-Identify-Toxic-Behavior-in-Gaming/WebAppDeployment/ModelServer - Test.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
4
2020-06-24T22:20:31.000Z
2022-02-28T01:37:36.000Z
student-projects/fall-2020/GGWP-Identify-Toxic-Behavior-in-Gaming/WebAppDeployment/ModelServer - Test.ipynb
UCBerkeley-SCET/DataX-Berkeley
f912d22c838b511d3ada4ecfa3548afd80437b74
[ "Apache-2.0" ]
78
2020-06-19T09:41:01.000Z
2022-02-05T00:13:29.000Z
54.768116
1,808
0.597815
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_union\nfrom scipy.sparse import hstack\nfrom sklearn import metrics\nfrom gensim.models.keyedvectors import KeyedVectors\nimport warnings\nimport joblib # for deserialization saved models \n", "_____no_output_____" ], [ "from mlserving import ServingApp\nfrom mlserving.predictors import RESTPredictor\n\nimport joblib # for deserialization saved models \n\n\nclass MyPredictor(RESTPredictor):\n def __init__(self):\n # Loading a saved model and other relevant pkls necessary for preprocessing.\n self.gloveModel = joblib.load('gloveModel.pkl')\n self.vectorizer1 = joblib.load('vectorizer.pkl')\n self.vectorizer2 = joblib.load('vectorizer2.pkl')\n self.feature_names = self.vectorizer1.get_feature_names()\n self.lr_insult = joblib.load('lr_insult.pkl')\n self.lr_obscene = joblib.load('lr_obscene.pkl')\n self.lr_threat = joblib.load('lr_threat.pkl')\n self.lr_toxic = joblib.load('lr_toxic.pkl')\n self.lr_identity_hate = joblib.load('lr_identity_hate.pkl')\n\n def get_word_weight(self, text):\n \"\"\"Returns a dictionary where keys are the words of the text and values are their weights.\"\"\"\n tfidf_matrix = self.vectorizer1.transform([text]).todense()\n feature_index = tfidf_matrix[0,:].nonzero()[1]\n tfidf_scores = zip([self.feature_names[i] for i in feature_index], [tfidf_matrix[0, x] for x in feature_index])\n return dict(tfidf_scores)\n \n def num_upper(self, text):\n \"\"\"Returns the number of capital letters in a string.\"\"\"\n num = 0\n for i in text:\n if i.isupper():\n num += 1\n return num\n \n def weighted_vector_mean(self, text):\n \"\"\"Gets the weighted vector mean of a sentence by averaging the word vectors according to Tfidf weights.\"\"\"\n sentence_vects = []\n sentence_weights = []\n words = text.split(\" \")\n words = [word for word in words if word in self.gloveModel.wv.vocab]\n\n text_dict = self.get_word_weight(text)\n total = sum(text_dict.values())\n text_dict = {key:(val/total) for key,val in text_dict.items()}\n\n for word in words:\n sentence_vects.append(self.gloveModel[word]) # get word vectors\n if word.lower() in text_dict.keys():\n sentence_weights.append(text_dict[word.lower()]) # get weights of words\n else:\n sentence_weights.append(0)\n\n if len(sentence_vects) > 0:\n return np.transpose(sentence_vects) @ sentence_weights / len(sentence_vects)\n else:\n return np.zeros(300)\n\n def create_df(self, text):\n \"\"\"Creates the dataframe with the input text. Df required for preprocessing.\"\"\"\n txt = text\n d = {'text': [txt]}\n df = pd.DataFrame(data=d)\n return df\n \n def generate_features(self, df):\n \"\"\"Generates features like proportioin of caps, caps, vector means etc. 
for the provided df containing Text.\"\"\"\n dfc = df.copy()\n # Cleaning text\n dfc['text'] = dfc['text'].str.replace(r\"[(\\.),(\\|)!:='&(\\*)(\\\")]\", \"\")\n dfc['text'] = dfc['text'].str.replace(\"\\n\", \"\")\n\n # Getting length\n dfc['len'] = dfc['text'].apply(len) - dfc['text'].str.count(\" \")\n len_min = 0\n len_max = 127\n dfc['len'] = (dfc['len'].values - len_min) / (len_max - len_min)\n\n # Getting proportion of caps\n dfc['caps'] = dfc['text'].apply(self.num_upper)\n dfc['proportion of caps'] = dfc['caps'] / dfc['len']\n\n # Accounting for division by 0\n dfc['proportion of caps'] = dfc['proportion of caps'].fillna(0)\n\n # Adding the 300D vector means, weighted by Tfidf weights\n dfc['vector mean'] = dfc['text'].apply(self.weighted_vector_mean)\n tmp = pd.DataFrame(dfc['vector mean'].tolist())\n dfc = dfc.join(tmp)\n dfc = dfc.drop(['vector mean', 'text', 'caps'], axis=1)\n return dfc\n \n def pre_process(self, input_data, req):\n \"\"\"Calls and coordinates the various steps required for preprocessing for our model.\"\"\"\n text = input_data['features']\n print(text)\n df_text = self.create_df(text)\n dfc_text = self.generate_features(df_text)\n t_text = df_text['text']\n t_vector = self.vectorizer2.transform(t_text)\n final_testing = hstack([t_vector, dfc_text[['len', 'proportion of caps']]])\n return final_testing\n# return input_data['features']\n\n def predict(self, processed_data, req):\n \"\"\"Takes the features to predict the appropriate labels as relevant to the text and returns as a JSON.\"\"\"\n count = 0\n results = {\n \"Obscenity\": False,\n \"Toxicity\": False,\n \"Identity Hate\": False,\n \"Threat\": False,\n \"Insult\": False,\n \"Count\": 0\n }\n if self.lr_obscene.predict(processed_data)[0] == 1:\n count += 1\n results[\"Obscenity\"] = True\n if self.lr_toxic.predict(processed_data)[0] == 1:\n count += 1;\n results[\"Toxicity\"] = True\n if self.lr_identity_hate.predict(processed_data)[0] == 1:\n count += 1;\n results[\"Identity Hate\"] = True\n if self.lr_threat.predict(processed_data)[0] == 1:\n count += 1;\n results[\"Threat\"] = True\n if self.lr_insult.predict(processed_data)[0] == 1:\n results[\"Insult\"] = True\n count += 1;\n results[\"Count\"] = count\n\n return results\n\n def post_process(self, prediction, req):\n return prediction\n\n", "_____no_output_____" ], [ "app = ServingApp()\napp.add_inference_handler('/api/v1/predict', MyPredictor())\napp.run()", "c:\\users\\dheeraj\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\sklearn\\base.py:334: UserWarning: Trying to unpickle estimator TfidfTransformer from version 0.22.1 when using version 0.23.2. This might lead to breaking code or invalid results. Use at your own risk.\n UserWarning)\nc:\\users\\dheeraj\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\sklearn\\base.py:334: UserWarning: Trying to unpickle estimator TfidfVectorizer from version 0.22.1 when using version 0.23.2. This might lead to breaking code or invalid results. Use at your own risk.\n UserWarning)\nc:\\users\\dheeraj\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\sklearn\\base.py:334: UserWarning: Trying to unpickle estimator FeatureUnion from version 0.22.1 when using version 0.23.2. This might lead to breaking code or invalid results. Use at your own risk.\n UserWarning)\n" ], [ "\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
ec55683f85df0e2df74afba7600def3ec036d0cd
25,241
ipynb
Jupyter Notebook
GoogleCloudPlatform/DataProc-Training/05_functions.ipynb
jpacerqueira/project_lost_saturn
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
[ "Apache-2.0" ]
1
2020-04-13T09:19:43.000Z
2020-04-13T09:19:43.000Z
GoogleCloudPlatform/DataProc-Training/05_functions.ipynb
jpacerqueira/project_lost_saturn
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
[ "Apache-2.0" ]
1
2020-10-28T12:45:32.000Z
2020-10-28T12:45:32.000Z
GoogleCloudPlatform/DataProc-Training/05_functions.ipynb
jpacerqueira/project_lost_saturn
ce23e2881242441c9c74b2f0a66af1ebc5d3f351
[ "Apache-2.0" ]
4
2020-03-14T05:17:37.000Z
2022-01-06T16:29:38.000Z
57.627854
1,610
0.543124
[ [ [ "## Migrating from Spark to BigQuery via Dataproc -- Part 5\n\n* [Part 1](01_spark.ipynb): The original Spark code, now running on Dataproc (lift-and-shift).\n* [Part 2](02_gcs.ipynb): Replace HDFS by Google Cloud Storage. This enables job-specific-clusters. (cloud-native)\n* [Part 3](03_automate.ipynb): Automate everything, so that we can run in a job-specific cluster. (cloud-optimized)\n* [Part 4](04_bigquery.ipynb): Load CSV into BigQuery, use BigQuery. (modernize)\n* [Part 5](05_functions.ipynb): Using Cloud Functions, launch analysis every time there is a new file in the bucket. (serverless)\n", "_____no_output_____" ], [ "### Catch-up cell", "_____no_output_____" ] ], [ [ "%%bash\nwget http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz\ngunzip kddcup.data_10_percent.gz\n#BUCKET='cloud-training-demos-ml' # CHANGE\nBUCKET = $(gcloud info --format='value(config.project)')[0]\ngsutil cp kdd* gs://$BUCKET/\nbq mk sparktobq\n#wget http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz\n#gunzip kddcup.data_10_percent.gz\n#BUCKET = $(gcloud info --format='value(config.project)')\n#gsutil cp kdd* gs://$BUCKET/", "BigQuery error in mk operation: Dataset\n'qwiklabs-gcp-576b6b9a967fad32:sparktobq' already exists.\n" ] ], [ [ "### Create reporting function", "_____no_output_____" ] ], [ [ "%%writefile main.py\n\nfrom google.cloud import bigquery\nimport google.cloud.storage as gcs\nimport tempfile\nimport os\n\ndef create_report(BUCKET, gcsfilename, tmpdir):\n \"\"\"\n Creates report in gs://BUCKET/ based on contents in gcsfilename (gs://bucket/some/dir/filename)\n \"\"\"\n # connect to BigQuery\n client = bigquery.Client()\n destination_table = client.get_table('sparktobq.kdd_cup')\n \n # Specify table schema. Autodetect is not a good idea for production code\n job_config = bigquery.LoadJobConfig()\n schema = [\n bigquery.SchemaField(\"duration\", \"INT64\"),\n ]\n for name in ['protocol_type', 'service', 'flag']:\n schema.append(bigquery.SchemaField(name, \"STRING\"))\n for name in 'src_bytes,dst_bytes,wrong_fragment,urgent,hot,num_failed_logins'.split(','):\n schema.append(bigquery.SchemaField(name, \"INT64\"))\n schema.append(bigquery.SchemaField(\"unused_10\", \"STRING\"))\n schema.append(bigquery.SchemaField(\"num_compromised\", \"INT64\"))\n schema.append(bigquery.SchemaField(\"unused_12\", \"STRING\"))\n for name in 'su_attempted,num_root,num_file_creations'.split(','):\n schema.append(bigquery.SchemaField(name, \"INT64\")) \n for fieldno in range(16, 41):\n schema.append(bigquery.SchemaField(\"unused_{}\".format(fieldno), \"STRING\"))\n schema.append(bigquery.SchemaField(\"label\", \"STRING\"))\n job_config.schema = schema\n\n # Load CSV data into BigQuery, replacing any rows that were there before\n job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n job_config.skip_leading_rows = 0\n job_config.source_format = bigquery.SourceFormat.CSV\n load_job = client.load_table_from_uri(gcsfilename, destination_table, job_config=job_config)\n print(\"Starting LOAD job {} for {}\".format(load_job.job_id, gcsfilename))\n load_job.result() # Waits for table load to complete.\n print(\"Finished LOAD job {}\".format(load_job.job_id))\n \n # connections by protocol\n sql = \"\"\"\n SELECT COUNT(*) AS count\n FROM sparktobq.kdd_cup\n GROUP BY protocol_type\n ORDER by count ASC \n \"\"\"\n connections_by_protocol = client.query(sql).to_dataframe()\n 
connections_by_protocol.to_csv(os.path.join(tmpdir,\"connections_by_protocol.csv\"))\n print(\"Finished analyzing connections\")\n \n # attacks plot\n sql = \"\"\"\n SELECT \n protocol_type, \n CASE label\n WHEN 'normal.' THEN 'no attack'\n ELSE 'attack'\n END AS state,\n COUNT(*) as total_freq,\n ROUND(AVG(src_bytes), 2) as mean_src_bytes,\n ROUND(AVG(dst_bytes), 2) as mean_dst_bytes,\n ROUND(AVG(duration), 2) as mean_duration,\n SUM(num_failed_logins) as total_failed_logins,\n SUM(num_compromised) as total_compromised,\n SUM(num_file_creations) as total_file_creations,\n SUM(su_attempted) as total_root_attempts,\n SUM(num_root) as total_root_acceses\n FROM sparktobq.kdd_cup\n GROUP BY protocol_type, state\n ORDER BY 3 DESC\n \"\"\"\n attack_stats = client.query(sql).to_dataframe()\n ax = attack_stats.plot.bar(x='protocol_type', subplots=True, figsize=(10,25))\n ax[0].get_figure().savefig(os.path.join(tmpdir,'report.png'));\n print(\"Finished analyzing attacks\")\n \n bucket = gcs.Client().get_bucket(BUCKET)\n for blob in bucket.list_blobs(prefix='sparktobq/'):\n blob.delete()\n for fname in ['report.png', 'connections_by_protocol.csv']:\n bucket.blob('sparktobq/{}'.format(fname)).upload_from_filename(os.path.join(tmpdir,fname))\n print(\"Uploaded report based on {} to {}\".format(gcsfilename, BUCKET))\n\n\ndef bigquery_analysis_cf(data, context):\n # check that trigger is for a file of interest\n bucket = data['bucket']\n name = data['name']\n if ('kddcup' in name) and not ('gz' in name):\n filename = 'gs://{}/{}'.format(bucket, data['name'])\n print(bucket, filename)\n with tempfile.TemporaryDirectory() as tmpdir:\n create_report(bucket, filename, tmpdir)", "Overwriting main.py\n" ], [ "%%writefile requirements.txt\ngoogle-cloud-bigquery\ngoogle-cloud-storage\npandas\nmatplotlib", "Writing requirements.txt\n" ], [ "# verify that the code in the CF works\nname='kddcup.data_10_percent'\nif 'kddcup' in name and not ('gz' in name):\n print(True)", "True\n" ] ], [ [ "## Test that the function endpoint works", "_____no_output_____" ] ], [ [ "# test that the function works\nimport main as bq\n\n#BUCKET='cloud-training-demos-ml' # CHANGE\nBUCKET_list = !gcloud info --format='value(config.project)'\nBUCKET=BUCKET_list[0]\n\ntry:\n bq.create_report(BUCKET, 'gs://{}/kddcup.data_10_percent'.format(BUCKET), \"/tmp\")\nexcept Exception as e:\n print(e.errors)", "[{'domain': 'global', 'message': 'Not found: Table qwiklabs-gcp-576b6b9a967fad32:sparktobq.kdd_cup', 'reason': 'notFound'}]\n" ] ], [ [ "## Deploy the cloud function", "_____no_output_____" ] ], [ [ "!gcloud functions deploy bigquery_analysis_cf --runtime python37 --trigger-resource $BUCKET --trigger-event google.storage.object.finalize", "Deploying function (may take a while - up to 2 minutes)...failed.\n\u001b[1;31mERROR:\u001b[0m (gcloud.functions.deploy) OperationError: code=13, message=Failed to configure trigger GCS Bucket: qwiklabs-gcp-576b6b9a967fad32\n" ] ], [ [ "## Try it out\n\nCopy the file to the bucket:", "_____no_output_____" ] ], [ [ "!gsutil rm -rf gs://$BUCKET/sparktobq\n!gsutil cp kddcup.data_10_percent gs://$BUCKET/", "BucketNotFoundException: 404 gs://qwiklabs-gcp-576b6b9a967fad32 bucket does not exist.\nCommandException: Encountered non-existent bucket during listing\nCopying file://kddcup.data_10_percent [Content-Type=application/octet-stream]...\nCaught ResumableUploadStartOverException for upload of file://kddcup.data_10_percent.\nChecking that bucket qwiklabs-gcp-576b6b9a967fad32 exists before retrying 
upload...\nDeleted tracker file /home/jupyter/.gsutil/tracker-files/upload_TRACKER_99d009f5de19b8ad80688322284d08a004eae2a4.ercent__JSON.url for resumable upload of file://kddcup.data_10_percent before retrying.\nBucketNotFoundException: 404 gs://qwiklabs-gcp-576b6b9a967fad32 bucket does not exist.\n" ] ], [ [ "Verify that the Cloud Function is being run. You can do this from the [Cloud Functions](https://console.cloud.google.com/functions/) part of the GCP Console.\n\nOnce the function is complete (in about 30 seconds), see if the output folder contains the report:", "_____no_output_____" ] ], [ [ "!gsutil ls gs://$BUCKET/sparktobq", "BucketNotFoundException: 404 gs://qwiklabs-gcp-576b6b9a967fad32 bucket does not exist.\n" ] ], [ [ "Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec557b55ad0318e27e06cc636d536cabec5047f3
598,379
ipynb
Jupyter Notebook
content/lessons/13-Visualization/WMC3-Folium-Map.ipynb
IST256/learn-python
b2373b5cf596a318976a816e3102cc704d6b9b57
[ "MIT" ]
14
2017-02-23T21:00:46.000Z
2021-03-19T09:29:40.000Z
content/lessons/13-Visualization/WMC3-Folium-Map.ipynb
IST256/learn-python
b2373b5cf596a318976a816e3102cc704d6b9b57
[ "MIT" ]
null
null
null
content/lessons/13-Visualization/WMC3-Folium-Map.ipynb
IST256/learn-python
b2373b5cf596a318976a816e3102cc704d6b9b57
[ "MIT" ]
38
2017-02-03T13:49:19.000Z
2021-08-15T16:47:56.000Z
754.576293
159,347
0.97514
[ [ [ "# Watch Me Code 3: Mapping with Folium\n\nFolium is a Python wrapper library for the OpenStreetMaps api. It allows you to place data on a map in a variety of ways.\n", "_____no_output_____" ] ], [ [ "! pip install folium", "Requirement already satisfied: folium in c:\\anaconda3\\lib\\site-packages\nRequirement already satisfied: Jinja2 in c:\\anaconda3\\lib\\site-packages (from folium)\nRequirement already satisfied: MarkupSafe in c:\\anaconda3\\lib\\site-packages (from Jinja2->folium)\n" ], [ "import folium\nimport pandas as pd\nimport random ", "_____no_output_____" ], [ "# we need to center the map in the middle of the US. I googled for the location.\nCENTER_US = (39.8333333,-98.585522)\nlondon = (51.5074, -0.1278)\nmap = folium.Map(location=CENTER_US, zoom_start=4)\nmap", "_____no_output_____" ] ], [ [ "## Map Pins", "_____no_output_____" ] ], [ [ "# read in a data file of IP address to locations.\ndata = pd.read_csv('https://raw.githubusercontent.com/mafudge/datasets/master/clickstream/ip_lookup.csv')\ndata.sample(5)", "_____no_output_____" ], [ "from IPython.display import display\n# Let's place each location on the map\nfor row in data.to_records():\n pos = (row['ApproxLat'],row['ApproxLng'])\n marker = folium.Marker(location=pos, \n popup=f\"{row['City']},{row['State']}\"\n )\n map.add_child(marker)\ndisplay(map)", "_____no_output_____" ], [ "# Same thing with a different icon and colors. Icons come from http://fontawesome.io/icons/ but its an older version.\ncolors = ['red', 'blue', 'green', 'purple', 'orange', 'darkred',\n 'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue',\n 'darkpurple', 'pink', 'lightblue', 'lightgreen',\n 'gray', 'black', 'lightgray']\nfor row in data.to_records():\n pos = (row['ApproxLat'],row['ApproxLng'])\n marker = folium.Marker(location=pos, \n popup=\"%s, %s\" % (row['City'],row['State']),\n icon = folium.Icon(color = random.choice(colors), icon='user')\n )\n map.add_child(marker)\nmap", "_____no_output_____" ], [ "# There are other map tiles available. See https://folium.readthedocs.io/en/latest/quickstart.html\n# Instead of Markers we use circles colors are HTML color codes http://htmlcolorcodes.com/\nCENTER_US = (39.8333333,-98.585522)\nmap2 = folium.Map(location=CENTER_US, zoom_start=4)\nfor row in data.to_records():\n map2.add_child(folium.CircleMarker(location=(row['ApproxLat'],row['ApproxLng']), \n popup=row['City'], radius=10, color='#0000FF', fill_color='#FF3333'))\n \nmap2", "_____no_output_____" ] ], [ [ "# Choropleths\n\nChoropleths are cartographic overlays based on boundries defined in a geo JSON file.\n", "_____no_output_____" ] ], [ [ "# State level geo-json overlay choropleth\nCENTER_US = (39.8333333,-98.585522)\nstate_geojson = 'WMC3-us-states.json'\nmap3 = folium.Map(location=CENTER_US, zoom_start=4, tiles=' Open Street Map')\nmap3.choropleth(geo_data=state_geojson)\nmap3\n", "_____no_output_____" ], [ "states = pd.read_csv('https://raw.githubusercontent.com/jasonong/List-of-US-States/master/states.csv')\nstate_counts = pd.DataFrame( {'Counts' : data['State']. 
value_counts() } ).sort_index()\nstate_counts['StateCode'] = state_counts.index\nstate_data = states.merge(state_counts, how=\"left\", left_on='Abbreviation', right_on='StateCode')\nstate_data = state_data[['Abbreviation','Counts']]\nstate_data = state_data.fillna(0)\nstate_data\n", "_____no_output_____" ], [ "CENTER_US = (39.8333333,-98.585522)\nstate_geojson = 'WMC3-us-states.json'\nmap3 = folium.Map(location=CENTER_US, zoom_start=4, tiles=' Open Street Map')\nfolium.Choropleth(geo_data=state_geojson,data=state_data, columns=['Abbreviation','Counts'], \n key_on ='feature.id', fill_color='BuGn', legend_name='Website Visitors').add_to(map3)\nmap3", "_____no_output_____" ], [ "# Here's a more straigtforward example with unemployment data:\nunemployment = pd.read_csv('https://raw.githubusercontent.com/wrobstory/vincent/master/examples/data/US_Unemployment_Oct2012.csv')\nstate_geojson = 'WMC3-us-states.json'\nmap4 = folium.Map(location=CENTER_US, zoom_start=4, tiles=' Open Street Map')\nfolium.Choropleth(geo_data=state_geojson,data=unemployment, \n columns=['State','Unemployment'], key_on ='feature.id', fill_color='YlGn', \n legend_name='2012 US Unemployment Rate %').add_to(map4)\nmap4\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec557e09ede377195d60b2a4555a2208a9b8055a
26,008
ipynb
Jupyter Notebook
TheNumbers_Missing_Data_Recovery.ipynb
oupton/startup-ds
2aca6b119baabb187779f8afcb816c2bc25dc34d
[ "MIT" ]
2
2018-04-26T03:17:11.000Z
2018-04-26T03:17:12.000Z
TheNumbers_Missing_Data_Recovery.ipynb
oupton/startup-ds
2aca6b119baabb187779f8afcb816c2bc25dc34d
[ "MIT" ]
null
null
null
TheNumbers_Missing_Data_Recovery.ipynb
oupton/startup-ds
2aca6b119baabb187779f8afcb816c2bc25dc34d
[ "MIT" ]
null
null
null
33.173469
213
0.384536
[ [ [ "import pandas as pd\nimport numpy as np\nimport os\nimport ast\n#from joblib import Parallel, delayed\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport ast\n\nfrom datetime import date, datetime", "_____no_output_____" ], [ "# Recovering misssing financial data from data scraped from the-numbers.com", "_____no_output_____" ], [ "missing = pd.read_csv('data/missing_financial_data.csv')", "_____no_output_____" ], [ "all_movies = pd.read_csv('data/movies_metadata.csv')", "/Users/shahidhn/anaconda/envs/py27/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2718: DtypeWarning: Columns (10) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ], [ "the_numbers = pd.read_csv('data/the_numbers_data.csv')", "_____no_output_____" ], [ "all_ids = all_movies['imdb_id'].tolist()\nmissing_names = [all_movies['title'][all_ids.index(id)] for id in missing['recovered_ids']]\nmissing['title'] = missing_names", "_____no_output_____" ], [ "the_numbers_names = the_numbers['name'].tolist()", "_____no_output_____" ], [ "to_add = pd.DataFrame(columns=missing.dtypes.index.tolist())\n\n# Name, budget, revenue (worldwide gross), imdb_id\nfor title in missing_names:\n if title in the_numbers_names:\n numbers_i = the_numbers_names.index(title)\n budget = the_numbers.iloc[numbers_i]['budget']\n revenue = the_numbers.iloc[numbers_i]['worldwide']\n if budget > 0 and revenue > 0:\n '''\n temp = []\n\n temp.append(the_numbers.iloc[numbers_i]['name'])\n temp.append(the_numbers.iloc[numbers_i]['budget'])\n temp.append(the_numbers.iloc[numbers_i]['worldwide'])\n '''\n\n missing_i = missing_names.index(title)\n add_row = missing.iloc[missing_i]\n add_row['budget'] = budget\n add_row['revenue'] = revenue\n to_add = to_add.append(add_row)", "/Users/shahidhn/anaconda/envs/py27/lib/python2.7/site-packages/ipykernel_launcher.py:20: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n/Users/shahidhn/anaconda/envs/py27/lib/python2.7/site-packages/ipykernel_launcher.py:21: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n" ], [ "to_add.head()", "_____no_output_____" ], [ "# to_add.to_csv(path_or_buf='data/the_numbers_recovered.csv', mode='w+')", "_____no_output_____" ], [ "# Adding date features\n\ndate_df = pd.read_csv(os.path.join('data', 'movies_metadata.csv'))[['imdb_id', 'release_date']]", "_____no_output_____" ], [ "date_df.head()", "_____no_output_____" ], [ "to_add_dates = []\nfor id in to_add['recovered_ids'].tolist():\n i = date_df['imdb_id'].tolist().index(id)\n to_add_dates.append(date_df.iloc[i].release_date)", "_____no_output_____" ], [ "to_add['release_date'] = to_add_dates", "_____no_output_____" ], [ "epoch = datetime(1915, 2, 8)\ndaysSinceEpoch = []\n\ndates = []\nfor strdate in to_add_dates:\n dates.append(datetime.strptime(strdate, '%Y-%m-%d'))\n \nfor date in dates:\n daysSinceEpoch.append((date - epoch).days)\n\nto_add['daysSinceStart'] = daysSinceEpoch", "_____no_output_____" ], [ "dayInYear = []\nfor mdate in dates:\n dayInYear.append((mdate - datetime(mdate.year, 1, 1)).days)\n\nto_add['dayInYear']= dayInYear", "_____no_output_____" ], [ 
"to_add.head()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec55b36039109c523fb76d8303ff2ff3298b9d9f
54,908
ipynb
Jupyter Notebook
tnp/tnp.ipynb
mattwthompson/mbuild-examples
79d95f15d62c7388cd7d5219c94e7148a916b2bb
[ "MIT" ]
null
null
null
tnp/tnp.ipynb
mattwthompson/mbuild-examples
79d95f15d62c7388cd7d5219c94e7148a916b2bb
[ "MIT" ]
1
2019-09-18T04:59:45.000Z
2019-09-18T04:59:45.000Z
mbuild/examples/tnp/tnp.ipynb
mikemhenry/mbuild
68ecd750f7d41d344aaf58cf7a0414e1a6c06ffb
[ "MIT" ]
null
null
null
343.175
25,491
0.373261
[ [ [ "from numpy import sqrt, pi\n\nimport mbuild as mb\n\nfrom mbuild.examples.tnp.bead import Bead\nfrom mbuild.examples.tnp.sphere import Sphere\n\n\nclass Tnp(mb.Compound):\n \"\"\"A spherical nanoparticle with tethered chains. \"\"\"\n def __init__(self, ball_radius=10, n_chains=4, chain_length=10, monomer=None):\n \"\"\"Initialize a tethered nanoparticle.\n\n Args:\n ball_radius (float): Radius of the nanoparticle.\n n_chains (int): Number of chains to attach to the nanoparticle.\n chain_length (int): Length of the chains being attached.\n monomer (Compound, optional): Type of chain being attached.\n \"\"\"\n super(Tnp, self).__init__()\n\n if not monomer:\n monomer = Bead(particle_kind='t')\n\n n = 129 # TODO: make this tweakable\n self.add(Sphere(n=n, radius=ball_radius, port_distance_from_surface=0.7), label=\"np\")\n\n # Generate 65 points on the surface of a unit sphere.\n pattern = mb.SpherePattern(n_chains)\n\n # Magnify it a bit.\n pattern.scale(ball_radius)\n chain_proto = mb.lib.recipes.polymer.Polymer(monomer, n=chain_length)\n\n # Apply chains to pattern.\n chain_protos, empty_backfill = pattern.apply_to_compound(chain_proto,\n guest_port_name=\"down\", host=self['np'])\n self.add(chain_protos)\n\n self.generate_bonds('np', 'np', sqrt(4 * ball_radius ** 2 * pi / n) - 0.5,\n sqrt(4 * ball_radius**2 * pi / n) + 0.5)\n self.generate_bonds('np', 't', 0.1, 0.3)\n self.generate_bonds('t', 'np', 0.1, 0.3)", "_____no_output_____" ], [ "nano_particle = Tnp(n_chains=5, chain_length=10)\nprint(nano_particle)\nnano_particle.visualize()", "<Tnp 179 particles, non-periodic, 312 bonds, id: 4369860592>\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ec55bab657b6fa9f176086e62cb7dbdf057b6f44
6,077
ipynb
Jupyter Notebook
cats_dogs.ipynb
emilios8/BootstrapPortfolioProject
f792c60e770260d88c9db04f4fd9ba180fa05679
[ "MIT" ]
null
null
null
cats_dogs.ipynb
emilios8/BootstrapPortfolioProject
f792c60e770260d88c9db04f4fd9ba180fa05679
[ "MIT" ]
null
null
null
cats_dogs.ipynb
emilios8/BootstrapPortfolioProject
f792c60e770260d88c9db04f4fd9ba180fa05679
[ "MIT" ]
null
null
null
30.084158
260
0.483298
[ [ [ "<a href=\"https://colab.research.google.com/github/emilios8/BootstrapPortfolioProject/blob/master/cats_dogs.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout\n\nimport os\nimport zipfile\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimage\n\n%matplotlib inline", "_____no_output_____" ], [ "!wget --no-check-certificate https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O /tmp/cats_and_dogs_filtered.zip", "--2019-04-12 11:04:58-- https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.141.128, 2607:f8b0:400c:c06::80\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.141.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 68606236 (65M) [application/zip]\nSaving to: ‘/tmp/cats_and_dogs_filtered.zip’\n\n\r /tmp/cats 0%[ ] 0 --.-KB/s \r /tmp/cats_ 59%[==========> ] 38.77M 194MB/s \r/tmp/cats_and_dogs_ 100%[===================>] 65.43M 213MB/s in 0.3s \n\n2019-04-12 11:04:59 (213 MB/s) - ‘/tmp/cats_and_dogs_filtered.zip’ saved [68606236/68606236]\n\n" ], [ "#Dekompresja za pomocą biblioteki ZLIB :)", "_____no_output_____" ], [ "local_zip = '/tmp/cats_and_dogs_filtered.zip'\nzip_ = zipfile.ZipFile(local_zip)\nzip_.extractall()\nzip_.close()", "_____no_output_____" ], [ "!ls /content/cats_and_dogs_filtered/", "train validation vectorize.py\n" ], [ "# Ścieżki dostepu do plików\n\nbase_dir = '/content/cats_and_dogs_filtered'\ntrain_dir = os.path.join(base_dir, 'train')\n\ntrain_cats_dir = os.path.join(train_dir, 'cats')\ntrain_dogs_dir = os.path.join(train_dir, 'dogs')\n\nvalidation_dir = os.path.join(base_dir, 'validation')\nvalidation_cats_dir = os.path.join(validation_dir, 'cats')\nvalidation_dogs_dir = os.path.join(validation_dir, 'dogs')\n\n#base_dir, train_cats_dir, train_dogs_dir, validation_dir, validation_cats_dir, validation_dogs_dir\n", "_____no_output_____" ], [ "# Wczytywanie i wyświetlanie 25 zdjęć w postaci siatki 5 x 5.\n# -----------------------------------------------------------\n\nall_cat_files = os.listdir(train_cats_dir)\ncats_files = [os.path.join(train_cats_dir, file) for file in all_cat_files][:25]\n\nall_dog_files = os.listdir(train_dogs_dir)\ndogs_files = [os.path.join(train_dogs_dir, file) for file in all_dog_files][:25]\n\nfor file in all_cat_files:\n plt.subplot(5, 5, idx+1)\n img = mpimage.imread\n \n", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec55e2734ef2a7623b904f1989668933b15f3253
9,695
ipynb
Jupyter Notebook
notebooks/preproc-nn-sequences-fasttext.ipynb
MinuteswithMetrics/kaggle-quora-question-pairs
009bf3bd029d7598293f1944596d2db31aaf5710
[ "MIT" ]
125
2017-06-21T08:14:19.000Z
2021-10-20T03:46:51.000Z
notebooks/preproc-nn-sequences-fasttext.ipynb
GUOhm230/quora-question-pairs
739538a4566ead453a58139bbc8d99b8141e4368
[ "MIT" ]
6
2018-03-20T22:05:08.000Z
2018-11-09T06:02:32.000Z
notebooks/preproc-nn-sequences-fasttext.ipynb
GUOhm230/quora-question-pairs
739538a4566ead453a58139bbc8d99b8141e4368
[ "MIT" ]
39
2017-06-21T19:35:39.000Z
2021-04-30T13:15:38.000Z
20.984848
219
0.568128
[ [ [ "# Preprocessing: FastText Sequences & Embeddings", "_____no_output_____" ], [ "Based on the tokenized questions and a pre-built word embedding database, build fixed-length (padded) sequences of word indices for each question, as well as a lookup matrix that maps word indices to word vectors.", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ], [ "This utility package imports `numpy`, `pandas`, `matplotlib` and a helper `kg` module into the root namespace.", "_____no_output_____" ] ], [ [ "from pygoose import *", "_____no_output_____" ], [ "from gensim.models.wrappers.fasttext import FastText", "_____no_output_____" ] ], [ [ "Hide all GPUs from TensorFlow to not automatically occupy any GPU RAM.", "_____no_output_____" ] ], [ [ "kg.gpu.cuda_disable_gpus()", "_____no_output_____" ], [ "from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences", "Using TensorFlow backend.\n" ] ], [ [ "## Config", "_____no_output_____" ], [ "Automatically discover the paths to various data folders and compose the project structure.", "_____no_output_____" ] ], [ [ "project = kg.Project.discover()", "_____no_output_____" ] ], [ [ "The maximum allowed size of the embedding matrix and the maximum length our sequences will be padded/trimmed to.", "_____no_output_____" ] ], [ [ "MAX_VOCAB_SIZE = 125000\nMAX_SEQUENCE_LENGTH = 30", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ], [ "Preprocessed and tokenized questions. Stopwords should be kept for neural models.", "_____no_output_____" ] ], [ [ "tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_train.pickle')\ntokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_test.pickle')", "_____no_output_____" ] ], [ [ "Word embedding database queried from the trained FastText model.", "_____no_output_____" ] ], [ [ "embedding_model = FastText.load_word2vec_format(project.aux_dir + 'fasttext_vocab.vec')", "_____no_output_____" ], [ "EMBEDDING_DIM = len(embedding_model['apple'])", "_____no_output_____" ] ], [ [ "## Build features", "_____no_output_____" ], [ "### Collect all texts", "_____no_output_____" ] ], [ [ "texts_q1_train = [' '.join(pair[0]) for pair in tokens_train]\ntexts_q2_train = [' '.join(pair[1]) for pair in tokens_train]", "_____no_output_____" ], [ "texts_q1_test = [' '.join(pair[0]) for pair in tokens_test]\ntexts_q2_test = [' '.join(pair[1]) for pair in tokens_test]", "_____no_output_____" ], [ "unique_question_texts = list(set(texts_q1_train + texts_q2_train + texts_q1_test + texts_q2_test))", "_____no_output_____" ] ], [ [ "### Create question sequences", "_____no_output_____" ] ], [ [ "tokenizer = Tokenizer(\n num_words=MAX_VOCAB_SIZE,\n split=' ',\n lower=True,\n char_level=False,\n)", "_____no_output_____" ], [ "tokenizer.fit_on_texts(unique_question_texts)", "_____no_output_____" ], [ "sequences_q1_train = tokenizer.texts_to_sequences(texts_q1_train)\nsequences_q2_train = tokenizer.texts_to_sequences(texts_q2_train)", "_____no_output_____" ], [ "sequences_q1_test = tokenizer.texts_to_sequences(texts_q1_test)\nsequences_q2_test = tokenizer.texts_to_sequences(texts_q2_test)", "_____no_output_____" ] ], [ [ "### Create embedding lookup matrix", "_____no_output_____" ] ], [ [ "num_words = min(MAX_VOCAB_SIZE, len(tokenizer.word_index))", "_____no_output_____" ] ], [ [ "Allocate an embedding matrix. 
Include the NULL word.", "_____no_output_____" ] ], [ [ "embedding_matrix = np.zeros((num_words + 1, EMBEDDING_DIM))", "_____no_output_____" ] ], [ [ "Fill the matrix using the vectors for individual words.", "_____no_output_____" ] ], [ [ "for word, index in progressbar(tokenizer.word_index.items()):\n if word in embedding_model.vocab:\n embedding_matrix[index] = embedding_model[word]", "100%|██████████| 101563/101563 [00:00<00:00, 153373.20it/s]\n" ] ], [ [ "## Save features", "_____no_output_____" ], [ "Word embedding lookup matrix.", "_____no_output_____" ] ], [ [ "kg.io.save(embedding_matrix, project.aux_dir + 'fasttext_vocab_embedding_matrix.pickle')", "_____no_output_____" ] ], [ [ "Padded word index sequences.", "_____no_output_____" ] ], [ [ "sequences_q1_padded_train = pad_sequences(sequences_q1_train, maxlen=MAX_SEQUENCE_LENGTH)\nsequences_q2_padded_train = pad_sequences(sequences_q2_train, maxlen=MAX_SEQUENCE_LENGTH)", "_____no_output_____" ], [ "sequences_q1_padded_test = pad_sequences(sequences_q1_test, maxlen=MAX_SEQUENCE_LENGTH)\nsequences_q2_padded_test = pad_sequences(sequences_q2_test, maxlen=MAX_SEQUENCE_LENGTH)", "_____no_output_____" ], [ "kg.io.save(sequences_q1_padded_train, project.preprocessed_data_dir + 'sequences_q1_fasttext_train.pickle')\nkg.io.save(sequences_q2_padded_train, project.preprocessed_data_dir + 'sequences_q2_fasttext_train.pickle')", "_____no_output_____" ], [ "kg.io.save(sequences_q1_padded_test, project.preprocessed_data_dir + 'sequences_q1_fasttext_test.pickle')\nkg.io.save(sequences_q2_padded_test, project.preprocessed_data_dir + 'sequences_q2_fasttext_test.pickle')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec55eb191729125d917642c4750d0b8e0d9c739c
1,184
ipynb
Jupyter Notebook
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Configuringa-a-Wireless-Router-and-Client.ipynb
manipiradi/Online-Courses-Learning
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
[ "MIT" ]
331
2019-10-22T09:06:28.000Z
2022-03-27T13:36:03.000Z
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Configuringa-a-Wireless-Router-and-Client.ipynb
manipiradi/Online-Courses-Learning
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
[ "MIT" ]
8
2020-04-10T07:59:06.000Z
2022-02-06T11:36:47.000Z
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Configuringa-a-Wireless-Router-and-Client.ipynb
manipiradi/Online-Courses-Learning
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
[ "MIT" ]
572
2019-07-28T23:43:35.000Z
2022-03-27T22:40:08.000Z
20.067797
103
0.535473
[ [ [ "#### 1. What three features can be implemented to help protect your wireless network? (Choose 3.)", "_____no_output_____" ], [ "##### Ans: \n- add a passphrase\n- enable MAC address filtering\n- disable SSID broadcasting", "_____no_output_____" ], [ "#### 2. What two statements are true of the SSID? (Choose 2.)", "_____no_output_____" ], [ "##### Ans: \n- The SSID identifies a specific wireless network or the name of the network.\n- The SSID is case-sensitive.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ] ]
ec55ec6a0af0bc65554cdbf054129d16b5c4f83a
4,869
ipynb
Jupyter Notebook
Laborator9/Assignment/Laborator9.ipynb
MateiSirbu/IDS_Sirbu_Tataru
6e5435e02165b4db3aeb2d2f0d009f5ce04ed21a
[ "MIT" ]
null
null
null
Laborator9/Assignment/Laborator9.ipynb
MateiSirbu/IDS_Sirbu_Tataru
6e5435e02165b4db3aeb2d2f0d009f5ce04ed21a
[ "MIT" ]
null
null
null
Laborator9/Assignment/Laborator9.ipynb
MateiSirbu/IDS_Sirbu_Tataru
6e5435e02165b4db3aeb2d2f0d009f5ce04ed21a
[ "MIT" ]
null
null
null
41.974138
762
0.675293
[ [ [ "# Laborator 9", "_____no_output_____" ], [ "## Regression models\n\nUse the following datasets:\n1. [CPU Computer Hardware](https://archive.ics.uci.edu/ml/datasets/Computer+Hardware); exclude from the dataset the columns: vendor name, model name, estimated relative performance; the \"published relative performance\" column will be estimated.\n1. [Boston Housing](http://archive.ics.uci.edu/ml/machine-learning-databases/housing/)\n1. [Wisconsin Breast Cancer](http://www.dcc.fc.up.pt/~ltorgo/Regression/DataSets.html); look for Wisconsin Breast Cancer in the left-hand panel and follow the steps in \"My personal Notes\"\n1. [Communities and Crime](http://archive.ics.uci.edu/ml/datasets/communities+and+crime); delete the first 5 dimensions and the features with missing values.\n\nFor each dataset apply at least 5 regression models from scikit learn. For each one report: mean absolute error, mean squared error, median absolute error - see [sklearn.metrics](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) - using 5 fold cross validation. The hyperparameter values must be searched with grid search (cv=3) and random search (n_iter chosen by you). The metric used for the hyperparameter search will be mean squared error. Report the averages of the results both for the training folds and for the test folds; hint: you can use the `cross_validate` method with the parameter `return_train_score=True`, and as model an object of type `GridSearchCV` or `RandomizedSearchCV`.\n\nThe results will be placed in a dataframe. In an intermediate state, the values will be computed with a minus sign: for implementation reasons, the sklearn library turns the scores into negative numbers; see the image below:\n\n![intermediate report](./images/cpu_intermediate_blurred.png)\n\n\nThe values will be brought back to a positive range, then the maximum and minimum ones will be marked; as a guideline, the image below can be used, showing the dataframe displayed in the notebook; you may use other dataframe styling options such as those at https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html#. \n\nA final report will be created in HTML or PDF format - separate file(s). The report must contain at a minimum: the name of the dataset and the dataframe object; preferably keeping the color marking produced in the notebook.\n\n![report](./images/cpu_results_blurred.png)\n\nGrading:\n1. 20 points are awarded by default.\n1. Optimization and performance quantification of the models: 3 points for each dataset + model combination = 60 points\n1. Model documentation: number of models * 2 points = 10 points. Document each of the models used in the Jupyter notebook, in Romanian. You may create a separate section with the documentation of the algorithms. Each model must have a description of at least 20 lines, at least one associated image and at least 2 bibliographic references.\n1. 10 points: export to HTML or PDF format.\n\n", "_____no_output_____" ], [ "*Grading:* the solution will be uploaded to the e-learning platform in the week of May 11-15.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
ec55ecae9aa6c869e15f25a2c282c953993aacdd
172,575
ipynb
Jupyter Notebook
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DisenWang/tweet_analysis
e4fdad6d6bf4b6ae01c821b5162c676ca37b32e4
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DisenWang/tweet_analysis
e4fdad6d6bf4b6ae01c821b5162c676ca37b32e4
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Untitled-checkpoint.ipynb
DisenWang/tweet_analysis
e4fdad6d6bf4b6ae01c821b5162c676ca37b32e4
[ "MIT" ]
null
null
null
184.769807
69,910
0.868347
[ [ [ "# General:\nimport tweepy # To consume Twitter's API\nimport pandas as pd # To handle data\nimport numpy as np # For number computing\n\n# For plotting and visualization:\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "# We import our access keys:\nfrom credentials import * # This will allow us to use the keys as variables\n\n# API's setup:\ndef twitter_config():\n \"\"\"\n Utility function to setup the Twitter's API\n with our access keys provided.\n \"\"\"\n # Authentication and access using keys:\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\n # Return API with authentication:\n api = tweepy.API(auth)\n return api", "_____no_output_____" ], [ "# Creamos un objeto extractor:\nextractor = twitter_config()\n\n# Creamos una lista de tweets:\ntweets = extractor.user_timeline(screen_name=\"rogerfederer\", count=200)\nprint(\"Tweets obtenidos: {}.\\n\".format(len(tweets)))\n\n# Imprimimos los primeros 5 tweets:\nprint(\"Los primeros 5 tweets:\\n\")\nfor tweet in tweets[:5]:\n print(tweet.text)\n print()", "Tweets obtenidos: 200.\n\nLos primeros 5 tweets:\n\nIt's been a long road, and sometimes windy, but feels surreal to be back at the top. I'm just happy to be healthy a… https://t.co/g0q4mIKI6f\n\nApparently I'm the oldest tennis player with a #1️⃣ ranking. Somebody might have mentioned that to me already but I… https://t.co/sZ3NFoddzk\n\n🇨🇭🥇 woooooow @dariocologna https://t.co/Nr79FCQ9lg\n\n🎿🇨🇭🥈 @BeatFeuz yeaaaahhhhh https://t.co/53VlXTogYt\n\n🎿🇨🇭🥈💪 Congratulations Wendy https://t.co/MCiMrmx74Q\n\n" ], [ "# We create a pandas dataframe as follows:\ndata = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])\n\n# We display the first 10 elements of the dataframe:\ndisplay(data.head(10))", "_____no_output_____" ], [ "# Internal methods of a single tweet object:\nprint(dir(tweets[0]))", "['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_api', '_json', 'author', 'contributors', 'coordinates', 'created_at', 'destroy', 'entities', 'favorite', 'favorite_count', 'favorited', 'geo', 'id', 'id_str', 'in_reply_to_screen_name', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'in_reply_to_user_id', 'in_reply_to_user_id_str', 'is_quote_status', 'lang', 'parse', 'parse_list', 'place', 'retweet', 'retweet_count', 'retweeted', 'retweets', 'source', 'source_url', 'text', 'truncated', 'user']\n" ], [ "# We print info from the first tweet:\nprint(tweets[0].id)\nprint(tweets[0].created_at)\nprint(tweets[0].source)\nprint(tweets[0].favorite_count)\nprint(tweets[0].retweet_count)\nprint(tweets[0].geo)\nprint(tweets[0].coordinates)\nprint(tweets[0].entities)", "964664653107249153\n2018-02-17 00:55:50\nTwitter for iPhone\n15416\n3434\nNone\nNone\n{'hashtags': [], 'symbols': [], 'user_mentions': [], 'urls': [{'url': 'https://t.co/g0q4mIKI6f', 'expanded_url': 'https://twitter.com/i/web/status/964664653107249153', 'display_url': 'twitter.com/i/web/status/9…', 'indices': [117, 140]}]}\n" ], [ "# We add relevant data:\ndata['len'] = np.array([len(tweet.text) for tweet in tweets])\ndata['ID'] = np.array([tweet.id for tweet 
in tweets])\ndata['Date'] = np.array([tweet.created_at for tweet in tweets])\ndata['Source'] = np.array([tweet.source for tweet in tweets])\ndata['Likes'] = np.array([tweet.favorite_count for tweet in tweets])\ndata['RTs'] = np.array([tweet.retweet_count for tweet in tweets])", "_____no_output_____" ], [ "# Display of first 10 elements from dataframe:\ndisplay(data.head(10))", "_____no_output_____" ], [ "# We extract the mean of lenghts:\nmean = np.mean(data['len'])\n\nprint(\"The lenght's average in tweets: {}\".format(mean))", "The lenght's average in tweets: 74.04\n" ], [ "# We extract the tweet with more FAVs and more RTs:\n\nfav_max = np.max(data['Likes'])\nrt_max = np.max(data['RTs'])\n\nfav = data[data.Likes == fav_max].index[0]\nrt = data[data.RTs == rt_max].index[0]\n\n# Max FAVs:\nprint(\"The tweet with more likes is: \\n{}\".format(data['Tweets'][fav]))\nprint(\"Number of likes: {}\".format(fav_max))\nprint(\"{} characters.\\n\".format(data['len'][fav]))\n\n# Max RTs:\nprint(\"The tweet with more retweets is: \\n{}\".format(data['Tweets'][rt]))\nprint(\"Number of retweets: {}\".format(rt_max))\nprint(\"{} characters.\\n\".format(data['len'][rt]))", "The tweet with more likes is: \n🏆20. ❤️ https://t.co/WqUiSo3fd5\nNumber of likes: 508264\n31 characters.\n\nThe tweet with more retweets is: \n#19 tastes great https://t.co/3Hv3lM5Rk9\nNumber of retweets: 145382\n40 characters.\n\n" ], [ "# We create time series for data:\n\ntlen = pd.Series(data=data['len'].values, index=data['Date'])\ntfav = pd.Series(data=data['Likes'].values, index=data['Date'])\ntret = pd.Series(data=data['RTs'].values, index=data['Date'])\n", "_____no_output_____" ], [ "# Lenghts along time:\ntlen.plot(figsize=(16,4), color='r');", "_____no_output_____" ], [ "# Likes vs retweets visualization:\ntfav.plot(figsize=(16,4), label=\"Likes\", legend=True)\ntret.plot(figsize=(16,4), label=\"Retweets\", legend=True);", "_____no_output_____" ], [ "# We obtain all possible sources:\nsources = []\nfor source in data['Source']:\n if source not in sources:\n sources.append(source)\n\n# We print sources list:\nprint(\"Creation of content sources:\")\nfor source in sources:\n print(\"* {}\".format(source))", "Creation of content sources:\n* Twitter for iPhone\n* Tweetbot for iΟS\n* Media Studio\n* Twitter Web Client\n* Twitter for iPhone\n* TweetDeck\n" ], [ "# We create a numpy vector mapped to labels:\npercent = np.zeros(len(sources))\n\nfor source in data['Source']:\n for index in range(len(sources)):\n if source == sources[index]:\n percent[index] += 1\n pass\n\npercent /= 100\n\n# Pie chart:\npie_chart = pd.Series(percent, index=sources, name='Sources')\npie_chart.plot.pie(fontsize=11, autopct='%.2f', figsize=(6, 6));", "_____no_output_____" ], [ "from textblob import TextBlob\nimport re\n\ndef clean_tweet(tweet):\n '''\n Utility function to clean the text in a tweet by removing \n links and special characters using regex.\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\ndef analize_sentiment(tweet):\n '''\n Utility function to classify the polarity of a tweet\n using textblob.\n '''\n analysis = TextBlob(clean_tweet(tweet))\n if analysis.sentiment.polarity > 0:\n return 1\n elif analysis.sentiment.polarity == 0:\n return 0\n else:\n return -1", "_____no_output_____" ], [ "# We create a column with the result of the analysis:\ndata['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])\n\n# We display the updated dataframe with the new 
column:\ndisplay(data.head(10))", "_____no_output_____" ], [ "# We construct lists with classified tweets:\n\npos_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] > 0]\nneu_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] == 0]\nneg_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] < 0]", "_____no_output_____" ], [ "# We print percentages:\n\nprint(\"Percentage of positive tweets: {}%\".format(len(pos_tweets)*100/len(data['Tweets'])))\nprint(\"Percentage of neutral tweets: {}%\".format(len(neu_tweets)*100/len(data['Tweets'])))\nprint(\"Percentage of negative tweets: {}%\".format(len(neg_tweets)*100/len(data['Tweets'])))", "Percentage of positive tweets: 38.0%\nPercentage of neutral tweets: 60.5%\nPercentage of negative tweets: 1.5%\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec55ef373bff4c525ec41463ac3de4ed5763b82d
824,385
ipynb
Jupyter Notebook
cremi/Collection_of_naive_cleft_analysis.ipynb
SiyanZHOU1997/synapse-prediction-on-EM-image-
dfcab951ee91646e59a263813d5e0781e7f3eb8d
[ "MIT" ]
null
null
null
cremi/Collection_of_naive_cleft_analysis.ipynb
SiyanZHOU1997/synapse-prediction-on-EM-image-
dfcab951ee91646e59a263813d5e0781e7f3eb8d
[ "MIT" ]
null
null
null
cremi/Collection_of_naive_cleft_analysis.ipynb
SiyanZHOU1997/synapse-prediction-on-EM-image-
dfcab951ee91646e59a263813d5e0781e7f3eb8d
[ "MIT" ]
null
null
null
284.761658
97,796
0.914282
[ [ [ "from sklearn.cluster import KMeans\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy\nfrom PIL import Image", "_____no_output_____" ] ], [ [ "This is the very beginning of my attempt to do synaptic cleft analysis. At that time I was so naive and unfamiliar with python and pythonic module, so many code and functions here might be really wierd. However, there WERE indeed some good try, like the minimum bounding box and 2D skeleton. Here also include SIFT, 3D skeleton and plot smoothing, I didn't delete them in case these naive exploration might be needed one day.", "_____no_output_____" ], [ "# (1) import clefts", "_____no_output_____" ] ], [ [ "cd /home/siyan/projects/cluster", "/home/siyan/projects/cluster\n" ], [ "with h5py.File('data/cremi/gt-syn/syn_A_v2_200.h5','r') as f:\n cleftsA = f['main'][:]", "_____no_output_____" ], [ "cleftsA.shape", "_____no_output_____" ], [ "fig,ax = plt.subplots(2,4,figsize = (40,20))\nfor i in range(2):\n for j in range(4):\n ax[i,j].imshow(cleftsA[0+4*i+j])", "_____no_output_____" ], [ "np.unique(cleftsA,return_counts=True)", "_____no_output_____" ] ], [ [ "# (2) connected region detection", "_____no_output_____" ] ], [ [ "from skimage.measure import label", "_____no_output_____" ], [ "lb = label(input=cleftsA, background=0, connectivity=2, return_num=True)", "_____no_output_____" ], [ "lb", "_____no_output_____" ], [ "l = []\nfor i in range(153):\n for j in range(1842):\n for k in range(1727):\n if lb[0][i,j,k] == 1:\n l.append([i,j,k])", "_____no_output_____" ], [ "len(l)", "_____no_output_____" ], [ "l", "_____no_output_____" ], [ "n = np.array(l)\nn = n.T\nxMAX = max(n[1,:])\nxMIN = min(n[1,:])\nyMAX = max(n[2,:])\nyMIN = min(n[2,:])\nzMAX = max(n[0,:])\nzMIN = min(n[0,:])", "_____no_output_____" ], [ "xMIN,yMIN", "_____no_output_____" ], [ "n.T", "_____no_output_____" ], [ "s = lb[0][zMIN:zMAX+1,xMIN:xMAX+1,yMIN:yMAX+1]", "_____no_output_____" ], [ "s", "_____no_output_____" ], [ "Axes3D.scatter(xs=n[1,:],ys=n[2,:],zs=n[0,:])", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,4,figsize = (20,20))\nfor i in range(4):\n ax[i].imshow(s[i,:,:])\n", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,2,figsize = (20,20))\nax[0].imshow(lb[0][zMIN,xMIN:xMAX+1,yMIN:yMAX+1])\nax[1].imshow(lb[0][zMAX,xMIN:xMAX+1,yMIN:yMAX+1])\n", "_____no_output_____" ], [ "scipy.misc.imsave('0.png', lb[0][14,xMIN:xMAX,yMIN:yMAX])", "/home/siyan/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: DeprecationWarning: `imsave` is deprecated!\n`imsave` is deprecated in SciPy 1.0.0, and will be removed in 1.2.0.\nUse ``imageio.imwrite`` instead.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "scipy.misc.imsave('1.png', lb[0][14,:,:])", "/home/siyan/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:1: DeprecationWarning: `imsave` is deprecated!\n`imsave` is deprecated in SciPy 1.0.0, and will be removed in 1.2.0.\nUse ``imageio.imwrite`` instead.\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "i = Image.open('/home/siyan/projects/cluster/0.png')\nplt.imshow(i)", "_____no_output_____" ] ], [ [ "# (3) Rotation invarient feature - SIFT", "_____no_output_____" ] ], [ [ "import cv2", "_____no_output_____" ], [ "img = cv2.imread('/home/siyan/projects/cluster/0.png')", "_____no_output_____" ], [ "img2 = cv2.imread('/home/siyan/projects/cluster/1.png')", "_____no_output_____" ], [ "plt.imshow(img)", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,1,figsize = 
(20,20))\nax.imshow(img2)", "_____no_output_____" ], [ "detector = cv2.xfeatures2d.SIFT_create()", "_____no_output_____" ], [ "detector2 = cv2.xfeatures2d.SIFT_create()", "_____no_output_____" ], [ "keypoints = detector2.detect(img,None)", "_____no_output_____" ], [ "keypoints", "_____no_output_____" ], [ "keypoints2 = detector.detect(img2,None)", "_____no_output_____" ], [ "keypoints2", "_____no_output_____" ], [ "img = cv2.drawKeypoints(img,keypoints,img)", "_____no_output_____" ], [ "img2 = cv2.drawKeypoints(img2,keypoints2,img2)", "_____no_output_____" ], [ "plt.imshow(img)", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,1,figsize = (20,20))\nax.imshow(img2)", "_____no_output_____" ] ], [ [ "# (4) minimum bounding box", "_____no_output_____" ] ], [ [ "from mpl_toolkits.mplot3d import Axes3D\nfrom scipy.spatial import ConvexHull\n\n# 8 points defining the cube corners\npts = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],\n [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1],[0.2,0.2,0.3]])\n\nhull = ConvexHull(pts)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection=\"3d\")\n\n# Plot defining corner points\nax.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")\n\n# 12 = 2 * 6 faces are the simplices (2 simplices per square face)\nfor s in hull.simplices:\n print (s)\n s = np.append(s, s[0]) # Here we cycle back to the first coordinate\n print (s)\n ax.plot(pts[s, 0], pts[s, 1], pts[s, 2], \"r-\")\n print (pts[s, 0], pts[s, 1], pts[s, 2])\n\nplt.show()", "[3 2 1]\n[3 2 1 3]\n[0. 1. 1. 0.] [1. 1. 0. 1.] [0. 0. 0. 0.]\n[3 1 0]\n[3 1 0 3]\n[0. 1. 0. 0.] [1. 0. 0. 1.] [0. 0. 0. 0.]\n[5 1 0]\n[5 1 0 5]\n[1. 1. 0. 1.] [0. 0. 0. 0.] [1. 0. 0. 1.]\n[5 4 0]\n[5 4 0 5]\n[1. 0. 0. 1.] [0. 0. 0. 0.] [1. 1. 0. 1.]\n[7 3 0]\n[7 3 0 7]\n[0. 0. 0. 0.] [1. 1. 0. 1.] [1. 0. 0. 1.]\n[7 4 0]\n[7 4 0 7]\n[0. 0. 0. 0.] [1. 0. 0. 1.] [1. 1. 0. 1.]\n[6 2 1]\n[6 2 1 6]\n[1. 1. 1. 1.] [1. 1. 0. 1.] [1. 0. 0. 1.]\n[6 5 1]\n[6 5 1 6]\n[1. 1. 1. 1.] [1. 0. 0. 1.] [1. 1. 0. 1.]\n[6 3 2]\n[6 3 2 6]\n[1. 0. 1. 1.] [1. 1. 1. 1.] [1. 0. 0. 1.]\n[6 7 3]\n[6 7 3 6]\n[1. 0. 0. 1.] [1. 1. 1. 1.] [1. 1. 0. 1.]\n[6 5 4]\n[6 5 4 6]\n[1. 1. 0. 1.] [1. 0. 0. 1.] [1. 1. 1. 1.]\n[6 7 4]\n[6 7 4 6]\n[1. 0. 0. 1.] [1. 1. 0. 1.] [1. 1. 1. 1.]\n" ], [ "l3D = []", "_____no_output_____" ], [ "for i in l:\n for j in range(10):\n l3D.append([i[1],i[2],(i[0]-zMIN)*10+j])", "_____no_output_____" ], [ "np.array(l3D).shape", "_____no_output_____" ], [ "# 8 points defining the cube corners\npts = np.array(l3D)\nedges = []\n\nhull = ConvexHull(pts)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection=\"3d\")\n\n# Plot defining corner points\nax.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")\n\n# 12 = 2 * 6 faces are the simplices (2 simplices per square face)\nfor s in hull.simplices:\n if ((s[0],s[1]) not in edges) and ((s[1],s[0]) not in edges):\n edges.append((s[0],s[1]))\n if ((s[1],s[2]) not in edges) and ((s[2],s[1]) not in edges):\n edges.append((s[1],s[2]))\n if ((s[0],s[2]) not in edges) and ((s[2],s[0]) not in edges):\n edges.append((s[0],s[2]))\n s = np.append(s, s[0]) # Here we cycle back to the first coordinate\n ax.plot(pts[s, 0], pts[s, 1], pts[s, 2], \"r-\")\n \nplt.show()", "_____no_output_____" ], [ "pts.shape", "_____no_output_____" ] ], [ [ "minimum bounding box", "_____no_output_____" ] ], [ [ "### this function is writen by nathanieltroutman. 
URL = nathanieltroutman.net/?q=content/calculating-minimum-volume-bounding-box\n### many thanks to this implementation!\nimport sys\nimport numpy as np\n\ndef findOBBEdge(edges, points):\n # shift the points such that the minimum x, y, z values\n # in the entire set of points is 0.\n shift = points.min(axis=0)\n points = points - shift\n \n min_volume = sys.maxsize\n \n # try every pair of edges (ordering is not important)\n for idx, edge1_idx in enumerate(edges): \n e1 = points[edge1_idx[0]] - points[edge1_idx[1]]\n for idx2, edge2_index in enumerate(edges[(idx+1):]): \n e2 = points[edge2_index[0]] - points[edge2_index[1]]\n \n # transform the two edges into a orthogonal basis\n w = vec_cross(e1, e2) # returns normalized vector\n u = vec_cross(w, e1)\n v = vec_cross(u, w)\n \n # project all the points on to the basis u1, u2 u3\n p = calcProjections(points, u, v, w)\n\n volume, mins, maxes = calcVolume(p)\n \n # we are looking for the minimum volume box\n if volume <= min_volume:\n min_volume = volume\n specs = u, v, w, mins, maxes, volume\n \n u, v, w, mins, maxes, volume = specs\n \n # get the corner by using our projections, then shift it to move\n # it back into the same origin as the original set of points\n corner = u * mins[0] + v * mins[1] + w * mins[2]\n corner += shift\n \n # create the sides which are vectors with the magnitude the length\n # of that side\n v1 = u * (maxes[0] - mins[0])\n v2 = v * (maxes[1] - mins[1])\n v3 = w * (maxes[2] - mins[2])\n \n return corner, v1, v2, v3\n \ndef calcVolume(p):\n \"\"\"Calculates the volume of the box that would encompass the given\n points using the given projection. projection is sized (NxM) where\n N is the number of points and M is the number of vectors they were\n projected onto. Also return the minimum and maximum bounds along\n each of those vectors.\"\"\"\n \n # the minimum and maximum projection of each basis vector\n mins = p.min(axis=0)\n maxes = p.max(axis=0)\n \n # the volume product of each difference between the maximum and\n # minimum values from the projection onto each basis vector\n volume = np.prod(maxes - mins)\n \n return volume, mins, maxes \n\ndef calcProjections(points, *vectors):\n \"\"\"Calculates the projection of points (NxD) onto the vectors \n (MxD) and return the projections p which is a matrix sized (N, M) \n where N is the number of points and M is the number of vectors.\n p[i][j], is the projection of points[i] onto vectors[j] (which is\n between 0 and 1).\"\"\"\n \n u = np.array(vectors)\n \n # project the points onto the vectors into on fell swoop\n d = np.dot(points, u.T)\n \n # this is the dot product of each vector with itself\n v2 = np.diag(np.inner(u, u))\n \n p = d / v2\n \n return p\n \ndef vec_cross(u, v):\n \"\"\"Return the normalized cross product of u and v.\"\"\"\n w = np.cross(u, v)\n w = w/float(np.sqrt(np.sum(v**2)))\n return w\n\nmbb = findOBBEdge(edges,pts)", "/home/siyan/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:87: RuntimeWarning: invalid value encountered in true_divide\n/home/siyan/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:80: RuntimeWarning: invalid value encountered in true_divide\n" ], [ "mbb", "_____no_output_____" ], [ "u = (mbb[1][0]**2 + mbb[1][1]**2+mbb[1][2]**2)**0.5\nv = (mbb[2][0]**2 + mbb[2][1]**2+mbb[2][2]**2)**0.5\nw = (mbb[3][0]**2 + mbb[3][1]**2+mbb[3][2]**2)**0.5", "_____no_output_____" ], [ "u,v,w", "_____no_output_____" ], [ "p000 = mbb[0]\np100 = p000 + mbb[1]\np110 = p100 + mbb[2]\np101 = p100 + mbb[3]\np111 = p110 + 
mbb[3]\np010 = p000 + mbb[2]\np011 = p010 + mbb[3]\np001 = p000 + mbb[3]\nP = np.array([p000,p100,p110,p101,p111,p010,p011,p001])", "_____no_output_____" ], [ "P", "_____no_output_____" ], [ "fig = plt.figure()\nax1 = fig.add_subplot(111, projection=\"3d\")\n\n# Plot defining corner points\nax1.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")\nax1.plot(P.T[0], P.T[1], P.T[2], \"ro\")\n\n# draw surface\n\n# draw edge\n#ax1.plot(P.T[[1,2],0],P.T[[1,2,3,1],1],P.T[[1,2,3,1],2],'r-')", "_____no_output_____" ], [ "fig = plt.figure()\nax1 = fig.add_subplot(111, projection=\"3d\")\n\n# Plot defining corner points\nax1.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")\nax1.plot(P.T[0], P.T[1], P.T[2], \"ro\")\nax1.view_init(elev=65., azim=8)\n\n# draw surface\n\n# draw edge\n#ax1.plot(P.T[[1,2],0],P.T[[1,2,3,1],1],P.T[[1,2,3,1],2],'r-')", "_____no_output_____" ], [ "fig = plt.figure()\nax1 = fig.add_subplot(111, projection=\"3d\")\n\n# Plot defining corner points\nax1.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")\nax1.plot(P.T[0], P.T[1], P.T[2], \"ro\")\n\nax1.view_init(elev=30, azim=60)\n# draw surface\n\n# draw edge\n#ax1.plot(P.T[[1,2],0],P.T[[1,2,3,1],1],P.T[[1,2,3,1],2],'r-')", "_____no_output_____" ] ], [ [ "# (5) 2D skeleton", "_____no_output_____" ] ], [ [ "from skimage import morphology,draw\nimport cv2", "_____no_output_____" ], [ "def point(image):\n img = np.uint8(image)\n contours = cv2.findContours(img.copy(),1,2)\n M= cv2.moments(contours[0]) #求矩\n cx = int(M['m10']/M['m00']) # 求x坐标\n cy = int(M['m01']/M['m00']) # 求y坐标\n return cx,cy\n\ndef adjust(skeleton,a,b):\n ske = np.uint8(skeleton)\n H = np.float32([[1,0,x/2 - a+15],[0,1,y/2 - b-13]])\n s = ske.shape \n res = cv2.warpAffine(ske,H,(s[1],s[0])) #需要图像、变换矩阵、变换后的大小\n return res\n\nx, y = lb[0][14,xMIN:xMAX+1,yMIN:yMAX+1].shape\nsALL = np.zeros([x,y])\nsAdjust = np.zeros([x,y])\nfor i in range(4):\n image = lb[0][14 + i,xMIN:xMAX+1,yMIN:yMAX+1]\n skeleton =morphology.skeletonize(image)\n a, b = point(image)\n sad = adjust(skeleton,a,b)\n sALL = sALL + skeleton\n sAdjust = sAdjust + sad\n\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))\n\n ax1.imshow(image, cmap=plt.cm.gray)\n ax1.axis('off')\n ax1.set_title('original', fontsize=20)\n\n ax2.imshow(skeleton, cmap=plt.cm.gray)\n ax2.axis('off')\n ax2.set_title('skeleton', fontsize=20)\n \n ax3.imshow(sad, cmap=plt.cm.gray)\n ax3.axis('off')\n ax3.set_title('skeleton adjust', fontsize=20)\n\n plt.show()\n\nplt.imshow(sAdjust)\n\n", "_____no_output_____" ] ], [ [ "projection to surfaces of the minimum bounding box", "_____no_output_____" ] ], [ [ "def augment(a):\n \"\"\"Add a final column of ones to input data\"\"\"\n arr = np.ones((a.shape[0],a.shape[1]+1))\n arr[:,:-1] = a\n return arr\n\nA = np.array([p000,p100,p010,p001])\nB = np.array([[0,0,0],[u,0,0],[0,v,0],[0,0,w]])\nT = np.linalg.inv(augment(A)).dot(B)\n\nfor i in range(4):\n sALL_z = []\n image = lb[0][14+i,xMIN:xMAX+1,yMIN:yMAX+1]\n skeleton =morphology.skeletonize(image)\n for j in range(skeleton.shape[0]):\n for k in range(skeleton.shape[1]):\n if skeleton[j][k] == True:\n sALL_z.append([j+xMIN,k+yMIN,10*i])\n S = augment(np.array(sALL_z))\n M= np.array(sALL_z)\n\n S_ = S.dot(T)\n\n fig, ax = plt.subplots(1, 4, figsize=(40, 10))\n ax[0].plot(S_.T[0], S_.T[1], \"ko\")\n ax[1].plot(S_.T[1], S_.T[2], \"ko\")\n ax[2].plot(S_.T[0], S_.T[2], \"ko\")\n ax[3].plot(M.T[0],M.T[1],\"ko\")", "_____no_output_____" ] ], [ [ "# (6) 3D skeleton", "_____no_output_____" ] ], [ [ "x, y = lb[0][14,xMIN:xMAX+1,yMIN:yMAX+1].shape\nsALL 
= np.zeros([x,y])\n\nimage = lb[0][14:18,xMIN:xMAX+1,yMIN:yMAX+1]\nM = np.empty((image.shape[0]*10,image.shape[1],image.shape[2]))\nfor i in range(4):\n for j in range(10):\n M[10*i+j,:,:] = image[i,:,:]\n\nskeleton =morphology.skeletonize_3d(M)\nprint (skeleton.shape)\ns = []\nfor i in range(skeleton.shape[0]):\n for j in range(skeleton.shape[1]):\n for k in range(skeleton.shape[2]):\n if skeleton[i,j,k] != 0:\n s.append([i,j,k])\n \ns= np.array(s)\n\nfigs = plt.figure()\naxs = figs.add_subplot(111, projection=\"3d\")\n\naxs.plot(s.T[1], s.T[2], s.T[0], \"ko\")\naxs.view_init(elev=90, azim=0)", "/home/siyan/anaconda3/lib/python3.6/site-packages/skimage/util/dtype.py:122: UserWarning: Possible precision loss when converting from float64 to uint8\n .format(dtypeobj_in, dtypeobj_out))\n" ] ], [ [ "Conclusion: need smooth before 3d skeleton", "_____no_output_____" ], [ "# (7) 3D surface smooth", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D\n\nX, Y = np.mgrid[-1:1:20j, -1:1:20j]\nZ = (X+Y) * np.exp(-6.0*(X*X+Y*Y)) + np.random.rand(X.shape[0])\n\nxnew, ynew = np.mgrid[-1:1:80j, -1:1:80j]\ntck = interpolate.bisplrep(X, Y, Z, s=10)\nznew = interpolate.bisplev(xnew[:,0], ynew[0,:], tck)\n\nfig = plt.figure(figsize=(12,12))\nax = fig.gca(projection='3d')\nax.plot_surface(X, Y, Z, cmap='summer', rstride=1, cstride=1, alpha=None)\nax.view_init(elev=0, azim=0)\nplt.show()\n\nfig = plt.figure(figsize=(12,12))\nax = fig.gca(projection='3d')\nax.plot_surface(xnew, ynew, znew, cmap='summer', rstride=1, cstride=1, alpha=None, antialiased=True)\nax.view_init(elev=0, azim=0)\nplt.show()", "_____no_output_____" ], [ "tck = interpolate.bisplrep(pts.T[0], pts.T[1], pts.T[2], s=10)\nznew = interpolate.bisplev(pts.T[0], pts.T[1], tck)", "/home/siyan/anaconda3/lib/python3.6/site-packages/scipy/interpolate/_fitpack_impl.py:976: RuntimeWarning: A theoretically impossible result when finding a smoothing spline\nwith fp = s. 
Probable causes: s too small or badly chosen eps.\n(abs(fp-s)/s>0.001)\n\tkx,ky=3,3 nx,ny=23,23 m=13300 fp=40510095570.785385 s=10.000000\n warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d\nimport pylab as p\nimport mpl_toolkits.mplot3d.axes3d as p3\n\nk,b=np.mgrid[1:3:3j,4:6:3j]\nf_kb=3*k**2+2*b+1\n\n#k.shape=-1,1\n#b.shape=-1,1\n#f_kb.shape=-1,1 #统统转成9行1列\n\n\nfig = plt.figure(figsize=(12,12))\nax = fig.gca(projection='3d')\nax.plot_surface(k,b,f_kb, cmap='summer', rstride=1, cstride=1, alpha=None)\nax.set_xlabel('k')\nax.set_ylabel('b')\nax.set_zlabel('ErrorArray')\nax.view_init(elev=0, azim=0)\nfig.show()", "/home/siyan/anaconda3/lib/python3.6/site-packages/matplotlib/figure.py:459: UserWarning: matplotlib is currently using a non-GUI backend, so cannot show the figure\n \"matplotlib is currently using a non-GUI backend, \"\n" ], [ "import numpy as np \n\npoint_grid =np.array([[0.0,0.0,0.0],[0.4,0.4,0.4],[0.8,0.8,0.8],[1.0,1.0,1.0]])#网格点坐标\n\ndef func(x, y, z):\n return x*(1-x)*np.cos(4*np.pi*x) * (np.sin(4*np.pi*y**2)**2)*z\n\npoints = np.random.rand(10, 3)#实际点坐标\nvalues = func(points[:,0], points[:,1],points[:,2])#实际点的值\n\nfrom scipy.interpolate import griddata\ngrid_z0 = griddata(points, values, point_grid, method='nearest')#插值计算,计算出网格点的值\n\nvalues\n#fig = plt.figure()\n#ax1 = fig.add_subplot(111, projection=\"3d\")\n#ax1.plot(pts.T[0], pts.T[1], pts.T[2], \"ko\")", "_____no_output_____" ], [ "grid_z0", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec55fc906c253abeae019e2acd51a1500afe1dac
175,602
ipynb
Jupyter Notebook
notebooks/metrics.ipynb
antonkulaga/hello-molecular-dynamics
7b27490c76c471aa922fa9e5698a4ea0c8bb4bff
[ "MIT" ]
null
null
null
notebooks/metrics.ipynb
antonkulaga/hello-molecular-dynamics
7b27490c76c471aa922fa9e5698a4ea0c8bb4bff
[ "MIT" ]
null
null
null
notebooks/metrics.ipynb
antonkulaga/hello-molecular-dynamics
7b27490c76c471aa922fa9e5698a4ea0c8bb4bff
[ "MIT" ]
null
null
null
41.327842
62,032
0.549145
[ [ [ "Measuring RMSD and other metrics\n================================", "_____no_output_____" ] ], [ [ "from pathlib import Path\nimport sys\nfrom sys import stdout\nimport inspect\nfrom typing import Callable\n\ndebug_local = True#False\nlocal = Path(\"..\").resolve()\ncode = local / \"mm\"\ndata = local / \"data\"\ninputs = data / \"input\"\noutput = data / \"output\"\n\nif debug_local and code.exists():\n sys.path.insert(0, code.as_posix())\n print(\"extending pathes with local yspecies\")\n print(sys.path)\n %load_ext autoreload\n %autoreload 2", "extending pathes with local yspecies\n['/data/sources/antibody-mm/mm', '/data/sources/antibody-mm/mm', '/data/sources/antibody-mm/mm', '/data/sources/antibody-mm/notebooks', '/home/antonkulaga/micromamba/envs/antibody-mm/lib/python39.zip', '/home/antonkulaga/micromamba/envs/antibody-mm/lib/python3.9', '/home/antonkulaga/micromamba/envs/antibody-mm/lib/python3.9/lib-dynload', '', '/home/antonkulaga/micromamba/envs/antibody-mm/lib/python3.9/site-packages', '/home/antonkulaga/micromamba/envs/antibody-mm/lib/python3.9/site-packages/IPython/extensions', '/home/antonkulaga/.ipython']\nThe autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "from pycomfort.files import *", "_____no_output_____" ], [ "from prody import *\nfrom pylab import *\nion()", "_____no_output_____" ] ], [ [ "Measuring deepab accuracy\n=========================", "_____no_output_____" ] ], [ [ "def printMatch(match):\n for i in match:\n print(f'Chain {i} : {match[i]}')\n print(f'Length : {len(match[0])}')\n print(f'Seq identity: {match[2]}')\n print(f'Seq overlap : {match[3]}')\n print(f'RMSD : {calcRMSD(match[0], match[1])}\\n')", "_____no_output_____" ], [ "inputs", "_____no_output_____" ], [ "ra_real = parsePDB(str(inputs / \"RA\" /\"1adq_fixed_RF_short.pdb\"))", "@> 3481 atoms and 1 coordinate set(s) were parsed in 0.04s.\n" ], [ "ra_predicted = parsePDB(str(inputs / \"RA\" / \"1ADQ_FV_deep.pdb\" ))", "@> 3481 atoms and 1 coordinate set(s) were parsed in 0.04s.\n" ], [ "showProtein(ra_real, ra_predicted)\nlegend()", "_____no_output_____" ], [ "#matches = matchChains(ra_real, ra_predicted)\n#for match in matches:\n# printMatch(match)", "_____no_output_____" ] ], [ [ "Benchmark evaluation\n====================", "_____no_output_____" ] ], [ [ "benchmark_path = inputs / \"antibodies_benchmark\"\nstructures_path = benchmark_path / \"structures\"", "_____no_output_____" ], [ "#tprint(structures_path)", "_____no_output_____" ], [ "import polars as pl\ndf = pl.read_csv(benchmark_path / \"antibodies.tsv\", sep=\"\\t\")", "_____no_output_____" ] ], [ [ "### Extracting antibodies", "_____no_output_____" ] ], [ [ "antibody_col = pl.col(\"Protein 1\")\nantibodies = df.filter(pl.col(\"Cat.\").str.contains(\"AA\"))\nantibodies", "_____no_output_____" ], [ "def path_from_complex(s: str, antibody: bool = True, bound: bool = True) -> Path:\n return structures_path / (s[0:s.index(\"_\")] + f\"_{'r' if antibody else 'l'}\" f\"_{'b' if bound else 'u'}\" + \".pdb\")\n\ndef antibody_bound(row) -> str:\n return str(path_from_complex(row[0], True, True))\n\ndef antibody_unbound(row) -> str:\n return str(path_from_complex(row[0], True, False))\n\ndef antigen_bound(row) -> str:\n return str(path_from_complex(row[0], False, True))\n\ndef antigen_unbound(row) -> str:\n return str(path_from_complex(row[0], False, False))", "_____no_output_____" ], [ "def with_column(df: pl.DataFrame, name: str, fun: Callable) -> pl.DataFrame:\n series: pl.Series = 
df.apply(fun)[\"apply\"]\n return df.with_column(series.rename(name))\n\ndef with_complex_column(df: pl.DataFrame, name: str, fun: Callable) -> pl.DataFrame:\n return with_column(df, name, lambda row: fun(row[0]))", "_____no_output_____" ], [ "def with_antigens(df: pl.DataFrame) -> pl.DataFrame:\n bound = with_column(df, \"antigen_bound\", antigen_bound)\n return with_column(bound, \"antigen_unbound\", antigen_unbound)\n\ndef with_antibodies(df: pl.DataFrame) -> pl.DataFrame:\n with_chains = with_complex_column(df, \"chains\", lambda s: s[s.index(\"_\")+1:s.index(\":\")])\n with_bound = with_column(with_chains, \"antibody_bound\", antibody_bound)\n return with_column(with_bound, \"antibody_unbound\", antibody_unbound)", "_____no_output_____" ], [ "antibodies = with_antigens(with_antibodies( df.filter(pl.col(\"Cat.\").str.contains(\"AA\"))))\nantibodies", "_____no_output_____" ], [ "antibodies_HL = antibodies_extended[antibodies_extended[\"chains\"] == \"HL\"]\nantibodies_HL", "_____no_output_____" ], [ "fabs = antibodies_HL.filter(antibody_col.str.contains(\"Fab\"))\nfabs", "_____no_output_____" ], [ "fvs = antibodies.filter(antibody_col.str.contains(\"Fv\"))\nfvs", "_____no_output_____" ], [ "import shutil\noutput = Path(\"/data/samples/docking/benchmarks/selected\")\nfor r in bound_receptors:\n shutil.copy(r, output / r.name)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec560f968f34426fd5cbc296da3be68754e4788d
5,042
ipynb
Jupyter Notebook
12_re/02_re.ipynb
trhgu/TIL_python
08d9799172cf9e569155383f0d30ef3b5b102e1b
[ "MIT" ]
null
null
null
12_re/02_re.ipynb
trhgu/TIL_python
08d9799172cf9e569155383f0d30ef3b5b102e1b
[ "MIT" ]
null
null
null
12_re/02_re.ipynb
trhgu/TIL_python
08d9799172cf9e569155383f0d30ef3b5b102e1b
[ "MIT" ]
null
null
null
19.542636
104
0.482745
[ [ [ "import re", "_____no_output_____" ] ], [ [ "##### match\n- search 와 유사하나, 주어진 문자열의 시작부터 비교하여 패턴이 있는지 확인\n- 시작부터 해당 패턴이 존재하지 않다면 None 반환", "_____no_output_____" ] ], [ [ "re.match(r'\\d\\d\\d', 'my number is 123')", "_____no_output_____" ], [ "re.match(r'\\d\\d\\d', '123 is my number')", "_____no_output_____" ], [ "re.search(r'^\\d\\d\\d', '123 is my number')", "_____no_output_____" ] ], [ [ "##### findall\n- search가 최초로 매칭되는 패턴만 반환한다면, findall은 매칭되는 저체의 패턴을 반환\n- 매칭되는 모든 결과를 리스트 형태로 반환", "_____no_output_____" ] ], [ [ "re.findall(r'[\\w-]+@[\\w.]+','[email protected] haha [email protected] nice test test')", "_____no_output_____" ] ], [ [ "##### sub\n- 주어진 문자열에서 일치하는 모든 패턴을 replace\n- 그 결과를 문자열로 다시 반환함\n- 두번째 인자는 특정 문자열이 될 수도 있고, 함수가 될 수 도 있음\n- count가 0인 경우는 전체를, 1 이상이면 해당 숫자만큼 치환됨", "_____no_output_____" ] ], [ [ "re.sub(r'[\\w-]+@[\\w.]+', 'great', '[email protected] haha [email protected] nice test test')", "_____no_output_____" ], [ "re.sub(r'[\\w-]+@[\\w.]+', 'great', '[email protected] haha [email protected] nice test test', count=1)", "_____no_output_____" ] ], [ [ "##### compile\n- 동일한 정규표현식을 매번 다시 쓰기 번거로움을 해결\n- compile로 해당표현식을 re.RegaxObject객체로 저장하여 사용가능", "_____no_output_____" ] ], [ [ "email_reg = re.compile(r'[\\w-]+@[\\w.]+')", "_____no_output_____" ], [ "email_reg.search('[email protected] haha [email protected] nice test test')", "_____no_output_____" ], [ "email_reg.findall('[email protected] haha [email protected] nice test test')", "_____no_output_____" ], [ "email_reg.('great', '[email protected] haha [email protected] nice test test')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec56154c1dd8398ab2ede0118f1d28540bbfe7fc
19,172
ipynb
Jupyter Notebook
nbs/33_text.models.core.ipynb
Peshlex/fastai
07c481487e459aac97342bc83f6219fce3c5682c
[ "Apache-2.0" ]
1
2020-03-04T04:56:26.000Z
2020-03-04T04:56:26.000Z
nbs/33_text.models.core.ipynb
Peshlex/fastai
07c481487e459aac97342bc83f6219fce3c5682c
[ "Apache-2.0" ]
null
null
null
nbs/33_text.models.core.ipynb
Peshlex/fastai
07c481487e459aac97342bc83f6219fce3c5682c
[ "Apache-2.0" ]
null
null
null
33.517483
247
0.557897
[ [ [ "#export\nfrom fastai2.data.all import *\nfrom fastai2.text.core import *\nfrom fastai2.text.models.awdlstm import *", "_____no_output_____" ], [ "from nbdev.showdoc import *", "_____no_output_____" ], [ "#default_exp text.models.core\n#default_cls_lvl 3", "_____no_output_____" ] ], [ [ "# Core text modules\n\n> Contain the modules common between different architectures and the generic functions to get models", "_____no_output_____" ] ], [ [ "#export \n_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD,\n 'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split,\n 'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split},\n AWD_QRNN: {'hid_name':'emb_sz',\n 'config_lm':awd_qrnn_lm_config, 'split_lm': awd_lstm_lm_split,\n 'config_clas':awd_qrnn_clas_config, 'split_clas': awd_lstm_clas_split},}\n # Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER,\n # 'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split,\n # 'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split},\n # TransformerXL: {'hid_name':'d_model',\n # 'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split,\n # 'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}}", "_____no_output_____" ] ], [ [ "## Language models", "_____no_output_____" ] ], [ [ "#export\nclass LinearDecoder(Module):\n \"To go on top of a RNNCore module and create a Language Model.\"\n initrange=0.1\n\n def __init__(self, n_out, n_hid, output_p=0.1, tie_encoder=None, bias=True):\n self.decoder = nn.Linear(n_hid, n_out, bias=bias)\n self.decoder.weight.data.uniform_(-self.initrange, self.initrange)\n self.output_dp = RNNDropout(output_p)\n if bias: self.decoder.bias.data.zero_()\n if tie_encoder: self.decoder.weight = tie_encoder.weight\n\n def forward(self, input):\n dp_inp = self.output_dp(input)\n return self.decoder(dp_inp), input, dp_inp", "_____no_output_____" ], [ "from fastai2.text.models.awdlstm import *\nenc = AWD_LSTM(100, 20, 10, 2)\nx = torch.randint(0, 100, (10,5))\nr = enc(x)\n\ntst = LinearDecoder(100, 20, 0.1)\ny = tst(r)\ntest_eq(y[1], r)\ntest_eq(y[2].shape, r.shape)\ntest_eq(y[0].shape, [10, 5, 100])\n\ntst = LinearDecoder(100, 20, 0.1, tie_encoder=enc.encoder)\ntest_eq(tst.decoder.weight, enc.encoder.weight)", "_____no_output_____" ], [ "#export\nclass SequentialRNN(nn.Sequential):\n \"A sequential module that passes the reset call to its children.\"\n def reset(self):\n for c in self.children(): getattr(c, 'reset', noop)()", "_____no_output_____" ], [ "class _TstMod(Module):\n def reset(self): print('reset')\n\ntst = SequentialRNN(_TstMod(), _TstMod())\ntest_stdout(tst.reset, 'reset\\nreset')", "_____no_output_____" ], [ "#export\ndef get_language_model(arch, vocab_sz, config=None, drop_mult=1.):\n \"Create a language model from `arch` and its `config`.\"\n meta = _model_meta[arch]\n config = ifnone(config, meta['config_lm']).copy()\n for k in config.keys():\n if k.endswith('_p'): config[k] *= drop_mult\n tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])\n init = config.pop('init') if 'init' in config else None\n encoder = arch(vocab_sz, **config)\n enc = encoder.encoder if tie_weights else None\n decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)\n model = SequentialRNN(encoder, decoder)\n return model if init is None else model.apply(init)", "_____no_output_____" ] ], [ [ "The default `config` used can be found in 
`_model_meta[arch]['config_lm']`. `drop_mult` is applied to all the probabilities of dropout in that config.", "_____no_output_____" ] ], [ [ "config = awd_lstm_lm_config.copy()\nconfig.update({'n_hid':10, 'emb_sz':20})\n\ntst = get_language_model(AWD_LSTM, 100, config=config)\nx = torch.randint(0, 100, (10,5))\ny = tst(x)\ntest_eq(y[0].shape, [10, 5, 100])\ntest_eq(y[1].shape, [10, 5, 20])\ntest_eq(y[2].shape, [10, 5, 20])\ntest_eq(tst[1].decoder.weight, tst[0].encoder.weight)", "_____no_output_____" ], [ "#test drop_mult\ntst = get_language_model(AWD_LSTM, 100, config=config, drop_mult=0.5)\ntest_eq(tst[1].output_dp.p, config['output_p']*0.5)\nfor rnn in tst[0].rnns: test_eq(rnn.weight_p, config['weight_p']*0.5)\nfor dp in tst[0].hidden_dps: test_eq(dp.p, config['hidden_p']*0.5)\ntest_eq(tst[0].encoder_dp.embed_p, config['embed_p']*0.5)\ntest_eq(tst[0].input_dp.p, config['input_p']*0.5)", "_____no_output_____" ] ], [ [ "## Classification models", "_____no_output_____" ] ], [ [ "#export\ndef _pad_tensor(t, bs):\n if t.size(0) < bs: return torch.cat([t, t.new_zeros(bs-t.size(0), *t.shape[1:])])\n return t", "_____no_output_____" ], [ "#export\nclass SentenceEncoder(Module):\n \"Create an encoder over `module` that can process a full sentence.\"\n def __init__(self, bptt, module, pad_idx=1, max_len=None): store_attr(self, 'bptt,module,pad_idx,max_len')\n def reset(self): getattr(self.module, 'reset', noop)()\n\n def forward(self, input):\n bs,sl = input.size()\n self.reset()\n mask = input == self.pad_idx\n outs,masks = [],[]\n for i in range(0, sl, self.bptt):\n #Note: this expects that sequence really begins on a round multiple of bptt\n real_bs = (input[:,i] != self.pad_idx).long().sum()\n o = self.module(input[:real_bs,i: min(i+self.bptt, sl)])\n if self.max_len is None or sl-i <= self.max_len:\n outs.append(o)\n masks.append(mask[:,i: min(i+self.bptt, sl)])\n outs = torch.cat([_pad_tensor(o, bs) for o in outs], dim=1)\n mask = torch.cat(masks, dim=1)\n return outs,mask", "_____no_output_____" ] ], [ [ "> Warning: This module expects the inputs padded with most of the padding first, with the sequence beginning at a round multiple of `bptt` (and the rest of the padding at the end). 
Use `pad_input_chunk` to get your data in a suitable format.", "_____no_output_____" ] ], [ [ "mod = nn.Embedding(5, 10)\ntst = SentenceEncoder(5, mod, pad_idx=0)\nx = torch.randint(1, 5, (3, 15))\nx[2,:5]=0\nout,mask = tst(x)\n\ntest_eq(out[:1], mod(x)[:1])\ntest_eq(out[2,5:], mod(x)[2,5:])\ntest_eq(mask, x==0)", "_____no_output_____" ], [ "#export\ndef masked_concat_pool(output, mask, bptt):\n \"Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]\"\n lens = output.shape[1] - mask.long().sum(dim=1)\n last_lens = mask[:,-bptt:].long().sum(dim=1)\n avg_pool = output.masked_fill(mask[:, :, None], 0).sum(dim=1)\n avg_pool.div_(lens.type(avg_pool.dtype)[:,None])\n max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]\n x = torch.cat([output[torch.arange(0, output.size(0)),-last_lens-1], max_pool, avg_pool], 1) #Concat pooling.\n return x", "_____no_output_____" ], [ "out = torch.randn(2,4,5)\nmask = tensor([[True,True,False,False], [False,False,False,True]])\nx = masked_concat_pool(out, mask, 2)\n\ntest_close(x[0,:5], out[0,-1])\ntest_close(x[1,:5], out[1,-2])\ntest_close(x[0,5:10], out[0,2:].max(dim=0)[0])\ntest_close(x[1,5:10], out[1,:3].max(dim=0)[0])\ntest_close(x[0,10:], out[0,2:].mean(dim=0))\ntest_close(x[1,10:], out[1,:3].mean(dim=0))", "_____no_output_____" ], [ "#Test the result is independent of padding by replacing the padded part by some random content\nout1 = torch.randn(2,4,5)\nout1[0,2:] = out[0,2:].clone()\nout1[1,:3] = out[1,:3].clone()\nx1 = masked_concat_pool(out1, mask, 2)\ntest_eq(x, x1)", "_____no_output_____" ], [ "#export\nclass PoolingLinearClassifier(Module):\n \"Create a linear classifier with pooling\"\n def __init__(self, dims, ps, bptt, y_range=None):\n if len(ps) != len(dims)-1: raise ValueError(\"Number of layers and dropout values do not match.\")\n acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]\n layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]\n if y_range is not None: layers.append(SigmoidRange(*y_range))\n self.layers = nn.Sequential(*layers)\n self.bptt = bptt\n\n def forward(self, input):\n out,mask = input\n x = masked_concat_pool(out, mask, self.bptt)\n x = self.layers(x)\n return x, out, out", "_____no_output_____" ], [ "mod = nn.Embedding(5, 10)\ntst = SentenceEncoder(5, mod, pad_idx=0)\nx = torch.randint(1, 5, (3, 15))\nx[2,:5]=0\nout,mask = tst(x)\n\ntest_eq(out[:1], mod(x)[:1])\ntest_eq(out[2,5:], mod(x)[2,5:])\ntest_eq(mask, x==0)", "_____no_output_____" ], [ "#hide\nmod = nn.Embedding(5, 10)\ntst = nn.Sequential(SentenceEncoder(5, mod, pad_idx=0), PoolingLinearClassifier([10*3,4], [0.], 5))\n\nx = torch.randint(1, 5, (3, 14))\nx[2,:5] = 0\nres,raw,out = tst(x) \n\ntest_eq(raw[:1], mod(x)[:1])\ntest_eq(raw[2,5:], mod(x)[2,5:])\ntest_eq(out[:1], mod(x)[:1])\ntest_eq(out[2,5:], mod(x)[2,5:])\ntest_eq(res.shape, [3,4])\n\nx1 = torch.cat([x, tensor([0,0,0])[:,None]], dim=1)\nres1,raw1,out1 = tst(x1) \ntest_eq(res, res1)", "_____no_output_____" ], [ "#export\ndef get_text_classifier(arch, vocab_sz, n_class, seq_len=72, config=None, drop_mult=1., lin_ftrs=None,\n ps=None, pad_idx=1, max_len=72*20, y_range=None):\n \"Create a text classifier from `arch` and its `config`, maybe `pretrained`\"\n meta = _model_meta[arch]\n config = ifnone(config, meta['config_clas']).copy()\n for k in config.keys():\n if k.endswith('_p'): config[k] *= drop_mult\n if lin_ftrs is None: lin_ftrs = [50]\n if ps is None: ps = [0.1]*len(lin_ftrs)\n layers = 
[config[meta['hid_name']] * 3] + lin_ftrs + [n_class]\n ps = [config.pop('output_p')] + ps\n init = config.pop('init') if 'init' in config else None\n encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx, max_len=max_len)\n model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps, bptt=seq_len, y_range=y_range))\n return model if init is None else model.apply(init)", "_____no_output_____" ], [ "config = awd_lstm_clas_config.copy()\nconfig.update({'n_hid':10, 'emb_sz':20})\n\ntst = get_text_classifier(AWD_LSTM, 100, 3, config=config)\nx = torch.randint(2, 100, (10,5))\ny = tst(x)\ntest_eq(y[0].shape, [10, 3])\ntest_eq(y[1].shape, [10, 5, 20])\ntest_eq(y[2].shape, [10, 5, 20])", "_____no_output_____" ], [ "#test padding gives same results\ntst.eval()\ny = tst(x)\nx1 = torch.cat([x, tensor([2,1,1,1,1,1,1,1,1,1])[:,None]], dim=1)\ny1 = tst(x1)\ntest_close(y[0][1:],y1[0][1:])", "_____no_output_____" ], [ "#test drop_mult\ntst = get_text_classifier(AWD_LSTM, 100, 3, config=config, drop_mult=0.5)\ntest_eq(tst[1].layers[1][1].p, 0.1)\ntest_eq(tst[1].layers[0][1].p, config['output_p']*0.5)\nfor rnn in tst[0].module.rnns: test_eq(rnn.weight_p, config['weight_p']*0.5)\nfor dp in tst[0].module.hidden_dps: test_eq(dp.p, config['hidden_p']*0.5)\ntest_eq(tst[0].module.encoder_dp.embed_p, config['embed_p']*0.5)\ntest_eq(tst[0].module.input_dp.p, config['input_p']*0.5)", "_____no_output_____" ] ], [ [ "## Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_torch_core.ipynb.\nConverted 01_layers.ipynb.\nConverted 02_data.load.ipynb.\nConverted 03_data.core.ipynb.\nConverted 04_data.external.ipynb.\nConverted 05_data.transforms.ipynb.\nConverted 06_data.block.ipynb.\nConverted 07_vision.core.ipynb.\nConverted 08_vision.data.ipynb.\nConverted 09_vision.augment.ipynb.\nConverted 09b_vision.utils.ipynb.\nConverted 09c_vision.widgets.ipynb.\nConverted 10_tutorial.pets.ipynb.\nConverted 11_vision.models.xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_callback.core.ipynb.\nConverted 13a_learner.ipynb.\nConverted 13b_metrics.ipynb.\nConverted 14_callback.schedule.ipynb.\nConverted 14a_callback.data.ipynb.\nConverted 15_callback.hook.ipynb.\nConverted 15a_vision.models.unet.ipynb.\nConverted 16_callback.progress.ipynb.\nConverted 17_callback.tracker.ipynb.\nConverted 18_callback.fp16.ipynb.\nConverted 18a_callback.training.ipynb.\nConverted 19_callback.mixup.ipynb.\nConverted 20_interpret.ipynb.\nConverted 20a_distributed.ipynb.\nConverted 21_vision.learner.ipynb.\nConverted 22_tutorial.imagenette.ipynb.\nConverted 23_tutorial.vision.ipynb.\nConverted 24_tutorial.siamese.ipynb.\nConverted 24_vision.gan.ipynb.\nConverted 30_text.core.ipynb.\nConverted 31_text.data.ipynb.\nConverted 32_text.models.awdlstm.ipynb.\nConverted 33_text.models.core.ipynb.\nConverted 34_callback.rnn.ipynb.\nConverted 35_tutorial.wikitext.ipynb.\nConverted 36_text.models.qrnn.ipynb.\nConverted 37_text.learner.ipynb.\nConverted 38_tutorial.text.ipynb.\nConverted 39_tutorial.transformers.ipynb.\nConverted 40_tabular.core.ipynb.\nConverted 41_tabular.data.ipynb.\nConverted 42_tabular.model.ipynb.\nConverted 43_tabular.learner.ipynb.\nConverted 44_tutorial.tabular.ipynb.\nConverted 45_collab.ipynb.\nConverted 46_tutorial.collab.ipynb.\nConverted 50_tutorial.datablock.ipynb.\nConverted 60_medical.imaging.ipynb.\nConverted 61_tutorial.medical_imaging.ipynb.\nConverted 65_medical.text.ipynb.\nConverted 
70_callback.wandb.ipynb.\nConverted 71_callback.tensorboard.ipynb.\nConverted 72_callback.neptune.ipynb.\nConverted 73_callback.captum.ipynb.\nConverted 74_callback.cutmix.ipynb.\nConverted 97_test_utils.ipynb.\nConverted 99_pytorch_doc.ipynb.\nConverted index.ipynb.\nConverted tutorial.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec5619b652cd27c97d5d6fdcb471642e2de1a545
273,134
ipynb
Jupyter Notebook
week1/Regularization/Regularization+-+v2.ipynb
willweil/improving_deep_neural_networks
c9af4e8e0fc32dc90207c08606af17938e2ac560
[ "Apache-2.0" ]
null
null
null
week1/Regularization/Regularization+-+v2.ipynb
willweil/improving_deep_neural_networks
c9af4e8e0fc32dc90207c08606af17938e2ac560
[ "Apache-2.0" ]
null
null
null
week1/Regularization/Regularization+-+v2.ipynb
willweil/improving_deep_neural_networks
c9af4e8e0fc32dc90207c08606af17938e2ac560
[ "Apache-2.0" ]
null
null
null
240.646696
56,104
0.89139
[ [ [ "# Regularization\n\nWelcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!\n\n**You will learn to:** Use regularization in your deep learning models.\n\nLet's first import the packages you are going to use.", "_____no_output_____" ] ], [ [ "# import packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec\nfrom reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters\nimport sklearn\nimport sklearn.datasets\nimport scipy.io\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'", "_____no_output_____" ] ], [ [ "**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. \n\n<img src=\"images/field_kiank.png\" style=\"width:600px;height:350px;\">\n<caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>\n\n\nThey give you the following 2D dataset from France's past 10 games.", "_____no_output_____" ] ], [ [ "train_X, train_Y, test_X, test_Y = load_2D_dataset()", "_____no_output_____" ] ], [ [ "Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.\n- If the dot is blue, it means the French player managed to hit the ball with his/her head\n- If the dot is red, it means the other team's player hit the ball with their head\n\n**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.", "_____no_output_____" ], [ "**Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. \n\nYou will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. ", "_____no_output_____" ], [ "## 1 - Non-regularized model\n\nYou will use the following neural network (already implemented for you below). This model can be used:\n- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use \"`lambd`\" instead of \"`lambda`\" because \"`lambda`\" is a reserved keyword in Python. \n- in *dropout mode* -- by setting the `keep_prob` to a value less than one\n\nYou will first try the model without any regularization. 
Then, you will implement:\n- *L2 regularization* -- functions: \"`compute_cost_with_regularization()`\" and \"`backward_propagation_with_regularization()`\"\n- *Dropout* -- functions: \"`forward_propagation_with_dropout()`\" and \"`backward_propagation_with_dropout()`\"\n\nIn each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.", "_____no_output_____" ] ], [ [ "def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)\n learning_rate -- learning rate of the optimization\n num_iterations -- number of iterations of the optimization loop\n print_cost -- If True, print the cost every 10000 iterations\n lambd -- regularization hyperparameter, scalar\n keep_prob - probability of keeping a neuron active during drop-out, scalar.\n \n Returns:\n parameters -- parameters learned by the model. They can then be used to predict.\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 20, 3, 1]\n \n # Initialize parameters dictionary.\n parameters = initialize_parameters(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n if keep_prob == 1:\n a3, cache = forward_propagation(X, parameters)\n elif keep_prob < 1:\n a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)\n \n # Cost function\n if lambd == 0:\n cost = compute_cost(a3, Y)\n else:\n cost = compute_cost_with_regularization(a3, Y, parameters, lambd)\n \n # Backward propagation.\n assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, \n # but this assignment will only explore one at a time\n if lambd == 0 and keep_prob == 1:\n grads = backward_propagation(X, Y, cache)\n elif lambd != 0:\n grads = backward_propagation_with_regularization(X, Y, cache, lambd)\n elif keep_prob < 1:\n grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 10000 iterations\n if print_cost and i % 10000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n if print_cost and i % 1000 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (x1,000)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters", "_____no_output_____" ] ], [ [ "Let's train the model without any regularization, and observe the accuracy on the train/test sets.", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y)\nprint (\"On the training set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6557412523481002\nCost after iteration 10000: 0.16329987525724216\nCost after iteration 20000: 0.13851642423255986\n" ] ], [ [ "The train accuracy is 94.8% while the test accuracy is 91.5%. 
This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.", "_____no_output_____" ] ], [ [ "plt.title(\"Model without regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.", "_____no_output_____" ], [ "## 2 - L2 Regularization\n\nThe standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:\n$$J = -\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} \\tag{1}$$\nTo:\n$$J_{regularized} = \\small \\underbrace{-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} }_\\text{cross-entropy cost} + \\underbrace{\\frac{1}{m} \\frac{\\lambda}{2} \\sum\\limits_l\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2} }_\\text{L2 regularization cost} \\tag{2}$$\n\nLet's modify your cost and observe the consequences.\n\n**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2}$ , use :\n```python\nnp.sum(np.square(Wl))\n```\nNote that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \\frac{1}{m} \\frac{\\lambda}{2} $.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost_with_regularization\n\ndef compute_cost_with_regularization(A3, Y, parameters, lambd):\n \"\"\"\n Implement the cost function with L2 regularization. See formula (2) above.\n \n Arguments:\n A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n parameters -- python dictionary containing parameters of the model\n \n Returns:\n cost - value of the regularized loss function (formula (2))\n \"\"\"\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n \n cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost\n \n ### START CODE HERE ### (approx. 1 line)\n L2_regularization_cost = lambd / 2 / m * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))\n ### END CODER HERE ###\n \n cost = cross_entropy_cost + L2_regularization_cost\n \n return cost", "_____no_output_____" ], [ "A3, Y_assess, parameters = compute_cost_with_regularization_test_case()\n\nprint(\"cost = \" + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))", "cost = 1.78648594516\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **cost**\n </td>\n <td>\n 1.78648594516\n </td>\n \n </tr>\n\n</table> ", "_____no_output_____" ], [ "Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. \n\n**Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. 
For each, you have to add the regularization term's gradient ($\\frac{d}{dW} ( \\frac{1}{2}\\frac{\\lambda}{m} W^2) = \\frac{\\lambda}{m} W$).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation_with_regularization\n\ndef backward_propagation_with_regularization(X, Y, cache, lambd):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added an L2 regularization.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation()\n lambd -- regularization hyperparameter, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n \n ### START CODE HERE ### (approx. 1 line)\n dW3 = 1./m * np.dot(dZ3, A2.T) + lambd / m * W3\n ### END CODE HERE ###\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW2 = 1./m * np.dot(dZ2, A1.T) + lambd / m * W2\n ### END CODE HERE ###\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW1 = 1./m * np.dot(dZ1, X.T) + lambd / m * W1\n ### END CODE HERE ###\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients", "_____no_output_____" ], [ "X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()\n\ngrads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"dW3 = \"+ str(grads[\"dW3\"]))", "dW1 = [[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\ndW2 = [[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\ndW3 = [[-1.77691347 -0.11832879 -0.09397446]]\n" ] ], [ [ "**Expected Output**:\n\n<table> \n <tr>\n <td>\n **dW1**\n </td>\n <td>\n [[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\n </td>\n </tr>\n <tr>\n <td>\n **dW2**\n </td>\n <td>\n [[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\n </td>\n </tr>\n <tr>\n <td>\n **dW3**\n </td>\n <td>\n [[-1.77691347 -0.11832879 -0.09397446]]\n </td>\n </tr>\n</table> ", "_____no_output_____" ], [ "Let's now run the model with L2 regularization $(\\lambda = 0.7)$. The `model()` function will call: \n- `compute_cost_with_regularization` instead of `compute_cost`\n- `backward_propagation_with_regularization` instead of `backward_propagation`", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y, lambd = 0.7)\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6974484493131264\nCost after iteration 10000: 0.2684918873282239\nCost after iteration 20000: 0.2680916337127301\n" ] ], [ [ "Congrats, the test set accuracy increased to 93%. 
You have saved the French football team!\n\nYou are not overfitting the training data anymore. Let's plot the decision boundary.", "_____no_output_____" ] ], [ [ "plt.title(\"Model with L2-regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "**Observations**:\n- The value of $\\lambda$ is a hyperparameter that you can tune using a dev set.\n- L2 regularization makes your decision boundary smoother. If $\\lambda$ is too large, it is also possible to \"oversmooth\", resulting in a model with high bias.\n\n**What is L2-regularization actually doing?**:\n\nL2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. \n\n<font color='blue'>\n**What you should remember** -- the implications of L2-regularization on:\n- The cost computation:\n - A regularization term is added to the cost\n- The backpropagation function:\n - There are extra terms in the gradients with respect to weight matrices\n- Weights end up smaller (\"weight decay\"): \n - Weights are pushed to smaller values.", "_____no_output_____" ], [ "## 3 - Dropout\n\nFinally, **dropout** is a widely used regularization technique that is specific to deep learning. \n**It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!\n\n<!--\nTo understand drop-out, consider this conversation with a friend:\n- Friend: \"Why do you need all these neurons to train your network and classify images?\". \n- You: \"Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!\"\n- Friend: \"I see, but are you sure that your neurons are learning different features and not all the same features?\"\n- You: \"Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution.\"\n!--> \n\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout1_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n<br>\n<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\\_prob$ or keep it with probability $keep\\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout2_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>\n\n\nWhen you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. 
With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. \n\n### 3.1 - Forward propagation with dropout\n\n**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. \n\n**Instructions**:\nYou would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:\n1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.\n2. Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X < 0.5)`. Note that 0 and 1 are respectively equivalent to False and True.\n3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.\n4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: forward_propagation_with_dropout\n\ndef forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):\n \"\"\"\n Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (20, 2)\n b1 -- bias vector of shape (20, 1)\n W2 -- weight matrix of shape (3, 20)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n A3 -- last activation value, output of the forward propagation, of shape (1,1)\n cache -- tuple, information stored for computing the backward propagation\n \"\"\"\n \n np.random.seed(1)\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n ### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above. \n D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)\n D1 = (D1 < keep_prob) # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)\n A1 = A1 * D1 # Step 3: shut down some neurons of A1\n A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n ### START CODE HERE ### (approx. 
4 lines)\n D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)\n D2 = (D2 < keep_prob) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)\n A2 = A2 * D2 # Step 3: shut down some neurons of A2\n A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache", "_____no_output_____" ], [ "X_assess, parameters = forward_propagation_with_dropout_test_case()\n\nA3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)\nprint (\"A3 = \" + str(A3))", "A3 = [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **A3**\n </td>\n <td>\n [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n </td>\n \n </tr>\n\n</table> ", "_____no_output_____" ], [ "### 3.2 - Backward propagation with dropout\n\n**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. \n\n**Instruction**:\nBackpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:\n1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. \n2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation_with_dropout\n\ndef backward_propagation_with_dropout(X, Y, cache, keep_prob):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added dropout.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation_with_dropout()\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n dA2 = np.dot(W3.T, dZ3)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation\n dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation\n dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * 
np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients", "_____no_output_____" ], [ "X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()\n\ngradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)\n\nprint (\"dA1 = \" + str(gradients[\"dA1\"]))\nprint (\"dA2 = \" + str(gradients[\"dA2\"]))", "dA1 = [[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\ndA2 = [[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **dA1**\n </td>\n <td>\n [[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\n </td>\n \n </tr>\n <tr>\n <td>\n **dA2**\n </td>\n <td>\n [[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n </td>\n \n </tr>\n</table> ", "_____no_output_____" ], [ "Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 14% probability. The function `model()` will now call:\n- `forward_propagation_with_dropout` instead of `forward_propagation`.\n- `backward_propagation_with_dropout` instead of `backward_propagation`.", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)\n\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6543912405149825\n" ] ], [ [ "Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! \n\nRun the code below to plot the decision boundary.", "_____no_output_____" ] ], [ [ "plt.title(\"Model with dropout\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "**Note**:\n- A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. \n- Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks.\n\n<font color='blue'>\n**What you should remember about dropout:**\n- Dropout is a regularization technique.\n- You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time.\n- Apply dropout both during forward and backward propagation.\n- During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. 
For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5. ", "_____no_output_____" ], [ "## 4 - Conclusions", "_____no_output_____" ], [ "**Here are the results of our three models**: \n\n<table> \n <tr>\n <td>\n **model**\n </td>\n <td>\n **train accuracy**\n </td>\n <td>\n **test accuracy**\n </td>\n\n </tr>\n <td>\n 3-layer NN without regularization\n </td>\n <td>\n 95%\n </td>\n <td>\n 91.5%\n </td>\n <tr>\n <td>\n 3-layer NN with L2-regularization\n </td>\n <td>\n 94%\n </td>\n <td>\n 93%\n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with dropout\n </td>\n <td>\n 93%\n </td>\n <td>\n 95%\n </td>\n </tr>\n</table> ", "_____no_output_____" ], [ "Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. ", "_____no_output_____" ], [ "Congratulations for finishing this assignment! And also for revolutionizing French football. :-) ", "_____no_output_____" ], [ "<font color='blue'>\n**What we want you to remember from this notebook**:\n- Regularization will help you reduce overfitting.\n- Regularization will drive your weights to lower values.\n- L2 regularization and Dropout are two very effective regularization techniques.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec5624c67d3d3d149a028d05c7d3796870c65967
32,988
ipynb
Jupyter Notebook
Examples/Faulted/Three_Faults/Cutting_Faults.ipynb
Ali1990dashti/GeoMeshPy
ec1acd13b77b7177fa3b6e2e5b7ae95fbeac6ad6
[ "MIT" ]
null
null
null
Examples/Faulted/Three_Faults/Cutting_Faults.ipynb
Ali1990dashti/GeoMeshPy
ec1acd13b77b7177fa3b6e2e5b7ae95fbeac6ad6
[ "MIT" ]
null
null
null
Examples/Faulted/Three_Faults/Cutting_Faults.ipynb
Ali1990dashti/GeoMeshPy
ec1acd13b77b7177fa3b6e2e5b7ae95fbeac6ad6
[ "MIT" ]
null
null
null
50.286585
143
0.539681
[ [ [ "# Models with three faults and two layers\nThis example tries to visualize how simply _GeoMeshPy_ can export results from _Gempy_. <br>\nThis notebook include a complicated model with two layers and three fault. <br>\nTo see how _Gempy_ works, please see https://github.com/cgre-aachen/gempy", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport copy\nimport math\nimport gempy as gp\nimport numpy as np\nfrom numpy import savetxt\ngeo_model = gp.create_model('3_F')\ngp.init_data(geo_model, [0., 1400., 0., 1000., -1800., -1000.], [40, 40, 40],\n path_i = 'thr_F_interfaces.csv',\n path_o = 'thr_F_orientations.csv');\ngeo_model.add_surfaces('Reservoir')\ngp.map_series_to_surfaces(geo_model,\n {\"Fault1_series\":'fault1',\n \"Fault2_series\":'fault2',\n \"Fault3_series\":'fault3',\n \"Strati_series\":('Cap_rock', 'Reservoir')})\ngeo_model.set_is_fault(['Fault1_series', 'Fault2_series', 'Fault3_series'], change_color=False)\nfr = np.array([[False, False, False, True, True],\n [False, False, False, True, True],\n [False, False, False, False, False],\n [False, False, False, False, False],\n [False, False, False, False, False]])\ngeo_model.set_fault_relation(fr)\ngp.set_interpolator(geo_model,\n compile_theano=True,\n theano_optimizer='fast_compile',\n verbose=[])\nsol = gp.compute_model(geo_model)\nextent = geo_model.grid.regular_grid.extent\nresolution = geo_model.grid.regular_grid.resolution.reshape(-1,1)\ndf=geo_model.series.df\nif len (np.unique (sol.fault_block))>1:\n no_of_faults=df.groupby(by='BottomRelation').count().iloc[1,0]\nelse:\n no_of_faults=0\n\nsurfaces=geo_model.surface_points.df['surface']\nif no_of_faults==0:\n surfaces_layer=[i for i in surfaces.unique()]\nelse:\n surfaces_layer=[i for i in surfaces.unique()[no_of_faults:]]\n fault_name=[i for i in surfaces.unique()[:no_of_faults]]\ngrid=geo_model.grid.values\nz_resolution = abs (grid[0,-1] - grid[1,-1])\nres_x=abs(extent[1]-extent[0])/resolution[0,0]\nsurfaces_layer.append('Basement')\nlith_blocks = np.array([])\nver=[]\nfault_ind=[]\nn_iter = 10\nfor i in range(n_iter):\n# Initialization of the Gempy model\n df_int_X = copy.copy(geo_model.surface_points.df['X'])\n df_int_Y = copy.copy(geo_model.surface_points.df['Y'])\n df_int_Z = copy.copy(geo_model.surface_points.df['Z'])\n df_or_X = copy.copy(geo_model.orientations.df['X'])\n df_or_Y = copy.copy(geo_model.orientations.df['Y'])\n df_or_Z = copy.copy(geo_model.orientations.df['Z'])\n df_or_dip = copy.copy(geo_model.orientations.df['dip'])\n df_or_azimuth = copy.copy(geo_model.orientations.df['azimuth'])\n surfindexes = list(geo_model.surface_points.df.index)\n orindexes = list(geo_model.orientations.df.index)\n geo_model.modify_surface_points(surfindexes, X=df_int_X, Y=df_int_Y, Z=df_int_Z)\n geo_model.modify_orientations(orindexes, X=df_or_X, Y=df_or_Y, Z=df_or_Z,dip = df_or_dip, azimuth = df_or_azimuth)\n \n fault_3_surfpoints = geo_model.surface_points.df.surface.isin(['fault3'])\n indexes_Fa_3_sp = geo_model.surface_points.df[fault_3_surfpoints].index\n fault_3_orient = geo_model.orientations.df.surface.isin(['fault3'])\n index_Fa_3_o = geo_model.orientations.df[fault_3_orient].index\n# Randomization_Method\n if i == 0: # in the first step we do not want any change\n std1=std2=0\n else:\n std1=10\n std1=5\n rand1 = np.random.uniform(-std1, std1, size=1)\n rand2 = np.random.uniform(-std2, std2, size=1)\n# Randomized_input \n a = geo_model.surface_points.df['Z'].values[fault_3_surfpoints][0] + rand1\n b = 
geo_model.surface_points.df['Z'].values[fault_3_surfpoints][1] + rand1\n new_Z_fa_3 = np.array([a,b])\n new_Z_fa_3 = new_Z_fa_3.flatten()\n new_Y_fa_3 = geo_model.surface_points.df['Y'].values[fault_3_surfpoints]\n new_X_fa_3 = geo_model.surface_points.df['X'].values[fault_3_surfpoints]\n new_o_fa_3 =geo_model.orientations.df['azimuth'].values[fault_3_orient] + rand2\n# Modifier\n geo_model.modify_surface_points(indexes_Fa_3_sp, Z = new_Z_fa_3)\n geo_model.modify_orientations(index_Fa_3_o, azimuth = new_o_fa_3)\n# this block updates the model\n geo_model.set_is_fault(['Fault1_series', 'Fault2_series', 'Fault3_series'], change_color=False)\n fr = np.array([[False, False, False, True, True],\n [False, False, False, True, True],\n [False, False, False, False, False],\n [False, False, False, False, False],\n [False, False, False, False, False]])\n geo_model.set_fault_relation(fr)\n geo_model.update_to_interpolator()\n sol=gp.compute_model(geo_model)\n # Export Block\n ver.append(geo_model.solutions.vertices)\n lith_blocks = np.append(lith_blocks, geo_model.solutions.lith_block)\n fault_ind.append (np.hstack([grid,np.round(sol.fault_block.T[0:sol.grid.values.shape[0]])]))", "_____no_output_____" ], [ "lith_blocks = lith_blocks.reshape(n_iter, -1)\nlays_fault_name=geo_model.surface_points.df.loc[:, 'surface'].unique()\nall_vers=[list(column) for column in zip(*ver)]\ndf=geo_model.series.df\nno_of_faults=df.groupby(by='BottomRelation').count().iloc[1,0]\nname_of_faults=lays_fault_name[0:no_of_faults].tolist()\nname_of_layers=lays_fault_name[no_of_faults:].tolist()", "_____no_output_____" ], [ "from GeoMeshPy import vmod # the class vmod allows you for doing all the required calculations", "_____no_output_____" ], [ "z_resolution = 20\nfr = np.array([[True],\n [True],\n [False]])\nname_of_layers = np.array(['Cap_rock', 'Reservoir'])\nmodel_faulted = vmod.vertice_modifier(n_iter, no_of_faults, all_vers, name_of_layers, z_resolution, fr, extent, resolution)\nsub_fourc_list = model_faulted.faults_corners()[0]\nlen_fal = model_faulted.faults_corners()[1]\nnew_result_list = model_faulted.contact_generator()[0]\nlength_layers = model_faulted.contact_generator()[1]\nrepre_pts = model_faulted.contact_generator()[2]", "_____no_output_____" ] ], [ [ "The visialization in the next block reveals some facts: <br>\n1. There are extra redundant point in verticed coming out of Gempy\n2. Gempy does not cut layers when while they are relocated by fault. in the other words,\nlayers just strech along the fault surfaces.\n3. Almost caused by 2, contact of the layer is uniform while there ae two faults cutting it. To solve\nthis issue, surfaces are deivided based on the existing fault.\nFor example, in this case the cotact should be split into three patches which are \nshown by different color in the visualization cell. 
If you zoom in the contact of \nlayer and two cutting fault, you will see some vertices of Gempy there.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import (AutoMinorLocator, MultipleLocator)\n%matplotlib qt5\n\nfig = plt.figure()\nax = fig.add_subplot (111, projection=\"3d\")\n\n# Gempy utputs for the only layers and third fault\nGempy_out_layer = all_vers[3][0]\nGempy_out_fault_3 = all_vers[2][0]\nx2 = Gempy_out_layer[:,0]; y2 = Gempy_out_layer[:,1]; z2 = Gempy_out_layer[:,2]\nax.scatter3D(x2,y2,z2, color='k', s=1, label='Raw output')\nx2 = Gempy_out_fault_3[:,0]; y2 = Gempy_out_fault_3[:,1]; z2 = Gempy_out_fault_3[:,2]\nax.scatter3D(x2,y2,z2, color='k', s=1, label='Raw output')\n\n\n# cleaned and separated data coming from GeoMeshPy\nfaults = np.array(sub_fourc_list[0])\nf1 = faults[:4,:]\nf2 = faults[4:8,:]\nf3 = faults[8:12,:]\nx2=faults[:,0]; y2=faults[:,1]; z2=faults[:,2]\nax.scatter3D(x2,y2,z2, color='r', s=10, marker= '*', label='Faults corners')\nax.plot_surface(np.array([[f1[0,0], f1[1,0]], [f1[3,0], f1[2,0]]]),\n np.array([[f1[0,1], f1[1,1]], [f1[3,1], f1[2,1]]]),\n np.array([[f1[0,2], f1[1,2]], [f1[3,2], f1[2,2]]]), color='b', alpha = 0.5, label='Injection Fault')\nax.plot_surface(np.array([[f2[0,0], f2[1,0]], [f2[3,0], f2[2,0]]]),\n np.array([[f2[0,1], f2[1,1]], [f2[3,1], f2[2,1]]]),\n np.array([[f2[0,2], f2[1,2]], [f2[3,2], f2[2,2]]]), color='g', alpha = 0.5, label='Production Fault')\nax.plot_surface(np.array([[f3[0,0], f3[1,0]], [f3[3,0], f3[2,0]]]),\n np.array([[f3[0,1], f3[1,1]], [f3[3,1], f3[2,1]]]),\n np.array([[f3[0,2], f3[1,2]], [f3[3,2], f3[2,2]]]), color='r', alpha = 0.8, label='Connecting Fault')\n\npo = np.array(new_result_list[0])\nle = np.array(length_layers[0]).astype('int')\nle = np.cumsum (le)\nax.scatter3D(po[:le[0],0],po[:le[0],1],po[:le[0],2], facecolor='None', color='orange', s=5, linewidths=0.5)\nax.scatter3D(po[le[0]:le[1],0],po[le[0]:le[1],1],po[le[0]:le[1],2],facecolor='None',linewidths=0.5, color='b', s=5)\nax.scatter3D(po[le[1]:le[2],0],po[le[1]:le[2],1],po[le[1]:le[2],2],facecolor='None',linewidths=0.5, color='r', s=5)\n\n\n# representative point proposed by GeoMeshPy\nreps = np.array(repre_pts[0])[:,:-1].astype('float')\nax.scatter3D(reps[:3,0],reps[:3,1],reps[:3,2], marker= '*', color='orange', s=50)\nax.scatter3D(reps[3:,0],reps[3:,1],reps[3:,2], marker= '*', color='c', s=50)\n\n\nax.set_yticks([0, 500, 1000])\nax.set_xticks([0, 700, 1400])\nax.set_zticks([-1000, -1400, -1800])\nax.set_ylim(0, 1000)\nax.set_xlim([0, 1400])\nax.set_zlim([-1800, -1000])\nax.tick_params(axis='both', which='major', labelsize=10)\nax._facecolors2d = ax._facecolor\nax.grid(None)\nplt.show()\nax.view_init(5, 270)", "_____no_output_____" ], [ "# in this block you can export outputs of GeoMeshPy to avoir running GemPY and GeomeshPy again\nfrom numpy import savetxt\nsets = zip(sub_fourc_list, new_result_list, repre_pts, len_fal)\nfor ind, (crn_fal, vertices, rep_pnt, len_fals) in enumerate(sets):\n savetxt(f'fal_crn_{ind}.csv', np.array(crn_fal), delimiter=',')\n savetxt(f'vertices_{ind}.csv', np.array(vertices), delimiter=',')\n savetxt(f'rep_pnt_{ind}.csv', np.array(rep_pnt), delimiter=',', fmt=\"%s\")\n savetxt(f'len_fals_{ind}.csv', np.array(len_fals), delimiter=',')\nsavetxt('len_layer.csv', length_layers, delimiter=',')", "_____no_output_____" ], [ "import numpy as np\nimport copy\nextent = np.array([0., 1400., 0., 1000., -1800., -1000.])\nfrom GeoMeshPy import 
vmod\nn_iter=10\n\n# this model has two wells peneterating into the two faults\n\nwells=[[600.,500.,-1350.], [550.,500.,-1450.],\\\n [850.,500.,-1350.], [900.,500.,-1450.]]\nwel_iter=np.split (np.tile(wells, (n_iter, 1)), n_iter)\nwells_cord=[i.tolist() for i in wel_iter]\nwell_points=[[2., 2]]\nwell_p_iter=np.split (np.tile(well_points, (n_iter, 1)), n_iter)\nwell_points=[i.tolist() for i in well_p_iter]\nwl_names=['W_1', 'W_2']\n\n\nname_of_faults=['fault1', 'fault2', 'fault3']\nno_of_faults= len (name_of_faults)\nsub_fourc_list=[]\nnew_result_list=[]\nrepre_pts=[]\nlen_fal=[]\nfrom numpy import genfromtxt\nlength_layers=genfromtxt('len_layer.csv', delimiter=',').tolist()\nimport glob\nfiles_fal_cr = glob.glob(\"fal_crn_*.csv\")\nfiles_fal_crn= sorted(files_fal_cr, key=lambda name: int(name[8:-4]))\nfiles_ve = glob.glob(\"vertices_*.csv\")\nfiles_ver = sorted(files_ve, key=lambda name: int(name[9:-4]))\nfiles_repr= glob.glob(\"rep_pnt_*.csv\")\nfiles_repre= sorted(files_repr, key=lambda name: int(name[8:-4]))\nfiles_le= glob.glob(\"len_fals_*.csv\")\nfiles_len= sorted(files_le, key=lambda name: int(name[9:-4]))\nset_names = zip(files_fal_crn, files_ver, files_repre, files_len)\nfor name_fal, name_ver, name_rep, name_len in set_names:\n fal_crn=np.around(genfromtxt(name_fal, delimiter=','), decimals=6)\n sub_fourc_list.append(fal_crn.tolist())\n new_result_list.append(np.around(genfromtxt(name_ver, delimiter=','), decimals=6).tolist())\n repre_pts.append(genfromtxt(name_rep, delimiter=',', dtype=str).tolist())\n len_fal.append([genfromtxt(name_len, delimiter=',').tolist()])", "_____no_output_____" ] ], [ [ "This cell will add another representative point to the model becasue you have <br>\na fault which is just passing through the model. Thins fault in GMSH will make an extra volume <br>\nbut the volume is not close to any surface.", "_____no_output_____" ] ], [ [ "extra = ['750.', '500.', '-1700.', 'Reservoir']\nfor i in range (len(repre_pts)):\n repre_pts[i].insert(len(repre_pts[i]),extra)", "_____no_output_____" ], [ "from numpy import savetxt\nimport gmsh\nimport itertools\nfrom itertools import chain\ngmsh.initialize()\nif no_of_faults>0:\n def cleanup_and_mesh(entities_to_preserve):\n # remove all embedded constraints, i.e. the entities that are not on the\n # boundary of others\n entities = gmsh.model.getEntities()\n for e in entities:\n emb = gmsh.model.mesh.getEmbedded(e[0], e[1])\n gmsh.model.mesh.removeEmbedded([e])\n for p in entities_to_preserve:\n if p in emb:\n gmsh.model.mesh.embed(p[0], [p[1]], e[0], e[1])\n # remove all surfaces, curves and points that are not connected to any\n # higher-dimensional entities\n gmsh.model.removeEntities(gmsh.model.getEntities(2), True)\n cc = gmsh.model.getEntities(1)\n for c in curves_to_preserve:\n cc.remove(c)\n gmsh.model.removeEntities(cc, True)\n gmsh.model.removeEntities(gmsh.model.getEntities(0))\n # get all surfaces that are not of type \"Plane\", i.e. 
all surfaces except the\n # box\n surfaces = [s[1] for s in gmsh.model.getEntities(2) if gmsh.model.getType(s[0], s[1])\n != 'Plane']\n # also refine close to the wells\n surface_after = gmsh.model.getEntities(2)\n points=copy.deepcopy(surface_new_tag)\n check_values=[row[-1] for row in surface_after]\n extracted = []\n for sublist in points:\n second_vals = [sec for fir, sec in sublist]\n if all(val in check_values for val in second_vals):\n extracted.append(second_vals)\n fl=[item for sublist in extracted[6:] for item in sublist]\n# fl_sur.append(fl)\n contact_surfaces = list(set(surfaces) - set(fl))\n # create a distance + threshold mesh size field w.r.t. these surfaces\n gmsh.model.mesh.field.add(\"Distance\", 1)\n gmsh.model.mesh.field.setNumbers(1, \"SurfacesList\", [sp_fls[0][0], sp_fls[1][1], sp_fls[2][1]])\n gmsh.model.mesh.field.setNumber(1, \"Sampling\", 100)\n gmsh.model.mesh.field.add(\"Threshold\", 2)\n gmsh.model.mesh.field.setNumber(2, \"InField\", 1)\n gmsh.model.mesh.field.setNumber(2, \"SizeMin\", 30)\n gmsh.model.mesh.field.setNumber(2, \"SizeMax\", 100)\n gmsh.model.mesh.field.setNumber(2, \"DistMin\", 35)\n gmsh.model.mesh.field.setNumber(2, \"DistMax\", 100)\n gmsh.model.mesh.field.add(\"Distance\", 3)\n gmsh.model.mesh.field.setNumbers(3, \"PointsList\", fal_wl_conection)\n gmsh.model.mesh.field.setNumber(3, \"Sampling\", 100)\n gmsh.model.mesh.field.add(\"Threshold\", 4)\n gmsh.model.mesh.field.setNumber(4, \"InField\", 3)\n gmsh.model.mesh.field.setNumber(4, \"SizeMin\", 10.0)\n gmsh.model.mesh.field.setNumber(4, \"SizeMax\", 100)\n gmsh.model.mesh.field.setNumber(4, \"DistMin\", 10.)\n gmsh.model.mesh.field.setNumber(4, \"DistMax\", 50.)\n gmsh.model.mesh.field.add(\"Distance\", 5)\n gmsh.model.mesh.field.setNumbers(5, \"CurvesList\", np.array(curves_to_preserve)[:,1].tolist())\n gmsh.model.mesh.field.setNumber(5, \"Sampling\", 1000)\n gmsh.model.mesh.field.add(\"Threshold\", 6)\n gmsh.model.mesh.field.setNumber(6, \"InField\", 5)\n gmsh.model.mesh.field.setNumber(6, \"SizeMin\", 5.)\n gmsh.model.mesh.field.setNumber(6, \"SizeMax\", 100)\n gmsh.model.mesh.field.setNumber(6, \"DistMin\", 6)\n gmsh.model.mesh.field.setNumber(6, \"DistMax\", 100)\n gmsh.model.mesh.field.add(\"Distance\", 7)\n gmsh.model.mesh.field.setNumbers(7, \"SurfacesList\", contact_surfaces)\n gmsh.model.mesh.field.setNumber(7, \"Sampling\", 100)\n gmsh.model.mesh.field.add(\"Threshold\", 8)\n gmsh.model.mesh.field.setNumber(8, \"InField\", 7)\n gmsh.model.mesh.field.setNumber(8, \"SizeMin\", 15)\n gmsh.model.mesh.field.setNumber(8, \"SizeMax\", 100)\n gmsh.model.mesh.field.setNumber(8, \"DistMin\", 20)\n gmsh.model.mesh.field.setNumber(8, \"DistMax\", 100)\n gmsh.model.mesh.field.add(\"Min\", 9)\n gmsh.model.mesh.field.setNumbers(9, \"FieldsList\", [2,4,6,8])\n gmsh.model.mesh.field.setAsBackgroundMesh(9)\n gmsh.option.setNumber(\"Mesh.MeshSizeMax\", 100)\n # don't extend mesh sizes from boundaries and use new 3D algo\n gmsh.option.setNumber(\"Mesh.MeshSizeExtendFromBoundary\", 0)\n gmsh.option.setNumber(\"Mesh.Algorithm3D\", 10)\n gmsh.model.mesh.generate(3)\n \n # using representative points to create physical volumes\n rep=[list(x) for _,x in itertools.groupby(rep_pnt,lambda x:x[3])]\n vol_num=np.arange(1,1+len(rep))\n for ind, surfaces in enumerate (rep):\n tags=[]\n for sects in surfaces:\n eleTag = gmsh.model.mesh.getElementByCoordinates(float (sects[0]), float (sects[1]), float (sects[2]))[0]\n eleType, eleNodes, entDim, entTag = gmsh.model.mesh.getElement(eleTag)\n 
tags.append(entTag)\n gmsh.model.addPhysicalGroup(3, tags, vol_num[ind])\n gmsh.model.setPhysicalName(3, vol_num[ind], sects[-1]) \n for tag_nu, name in zip (sp_fls, name_of_faults):\n ps1 = gmsh.model.addPhysicalGroup(2, tag_nu)\n gmsh.model.setPhysicalName(2, ps1, name) \n #adding wells as physical lines\n for lines, well_name in zip (sp_well, wl_names):\n l1 = gmsh.model.addPhysicalGroup(1, lines.tolist())\n gmsh.model.setPhysicalName(1, l1, well_name) \n around_box=['in', 'out', 'front', 'back', 'bottom', 'top'] \n for tag_nu, name in zip (extracted[:6], around_box):\n ps1 = gmsh.model.addPhysicalGroup(2, tag_nu)\n gmsh.model.setPhysicalName(2, ps1, name)\n inj_prod=['Injection_point', 'Production_point']\n inj_prod_t=[points_to_preserve[0][1], points_to_preserve [len (sp_well[0])*2][1]]\n for t, po_n in zip (inj_prod_t, inj_prod):\n p1=gmsh.model.addPhysicalGroup(0, [t])\n gmsh.model.setPhysicalName(0, p1, po_n)\n \n connections=['Conj_1', 'Conj_2']\n for t, po_n in zip (fal_wl_conection, connections):\n p1=gmsh.model.addPhysicalGroup(0, [t])\n gmsh.model.setPhysicalName(0, p1, po_n)\n \n \n gmsh.write(\"Moded_3Faults_\" + str(kk) + \".msh\")\n gmsh.fltk.run()\n gmsh.initialize()\n degree = 3\n numPointsOnCurves = 10\n numIter = 10\n anisotropic = False\n tol2d = 0.00001\n tol3d = .1\n tolAng = 1\n tolCurv = 1\n maxDegree = 3\n maxSegments = 100\n sets = zip(sub_fourc_list, new_result_list, repre_pts, wells_cord, well_points, len_fal)\n for kk, (crn_fal, vertices, rep_pnt, well_cord, well_p, len_fals) in enumerate(sets):\n ar=np.array(vertices)\n l_tags=[]\n sp_fal_num=np.cumsum(np.array(len_fals).astype('int'))\n sp_fal=np.split (np.array(crn_fal), sp_fal_num[:-1])\n gmsh.model.occ.addBox(min(ar[:,0]),crn_fal[0][1],crn_fal[0][2],max(ar[:,0])-min(ar[:,0]),\n crn_fal[1][1]-crn_fal[0][1],crn_fal[2][2]-crn_fal[0][2])\n\n for i in range (len(sp_fal)):\n for [x, y, z] in sp_fal[i]: \n gmsh.model.occ.addPoint(x, y, z)\n tag_p_fal=np.arange(9, len (crn_fal)+9)\n tag_sp_fal=np.split (tag_p_fal, sp_fal_num[:-1])\n for i in tag_sp_fal:\n for j in range (len(i)):\n if j==len(i)-1:\n gmsh.model.occ.addLine (i[j], i[0])\n else:\n gmsh.model.occ.addLine (i[j], i[j+1])\n tag_l_fal=np.arange(13, len (crn_fal)+13)\n tag_sl_fal=np.split (tag_l_fal, sp_fal_num[:-1])\n for i in tag_sl_fal:\n lop=i.tolist()\n gmsh.model.occ.addCurveLoop(lop, lop[0]*10)\n gmsh.model.occ.addSurfaceFilling(lop[0]*10, lop[0]*10)\n\n spl_num=np.cumsum(length_layers[kk]).tolist()[:-1] # each cloud of points is separated\n spl_num=[int (i) for i in spl_num]\n sep_ar=np.split(ar,spl_num)\n for ind, point_clouds in enumerate (sep_ar):\n i_l=point_clouds.tolist()\n for [x, y, z] in i_l:\n gmsh.model.occ.addPoint(x, y, z)\n if len (point_clouds)>3:\n y_sub=np.unique(point_clouds[:,1].round(5),return_counts=True)[1]\n x_sub=np.unique(point_clouds[:,0].round(5),return_counts=True)[1]\n pts=[]\n for j in np.split (point_clouds, np.cumsum(x_sub)[:-1]):\n if (j[0]!=j[-1]).any():\n pts.append([j[0], j[-1]])\n for m in np.split (point_clouds[np.lexsort((point_clouds[:,0],point_clouds[:,1]))], np.cumsum(y_sub)[:-1]):\n if (m[0]!=m[-1]).any():\n pts.append([m[0], m[-1]])\n a=[[j.tolist() for j in i] for i in pts]\n b = list(chain.from_iterable(a))\n c=list(set(tuple(x) for x in b))\n d=[list(i) for i in c]\n f= [sublist for sublist in d]\n g=np.array(f)\n h=g[np.lexsort((g[:,1],g[:,0]))] # it include all the extrerior points of the cloud\n pnt=h[:,0:-1].tolist()\n arround_pts=vmod.vertice_modifier.rotational_sort(pnt, 
(np.mean(np.array(pnt)[:,0]),np.mean(np.array(pnt)[:,1])),True)\n tags=np.where((point_clouds[:,:-1]==np.array(arround_pts)[:,None]).all(-1))[1] + 1\n l_tags.append(len(tags))\n start_point=int (8+len(crn_fal)+np.sum(length_layers[kk][0:ind]))\n start_line=int (12+len(crn_fal)+1+np.sum(l_tags[0:ind]))\n for i in range (len(tags)): # this for loop creates the exterior lines of each cloud\n if i!=len(tags)-1:\n gmsh.model.occ.addSpline([tags[i]+start_point,tags[i+1]+start_point])\n else:\n gmsh.model.occ.addSpline([tags[i]+start_point,tags[0]+start_point])\n gmsh.model.occ.addCurveLoop([i for i in range (start_line, start_line+len(tags))], start_line*10)\n gmsh.model.occ.addSurfaceFilling(start_line*10, start_line*10,\n [m for m in range (start_point+1, start_point+np.max(tags))\n if m not in tags+start_point],\n degree,\n numPointsOnCurves,\n numIter,\n anisotropic,\n tol2d,\n tol3d,\n tolAng,\n tolCurv,\n maxDegree,\n maxSegments) # create surface by connecting exterior lines\n\n # and inclding interior ones\n gmsh.model.occ.synchronize()\n gmsh.option.setNumber('Geometry.ToleranceBoolean', 0.01)\n\n in_surf = gmsh.model.occ.getEntities(2)\n\n # TODO generalize here for all your wells:\n\n tag_well=np.arange(10000, 10000+len(np.array(well_cord)))\n well_p=np.array(well_p[0]).astype('int')\n tag_well_po=np.split (tag_well, np.cumsum(well_p)[:-1])\n well_po=np.split (np.array(well_cord), np.cumsum(well_p)[:-1]) \n for cord, tag_nu in zip (well_po, tag_well_po):\n for [x, y, z], num in zip (cord,tag_nu):\n gmsh.model.occ.addPoint(x, y, z, tag=num)\n well_l=well_p-1 \n tag_w_l=np.arange(5000, 5000+np.sum(well_l))\n wl=[i.tolist() for i in tag_w_l]\n tag_well_l=np.split (tag_w_l, np.cumsum(well_l)[:-1])\n\n for po, tag_num in zip (tag_well_po, tag_well_l):\n for i in range (len(po)-1):\n gmsh.model.occ.addLine (po[i], po[int(i+1)], tag=tag_num[i])\n in_wells=[(1, i) for i in tag_w_l]\n out_all=gmsh.model.occ.fragment(in_surf+in_wells, gmsh.model.occ.getEntities(3))#[1]\n out=out_all[1]\n surface_new_tag = out[0:len(in_surf)]\n c = out[len(in_surf):len(in_surf+in_wells)]\n curves_to_preserve = [item for sublist in c for item in sublist]\n gmsh.model.occ.synchronize()\n points_to_preserve=gmsh.model.getBoundary(curves_to_preserve, combined=False)\n line_sp=np.array([])\n for i in range (len(points_to_preserve)-1):\n if i%2!=0:\n if points_to_preserve[i][1]!=points_to_preserve[i+1][1]:\n brk=int ((i+1)/2)\n line_sp=np.append(line_sp, brk)\n sp_well=np.split(np.array(curves_to_preserve)[:,1],line_sp.astype('int'))\n preserving=[i[1] for i in points_to_preserve]\n pres_cord=[]\n for i in preserving:\n no_parametrization = []\n [x_p, y_p, z_p] = gmsh.model.getValue(0, i, no_parametrization)\n pres_cord.append([x_p, y_p, z_p])\n nons= np.where((np.array(pres_cord)==np.array(well_cord)[:,None]).all(-1))[1] # indices of new_points in wells\n # that are created by their connection with fault\n fal_wl_conection=list (set (np.delete(np.array(preserving), nons))) # tag of new_points in wells\n lst = [m for i,m in enumerate (pres_cord) if i not in nons.tolist()]\n p_ar = np.unique(np.array (lst), axis=0)\n\n# extracting fault indices\n fault_tag_num = [i[0]*10 for i in tag_sl_fal]\n ind_fault_surface = [x for x, y in enumerate(in_surf) if y[1] in fault_tag_num]\n sp_f = [i for ind, i in enumerate (surface_new_tag) if ind in ind_fault_surface]\n sp_fls = [[i[1] for i in j] for j in sp_f]\n cleanup_and_mesh(curves_to_preserve + points_to_preserve)\n gmsh.clear()\n gmsh.finalize()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec5626e0938c696b58169eed9475d80effc4f2b9
63,063
ipynb
Jupyter Notebook
ComparePretrained_InceptionXferLearning.ipynb
suvarnak/PretrainingCustomModelsKeras
10d2cfeab4362b0970a92b522894fc44005a42c2
[ "Apache-2.0" ]
null
null
null
ComparePretrained_InceptionXferLearning.ipynb
suvarnak/PretrainingCustomModelsKeras
10d2cfeab4362b0970a92b522894fc44005a42c2
[ "Apache-2.0" ]
null
null
null
ComparePretrained_InceptionXferLearning.ipynb
suvarnak/PretrainingCustomModelsKeras
10d2cfeab4362b0970a92b522894fc44005a42c2
[ "Apache-2.0" ]
null
null
null
73.158933
357
0.569002
[ [ [ "!rm -r keras-transfer-learning-for-oxford102\n!git clone https://github.com/arsey/keras-transfer-learning-for-oxford102.git\n!git clone https://github.com/suvarnak/code-colt.git", "rm: cannot remove 'keras-transfer-learning-for-oxford102': No such file or directory\nCloning into 'keras-transfer-learning-for-oxford102'...\nremote: Counting objects: 975, done.\u001b[K\nremote: Total 975 (delta 0), reused 0 (delta 0), pack-reused 974\u001b[K\nReceiving objects: 100% (975/975), 39.19 MiB | 56.60 MiB/s, done.\nResolving deltas: 100% (309/309), done.\nCloning into 'code-colt'...\nremote: Counting objects: 27637, done.\u001b[K\nremote: Compressing objects: 100% (4/4), done.\u001b[K\nremote: Total 27637 (delta 0), reused 2 (delta 0), pack-reused 27633\u001b[K\nReceiving objects: 100% (27637/27637), 1.69 GiB | 46.87 MiB/s, done.\nResolving deltas: 100% (15/15), done.\n" ], [ "!ls\nimport os\nos.chdir('/content/keras-transfer-learning-for-oxford102')", "code-colt datalab keras-transfer-learning-for-oxford102\r\n" ], [ "!pip install -r requirements.txt\n!pip install tensorflow-gpu==1.4\n", "Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 1))\r\nCollecting scipy==0.19.0 (from -r requirements.txt (line 2))\n Downloading scipy-0.19.0-cp36-cp36m-manylinux1_x86_64.whl (48.2MB)\n\u001b[K 100% |████████████████████████████████| 48.2MB 29kB/s \n\u001b[?25hCollecting numpy==1.12.1 (from -r requirements.txt (line 3))\n Downloading numpy-1.12.1-cp36-cp36m-manylinux1_x86_64.whl (16.8MB)\n\u001b[K 100% |████████████████████████████████| 16.8MB 83kB/s \n\u001b[?25hCollecting pandas==0.19.2 (from -r requirements.txt (line 4))\n Downloading pandas-0.19.2-cp36-cp36m-manylinux1_x86_64.whl (18.9MB)\n\u001b[K 100% |████████████████████████████████| 18.9MB 75kB/s \n\u001b[?25hRequirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 5))\nRequirement already satisfied: Keras in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 6))\nRequirement already satisfied: scikit_learn in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 7))\nCollecting theano==0.9.0 (from -r requirements.txt (line 8))\n Downloading Theano-0.9.0.tar.gz (3.1MB)\n\u001b[K 100% |████████████████████████████████| 3.1MB 438kB/s \n\u001b[?25hRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 9))\nRequirement already satisfied: Pillow in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 10))\nCollecting tensorflow-gpu (from -r requirements.txt (line 11))\n Downloading tensorflow_gpu-1.7.0-cp36-cp36m-manylinux1_x86_64.whl (256.2MB)\n\u001b[K 34% |███████████ | 88.8MB 43.0MB/s eta 0:00:04" ], [ "!python train.py --model=resnet50 --nb_epoch=10 --data_dir=/content/code-colt/data/KitchenUtensils-6/\n!python predict.py --model=resnet50 --path=/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/ --data_dir=/content/code-colt/data/KitchenUtensils-6/ --accuracy", "/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. 
`IPython.html.widgets` has moved to `ipywidgets`.\r\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\nCreating model...\n2018-03-30 16:58:56.956554: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2018-03-30 16:58:57.059424: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2018-03-30 16:58:57.059837: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties: \nname: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\npciBusID: 0000:00:04.0\ntotalMemory: 11.17GiB freeMemory: 11.10GiB\n2018-03-30 16:58:57.059905: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\nDownloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5\n38666240/94653016 [===========>..................] - ETA: 5s" ], [ "!python train.py --model=resnet50 --nb_epoch=10 --data_dir=/content/code-colt/data// --freeze_layers_number=12\n!python predict.py --model=resnet50 --path=/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/ --data_dir=/content/code-colt/data/KitchenUtensils-6/ --accuracy", "/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\r\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\n[Errno 2] No such file or directory: '/content/code-colt/data//train/'\nTraceback (most recent call last):\n File \"train.py\", line 53, in <module>\n init()\n File \"train.py\", line 29, in init\n util.set_classes_from_train_dir()\n File \"/content/keras-transfer-learning-for-oxford102/util.py\", line 147, in set_classes_from_train_dir\n config.classes = sorted([o for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))])\nFileNotFoundError: [Errno 2] No such file or directory: '/content/code-colt/data//train/'\n/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. 
`IPython.html.widgets` has moved to `ipywidgets`.\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\n==================================================\nCalled with args:\nNamespace(accuracy=True, batch_size=500, data_dir='/content/code-colt/data/KitchenUtensils-6/', execution_time=False, model='resnet50', novelty_detection=False, path='/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/', plot_confusion_matrix=False, store_activations=False)\nCreating model\n2018-03-30 17:01:57.677234: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2018-03-30 17:01:57.775101: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2018-03-30 17:01:57.775486: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties: \nname: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\npciBusID: 0000:00:04.0\ntotalMemory: 11.17GiB freeMemory: 11.10GiB\n2018-03-30 17:01:57.775531: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n/content/keras-transfer-learning-for-oxford102/models/resnet50.py:32: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"pr...)`\n self.model = Model(input=base_model.input, output=predictions)\nFound 35 files\nBatch 0\nWarming up the model\nWarming up took 0.9281399999999991 s\nPrediction on batch 0 took: 0.7258750000000003\n| should be 0 (DINNER_FORK) -> predicted as 0.0 (DINNER_FORK)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 5.0 (WOODEN_SPOON)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 5.0 (WOODEN_SPOON)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 0.0 (DINNER_FORK)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 5.0 (WOODEN_SPOON)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 
(SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 5.0 (WOODEN_SPOON)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 2.0 (FISH_SLICE)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\n| should be 0 (DINNER_FORK) -> predicted as 4.0 (SPATULA)\nAccuracy 0.05714285714285714\n" ], [ "!python train.py --model=resnet50 --nb_epoch=10 --data_dir=/content/code-colt/data/KitchenUtensils-6/ --freeze_layers_number=8\n!python predict.py --model=resnet50 --path=/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/ --data_dir=/content/code-colt/data/KitchenUtensils-6/ --accuracy", "/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\r\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\nCreating model...\n2018-03-30 17:02:15.515174: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2018-03-30 17:02:15.594028: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2018-03-30 17:02:15.594630: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties: \nname: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\npciBusID: 0000:00:04.0\ntotalMemory: 11.17GiB freeMemory: 11.10GiB\n2018-03-30 17:02:15.594708: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n/content/keras-transfer-learning-for-oxford102/models/resnet50.py:32: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"pr...)`\n self.model = Model(input=base_model.input, output=predictions)\nModel is created\nFine tuning...\nFreezing 8 layers\nFound 71 images belonging to 6 classes.\nFound 72 images belonging to 6 classes.\nEpoch 1/10\n3/2 [========================================] - 20s 7s/step - loss: 2.6356 - acc: 0.1133 - val_loss: 2.3199 - val_acc: 0.1667\nEpoch 2/10\n3/2 [========================================] - 11s 4s/step - loss: 2.1672 - acc: 0.2310 - val_loss: 2.1303 - val_acc: 0.1806\nEpoch 3/10\n3/2 [========================================] - 12s 4s/step - loss: 2.4291 - acc: 0.1691 - val_loss: 1.9863 - val_acc: 0.1944\nEpoch 4/10\n3/2 [========================================] - 12s 4s/step - loss: 2.1736 - acc: 0.2659 - val_loss: 1.8954 - val_acc: 0.2361\nEpoch 5/10\n3/2 [========================================] - 12s 4s/step - loss: 1.9462 - acc: 0.3112 - val_loss: 1.8324 - val_acc: 0.2917\nEpoch 6/10\n3/2 [========================================] - 12s 4s/step - loss: 1.6325 - acc: 0.4124 - val_loss: 1.7761 - val_acc: 
0.3472\nEpoch 7/10\n3/2 [========================================] - 12s 4s/step - loss: 1.6751 - acc: 0.3670 - val_loss: 1.7502 - val_acc: 0.3333\nEpoch 8/10\n3/2 [========================================] - 12s 4s/step - loss: 1.7883 - acc: 0.3557 - val_loss: 1.7241 - val_acc: 0.3194\nEpoch 9/10\n3/2 [========================================] - 11s 4s/step - loss: 1.4774 - acc: 0.4577 - val_loss: 1.6985 - val_acc: 0.3194\nEpoch 10/10\n" ], [ "!python train.py --model=resnet50 --nb_epoch=10 --data_dir=/content/code-colt/data/KitchenUtensils-6/ --freeze_layers_number=4\n!python predict.py --model=resnet50 --path=/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/ --data_dir=/content/code-colt/data/KitchenUtensils-6/ --accuracy", "/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\r\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\nCreating model...\n2018-03-30 17:05:18.787833: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2018-03-30 17:05:18.866419: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2018-03-30 17:05:18.866841: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties: \nname: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\npciBusID: 0000:00:04.0\ntotalMemory: 11.17GiB freeMemory: 11.10GiB\n2018-03-30 17:05:18.866879: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n/content/keras-transfer-learning-for-oxford102/models/resnet50.py:32: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"pr...)`\n self.model = Model(input=base_model.input, output=predictions)\nModel is created\nFine tuning...\nFreezing 4 layers\nFound 71 images belonging to 6 classes.\nFound 72 images belonging to 6 classes.\nEpoch 1/10\n3/2 [========================================] - 18s 6s/step - loss: 2.4336 - acc: 0.2206 - val_loss: 2.0878 - val_acc: 0.2361\nEpoch 2/10\n3/2 [========================================] - 11s 4s/step - loss: 2.1653 - acc: 0.2711 - val_loss: 1.9693 - val_acc: 0.2639\nEpoch 3/10\n3/2 [========================================] - 12s 4s/step - loss: 2.2309 - acc: 0.1866 - val_loss: 1.8750 - val_acc: 0.2778\nEpoch 4/10\n3/2 [========================================] - 12s 4s/step - loss: 2.2251 - acc: 0.2424 - val_loss: 1.8185 - val_acc: 0.3194\nEpoch 5/10\n3/2 [========================================] - 12s 4s/step - loss: 1.9572 - acc: 0.2886 - val_loss: 1.7705 - val_acc: 0.3472\nEpoch 6/10\n3/2 [========================================] - 12s 4s/step - loss: 1.6348 - acc: 0.4350 - val_loss: 1.7312 - val_acc: 0.3750\nEpoch 7/10\n3/2 [========================================] - 12s 4s/step - loss: 1.8974 - acc: 0.2040 - val_loss: 1.7039 - val_acc: 0.3750\nEpoch 
8/10\n3/2 [========================================] - 12s 4s/step - loss: 1.8817 - acc: 0.3566 - val_loss: 1.6830 - val_acc: 0.4028\nEpoch 9/10\n3/2 [========================================] - 12s 4s/step - loss: 1.5210 - acc: 0.3444 - val_loss: 1.6622 - val_acc: 0.3889\nEpoch 10/10\n" ], [ "!python train.py --model=resnet50 --nb_epoch=20 --data_dir=/content/code-colt/data/KitchenUtensils-6/ --freeze_layers_number=2\n!python predict.py --model=resnet50 --path=/content/code-colt/data/KitchenUtensils-6/test/DINNER_FORK/ --data_dir=/content/code-colt/data/KitchenUtensils-6/ --accuracy", "/usr/local/lib/python3.6/dist-packages/IPython/html.py:14: ShimWarning: The `IPython.html` package has been deprecated since IPython 4.0. You should import from `notebook` instead. `IPython.html.widgets` has moved to `ipywidgets`.\r\n \"`IPython.html.widgets` has moved to `ipywidgets`.\", ShimWarning)\nUsing TensorFlow backend.\n/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\nCreating model...\n2018-03-30 17:15:46.545430: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2018-03-30 17:15:46.640598: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2018-03-30 17:15:46.641097: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties: \nname: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\npciBusID: 0000:00:04.0\ntotalMemory: 11.17GiB freeMemory: 11.10GiB\n2018-03-30 17:15:46.641164: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n/content/keras-transfer-learning-for-oxford102/models/resnet50.py:32: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor(\"in..., outputs=Tensor(\"pr...)`\n self.model = Model(input=base_model.input, output=predictions)\nModel is created\nFine tuning...\nFreezing 2 layers\nFound 71 images belonging to 6 classes.\nFound 72 images belonging to 6 classes.\nEpoch 1/20\n3/2 [========================================] - 21s 7s/step - loss: 2.8183 - acc: 0.1186 - val_loss: 2.0385 - val_acc: 0.2083\nEpoch 2/20\n3/2 [========================================] - 11s 4s/step - loss: 2.3130 - acc: 0.2485 - val_loss: 1.9295 - val_acc: 0.2222\nEpoch 3/20\n3/2 [========================================] - 12s 4s/step - loss: 2.4064 - acc: 0.2031 - val_loss: 1.8481 - val_acc: 0.3056\nEpoch 4/20\n3/2 [========================================] - 12s 4s/step - loss: 2.1599 - acc: 0.2659 - val_loss: 1.7848 - val_acc: 0.3194\nEpoch 5/20\n3/2 [========================================] - 12s 4s/step - loss: 1.9126 - acc: 0.2772 - val_loss: 1.7303 - val_acc: 0.2917\nEpoch 6/20\n3/2 [========================================] - 12s 4s/step - loss: 1.8212 - acc: 0.3391 - val_loss: 1.6849 - val_acc: 0.3056\nEpoch 7/20\n3/2 [========================================] - 13s 4s/step - loss: 1.7252 - acc: 0.2711 - val_loss: 1.6452 - val_acc: 0.3472\nEpoch 8/20\n3/2 [========================================] - 12s 4s/step - loss: 1.7805 - acc: 0.3330 - val_loss: 1.6093 - val_acc: 0.3611\nEpoch 9/20\n3/2 
[========================================] - 12s 4s/step - loss: 1.5371 - acc: 0.3112 - val_loss: 1.5749 - val_acc: 0.3750\nEpoch 10/20\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5633daa3483761d9cde368030d11443bfb9852
56,096
ipynb
Jupyter Notebook
src/models/cryptocurrency_weighted_average_similarity_matrix.ipynb
raj1034/Sysc-4906-Introduction-to-Data-Science-Project
72f2c1a01e3a9ea5f47a9480b3ba0631b6d6ddcb
[ "MIT" ]
1
2022-03-08T01:10:36.000Z
2022-03-08T01:10:36.000Z
src/models/cryptocurrency_weighted_average_similarity_matrix.ipynb
raj1034/Sysc-4906-Introduction-to-Data-Science-Project
72f2c1a01e3a9ea5f47a9480b3ba0631b6d6ddcb
[ "MIT" ]
null
null
null
src/models/cryptocurrency_weighted_average_similarity_matrix.ipynb
raj1034/Sysc-4906-Introduction-to-Data-Science-Project
72f2c1a01e3a9ea5f47a9480b3ba0631b6d6ddcb
[ "MIT" ]
null
null
null
39.476425
195
0.346549
[ [ [ "# Cryptocurreny Weighted Average Matrix Creation\n\n### Authors\n| Student Name | Student Number |\n|---------------------------------|--------------------|\n| Raj Sandhu | 101111960 |\n| Akaash Kapoor | 101112895 |\n| Ali Alvi | 101114940 |\n| Hassan Jallad | 101109334 |\n| Areeb Ul Haq | 101115337 |\n| Ahmad Abuoudeh | 101072636 |\n", "_____no_output_____" ], [ "# Libraries to Import", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport os", "_____no_output_____" ] ], [ [ "# Read In Coin Similarity Matrices", "_____no_output_____" ] ], [ [ "parent_folder = os.path.dirname(os.path.dirname(os.getcwd())) #Parent folder of the repo\nmodel_folder = \"models\"\nmodel_data_file_path = os.path.join(parent_folder, model_folder) #path to models folder\n\n#load in data from cvs files\ncoin_df_wmd = pd.read_csv(open(os.path.join(model_data_file_path, \"coin-similarity-matrix-description.csv\"), \"r\"), index_col=\"Name\")\ncoin_df_ed = pd.read_csv(open(os.path.join(model_data_file_path, \"coin-similarity-matrix-euclidean-distance.csv\"), \"r\"), index_col=\"Name\")\n\n#test output of the processed wmd data file.\ncoin_df_wmd", "_____no_output_____" ], [ "#test output of the processed euclidean distance data file.\ncoin_df_ed", "_____no_output_____" ] ], [ [ "# Create and Display Weighted Average Matrix", "_____no_output_____" ] ], [ [ "coin_df_average = coin_df_ed.add(coin_df_wmd)/2\ncoin_df_average", "_____no_output_____" ] ], [ [ "We want to cluster coins using volatility, by using the euclidean distance measure, coin description, by using the word mover's distance, and by using an average of both of those measures. ", "_____no_output_____" ], [ "# Sanity Check", "_____no_output_____" ] ], [ [ "#extracting iota wmd and euclidean distance similarity value for iota and anchor-protocol\nwmd = coin_df_wmd[\"iota\"][\"anchor-protocol\"]\ned = coin_df_ed[\"iota\"][\"anchor-protocol\"]\n\naverage = (wmd + ed)/2\n\nassert average == coin_df_average[\"iota\"][\"anchor-protocol\"], \"Coins iota and anchor-protocol fail unit test. Computed average distances do not match.\"\nprint(\"Coins iota and anchor-protocol pass the unit test. They have an average distance of: \" + str(average))", "Coins iota and anchor-protocol pass the unit test. They have an average distance of: 1.5854126007194353\n" ] ], [ [ "# Download Similarity Matrix as a CSV File", "_____no_output_____" ] ], [ [ "coin_df_average.to_csv(open(os.path.join(model_data_file_path, \"coin-similarity-matrix-weighted-average.csv\"), \"w\"))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec56393a693fced1a4454c65fc3c17dd87f91de4
229,803
ipynb
Jupyter Notebook
examples/brownian_bridge.ipynb
mikea/StochasticProcesses.jl
6263efcb2cccb50fa3a38760142ab39561bc4110
[ "Apache-2.0" ]
3
2017-01-05T14:49:31.000Z
2021-10-31T21:19:39.000Z
examples/brownian_bridge.ipynb
mikea/StochasticProcesses.jl
6263efcb2cccb50fa3a38760142ab39561bc4110
[ "Apache-2.0" ]
null
null
null
examples/brownian_bridge.ipynb
mikea/StochasticProcesses.jl
6263efcb2cccb50fa3a38760142ab39561bc4110
[ "Apache-2.0" ]
2
2017-06-26T21:50:44.000Z
2017-11-22T18:25:45.000Z
2,275.277228
226,522
0.957646
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec563a4cc1d094111b796e70ad2d78b4b05f81cb
9,730
ipynb
Jupyter Notebook
notebooks/dashboard.ipynb
tomstark99/epic-kitchens-100-fyrp
cbc9e59569fb6110b900a51def1947b8a3c93699
[ "Apache-2.0" ]
null
null
null
notebooks/dashboard.ipynb
tomstark99/epic-kitchens-100-fyrp
cbc9e59569fb6110b900a51def1947b8a3c93699
[ "Apache-2.0" ]
null
null
null
notebooks/dashboard.ipynb
tomstark99/epic-kitchens-100-fyrp
cbc9e59569fb6110b900a51def1947b8a3c93699
[ "Apache-2.0" ]
null
null
null
24.385965
355
0.480473
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "results = pd.read_pickle('../datasets/epic-100/esvs/f_train_mf-esv-min_frames=1-max_frames=8.pkl')\nlabels = pd.read_pickle('../datasets/epic-100/features/67217_train_features.pkl')", "_____no_output_____" ], [ "l = labels['labels']", "_____no_output_____" ], [ "idx = 'P01_01_10'\n\nalie = build_dict(l)", "_____no_output_____" ], [ "def build_dict(lst):\n return {d['narration_id']: d for d in lst}", "_____no_output_____" ], [ "alie[idx]", "_____no_output_____" ], [ "n_frames = 3", "_____no_output_____" ], [ "total=alie[idx]['num_frames']\nbins = np.linspace(0,total,11,dtype=int,axis=0).squeeze()\n\n# data=np.random.random(100)\n# np.digitize(data,bins)\n# np.median(bins,axis=1)\n\nmedian = (0.5*(bins[1:]+bins[:-1])).astype(int)\nmedian", "_____no_output_____" ], [ "wtf = results['uids']", "_____no_output_____" ], [ "pff = [seq_idxs[3] for seq_idxs in results['sequence_idxs']]", "_____no_output_____" ], [ "[median[idx] for idx in pff]", "_____no_output_____" ], [ "from pathlib import Path", "_____no_output_____" ], [ "path = Path('../datasets/epic-100/esvs/f_train_mf-esv-min_frames=1-max_frames=8.pkl')", "_____no_output_____" ], [ "'val' in path.name", "_____no_output_____" ], [ "path.resolve()", "_____no_output_____" ], [ "xd = np.array()", "_____no_output_____" ], [ "mtrn_verb = np.array([\n [35.53,26.51,81.80,72.21,29.38,11.87,59.11,31.17],\n [39.42,28.87,84.29,72.84,33.15,13.10,63.84,32.75],\n [41.88,30.66,85.67,73.91,35.38,13.68,67.17,33.59],\n [43.94,30.93,86.78,73.87,37.22,13.58,69.31,33.33],\n [45.91,31.81,87.66,74.04,38.55,14.17,70.58,33.83],\n [46.97,31.67,88.18,74.02,39.76,14.18,71.98,33.47],\n [47.98,32.10,88.95,74.18,40.54,14.38,72.90,34.49],\n [48.51,32.07,89.00,74.15,41.24,13.85,73.73,33.96]\n])\n\nmf_verb = np.array([\n [80.50,65.99,96.54,89.92,79.23,56.22,94.17,79.73],\n [81.94,66.64,97.12,90.08,81.20,57.09,95.36,80.34],\n [82.81,67.02,97.51,90.52,82.16,57.35,95.84,80.23],\n [83.33,67.02,97.74,90.35,82.91,57.51,96.07,80.69],\n [83.91,66.89,97.86,90.25,83.39,57.44,96.56,80.73],\n [84.28,67.00,97.96,90.34,84.00,57.60,96.68,80.44],\n [84.67,67.10,98.21,90.37,84.33,57.51,96.83,80.31],\n [84.98,66.90,98.36,90.34,84.60,57.75,97.04,80.85]\n])\n", "_____no_output_____" ], [ "np.array([mf_verb.min(axis=0),\n mf_verb.max(axis=0)])", "_____no_output_____" ], [ "mf_verb.max(axis=0)", "_____no_output_____" ], [ "mf_verb", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec563de4042347755c2e439efb82f586eddff6ee
8,848
ipynb
Jupyter Notebook
rastervision/examples/object_detection/xview-data-prep.ipynb
jeromemaleski/raster-vision
1b0c7c789afbe1db27da4bc2dd0f58a02fca9f01
[ "Apache-2.0" ]
1,577
2017-05-22T14:22:00.000Z
2022-03-31T02:03:25.000Z
rastervision/examples/object_detection/xview-data-prep.ipynb
jeromemaleski/raster-vision
1b0c7c789afbe1db27da4bc2dd0f58a02fca9f01
[ "Apache-2.0" ]
933
2017-05-09T20:25:02.000Z
2022-03-31T10:22:59.000Z
rastervision_pytorch_backend/rastervision/pytorch_backend/examples/object_detection/xview-data-prep.ipynb
xiaolingis/raster-vision
fc181a6f31f085affa1ee12f0204bdbc5a6bf85a
[ "Apache-2.0" ]
336
2017-05-15T16:30:44.000Z
2022-03-28T06:19:33.000Z
31.15493
450
0.588382
[ [ [ "# xView Vehicle Object Detection Data Prep\n\nThis notebook prepares data for training an object detection model on the xView dataset.\n\n\n* Download the training images and labels from the xView competition site, unzip them, and put the contents of each zipfile in a local or S3 directory.\n* Set `raw_uri` to this directory containing the raw dataset.\n* Set `processed_uri` to a local or S3 directory (you can write to), which will store the processed data generated by this notebook.\n\nThis is all you will need to do in order to run this notebook.", "_____no_output_____" ] ], [ [ "raw_uri = 's3://raster-vision-xview-example/raw-data'\nprocessed_uri = '/opt/data/examples/xview/processed-data'\n# processed_uri = 's3://raster-vision-xview-example/processed-data'", "_____no_output_____" ] ], [ [ "The steps we'll take to prepare the data are as follows:\n\n- Filter out all of the non-vehicle bounding boxes from the labels. Combine all vehicle types into one class. \n- Subset the entire xView dataset to only include the images that are most densely populated with vehicles.\n- Split the selected images randomly into 80%/20% training and validation sets\n- Split the vehicle labels by image, and save off a label GeoJSON file per image\n\n\nThis process will save the split labels, and `train_scenes.csv` and `val_scenes.csv` files that are used by the experiment at `examples/object_detection/xview.py` to `processed_uri`.", "_____no_output_____" ] ], [ [ "import os\nfrom os.path import join\nimport json\nimport random\nfrom collections import defaultdict\n\nfrom rastervision.pipeline.file_system import (\n download_if_needed, list_paths, file_to_json, json_to_file, \n get_local_path, make_dir, sync_to_dir, str_to_file)\n\nrandom.seed(12345)", "_____no_output_____" ] ], [ [ "### Filter out non-vehicle labels\n\nThe xView dataset includes labels for a number of different types of objects. We are only interested in building a detector for objects that can be categorized as vehicles (e.g. 'small car', 'passenger vehicle', 'bus'). We have pre-determined the ids that map to vehicle labels and will use them to extract all the vehicles from the whole xView label set. 
In this section we also assign a class name of 'vehicle' to all of the resulting labels.", "_____no_output_____" ] ], [ [ "label_uri = join(raw_uri, 'xView_train.geojson')\nlabel_js = file_to_json(label_uri)", "_____no_output_____" ], [ "vehicle_type_ids = [17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 32, \n 53, 54, 55, 56, 57, 59, 60, 61, 62, 63, 64, 65, 66]", "_____no_output_____" ], [ "vehicle_features = []\nfor f in label_js['features']:\n if f['properties']['type_id'] in vehicle_type_ids:\n f['properties']['class_name'] = 'vehicle'\n vehicle_features.append(f)\nlabel_js['features'] = vehicle_features", "_____no_output_____" ] ], [ [ "### Subset images with the most vehicles\n\nIn this section we determine which images contain the most vehicles and are therefore the best candidates for this experiment.", "_____no_output_____" ] ], [ [ "image_to_vehicle_counts = defaultdict(int)\nfor f in label_js['features']:\n image_id = f['properties']['image_id']\n image_to_vehicle_counts[image_id] += 1", "_____no_output_____" ], [ "# Use top 10% of images by vehicle count.\nexperiment_image_count = round(len(image_to_vehicle_counts.keys()) * 0.1)\nsorted_images_and_counts = sorted(image_to_vehicle_counts.items(), key=lambda x: x[1])\nselected_images_and_counts = sorted_images_and_counts[-experiment_image_count:]", "_____no_output_____" ] ], [ [ "### Split into train and validation\n\nSplit up training and validation data. Use 80% of images in the training set and 20% in the validation set.", "_____no_output_____" ] ], [ [ "ratio = 0.8\ntraining_sample_size = round(ratio * experiment_image_count)\ntrain_sample = random.sample(range(experiment_image_count), training_sample_size)\n\ntrain_images = []\nval_images = []", "_____no_output_____" ], [ "for i in range(training_sample_size):\n img = selected_images_and_counts[i][0]\n img_path = join('train_images', img)\n if i in train_sample:\n train_images.append(img_path)\n else:\n val_images.append(img_path)", "_____no_output_____" ] ], [ [ "### Divide labels up by image\n\nUsing one vehicle label geojson for all of the training and validation images can become unwieldy. Instead, we will divide the labels up so that each image has a unique geojson associated with it. 
We will save each of these geojsons to the base directory you provided at the outset.\n\nThen, we will create CSVs that our experiments will use to load the training and validation data.", "_____no_output_____" ] ], [ [ "def subset_labels(images):\n for i in images:\n img_fn = os.path.basename(i)\n img_id = os.path.splitext(img_fn)[0]\n tiff_features = []\n for l in label_js['features']:\n image_id = l['properties']['image_id']\n if image_id == img_fn:\n tiff_features.append(l)\n\n tiff_geojson = {}\n for key in label_js:\n if not key == 'features':\n tiff_geojson[key] = label_js[key]\n tiff_geojson['features'] = tiff_features\n \n json_to_file(tiff_geojson, join(processed_uri, 'labels', '{}.geojson'.format(img_id)))", "_____no_output_____" ], [ "subset_labels(train_images)\nsubset_labels(val_images)", "_____no_output_____" ], [ "def create_csv(images, path):\n csv_rows = []\n for img in images:\n img_id = os.path.splitext(os.path.basename(img))[0]\n img_path = join('train_images', '{}.tif'.format(img_id))\n labels_path = join('labels','{}.geojson'.format(img_id))\n csv_rows.append('\"{}\",\"{}\"'.format(img_path, labels_path))\n str_to_file('\\n'.join(csv_rows), path)", "_____no_output_____" ], [ "create_csv(train_images, join(processed_uri, 'train-scenes.csv'))\ncreate_csv(val_images, join(processed_uri, 'val-scenes.csv'))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec5650da8f040a4ea043cf69ca1ca8ef291f525b
16,468
ipynb
Jupyter Notebook
zefr_demo.ipynb
IanQS/zefr_rl_demo
7736f5fcb303428fb6e6bc12ad031bb426f0a33e
[ "MIT" ]
null
null
null
zefr_demo.ipynb
IanQS/zefr_rl_demo
7736f5fcb303428fb6e6bc12ad031bb426f0a33e
[ "MIT" ]
null
null
null
zefr_demo.ipynb
IanQS/zefr_rl_demo
7736f5fcb303428fb6e6bc12ad031bb426f0a33e
[ "MIT" ]
1
2021-02-08T16:29:07.000Z
2021-02-08T16:29:07.000Z
53.294498
1,572
0.598433
[ [ [ "<a href=\"https://colab.research.google.com/github/IanQS/zefr_rl_demo/blob/master/zefr_demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Setup and installation", "_____no_output_____" ] ], [ [ "!pip install --upgrade tensorflow -q", "_____no_output_____" ], [ "!apt-get install -y xvfb python-opengl ffmpeg x11-utils > /dev/null 2>&1", "_____no_output_____" ] ], [ [ "# Handle Imports\n\n- Rendering OpenAI Gym in Colab [Guide](https://star-ai.github.io/Rendering-OpenAi-Gym-in-Colaboratory/)", "_____no_output_____" ] ], [ [ "from IPython import display\nimport numpy as np\nimport time\n\nimport PIL.Image\nimport io\n\nimport tensorflow as tf\n\nimport gym\nfrom collections import deque as RingBuffer", "_____no_output_____" ] ], [ [ "# Defining our Model", "_____no_output_____" ] ], [ [ "class CustomModel(tf.keras.Model):\n \"\"\"\n For learning\n \"\"\"\n def __init__(self, num_actions):\n # Input shape is (210, 160, 3)\n super().__init__()\n # Dimensionality of input = 210 * 160 (we greyscale)\n self.layer_1 = tf.keras.layers.Conv2D(\n filters=32, kernel_size=(8, 8), strides=(2, 2), activation=\"relu\",\n name=\"L1\"\n )\n self.layer_2 = tf.keras.layers.Conv2D(\n filters=64, kernel_size=(5, 5), strides=(2, 2), activation=\"relu\",\n name=\"l2\"\n )\n self.layer_3 = tf.keras.layers.Conv2D(\n filters=64, kernel_size=(5, 5), strides=(2, 2), activation=\"relu\",\n name=\"l3\"\n )\n self.flatten_layer = tf.keras.layers.Flatten(name=\"flatten\")\n self.layer_4 = tf.keras.layers.Dense(units=512, activation=\"relu\",\n name=\"l4\")\n self.output_layer = tf.keras.layers.Dense(num_actions, name=\"output\")\n\n @tf.function\n def call(self, inputs, training=True):\n greyscaled = tf.reduce_mean(inputs, axis=3, keepdims=True) / 255.0 # (210, 160, 3) -> (210, 160)\n x = self.layer_1(greyscaled)\n x = self.layer_2(x)\n x = self.layer_3(x)\n x = self.flatten_layer(x)\n x = self.layer_4(x)\n x = self.output_layer(x)\n return x\n", "_____no_output_____" ] ], [ [ "# Defining our Q-learning Tracker", "_____no_output_____" ] ], [ [ "# Based loosely on https://github.com/VXU1230/reinforcement_learning/blob/master/dqn/cart_pole.py\n\nclass DeepQNetwork(object):\n def __init__(self, num_states, num_actions, discount_factor, \n replay_buffer_min, replay_buffer_max, \n learning_rate):\n self.num_s = num_states\n self.num_a = num_actions\n self.gamma = discount_factor\n self.replay_buffer_min = replay_buffer_min\n self.replay_buffer = RingBuffer([], maxlen=replay_buffer_max)\n self.lr = learning_rate\n\n def predict(self, inputs):\n return self.model(inputs)\n\n def take_action(self, )", "_____no_output_____" ] ], [ [ "# Setup Display on Colab", "_____no_output_____" ] ], [ [ "def showarray(a, fmt='png'):\n a = np.uint8(a)\n f = io.BytesIO()\n ima = PIL.Image.fromarray(a).save(f, fmt)\n return f.getvalue()\n\nimagehandle = display.display(display.Image(data=showarray(env.render(mode='rgb_array')), width=450), display_id='gymscr')\n\ndef evaluate_model(trained_model):\n env = gym.make('SpaceInvaders-v0')\n state = env.reset() \n while True:\n env.\n time.sleep(0.001)\n action = trained_model.predict(state)\n env.step(env.action_space.sample()) # take a random action\n display.update_display(display.Image(data=showarray(env.render(mode='rgb_array')), width=450), display_id='gymscr')", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec56542fe80fd67950e486a18dae986aaac9ab67
28,078
ipynb
Jupyter Notebook
docs/report/su21-reu-361/project/code/timeseries_generator.ipynb
mikahla1/cybertraining-dsc.github.io
168cadb2f755cb6ad4907e5656bd879d57e01e43
[ "Apache-2.0" ]
null
null
null
docs/report/su21-reu-361/project/code/timeseries_generator.ipynb
mikahla1/cybertraining-dsc.github.io
168cadb2f755cb6ad4907e5656bd879d57e01e43
[ "Apache-2.0" ]
null
null
null
docs/report/su21-reu-361/project/code/timeseries_generator.ipynb
mikahla1/cybertraining-dsc.github.io
168cadb2f755cb6ad4907e5656bd879d57e01e43
[ "Apache-2.0" ]
null
null
null
237.949153
25,150
0.917088
[ [ [ "from pandas import read_csv\r\nfrom matplotlib import pyplot\r\nseries = read_csv(\"C:/Users/Sledgehammer/Desktop/eos.csv\", header=0, index_col=0, parse_dates=True, squeeze=True)\r\nprint(series.head())", " Open High Low Close Volume Market Cap\nDate \n2020-12-12 2.69 2.79 2.69 2.77 1.399304e+09 2.598080e+09\n2020-12-11 2.74 2.74 2.65 2.69 1.803557e+09 2.528741e+09\n2020-12-10 2.81 2.81 2.71 2.74 1.940078e+09 2.573992e+09\n2020-12-09 2.80 2.83 2.68 2.81 2.579252e+09 2.636766e+09\n2020-12-08 2.95 2.96 2.76 2.80 2.365115e+09 2.626114e+09\n" ], [ "series.drop(\"Open\", axis=1, inplace=True)\r\nseries.drop(\"High\", axis=1, inplace=True)\r\nseries.drop(\"Low\", axis=1, inplace=True)\r\nseries.drop(\"Volume\", axis=1, inplace=True)\r\nseries.drop(\"Market Cap\", axis=1, inplace=True)\r\n", "_____no_output_____" ], [ "series.plot()\r\npyplot.title(\"EOS Price Over the Years\")\r\npyplot.xlabel(\"Date\")\r\npyplot.ylabel(\"EOS Price\")\r\npyplot.tight_layout()\r\npyplot.savefig(\"C://Users//Sledgehammer//Desktop//eos_price.png\", dpi=100, facecolor=\"#FFFFFF\")\r\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ec56566157570074e77c10ceef78ac1619169132
125,249
ipynb
Jupyter Notebook
Task B/Inference/Individual models/bert_tweet_+kim_cnn+_f1_macro_BCE_loss_inference.ipynb
rematchka/Saracsm-detection-for-extremly-unbalanced-dataset
bda6dea03a8a781ff1c7c3a9550b951a6d680cf0
[ "MIT" ]
null
null
null
Task B/Inference/Individual models/bert_tweet_+kim_cnn+_f1_macro_BCE_loss_inference.ipynb
rematchka/Saracsm-detection-for-extremly-unbalanced-dataset
bda6dea03a8a781ff1c7c3a9550b951a6d680cf0
[ "MIT" ]
null
null
null
Task B/Inference/Individual models/bert_tweet_+kim_cnn+_f1_macro_BCE_loss_inference.ipynb
rematchka/Saracsm-detection-for-extremly-unbalanced-dataset
bda6dea03a8a781ff1c7c3a9550b951a6d680cf0
[ "MIT" ]
null
null
null
46.509098
9,289
0.499261
[ [ [ "# Main imports and code", "_____no_output_____" ] ], [ [ "# check which gpu we're using\n!nvidia-smi", "Sun Feb 27 13:08:52 2022 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n| N/A 35C P8 27W / 149W | 0MiB / 11441MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "!pip install transformers\n!pip install pytorch-ignite", "Collecting transformers\n Downloading transformers-4.16.2-py3-none-any.whl (3.5 MB)\n\u001b[K |████████████████████████████████| 3.5 MB 11.8 MB/s \n\u001b[?25hRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers) (21.3)\nCollecting tokenizers!=0.11.3,>=0.10.1\n Downloading tokenizers-0.11.5-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (6.8 MB)\n\u001b[K |████████████████████████████████| 6.8 MB 41.1 MB/s \n\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.6.0)\nCollecting pyyaml>=5.1\n Downloading PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (596 kB)\n\u001b[K |████████████████████████████████| 596 kB 48.1 MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.62.3)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.21.5)\nCollecting sacremoses\n Downloading sacremoses-0.0.47-py2.py3-none-any.whl (895 kB)\n\u001b[K |████████████████████████████████| 895 kB 43.2 MB/s \n\u001b[?25hRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) (4.11.1)\nCollecting huggingface-hub<1.0,>=0.1.0\n Downloading huggingface_hub-0.4.0-py3-none-any.whl (67 kB)\n\u001b[K |████████████████████████████████| 67 kB 5.0 MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0,>=0.1.0->transformers) (3.10.0.2)\nRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers) (3.0.7)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.7.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from 
requests->transformers) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.10.8)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.1.0)\nInstalling collected packages: pyyaml, tokenizers, sacremoses, huggingface-hub, transformers\n Attempting uninstall: pyyaml\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\nSuccessfully installed huggingface-hub-0.4.0 pyyaml-6.0 sacremoses-0.0.47 tokenizers-0.11.5 transformers-4.16.2\nCollecting pytorch-ignite\n Downloading pytorch_ignite-0.4.8-py3-none-any.whl (251 kB)\n\u001b[K |████████████████████████████████| 251 kB 10.8 MB/s \n\u001b[?25hRequirement already satisfied: torch<2,>=1.3 in /usr/local/lib/python3.7/dist-packages (from pytorch-ignite) (1.10.0+cu111)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch<2,>=1.3->pytorch-ignite) (3.10.0.2)\nInstalling collected packages: pytorch-ignite\nSuccessfully installed pytorch-ignite-0.4.8\n" ], [ "# Any results you write to the current directory are saved as output.\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfrom transformers import BertTokenizer,BertModel\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader,Dataset\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.optim import AdamW\nfrom tqdm import tqdm\nfrom argparse import ArgumentParser\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.handlers import EarlyStopping\nfrom ignite.contrib.handlers import TensorboardLogger, ProgressBar\nfrom ignite.utils import convert_tensor\nfrom torch.optim.lr_scheduler import ExponentialLR\nimport warnings \nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import os\nimport gc\nimport copy\nimport time\nimport random\nimport string\n\n# For data manipulation\nimport numpy as np\nimport pandas as pd\n\n# Pytorch Imports\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import Dataset, DataLoader\n\n# Utils\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\n# Sklearn Imports\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import StratifiedKFold, KFold", "_____no_output_____" ], [ "from transformers import AutoTokenizer, AutoModel, AdamW\n", "_____no_output_____" ], [ "!pip install sentencepiece\n", "Collecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[?25l\r\u001b[K |▎ | 10 kB 21.1 MB/s eta 0:00:01\r\u001b[K |▌ | 20 kB 25.7 MB/s eta 0:00:01\r\u001b[K |▉ | 30 kB 25.0 MB/s eta 0:00:01\r\u001b[K |█ | 40 kB 8.9 MB/s eta 0:00:01\r\u001b[K |█▍ | 51 kB 10.0 MB/s eta 0:00:01\r\u001b[K |█▋ | 61 kB 11.4 MB/s eta 0:00:01\r\u001b[K |██ | 71 kB 10.7 MB/s eta 0:00:01\r\u001b[K |██▏ | 81 kB 11.4 MB/s eta 0:00:01\r\u001b[K |██▍ | 92 kB 12.3 MB/s eta 0:00:01\r\u001b[K |██▊ | 102 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███ | 112 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███▎ | 122 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███▌ | 133 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███▉ | 143 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████ | 153 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████▎ | 163 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████▋ | 174 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████▉ | 184 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████▏ | 194 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████▍ | 204 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████▊ | 215 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████ | 225 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████▏ | 235 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████▌ | 245 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████▊ | 256 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████ | 266 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████▎ | 276 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████▋ | 286 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████▉ | 296 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████ | 307 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████▍ | 317 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████▋ | 327 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████ | 337 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████▏ | 348 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████▌ | 358 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████▊ | 368 kB 10.8 MB/s eta 0:00:01\r\u001b[K 
|██████████ | 378 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████▎ | 389 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████▌ | 399 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████▉ | 409 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████ | 419 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████▍ | 430 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████▋ | 440 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████▉ | 450 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████▏ | 460 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████▍ | 471 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████▊ | 481 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████ | 491 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████▎ | 501 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████▌ | 512 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 522 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████ | 532 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 542 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████▋ | 552 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 563 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 573 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 583 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████▋ | 593 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████ | 604 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████▏ | 614 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████▌ | 624 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 634 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████ | 645 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 655 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 665 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 675 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████ | 686 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████▍ | 696 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 706 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 716 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████▏ | 727 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████▍ | 737 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 747 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████ | 757 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████▎ | 768 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 778 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████▉ | 788 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 798 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 808 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████▋ | 819 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████▉ | 829 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████▏ | 839 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████▍ | 849 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 860 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 870 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████▏ | 880 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 890 kB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████▊ | 901 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 911 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 921 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████▋ | 931 kB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 942 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 952 kB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▍ | 962 kB 10.8 MB/s eta 0:00:01\r\u001b[K 
|█████████████████████████▋ | 972 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 983 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▏ | 993 kB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 1.0 MB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 1.0 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 1.0 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▎ | 1.0 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▌ | 1.0 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▉ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▍ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▏ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▍ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▊ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 1.1 MB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▌ | 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▎| 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▋| 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 1.2 MB 10.8 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 1.2 MB 10.8 MB/s \n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.96\n" ], [ "import random\nimport os\nfrom urllib import request", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "train=pd.concat([train, validate], ignore_index=True)", "_____no_output_____" ], [ "test=pd.read_csv('/content/drive/MyDrive/ISarcasm/TestSet/task_B_En_test.csv')\n", "_____no_output_____" ] ], [ [ "# RoBERTa Baseline for Task 1", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix, precision_score , recall_score\n\nfrom transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, BertTokenizer\nfrom transformers.data.processors import SingleSentenceClassificationProcessor\nfrom transformers import Trainer , TrainingArguments\nfrom transformers.trainer_utils import EvaluationStrategy\nfrom transformers.data.processors.utils import InputFeatures\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader", "_____no_output_____" ], [ "!pip install datasets\n", "Collecting datasets\n Downloading datasets-1.18.3-py3-none-any.whl (311 kB)\n\u001b[?25l\r\u001b[K |█ | 10 kB 23.9 MB/s eta 0:00:01\r\u001b[K |██ | 20 kB 24.1 MB/s eta 0:00:01\r\u001b[K |███▏ | 30 kB 24.9 MB/s eta 0:00:01\r\u001b[K |████▏ | 40 kB 12.3 MB/s eta 0:00:01\r\u001b[K |█████▎ | 51 kB 11.7 MB/s eta 0:00:01\r\u001b[K |██████▎ | 61 kB 13.5 MB/s eta 0:00:01\r\u001b[K |███████▍ | 71 kB 12.9 MB/s eta 0:00:01\r\u001b[K |████████▍ | 81 kB 12.3 MB/s eta 0:00:01\r\u001b[K |█████████▌ | 92 kB 13.5 MB/s eta 0:00:01\r\u001b[K |██████████▌ | 102 kB 12.4 MB/s eta 
0:00:01\r\u001b[K |███████████▋ | 112 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████▋ | 122 kB 12.4 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 133 kB 12.4 MB/s eta 0:00:01\r\u001b[K |██████████████▊ | 143 kB 12.4 MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 153 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████████▉ | 163 kB 12.4 MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 174 kB 12.4 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 184 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████████████ | 194 kB 12.4 MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 204 kB 12.4 MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 215 kB 12.4 MB/s eta 0:00:01\r\u001b[K |███████████████████████▏ | 225 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████████████████▏ | 235 kB 12.4 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▎ | 245 kB 12.4 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▎ | 256 kB 12.4 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 266 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▍ | 276 kB 12.4 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▍ | 286 kB 12.4 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▌ | 296 kB 12.4 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 307 kB 12.4 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 311 kB 12.4 MB/s \n\u001b[?25hRequirement already satisfied: pyarrow!=4.0.0,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (6.0.1)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from datasets) (4.11.1)\nCollecting xxhash\n Downloading xxhash-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (212 kB)\n\u001b[K |████████████████████████████████| 212 kB 39.7 MB/s \n\u001b[?25hRequirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.4)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.21.5)\nRequirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.7/dist-packages (from datasets) (4.62.3)\nCollecting aiohttp\n Downloading aiohttp-3.8.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n\u001b[K |████████████████████████████████| 1.1 MB 47.6 MB/s \n\u001b[?25hRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (2.23.0)\nRequirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets) (0.70.12.2)\nCollecting fsspec[http]>=2021.05.0\n Downloading fsspec-2022.2.0-py3-none-any.whl (134 kB)\n\u001b[K |████████████████████████████████| 134 kB 48.1 MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from datasets) (21.3)\nRequirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (0.4.0)\nRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from datasets) (1.3.5)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.10.0.2)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.6.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (6.0)\nRequirement already 
satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->datasets) (3.0.7)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->datasets) (2021.10.8)\nCollecting async-timeout<5.0,>=4.0.0a3\n Downloading async_timeout-4.0.2-py3-none-any.whl (5.8 kB)\nCollecting asynctest==0.13.0\n Downloading asynctest-0.13.0-py3-none-any.whl (26 kB)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->datasets) (21.4.0)\nCollecting aiosignal>=1.1.2\n Downloading aiosignal-1.2.0-py3-none-any.whl (8.2 kB)\nCollecting frozenlist>=1.1.1\n Downloading frozenlist-1.3.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (144 kB)\n\u001b[K |████████████████████████████████| 144 kB 46.7 MB/s \n\u001b[?25hCollecting yarl<2.0,>=1.0\n Downloading yarl-1.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (271 kB)\n\u001b[K |████████████████████████████████| 271 kB 38.9 MB/s \n\u001b[?25hRequirement already satisfied: charset-normalizer<3.0,>=2.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp->datasets) (2.0.12)\nCollecting multidict<7.0,>=4.5\n Downloading multidict-6.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (94 kB)\n\u001b[K |████████████████████████████████| 94 kB 1.4 MB/s \n\u001b[?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->datasets) (3.7.0)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->datasets) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.15.0)\nInstalling collected packages: multidict, frozenlist, yarl, asynctest, async-timeout, aiosignal, fsspec, aiohttp, xxhash, datasets\nSuccessfully installed aiohttp-3.8.1 aiosignal-1.2.0 async-timeout-4.0.2 asynctest-0.13.0 datasets-1.18.3 frozenlist-1.3.0 fsspec-2022.2.0 multidict-6.0.2 xxhash-3.0.0 yarl-1.7.2\n" ], [ "class PCLTrainDataset(Dataset):\n def __init__(self, df, tokenizer, max_length,displacemnt):\n self.df = df\n self.max_len = max_length\n self.tokenizer = tokenizer\n self.text = df['text'].values\n self.label=df[['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']].values\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, index):\n text = self.text[index]\n # summary = self.summary[index]\n inputs_text = self.tokenizer.encode_plus(\n text,\n truncation=True,\n add_special_tokens=True,\n max_length=self.max_len,\n padding='max_length'\n )\n \n \n target = self.label[index]\n \n text_ids = inputs_text['input_ids']\n text_mask = inputs_text['attention_mask']\n \n \n \n \n return {\n \n 'text_ids': torch.tensor(text_ids, dtype=torch.long),\n 'text_mask': 
torch.tensor(text_mask, dtype=torch.long),\n 'target': torch.tensor(target, dtype=torch.float)\n }\n", "_____no_output_____" ], [ "class PCL_Model_Arch(nn.Module):\n def __init__(self,pre_trained='vinai/bertweet-base'):\n super().__init__()\n \n self.bert = AutoModel.from_pretrained(pre_trained, output_hidden_states=True)\n output_channel = 16 # number of kernels\n num_classes = 6 # number of targets to predict\n dropout = 0.2 # dropout value\n embedding_dim = 768 # length of embedding dim\n\n ks = 3 # three conv nets here\n\n # input_channel = word embeddings at a value of 1; 3 for RGB images\n input_channel = 4 # for single embedding, input_channel = 1\n\n # [3, 4, 5] = window height\n # padding = padding to account for height of search window\n\n # 3 convolutional nets\n self.conv1 = nn.Conv2d(input_channel, output_channel, (3, embedding_dim), padding=(2, 0), groups=4)\n self.conv2 = nn.Conv2d(input_channel, output_channel, (4, embedding_dim), padding=(3, 0), groups=4)\n self.conv3 = nn.Conv2d(input_channel, output_channel, (5, embedding_dim), padding=(4, 0), groups=4)\n\n # apply dropout\n self.dropout = nn.Dropout(dropout)\n\n # fully connected layer for classification\n # 3x conv nets * output channel\n self.fc1 = nn.Linear(ks * output_channel, num_classes)\n self.softmax = nn.Sigmoid()\n\n def forward(self, text_id, text_mask):\n # get the last 4 layers\n outputs= self.bert(text_id, attention_mask=text_mask)\n # all_layers = [4, 16, 256, 768]\n hidden_layers = outputs[2] # get hidden layers\n\n hidden_layers = torch.stack(hidden_layers, dim=1)\n x = hidden_layers[:, -4:] \n # x = x.unsqueeze(1)\n # x = torch.mean(x, 0)\n # print(hidden_layers.size())\n \n torch.cuda.empty_cache()\n x = [F.relu(self.conv1(x)).squeeze(3), F.relu(self.conv2(x)).squeeze(3), F.relu(self.conv3(x)).squeeze(3)]\n # max-over-time pooling; # (batch, channel_output) * ks\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]\n # concat results; (batch, channel_output * ks)\n x = torch.cat(x, 1)\n # add dropout\n x = self.dropout(x)\n # generate logits (batch, target_size)\n logit = self.fc1(x)\n torch.cuda.empty_cache()\n return logit", "_____no_output_____" ], [ "!pip install emoji", "Collecting emoji\n Downloading emoji-1.6.3.tar.gz (174 kB)\n\u001b[?25l\r\u001b[K |█▉ | 10 kB 21.6 MB/s eta 0:00:01\r\u001b[K |███▊ | 20 kB 21.0 MB/s eta 0:00:01\r\u001b[K |█████▋ | 30 kB 16.1 MB/s eta 0:00:01\r\u001b[K |███████▌ | 40 kB 17.3 MB/s eta 0:00:01\r\u001b[K |█████████▍ | 51 kB 12.5 MB/s eta 0:00:01\r\u001b[K |███████████▎ | 61 kB 14.1 MB/s eta 0:00:01\r\u001b[K |█████████████▏ | 71 kB 9.6 MB/s eta 0:00:01\r\u001b[K |███████████████ | 81 kB 10.3 MB/s eta 0:00:01\r\u001b[K |█████████████████ | 92 kB 11.3 MB/s eta 0:00:01\r\u001b[K |██████████████████▉ | 102 kB 12.1 MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 112 kB 12.1 MB/s eta 0:00:01\r\u001b[K |██████████████████████▋ | 122 kB 12.1 MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 133 kB 12.1 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▍ | 143 kB 12.1 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▏ | 153 kB 12.1 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 163 kB 12.1 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 174 kB 12.1 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 174 kB 12.1 MB/s \n\u001b[?25hBuilding wheels for collected packages: emoji\n Building wheel for emoji (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for emoji: filename=emoji-1.6.3-py3-none-any.whl size=170298 sha256=93111db2c93e452d1833197c2b89a5638e66dd6d3765c710de41c0934d0e20a4\n Stored in directory: /root/.cache/pip/wheels/03/8b/d7/ad579fbef83c287215c0caab60fb0ae0f30c4d7ce5f580eade\nSuccessfully built emoji\nInstalling collected packages: emoji\nSuccessfully installed emoji-1.6.3\n" ], [ "tokenizer= AutoTokenizer.from_pretrained('vinai/bertweet-base')\n", "_____no_output_____" ], [ "CONFIG = {\"seed\": 2021,\n \"epochs\": 5,\n \"model_name\": \"xlnet-base-cased\",\n \"train_batch_size\": 16,\n \"valid_batch_size\": 64,\n \"max_length\": 120,\n \"learning_rate\": 1e-4,\n \"scheduler\": 'CosineAnnealingLR',\n \"min_lr\": 1e-6,\n \"T_max\": 500,\n \"weight_decay\": 1e-6,\n \"n_fold\": 5,\n \"n_accumulate\": 1,\n \"num_classes\": 1,\n \"margin\": 0.5,\n \"device\": torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"),\n }", "_____no_output_____" ], [ "test.dropna(inplace=True)", "_____no_output_____" ], [ "valid_dataset = PCLTrainDataset(test, tokenizer=tokenizer, max_length=CONFIG['max_length'],displacemnt=0)\nvalid_loader = DataLoader(valid_dataset, batch_size=CONFIG['valid_batch_size'], \n num_workers=2, shuffle=False, pin_memory=True)", "_____no_output_____" ], [ "@torch.no_grad()\ndef valid_fn(model, dataloader, device):\n model.eval()\n \n dataset_size = 0\n running_loss = 0.0\n \n PREDS = []\n \n bar = tqdm(enumerate(dataloader), total=len(dataloader))\n for step, data in bar:\n ids = data['text_ids'].to(device, dtype = torch.long)\n mask = data['text_mask'].to(device, dtype = torch.long)\n \n outputs = model(ids, mask)\n sig=nn.Sigmoid()\n outputs=sig(outputs)\n # outputs = outputs.argmax(dim=1)\n# print(len(outputs))\n# print(len(np.max(outputs.cpu().detach().numpy(),axis=1)))\n PREDS.append(outputs.detach().cpu().numpy()) \n # print(outputs.detach().cpu().numpy())\n \n PREDS = np.concatenate(PREDS)\n gc.collect()\n \n return PREDS", "_____no_output_____" ], [ "def inference(model_paths, dataloader, device):\n final_preds = []\n for i, path in enumerate(model_paths):\n model = PCL_Model_Arch()\n model.to(CONFIG['device'])\n model.load_state_dict(torch.load(path))\n \n print(f\"Getting predictions for model {i+1}\")\n preds = valid_fn(model, dataloader, device)\n final_preds.append(preds)\n \n final_preds = np.array(final_preds)\n # print(final_preds)\n final_preds = np.mean(final_preds, axis=0)\n # print(final_preds)\n final_preds[final_preds>=0.5] = 1\n final_preds[final_preds<0.5] = 0\n # final_preds= np.argmax(final_preds,axis=1)\n return final_preds", "_____no_output_____" ] ], [ [ "Assymetric loss random sampler", "_____no_output_____" ], [ "Balanced Loss random sampler", "_____no_output_____" ] ], [ [ "MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_balanced_loss_random_sampler/Loss-Fold-0.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_balanced_loss_random_sampler/Loss-Fold-1.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_balanced_loss_random_sampler/Loss-Fold-2.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_balanced_loss_random_sampler/Loss-Fold-3.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_balanced_loss_random_sampler/Loss-Fold-4.bin']\n# MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn/Loss-Fold-0.bin']\npreds = inference(MODEL_PATH_2, valid_loader, CONFIG['device'])", "_____no_output_____" ], [ "from sklearn.metrics import 
f1_score,accuracy_score,precision_score,classification_report\nprint(classification_report(test[['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']].values, preds,target_names=['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']))", " precision recall f1-score support\n\n sarcasm 0.13 0.93 0.23 180\n irony 0.06 0.90 0.11 20\n satire 0.12 0.33 0.17 49\n understatement 0.00 0.00 0.00 1\n overstatement 0.05 0.10 0.06 10\nrhetorical_question 0.06 0.82 0.11 11\n\n micro avg 0.11 0.78 0.20 271\n macro avg 0.07 0.51 0.12 271\n weighted avg 0.12 0.78 0.20 271\n samples avg 0.12 0.12 0.12 271\n\n" ] ], [ [ "Balance losss under sampler", "_____no_output_____" ], [ "Balance loss no sampler", "_____no_output_____" ], [ "Balanced Loss + sampler cycle", "_____no_output_____" ], [ "Assym Loss + sampler cycle", "_____no_output_____" ] ], [ [ "MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_assym_loss/Loss-Fold-0.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_assym_loss/Loss-Fold-1.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_assym_loss/Loss-Fold-2.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_assym_loss/Loss-Fold-3.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_assym_loss/Loss-Fold-4.bin']\n# MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn/Loss-Fold-0.bin']\npreds = inference(MODEL_PATH_2, valid_loader, CONFIG['device'])", "Some weights of the model checkpoint at vinai/bertweet-base were not used when initializing RobertaModel: ['lm_head.bias', 'lm_head.dense.bias', 'lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.layer_norm.weight', 'lm_head.dense.weight', 'lm_head.layer_norm.bias']\n- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ], [ "from sklearn.metrics import f1_score,accuracy_score,precision_score,classification_report\nprint(classification_report(test[['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']].values, preds,target_names=['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']))", " precision recall f1-score support\n\n sarcasm 0.13 1.00 0.23 180\n irony 0.03 0.95 0.05 20\n satire 0.09 0.47 0.16 49\n understatement 0.00 0.00 0.00 1\n overstatement 0.02 0.30 0.03 10\nrhetorical_question 0.06 1.00 0.11 11\n\n micro avg 0.08 0.87 0.15 271\n macro avg 0.05 0.62 0.10 271\n weighted avg 0.11 0.87 0.19 271\n samples avg 0.09 0.13 0.10 271\n\n" ] ], [ [ "*Recall* Loss + sampler defualt", "_____no_output_____" ] ], [ [ "MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn_recall_balancer/Loss-Fold-0.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn_recall_balancer/Loss-Fold-1.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn_recall_balancer/Loss-Fold-2.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn_recall_balancer/Loss-Fold-3.bin','/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn_recall_balancer/Loss-Fold-4.bin']\n# MODEL_PATH_2=['/content/drive/MyDrive/ISarcasm/Models_Task_B/bert_tweet_kim_cnn/Loss-Fold-0.bin']\npreds = inference(MODEL_PATH_2, valid_loader, CONFIG['device'])", "Some weights of the model checkpoint at vinai/bertweet-base were not used when initializing RobertaModel: ['lm_head.bias', 'lm_head.dense.bias', 'lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.layer_norm.weight', 'lm_head.dense.weight', 'lm_head.layer_norm.bias']\n- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ], [ "from sklearn.metrics import f1_score,accuracy_score,precision_score,classification_report\nprint(classification_report(test[['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']].values, preds,target_names=['sarcasm', 'irony',\n 'satire', 'understatement', 'overstatement', 'rhetorical_question']))", " precision recall f1-score support\n\n sarcasm 0.13 1.00 0.23 180\n irony 0.01 1.00 0.03 20\n satire 0.00 0.00 0.00 49\n understatement 0.00 1.00 0.00 1\n overstatement 0.01 0.20 0.02 10\nrhetorical_question 0.00 0.00 0.00 11\n\n micro avg 0.05 0.75 0.09 271\n macro avg 0.03 0.53 0.05 271\n weighted avg 0.09 0.75 0.15 271\n samples avg 0.05 0.12 0.07 271\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec565d052c1d6de75ccc857513410f775fbc322c
3,598
ipynb
Jupyter Notebook
zp_database/4096x4096/zp_hdf5_ms.ipynb
s-sajid-ali/xwp_petsc
e890cdcdeaf140714f4fe16e5ddc175a2e20ec3b
[ "MIT" ]
null
null
null
zp_database/4096x4096/zp_hdf5_ms.ipynb
s-sajid-ali/xwp_petsc
e890cdcdeaf140714f4fe16e5ddc175a2e20ec3b
[ "MIT" ]
null
null
null
zp_database/4096x4096/zp_hdf5_ms.ipynb
s-sajid-ali/xwp_petsc
e890cdcdeaf140714f4fe16e5ddc175a2e20ec3b
[ "MIT" ]
2
2019-10-08T22:22:04.000Z
2022-03-15T11:04:02.000Z
21.416667
140
0.535575
[ [ [ "Code to convert zone plate pattern in numpy to the time dependent term in parabolic wave equation and store it as hdf5.", "_____no_output_____" ] ], [ [ "%%capture\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport pickle,os", "_____no_output_____" ] ], [ [ "Switch to directory containing zone plate array and parameters.", "_____no_output_____" ] ], [ [ "pwd = os.getcwd()\nos.chdir(pwd+str('/rings'))", "_____no_output_____" ] ], [ [ "Load the zone plate pattern from numpy array. <br>\nLoad the parameters from the metadata file associated with the zone plate.", "_____no_output_____" ] ], [ [ "ZP = np.load(\"zp.npy\")\nparameters = pickle.load(open('parameters.pickle','rb'))\n\ngrid_size = parameters['grid_size']\nenergy = parameters['energy(in eV)']\ndelta = parameters['delta']\nbeta = parameters['beta']\nwavel = parameters['wavelength in m']\n\ndim_x = dim_y = grid_size\ndim_xy = dim_x*dim_y", "_____no_output_____" ] ], [ [ "Switch back to current working directory.", "_____no_output_____" ] ], [ [ "os.chdir(pwd)", "_____no_output_____" ] ], [ [ "Flatten dataset and store as 1D array with the first column containin the real part and the second part containing the imaginary part.", "_____no_output_____" ] ], [ [ "ZP = ZP.reshape(dim_xy)\nZP_ = np.zeros((dim_xy,2))\nZP_[:,0] = np.real(ZP)\nZP_[:,1] = np.imag(ZP)", "_____no_output_____" ] ], [ [ "Create a new hdf5 file to store the dataset. Set the attribute complex for PETSc to interpret the dataset as complex array.", "_____no_output_____" ] ], [ [ "f = h5py.File(\"ref_index_ms.h5\", \"w\")\ndset = f.create_dataset(\"ref_index\", np.shape(ZP_), dtype='f8', chunks=True)\ndset.attrs.__setitem__(\"complex\",1)", "_____no_output_____" ] ], [ [ "Transfer the data. Close the hdf5 file after the transfer.", "_____no_output_____" ] ], [ [ "dset[:,:] = ZP_\n\nf.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec56689a7f638018fe7c79fba42a45ec8acca28a
249,506
ipynb
Jupyter Notebook
MLG_Lab1.ipynb
djaquet5/MLG_Python
097bddbee4dd69ce98387fecb572f94f416cc3a8
[ "MIT" ]
null
null
null
MLG_Lab1.ipynb
djaquet5/MLG_Python
097bddbee4dd69ce98387fecb572f94f416cc3a8
[ "MIT" ]
4
2020-03-24T18:11:34.000Z
2021-08-23T20:38:14.000Z
MLG_Lab1.ipynb
djaquet5/MLG_Python
097bddbee4dd69ce98387fecb572f94f416cc3a8
[ "MIT" ]
null
null
null
105.902377
12,120
0.808209
[ [ [ "# MLG / Introduction to jupyter notebooks", "_____no_output_____" ], [ "In this laboratory you are going to learn how to perform interactive computing using **jupyter**. The **jupyter** project, born out of the IPython Project in 2014, evolved to support interactive data science and scientific computing across all programming languages. It was initially an interactive shell for python that has more functionnalities than the basic one, now it allows you to interact with your scripts using so called **notebooks**. The notebook extends the console-based approach to interactive computing in a qualitatively new direction, providing a web-based application suitable for capturing the whole computation process: developing, documenting, and executing code, as well as communicating the results.\n\nThis guide does not start from the basics of the general purpose language **python**. If you do not know this language, it is recommended to follow a **python** tutorial in order to learn the basic concepts and commands. You can have a look at the [official python tutorial](https://docs.python.org/2/tutorial/) or [Google's python tutorial](https://developers.google.com/edu/python/) for example.\n\nNote that for this course, we will use the Python 3.X series.\n\nYou will use a browser-based notebook to interactively explore a dataset by:\n- Reading raw data from ascii files\n- Reading typed data (data frames) from ascii files\n- Selecting specific columns and/or rows from a dataset\n- Filtering datasets\n- Plotting the information in the dataset (e.g., scatter-plot, boxplot, histogram)", "_____no_output_____" ], [ "## 1. Using the notebooks", "_____no_output_____" ], [ "You are in an jupyter notebook right now. A jupyter notebook is a web interface to a python interpreter.\n\nA notebook is made of cells. Each cell has a type which defines what happens when it is run. \n\n- Markdown cells allow you to write [Markdown](http://daringfireball.net/projects/markdown/) text in them. They are just displayed as HTML when run.\n- Code cells contain python code. When the cell is run, the code is sent to the python interpreter, executed and you get the result in the cell output.\n- Various header cells that allow you to structure your document.\n\nYou can change the type of a cell using the drop-down menu in the toolbar.\n\nYou can click (for Code cells) or double-click (for headers and markdown cells) on cells to edit their content. You can then use keyboard shortcuts to run them :\n\n- Ctrl + Enter : run in place\n- Shift + Enter : run and move to next cell\n- Alt + Enter : run and insert new cell after", "_____no_output_____" ] ], [ [ "# This is a code cell containing python code !\nprint( 2 + 2 )", "4\n" ] ], [ [ "The python interpreter that executes the code you write in the notebook is called a *Kernel*. You can restart the kernel (the interpreter) using the *Kernel* menu. This is useful if you want to delete all your variables.", "_____no_output_____" ], [ "Jupyter has also \"magic\" functions that start with % . They allow you to do a lot of useful things with your ipython environment :\n\nhttp://nbviewer.ipython.org/github/ipython/ipython/blob/1.x/examples/notebooks/Cell%20Magics.ipynb\n\nThe %who magic gives you a list of the defined python variables. object? can be used to get documentation about an object :", "_____no_output_____" ] ], [ [ "a = 2\n%who", "a\t my_documented_function\t \n" ] ], [ [ "## 2. 
Scientific computing with Python", "_____no_output_____" ] ], [ [ "def my_documented_function(a):\n '''\n This is a revolutionary function that returns a + 1\n '''\n return a + 1\n\nprint(my_documented_function(2))\nprint('This is the traditional python help() function :\\n')\nhelp(my_documented_function)", "3\nThis is the traditional python help() function :\n\nHelp on function my_documented_function in module __main__:\n\nmy_documented_function(a)\n This is a revolutionary function that returns a + 1\n\n" ], [ "# We can access the same info with just ? (note that you have to run this cell to view the effect)\nmy_documented_function?", "_____no_output_____" ] ], [ [ "Python has a number of packages (libraries) dedicated to scientific programming :\n\nThe foundation is [numpy](http://www.numpy.org/) which provides a N-dimensional array implementation with a nice indexing syntax (similar to MATLAB).\n\nThen comes [scipy](http://www.scipy.org/) which contains a number of algorithms (signal processing, distance computation, etc...) built on top of numpy.\n\n[matplotlib](http://matplotlib.org/) is a library to create 2D plots.\n\n[pandas](http://pandas.pydata.org/) provides a DataFrame implementation, which is a layer on top of numpy arrays that makes some things (handling missing values, date indexing) easier. Heavily inspired by the [R](http://www.r-project.org/) statistical computing language.\n\n[scikit-learn](http://scikit-learn.org/stable/) is a machine learning library that contains implementations of many of the most popular machine learning algorithms.\n\n[keras](https://keras.io/), [tensorflow](http://www.tensorflow.org) and [pytorch](https://pytorch.org/) allow you to write programs that are compiled and can run on a GPU.\n\nFinally, this is not a python package, but [stackoverflow](http://stackoverflow.com/) is a really good questions and answers platform where you can probably find answers to the most common problems you'll have :-)", "_____no_output_____" ], [ "If you want to install a scientific python environment on your machines, we suggest the use of [anaconda](https://store.continuum.io/cshop/anaconda/). It is a \"python distribution\" that comes with a package manager (conda) and all of the scientific packages listed above (and many others) pre-installed. We strongly recommend to use the [miniconda](https://docs.conda.io/en/latest/miniconda.html) variant, which install only the basic packages and the package manager (conda) while consuming less space in your machine.", "_____no_output_____" ], [ "## 3. Quick numpy introduction", "_____no_output_____" ], [ "Numpy allows you to define [multidimensionnal arrays](http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html) (recommended reading).", "_____no_output_____" ] ], [ [ "# Makes the numpy function available as np.<funcname> (np is a convention)\nimport numpy as np\n\narray1 = np.array([\n [1, 2, 3],\n [4, 5, 6]]\n)\n\nprint(array1.shape)\n\n# The last line of a python cell is evaluated and used as the output for the cell\narray1", "(2, 3)\n" ] ], [ [ "The array elements can be accessed using the [indexing syntax](http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#arrays-indexing) (recommended reading). 
Numpy (and python) uses 0-based indexing.", "_____no_output_____" ] ], [ [ "print(\"element (2,3) : \", array1[1,2])\nprint(\"first row : \", array1[0,:])\nprint(\"second column : \", array1[:,1])\nprint(\"second and third element of the second row : \", array1[1, 1:])", "element (2,3) : 6\nfirst row : [1 2 3]\nsecond column : [2 5]\nsecond and third element of the second row : [5 6]\n" ] ], [ [ "You can do a [lot of things](http://docs.scipy.org/doc/numpy/reference/) with numpy arrays. For example, we can compute the mean of each row :", "_____no_output_____" ] ], [ [ "array1.mean(axis=1)", "_____no_output_____" ] ], [ [ "We can also sort an array :", "_____no_output_____" ] ], [ [ "array2 = np.array([5, 8, 2, 9, 4, 3])\nprint(\"original\\t :\\t\", array2)\n\n\nprint(\"using argsort\\t :\\t\", array2[np.argsort(array2)]) # the fancy indexing version\n\narray2.sort() # note that this does in-place sorting, so it *modifies* array2\nprint(\"after sort()\\t :\\t\", array2)\n", "original\t :\t [5 8 2 9 4 3]\nusing argsort\t :\t [2 3 4 5 8 9]\nafter sort()\t :\t [2 3 4 5 8 9]\n" ] ], [ [ "And we can randomly shuffle an array:", "_____no_output_____" ] ], [ [ "np.random.shuffle(array2)\n\nprint(array2)\n\nprint(\"the max value is at position:\", np.argmax(array2))", "[4 8 2 5 9 3]\nthe max value is at position: 4\n" ] ], [ [ "We can also use comparison operators on arrays, giving us a boolean mask. And then use the mask to index the array :", "_____no_output_____" ] ], [ [ "mask = (array1 > 2) & (array1 < 5)\nprint(array1)\nprint(mask)\nprint(array1[mask])", "[[1 2 3]\n [4 5 6]]\n[[False False True]\n [ True False False]]\n[3 4]\n" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Exercises</p>", "_____no_output_____" ], [ "As exercises, do the following :\n\n- Compute the max of each row of array1\n- Compute the max of each column of array1\n- Print the elements of array1 that are less than 4 (without using a loop)\n- Convert the range of the values in array2 to [0,1]", "_____no_output_____" ] ], [ [ "print(array1)\nprint(\"Max of each row\\t\\t\\t: \" + str(np.amax(array1, axis=1)))\nprint(\"Max of each column\\t\\t: \" + str(np.amax(array1, axis=0)))\n\nmask = array1 < 4\nprint(\"Elements that are less than 4\\t: \" + str(array1[mask]))\n\n# TODO: range ? HEIN?!", "[[1 2 3]\n [4 5 6]]\nMax of each row\t\t\t: [3 6]\nMax of each column\t\t: [4 5 6]\nElements that are less than 4\t: [1 2 3]\n" ] ], [ [ "## 4. Loading a dataset with numpy ", "_____no_output_____" ], [ "### The Wine dataset", "_____no_output_____" ], [ "We will use the wine dataset from the UCI repository :\nhttps://archive.ics.uci.edu/ml/datasets/Wine\n\nThe wine.data file is a simple CSV file, which we can be easily loaded as a numpy array with np.genfromtxt.", "_____no_output_____" ] ], [ [ "# Some nicer printing defaults for numpy arrays\nnp.set_printoptions(precision=5, suppress=True)\n\n# Be careful to place the database in the right directory or adapt the path\ndata = np.genfromtxt('data/wine/wine.data', delimiter=',')\nprint(data.shape)\nprint(data)", "(178, 14)\n[[ 1. 14.23 1.71 ... 1.04 3.92 1065. ]\n [ 1. 13.2 1.78 ... 1.05 3.4 1050. ]\n [ 1. 13.16 2.36 ... 1.03 3.17 1185. ]\n ...\n [ 3. 13.27 4.28 ... 0.59 1.56 835. ]\n [ 3. 13.17 2.59 ... 0.6 1.62 840. ]\n [ 3. 14.13 4.1 ... 0.61 1.6 560. ]]\n" ] ], [ [ "So this is a dataset with 178 samples and 14 dimensions for each sample. 
Let's have a look at the dataset description file (cat is a nice jupyter command that prints the content of a text file, like the unix cat)", "_____no_output_____" ] ], [ [ "cat 'data/wine/wine.names'", "1. Title of Database: Wine recognition data\r\n\tUpdated Sept 21, 1998 by C.Blake : Added attribute information\r\n\r\n2. Sources:\r\n (a) Forina, M. et al, PARVUS - An Extendible Package for Data\r\n Exploration, Classification and Correlation. Institute of Pharmaceutical\r\n and Food Analysis and Technologies, Via Brigata Salerno, \r\n 16147 Genoa, Italy.\r\n\r\n (b) Stefan Aeberhard, email: [email protected]\r\n (c) July 1991\r\n3. Past Usage:\r\n\r\n (1)\r\n S. Aeberhard, D. Coomans and O. de Vel,\r\n Comparison of Classifiers in High Dimensional Settings,\r\n Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of\r\n Mathematics and Statistics, James Cook University of North Queensland.\r\n (Also submitted to Technometrics).\r\n\r\n The data was used with many others for comparing various \r\n classifiers. The classes are separable, though only RDA \r\n has achieved 100% correct classification.\r\n (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))\r\n (All results using the leave-one-out technique)\r\n\r\n In a classification context, this is a well posed problem \r\n with \"well behaved\" class structures. A good data set \r\n for first testing of a new classifier, but not very \r\n challenging.\r\n\r\n (2) \r\n S. Aeberhard, D. Coomans and O. de Vel,\r\n \"THE CLASSIFICATION PERFORMANCE OF RDA\"\r\n Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of\r\n Mathematics and Statistics, James Cook University of North Queensland.\r\n (Also submitted to Journal of Chemometrics).\r\n\r\n Here, the data was used to illustrate the superior performance of\r\n the use of a new appreciation function with RDA. \r\n\r\n4. Relevant Information:\r\n\r\n -- These data are the results of a chemical analysis of\r\n wines grown in the same region in Italy but derived from three\r\n different cultivars.\r\n The analysis determined the quantities of 13 constituents\r\n found in each of the three types of wines. \r\n\r\n -- I think that the initial data set had around 30 variables, but \r\n for some reason I only have the 13 dimensional version. \r\n I had a list of what the 30 or so variables were, but a.) \r\n I lost it, and b.), I would not know which 13 variables\r\n are included in the set.\r\n\r\n -- The attributes are (dontated by Riccardo Leardi, \r\n\[email protected] )\r\n \t1) Alcohol\r\n \t2) Malic acid\r\n \t3) Ash\r\n\t4) Alcalinity of ash \r\n \t5) Magnesium\r\n\t6) Total phenols\r\n \t7) Flavanoids\r\n \t8) Nonflavanoid phenols\r\n \t9) Proanthocyanins\r\n\t10)Color intensity\r\n \t11)Hue\r\n \t12)OD280/OD315 of diluted wines\r\n \t13)Proline \r\n\r\n5. Number of Instances\r\n\r\n \tclass 1 59\r\n\tclass 2 71\r\n\tclass 3 48\r\n\r\n6. Number of Attributes \r\n\t\r\n\t13\r\n\r\n7. For Each Attribute:\r\n\r\n\tAll attributes are continuous\r\n\t\r\n\tNo statistics available, but suggest to standardise\r\n\tvariables for certain uses (e.g. for us with classifiers\r\n\twhich are NOT scale invariant)\r\n\r\n\tNOTE: 1st attribute is class identifier (1-3)\r\n\r\n8. Missing Attribute Values:\r\n\r\n\tNone\r\n\r\n9. 
Class Distribution: number of instances per class\r\n\r\n \tclass 1 59\r\n\tclass 2 71\r\n\tclass 3 48\r\n" ], [ "# store the column names in an array\ncolnames = np.array([\n 'class', 'alcohol', 'malic acid', 'ash', 'alcalinity of ash', 'magnesium', 'total phenols', \n 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue',\n 'OD280/OD315 of diluted wines', 'proline'\n])\ncolnames.shape", "_____no_output_____" ] ], [ [ "## 5. Quick introduction to Pandas", "_____no_output_____" ], [ "Ok, so now we have two numpy arrays. *data* contains our dataset and *colnames* the name of our columns. However, we would like to deal with only one data structure that can store both the data and the columns names. That's what pandas' DataFrame are for ! (or, but a bit less elegant, [numpy structured arrays](http://docs.scipy.org/doc/numpy/user/basics.rec.html)).\n\n(Note that we could also directly load our CSV using pandas.read_csv)", "_____no_output_____" ] ], [ [ "import pandas as pd # pd is a convention too\n\ndf = pd.DataFrame(data=data[:,1:], columns=colnames[1:])\n# Ensure the class column is an int\ndf['class'] = data[:,0].astype(np.int)\n\n# Pandas dataframes have a nice pretty-printing for ipython notebooks\ndf", "_____no_output_____" ] ], [ [ "DataFrames have a nice <b>describe()</b> function that print some per-column statistics", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Exercise</p>", "_____no_output_____" ], [ "Describe in statistical terms the alcohol variable for each class of wine. E.g., use the describe() function of dataframes for each class of wine.", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "## 6. Visual exploratory analysis of data ", "_____no_output_____" ], [ "In statistics, exploratory data analysis (EDA) is an approach for analyzing datasets to summarize their main characteristics, often with visual methods.\n\nFor example, box and whisker plots use a graphical box: the bottom and top of the box are always the first (Q1) and third (Q3) quartiles, and the band inside the box is always the second quartile (the median). The whiskers are placed at Q1 - 1.5 IQR and Q3 + 1.5 IQR, where IQR means Inter-quartile range.\nSee [Boxplot definition at Wikipedia](https://en.wikipedia.org/wiki/Box_plot)", "_____no_output_____" ], [ "### Boxplots with matplotlib", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as pl # pylab is matplotlib\n# The line below enables matplotlib-jupyter integration and allows plots to be displayed inline in the notebook.\n%matplotlib inline", "_____no_output_____" ] ], [ [ "We want to see what the distribution of each feature is for each class. 
We'll use the [boxplot](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot) function of pylab/pyplot.", "_____no_output_____" ] ], [ [ "classes = np.unique(df['class'])\nprint('classes : ', classes)\n\nalcohol_by_class = [df['alcohol'][df['class'] == c] for c in classes]\n\npl.boxplot(alcohol_by_class)\npl.grid()", "classes : [1 2 3]\n" ] ], [ [ "dataframes have a [boxplot](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html) function that does per-class plotting with the *by* parameter :", "_____no_output_____" ] ], [ [ "df.boxplot(column='alcohol', by='class'); # the ; at the end makes Python not to print the response of boxplot", "_____no_output_____" ] ], [ [ "We can do that for each column", "_____no_output_____" ] ], [ [ "for c in df.columns:\n df.boxplot(column=c, by='class')", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: By looking at the boxplot, which features seem the most discriminative ? (which variables would be helpful to separate the wine classes?)</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "## 7. Classifying the wine data", "_____no_output_____" ], [ "Let's first try a rule-based approach to classify the wine data.", "_____no_output_____" ], [ "Suppose that the <b>alcohol</b> variable allows for a good classification and by observing the corresponding boxplot, let's define the rules that associate a class to alcohol range values.", "_____no_output_____" ] ], [ [ "pred = []\n\nfor row in df['alcohol']:\n if row > 13.5:\n pred.append(1);\n elif row > 12.75 and row < 13.5 :\n pred.append(2); \n else:\n pred.append(3)\n\n# A new column is added to the dataframe\ndf['prediction'] = pred", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Can you estimate the performance of such a classification method ?</p>", "_____no_output_____" ], [ "<b>Hint:</b> Divide the number of times the prediction value corresponds to the real one (e.g., how many times, class == prediction ?), by the total number of observations. ", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "## 8. Performance evaluation", "_____no_output_____" ], [ "#### Confusion matrix, precision, recall and F-score", "_____no_output_____" ], [ "A <b>confusion matrix</b> is a table that is often used to describe the performance of a classification model (or \"classifier\") on a set of test data for which the true values are known. In the confusion matrix all correct guesses are located in the diagonal of the table, so it's easy to visually inspect the table for errors, as they will be represented by values outside the diagonal.\n\nFor an example, please see the Wikipedia page: https://en.wikipedia.org/wiki/Confusion_matrix\n\nBehind the confusion matrix are the following concepts:\n\n<b>true positives (TP)</b>: These are cases in which we predicted correctly the positive class (e.g. the person has a disease and has been diagnosed as sick).\n\n<b>true negatives (TN):</b> We correctly predicted the negative class (e.g. the person is healthy and has not been diagnosed as sick).\n\n<b>false positives (FP):</b> We wrongly predicted the positive class (e.g. the person don't actually have the disease but has been diagnosed as sick). (Also known as a \"Type I error.\")\n\n<b>false negatives (FN):</b> We wrongly predicted the negative class (e.g. 
the person si diagnosed as healthy but actually is sick). (Also known as a \"Type II error.\")\n\n<b>precision</b>: When we predict the positive class, how often are we correct?<p>\n\n<font color=\"red\">precision = tp/(tp + fp)</font>\n\nSensitivity and specificity are statistical measures of the performance of a binary classification test, also known in statistics as classification function:\n\n<b>Recall or sensitivity</b> (also called the true positive rate or probability of detection in some fields) measures the proportion of positives that are correctly identified as such (e.g., the percentage of sick people who are correctly identified as having the condition).<p>\n\n<font color=\"red\">recall = tp/(tp + fn)</font>\n\n<b>Specificity</b> (also called the true negative rate) measures the proportion of negatives that are correctly identified as such (e.g., the percentage of healthy people who are correctly identified as not having the condition).\n\nThe <b>F1 score</b> can be interpreted as a weighted average of precision and recall, where an F1 score reaches its best value at 1 and worst at 0.<p>\n\n<font color=\"red\">f1-score = 2 x precision x recall / ( precision + recall)</font>\n", "_____no_output_____" ] ], [ [ "# This function generates a colored confusion matrix.\n\nimport matplotlib.cm as cm\n\ndef plot_confusion_matrix(confmat, labels_names, ax=None):\n if ax is None:\n ax = pl.subplot(111)\n cmim = ax.matshow(confmat, interpolation='nearest', cmap=cm.jet)\n\n for i in range(confmat.shape[0]):\n for j in range(confmat.shape[1]):\n ax.annotate(str(confmat[i, j]), xy=(j, i),\n horizontalalignment='center',\n verticalalignment='center',\n fontsize=8)\n ax.set_xticks(np.arange(confmat.shape[0]))\n ax.set_xticklabels([labels_names[l] for l in range(confmat.shape[0])], rotation='vertical')\n ax.set_yticks(np.arange(confmat.shape[1]))\n _ = ax.set_yticklabels([labels_names[l] for l in range(confmat.shape[1])])\n ax.set_xlabel('predicted label')\n ax.xaxis.set_label_position('top')\n ax.set_ylabel('true label')\n pl.colorbar(cmim, shrink=0.7, orientation='horizontal', pad=0.01)", "_____no_output_____" ], [ "import sklearn.metrics as skmetrics\n\nlabels_names=['1', '2', '3']\nC = skmetrics.confusion_matrix(y_true=df['class'], y_pred=df['prediction'])\nplot_confusion_matrix(C, labels_names)\n\nprint(skmetrics.classification_report(y_true=df['class'], y_pred=df['prediction']))", " precision recall f1-score support\n\n 1 0.75 0.69 0.72 59\n 2 0.16 0.11 0.13 71\n 3 0.15 0.23 0.18 48\n\n accuracy 0.34 178\n macro avg 0.35 0.35 0.34 178\nweighted avg 0.35 0.34 0.34 178\n\n" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Define a rule that uses the most discriminative feature to classify the wine observations ?</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Compute the confusion matrix of the resulting rule-based system ?</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Compute the precision, the recall and the f1-score of the system for a given class using the values of the confusion matrix ?</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "## 9. 
Try a simple machine learning classifier", "_____no_output_____" ], [ "### Standardization\nAs you can see from the df.describe() above, the range values for each column in our dataset varies a lot. Depending on the model you want to use, that can be a big problem.\n\nThe model we'll use here is k-Nearest Neighbor with the Euclidean distance. When using the Euclidean distance, it is important to think about how to preprocess your data.\n\nFor example, look at the 'magnesium' and 'total phenols' columns. The standard deviation for magnesium is 14 while for total phenols it is 0.62. This means that the data are more spread out on the magnesium axis compared to the phenols axis. And if we use raw values to compute distances, the magnesium axis will be much more important than the phenols axis, but this importance will just be due to the (arbitrary) scales that we used to measure magnesium and phenols.", "_____no_output_____" ] ], [ [ "print(df['magnesium'].describe())\nprint(df['total phenols'].describe())", "count 178.000000\nmean 99.741573\nstd 14.282484\nmin 70.000000\n25% 88.000000\n50% 98.000000\n75% 107.000000\nmax 162.000000\nName: magnesium, dtype: float64\ncount 178.000000\nmean 2.295112\nstd 0.625851\nmin 0.980000\n25% 1.742500\n50% 2.355000\n75% 2.800000\nmax 3.880000\nName: total phenols, dtype: float64\n" ] ], [ [ "To make the distances less dependent on particular scales, we can standardize our data by making sure each column has 0 mean and unit variance, using [sklearn.preprocessing.scale](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html) or by computing the normalization ourselves.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import scale\n\n# all variables of the dataset with normalization\n#X = scale(df[colnames[1:]])\n\n# some selected variables without normalization\nX = df[['magnesium', 'total phenols']].values\n\ny = df['class'].values", "_____no_output_____" ] ], [ [ "### Preparing the datasets for cross-validation: Train/test split\nTo build and evaluate a machine learning model, we need to split our data into training and testing sets. Scikit-learn has a [cross_validation](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) module that helps with this task.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n# train and test are indices arrays containing the indices of train/test samples\ntrain, test = train_test_split(\n np.arange(X.shape[0]), test_size=0.4, random_state=42 # we fix a random state for reproducibility\n)\n\nprint(\"train shape : \", train.shape)\nprint(\"test shape : \", test.shape)", "train shape : (106,)\ntest shape : (72,)\n" ] ], [ [ "### Classifying the wine data\nWe will use a k-Nearest Neighbor classifier. This classifier will classify a new sample by assigning it the class of its nearest neighbor (for k=1). 
It computes the distance between the new sample and all the samples in the training set, find the nearest training sample and then use the class of the nearest neighbor to classify the new sample.\n\nThe [scipy.spatial.distance](http://docs.scipy.org/doc/scipy/reference/spatial.distance.html) module helps with distance computation", "_____no_output_____" ] ], [ [ "def norm(x):\n return np.sqrt(x.dot(x))\n\ndef euclidean_distance(x1, x2):\n \"\"\"Euclidean distance\"\"\"\n return norm(x1 - x2)\n\ndef distances(X_train, x):\n \"\"\"\n Returns a vector containing the distance between x and each\n sample in X_train\n \"\"\"\n n_train = X_train.shape[0]\n return np.array([euclidean_distance(X_train[i,:], x) for i in range(n_train)])\n\ndef nn_classify_single(X_train, y_train, X):\n \"\"\"\n Nearest neighbor classifier. Returns the class of the nearest training vector\n \"\"\"\n dists = distances(X_train, X)\n closest = np.argmin(dists)\n return y_train[closest]\n\ndef nn_classify(X_train, y_train, X):\n \"\"\"\n Assign to each row in X the class of its nearest neighbor in X_train\n \"\"\"\n y_pred = np.zeros(X.shape[0], dtype=np.int)\n for i in range(X.shape[0]):\n y_pred[i] = nn_classify_single(X_train, y_train, X[i])\n return y_pred", "_____no_output_____" ], [ "y_test_pred = nn_classify(X[train], y[train], X[test])\nprint(\"y_true : \", y[test])\nprint(\"y_pred : \", y_test_pred)", "y_true : [1 1 3 1 2 1 2 3 2 3 1 3 1 2 1 2 2 2 1 2 1 2 2 3 3 3 2 2 2 1 1 2 3 1 1 1 3\n 3 2 3 1 2 2 2 3 1 2 2 3 1 2 1 1 3 3 2 2 1 2 1 3 2 2 3 1 1 1 3 1 1 2 3]\ny_pred : [3 1 3 1 2 1 2 1 2 3 2 3 2 2 1 3 3 2 2 2 1 2 3 2 2 2 1 1 2 3 2 3 1 1 3 2 2\n 3 2 3 1 2 2 1 3 2 3 1 2 2 2 1 1 3 2 3 2 1 3 2 2 2 2 2 1 2 2 3 1 2 2 3]\n" ], [ "labels_names=['1', '2', '3']\nC = skmetrics.confusion_matrix(y_true=y[test], y_pred=y_test_pred)\nplot_confusion_matrix(C, labels_names)\n\nprint(skmetrics.classification_report(y_true=y[test], y_pred=y_test_pred))", " precision recall f1-score support\n\n 1 0.67 0.46 0.55 26\n 2 0.46 0.59 0.52 27\n 3 0.47 0.47 0.47 19\n\n accuracy 0.51 72\n macro avg 0.53 0.51 0.51 72\nweighted avg 0.54 0.51 0.52 72\n\n" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Now, normalize (scale) the columns of your data matrix and re-run the classification. What do you observe ?</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: Above, we implemented a basic nearest neighbor classifier (k=1). Modify it so that it is a k nearest neighbor classifier. Evaluate it for k=3, k=5 and k=10. Does increasing k help ? Is there a limit after which increasing k is useless ? Why ?</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "<p style=\"background-color:#660066; color:#fff;padding:5px; font-weight:bold\">Q: In our example, we only used two features (magnesium and total phenols). Try classifying with all the features and compare the performance.</p>", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec566a7141f9cbddf5fc9d4420751960c842b661
9,876
ipynb
Jupyter Notebook
notebooks/development/nearest_neighbour.ipynb
Withington/deepscent
45d10dba5055be88f522bed2acb7bbe8deedd0fe
[ "MIT" ]
1
2020-02-15T14:41:42.000Z
2020-02-15T14:41:42.000Z
notebooks/development/nearest_neighbour.ipynb
Withington/deepscent
45d10dba5055be88f522bed2acb7bbe8deedd0fe
[ "MIT" ]
2
2021-08-25T15:23:56.000Z
2022-02-10T00:10:12.000Z
notebooks/development/nearest_neighbour.ipynb
Withington/deepscent
45d10dba5055be88f522bed2acb7bbe8deedd0fe
[ "MIT" ]
1
2020-04-11T03:45:13.000Z
2020-04-11T03:45:13.000Z
30.387692
175
0.545666
[ [ [ "# K-Nearest Neighbour Classifier", "_____no_output_____" ] ], [ [ "import os\nfrom pathlib import Path\nfrom datetime import datetime\nfrom dateutil.tz import gettz\nimport csv\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier, DistanceMetric\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, classification_report\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nnp.random.seed(455)\nk_fold_seed = 765432", "_____no_output_____" ] ], [ [ "## User inputs", "_____no_output_____" ] ], [ [ "flist = ['private_balanced'] #, 'private_dog0_correct_plus', 'private_dog2_correct'] # List of dataset directory names. WormsTwoClass Lightning2 Earthquakes GunPoint \n\nn_neighbors=1\n\nk = 3 # For k-fold cross validation. If k=1, the original test-train split is used.\nm = 4 # Number of repetitions of k-fold cross validation (if k>1).\n\n# Input directory\nif 'private_dog0' == flist[0]:\n fdir = '../data/private_data/private_events_dev' \nelif 'private' in flist[0]:\n fdir = '../data/private_data/private_events_dev2' \nelse:\n fdir = '../data' \n \n# Output directories\nlogs_dir = '../logs'\ntimestamp = '{:%Y-%m-%dT%H:%M}'.format(datetime.now(gettz(\"Europe/London\")))\nlogs_dir = logs_dir +'/' + timestamp\n\nif 'private' in flist[0] and 'correct_plus' in flist[0]:\n do_end_test = True\nelse:\n do_end_test = False\n \ndef readucr(filename):\n ''' Load a dataset from a file in UCR format\n space delimited, class labels in the first column.\n Returns\n X : DNN input data\n Y : class labels\n '''\n data = np.loadtxt(Path(filename))\n Y = data[:,0]\n X = data[:,1:]\n return X, Y", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "fname = flist[0]\nx_train, y_train = readucr(fdir+'/'+fname+'/'+fname+'_TRAIN.txt')\nx_test, y_test = readucr(fdir+'/'+fname+'/'+fname+'_TEST.txt')\n\nnb_classes = 2\ny_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)\ny_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)\n \nx_train_mean = x_train.mean()\nx_train_std = x_train.std()\nx_train = (x_train - x_train_mean)/(x_train_std) \nx_test = (x_test - x_train_mean)/(x_train_std)\n\nprint('Number of training samples of class 0', (y_train == 0).sum())\nprint('Number of training samples of class 1', (y_train == 1).sum())\nprint('Number of test samples of class 0', (y_test == 0).sum())\nprint('Number of test samples of class 1', (y_test == 1).sum())", "_____no_output_____" ] ], [ [ "## Fit classifier (single train and test)", "_____no_output_____" ] ], [ [ "neigh = KNeighborsClassifier(n_neighbors=n_neighbors, metric='euclidean') # minkowski\nneigh.fit(x_train, y_train) ", "_____no_output_____" ], [ "y_pred = neigh.predict(x_test)\ncm = confusion_matrix(y_test, y_pred, labels=[1,0])\nacc_calc = (cm[0][0]+cm[1][1])/(cm.sum())\ncm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\nprint('Pred', y_pred[:20])\nprint('True', y_test.astype(int))\nprint(cm)\nprint('Calculated accuracy:',acc_calc)\nprint('Normalised confusion matrix:\\n', cm_norm)", "_____no_output_____" ] ], [ [ "## Test on other dataset", "_____no_output_____" ] ], [ [ "if do_end_test:\n other = fname+'_END_TEST' #_dog_incorrect' # 'private_dog0_correct_plus_END_TEST'\n datadir = fdir+'/'+fname\n print('Testing on:', datadir+'/'+other+'.txt')\n x_other, y_other = readucr(datadir+'/'+other+'.txt')\n y_other_pred = neigh.predict(x_other)\n\n # 
Results\n cm = confusion_matrix(y_other, y_other_pred, labels=[1,0])\n acc_calc = (cm[0][0]+cm[1][1])/(cm.sum())\n cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('KNN cm\\n', cm)\n print('KNN cm_norm\\n', cm_norm)\n print('KNN acc', acc_calc)\n\n # Get dog result\n meta = pd.read_csv(datadir+'/'+other+'_meta.txt', sep=',', parse_dates=['date'])\n cm = confusion_matrix(y_other, meta['dog_pred'], labels=[1,0])\n dog_acc = (cm[0][0]+cm[1][1])/(cm.sum())\n cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Dog cm\\n', cm)\n print('Dog cm_norm\\n', cm_norm)\n print('Dog acc', dog_acc)", "_____no_output_____" ] ], [ [ "# k-fold cross validation", "_____no_output_____" ] ], [ [ "# k-fold cross validation setup\nif k > 1:\n x_all = np.concatenate((x_train, x_test), axis=0)\n y_all = np.concatenate((y_train, y_test), axis=0)\n kfold = RepeatedStratifiedKFold(n_splits=k, n_repeats=m, random_state=k_fold_seed)\n scores = list()\n other_scores = list() # accuracy on the other dataset, the realistic dataset\n for train, test in kfold.split(x_all, y_all):\n x_train, y_train, x_test, y_test = x_all[train], y_all[train], x_all[test], y_all[test]\n neigh = KNeighborsClassifier(n_neighbors=n_neighbors)\n neigh.fit(x_train, y_train)\n scores.append(neigh.score(x_test, y_test))\n if do_end_test:\n other_scores.append(neigh.score(x_other, y_other))\n print(scores)\n print('Estimated Accuracy and sample std dev:')\n print(np.mean(scores))\n print(np.std(scores, ddof=1))\n \n if do_end_test:\n print(other_scores)\n print('Estimated Accuracy and sample std dev on realistic dataset:')\n print(np.mean(other_scores))\n print(np.std(other_scores, ddof=1))\nelse:\n neigh = KNeighborsClassifier(n_neighbors=n_neighbors)\n neigh.fit(x_train, y_train)\n print('Accuracy', neigh.score(x_test, y_test))\n \n# Save the result to file\nPath(logs_dir+'/'+fname).mkdir(parents=True, exist_ok=True)\nwith open(logs_dir+'/'+fname+'/nearestneighbours_summary.csv', 'w') as f:\n w = csv.writer(f, dialect='excel')\n if do_end_test:\n for s, o in zip(scores, other_scores):\n w.writerow([s, o])\n else:\n for s in zip(scores):\n w.writerow(s)\n print('Added scores to ', f.name) ", "_____no_output_____" ], [ "data = pd.DataFrame(scores, columns=['val_acc'])\ndata.boxplot(whis=[2.5,97.5])", "_____no_output_____" ], [ "sns.set(style=\"whitegrid\")\nax = sns.boxplot(data=data)\nax = sns.swarmplot(data=data, color='black')", "_____no_output_____" ] ], [ [ "## Compare classifiers", "_____no_output_____" ] ], [ [ "file1 = '../logs/2019-03-17T12:59/private_dog0_correct/devnet_summary.csv'\ndata1 = pd.read_csv(file1, header=None, names=['run','loss','val_acc','epoch','time'])\nname1 = 'dog0_correct'\n\nall_data = [data1['val_acc'], data['val_acc']]\nsns.set(style=\"whitegrid\")\nax = sns.boxplot(data=all_data)\nax = sns.swarmplot(data=all_data, color='black')\nplt.xticks([0, 1], ['dev_net', 'kNN'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec567560ea589577e0b56194ab689025131389ac
3,895
ipynb
Jupyter Notebook
_downloads/words.ipynb
TeamNotJava/networkx-doc
b86736258d5459c17868dce63e626fea66b0e5f4
[ "BSD-3-Clause" ]
null
null
null
_downloads/words.ipynb
TeamNotJava/networkx-doc
b86736258d5459c17868dce63e626fea66b0e5f4
[ "BSD-3-Clause" ]
null
null
null
_downloads/words.ipynb
TeamNotJava/networkx-doc
b86736258d5459c17868dce63e626fea66b0e5f4
[ "BSD-3-Clause" ]
null
null
null
72.12963
2,379
0.545058
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Words\n\n\nWords/Ladder Graph\n------------------\nGenerate an undirected graph over the 5757 5-letter words in the\ndatafile `words_dat.txt.gz`. Two words are connected by an edge\nif they differ in one letter, resulting in 14,135 edges. This example\nis described in Section 1.1 in Knuth's book (see [1]_ and [2]_).\n\nReferences\n----------\n.. [1] Donald E. Knuth,\n \"The Stanford GraphBase: A Platform for Combinatorial Computing\",\n ACM Press, New York, 1993.\n.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html\n\n", "_____no_output_____" ] ], [ [ "# Authors: Aric Hagberg ([email protected]),\n# Brendt Wohlberg,\n# [email protected]\n\n# Copyright (C) 2004-2018 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nimport gzip\nfrom string import ascii_lowercase as lowercase\n\nimport networkx as nx\n\n#-------------------------------------------------------------------\n# The Words/Ladder graph of Section 1.1\n#-------------------------------------------------------------------\n\n\ndef generate_graph(words):\n G = nx.Graph(name=\"words\")\n lookup = dict((c, lowercase.index(c)) for c in lowercase)\n\n def edit_distance_one(word):\n for i in range(len(word)):\n left, c, right = word[0:i], word[i], word[i + 1:]\n j = lookup[c] # lowercase.index(c)\n for cc in lowercase[j + 1:]:\n yield left + cc + right\n candgen = ((word, cand) for word in sorted(words)\n for cand in edit_distance_one(word) if cand in words)\n G.add_nodes_from(words)\n for word, cand in candgen:\n G.add_edge(word, cand)\n return G\n\n\ndef words_graph():\n \"\"\"Return the words example graph from the Stanford GraphBase\"\"\"\n fh = gzip.open('words_dat.txt.gz', 'r')\n words = set()\n for line in fh.readlines():\n line = line.decode()\n if line.startswith('*'):\n continue\n w = str(line[0:5])\n words.add(w)\n return generate_graph(words)\n\n\nif __name__ == '__main__':\n G = words_graph()\n print(\"Loaded words_dat.txt containing 5757 five-letter English words.\")\n print(\"Two words are connected if they differ in one letter.\")\n print(\"Graph has %d nodes with %d edges\"\n % (nx.number_of_nodes(G), nx.number_of_edges(G)))\n print(\"%d connected components\" % nx.number_connected_components(G))\n\n for (source, target) in [('chaos', 'order'),\n ('nodes', 'graph'),\n ('pound', 'marks')]:\n print(\"Shortest path between %s and %s is\" % (source, target))\n try:\n sp = nx.shortest_path(G, source, target)\n for n in sp:\n print(n)\n except nx.NetworkXNoPath:\n print(\"None\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
ec569449f62cb961c8ee30f056422238a77330d6
132,961
ipynb
Jupyter Notebook
tutorials/streamlit_notebooks/healthcare/NER_BERT_TOKEN_CLASSIFIER.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
null
null
null
tutorials/streamlit_notebooks/healthcare/NER_BERT_TOKEN_CLASSIFIER.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
null
null
null
tutorials/streamlit_notebooks/healthcare/NER_BERT_TOKEN_CLASSIFIER.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
null
null
null
87.416831
30,693
0.579614
[ [ [ "![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/NER_BERT_TOKEN_CLASSIFIER.ipynb)", "_____no_output_____" ], [ "To run this yourself, you will need to upload your license keys to the notebook. Just Run The Cell Below in order to do that. Also You can open the file explorer on the left side of the screen and upload license_keys.json to the folder that opens. Otherwise, you can look at the example outputs at the bottom of the notebook.", "_____no_output_____" ], [ "## 1. Colab Setup", "_____no_output_____" ], [ "Import license keys", "_____no_output_____" ] ], [ [ "import json\nimport os\n\nfrom google.colab import files\n\nlicense_keys = files.upload()\n\nwith open(list(license_keys.keys())[0]) as f:\n license_keys = json.load(f)\n\n# Defining license key-value pairs as local variables\nlocals().update(license_keys)\n\n# Adding license key-value pairs to environment variables\nos.environ.update(license_keys)", "_____no_output_____" ], [ "# Installing pyspark and spark-nlp\n! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION\n\n# Installing Spark NLP Healthcare\n! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET\n\n# Installing Spark NLP Display Library for visualization\n! pip install -q spark-nlp-display", "_____no_output_____" ], [ "import pandas as pd\nimport json\nimport os\n\nimport sparknlp\nfrom sparknlp.base import *\nfrom sparknlp.common import *\nfrom sparknlp.annotator import *\nfrom sparknlp.training import *\n\nimport sparknlp_jsl\nfrom sparknlp_jsl.base import *\nfrom sparknlp_jsl.annotator import *\nfrom pyspark.ml import Pipeline\nfrom sparknlp.base import *\nimport pyspark.sql.functions as F\n\nspark = sparknlp_jsl.start(license_keys['SECRET'])\nspark", "_____no_output_____" ] ], [ [ "## 2. Create example inputs", "_____no_output_____" ] ], [ [ "text_list_jsl = [\n \"\"\"The patient is a 30-year-old female with a long history of insulin dependent diabetes, type 2; coronary artery disease; chronic renal insufficiency; peripheral vascular disease, also secondary to diabetes; who was originally admitted to an outside hospital for what appeared to be acute paraplegia, lower extremities. She did receive a course of Bactrim for 14 days for UTI. Evidently, at some point in time, the patient was noted to develop a pressure-type wound on the sole of her left foot and left great toe. She was also noted to have a large sacral wound; this is in a similar location with her previous laminectomy, and this continues to receive daily care. The patient was transferred secondary to inability to participate in full physical and occupational therapy and continue medical management of her diabetes, the sacral decubitus, left foot pressure wound, and associated complications of diabetes. She is given Fragmin 5000 units subcutaneously daily, Xenaderm to wounds topically b.i.d., Lantus 40 units subcutaneously at bedtime, OxyContin 30 mg p.o. q.12 h., folic acid 1 mg daily, levothyroxine 0.1 mg p.o. daily, Prevacid 30 mg daily, Avandia 4 mg daily, Norvasc 10 mg daily, Lexapro 20 mg daily, aspirin 81 mg daily, Senna 2 tablets p.o. q.a.m., Neurontin 400 mg p.o. t.i.d., Percocet 5/325 mg 2 tablets q.4 h. p.r.n., magnesium citrate 1 bottle p.o. 
p.r.n., sliding scale coverage insulin, Wellbutrin 100 mg p.o. daily, and Bactrim DS b.i.d.\"\"\"]", "_____no_output_____" ], [ "text_list_drug = [\n \"The human KCNJ9 (Kir 3.3, GIRK3) is a member of the G-protein-activated inwardly rectifying potassium (GIRK) channel family. Here we describe the genomic organization of the KCNJ9 locus on chromosome 1q21-23 as a candidate gene for type II diabetes mellitus in the Pima Indian population. The gene spans approximately 7.6 kb and contains one noncoding and two coding exons separated by approximately 2.2 and approximately 2.6 kb introns, respectively. We identified14 single nucleotide polymorphisms (SNPs), including one that predicts aVal366Ala substitution, and an 8 base-pair (bp) insertion/deletion. Our expression studies revealed the presence of the transcript in various human tissues including the pancreas, and two major insulin-responsive tissues: fat and skeletal muscle. The characterization of the KCNJ9 gene should facilitate further studies on the function of the KCNJ9 protein and allow evaluation of the potential role of the locus in Type II diabetes.BACKGROUND: At present, it is one of the most important issues for the treatment of breast cancer to develop the standard therapy for patients previously treated with anthracyclines and taxanes. With the objective of determining the usefulness of vinorelbine monotherapy in patients with advanced or recurrent breast cancer after standard therapy, we evaluated the efficacy and safety of vinorelbine in patients previously treated with anthracyclines and taxanes.\"]", "_____no_output_____" ], [ "text_list_deid = [\n \"\"\"HISTORY OF PRESENT ILLNESS: Mr. Smith is a 60-year-old white male veteran with multiple comorbidities, who has a history of bladder cancer diagnosed approximately two years ago by the VA Hospital. He underwent a resection there. He was to be admitted to the Day Hospital for cystectomy. He was seen in Urology Clinic and Radiology Clinic on 02/04/2003.\n\nHOSPITAL COURSE: Mr. Smith presented to the Day Hospital in anticipation for Urology surgery. On evaluation, EKG, echocardiogram was abnormal, a Cardiology consult was obtained. A cardiac adenosine stress MRI was then proceeded, same was positive for inducible ischemia, mild-to-moderate inferolateral subendocardial infarction with peri-infarct ischemia. In addition, inducible ischemia seen in the inferior lateral septum. Mr. Smith underwent a left heart catheterization, which revealed two vessel coronary artery disease. The RCA, proximal was 95% stenosed and the distal 80% stenosed. The mid LAD was 85% stenosed and the distal LAD was 85% stenosed. There was four Multi-Link Vision bare metal stents placed to decrease all four lesions to 0%. Following intervention, Mr. Smith was admitted to 7 Ardmore Tower under Cardiology Service under the direction of Dr. Hart. Mr. Smith had a noncomplicated post-intervention hospital course. He was stable for discharge home on 02/07/2003 with instructions to take Plavix daily for one month and Urology is aware of the same.\"\"\"]", "_____no_output_____" ], [ "text_list_jsl_slim = [\"\"\"HPI: A 69-year-old white female with a history of metastatic breast cancer, depression, anxiety, recent UTI, and obstructive uropathy, admitted to the ABCD Hospital on February 6, 2007, for lightheadedness, weakness, and shortness of breath. The patient was consulted by Psychiatry for anxiety. I know this patient from a previous consult. 
During this recent admission, the patient has experienced anxiety and had a panic attack yesterday with \"syncopal episodes.\" She was given Ativan 0.25 mg on a p.r.n. basis with relief after one to two hours. The patient was seen by Abc, MD, and Def, Ph.D. The laboratories were reviewed and were positive for UTI, and anemia is also present. The TSH level was within normal limits. She previously responded well to trazodone for depression, poor appetite, and decreased sleep and anxiety. A low dose of Klonopin was also helpful for sedation.\n\nPAST MEDICAL HISTORY: Metastatic breast cancer to bone. The patient also has a history of hypertension, hypothyroidism, recurrent UTI secondary to obstruction of left ureteropelvic junction, cholelithiasis, chronic renal insufficiency, Port-A-Cath placement, and hydronephrosis.\n\nPAST PSYCHIATRIC HISTORY: The patient has a history of depression and anxiety. She was taking Remeron 15 mg q.h.s., Ambien 5 mg q.h.s. on a p.r.n. basis, Ativan 0.25 mg every 6 hours on a p.r.n. basis, and Klonopin 0.25 mg at night while she was at home.\"\"\",\n \"\"\"The patient is a 21-day-old Caucasian male here for 2 days of congestion - mom has been suctioning yellow discharge from the patient's nares, plus she has noticed some mild problems with his breathing while feeding (but negative for any perioral cyanosis or retractions). One day ago, mom also noticed a tactile temperature and gave the patient Tylenol. Baby also has had some decreased p.o. intake. His normal breast-feeding is down from 20 minutes q.2h. to 5 to 10 minutes secondary to his respiratory congestion. He sleeps well, but has been more tired and has been fussy over the past 2 days. The parents noticed no improvement with albuterol treatments given in the ER. His urine output has also decreased; normally he has 8 to 10 wet and 5 dirty diapers per 24 hours, now he has down to 4 wet diapers per 24 hours. Mom denies any diarrhea. His bowel movements are yellow colored and soft in nature.\"\"\",\n \"\"\" The patient is a 40-year-old white male who presents with a chief complaint of \"chest pain\". The patient is diabetic and has a prior history of coronary artery disease. The patient presents today stating that his chest pain started yesterday evening and has been somewhat intermittent. He has been advised Aspirin 81 milligrams QDay. Humulin N. insulin 50 units in a.m. HCTZ 50 mg QDay. Nitroglycerin 1/150 sublingually PRN chest pain.\"\"\",\n \"\"\" HISTORY OF PRESENT ILLNESS: The patient is a 68-year-old Korean gentleman with a history of coronary artery disease, hypertension, diabetes and stage III CKD with a creatinine of 1.8 in May 2006 corresponding with the GFR of 40-41 mL/min. The patient had blood work done at Dr. XYZ's office on June 01, 2006, which revealed an elevation in his creatinine up to 2.3. He was asked to come in to see a nephrologist for further evaluation. I am therefore asked by Dr. XYZ to see this patient in consultation for evaluation of acute on chronic kidney failure. The patient states that he was actually taking up to 12 to 13 pills of Chinese herbs and dietary supplements for the past year. He only stopped about two or three weeks ago. He also states that TriCor was added about one or two months ago but he is not sure of the date. He has not had an ultrasound but has been diagnosed with prostatic hypertrophy by his primary care doctor and placed on Flomax. He states that his urinary dribbling and weak stream had not improved since doing this. 
For the past couple of weeks, he has had dizziness in the morning. This is then associated with low glucose. However the patient's blood glucose this morning was 123 and he still was dizzy. This was worse on standing. He states that he has been checking his blood pressure regularly at home because he has felt so bad and that he has gotten under 100/60 on several occasions. His pulses remained in the 60s.\n\nALLERGIES: None.\n\nMEDICATIONS: Imdur 20 mg two to three times daily, nitroglycerin p.r.n., insulin 70/30 40/45 units daily, Zetia 10 mg daily, ? Triglide 50 mg daily, Prevacid 30 mg daily, Plavix 75 mg daily, potassium 10 mEq daily, Lasix 60 mg daily, folate 1 mg b.i.d., Niaspan 500 mg daily, atenolol 50 mg daily, enalapril 10 mg b.i.d., glyburide 10 mg b.i.d., Xanax 0.25 mg b.i.d., aspirin 325 mg daily, Tylenol p.r.n., Zantac 150 mg b.i.d., Crestor 5 mg daily, TriCor 145 mg daily, Digitek 0.125 mg daily, Celexa 20 mg daily, and Flomax 0.4 mg daily.\"\"\",\n \"\"\"Mr. ABC is a 60-year-old gentleman who had a markedly abnormal stress test earlier today in my office with severe chest pain after 5 minutes of exercise on the standard Bruce with horizontal ST depressions and moderate apical ischemia on stress imaging only. He required 3 sublingual nitroglycerin in total (please see also admission history and physical for full details). \n\nThe patient underwent cardiac catheterization with myself today which showed mild-to-moderate left main distal disease of 30%, moderate proximal LAD with a severe mid-LAD lesion of 99%, and a mid-left circumflex lesion of 80% with normal LV function and some mild luminal irregularities in the right coronary artery with some moderate stenosis seen in the mid to distal right PDA.\n\nI discussed these results with the patient, and he had been relating to me that he was having rest anginal symptoms, as well as nocturnal anginal symptoms, and especially given the severity of the mid left anterior descending lesion, with a markedly abnormal stress test, I felt he was best suited for transfer for PCI. I discussed the case with Dr. X at Medical Center who has kindly accepted the patient in transfer.\n\nCONDITION ON TRANSFER: Stable but guarded. The patient is pain-free at this time.\n\nMEDICATIONS ON TRANSFER:\n1. Aspirin 325 mg once a day.\n2. Metoprolol 50 mg once a day, but we have had to hold it because of relative bradycardia which he apparently has a history of.\n3. Nexium 40 mg once a day.\n4. Zocor 40 mg once a day, and there is a fasting lipid profile pending at the time of this dictation. I see that his LDL was 136 on May 3, 2002.\n5. Plavix 600 mg p.o. x1 which I am giving him tonight.\"\"\"]", "_____no_output_____" ], [ "text_list_bacteria = [\"\"\"Based on these genetic and phenotypic properties, we propose that strain SMSP (T) represents \\\na novel species of the genus Methanoregula, for which we propose the name Methanoregula formicica \\\nsp. nov., with the type strain SMSP (T) (= NBRC 105244 (T) = DSM 22288 (T)).\"\"\"]", "_____no_output_____" ], [ "text_list_ade = [\"\"\"Been taking Lipitor for 15 years , have experienced severe fatigue a lot!!! . Doctor moved me to voltaren 2 months ago , so far , have only experienced cramps\"\"\"]", "_____no_output_____" ], [ "text_list_anatomy = [\"\"\"This is an 11-year-old female who comes in for two different things. 1. She was seen by the allergist. No allergies present, so she stopped her Allegra, but she is still real congested and does a lot of snorting. 
They do not notice a lot of snoring at night though, but she seems to be always like that. 2. On her right great toe, she has got some redness and erythema. Her skin is kind of peeling a little bit, but it has been like that for about a week and a half now.\\nGeneral: Well-developed female, in no acute distress, afebrile.\\nHEENT: Sclerae and conjunctivae clear. Extraocular muscles intact. TMs clear. Nares patent. A little bit of swelling of the turbinates on the left. Oropharynx is essentially clear. Mucous membranes are moist.\\nNeck: No lymphadenopathy.\\nChest: Clear.\\nAbdomen: Positive bowel sounds and soft.\\nDermatologic: She has got redness along her right great toe, but no bleeding or oozing. Some dryness of her skin. Her toenails themselves are very short and even on her left foot and her left great toe the toenails are very short.\"\"\"]", "_____no_output_____" ], [ "text_list_chemicals = [\"\"\"The results have shown that the product p - choloroaniline is not a significant factor in chlorhexidine - digluconate associated erosive cystitis. \"A high percentage of kanamycin - colistin and povidone - iodine irrigations were associated with erosive cystitis.\"\"\"]", "_____no_output_____" ], [ "text_list_chemprot = [\"Keratinocyte growth factor and acidic fibroblast growth factor are mitogens for primary cultures of mammary epithelium.\"]", "_____no_output_____" ], [ "text_list_ner_bionlp = [\"\"\"The human KCNJ9 (Kir 3.3, GIRK3) is a member of the G-protein-activated inwardly rectifying potassium (GIRK) channel family. Here we describe the genomicorganization of the KCNJ9 locus on chromosome 1q21-23 as a candidate gene forType II diabetes mellitus in the Pima Indian population. The gene spansapproximately 7.6 kb and contains one noncoding and two coding exons separated byapproximately 2.2 and approximately 2.6 kb introns, respectively. We identified14 single nucleotide polymorphisms (SNPs), including one that predicts aVal366Ala substitution, and an 8 base-pair (bp) insertion/deletion. Ourexpression studies revealed the presence of the transcript in various humantissues including pancreas, and two major insulin-responsive tissues: fat andskeletal muscle. The characterization of the KCNJ9 gene should facilitate furtherstudies on the function of the KCNJ9 protein and allow evaluation of thepotential role of the locus in Type II diabetes.\"\"\"]", "_____no_output_____" ], [ "text_list_ner_cellular = [\"\"\"Detection of various other intracellular signaling proteins is also described. Genetic characterization of transactivation of the human T-cell leukemia virus type 1 promoter: Binding of Tax to Tax-responsive element 1 is mediated by the cyclic AMP-responsive members of the CREB/ATF family of transcription factors. To achieve a better understanding of the mechanism of transactivation by Tax of human T-cell leukemia virus type 1 Tax-responsive element 1 (TRE-1), we developed a genetic approach with Saccharomyces cerevisiae. We constructed a yeast reporter strain containing the lacZ gene under the control of the CYC1 promoter associated with three copies of TRE-1. Expression of either the cyclic AMP response element-binding protein (CREB) or CREB fused to the GAL4 activation domain (GAD) in this strain did not modify the expression of the reporter gene. Tax alone was also inactive.\"\"\"]", "_____no_output_____" ] ], [ [ "## 3. 
Select the NER model, construct the pipeline and visualize the results.", "_____no_output_____" ], [ "Select the NER model - Models: **'bert_token_classifier_ner_jsl', 'bert_token_classifier_ner_drugs', 'bert_token_classifier_ner_deid', 'bert_token_classifier_ner_bacteria','bert_token_classifier_ner_ade','bert_token_classifier_ner_anatomy', 'bert_token_classifier_ner_chemicals','bert_token_classifier_ner_chemprot','bert_token_classifier_ner_bionlp','bert_token_classifier_ner_cellular','bert_token_classifier_ner_jsl_slim'**\n\nFor more details: https://github.com/JohnSnowLabs/spark-nlp-models#pretrained-models---spark-nlp-for-healthcare", "_____no_output_____" ] ], [ [ "model_list = ['bert_token_classifier_ner_jsl','bert_token_classifier_ner_drugs','bert_token_classifier_ner_deid','bert_token_classifier_ner_bacteria','bert_token_classifier_ner_ade','bert_token_classifier_ner_anatomy', 'bert_token_classifier_ner_chemicals','bert_token_classifier_ner_chemprot', 'bert_token_classifier_ner_bionlp', 'bert_token_classifier_ner_cellular', 'bert_token_classifier_ner_jsl_slim']\n", "_____no_output_____" ], [ "from sparknlp_display import NerVisualizer\n\ndocumentAssembler = DocumentAssembler()\\\n .setInputCol(\"text\")\\\n .setOutputCol(\"document\")\n\nsentenceDetector = SentenceDetectorDLModel.pretrained() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"sentence\") \n\ntokenizer = Tokenizer()\\\n .setInputCols(\"sentence\")\\\n .setOutputCol(\"token\")\n\nner_converter = NerConverter()\\\n .setInputCols([\"sentence\",\"token\",\"ner\"])\\\n .setOutputCol(\"ner_chunk\")\n \nfor MODEL_NAME in model_list:\n \n tokenClassifier = MedicalBertForTokenClassifier.pretrained( MODEL_NAME, \"en\", 'clinical/models')\\\n .setInputCols(\"sentence\",\"token\")\\\n .setOutputCol(\"ner\")\\\n .setCaseSensitive(True)\n\n pipeline = Pipeline(stages=[documentAssembler,sentenceDetector, tokenizer, tokenClassifier, ner_converter])\n\n pipelineModel = pipeline.fit(spark.createDataFrame([['']]).toDF(\"text\"))\n \n if MODEL_NAME == \"bert_token_classifier_ner_jsl\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_jsl}))\n elif MODEL_NAME == \"bert_token_classifier_ner_drugs\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_drug}))\n elif MODEL_NAME == \"bert_token_classifier_ner_deid\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_deid}))\n elif MODEL_NAME == \"bert_token_classififer_ner_bacteria\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_bacteria}))\n elif MODEL_NAME == \"bert_token_classifier_ner_jsl_slim\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_jsl_slim}))\n elif MODEL_NAME == \"bert_token_classifier_ner_ade\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_ade}))\n elif MODEL_NAME == \"bert_token_classifier_ner_anatomy\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_anatomy}))\n elif MODEL_NAME == \"bert_token_classifier_ner_chemicals\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_chemicals}))\n elif MODEL_NAME == \"bert_token_classifier_ner_bionlp\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_ner_bionlp}))\n elif MODEL_NAME == \"bert_token_classifier_ner_cellular\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_ner_cellular}))\n elif MODEL_NAME == \"bert_token_classifier_ner_chemprot\":\n df = spark.createDataFrame(pd.DataFrame({\"text\": text_list_chemprot}))\n\n\n print(\"<----------------- MODEL NAME:\",\"\\033[1m\" + 
MODEL_NAME + \"\\033[0m\",\" ----------------- >\")\n \n result = pipelineModel.transform(df)\n result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias(\"cols\")) \\\n .select(F.expr(\"cols['0']\").alias(\"chunk\"),\n F.expr(\"cols['1']['entity']\").alias(\"ner_label\"))\\\n .show(truncate=False)\n\n NerVisualizer().display(\n result = result.collect()[0],\n label_col = 'ner_chunk',\n document_col = 'document'\n )", "sentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\nbert_token_classifier_ner_jsl download started this may take some time.\nApproximate size to download 385.8 MB\n[OK!]\n<----------------- MODEL NAME: \u001b[1mbert_token_classifier_ner_jsl\u001b[0m ----------------- >\n+---------------------------+----------------------------+\n|chunk |ner_label |\n+---------------------------+----------------------------+\n|30-year-old |Age |\n|female |Gender |\n|insulin dependent |Diabetes |\n|diabetes |Diabetes |\n|type 2 |Diabetes |\n|coronary artery disease |Heart_Disease |\n|chronic renal insufficiency|Kidney_Disease |\n|peripheral vascular disease|Disease_Syndrome_Disorder |\n|diabetes |Diabetes |\n|admitted |Admission_Discharge |\n|hospital |Clinical_Dept |\n|acute |Modifier |\n|paraplegia |Symptom |\n|lower extremities |External_body_part_or_region|\n|She |Gender |\n|Bactrim |Drug_BrandName |\n|for 14 days |Duration |\n|UTI |Disease_Syndrome_Disorder |\n|pressure-type wound |Symptom |\n|sole |External_body_part_or_region|\n+---------------------------+----------------------------+\nonly showing top 20 rows\n\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
ec5697af79931a445c2b855958b8d54dfa4b0c3b
40,696
ipynb
Jupyter Notebook
tensorboard/Anna_KaRNNa_Name_Scoped.ipynb
luofan18/deep-learning
4a6c521a52f7d7fdaf3a77df9fc0d5965abdf4d5
[ "MIT" ]
null
null
null
tensorboard/Anna_KaRNNa_Name_Scoped.ipynb
luofan18/deep-learning
4a6c521a52f7d7fdaf3a77df9fc0d5965abdf4d5
[ "MIT" ]
null
null
null
tensorboard/Anna_KaRNNa_Name_Scoped.ipynb
luofan18/deep-learning
4a6c521a52f7d7fdaf3a77df9fc0d5965abdf4d5
[ "MIT" ]
null
null
null
46.088335
1,355
0.596717
[ [ [ "# Anna KaRNNa\n\nIn this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.\n\nThis network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [Sherjil Ozair](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.\n\n<img src=\"assets/charseq.jpeg\" width=\"500\">", "_____no_output_____" ] ], [ [ "import time\nfrom collections import namedtuple\n\nimport numpy as np\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "First we'll load the text file and convert it into integers for our network to use.", "_____no_output_____" ] ], [ [ "with open('anna.txt', 'r') as f:\n text=f.read()\nvocab = set(text)\nvocab_to_int = {c: i for i, c in enumerate(vocab)}\nint_to_vocab = dict(enumerate(vocab))\nchars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)", "_____no_output_____" ], [ "text[:100]", "_____no_output_____" ], [ "chars[:100]", "_____no_output_____" ] ], [ [ "Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.\n\nHere I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.\n\nThe idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. 
This will keep 90% of the batches in the training set, the other 10% in the validation set.", "_____no_output_____" ] ], [ [ "def split_data(chars, batch_size, num_steps, split_frac=0.9):\n \"\"\" \n Split character data into training and validation sets, inputs and targets for each set.\n \n Arguments\n ---------\n chars: character array\n batch_size: Size of examples in each of batch\n num_steps: Number of sequence steps to keep in the input and pass to the network\n split_frac: Fraction of batches to keep in the training set\n \n \n Returns train_x, train_y, val_x, val_y\n \"\"\"\n \n \n slice_size = batch_size * num_steps\n n_batches = int(len(chars) / slice_size)\n \n # Drop the last few characters to make only full batches\n x = chars[: n_batches*slice_size]\n y = chars[1: n_batches*slice_size + 1]\n \n # Split the data into batch_size slices, then stack them into a 2D matrix \n x = np.stack(np.split(x, batch_size))\n y = np.stack(np.split(y, batch_size))\n \n # Now x and y are arrays with dimensions batch_size x n_batches*num_steps\n \n # Split into training and validation sets, keep the virst split_frac batches for training\n split_idx = int(n_batches*split_frac)\n train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]\n val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]\n \n return train_x, train_y, val_x, val_y", "_____no_output_____" ], [ "train_x, train_y, val_x, val_y = split_data(chars, 10, 200)", "_____no_output_____" ], [ "train_x.shape", "_____no_output_____" ], [ "train_x[:,:10]", "_____no_output_____" ] ], [ [ "I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window the next sequence of `num_steps` characters. 
In this way we can feed batches to the network and the cell states will continue through on each batch.", "_____no_output_____" ] ], [ [ "def get_batch(arrs, num_steps):\n batch_size, slice_size = arrs[0].shape\n \n n_batches = int(slice_size/num_steps)\n for b in range(n_batches):\n yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]", "_____no_output_____" ], [ "def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,\n learning_rate=0.001, grad_clip=5, sampling=False):\n \n if sampling == True:\n batch_size, num_steps = 1, 1\n\n tf.reset_default_graph()\n \n # Declare placeholders we'll feed into the graph\n with tf.name_scope('inputs'):\n inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')\n x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')\n \n with tf.name_scope('targets'):\n targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')\n y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')\n y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])\n \n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n \n # Build the RNN layers\n with tf.name_scope(\"RNN_layers\"):\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)\n \n with tf.name_scope(\"RNN_init_state\"):\n initial_state = cell.zero_state(batch_size, tf.float32)\n\n # Run the data through the RNN layers\n with tf.name_scope(\"RNN_forward\"):\n outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)\n \n final_state = state\n \n # Reshape output so it's a bunch of rows, one row for each cell output\n with tf.name_scope('sequence_reshape'):\n seq_output = tf.concat(outputs, axis=1,name='seq_output')\n output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')\n \n # Now connect the RNN putputs to a softmax layer and calculate the cost\n with tf.name_scope('logits'):\n softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),\n name='softmax_w')\n softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')\n logits = tf.matmul(output, softmax_w) + softmax_b\n\n with tf.name_scope('predictions'):\n preds = tf.nn.softmax(logits, name='predictions')\n \n \n with tf.name_scope('cost'):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')\n cost = tf.reduce_mean(loss, name='cost')\n\n # Optimizer for training, using gradient clipping to control exploding gradients\n with tf.name_scope('train'):\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)\n train_op = tf.train.AdamOptimizer(learning_rate)\n optimizer = train_op.apply_gradients(zip(grads, tvars))\n \n # Export the nodes \n export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',\n 'keep_prob', 'cost', 'preds', 'optimizer']\n Graph = namedtuple('Graph', export_nodes)\n local_dict = locals()\n graph = Graph(*[local_dict[each] for each in export_nodes])\n \n return graph", "_____no_output_____" ] ], [ [ "## Hyperparameters\n\nHere I'm defining the hyperparameters for the network. The two you probably haven't seen before are `lstm_size` and `num_layers`. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. 
If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.", "_____no_output_____" ] ], [ [ "batch_size = 100\nnum_steps = 100\nlstm_size = 512\nnum_layers = 2\nlearning_rate = 0.001", "_____no_output_____" ] ], [ [ "## Write out the graph for TensorBoard", "_____no_output_____" ] ], [ [ "model = build_rnn(len(vocab), \n batch_size=batch_size,\n num_steps=num_steps,\n learning_rate=learning_rate,\n lstm_size=lstm_size,\n num_layers=num_layers)\n\nwith tf.Session() as sess:\n \n sess.run(tf.global_variables_initializer())\n file_writer = tf.summary.FileWriter('./logs/3', sess.graph)", "_____no_output_____" ] ], [ [ "## Training\n\nTime for training which is is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint.", "_____no_output_____" ] ], [ [ "!mkdir -p checkpoints/anna", "_____no_output_____" ], [ "epochs = 10\nsave_every_n = 200\ntrain_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)\n\nmodel = build_rnn(len(vocab), \n batch_size=batch_size,\n num_steps=num_steps,\n learning_rate=learning_rate,\n lstm_size=lstm_size,\n num_layers=num_layers)\n\nsaver = tf.train.Saver(max_to_keep=100)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n # Use the line below to load a checkpoint and resume training\n #saver.restore(sess, 'checkpoints/anna20.ckpt')\n \n n_batches = int(train_x.shape[1]/num_steps)\n iterations = n_batches * epochs\n for e in range(epochs):\n \n # Train network\n new_state = sess.run(model.initial_state)\n loss = 0\n for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):\n iteration = e*n_batches + b\n start = time.time()\n feed = {model.inputs: x,\n model.targets: y,\n model.keep_prob: 0.5,\n model.initial_state: new_state}\n batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer], \n feed_dict=feed)\n loss += batch_loss\n end = time.time()\n print('Epoch {}/{} '.format(e+1, epochs),\n 'Iteration {}/{}'.format(iteration, iterations),\n 'Training loss: {:.4f}'.format(loss/b),\n '{:.4f} sec/batch'.format((end-start)))\n \n \n if (iteration%save_every_n == 0) or (iteration == iterations):\n # Check performance, notice dropout has been set to 1\n val_loss = []\n new_state = sess.run(model.initial_state)\n for x, y in get_batch([val_x, val_y], num_steps):\n feed = {model.inputs: x,\n model.targets: y,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)\n val_loss.append(batch_loss)\n\n print('Validation loss:', np.mean(val_loss),\n 'Saving checkpoint!')\n saver.save(sess, \"checkpoints/anna/i{}_l{}_{:.3f}.ckpt\".format(iteration, lstm_size, np.mean(val_loss)))", "Epoch 1/10 Iteration 1/1780 Training loss: 4.4195 1.3313 sec/batch\nEpoch 1/10 Iteration 2/1780 Training loss: 4.3756 0.1287 sec/batch\nEpoch 1/10 Iteration 3/1780 Training loss: 4.2069 0.1276 sec/batch\nEpoch 1/10 Iteration 4/1780 Training loss: 4.5396 0.1185 sec/batch\nEpoch 1/10 Iteration 5/1780 Training loss: 4.4190 0.1206 sec/batch\nEpoch 1/10 Iteration 6/1780 Training loss: 4.3547 0.1233 sec/batch\nEpoch 1/10 Iteration 7/1780 Training loss: 4.2792 0.1188 sec/batch\nEpoch 1/10 Iteration 8/1780 
Training loss: 4.2018 0.1170 sec/batch\nEpoch 1/10 Iteration 9/1780 Training loss: 4.1251 0.1187 sec/batch\nEpoch 1/10 Iteration 10/1780 Training loss: 4.0558 0.1174 sec/batch\nEpoch 1/10 Iteration 11/1780 Training loss: 3.9946 0.1190 sec/batch\nEpoch 1/10 Iteration 12/1780 Training loss: 3.9451 0.1193 sec/batch\nEpoch 1/10 Iteration 13/1780 Training loss: 3.9011 0.1210 sec/batch\nEpoch 1/10 Iteration 14/1780 Training loss: 3.8632 0.1185 sec/batch\nEpoch 1/10 Iteration 15/1780 Training loss: 3.8275 0.1199 sec/batch\nEpoch 1/10 Iteration 16/1780 Training loss: 3.7945 0.1211 sec/batch\nEpoch 1/10 Iteration 17/1780 Training loss: 3.7649 0.1215 sec/batch\nEpoch 1/10 Iteration 18/1780 Training loss: 3.7400 0.1214 sec/batch\nEpoch 1/10 Iteration 19/1780 Training loss: 3.7164 0.1247 sec/batch\nEpoch 1/10 Iteration 20/1780 Training loss: 3.6933 0.1212 sec/batch\nEpoch 1/10 Iteration 21/1780 Training loss: 3.6728 0.1203 sec/batch\nEpoch 1/10 Iteration 22/1780 Training loss: 3.6538 0.1207 sec/batch\nEpoch 1/10 Iteration 23/1780 Training loss: 3.6359 0.1200 sec/batch\nEpoch 1/10 Iteration 24/1780 Training loss: 3.6198 0.1229 sec/batch\nEpoch 1/10 Iteration 25/1780 Training loss: 3.6041 0.1204 sec/batch\nEpoch 1/10 Iteration 26/1780 Training loss: 3.5904 0.1202 sec/batch\nEpoch 1/10 Iteration 27/1780 Training loss: 3.5774 0.1189 sec/batch\nEpoch 1/10 Iteration 28/1780 Training loss: 3.5642 0.1214 sec/batch\nEpoch 1/10 Iteration 29/1780 Training loss: 3.5522 0.1231 sec/batch\nEpoch 1/10 Iteration 30/1780 Training loss: 3.5407 0.1199 sec/batch\nEpoch 1/10 Iteration 31/1780 Training loss: 3.5309 0.1180 sec/batch\nEpoch 1/10 Iteration 32/1780 Training loss: 3.5207 0.1179 sec/batch\nEpoch 1/10 Iteration 33/1780 Training loss: 3.5109 0.1224 sec/batch\nEpoch 1/10 Iteration 34/1780 Training loss: 3.5021 0.1206 sec/batch\nEpoch 1/10 Iteration 35/1780 Training loss: 3.4931 0.1241 sec/batch\nEpoch 1/10 Iteration 36/1780 Training loss: 3.4850 0.1169 sec/batch\nEpoch 1/10 Iteration 37/1780 Training loss: 3.4767 0.1204 sec/batch\nEpoch 1/10 Iteration 38/1780 Training loss: 3.4688 0.1202 sec/batch\nEpoch 1/10 Iteration 39/1780 Training loss: 3.4611 0.1213 sec/batch\n" ], [ "tf.train.get_checkpoint_state('checkpoints/anna')", "_____no_output_____" ] ], [ [ "## Sampling\n\nNow that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.\n\nThe network gives us predictions for each character. 
To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.\n\n", "_____no_output_____" ] ], [ [ "def pick_top_n(preds, vocab_size, top_n=5):\n p = np.squeeze(preds)\n p[np.argsort(p)[:-top_n]] = 0\n p = p / np.sum(p)\n c = np.random.choice(vocab_size, 1, p=p)[0]\n return c", "_____no_output_____" ], [ "def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=\"The \"):\n prime = \"Far\"\n samples = [c for c in prime]\n model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n new_state = sess.run(model.initial_state)\n for c in prime:\n x = np.zeros((1, 1))\n x[0,0] = vocab_to_int[c]\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.preds, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n\n for i in range(n_samples):\n x[0,0] = c\n feed = {model.inputs: x,\n model.keep_prob: 1.,\n model.initial_state: new_state}\n preds, new_state = sess.run([model.preds, model.final_state], \n feed_dict=feed)\n\n c = pick_top_n(preds, len(vocab))\n samples.append(int_to_vocab[c])\n \n return ''.join(samples)", "_____no_output_____" ], [ "checkpoint = \"checkpoints/anna/i3560_l512_1.122.ckpt\"\nsamp = sample(checkpoint, 2000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "Farlathit that if had so\nlike it that it were. He could not trouble to his wife, and there was\nanything in them of the side of his weaky in the creature at his forteren\nto him.\n\n\"What is it? I can't bread to those,\" said Stepan Arkadyevitch. \"It's not\nmy children, and there is an almost this arm, true it mays already,\nand tell you what I have say to you, and was not looking at the peasant,\nwhy is, I don't know him out, and she doesn't speak to me immediately, as\nyou would say the countess and the more frest an angelembre, and time and\nthings's silent, but I was not in my stand that is in my head. But if he\nsay, and was so feeling with his soul. A child--in his soul of his\nsoul of his soul. He should not see that any of that sense of. Here he\nhad not been so composed and to speak for as in a whole picture, but\nall the setting and her excellent and society, who had been delighted\nand see to anywing had been being troed to thousand words on them,\nwe liked him.\n\nThat set in her money at the table, he came into the party. The capable\nof his she could not be as an old composure.\n\n\"That's all something there will be down becime by throe is\nsuch a silent, as in a countess, I should state it out and divorct.\nThe discussion is not for me. I was that something was simply they are\nall three manshess of a sensitions of mind it all.\"\n\n\"No,\" he thought, shouted and lifting his soul. \"While it might see your\nhonser and she, I could burst. And I had been a midelity. And I had a\nmarnief are through the countess,\" he said, looking at him, a chosing\nwhich they had been carried out and still solied, and there was a sen that\nwas to be completely, and that this matter of all the seconds of it, and\na concipation were to her husband, who came up and conscaously, that he\nwas not the station. All his fourse she was always at the country,,\nto speak oft, and though they were to hear the delightful throom and\nwhether they came towards the morning, and his living and a coller and\nhold--the children. 
\n" ], [ "checkpoint = \"checkpoints/anna/i200_l512_2.432.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "Farnt him oste wha sorind thans tout thint asd an sesand an hires on thime sind thit aled, ban thand and out hore as the ter hos ton ho te that, was tis tart al the hand sostint him sore an tit an son thes, win he se ther san ther hher tas tarereng,.\n\nAnl at an ades in ond hesiln, ad hhe torers teans, wast tar arering tho this sos alten sorer has hhas an siton ther him he had sin he ard ate te anling the sosin her ans and\narins asd and ther ale te tot an tand tanginge wath and ho ald, so sot th asend sat hare sother horesinnd, he hesense wing ante her so tith tir sherinn, anded and to the toul anderin he sorit he torsith she se atere an ting ot hand and thit hhe so the te wile har\nens ont in the sersise, and we he seres tar aterer, to ato tat or has he he wan ton here won and sen heren he sosering, to to theer oo adent har herere the wosh oute, was serild ward tous hed astend..\n\nI's sint on alt in har tor tit her asd hade shithans ored he talereng an soredendere tim tot hees. Tise sor and \n" ], [ "checkpoint = \"checkpoints/anna/i600_l512_1.750.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "Fard as astice her said he celatice of to seress in the raice, and to be the some and sere allats to that said to that the sark and a cast a the wither ald the pacinesse of her had astition, he said to the sount as she west at hissele. Af the cond it he was a fact onthis astisarianing.\n\n\n\"Or a ton to to be that's a more at aspestale as the sont of anstiring as\nthours and trey.\n\nThe same wo dangring the\nraterst, who sore and somethy had ast out an of his book. \"We had's beane were that, and a morted a thay he had to tere. Then to\nher homent andertersed his his ancouted to the pirsted, the soution for of the pirsice inthirgest and stenciol, with the hard and and\na colrice of to be oneres,\nthe song to this anderssad.\nThe could ounterss the said to serom of\nsoment a carsed of sheres of she\ntorded\nhar and want in their of hould, but\nher told in that in he tad a the same to her. Serghing an her has and with the seed, and the camt ont his about of the\nsail, the her then all houg ant or to hus to \n" ], [ "checkpoint = \"checkpoints/anna/i1000_l512_1.484.ckpt\"\nsamp = sample(checkpoint, 1000, lstm_size, len(vocab), prime=\"Far\")\nprint(samp)", "Farrat, his felt has at it.\n\n\"When the pose ther hor exceed\nto his sheant was,\" weat a sime of his sounsed. The coment and the facily that which had began terede a marilicaly whice whether the pose of his hand, at she was alligated herself the same on she had to\ntaiking to his forthing and streath how to hand\nbegan in a lang at some at it, this he cholded not set all her. \"Wo love that is setthing. Him anstering as seen that.\"\n\n\"Yes in the man that say the mare a crances is it?\" said Sergazy Ivancatching. \"You doon think were somether is ifficult of a mone of\nthough the most at the countes that the\nmean on the come to say the most, to\nhis feesing of\na man she, whilo he\nsained and well, that he would still at to said. He wind at his for the sore in the most\nof hoss and almoved to see him. They have betine the sumper into at he his stire, and what he was that at the so steate of the\nsound, and shin should have a geest of shall feet on the conderation to she had been at that imporsing the dre\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
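The char-RNN record above generates new text by keeping only the top-N most likely characters at each step (its `pick_top_n` helper). A tiny, framework-free NumPy sketch of that sampling trick — with a made-up 6-character vocabulary and made-up softmax probabilities — is:

```python
# Hedged sketch of top-N sampling from a softmax output, as described in the record above.
# The vocabulary size and probabilities below are invented for illustration.
import numpy as np

def pick_top_n(preds, vocab_size, top_n=5):
    p = np.squeeze(preds).copy()          # copy so the caller's predictions are untouched
    p[np.argsort(p)[:-top_n]] = 0         # zero out everything except the top_n entries
    p = p / np.sum(p)                     # renormalise to a valid distribution
    return np.random.choice(vocab_size, 1, p=p)[0]

preds = np.array([[0.02, 0.40, 0.05, 0.30, 0.20, 0.03]])  # toy softmax over 6 "characters"
np.random.seed(0)
print([pick_top_n(preds, 6, top_n=3) for _ in range(5)])   # only indices 1, 3, 4 can be drawn
```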
ec56a2708d56f88d85ebcb6aae054b4be8a6766a
2,456
ipynb
Jupyter Notebook
exponential.ipynb
procheta1999/distributions__
12341b562a054bd690404bb13719b4b002fe5691
[ "MIT" ]
null
null
null
exponential.ipynb
procheta1999/distributions__
12341b562a054bd690404bb13719b4b002fe5691
[ "MIT" ]
null
null
null
exponential.ipynb
procheta1999/distributions__
12341b562a054bd690404bb13719b4b002fe5691
[ "MIT" ]
null
null
null
23.390476
238
0.440554
[ [ [ "<a href=\"https://colab.research.google.com/github/procheta1999/distributions__/blob/master/exponential.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom scipy.stats import expon\nn=np.arange(0,10,0.01)\nnp.mean(expon.pdf(n))", "_____no_output_____" ], [ "np.median(expon.pdf(n))", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
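The short record above averages `expon.pdf` values evaluated on a grid; if the intent is the mean and median of the exponential distribution itself, SciPy exposes those directly on the (standard, scale = 1) distribution object. A hedged, self-contained cross-check:

```python
# Sketch: distribution-level statistics of the standard exponential (loc=0, scale=1),
# plus a Monte Carlo cross-check; the sample size and seed are arbitrary choices.
import numpy as np
from scipy.stats import expon

print(expon.mean())    # 1.0
print(expon.median())  # ln(2) ~ 0.6931
print(expon.std())     # 1.0

samples = expon.rvs(size=100_000, random_state=42)
print(samples.mean(), np.median(samples))  # close to the exact values above (~1.0 and ~0.693)
```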
ec56a537b0e6f88bab6cb865faa87d683a457a58
11,089
ipynb
Jupyter Notebook
src/iot/ProcessIotData.ipynb
kreidOSS/Spark-Examples
a0c499b91ee031fcb3502e0c7da638a4128a8305
[ "MIT" ]
null
null
null
src/iot/ProcessIotData.ipynb
kreidOSS/Spark-Examples
a0c499b91ee031fcb3502e0c7da638a4128a8305
[ "MIT" ]
null
null
null
src/iot/ProcessIotData.ipynb
kreidOSS/Spark-Examples
a0c499b91ee031fcb3502e0c7da638a4128a8305
[ "MIT" ]
null
null
null
46.592437
354
0.482911
[ [ [ "This notebook is for demo purposes. \n**Requirements**: \n[Kafka library ](http://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html#creating-a-kafka-source-stream#) \n[SQL Spark Connector library](https://github.com/microsoft/sql-spark-connector)\n\n## Overview\nAn IOT device is sending data to a Kafka endpoint. This notebook consumes the data and performs the following functions: \n\n1. Store data in a folder partitioned format on cloud storage by deviceName,tagName, year, month, day, hour.\n\n2. Ensure that the \"value\" column of the received data message is not a duplicate of the \"value\" column for data that matches the same device for the same timeframe. If it does not, the data will be inserted into the Delta Table and sent to downstream databases, in this scenario a SQL DB. If the value does match it will be disregarded.\n\n3. The objective is to only capture changes in the \"value\" column and ignore any duplicates. The value in this scenario can only be a 0 or 1.\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "#Create a function to handle writes. Data is persisted before write to prevent recomputation of data.\ndef writeData(df):\n #Set up SQL connection information to use Spark SQL Connector\n server_name = \"jdbc:sqlserver://{SERVER_ADDR}\"\n database_name = \"database_name\"\n url = server_name + \";\" + \"databaseName=\" + database_name + \";\"\n\n table_name = \"table_name\"\n username = \"username\"\n password = \"password123!#\" # Please specify password here\n\n df.persist()\n df.write.format(\"delta\").mode(\"append\").partitionBy('day').save(\"/data/silver\")\n df.write \\\n .format(\"com.microsoft.sqlserver.jdbc.spark\") \\\n .mode(\"append\") \\\n .option(\"url\", url) \\\n .option(\"dbtable\", table_name) \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .save()\n df.unpersist()", "_____no_output_____" ], [ "from datetime import timezone\nfrom delta.tables import *\nimport datetime\n\n#Created function to be used in foreachbactch\ndef processRow(df, batchId):\n #Create dataframe to be used to insert to SQL, need to cast year,month,day,hour as integer\n newDF = df.withColumn(\"year\", from_unixtime(col(\"time\"), 'yyyy').cast(IntegerType())) \\\n .withColumn(\"month\", from_unixtime(col(\"time\"), 'MM').cast(IntegerType())) \\\n .withColumn(\"day\", from_unixtime(col(\"time\"), 'dd').cast(IntegerType())) \\\n .withColumn(\"hour\", from_unixtime(col(\"time\"), 'HH').cast(IntegerType()))\n\n\n #Get unix timestap from seven days ago at the current datetime.\n unixAWeekAgo = ((datetime.datetime.now()) - (datetime.timedelta(days=7))).replace(tzinfo=timezone.utc).timestamp()\n #Get timestamp from data received\n time = newDF.toPandas().iat[0][2]\n\n\n\n #Ensure time is greater than 7 days ago\n if(time>unixAWeekAgo):\n #Verify if Delta table has been created, if not create table and insert data.\n if(DeltaTable.isDeltaTable(spark,\"/data/silver\")== False):\n #persist data to prevent recomputation at each write.\n newDf.persist()\n newDF.write.format(\"delta\").partitionBy(\"day\").option(\"path\",\"/data/silver\").saveAsTable(\"events\")\n #If using Spark 2.4-see below\n #newDF.write.format(\"delta\").mode(\"append\").partitionBy('day') \\\n # .option(\"__partition_columns\", \"\"\"[\"day\"]\"\"\").option(\"path\",\"/data/silver\") \\\n # .saveAsTable(\"events\")\n\n #Write to SQL\n newDF.write \\\n .format(\"com.microsoft.sqlserver.jdbc.spark\") \\\n .mode(\"append\") \\\n .option(\"url\", url) \\\n 
.option(\"dbtable\", table_name) \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .save()\n newDF.unpersist()\n else: \n #Get day and hour values \n day = newDF.toPandas().iat[0][6]\n hour = newDF.toPandas().iat[0][7]\n #Get the result that happend prior to this timestamp\n lastResult = spark.sql(\"SELECT * FROM events WHERE day = {0} AND hour = {1} AND time < {2} ORDER BY time DESC limit 1\".format(day,hour,time))\n #Get the result that happend after this timestamp\n afterResult = spark.sql(\"SELECT * FROM events WHERE day = {0} AND hour = {1} AND time > {2} ORDER BY time DESC limit 1\".format(day,hour,time))\n \n #Get values from the new data and the data for the last and after result.\n rawValue = newDF.toPandas().iat[0][3]\n lastResultValue = lastResult.toPandas().iat[0][3]\n afterResultValue = afterResult.toPandas().iat[0][3]\n\n #Compare results to see if a insert is needed.\n #There is a result for before and after\n if(lastResult.count() == 1 and afterResult.count() == 1):\n #Check if the previous result and the after result are not the same as the dataframe current result\n if (rawValue != lastResultValue and rawValue != afterResultValue):\n writeData(df)\n #Check if no previous result but there was a result after\n elif(lastResult.count()== 0 and afterResult.count() == 1):\n #Verify that the after result is not equal to the current dataframe\n if(rawValue != afterResultValue): \n writeData(df)\n #Check if there is a pervious result but not one after.\n elif(lastResult.count()==1 and afterResult.count() == 0):\n #Verify that the last result does not match the current dataframe result.\n if(rawValue != lastResultValue):\n writeData(df)\n else:\n #There is no data in table, inserting.\n writeData(df)", "_____no_output_____" ], [ "# define Schema for reading data off kafka topic\nschema = StructType().add(\"deviceName\", StringType()).add(\"tagName\", StringType()) \\\n .add(\"time\", StringType()).add(\"value\", StringType())", "_____no_output_____" ], [ "#Read data from kafka topic\ndf = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"127.0.0.1:9092\") \\\n .option(\"subscribe\", \"iot-events\") \\\n .load() \\\n .select(from_json(col(\"value\").cast(\"string\"), schema))", "_____no_output_____" ], [ "from pyspark.sql.functions import from_unixtime , col\n\n#Add columns for folder partitioning, date is stored in unix time\nstorageDF = df.withColumn(\"year\", from_unixtime(col(\"time\"), 'yyyy')) \\\n .withColumn(\"month\", from_unixtime(col(\"time\"), 'MM')) \\\n .withColumn(\"day\", from_unixtime(col(\"time\"), 'dd')) \\\n .withColumn(\"hour\", from_unixtime(col(\"time\"), 'HH'))\n\n#Retrieve value for each (row,column) for path creation of storing data.\ndeviceName = df.toPandas().iat[0][0]\ntagName = df.toPandas().iat[0][1]\nyear = storageDF.toPandas().iat[0][4]\nmonth = storageDF.toPandas().iat[0][5]\nday = storageDF.toPandas().iat[0][6]\nhour = storageDF.toPandas().iat[0][7]\n\n#Create Path\nstorage_path = '/Data/%s/%s/%s/%s/%s/%s' % (deviceName, tagName, year,month,day,hour)\n\n#Write Raw data to folder.\nstorageDF.write.parquet(storage_path, mode = \"append\")", "_____no_output_____" ], [ "#use foreachbatch funcationality to auto process each df.\ndf.writeStream \\\n .foreachBatch(processRow) \\ \n .start()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
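The IoT record above routes every micro-batch through `foreachBatch`, where it deduplicates against a Delta table and writes to SQL. A minimal, self-contained sketch of just that streaming hook — using Spark's built-in `rate` source instead of Kafka, with made-up output and checkpoint paths — might look like:

```python
# Hedged sketch of Structured Streaming's foreachBatch hook, as used in the record above.
# The rate source, paths, and trigger interval are demo assumptions, not from the record.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("foreachbatch_demo").getOrCreate()

stream_df = spark.readStream.format("rate").option("rowsPerSecond", 5).load()

def process_batch(batch_df, batch_id):
    # Inside foreachBatch the micro-batch is a plain bounded DataFrame,
    # so ordinary batch writers, joins and dedup logic all apply.
    batch_df.dropDuplicates(["value"]).write.mode("append").parquet("/tmp/foreachbatch_demo")

query = (stream_df.writeStream
         .foreachBatch(process_batch)
         .option("checkpointLocation", "/tmp/foreachbatch_demo_chk")
         .trigger(processingTime="10 seconds")
         .start())
# query.awaitTermination()   # block the driver; stop later with query.stop()
```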
ec56b7292702821316aaaee271f997a70fa9c6b1
87,337
ipynb
Jupyter Notebook
Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb
FynnBe/ZeroCostDL4Mic
1f82cdf81b7ab23ffac7d2813e4aa067f98856d4
[ "MIT" ]
null
null
null
Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb
FynnBe/ZeroCostDL4Mic
1f82cdf81b7ab23ffac7d2813e4aa067f98856d4
[ "MIT" ]
null
null
null
Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb
FynnBe/ZeroCostDL4Mic
1f82cdf81b7ab23ffac7d2813e4aa067f98856d4
[ "MIT" ]
null
null
null
87,337
87,337
0.67954
[ [ [ "# **StarDist (2D)**\n---\n\n<font size = 4>**StarDist 2D** is a deep-learning method that can be used to segment cell nuclei from bioimages and was first published by [Schmidt *et al.* in 2018, on arXiv](https://arxiv.org/abs/1806.03535). It uses a shape representation based on star-convex polygons for nuclei in an image to predict the presence and the shape of these nuclei. This StarDist 2D network is based on an adapted U-Net network architecture.\n\n<font size = 4> **This particular notebook enables nuclei segmentation of 2D dataset. If you are interested in 3D dataset, you should use the StarDist 3D notebook instead.**\n\n---\n<font size = 4>*Disclaimer*:\n\n<font size = 4>This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n\n<font size = 4>This notebook is largely based on the paper:\n\n<font size = 4>**Cell Detection with Star-convex Polygons** from Schmidt *et al.*, International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), Granada, Spain, September 2018. (https://arxiv.org/abs/1806.03535)\n\n<font size = 4>and the 3D extension of the approach:\n\n<font size = 4>**Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy** from Weigert *et al.* published on arXiv in 2019 (https://arxiv.org/abs/1908.03636)\n\n<font size = 4>**The Original code** is freely available in GitHub:\nhttps://github.com/mpicbg-csbd/stardist\n\n<font size = 4>**Please also cite this original paper when using or developing this notebook.**\n", "_____no_output_____" ], [ "# **How to use this notebook?**\n\n---\n\n<font size = 4>Video describing how to use our notebooks are available on youtube:\n - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n\n\n---\n###**Structure of a notebook**\n\n<font size = 4>The notebook contains two types of cell: \n\n<font size = 4>**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n\n<font size = 4>**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n\n---\n###**Table of contents, Code snippets** and **Files**\n\n<font size = 4>On the top left side of the notebook you find three tabs which contain from top to bottom:\n\n<font size = 4>*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n\n<font size = 4>*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n\n<font size = 4>*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. 
\n\n<font size = 4>**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n\n<font size = 4>**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n\n---\n###**Making changes to the notebook**\n\n<font size = 4>**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n\n<font size = 4>To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\nYou can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment.", "_____no_output_____" ], [ "#**0. Before getting started**\n---\n<font size = 4> For StarDist to train, **it needs to have access to a paired training dataset made of images of nuclei and their corresponding masks**. Information on how to generate a training dataset is available in our Wiki page: https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\n\n<font size = 4>**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model**. The quality control assessment can be done directly in this notebook.\n\n<font size = 4>The data structure is important. It is necessary that all the input data are in the same folder and that all the output data is in a separate folder. The provided training dataset is already split in two folders called \"Training - Images\" (Training_source) and \"Training - Masks\" (Training_target).\n\n<font size = 4>Additionally, the corresponding Training_source and Training_target files need to have **the same name**.\n\n<font size = 4>Please note that you currently can **only use .tif files!**\n\n<font size = 4>You can also provide a folder that contains the data that you wish to analyse with the trained network once all training has been performed. This can include Test dataset for which you have the equivalent output and can compare to what the network provides.\n\n<font size = 4>Here's a common data structure that can work:\n* Experiment A\n - **Training dataset**\n - Images of nuclei (Training_source)\n - img_1.tif, img_2.tif, ...\n - Masks (Training_target)\n - img_1.tif, img_2.tif, ...\n - **Quality control dataset**\n - Images of nuclei\n - img_1.tif, img_2.tif\n - Masks \n - img_1.tif, img_2.tif\n - **Data to be predicted**\n - **Results**\n\n---\n<font size = 4>**Important note**\n\n<font size = 4>- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n\n<font size = 4>- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n\n<font size = 4>- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n---", "_____no_output_____" ], [ "# **1. 
Initialise the Colab session**\n\n\n\n\n---\n\n\n\n\n", "_____no_output_____" ], [ "\n\n## **1.1. Check for GPU access**\n---\n\nBy default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n\n<font size = 4>Go to **Runtime -> Change the Runtime type**\n\n<font size = 4>**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n\n<font size = 4>**Accelator: GPU** *(Graphics processing unit)*\n", "_____no_output_____" ] ], [ [ "#@markdown ##Run this cell to check if you have GPU access\n%tensorflow_version 1.x\nimport tensorflow as tf\nif tf.test.gpu_device_name()=='':\n print('You do not have GPU access.') \n print('Did you change your runtime ?') \n print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n print('Expect slow performance. To access GPU try reconnecting later')\n\nelse:\n print('You have GPU access')\n !nvidia-smi", "_____no_output_____" ] ], [ [ "## **1.2. Mount your Google Drive**\n---\n<font size = 4> To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n\n<font size = 4> Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n\n<font size = 4> Once this is done, your data are available in the **Files** tab on the top left of notebook.", "_____no_output_____" ] ], [ [ "#@markdown ##Play the cell to connect your Google Drive to Colab\n\n\n# mount user's Google Drive to Google Colab.\nfrom google.colab import drive\ndrive.mount('/content/gdrive')", "_____no_output_____" ] ], [ [ "# **2. Install StarDist and dependencies**\n---\n", "_____no_output_____" ] ], [ [ "\nNotebook_version = ['1.11']\n\n\n#@markdown ##Install StarDist and dependencies\n%tensorflow_version 1.x\n\nimport tensorflow\nprint(tensorflow.__version__)\nprint(\"Tensorflow enabled.\")\n\n# Install packages which are not included in Google Colab\n\n!pip install tifffile # contains tools to operate tiff-files\n!pip install csbdeep # contains tools for restoration of fluorescence microcopy images (Content-aware Image Restoration, CARE). 
It uses Keras and Tensorflow.\n!pip install stardist # contains tools to operate STARDIST.\n!pip install gputools # improves STARDIST performances\n!pip install edt # improves STARDIST performances\n!pip install wget\n!pip install fpdf\n!pip install PTable # Nice tables \n\n\n# ------- Variable specific to Stardist -------\nfrom stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available, relabel_image_stardist, random_label_cmap, relabel_image_stardist, _draw_polygons, export_imagej_rois\nfrom stardist.models import Config2D, StarDist2D, StarDistData2D # import objects\nfrom stardist.matching import matching_dataset\nfrom __future__ import print_function, unicode_literals, absolute_import, division\nfrom csbdeep.utils import Path, normalize, download_and_extract_zip_file, plot_history # for loss plot\nfrom csbdeep.io import save_tiff_imagej_compatible\nimport numpy as np\nnp.random.seed(42)\nlbl_cmap = random_label_cmap()\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\n\n# ------- Common variable to all ZeroCostDL4Mic notebooks -------\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport urllib\nimport os, random\nimport shutil \nimport zipfile\nfrom tifffile import imread, imsave\nimport time\nimport sys\nimport wget\nfrom pathlib import Path\nimport pandas as pd\nimport csv\nfrom glob import glob\nfrom scipy import signal\nfrom scipy import ndimage\nfrom skimage import io\nfrom sklearn.linear_model import LinearRegression\nfrom skimage.util import img_as_uint\nimport matplotlib as mpl\nfrom skimage.metrics import structural_similarity\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom astropy.visualization import simple_norm\nfrom skimage import img_as_float32, img_as_ubyte, img_as_float\nfrom skimage.util import img_as_ubyte\nfrom tqdm import tqdm \nimport cv2\nfrom fpdf import FPDF, HTMLMixin\nfrom datetime import datetime\nfrom pip._internal.operations.freeze import freeze\nimport subprocess\n\n# For sliders and dropdown menu and progress bar\nfrom ipywidgets import interact\nimport ipywidgets as widgets\n\n# Colors for the warning messages\nclass bcolors:\n WARNING = '\\033[31m'\nW = '\\033[0m' # white (normal)\nR = '\\033[31m' # red\n\n#Disable some of the tensorflow warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nprint('------------------------------------------')\nprint(\"Libraries installed\")\n\n\n# Check if this is the latest version of the notebook\nLatest_notebook_version = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv\")\n\nif Notebook_version == list(Latest_notebook_version.columns):\n print(\"This notebook is up-to-date.\")\n\nif not Notebook_version == list(Latest_notebook_version.columns):\n print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n\n!pip freeze > requirements.txt\n", "_____no_output_____" ] ], [ [ "# **3. Select your parameters and paths**\n\n---\n", "_____no_output_____" ], [ "## **3.1. Setting main training parameters**\n---\n<font size = 4> ", "_____no_output_____" ], [ "<font size = 5> **Paths for training, predictions and results**\n\n\n<font size = 4>**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source (images of nuclei) and Training_target (masks) training data respecively. 
To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n\n<font size = 4>**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n\n<font size = 4>**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n\n\n<font size = 5>**Training parameters**\n\n<font size = 4>**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a 50-100 epochs, but a full training should run for up to 400 epochs. Evaluate the performance after training (see 5.). **Default value: 100**\n\n<font size = 5>**Advanced Parameters - experienced users only**\n\n<font size =4>**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 2**\n\n<font size = 4>**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n\n<font size = 4>**`patch_size`:** Input the size of the patches use to train StarDist 2D (length of a side). The value should be smaller or equal to the dimensions of the image. Make the patch size as large as possible and divisible by 8. **Default value: dimension of the training images** \n\n<font size = 4>**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n\n<font size = 4>**`n_rays`:** Set number of rays (corners) used for StarDist (for instance, a square has 4 corners). **Default value: 32** \n\n<font size = 4>**`grid_parameter`:** increase this number if the cells/nuclei are very large or decrease it if they are very small. **Default value: 2**\n\n<font size = 4>**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0003**\n\n<font size = 4>**If you get an Out of memory (OOM) error during the training, manually decrease the patch_size value until the OOM error disappear.**\n\n\n\n", "_____no_output_____" ] ], [ [ "#@markdown ###Path to training images: \nTraining_source = \"\" #@param {type:\"string\"}\n\nTraining_target = \"\" #@param {type:\"string\"}\n\n\n#@markdown ###Name of the model and path to model folder:\nmodel_name = \"\" #@param {type:\"string\"}\n\nmodel_path = \"\" #@param {type:\"string\"}\n#trained_model = model_path \n\n\n#@markdown ### Other parameters for training:\nnumber_of_epochs = 100#@param {type:\"number\"}\n\n#@markdown ###Advanced Parameters\nUse_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n\n#@markdown ###If not, please input:\n\n#GPU_limit = 90 #@param {type:\"number\"}\nbatch_size = 2 #@param {type:\"number\"}\nnumber_of_steps = 20#@param {type:\"number\"}\npatch_size = 1024 #@param {type:\"number\"}\npercentage_validation = 10 #@param {type:\"number\"}\nn_rays = 32 #@param {type:\"number\"}\ngrid_parameter = 2#@param [1, 2, 4, 8, 16, 32] {type:\"raw\"}\ninitial_learning_rate = 0.0003 #@param {type:\"number\"}\n\nif (Use_Default_Advanced_Parameters): \n print(\"Default advanced parameters enabled\")\n batch_size = 2\n n_rays = 32\n percentage_validation = 10\n grid_parameter = 2\n initial_learning_rate = 0.0003\n\npercentage = percentage_validation/100\n\n#here we check that no model with the same name already exist, if so print a warning\n\nif os.path.exists(model_path+'/'+model_name):\n print(bcolors.WARNING +\"!! WARNING: \"+model_name+\" already exists and will be deleted !!\")\n print(bcolors.WARNING +\"To continue training \"+model_name+\", choose a new model_name here, and load \"+model_name+\" in section 3.3\"+W)\n\n \n# Here we open will randomly chosen input and output image\nrandom_choice = random.choice(os.listdir(Training_source))\nx = imread(Training_source+\"/\"+random_choice)\n\n# Here we check the image dimensions\n\nImage_Y = x.shape[0]\nImage_X = x.shape[1]\n\nprint('Loaded images (width, length) =', x.shape)\n\n# If default parameters, patch size is the same as image size\nif (Use_Default_Advanced_Parameters):\n patch_size = min(Image_Y, Image_X)\n \n#Hyperparameters failsafes\n\n# Here we check that patch_size is smaller than the smallest xy dimension of the image \n\nif patch_size > min(Image_Y, Image_X):\n patch_size = min(Image_Y, Image_X)\n print (bcolors.WARNING + \" Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:\",patch_size)\n\nif patch_size > 2048:\n patch_size = 2048\n print (bcolors.WARNING + \" Your image dimension is large; therefore the patch_size chosen is now:\",patch_size)\n\n\n# Here we check that the patch_size is divisible by 16\nif not patch_size % 16 == 0:\n patch_size = ((int(patch_size / 16)-1) * 16)\n print (bcolors.WARNING + \" Your chosen patch_size is not divisible by 8; therefore the patch_size chosen is:\",patch_size)\n\n# Here we disable pre-trained model by default (in case the next cell is not ran)\nUse_pretrained_model = False\n\n# Here we disable data augmentation by default (in case the cell is not ran)\n\nUse_Data_augmentation = False\n\n\nprint(\"Parameters initiated.\")\n\n\nos.chdir(Training_target)\ny = imread(Training_target+\"/\"+random_choice)\n\n#Here we use a simple normalisation strategy to visualise the image\nnorm = simple_norm(x, percent = 
99)\n\nf=plt.figure(figsize=(16,8))\nplt.subplot(1,2,1)\nplt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\nplt.title('Training source')\nplt.axis('off');\n\nplt.subplot(1,2,2)\nplt.imshow(y, interpolation='nearest', cmap=lbl_cmap)\nplt.title('Training target')\nplt.axis('off');\nplt.savefig('/content/TrainingDataExample_StarDist2D.png',bbox_inches='tight',pad_inches=0)", "_____no_output_____" ] ], [ [ "## **3.2. Data augmentation**\n---\n<font size = 4>\n\n\n", "_____no_output_____" ], [ "<font size = 4>Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.\n\n<font size = 4> **However, data augmentation is not a magic solution and may also introduce issues. Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** \n\n<font size = 4>Data augmentation is performed here by [Augmentor.](https://github.com/mdbloice/Augmentor)\n\n<font size = 4>[Augmentor](https://github.com/mdbloice/Augmentor) was described in the following article:\n\n<font size = 4>Marcus D Bloice, Peter M Roth, Andreas Holzinger, Biomedical image augmentation using Augmentor, Bioinformatics, https://doi.org/10.1093/bioinformatics/btz259\n\n<font size = 4>**Please also cite this original paper when publishing results obtained using this notebook with augmentation enabled.** ", "_____no_output_____" ] ], [ [ "#Data augmentation\n\nUse_Data_augmentation = False #@param {type:\"boolean\"}\n\nif Use_Data_augmentation:\n !pip install Augmentor\n import Augmentor\n\n\n#@markdown ####Choose a factor by which you want to multiply your original dataset\n\nMultiply_dataset_by = 4 #@param {type:\"slider\", min:1, max:30, step:1}\n\nSave_augmented_images = False #@param {type:\"boolean\"}\n\nSaving_path = \"\" #@param {type:\"string\"}\n\n\nUse_Default_Augmentation_Parameters = True #@param {type:\"boolean\"}\n#@markdown ###If not, please choose the probability of the following image manipulations to be used to augment your dataset (1 = always used; 0 = disabled ):\n\n#@markdown ####Mirror and rotate images\nrotate_90_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\nrotate_270_degrees = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\nflip_left_right = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\nflip_top_bottom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\n#@markdown ####Random image Zoom\n\nrandom_zoom = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\nrandom_zoom_magnification = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\n#@markdown ####Random image distortion\n\nrandom_distortion = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\n\n#@markdown ####Image shearing and skewing \n\nimage_shear = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\nmax_image_shear = 1 #@param {type:\"slider\", min:1, max:25, step:1}\n\nskew_image = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\nskew_image_magnitude = 0 #@param {type:\"slider\", min:0, max:1, step:0.1}\n\n\nif Use_Default_Augmentation_Parameters:\n rotate_90_degrees = 0.5\n rotate_270_degrees = 0.5\n flip_left_right = 0.5\n flip_top_bottom = 0.5\n\n if not Multiply_dataset_by 
>5:\n random_zoom = 0\n random_zoom_magnification = 0.9\n random_distortion = 0\n image_shear = 0\n max_image_shear = 10\n skew_image = 0\n skew_image_magnitude = 0\n\n if Multiply_dataset_by >5:\n random_zoom = 0.1\n random_zoom_magnification = 0.9\n random_distortion = 0.5\n image_shear = 0.2\n max_image_shear = 5\n skew_image = 0.2\n skew_image_magnitude = 0.4\n\n if Multiply_dataset_by >25:\n random_zoom = 0.5\n random_zoom_magnification = 0.8\n random_distortion = 0.5\n image_shear = 0.5\n max_image_shear = 20\n skew_image = 0.5\n skew_image_magnitude = 0.6\n\n\nlist_files = os.listdir(Training_source)\nNb_files = len(list_files)\n\nNb_augmented_files = (Nb_files * Multiply_dataset_by)\n\n\nif Use_Data_augmentation:\n print(\"Data augmentation enabled\")\n# Here we set the path for the various folder were the augmented images will be loaded\n\n# All images are first saved into the augmented folder\n #Augmented_folder = \"/content/Augmented_Folder\"\n \n if not Save_augmented_images:\n Saving_path= \"/content\"\n\n Augmented_folder = Saving_path+\"/Augmented_Folder\"\n if os.path.exists(Augmented_folder):\n shutil.rmtree(Augmented_folder)\n os.makedirs(Augmented_folder)\n\n #Training_source_augmented = \"/content/Training_source_augmented\"\n Training_source_augmented = Saving_path+\"/Training_source_augmented\"\n\n if os.path.exists(Training_source_augmented):\n shutil.rmtree(Training_source_augmented)\n os.makedirs(Training_source_augmented)\n\n #Training_target_augmented = \"/content/Training_target_augmented\"\n Training_target_augmented = Saving_path+\"/Training_target_augmented\"\n\n if os.path.exists(Training_target_augmented):\n shutil.rmtree(Training_target_augmented)\n os.makedirs(Training_target_augmented)\n\n\n# Here we generate the augmented images\n#Load the images\n p = Augmentor.Pipeline(Training_source, Augmented_folder)\n\n#Define the matching images\n p.ground_truth(Training_target)\n#Define the augmentation possibilities\n if not rotate_90_degrees == 0:\n p.rotate90(probability=rotate_90_degrees)\n \n if not rotate_270_degrees == 0:\n p.rotate270(probability=rotate_270_degrees)\n\n if not flip_left_right == 0:\n p.flip_left_right(probability=flip_left_right)\n\n if not flip_top_bottom == 0:\n p.flip_top_bottom(probability=flip_top_bottom)\n\n if not random_zoom == 0:\n p.zoom_random(probability=random_zoom, percentage_area=random_zoom_magnification)\n \n if not random_distortion == 0:\n p.random_distortion(probability=random_distortion, grid_width=4, grid_height=4, magnitude=8)\n\n if not image_shear == 0:\n p.shear(probability=image_shear,max_shear_left=20,max_shear_right=20)\n \n if not skew_image == 0:\n p.skew(probability=skew_image,magnitude=skew_image_magnitude)\n\n p.sample(int(Nb_augmented_files))\n\n print(int(Nb_augmented_files),\"matching images generated\")\n\n# Here we sort through the images and move them back to augmented trainning source and targets folders\n\n augmented_files = os.listdir(Augmented_folder)\n\n for f in augmented_files:\n\n if (f.startswith(\"_groundtruth_(1)_\")):\n shortname_noprefix = f[17:]\n shutil.copyfile(Augmented_folder+\"/\"+f, Training_target_augmented+\"/\"+shortname_noprefix) \n if not (f.startswith(\"_groundtruth_(1)_\")):\n shutil.copyfile(Augmented_folder+\"/\"+f, Training_source_augmented+\"/\"+f)\n \n\n for filename in os.listdir(Training_source_augmented):\n os.chdir(Training_source_augmented)\n os.rename(filename, filename.replace('_original', ''))\n \n #Here we clean up the extra files\n 
shutil.rmtree(Augmented_folder)\n\nif not Use_Data_augmentation:\n print(bcolors.WARNING+\"Data augmentation disabled\") \n\n\n", "_____no_output_____" ] ], [ [ "\n## **3.3. Using weights from a pre-trained model as initial weights**\n---\n<font size = 4> Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a StarDist model**. \n\n<font size = 4> This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n\n<font size = 4> In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. ", "_____no_output_____" ] ], [ [ "# @markdown ##Loading weights from a pre-trained network\n\n\nUse_pretrained_model = False #@param {type:\"boolean\"}\n\npretrained_model_choice = \"2D_versatile_fluo_from_Stardist_Fiji\" #@param [\"Model_from_file\", \"2D_versatile_fluo_from_Stardist_Fiji\", \"2D_Demo_Model_from_Stardist_Github\", \"Versatile_H&E_nuclei\"]\n\nWeights_choice = \"best\" #@param [\"last\", \"best\"]\n\n\n#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\npretrained_model_path = \"\" #@param {type:\"string\"}\n\n# --------------------- Check if we load a previously trained model ------------------------\nif Use_pretrained_model:\n\n# --------------------- Load the model from the choosen path ------------------------\n if pretrained_model_choice == \"Model_from_file\":\n h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n\n\n# --------------------- Download the Demo 2D model provided in the Stardist 2D github ------------------------\n\n if pretrained_model_choice == \"2D_Demo_Model_from_Stardist_Github\":\n pretrained_model_name = \"2D_Demo\"\n pretrained_model_path = \"/content/\"+pretrained_model_name\n print(\"Downloading the 2D_Demo_Model_from_Stardist_Github\")\n if os.path.exists(pretrained_model_path):\n shutil.rmtree(pretrained_model_path)\n os.makedirs(pretrained_model_path)\n wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/config.json\", pretrained_model_path)\n wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/thresholds.json\", pretrained_model_path)\n wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_best.h5?raw=true\", pretrained_model_path) \n wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n\n# --------------------- Download the Demo 2D_versatile_fluo_from_Stardist_Fiji ------------------------\n\n if pretrained_model_choice == \"2D_versatile_fluo_from_Stardist_Fiji\":\n print(\"Downloading the 2D_versatile_fluo_from_Stardist_Fiji\")\n pretrained_model_name = \"2D_versatile_fluo\"\n pretrained_model_path = \"/content/\"+pretrained_model_name\n \n if 
os.path.exists(pretrained_model_path):\n shutil.rmtree(pretrained_model_path)\n os.makedirs(pretrained_model_path)\n \n wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_fluo.zip\", pretrained_model_path)\n \n with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_fluo.zip\", 'r') as zip_ref:\n zip_ref.extractall(pretrained_model_path)\n \n h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n\n# --------------------- Download the Versatile (H&E nuclei)_fluo_from_Stardist_Fiji ------------------------\n\n if pretrained_model_choice == \"Versatile_H&E_nuclei\":\n print(\"Downloading the Versatile_H&E_nuclei from_Stardist_Fiji\")\n pretrained_model_name = \"2D_versatile_he\"\n pretrained_model_path = \"/content/\"+pretrained_model_name\n \n if os.path.exists(pretrained_model_path):\n shutil.rmtree(pretrained_model_path)\n os.makedirs(pretrained_model_path)\n \n wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_he.zip\", pretrained_model_path)\n \n with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_he.zip\", 'r') as zip_ref:\n zip_ref.extractall(pretrained_model_path)\n \n h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n\n\n# --------------------- Add additional pre-trained models here ------------------------\n\n\n\n# --------------------- Check the model exist ------------------------\n# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n if not os.path.exists(h5_file_path):\n print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist' + W)\n Use_pretrained_model = False\n\n \n# If the model path contains a pretrain model, we load the training rate, \n if os.path.exists(h5_file_path):\n#Here we check if the learning rate can be loaded from the quality control folder\n if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n\n with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n csvRead = pd.read_csv(csvfile, sep=',')\n #print(csvRead)\n \n if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n print(\"pretrained network learning rate found\")\n #find the last learning rate\n lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n #Find the learning rate corresponding to the lowest validation loss\n min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n #print(min_val_loss)\n bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n\n if Weights_choice == \"last\":\n print('Last learning rate: '+str(lastLearningRate))\n\n if Weights_choice == \"best\":\n print('Learning rate of best validation loss: '+str(bestLearningRate))\n\n if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n bestLearningRate = initial_learning_rate\n lastLearningRate = initial_learning_rate\n print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. 
Default learning rate of '+str(bestLearningRate)+' will be used instead' + W)\n\n#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+ W)\n bestLearningRate = initial_learning_rate\n lastLearningRate = initial_learning_rate\n\n\n# Display info about the pretrained model to be loaded (or not)\nif Use_pretrained_model:\n print('Weights found in:')\n print(h5_file_path)\n print('will be loaded prior to training.')\n\nelse:\n print(bcolors.WARNING+'No pretrained network will be used.')\n\n", "_____no_output_____" ] ], [ [ "#**4. Train the network**\n---\n", "_____no_output_____" ], [ "## **4.1. Prepare the training data and model for training**\n---\n\n<font size = 4>Here, we use the information from 3. to build the model and convert the training data into a suitable format for training.", "_____no_output_____" ] ], [ [ "#@markdown ##Create the model and dataset objects\n\n# --------------------- Here we delete the model folder if it already exist ------------------------\n\nif os.path.exists(model_path+'/'+model_name):\n print(bcolors.WARNING +\"!! WARNING: Model folder already exists and has been removed !!\" + W)\n shutil.rmtree(model_path+'/'+model_name)\n\n\n\n# --------------------- Here we load the augmented data or the raw data ------------------------\n\nif Use_Data_augmentation:\n Training_source_dir = Training_source_augmented\n Training_target_dir = Training_target_augmented\n\nif not Use_Data_augmentation:\n Training_source_dir = Training_source\n Training_target_dir = Training_target\n# --------------------- ------------------------------------------------\n\ntraining_images_tiff=Training_source_dir+\"/*.tif\"\nmask_images_tiff=Training_target_dir+\"/*.tif\"\n\n# this funtion imports training images and masks and sorts them suitable for the network\nX = sorted(glob(training_images_tiff)) \nY = sorted(glob(mask_images_tiff)) \n\n# assert -funtion check that X and Y really have images. If not this cell raises an error\nassert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n\n# Here we map the training dataset (images and masks).\nX = list(map(imread,X))\nY = list(map(imread,Y))\nn_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n\n#Normalize images and fill small label holes.\naxis_norm = (0,1) # normalize channels independently\n# axis_norm = (0,1,2) # normalize channels jointly\nif n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n sys.stdout.flush()\n\nX = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\nY = [fill_label_holes(y) for y in tqdm(Y)]\n\n#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n#It is advisable to use 10 % of your training dataset for validation. This ensures the truthfull validation error value. If only few validation images are used network may choose too easy or too challenging images for validation. 
\n# split training data (images and masks) into training images and validation images.\nassert len(X) > 1, \"not enough training data\"\nrng = np.random.RandomState(42)\nind = rng.permutation(len(X))\nn_val = max(1, int(round(percentage * len(ind))))\nind_train, ind_val = ind[:-n_val], ind[-n_val:]\nX_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\nX_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \nprint('number of images: %3d' % len(X))\nprint('- training: %3d' % len(X_trn))\nprint('- validation: %3d' % len(X_val))\n\n# Use OpenCL-based computations for data generator during training (requires 'gputools')\nuse_gpu = False and gputools_available()\n\n#Here we ensure that our network has a minimal number of steps\nif (Use_Default_Advanced_Parameters): \n number_of_steps= int(len(X)/batch_size)+1\n\n\n# --------------------- Using pretrained model ------------------------\n#Here we ensure that the learning rate set correctly when using pre-trained models\nif Use_pretrained_model:\n if Weights_choice == \"last\":\n initial_learning_rate = lastLearningRate\n\n if Weights_choice == \"best\": \n initial_learning_rate = bestLearningRate\n# --------------------- ---------------------- ------------------------\n\n\n\nconf = Config2D (\n n_rays = n_rays,\n use_gpu = use_gpu,\n train_batch_size = batch_size,\n n_channel_in = n_channel,\n train_patch_size = (patch_size, patch_size),\n grid = (grid_parameter, grid_parameter),\n train_learning_rate = initial_learning_rate,\n)\n\n# Here we create a model according to section 5.3.\nmodel = StarDist2D(conf, name=model_name, basedir=model_path)\n\n# --------------------- Using pretrained model ------------------------\n# Load the pretrained weights \nif Use_pretrained_model:\n model.load_weights(h5_file_path)\n\n\n# --------------------- ---------------------- ------------------------\n\n#Here we check the FOV of the network.\nmedian_size = calculate_extents(list(Y), np.median)\nfov = np.array(model._axes_tile_overlap('YX'))\nif any(median_size > fov):\n print(bcolors.WARNING+\"WARNING: median object size larger than field of view of the neural network.\")\nprint(conf)\n\n\n\n", "_____no_output_____" ] ], [ [ "\n## **4.2. Start Training**\n---\n\n<font size = 4>When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n\n<font size = 4>* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n\n<font size = 4>**Of Note:** At the end of the training, your model will be automatically exported so it can be used in the Stardist Fiji plugin. You can find it in your model folder (TF_SavedModel.zip). In Fiji, Make sure to choose the right version of tensorflow. You can check at: Edit-- Options-- Tensorflow. Choose the version 1.4 (CPU or GPU depending on your system).", "_____no_output_____" ] ], [ [ "start = time.time()\n\n#@markdown ##Start training\naugmenter = None\n\n# def augmenter(X_batch, Y_batch):\n# \"\"\"Augmentation for data batch.\n# X_batch is a list of input images (length at most batch_size)\n# Y_batch is the corresponding list of ground-truth label images\n# \"\"\"\n# # ...\n# return X_batch, Y_batch\n\n# Training the model. 
\n# 'input_epochs' and 'steps' refers to your input data in section 5.1 \nhistory = model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n epochs=number_of_epochs, steps_per_epoch=number_of_steps)\nNone;\n\nprint(\"Training done\")\n\nprint(\"Network optimization in progress\")\n#Here we optimize the network.\nmodel.optimize_thresholds(X_val, Y_val)\n\nprint(\"Done\")\n\n# convert the history.history dict to a pandas DataFrame: \nlossData = pd.DataFrame(history.history) \n\nif os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n\nos.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n\n# The training evaluation.csv is saved (overwrites the Files if needed). \nlossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\nwith open(lossDataCSVpath, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['loss','val_loss', 'learning rate'])\n for i in range(len(history.history['loss'])):\n writer.writerow([history.history['loss'][i], history.history['val_loss'][i], history.history['lr'][i]])\n\n\n\n# Displaying the time elapsed for training\ndt = time.time() - start\nmins, sec = divmod(dt, 60) \nhour, mins = divmod(mins, 60) \nprint(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n\nmodel.export_TF()\n\nprint(\"Your model has been sucessfully exported and can now also be used in the Stardist Fiji plugin\")\n\n\n#Create a pdf document with training summary\n\n# save FPDF() class into a \n# variable pdf \n\nclass MyFPDF(FPDF, HTMLMixin):\n pass\n\npdf = MyFPDF()\npdf.add_page()\npdf.set_right_margin(-1)\npdf.set_font(\"Arial\", size = 11, style='B') \n\nNetwork = 'StarDist 2D'\nday = datetime.now()\ndatetime_str = str(day)[0:10]\n\nHeader = 'Training report for '+Network+' model ('+model_name+')\\nDate: '+datetime_str\npdf.multi_cell(180, 5, txt = Header, align = 'L') \n \n# add another cell \ntraining_time = \"Training time: \"+str(hour)+ \"hour(s) \"+str(mins)+\"min(s) \"+str(round(sec))+\"sec(s)\"\npdf.cell(190, 5, txt = training_time, ln = 1, align='L')\npdf.ln(1)\n\nHeader_2 = 'Information for your materials and method:'\npdf.cell(190, 5, txt=Header_2, ln=1, align='L')\n\nall_packages = ''\nfor requirement in freeze(local_only=True):\n all_packages = all_packages+requirement+', '\n#print(all_packages)\n\n#Main Packages\nmain_packages = ''\nversion_numbers = []\nfor name in ['tensorflow','numpy','Keras','csbdeep']:\n find_name=all_packages.find(name)\n main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '\n #Version numbers only here:\n version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])\n\ncuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)\ncuda_version = cuda_version.stdout.decode('utf-8')\ncuda_version = cuda_version[cuda_version.find(', V')+3:-1]\ngpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)\ngpu_name = gpu_name.stdout.decode('utf-8')\ngpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]\n#print(cuda_version[cuda_version.find(', V')+3:-1])\n#print(gpu_name)\n\nshape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape\ndataset_size = len(os.listdir(Training_source))\n\ntext = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: 
('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+conf.train_dist_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n\n#text = 'The '+Network+' model ('+model_name+') was trained using '+str(dataset_size)+' paired images (image dimensions: '+str(shape)+') using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The GPU used was a '+gpu_name+'.'\n\nif Use_pretrained_model:\n text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a '+conf.train_dist_loss+' loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), csbdeep (v '+version_numbers[3]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n\npdf.set_font('')\npdf.set_font_size(10.)\npdf.multi_cell(190, 5, txt = text, align='L')\npdf.set_font('')\npdf.set_font('Arial', size = 10, style = 'B')\npdf.ln(1)\npdf.cell(28, 5, txt='Augmentation: ', ln=0)\npdf.set_font('')\nif Use_Data_augmentation:\n aug_text = 'The dataset was augmented by a factor of '+str(Multiply_dataset_by)+' by'\n if rotate_270_degrees != 0 or rotate_90_degrees != 0:\n aug_text = aug_text+'\\n- rotation'\n if flip_left_right != 0 or flip_top_bottom != 0:\n aug_text = aug_text+'\\n- flipping'\n if random_zoom_magnification != 0:\n aug_text = aug_text+'\\n- random zoom magnification'\n if random_distortion != 0:\n aug_text = aug_text+'\\n- random distortion'\n if image_shear != 0:\n aug_text = aug_text+'\\n- image shearing'\n if skew_image != 0:\n aug_text = aug_text+'\\n- image skewing'\nelse:\n aug_text = 'No augmentation was used for training.'\npdf.multi_cell(190, 5, txt=aug_text, align='L')\npdf.set_font('Arial', size = 11, style = 'B')\npdf.ln(1)\npdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)\npdf.set_font('')\npdf.set_font_size(10.)\nif Use_Default_Advanced_Parameters:\n pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')\npdf.cell(200, 5, txt='The following parameters were used for training:')\npdf.ln(1)\nhtml = \"\"\" \n<table width=40% style=\"margin-left:0px;\">\n <tr>\n <th width = 50% align=\"left\">Parameter</th>\n <th width = 50% align=\"left\">Value</th>\n </tr>\n <tr>\n <td width = 50%>number_of_epochs</td>\n <td width = 50%>{0}</td>\n </tr>\n <tr>\n <td width = 50%>patch_size</td>\n <td width = 50%>{1}</td>\n </tr>\n <tr>\n <td width = 50%>batch_size</td>\n <td width = 50%>{2}</td>\n </tr>\n <tr>\n <td width = 50%>number_of_steps</td>\n <td width = 50%>{3}</td>\n </tr>\n <tr>\n <td width = 50%>percentage_validation</td>\n 
<td width = 50%>{4}</td>\n </tr>\n <tr>\n <td width = 50%>n_rays</td>\n <td width = 50%>{5}</td>\n </tr>\n <tr>\n <td width = 50%>grid_parameter</td>\n <td width = 50%>{6}</td>\n </tr>\n <tr>\n <td width = 50%>initial_learning_rate</td>\n <td width = 50%>{7}</td>\n </tr>\n</table>\n\"\"\".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),batch_size,number_of_steps,percentage_validation,n_rays,grid_parameter,initial_learning_rate)\npdf.write_html(html)\n\n#pdf.multi_cell(190, 5, txt = text_2, align='L')\npdf.set_font(\"Arial\", size = 11, style='B')\npdf.ln(1)\npdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)\npdf.set_font('')\npdf.set_font('Arial', size = 10, style = 'B')\npdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)\npdf.set_font('')\npdf.multi_cell(170, 5, txt = Training_source, align = 'L')\npdf.set_font('')\npdf.set_font('Arial', size = 10, style = 'B')\npdf.cell(28, 5, txt= 'Training_target:', align = 'L', ln=0)\npdf.set_font('')\npdf.multi_cell(170, 5, txt = Training_target, align = 'L')\n#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)\npdf.ln(1)\npdf.set_font('')\npdf.set_font('Arial', size = 10, style = 'B')\npdf.cell(21, 5, txt= 'Model Path:', align = 'L', ln=0)\npdf.set_font('')\npdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')\npdf.ln(1)\npdf.cell(60, 5, txt = 'Example Training pair', ln=1)\npdf.ln(1)\nexp_size = io.imread('/content/TrainingDataExample_StarDist2D.png').shape\npdf.image('/content/TrainingDataExample_StarDist2D.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\npdf.ln(1)\nref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy.\" BioRxiv (2020).'\npdf.multi_cell(190, 5, txt = ref_1, align='L')\nref_2 = '- StarDist 2D: Schmidt, Uwe, et al. \"Cell detection with star-convex polygons.\" International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2018.'\npdf.multi_cell(190, 5, txt = ref_2, align='L')\nif Use_Data_augmentation:\n ref_4 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. \"Augmentor: an image augmentation library for machine learning.\" arXiv preprint arXiv:1708.04680 (2017).'\n pdf.multi_cell(190, 5, txt = ref_4, align='L')\npdf.ln(3)\nreminder = 'Important:\\nRemember to perform the quality control step on all newly trained models\\nPlease consider depositing your training dataset on Zenodo'\npdf.set_font('Arial', size = 11, style='B')\npdf.multi_cell(190, 5, txt=reminder, align='C')\n\npdf.output(model_path+'/'+model_name+'/'+model_name+\"_training_report.pdf\")", "_____no_output_____" ] ], [ [ "## **4.3. Download your model(s) from Google Drive**\n---\n\n<font size = 4>Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder.\n\n", "_____no_output_____" ], [ "# **5. Evaluate your model**\n---\n\n<font size = 4>This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
\n\n\n<font size = 4>**We highly recommend performing quality control on all newly trained models.**\n\n\n", "_____no_output_____" ] ], [ [ "# model name and path\n#@markdown ###Do you want to assess the model you just trained?\nUse_the_current_trained_model = True #@param {type:\"boolean\"}\n\n#@markdown ###If not, please provide the path to the model folder:\n\nQC_model_folder = \"\" #@param {type:\"string\"}\n\n#Here we define the loaded model name and path\nQC_model_name = os.path.basename(QC_model_folder)\nQC_model_path = os.path.dirname(QC_model_folder)\n\nif (Use_the_current_trained_model): \n  QC_model_name = model_name\n  QC_model_path = model_path\n\nfull_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\nif os.path.exists(full_QC_model_path):\n  print(\"The \"+QC_model_name+\" network will be evaluated\")\nelse: \n  print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!')\n  print('Please make sure you provide a valid model path and model name before proceeding further.')\n", "_____no_output_____" ] ], [ [ "## **5.1. Inspection of the loss function**\n---\n\n<font size = 4>First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n\n<font size = 4>**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n\n<font size = 4>**Validation loss** describes the same error value between the model's prediction on a validation image and its target.\n\n<font size = 4>During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n\n<font size = 4>Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n\n\n", "_____no_output_____" ] ], [ [ "#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n\nlossDataFromCSV = []\nvallossDataFromCSV = []\n\nwith open(QC_model_path+'/'+QC_model_name+'/Quality Control/training_evaluation.csv','r') as csvfile:\n    csvRead = csv.reader(csvfile, delimiter=',')\n    next(csvRead)\n    for row in csvRead:\n        lossDataFromCSV.append(float(row[0]))\n        vallossDataFromCSV.append(float(row[1]))\n\nepochNumber = range(len(lossDataFromCSV))\nplt.figure(figsize=(15,10))\n\nplt.subplot(2,1,1)\nplt.plot(epochNumber,lossDataFromCSV, label='Training loss')\nplt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\nplt.title('Training loss and validation loss vs. 
epoch number (linear scale)')\nplt.ylabel('Loss')\nplt.xlabel('Epoch number')\nplt.legend()\n\nplt.subplot(2,1,2)\nplt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\nplt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\nplt.title('Training loss and validation loss vs. epoch number (log scale)')\nplt.ylabel('Loss')\nplt.xlabel('Epoch number')\nplt.legend()\nplt.savefig(QC_model_path+'/'+QC_model_name+'/Quality Control/lossCurvePlots.png',bbox_inches='tight',pad_inches=0)\nplt.show()\n\n", "_____no_output_____" ] ], [ [ "## **5.2. Error mapping and quality metrics estimation**\n---\n<font size = 4>This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder! The result for one of the images will also be displayed.\n\n<font size = 4>The **Intersection over Union** (IoU) metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n\n<font size = 4>Here, the IoU is both calculated over the whole image and on a per-object basis. The value displayed below is the IoU value calculated over the entire image. The IoU value calculated on a per-object basis is used to calculate the other metrics displayed.\n\n<font size = 4>“n_true” refers to the number of objects present in the ground truth image. “n_pred” refers to the number of objects present in the predicted image. \n\n<font size = 4>When a segmented object has an IoU value above 0.5 (compared to the corresponding ground truth), it is then considered a true positive. The number of “**true positives**” is available in the table below. The number of “false positive” is then defined as “**false positive**” = “n_pred” - “true positive”. The number of “false negative” is defined as “false negative” = “n_true” - “true positive”.\n\n<font size = 4>The mean_matched_score is the mean IoUs of matched true positives. The mean_true_score is the mean IoUs of matched true positives but normalized by the total number of ground truth objects. The panoptic_quality is calculated as described by [Kirillov et al. 
2019](https://arxiv.org/abs/1801.00868).\n\n<font size = 4>For more information about the other metric displayed, please consult the SI of the paper describing ZeroCostDL4Mic.\n\n<font size = 4> The results can be found in the \"*Quality Control*\" folder which is located inside your \"model_folder\".", "_____no_output_____" ] ], [ [ "#@markdown ##Choose the folders that contain your Quality Control dataset\n\nfrom stardist.matching import matching\nfrom stardist.plot import render_label, render_label_pred \n\nSource_QC_folder = \"\" #@param{type:\"string\"}\nTarget_QC_folder = \"\" #@param{type:\"string\"}\n\n\n#Create a quality control Folder and check if the folder already exist\nif os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\") == False:\n os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control\")\n\nif os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n\nos.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n\n\n# Generate predictions from the Source_QC_folder and save them in the QC folder\n\nSource_QC_folder_tif = Source_QC_folder+\"/*.tif\"\n\nnp.random.seed(16)\nlbl_cmap = random_label_cmap()\nZ = sorted(glob(Source_QC_folder_tif))\nZ = list(map(imread,Z))\nn_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\naxis_norm = (0,1) # normalize channels independently\n\nprint('Number of test dataset found in the folder: '+str(len(Z)))\n \n # axis_norm = (0,1,2) # normalize channels jointly\nif n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n\nmodel = StarDist2D(None, name=QC_model_name, basedir=QC_model_path)\n\nnames = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n\n \n# modify the names to suitable form: path_images/image_numberX.tif\n \nlenght_of_Z = len(Z)\n \nfor i in range(lenght_of_Z):\n img = normalize(Z[i], 1,99.8, axis=axis_norm)\n labels, polygons = model.predict_instances(img)\n os.chdir(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n imsave(names[i], labels, polygons)\n\n\n\n# Here we start testing the differences between GT and predicted masks\n\n\nwith open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"image\",\"Prediction v. 
GT Intersection over Union\", \"false positive\", \"true positive\", \"false negative\", \"precision\", \"recall\", \"accuracy\", \"f1 score\", \"n_true\", \"n_pred\", \"mean_true_score\", \"mean_matched_score\", \"panoptic_quality\"]) \n\n# define the images\n\n for n in os.listdir(Source_QC_folder):\n \n if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n print('Running QC on: '+n)\n test_input = io.imread(os.path.join(Source_QC_folder,n))\n test_prediction = io.imread(os.path.join(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\",n))\n test_ground_truth_image = io.imread(os.path.join(Target_QC_folder, n))\n\n # Calculate the matching (with IoU threshold `thresh`) and all metrics\n\n stats = matching(test_prediction, test_ground_truth_image, thresh=0.5)\n \n\n\n #Convert pixel values to 0 or 255\n test_prediction_0_to_255 = test_prediction\n test_prediction_0_to_255[test_prediction_0_to_255>0] = 255\n\n #Convert pixel values to 0 or 255\n test_ground_truth_0_to_255 = test_ground_truth_image\n test_ground_truth_0_to_255[test_ground_truth_0_to_255>0] = 255\n\n\n # Intersection over Union metric\n\n intersection = np.logical_and(test_ground_truth_0_to_255, test_prediction_0_to_255)\n union = np.logical_or(test_ground_truth_0_to_255, test_prediction_0_to_255)\n iou_score = np.sum(intersection) / np.sum(union)\n writer.writerow([n, str(iou_score), str(stats.fp), str(stats.tp), str(stats.fn), str(stats.precision), str(stats.recall), str(stats.accuracy), str(stats.f1), str(stats.n_true), str(stats.n_pred), str(stats.mean_true_score), str(stats.mean_matched_score), str(stats.panoptic_quality)])\n\n\nfrom prettytable import from_csv\n\nwith open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"r\") as file:\n x = from_csv(file)\n \nprint(x)\n\n\nfrom astropy.visualization import simple_norm\n\n# ------------- For display ------------\nprint('--------------------------------------------------------------')\n@interact\ndef show_QC_results(file = os.listdir(Source_QC_folder)):\n \n\n plt.figure(figsize=(25,5))\n source_image = io.imread(os.path.join(Source_QC_folder, file), as_gray = True)\n target_image = io.imread(os.path.join(Target_QC_folder, file), as_gray = True)\n prediction = io.imread(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction/\"+file, as_gray = True)\n\n stats = matching(prediction, target_image, thresh=0.5)\n\n target_image_mask = np.empty_like(target_image)\n target_image_mask[target_image > 0] = 255\n target_image_mask[target_image == 0] = 0\n \n prediction_mask = np.empty_like(prediction)\n prediction_mask[prediction > 0] = 255\n prediction_mask[prediction == 0] = 0\n\n intersection = np.logical_and(target_image_mask, prediction_mask)\n union = np.logical_or(target_image_mask, prediction_mask)\n iou_score = np.sum(intersection) / np.sum(union)\n\n norm = simple_norm(source_image, percent = 99)\n\n \n #Input\n plt.subplot(1,4,1)\n plt.axis('off')\n plt.imshow(source_image, aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n plt.title('Input')\n\n #Ground-truth\n plt.subplot(1,4,2)\n plt.axis('off')\n plt.imshow(target_image_mask, aspect='equal', cmap='Greens')\n plt.title('Ground Truth')\n\n #Prediction\n plt.subplot(1,4,3)\n plt.axis('off')\n plt.imshow(prediction_mask, aspect='equal', cmap='Purples')\n plt.title('Prediction')\n\n #Overlay\n plt.subplot(1,4,4)\n plt.axis('off')\n plt.imshow(target_image_mask, cmap='Greens')\n plt.imshow(prediction_mask, alpha=0.5, 
cmap='Purples')\n plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3 )));\n plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)\n\n\n# # Display the last image\n# f = plt.figure(figsize=(25,25))\n\n# from astropy.visualization import simple_norm\n# norm = simple_norm(test_input, percent = 99)\n\n# #Input\n# plt.subplot(1,4,1)\n# plt.axis('off')\n# plt.imshow(test_input, aspect='equal', norm=norm, cmap='magma', interpolation='nearest')\n# plt.title('Input')\n\n\n# #Ground-truth\n# plt.subplot(1,4,2)\n# plt.axis('off')\n# plt.imshow(test_ground_truth_0_to_255, aspect='equal', cmap='Greens')\n# plt.title('Ground Truth')\n\n# #Prediction\n# plt.subplot(1,4,3)\n# plt.axis('off')\n# plt.imshow(test_prediction_0_to_255, aspect='equal', cmap='Purples')\n# plt.title('Prediction')\n\n# #Overlay\n# plt.subplot(1,4,4)\n# plt.axis('off')\n# plt.imshow(test_ground_truth_0_to_255, cmap='Greens')\n# plt.imshow(test_prediction_0_to_255, alpha=0.5, cmap='Purples')\n# plt.title('Ground Truth and Prediction, Intersection over Union:'+str(round(iou_score,3)));\n#Make a pdf summary of the QC results\n\nfrom datetime import datetime\n\nclass MyFPDF(FPDF, HTMLMixin):\n pass\n\npdf = MyFPDF()\npdf.add_page()\npdf.set_right_margin(-1)\npdf.set_font(\"Arial\", size = 11, style='B') \n\nNetwork = 'Stardist 2D'\n\nday = datetime.now()\ndatetime_str = str(day)[0:10]\n\nHeader = 'Quality Control report for '+Network+' model ('+QC_model_name+')\\nDate: '+datetime_str\npdf.multi_cell(180, 5, txt = Header, align = 'L') \n\nall_packages = ''\nfor requirement in freeze(local_only=True):\n all_packages = all_packages+requirement+', '\n\npdf.set_font('')\npdf.set_font('Arial', size = 11, style = 'B')\npdf.ln(2)\npdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')\npdf.ln(1)\nexp_size = io.imread(full_QC_model_path+'/Quality Control/lossCurvePlots.png').shape\nif os.path.exists(full_QC_model_path+'/Quality Control/lossCurvePlots.png'):\n pdf.image(full_QC_model_path+'/Quality Control/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\nelse:\n pdf.set_font('')\n pdf.set_font('Arial', size=10)\n pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.')\npdf.ln(2)\npdf.set_font('')\npdf.set_font('Arial', size = 10, style = 'B')\npdf.ln(3)\npdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)\npdf.ln(1)\nexp_size = io.imread(full_QC_model_path+'/Quality Control/QC_example_data.png').shape\npdf.image(full_QC_model_path+'/Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))\npdf.ln(1)\npdf.set_font('')\npdf.set_font('Arial', size = 11, style = 'B')\npdf.ln(1)\npdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)\npdf.set_font('')\npdf.set_font_size(10.)\n\npdf.ln(1)\nhtml = \"\"\"\n<body>\n<font size=\"7\" face=\"Courier New\" >\n<table width=100% style=\"margin-left:0px;\">\"\"\"\nwith open(full_QC_model_path+'/Quality Control/Quality_Control for '+QC_model_name+'.csv', 'r') as csvfile:\n metrics = csv.reader(csvfile)\n header = next(metrics)\n #image = header[0]\n #PvGT_IoU = header[1]\n fp = header[2]\n tp = header[3]\n fn = header[4]\n precision = header[5]\n recall = header[6]\n acc = header[7]\n f1 = header[8]\n n_true = header[9]\n n_pred = header[10]\n mean_true = header[11]\n 
mean_matched = header[12]\n panoptic = header[13]\n header = \"\"\"\n <tr>\n <th width = 5% align=\"center\">{0}</th>\n <th width = 12% align=\"center\">{1}</th>\n <th width = 6% align=\"center\">{2}</th>\n <th width = 6% align=\"center\">{3}</th>\n <th width = 6% align=\"center\">{4}</th>\n <th width = 5% align=\"center\">{5}</th>\n <th width = 5% align=\"center\">{6}</th>\n <th width = 5% align=\"center\">{7}</th>\n <th width = 5% align=\"center\">{8}</th>\n <th width = 5% align=\"center\">{9}</th>\n <th width = 5% align=\"center\">{10}</th>\n <th width = 10% align=\"center\">{11}</th>\n <th width = 11% align=\"center\">{12}</th>\n <th width = 11% align=\"center\">{13}</th>\n </tr>\"\"\".format(\"image #\",\"Prediction v. GT IoU\",'false pos.','true pos.','false neg.',precision,recall,acc,f1,n_true,n_pred,mean_true,mean_matched,panoptic)\n html = html+header\n i=0\n for row in metrics:\n i+=1\n #image = row[0]\n PvGT_IoU = row[1]\n fp = row[2]\n tp = row[3]\n fn = row[4]\n precision = row[5]\n recall = row[6]\n acc = row[7]\n f1 = row[8]\n n_true = row[9]\n n_pred = row[10]\n mean_true = row[11]\n mean_matched = row[12]\n panoptic = row[13]\n cells = \"\"\"\n <tr>\n <td width = 5% align=\"center\">{0}</td>\n <td width = 12% align=\"center\">{1}</td>\n <td width = 6% align=\"center\">{2}</td>\n <td width = 6% align=\"center\">{3}</td>\n <td width = 6% align=\"center\">{4}</td>\n <td width = 5% align=\"center\">{5}</td>\n <td width = 5% align=\"center\">{6}</td>\n <td width = 5% align=\"center\">{7}</td>\n <td width = 5% align=\"center\">{8}</td>\n <td width = 5% align=\"center\">{9}</td>\n <td width = 5% align=\"center\">{10}</td>\n <td width = 10% align=\"center\">{11}</td>\n <td width = 11% align=\"center\">{12}</td>\n <td width = 11% align=\"center\">{13}</td>\n </tr>\"\"\".format(str(i),str(round(float(PvGT_IoU),3)),fp,tp,fn,str(round(float(precision),3)),str(round(float(recall),3)),str(round(float(acc),3)),str(round(float(f1),3)),n_true,n_pred,str(round(float(mean_true),3)),str(round(float(mean_matched),3)),str(round(float(panoptic),3)))\n html = html+cells\n html = html+\"\"\"</body></table>\"\"\"\n \npdf.write_html(html)\n\npdf.ln(1)\npdf.set_font('')\npdf.set_font_size(10.)\nref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy.\" BioRxiv (2020).'\npdf.multi_cell(190, 5, txt = ref_1, align='L')\nref_2 = '- StarDist 2D: Schmidt, Uwe, et al. \"Cell detection with star-convex polygons.\" International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2018.'\npdf.multi_cell(190, 5, txt = ref_2, align='L')\n\npdf.ln(3)\nreminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'\n\npdf.set_font('Arial', size = 11, style='B')\npdf.multi_cell(190, 5, txt=reminder, align='C')\n\npdf.output(full_QC_model_path+'/Quality Control/'+QC_model_name+'_QC_report.pdf')\n", "_____no_output_____" ] ], [ [ "# **6. Using the trained model**\n---", "_____no_output_____" ], [ "\n\n## **6.1 Generate prediction(s) from unseen dataset**\n---\n\n<font size = 4>In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. 
After that, your trained model from section 4 is activated and finally saved into your Google Drive.\n\n---\n\n<font size = 4>The current trained model (from section 4.3) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Prediction_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n\n<font size = 4>**`Data_folder`:** This folder should contain the images that you want to predict using the network that you have trained.\n\n<font size = 4>**`Result_folder`:** This folder will contain the predicted output ROI.\n\n<font size = 4>**`Data_type`:** Please indicate if the images you want to predict are single images or stacks.\n\n\n<font size = 4>In StarDist the following results can be exported:\n- Region of interest (ROI) that can be opened in ImageJ / Fiji. The ROIs are saved inside a .zip file in your chosen result folder. To open the ROIs in Fiji, just drag and drop the zip file!\n- The predicted mask images\n- A tracking file that can easily be imported into Trackmate to track the nuclei (Stacks only).\n- A CSV file that contains the number of nuclei detected per image (single image only). \n- A CSV file that contains the coordinates of the centre of each detected nucleus (single image only). \n\n", "_____no_output_____" ] ], [ [ "Single_Images = 1\nStacks = 2\n\n#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved (Result folder), then play the cell to predict output on your unseen images.\n\nData_folder = \"\" #@param {type:\"string\"}\nResults_folder = \"\" #@param {type:\"string\"}\n\n#@markdown ###Are your data single images or stacks?\n\nData_type = Single_Images #@param [\"Single_Images\", \"Stacks\"] {type:\"raw\"}\n\n#@markdown ###What outputs would you like to generate?\nRegion_of_interests = True #@param {type:\"boolean\"}\nMask_images = True #@param {type:\"boolean\"}\nTracking_file = False #@param {type:\"boolean\"}\n\n\n# model name and path\n#@markdown ###Do you want to use the current trained model?\nUse_the_current_trained_model = True #@param {type:\"boolean\"}\n\n#@markdown ###If not, please provide the path to the model folder:\n\nPrediction_model_folder = \"\" #@param {type:\"string\"}\n\n#Here we find the loaded model name and parent path\nPrediction_model_name = os.path.basename(Prediction_model_folder)\nPrediction_model_path = os.path.dirname(Prediction_model_folder)\n\nif (Use_the_current_trained_model): \n  print(\"Using current trained network\")\n  Prediction_model_name = model_name\n  Prediction_model_path = model_path\n\nfull_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\nif os.path.exists(full_Prediction_model_path):\n  print(\"The \"+Prediction_model_name+\" network will be used.\")\nelse:\n  print(bcolors.WARNING+'!! 
WARNING: The chosen model does not exist !!'+W)\n print('Please make sure you provide a valid model path and model name before proceeding further.')\n\n#single images\nData_folder = Data_folder+\"/*.tif\"\n\nif Data_type == 1 :\n print(\"Single images are now beeing predicted\")\n np.random.seed(16)\n lbl_cmap = random_label_cmap()\n X = sorted(glob(Data_folder))\n X = list(map(imread,X))\n n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n axis_norm = (0,1) # normalize channels independently\n \n # axis_norm = (0,1,2) # normalize channels jointly\n if n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n \n names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n \n Nuclei_number = []\n\n # modify the names to suitable form: path_images/image_numberX.tif\n FILEnames = []\n for m in names:\n m = Results_folder+'/'+m\n FILEnames.append(m)\n\n # Create a list of name with no extension\n \n name_no_extension=[]\n for n in names:\n name_no_extension.append(os.path.splitext(n)[0])\n \n\n # Save all ROIs and masks into results folder\n \n for i in range(len(X)):\n img = normalize(X[i], 1,99.8, axis = axis_norm)\n labels, polygons = model.predict_instances(img)\n \n os.chdir(Results_folder)\n\n if Mask_images:\n imsave(FILEnames[i], labels, polygons)\n\n if Region_of_interests:\n export_imagej_rois(name_no_extension[i], polygons['coord'])\n\n if Tracking_file:\n print(bcolors.WARNING+\"Tracking files are only generated when stacks are predicted\"+W) \n \n Nuclei_centre_coordinate = polygons['points']\n my_df2 = pd.DataFrame(Nuclei_centre_coordinate)\n my_df2.columns =['Y', 'X']\n \n my_df2.to_csv(Results_folder+'/'+name_no_extension[i]+'_Nuclei_centre.csv', index=False, header=True)\n\n\n Nuclei_array = polygons['coord']\n Nuclei_array2 = [names[i], Nuclei_array.shape[0]]\n Nuclei_number.append(Nuclei_array2) \n\n my_df = pd.DataFrame(Nuclei_number)\n my_df.to_csv(Results_folder+'/Nuclei_count.csv', index=False, header=False)\n \n\n # One example is displayed\n\n print(\"One example image is displayed bellow:\")\n plt.figure(figsize=(10,10))\n plt.imshow(img if img.ndim==2 else img[...,:3], clim=(0,1), cmap='gray')\n plt.imshow(labels, cmap=lbl_cmap, alpha=0.5)\n plt.axis('off');\n plt.savefig(name_no_extension[i]+\"_overlay.tif\")\n\nif Data_type == 2 :\n print(\"Stacks are now beeing predicted\")\n np.random.seed(42)\n lbl_cmap = random_label_cmap()\n Y = sorted(glob(Data_folder))\n X = list(map(imread,Y))\n n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n axis_norm = (0,1) # normalize channels independently\n # axis_norm = (0,1,2) # normalize channels jointly\n if n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n #Load a pretrained network\n model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n \n names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n\n # Create a list of name with no extension\n \n name_no_extension = []\n for n in names:\n name_no_extension.append(os.path.splitext(n)[0])\n\n outputdir = Path(Results_folder)\n\n# Save all ROIs and images in Results folder.\n for num, i in enumerate(X):\n print(\"Performing prediction on: \"+names[num])\n\n \n timelapse = np.stack(i)\n timelapse = normalize(timelapse, 1,99.8, axis=(0,)+tuple(1+np.array(axis_norm)))\n 
timelapse.shape\n\n if Region_of_interests: \n polygons = [model.predict_instances(frame)[1]['coord'] for frame in tqdm(timelapse)] \n export_imagej_rois(os.path.join(outputdir, name_no_extension[num]), polygons) \n \n n_timepoint = timelapse.shape[0]\n prediction_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n Tracking_stack = np.zeros((n_timepoint, timelapse.shape[2], timelapse.shape[1]))\n\n# Save the masks in the result folder\n if Mask_images or Tracking_file:\n for t in range(n_timepoint):\n img_t = timelapse[t]\n labels, polygons = model.predict_instances(img_t) \n prediction_stack[t] = labels\n\n# Create a tracking file for trackmate\n\n for point in polygons['points']:\n cv2.circle(Tracking_stack[t],tuple(point),0,(1), -1)\n\n prediction_stack_32 = img_as_float32(prediction_stack, force_copy=False)\n Tracking_stack_32 = img_as_float32(Tracking_stack, force_copy=False)\n Tracking_stack_8 = img_as_ubyte(Tracking_stack_32, force_copy=True)\n \n Tracking_stack_8_rot = np.rot90(Tracking_stack_8, axes=(1,2))\n Tracking_stack_8_rot_flip = np.fliplr(Tracking_stack_8_rot)\n\n os.chdir(Results_folder)\n if Mask_images:\n imsave(names[num], prediction_stack_32)\n if Tracking_file:\n imsave(name_no_extension[num]+\"_tracking_file.tif\", Tracking_stack_8_rot_flip)\n\n \n\nprint(\"Predictions completed\") ", "_____no_output_____" ] ], [ [ "## **6.2. Download your predictions**\n---\n\n<font size = 4>**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name.", "_____no_output_____" ], [ "\n#**Thank you for using StarDist 2D!**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec56b8bafb891f7402442e9d7ab1f27fa73e828e
5,709
ipynb
Jupyter Notebook
optuna_catboost_cv.ipynb
dnoci001/Kaggle_Volcano_Prediction
72616a2ab346c39eabfb0fdc77ae9e973375293f
[ "MIT" ]
null
null
null
optuna_catboost_cv.ipynb
dnoci001/Kaggle_Volcano_Prediction
72616a2ab346c39eabfb0fdc77ae9e973375293f
[ "MIT" ]
null
null
null
optuna_catboost_cv.ipynb
dnoci001/Kaggle_Volcano_Prediction
72616a2ab346c39eabfb0fdc77ae9e973375293f
[ "MIT" ]
null
null
null
33.19186
161
0.553862
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nfrom time import time\nfrom time import ctime\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom tqdm import tqdm_notebook\nfrom tqdm import tqdm\n\nimport joblib\nfrom joblib import Parallel, delayed\nimport multiprocessing\nnum_cores = multiprocessing.cpu_count()-1\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nfrom catboost import CatBoostRegressor, Pool\nfrom sklearn.model_selection import KFold\nimport matplotlib.pyplot as plt\nimport optuna\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session", "_____no_output_____" ], [ "train = pd.read_csv('/home/dominique/Projects/predict_volcanic_eruptions/tfresh/train.csv', sep = ';')\ntrain.set_index('Unnamed: 0', inplace = True)\ntest = pd.read_csv('/home/dominique/Projects/predict_volcanic_eruptions/tfresh/test.csv', sep = ';')\ntest.set_index('Unnamed: 0', inplace = True)", "_____no_output_____" ], [ "train.fillna(0, inplace=True)\ntest.fillna(0, inplace=True)", "_____no_output_____" ], [ "def objective(trial):\n\n Y = train['time_to_eruption']\n X = train.drop(['time_to_eruption'], axis = 1)\n X_test = test\n\n n_fold = 3\n cv = KFold(n_splits=n_fold, shuffle=True, random_state=42)\n\n oof = np.zeros(len(X))\n cat_prediction = np.zeros(len(X_test))\n mae, r2 = [], []\n\n PARAMS = {\n 'random_seed': 42,\n 'eval_metric': 'MAE', #Also used as the eval metric for competition\n 'iterations': 100,\n 'eta': trial.suggest_float('eta',0.03,0.1),\n 'subsample': trial.suggest_float('subsample',0.7,1.0),\n 'l2_leaf_reg' : trial.suggest_float(\"lambda_l2\", 1e-3, 10.0, log=True),\n }\n\n for fold_n, (train_index, valid_index) in enumerate(cv.split(X)):\n\n X_train = X.iloc[train_index,:]\n X_valid = X.iloc[valid_index,:]\n\n Y_train = Y.iloc[train_index]\n Y_valid = Y.iloc[valid_index]\n\n best_model = CatBoostRegressor(**PARAMS, thread_count = -1) \n\n train_dataset = Pool(data=X_train,\n label=Y_train,\n )\n\n eval_dataset = Pool(data=X_valid,\n label=Y_valid,\n )\n\n best_model.fit(train_dataset,\n use_best_model=True,\n verbose = False,\n eval_set=eval_dataset)\n\n\n y_pred = best_model.predict(Pool(data=X_valid))\n\n mae.append(mean_absolute_error(Y_valid, y_pred))\n r2.append(r2_score(Y_valid, y_pred))\n\n cat_prediction += best_model.predict(Pool(data=X_test))\n\n cat_prediction /= n_fold\n\n \n submission = pd.DataFrame()\n submission['segment_id'] = test.index\n submission['time_to_eruption'] = cat_prediction\n filename = 'submission' + str(trial.number) + '.csv'\n submission.to_csv(filename, header=True, index=False)\n\n return np.mean(mae)", "_____no_output_____" ], [ "study_name = 'catboost_study' \nstudy = optuna.create_study(direction=\"minimize\",load_if_exists = True,study_name=study_name, storage='sqlite:///catboost_study.db')\nstudy.optimize(objective, n_trials=1000)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec56ba9165dfe8392695ff635dea6c951c506766
111,632
ipynb
Jupyter Notebook
notebooks/02_train_model.ipynb
marwan290/Arabic-Dialect
1e5f3499b02a410fb42a3ad0acdb16829f3dddc4
[ "MIT" ]
null
null
null
notebooks/02_train_model.ipynb
marwan290/Arabic-Dialect
1e5f3499b02a410fb42a3ad0acdb16829f3dddc4
[ "MIT" ]
null
null
null
notebooks/02_train_model.ipynb
marwan290/Arabic-Dialect
1e5f3499b02a410fb42a3ad0acdb16829f3dddc4
[ "MIT" ]
null
null
null
239.553648
86,896
0.910993
[ [ [ "PATH = r'..\\data\\processed\\02_preprocced.csv'\nMODEL_PATH = r'..\\models\\mnb.pkl'\nBOW_PATH = r'..\\models\\BOW.pkl'", "_____no_output_____" ], [ "!pip install xgboost\nfrom xgboost import XGBClassifier\n", "Requirement already satisfied: xgboost in c:\\users\\oem\\anaconda3\\lib\\site-packages (1.5.2)" ], [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\n\n\n\n# from keras.utils import plot_model\n\nimport sklearn\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import BernoulliNB,MultinomialNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\n\nfrom imblearn.over_sampling import SMOTE\n\n", "_____no_output_____" ], [ "df = pd.read_csv(PATH)", "_____no_output_____" ], [ "plt.figure(figsize=(12,6))\ng = sns.countplot(data=df, x='dialect')\n\nplt.show(g)", "_____no_output_____" ] ], [ [ "This graph show Imblanced data", "_____no_output_____" ] ], [ [ "bow = CountVectorizer(min_df=6)\nX = bow.fit_transform(df['tweet_text'])", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "X_train,X_test,y_train,y_test = train_test_split(X,df['dialect'],test_size=0.03,shuffle = True)", "_____no_output_____" ], [ "\nmnb = MultinomialNB()\nparameters={'alpha': (0.1, 0.2,0.15,0.19,0.13,0.19,0.21,.25)}\ngrid_search= GridSearchCV(mnb,parameters )", "_____no_output_____" ], [ "grid_search.fit(X_under,y_under) # search to find best paramter\ngrid_search.best_params_", "_____no_output_____" ], [ "X_under, y_under = SMOTE(random_state=0).fit_resample(X_train,y_train)", "_____no_output_____" ], [ "mnb = MultinomialNB()\nmnb.fit(X_train,y_train)", "_____no_output_____" ], [ "y_pred = mnb.predict(X_test)\nprint(sklearn.metrics.classification_report(y_test,y_pred))", " precision recall f1-score support\n\n AE 0.44 0.42 0.43 788\n BH 0.38 0.38 0.38 809\n DZ 0.67 0.54 0.60 495\n EG 0.67 0.89 0.76 1682\n IQ 0.76 0.54 0.63 475\n JO 0.45 0.28 0.35 892\n KW 0.46 0.58 0.51 1254\n LB 0.64 0.68 0.66 788\n LY 0.67 0.72 0.70 1106\n MA 0.83 0.61 0.71 373\n OM 0.47 0.31 0.37 535\n PL 0.46 0.62 0.53 1333\n QA 0.43 0.52 0.47 897\n SA 0.43 0.45 0.44 802\n SD 0.83 0.56 0.67 432\n SY 0.58 0.26 0.36 503\n TN 0.79 0.39 0.52 297\n YE 0.52 0.11 0.18 285\n\n accuracy 0.55 13746\n macro avg 0.58 0.49 0.52 13746\nweighted avg 0.56 0.55 0.54 13746\n\n" ], [ "labels = np.unique(y_test)\ncf_matrix = sklearn.metrics.confusion_matrix(y_test,y_pred)\nfig, ax = plt.subplots(figsize=(10,10))\nsns.heatmap(cf_matrix, linewidths=1, annot=True, ax=ax, fmt='g',xticklabels=labels, yticklabels=labels,cbar=False,annot_kws={\"fontsize\":10})", "_____no_output_____" ], [ "lnsvm = LinearSVC()\nlnsvm.fit(X_train,y_train)", "C:\\Users\\oem\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:985: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n warnings.warn(\"Liblinear failed to converge, increase \"\n" ], [ "y_pred = lnsvm.predict(X_test)\nprint(sklearn.metrics.classification_report(y_test,y_pred))", " precision recall f1-score support\n\n AE 0.39 0.39 0.39 773\n BH 0.35 0.34 0.35 746\n DZ 0.53 0.54 0.53 484\n EG 0.70 0.79 0.74 1743\n IQ 0.52 0.48 0.50 456\n JO 0.42 0.36 0.38 867\n KW 0.49 0.52 0.50 1276\n LB 0.60 0.66 0.63 832\n LY 0.64 0.66 0.65 1136\n MA 0.59 0.57 0.58 350\n OM 0.33 0.31 0.32 547\n PL 0.46 0.49 0.48 1287\n QA 0.45 0.43 0.44 925\n SA 0.40 
0.39 0.40 806\n SD 0.59 0.55 0.57 406\n SY 0.40 0.32 0.35 530\n TN 0.54 0.46 0.50 283\n YE 0.24 0.22 0.23 299\n\n accuracy 0.51 13746\n macro avg 0.48 0.47 0.47 13746\nweighted avg 0.50 0.51 0.50 13746\n\n" ], [ "pickle.dump(mnb, open(MODEL_PATH, 'wb'))\npickle.dump(bow, open(BOW_PATH, 'wb'))", "_____no_output_____" ], [ "test = bow.transform(['lv,h','جدا','احبك'])", "_____no_output_____" ], [ "test.shape", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec56bd399feef9a3610539945b6de3520008be5a
66,363
ipynb
Jupyter Notebook
bilibili/.ipynb_checkpoints/7.bilibili_LIghtGBM-checkpoint.ipynb
RubyRose-TAT/bilibili_Data_analysis-Clout_prediction
470aa324330e831b629491c1f504707d2e8b2998
[ "MIT" ]
1
2022-01-03T05:18:07.000Z
2022-01-03T05:18:07.000Z
bilibili/.ipynb_checkpoints/7.bilibili_LIghtGBM-checkpoint.ipynb
RubyRose-TAT/bilibili_Data_analysis-Clout_prediction
470aa324330e831b629491c1f504707d2e8b2998
[ "MIT" ]
null
null
null
bilibili/.ipynb_checkpoints/7.bilibili_LIghtGBM-checkpoint.ipynb
RubyRose-TAT/bilibili_Data_analysis-Clout_prediction
470aa324330e831b629491c1f504707d2e8b2998
[ "MIT" ]
null
null
null
53.175481
32,308
0.660413
[ [ [ "import lightgbm\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nplt.style.use({'figure.figsize':(15,10)})\n\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False", "_____no_output_____" ], [ "df = pd.read_csv('data/bilibili_rank100_data.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "#按标题去重\ndf = df.drop_duplicates(subset=['标题'],keep='first',inplace=False)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "#缺失值处理\ndf = df.drop(df[df['时间']>1000].index)\ndf[df.isnull().values==True]", "_____no_output_____" ], [ "#重置索引\ndf=df.reset_index(drop=True, inplace=False)", "_____no_output_____" ], [ "#分区按序号编码\ndef LabelEncoding(df):\n x, dfc = '分区', df\n key = dfc[x].unique() # 将唯一值作为关键字\n value = [i for i in range(len(key))] # 键值\n Dict = dict(zip(key, value)) # 字典,即键值对\n for i in range(len(key)):\n for j in range(dfc.shape[0]):\n if key[i] == dfc[x][j]:\n dfc[x][j] = Dict[key[i]]\n dfc[x] = dfc[x].astype(np.int64)\n return dfc\n\ndf = LabelEncoding(df)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1614 entries, 0 to 1613\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 作者 1614 non-null object \n 1 粉丝 1614 non-null int64 \n 2 标题 1614 non-null object \n 3 点赞 1614 non-null int64 \n 4 硬币 1614 non-null int64 \n 5 收藏 1614 non-null int64 \n 6 分享 1614 non-null int64 \n 7 弹幕 1614 non-null int64 \n 8 播放 1614 non-null int64 \n 9 评论 1614 non-null int64 \n 10 上传时间 1614 non-null object \n 11 爬取时间 1614 non-null object \n 12 标签 1613 non-null object \n 13 分区 1614 non-null int64 \n 14 时间 1614 non-null float64\ndtypes: float64(1), int64(9), object(5)\nmemory usage: 189.3+ KB\n" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "df.drop(df.columns[[0,2,10,11,12]],axis=1,inplace=True)\ndf", "_____no_output_____" ], [ "X = df.drop([\"播放\"],axis = 1)\ny = df[\"播放\"]\nX_train,X_test,y_train,y_test=train_test_split(X,y,random_state=99)", "_____no_output_____" ], [ "X_train.shape, X_test.shape", "_____no_output_____" ], [ "from lightgbm import LGBMRegressor\nfrom sklearn.model_selection import GridSearchCV", "_____no_output_____" ], [ "gbm = LGBMRegressor()\n\nparam_grid = {\n 'n_estimators':[10,50,100,200,500,1000],\n 'max_depth':[3,5,7,9],\n}\n\ngrid = GridSearchCV(gbm, param_grid=param_grid, cv=10)\n\ngrid.fit(X_train, y_train)", "_____no_output_____" ], [ "# 查看最佳分数和最佳参数\ngrid.best_score_", "_____no_output_____" ], [ "grid.best_params_", "_____no_output_____" ], [ "# 获取最佳模型\ngrid.best_estimator_", "_____no_output_____" ], [ "# 利用最佳模型来进行预测\ngbm=grid.best_estimator_\npred = gbm.predict(X_test)", "_____no_output_____" ], [ "result = {\"labels\":y_test,\"prediction\":pred}\nresult = pd.DataFrame(result)\nresult.head()", "_____no_output_____" ], [ "result['labels'].plot(style='k.',figsize=(15,5))\nresult['prediction'].plot(style='r.')\n#设置图例文字大小和图示大小\nplt.legend(fontsize=15,markerscale=3)\n#设置坐标文字大小\nplt.tick_params(labelsize=25)\n#生成刻度线网格\nplt.grid()", "_____no_output_____" ], [ "from sklearn import metrics\nMSE = metrics.mean_squared_error(y_test,pred)\nRMSE = np.sqrt(MSE)\nprint('(MSE,RMSE)=',(MSE,RMSE))", "(MSE,RMSE)= (475086539934.499, 689265.2174123536)\n" 
], [ "print(\"模型评分: {:.2f}\".format(gbm.score(X_test, y_test)))", "模型评分: 0.72\n" ], [ "from sklearn.metrics import r2_score, auc\nr2_score(y_test,pred)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec56eda0b03383377db1fb8983d11f628d65cd09
132,189
ipynb
Jupyter Notebook
examples/Notebooks/KL_Divergence_Estimation.ipynb
jedbrown/approxposterior
ca8bce1be2a075ef4aec7915cb4213a0f4fd6c4c
[ "MIT" ]
null
null
null
examples/Notebooks/KL_Divergence_Estimation.ipynb
jedbrown/approxposterior
ca8bce1be2a075ef4aec7915cb4213a0f4fd6c4c
[ "MIT" ]
null
null
null
examples/Notebooks/KL_Divergence_Estimation.ipynb
jedbrown/approxposterior
ca8bce1be2a075ef4aec7915cb4213a0f4fd6c4c
[ "MIT" ]
null
null
null
429.185065
84,662
0.925977
[ [ [ "### KL Divergence Estimation\n\n---\n\nThis notebook explores how to compute and estimate the KL Divergence.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport scipy.stats as ss\nimport numpy as np\nfrom approxposterior import utility as ut, gp_utils\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n#Typical plot parameters that make for pretty plots\nmpl.rcParams['figure.figsize'] = (9,8)\nmpl.rcParams['font.size'] = 25.0\n\n## for Palatino and other serif fonts use:\nmpl.rc('font',**{'family':'serif','serif':['Computer Modern']})\nmpl.rc('text', usetex=True)", "_____no_output_____" ] ], [ [ "**Compute KL Divergence for two normal distributions using scipy's entropy function**\n\n---\n\nThis method is quick and easy when you have 1D arrays representing the two pdfs.", "_____no_output_____" ] ], [ [ "# Normal distributions!\nnum = 1000\nx = np.linspace(-5, 5, num)\n\n# Make two different normal pdfs\np_diff = ss.norm.pdf(x, loc=1.2, scale=1)\nq_diff = ss.norm.pdf(x, loc=-1.2, scale=1)\n\n# Estimate KL divergence: Should be rather non-zero\nkl_diff = ss.entropy(p_diff, q_diff)\nprint(kl_diff)", "2.87931253029\n" ], [ "# Make two slightly different normal pdfs\np_close = ss.norm.pdf(x, loc=0, scale=1)\nq_close = ss.norm.pdf(x, loc=0.2, scale=0.8)\n\n# Estimate KL divergence: should be close to zero\nkl_close = ss.entropy(p_close, q_close)\nprint(kl_close)", "0.0893529245286\n" ], [ "# Use the same pdf now\np = ss.norm.pdf(x, loc=0, scale=1)\n\n# Estimate KL divergence: should be zero\nkl_same = ss.entropy(p,p)\nprint(kl_same)", "0.0\n" ], [ "# Plot the distributions and the KL divergence!\nfig, axes = plt.subplots(ncols=3, figsize=(24,8), sharey=True)\n\n# Listify the quantities\nps = [p_diff, p_close, p]\nqs = [q_diff, q_close, p]\nkls = [kl_diff, kl_close, kl_same]\nmus = [[1.2,-1.2], [0,0.2], [0,0]]\nsigs = [[1,1], [1,0.8], [1,1]]\n\n# Plot the data with annotations\nfor ii in range(len(axes)):\n axes[ii].plot(x, ps[ii], 'r-', lw=3)\n axes[ii].plot(x, qs[ii], 'b-', lw=3)\n\n # Add KL Divergence\n axes[ii].text(-5.5, 0.45, \"$D_{KL}$ = %.2lf\" % kls[ii],\n ha=\"left\", va=\"center\", size=30, color=\"k\",\n bbox=dict(boxstyle=\"square\", fc=\"w\", ec=\"none\"))\n\n # Add Gaussian Parameters\n axes[ii].text(1.3, 0.48, \"$\\mu_R, \\mu_B$ = %.0lf, %.0lf\" % (mus[ii][0],mus[ii][1]),\n ha=\"left\", va=\"center\", size=28, color=\"k\",\n bbox=dict(boxstyle=\"square\", fc=\"w\", ec=\"none\"))\n axes[ii].text(1.3, 0.43, \"$\\sigma_R, \\sigma_B$ = %.1lf, %.1lf\" % (sigs[ii][0],sigs[ii][1]),\n ha=\"left\", va=\"center\", size=28, color=\"k\",\n bbox=dict(boxstyle=\"square\", fc=\"w\", ec=\"none\"))\n\n# Format all axes\naxes[0].set_ylabel(\"Density\")\nfor ax in axes:\n ax.set_xlabel(\"x\")\n \n ax.set_ylim(-0.025,0.525)\n ax.set_xlim(-6,6)\nfig.tight_layout()", "_____no_output_____" ] ], [ [ "**Numerically estimate the KL divergence via samping the distributions**\n\n---\n\nWhen you have a weird, multi-dimensional pdf, you must turn to numerical methods to get an answer.\n\nHere, we examine the 1st case (left panel of the above figure) to see gauge the performance of our numerical method of estimating the KL divergence. 
", "_____no_output_____" ] ], [ [ "# Example usage:\np_kwargs = {\"loc\": 1, \"scale\": 1}\nq_kwargs = {\"loc\" : -1, \"scale\" : 1}\n\n# Wrap the functions\np_pdf = ut.function_wrapper(ss.norm.pdf, **p_kwargs)\nq_pdf = ut.function_wrapper(ss.norm.pdf, **q_kwargs)", "_____no_output_____" ], [ "# How does it all scale as a function of the number of random samples?\nnums = [100, 1000, 10000, 100000]\nseeds = [42,1,55,91]\niters = 5\n\nkl_num = np.zeros((len(nums), iters))\n\np_kwargs = {\"loc\": 1.2, \"scale\": 1}\nq_kwargs = {\"loc\" : -1.2, \"scale\" : 1}\n\n# Wrap the functions\np_pdf = ut.function_wrapper(ss.norm.pdf, **p_kwargs)\nq_pdf = ut.function_wrapper(ss.norm.pdf, **q_kwargs)\n\nfor ii in range(len(nums)):\n # Reset seed for \"new\" random numbers\n np.random.seed(seeds[ii])\n for jj in range(iters):\n x = ss.norm.rvs(loc=1.2, scale=1, size=nums[ii])\n kl_num[ii,jj] = ut.kl_numerical(x, p_pdf, q_pdf)\n \n# Compute mean, error of KL divergence numerical estimates\nmeans = np.mean(kl_num, axis=1)\nstds = np.std(kl_num, axis=1)", "_____no_output_____" ], [ "# Plot it!\nfig, ax = plt.subplots()\n\n# Truth\nax.axhline(kl_diff, ls=\"-.\", lw=3, color=\"k\", label=\"True KL Divergence\")\n\n# Estimate, error envelope at each step\nax.plot(nums, means, lw=3, color=\"C0\", label=\"Mean Numerical KL Divergence\")\nax.fill_between(nums, means - stds, means + stds, alpha=0.5, color=\"C0\")\n\nax.set_ylabel(\"KL Divergence\")\nax.legend(loc=\"upper right\", fontsize=20)\n\nax.set_xlabel(\"Number of Samples\")\nax.set_xscale(\"log\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec5702f688c3ddb9be8019b6c23ab96dda8a18a6
11,299
ipynb
Jupyter Notebook
workbooks/collision_network_analysis.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
workbooks/collision_network_analysis.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
workbooks/collision_network_analysis.ipynb
amarallab/waldo
e38d23d9474a0bcb7a94e685545edb0115b12af4
[ "MIT" ]
null
null
null
36.099042
878
0.57483
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec570947bb2b3ecb97bb40af2b8945a3329c9b9b
11,657
ipynb
Jupyter Notebook
Assignments_6.ipynb
bipsec/Assignments
914a7016da1be707df3db6afa8da4bc1f697aaac
[ "MIT" ]
null
null
null
Assignments_6.ipynb
bipsec/Assignments
914a7016da1be707df3db6afa8da4bc1f697aaac
[ "MIT" ]
null
null
null
Assignments_6.ipynb
bipsec/Assignments
914a7016da1be707df3db6afa8da4bc1f697aaac
[ "MIT" ]
null
null
null
23.887295
136
0.401476
[ [ [ "#Answer of 1:\n\n'''\nEscape characters:\n\\t\n\\n\n\\'\n\\\\\n\\b\n\\r\n\n\n'''\n\ntext = \"My name is \\tBiplab\"\ntext1 = \"My name is \\nBiplab\"\ntext2 = \"My name\\'s Biplab\"\ntext3 = \"My name \\\\is Biplab\"\ntext4 = \"My name \\bis Biplab\"\ntext5 = \"My name \\ris Biplab\"", "_____no_output_____" ], [ "print(text)\nprint(text1)\nprint(text2)\nprint(text3)\nprint(text4)\nprint(text5)", "My name is \tBiplab\nMy name is \nBiplab\nMy name's Biplab\nMy name \\is Biplab\nMy name \bis Biplab\nMy name \ris Biplab\n" ], [ "#Answer of 2:\n'''\nn --> stands for new line\nt --> stands for tab\n\n'''", "_____no_output_____" ], [ "#Answer of 3:\nstring = \"\\\\My country is Beautiful\\\\\"\nprint(string)", "\\My country is Beautiful\\\n" ], [ "#Answer of 4:\ncontent = \"Howl's moving castle\"\nprint(content) ### the double quotes have been used to enclose the string aw well as the single quote is used to \n ### wrap the name and put a quotation symbol. Python looks for double quotes to enclose the string if it was\n ### started by dobule quotes and in between single quotes is untroubled by Python. \n\n", "Howl's moving castle\n" ], [ "#Answer of 5:\ntext = \"I am from Bangladesh.\\n And I am proud of Bangladeshi\"\nprint(text, end=\"\")\n\n\n### Let me know the answer!!!!!", "I am from Bangladesh.\n And I am proud of Bangladeshi" ], [ "#Answer of 6:\n\n'Hello, World!'[1]", "_____no_output_____" ], [ "'Hello, World!'[0:5]", "_____no_output_____" ], [ "'Hello, World!'[:5]", "_____no_output_____" ], [ "'Hello, World!'[3:]", "_____no_output_____" ], [ "#Answer of 7:\n'Hello'.upper()", "_____no_output_____" ], [ "'Hello'.upper().isupper()", "_____no_output_____" ], [ "'Hello'.upper().lower()", "_____no_output_____" ], [ "#Answer of 8:\n'Remember,remember,the fifth of July.'.split()", "_____no_output_____" ], [ "'-'.join('There can only one').split()", "_____no_output_____" ], [ "#Answer of 9:\n\ntext = \" I am from Bangladesh.And I am proud of Bangladeshi \"\n\ntext.strip()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec570c31cb2b4912014bec1073c5710cfc4147b1
40,373
ipynb
Jupyter Notebook
MultipleAlgebraicEquations.ipynb
josephcslater/Introduction_to_Python
0d30771ed135432fe27ec830c741eb7c30a1ec3c
[ "Unlicense" ]
3
2018-10-08T00:48:12.000Z
2021-02-05T20:20:39.000Z
MultipleAlgebraicEquations.ipynb
josephcslater/Introduction_to_Python
0d30771ed135432fe27ec830c741eb7c30a1ec3c
[ "Unlicense" ]
1
2020-09-04T18:29:42.000Z
2020-09-04T18:29:42.000Z
MultipleAlgebraicEquations.ipynb
josephcslater/Introduction_to_Python
0d30771ed135432fe27ec830c741eb7c30a1ec3c
[ "Unlicense" ]
2
2019-01-23T17:13:28.000Z
2020-07-29T18:59:28.000Z
148.97786
10,376
0.882223
[ [ [ "# Multiple equations/multiple unknowns \nSolution to problem P2.31 in Modern Control Systems, Dorf and Bishop", "_____no_output_____" ] ], [ [ "import sympy as sym\n# print things all pretty\nfrom sympy.abc import *\nsym.init_printing()\n\n# Need to define variables as symbolic for sympy to use them. \n# This might have been best through multiple excetitions for smaller sets of variables.\nx, y, G1, H1, G2, H2, G3, H3, G4, H4, G5, G6, R1, Y1, Y2, X1= symbols(\"x, y, G1, H1, G2, H2, G3, H3, G4, H4, G5, G6, R1, Y1, Y2, X1\", real = True)", "_____no_output_____" ] ], [ [ "Equations are defined by putting everything on the same side so the expression is equal to zero. \n\nThe equations can be assigned to a variable. On the next line, equation `eqn1`.", "_____no_output_____" ] ], [ [ "eqn1 = R1-H1*Y1-X1\neqn1", "_____no_output_____" ], [ "eqn2 = X1*G1+G3*H2*Y2-Y1/G2\neqn2", "_____no_output_____" ], [ "eqn3 = Y2*H2*G5+G4/G2*Y1-G6*Y2\neqn3", "_____no_output_____" ] ], [ [ "Let's eliminate $X_1$ from equations 1 and 2. ", "_____no_output_____" ] ], [ [ "eqn4 = eqn2.subs(X1,sym.solve(eqn1,X1)[0])\neqn4", "_____no_output_____" ] ], [ [ "Solve the equations for $Y_1$ and $Y_2$. ", "_____no_output_____" ] ], [ [ "soln = sym.solve([eqn4, eqn3], Y1, Y2)\nsoln", "_____no_output_____" ], [ "T11 = soln[Y1]/R1\nT11", "_____no_output_____" ], [ "T12 = soln[Y2]/R1\nT12", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec570ebede38c71dccc2eca712e50db8214e3744
33,134
ipynb
Jupyter Notebook
site/pt/beta/tutorials/keras/basic_classification.ipynb
crypdra/docs
41ab06fd14b3a3dff933bb80b19ce46c7c5781cf
[ "Apache-2.0" ]
2
2019-10-25T18:51:16.000Z
2019-10-25T18:51:18.000Z
site/pt/beta/tutorials/keras/basic_classification.ipynb
crypdra/docs
41ab06fd14b3a3dff933bb80b19ce46c7c5781cf
[ "Apache-2.0" ]
null
null
null
site/pt/beta/tutorials/keras/basic_classification.ipynb
crypdra/docs
41ab06fd14b3a3dff933bb80b19ce46c7c5781cf
[ "Apache-2.0" ]
null
null
null
32.420744
604
0.521881
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ], [ "#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ], [ [ "# Treine sua primeira rede neural: classificação básica", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/beta/tutorials/keras/basic_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />Veja em TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/pt/beta/tutorials/keras/basic_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Execute em Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/pt/beta/tutorials/keras/basic_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />Veja código fonte em GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/pt/beta/tutorials/keras/basic_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Baixe o notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: A nossa comunidade TensorFlow traduziu estes documentos. Como as traduções da comunidade são *o melhor esforço*, não há garantias de que sejam uma reflexão exata e atualizada da [documentação oficial em Inglês](https://www.tensorflow.org/?hl=en). Se tem alguma sugestão para melhorar esta tradução, por favor envie um pull request para o repositório do GitHub [tensorflow/docs](https://github.com/tensorflow/docs). 
Para se voluntariar para escrever ou rever as traduções da comunidade, contacte a [lista [email protected]](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).", "_____no_output_____" ], [ "Este tutorial treina um modelo de rede neural para classificação de imagens de roupas, como tênis e camisetas. Tudo bem se você não entender todos os detalhes; este é um visão geral de um programa do TensorFlow com detalhes explicados enquanto progredimos.\n\nO guia usa [tf.keras](https://www.tensorflow.org/guide/keras), uma API alto-nível para construir e treinar modelos no TensorFlow.", "_____no_output_____" ] ], [ [ "try:\n # Colab only\n %tensorflow_version 2.x\nexcept Exception:\n pass\n", "_____no_output_____" ], [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\n# TensorFlow e tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Librariesauxiliares\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)", "_____no_output_____" ] ], [ [ "## Importe a base de dados Fashion MNIST", "_____no_output_____" ], [ "Esse tutorial usa a base de dados [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) que contém 70,000 imagens em tons de cinza em 10 categorias. As imagens mostram artigos individuais de roupas com baixa resolução (28 por 28 pixels), como vemos aqui:\n\n<table>\n <tr><td>\n <img src=\"https://tensorflow.org/images/fashion-mnist-sprite.png\"\n alt=\"Fashion MNIST sprite\" width=\"600\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 1.</b> <a href=\"https://github.com/zalandoresearch/fashion-mnist\">Amostras de Fashion-MNIST</a> (por Zalando, MIT License).<br/>&nbsp;\n </td></tr>\n</table>\n\nFashion MNIST tem como intenção substituir a clássica base de dados [MNIST](http://yann.lecun.com/exdb/mnist/ )— frequentemente usada como \"Hello, World\" de programas de aprendizado de máquina (*machine learning*) para visão computacional. A base de dados MNIST contém imagens de dígitos escritos à mão (0, 1, 2, etc.) em um formato idêntico ao dos artigos de roupas que usaremos aqui.\n\nEsse tutorial usa a Fashion MNIST para variar, e porque é um problema um pouco mais desafiador que o regular MNIST. Ambas bases são relativamente pequenas e são usadas para verificar se um algoritmo funciona como esperado. Elas são bons pontos de partida para testar e debugar código.\n\nUsaremos 60,000 imagens para treinar nossa rede e 10,000 imagens para avaliar quão precisamente nossa rede aprendeu a classificar as imagens. Você pode acessar a Fashion MNIST directly diretamente do TensorFlow. Importe e carregue a base Fashion MNIST diretamente do TensorFlow:", "_____no_output_____" ] ], [ [ "fashion_mnist = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()", "_____no_output_____" ] ], [ [ "Carregando a base de dados que retorna quatro NumPy arrays:\n\n* Os *arrays* `train_images` e `train_labels` são o *conjunto de treinamento*— os dados do modelo usados para aprender.\n* O modelo é testado com o *conjunto de teste*, os *arrays* `test_images` e `test_labels`.\n\nAs imagens são arrays NumPy de 28x28, com os valores des pixels entre 0 to 255. As *labels* (alvo da classificação) são um array de inteiros, no intervalo de 0 a 9. 
Esse corresponde com a classe de roupa que cada imagem representa:\n\n<table>\n <tr>\n <th>Label</th>\n <th>Classe</th>\n </tr>\n <tr>\n <td>0</td>\n <td>Camisetas/Top (T-shirt/top)</td>\n </tr>\n <tr>\n <td>1</td>\n <td>Calça (Trouser)</td>\n </tr>\n <tr>\n <td>2</td>\n <td>Suéter (Pullover)</td>\n </tr>\n <tr>\n <td>3</td>\n <td>Vestidos (Dress)</td>\n </tr>\n <tr>\n <td>4</td>\n <td>Casaco (Coat)</td>\n </tr>\n <tr>\n <td>5</td>\n <td>Sandálias (Sandal)</td>\n </tr>\n <tr>\n <td>6</td>\n <td>Camisas (Shirt)</td>\n </tr>\n <tr>\n <td>7</td>\n <td>Tênis (Sneaker)</td>\n </tr>\n <tr>\n <td>8</td>\n <td>Bolsa (Bag)</td>\n </tr>\n <tr>\n <td>9</td>\n <td>Botas (Ankle boot)</td>\n </tr>\n</table>\n\nCada imagem é mapeada com um só label. Já que o *nome das classes* não são incluídas na base de dados, armazene os dados aqui para usá-los mais tarde quando plotarmos as imagens:", "_____no_output_____" ] ], [ [ "class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']", "_____no_output_____" ] ], [ [ "## Explore os dados\n\nVamos explorar o formato da base de dados antes de treinar o modelo. O próximo comando mostra que existem 60000 imagens no conjunto de treinamento, e cada imagem é representada em 28 x 28 pixels:", "_____no_output_____" ] ], [ [ "train_images.shape", "_____no_output_____" ] ], [ [ "Do mesmo modo, existem 60000 labels no conjunto de treinamento:", "_____no_output_____" ] ], [ [ "len(train_labels)", "_____no_output_____" ] ], [ [ "Cada label é um inteiro entre 0 e 9:", "_____no_output_____" ] ], [ [ "train_labels", "_____no_output_____" ] ], [ [ "Existem 10000 imagens no conjnto de teste. Novamente, cada imagem é representada por 28 x 28 pixels:", "_____no_output_____" ] ], [ [ "test_images.shape", "_____no_output_____" ] ], [ [ "E um conjunto de teste contendo 10000 labels das imagens :", "_____no_output_____" ] ], [ [ "len(test_labels)", "_____no_output_____" ] ], [ [ "## Pré-processe os dados\n\nOs dados precisam ser pré-processados antes de treinar a rede. Se você inspecionar a primeira imagem do conjunto de treinamento, você verá que os valores dos pixels estão entre 0 e 255:", "_____no_output_____" ] ], [ [ "plt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()", "_____no_output_____" ] ], [ [ "Escalaremos esses valores no intervalo de 0 e 1 antes antes de alimentar o modelo da rede neural. Para fazer isso, dividimos os valores por 255. É importante que o *conjunto de treinamento* e o *conjunto de teste* podem ser pré-processados do mesmo modo:", "_____no_output_____" ] ], [ [ "train_images = train_images / 255.0\n\ntest_images = test_images / 255.0", "_____no_output_____" ] ], [ [ "Para verificar que os dados estão no formato correto e que estamos prontos para construir e treinar a rede, vamos mostrar as primeiras 25 imagens do *conjunto de treinamento* e mostrar o nome das classes de cada imagem abaixo.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()", "_____no_output_____" ] ], [ [ "## Construindo o modelo\n\nConstruir a rede neural requer configurar as camadas do modelo, e depois, compilar o modelo.", "_____no_output_____" ], [ "### Montar as camadas\n\nO principal bloco de construção da rede neural é a camada (*layer*). 
As camadas (*layers*) extraem representações dos dados inseridos na rede. Com sorte, essas representações são significativas para o problema à mão.\n\nMuito do *deep learning* consiste encadear simples camadas. Muitas camadas, como `tf.keras.layers.Dense`, tem paramêtros que são aprendidos durante o treinamento.", "_____no_output_____" ] ], [ [ "model = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n])", "_____no_output_____" ] ], [ [ "A primeira camada da rede, `tf.keras.layers.Flatten`, transforma o formato da imagem de um array de imagens de duas dimensões (of 28 by 28 pixels) para um array de uma dimensão (de 28 * 28 = 784 pixels). Pense nessa camada como camadas não empilhadas de pixels de uma imagem e os emfilere. Essa camada não tem paramêtros para aprender; ela só reformata os dados.\n\nDepois dos pixels serem achatados, a rede consite de uma sequência de duas camadas `tf.keras.layers.Dense`. Essa são camadas neurais *densely connected*, ou *fully connected*. A primeira camada `Dense` tem 128 nós (ou neurônios). A segunda (e última) camda é uma *softmax* de 10 nós que retorna um array de 10 probabilidades, cuja soma resulta em 1. Cada nó contem um valor que indica a probabilidade de que aquela imagem pertence a uma das 10 classes.\n\n### Compile o modelo\n\nAntes do modelo estar pronto para o treinamento, é necessário algumas configurações a mais. Essas serão adicionadas no passo de *compilação*:\n\n* *Função Loss* —Essa mede quão precisa o modelo é durante o treinamento. Queremos minimizar a função para *guiar* o modelo para direção certa.\n* *Optimizer* —Isso é como o modelo se atualiza com base no dado que ele vê e sua função *loss*.\n* *Métricas* —usadas para monitorar os passos de treinamento e teste. O exemplo abaixo usa a *acurácia*, a fração das imagens que foram classificadas corretamente.", "_____no_output_____" ] ], [ [ "model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "## Treine o modelo\n\nTreinar a rede neural requer os seguintes passos:\n\n1. Alimente com os dados de treinamento, o modelo. Neste exemplo, os dados de treinamento são os arrays `train_images` e `train_labels`.\n2. O modelo aprende como associar as imagens as *labels*.\n3. Perguntamos ao modelo para fazer previsões sobre o conjunto de teste — nesse exemplo, o array `test_images`. Verificamos se as previsões combinaram com as *labels* do array `test_labels`.\n\nPara começar a treinar, chame o método `model.fit`— assim chamado, porque ele \"encaixa\" o modelo no conjunto de treinamento:", "_____no_output_____" ] ], [ [ "model.fit(train_images, train_labels, epochs=10)", "_____no_output_____" ] ], [ [ "À medida que o modelo treina, as métricas loss e acurácia são mostradas. O modelo atinge uma acurácia de 0.88 (ou 88%) com o conjunto de treinamento.", "_____no_output_____" ], [ "## Avalie a acurácia\n\nDepois, compare como o modelo performou com o conjunto de teste:", "_____no_output_____" ] ], [ [ "test_loss, test_acc = model.evaluate(test_images, test_labels)\n\nprint('\\nTest accuracy:', test_acc)", "_____no_output_____" ] ], [ [ "Acabou que o a acurácia com o conjunto de teste é um pouco menor do que a acurácia de treinamento. Essa diferença entre as duas acurácias representa um *overfitting*. 
Overfitting é modelo de aprendizado de máquina performou de maneira pior em um conjunto de entradas novas, e não usadas anteriormente, que usando o conjunto de treinamento.", "_____no_output_____" ], [ "## Faça predições\n\nCom o modelo treinado, o usaremos para predições de algumas imagens.", "_____no_output_____" ] ], [ [ "predictions = model.predict(test_images)", "_____no_output_____" ] ], [ [ "Aqui, o modelo previu que a *label* de cada imagem no conjunto de treinamento. Vamos olhar na primeira predição:", "_____no_output_____" ] ], [ [ "predictions[0]", "_____no_output_____" ] ], [ [ "A predição é um array de 10 números. Eles representam um a *confiança* do modelo que a imagem corresponde a cada um dos diferentes artigos de roupa. Podemos ver cada *label* tem um maior valor de confiança:", "_____no_output_____" ] ], [ [ "np.argmax(predictions[0])", "_____no_output_____" ] ], [ [ "Então, o modelo é confiante de que esse imagem é uma bota (ankle boot) ou `class_names[9]`. Examinando a label do teste, vemos que essa classificação é correta:", "_____no_output_____" ] ], [ [ "test_labels[0]", "_____no_output_____" ] ], [ [ "Podemos mostrar graficamente como se parece em um conjunto total de previsão de 10 classes.", "_____no_output_____" ] ], [ [ "def plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')", "_____no_output_____" ] ], [ [ "Vamos olhar a previsão imagem na posição 0, do array de predição.", "_____no_output_____" ] ], [ [ "i = 0\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions, test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions, test_labels)\nplt.show()", "_____no_output_____" ], [ "i = 12\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions, test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions, test_labels)\nplt.show()", "_____no_output_____" ] ], [ [ "Vamos plotar algumas da previsão do modelo. Labels preditas corretamente são azuis e as predições erradas são vermelhas. O número dá a porcentagem (de 100) das labels preditas. 
Note que o modelo pode errar mesmo estão confiante.", "_____no_output_____" ] ], [ [ "# Plota o primeiro X test images, e as labels preditas, e as labels verdadeiras.\n# Colore as predições corretas de azul e as incorretas de vermelho.\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n plot_image(i, predictions, test_labels, test_images)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n plot_value_array(i, predictions, test_labels)\nplt.show()", "_____no_output_____" ] ], [ [ "Finamente, use o modelo treinado para fazer a predição de uma única imagem.", "_____no_output_____" ] ], [ [ "# Grab an image from the test dataset.\nimg = test_images[0]\n\nprint(img.shape)", "_____no_output_____" ] ], [ [ "Modelos `tf.keras` são otimizados para fazer predições em um *batch*, ou coleções, de exemplos de uma vez. De acordo, mesmo que usemos uma única imagem, precisamos adicionar em uma lista:", "_____no_output_____" ] ], [ [ "# Adiciona a imagem em um batch que possui um só membro.\nimg = (np.expand_dims(img,0))\n\nprint(img.shape)", "_____no_output_____" ] ], [ [ "Agora prediremos a label correta para essa imagem:", "_____no_output_____" ] ], [ [ "predictions_single = model.predict(img)\n\nprint(predictions_single)", "_____no_output_____" ], [ "plot_value_array(0, predictions_single, test_labels)\n_ = plt.xticks(range(10), class_names, rotation=45)", "_____no_output_____" ] ], [ [ "`model.predict` retorna a lista de listas — uma lista para cada imagem em um *batch* de dados. Pegue a predição de nossa (única) imagem no *batch*:", "_____no_output_____" ] ], [ [ "np.argmax(predictions_single[0])", "_____no_output_____" ] ], [ [ "E, como antes, o modelo previu a label como 9.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec57111c7846a109742054aecad84c551a3ba041
188,400
ipynb
Jupyter Notebook
Projeto 1 Ciencias dos dados/P1 Ciencias Dos Dados.ipynb
luvi01/Projeto1Dados
700fbfb76478a7aa8975e46a691582f15525dc4d
[ "MIT" ]
null
null
null
Projeto 1 Ciencias dos dados/P1 Ciencias Dos Dados.ipynb
luvi01/Projeto1Dados
700fbfb76478a7aa8975e46a691582f15525dc4d
[ "MIT" ]
null
null
null
Projeto 1 Ciencias dos dados/P1 Ciencias Dos Dados.ipynb
luvi01/Projeto1Dados
700fbfb76478a7aa8975e46a691582f15525dc4d
[ "MIT" ]
null
null
null
54.309599
15,100
0.556423
[ [ [ "# Projeto 1\n\n## Luiz Vitor Germanos Teixeira\n\n### Pergunta:\nPaíses que recebem auxílio internacional tem, como consequência, o crescimento no seu IDH? O seu índice de democracia tem algum impacto neste crescimento?\n\n\n\n\n## Dados utilizados:\nIDH\n\nÍndice de democracia\n\nAjuda monetária Internacional por pessoa\n", "_____no_output_____" ], [ "## Método:\nEste projeto vai analisar uma faixa de tempo de 5 anos, começando em 1995 e terminando em 2000. Como o Índice de democracia utilizado é o Polity IV dataset, as nações serão classificadas de acordo com o seguinte sistema de rankeamento:\n\nPaíses com um índice de -10 até -6, autocracia\n\nPaíses com um índice de -5 até 0, anocracia fechada\n\nPaíses com um índice de 1 até 5, anocracia aberta\n\nPaíses com um índice de 6 até 9, democracia\n\nPaíses com um índice de 10, democracia plena\n\n\nA análise será feita da seguinte maneira, cada classificação receberá uma cor, depois será calculada a variação do IDH dos países em um período de 5 anos junto com somatório da ajuda financeira, também no mesmo período. Com esses dados será montado um gráfico para checar se existe alguma correlação entre o grau de democracia do país e o efeito da ajuda no crescimento do seu IDH.", "_____no_output_____" ], [ "## Importando bibliotecas e criando funções", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport seaborn as sns\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef reta(dados_x, dados_y): \n a = dados_y.cov(dados_x) / dados_x.var()\n b = dados_y.mean() - a*dados_x.mean()\n \n print('Ajuste linear entre X3 e X4:')\n print('Coeficiente linear:', b)\n print('Coeficiente angular:', a)\n \n #fig = plt.figure(figsize=(11, 6))\n #plt.scatter(dados_x, dados_y, c='red', alpha=0.8)\n plt.title('x versus y')\n plt.xlabel('eixo x')\n plt.ylabel('eixo y')\n \n plt.plot((dados_x.min(), dados_x.max()), (a*dados_x.min()+b, a*dados_x.max()+b), color='blue')\n \n #plt.tight_layout()", "_____no_output_____" ], [ "IndiceDemocracia = pd.read_excel('democracy_score_use_as_color.xlsx')\nIDH = pd.read_excel('hdi_human_development_index.xlsx')\nAid = pd.read_excel('aid_received_total_us_inflation_adjusted.xlsx')\nAidperp = pd.read_excel(\"aid_received_per_person_current_us.xlsx\")", "_____no_output_____" ] ], [ [ "## Selecionado o intervalo a ser estudado e limpando a database:", "_____no_output_____" ] ], [ [ "IDH_idx = IDH.set_index('geo')\nanos = str.split(str(np.arange(1990,2011,1)))\nIDHa = IDH_idx.loc[:,[1995, 1996, 1997, 1998, 1999, 2000]]\nIDHa.head()", "_____no_output_____" ], [ "\nAidperp_idx = Aidperp.set_index('geo')\nAidperpa = Aidperp_idx.loc[:,[1995, 1996, 1997, 1998, 1999, 2000]]\nAidperpa.head()\n", "_____no_output_____" ], [ "\nIndiceDemocracia.head()\nIndiceDemocracia_idx = IndiceDemocracia.set_index('geo')\nIndiceDemocraciaa = IndiceDemocracia_idx.loc[:,[1995, 1996, 1997, 1998, 1999, 2000]]\nIndiceDemocracia1995 = IndiceDemocracia_idx.loc[:,[1995]]\nlimpo1995 = IndiceDemocracia1995", "_____no_output_____" ], [ "data = IDHa.join(Aidperpa, how='inner',lsuffix='_IDH', rsuffix='_Aid')\ndatia = data.join(IndiceDemocraciaa, how=\"inner\",rsuffix='_Demo',lsuffix='_Aid' )\ndados = datia.dropna()\ndados", "_____no_output_____" ], [ "datia.to_excel( 'Projeto1dataframe2.xlsx', index=True)", "_____no_output_____" ] ], [ [ "## Classificando os Países", "_____no_output_____" ] ], [ [ "def categoria(x):\n if x == 10:\n return 'Full Democracy'\n elif x >= 6:\n return 'Medium'\n elif x >= 1:\n return 
'Anocracy'\n elif x >= -5:\n return 'Closed Anocracy'\n else:\n return \"Autocracy\"", "_____no_output_____" ], [ "dados[1995]\ndados[1995] = dados[1995].apply(categoria)\ndados[1996] = dados[1996].apply(categoria)\ndados[1997] = dados[1997].apply(categoria)\ndados[1998] = dados[1998].apply(categoria)\ndados[1999] = dados[1999].apply(categoria)\ndados[2000] = dados[2000].apply(categoria)\nIndiceDemocracia1995[1995] = IndiceDemocracia1995[1995].apply(categoria)", "/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \n/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n after removing the cwd from sys.path.\n/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"\n/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \n/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:7: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n import sys\n" ] ], [ [ "## Criando um novo data frame para a análise", "_____no_output_____" ] ], [ [ "Total = dados\nTotalIDH = Total[\"2000_IDH\"] - Total[\"1995_IDH\"]\nTotalAid = Total[\"2000_Aid\"] + Total[\"1995_Aid\"] + Total[\"1996_Aid\"] + Total[\"1997_Aid\"] + Total[\"1998_Aid\"] + Total[\"1999_Aid\"]\nTotalsum = pd.concat([TotalAid, TotalIDH], axis = 1)\nTotalsum = Totalsum.join(IndiceDemocracia1995, how=\"inner\")\nTotalsum.columns = ['Ajudatotal5anos', 'DeltaIDH', 'Governo']\nTotalsum.head()\n#plt.scatter(TotalAid,TotalIDH,c=\"#7B68EE\",alpha=0.8)", "_____no_output_____" ], [ "limpo = pd.concat([TotalAid, TotalIDH], axis = 1)\nTotalimpo = limpo.join(limpo1995, how=\"inner\")\nTotalimpo.columns = ['Ajudatotal5anos', 'DeltaIDH', 'Governo']\nTotalimpo.head()", "_____no_output_____" ], [ "#Totalsum.to_excel( 'Totalsum.xlsx', index=True)", "_____no_output_____" ] ], [ [ "## Análise 
preliminar\n\nNāo é possível determinar uma correlaçāo entre os níveis de regime e sua variaçāo de crescimento em 5 anos.\n", "_____no_output_____" ] ], [ [ "plt.scatter(Totalimpo[\"Governo\"],Totalimpo[\"DeltaIDH\"],c=\"Blue\",alpha=0.8)", "_____no_output_____" ], [ "governos = ['Full Democracy', \"Medium\", \"Anocracy\", \"Closed Anocracy\", \"Autocracy\"]\n\ncores = [\"#0000FF\", \"#81DAF5\", \"#FFBF00\",\"#FFFF00\", \"#DF0101\"]\n\nfor e in range(5):\n dedo = Totalsum[Totalsum['Governo']==governos[e]]\n cor = cores[e]\n #reta(dedo[\"Ajudatotal5anos\"],dedo[\"DeltaIDH\"])\n plt.scatter(dedo[\"Ajudatotal5anos\"],dedo[\"DeltaIDH\"],c=cor,alpha=0.8)\n", "_____no_output_____" ] ], [ [ "### Democracia Plena\n\nSão poucos os países desta categoria que recebem auxílio, como pode ser observado no gráfico abaixo e tabela abaixo. Não se pode traçar uma correlação clara, porém os países que se destacam pertencem a instituições sólidas, como a União Europeia, ou como a Costa Rica que tem uma das democracias mais antigas do mundo e não tem um exército nacional. \n\n", "_____no_output_____" ] ], [ [ "DemocraciaPlena = Totalsum[Totalsum['Governo'] == 'Full Democracy']\nplt.scatter(DemocraciaPlena[\"Ajudatotal5anos\"],DemocraciaPlena[\"DeltaIDH\"],c='blue',alpha=0.8)\nDemocraciaPlena", "_____no_output_____" ] ], [ [ "### Democracia\nNeste bloco se encontram as nações que possuem um governo democrata, porém apresentam problemas. Novamente não é possível identificar uma correlação de que um aumento no auxílio financeiro interfere no crescimento do IDH.", "_____no_output_____" ] ], [ [ "Democracia = Totalsum[Totalsum['Governo'] == 'Medium']\nplt.scatter(Democracia[\"Ajudatotal5anos\"],Democracia[\"DeltaIDH\"],c='blue',alpha=0.8)\nDemocracia", "_____no_output_____" ] ], [ [ "### Anocracia\nNeste bloco se encontram as nações que possuem uma especie de mistura de democracia com autocracia, um regime instavel sujeito a mudanças bruscas de liderança auxilio leve a um maior crescimento no seu IDH. Apesar disto estes países apresentam uma alta taxa de crescimento, como Moçambique, que em 1993 acabava de sair de uma guerra civil, e já em 1995 implementava reformas democraticas e economicas, algo que ajuda a explicar o seu desenvolvimento.", "_____no_output_____" ] ], [ [ "Anocracy = Totalsum[Totalsum['Governo'] == 'Anocracy']\nplt.scatter(Anocracy[\"Ajudatotal5anos\"],Anocracy[\"DeltaIDH\"],c='yellow',alpha=0.8)\nAnocracy", "_____no_output_____" ] ], [ [ "### Anocracia fechada\nEsses países têm um viés mais autocrata do que democráticos, mas ainda possuem algumas características democráticas. Também não se possui uma correlação, os estados que tiveram crescimento alto como a Uganda, firmaram acordos com instituiçōes sólidas como o FMI eo banco Mundial\n", "_____no_output_____" ] ], [ [ "ClosedAnocracy = Totalsum[Totalsum['Governo'] == 'Closed Anocracy']\nplt.scatter(ClosedAnocracy[\"Ajudatotal5anos\"],ClosedAnocracy[\"DeltaIDH\"],c='orange',alpha=0.8)\nClosedAnocracy", "_____no_output_____" ] ], [ [ "### Autocracia\nEstes são regimes fechados e ditatoriais. 
O capital doado é um dos mais altos vistos até agora, isto se deve pelos conflitos que estes se encontravam como em Serra Leoa.", "_____no_output_____" ] ], [ [ "Autocracy = Totalsum[Totalsum['Governo'] == 'Autocracy']\nplt.scatter(Autocracy[\"Ajudatotal5anos\"],Autocracy[\"DeltaIDH\"],c='red',alpha=0.8)\nAutocracy", "_____no_output_____" ] ], [ [ "## Conclusão\n\nComo visto nas análises anteriores o fator democrático não entra como um grande fator no crescimento do IDH mas sim o quão sólidas são as instituições dos países beneficiários tal como os acordos que estes firmão com seus doadores. E ainda assim o crescimento não está correlacionado com o tamanho da ajuda, como visto em todos os gráficos, eles se mantêm em faixas.\n", "_____no_output_____" ], [ "### Bibliografia\n\n«Human Development Report 2016 – \"Human Development for Everyone\"» (PDF) (em inglês). Human Development Report (Human Development Report Office) - United Nations Development Programme. Consultado em 22 de março de 2017.\n\n«Ministério das Relações Exteriores do Brasil»\n\n«Fíji».\n\n«Código de Redação da Comunidade Europeia»\n\n\"Mozambique | Partner Countries and Activities | English | Þróunarsamvinnustofnun Íslands\" (in Icelandic). Iceida.is. 1 June 1999. Retrieved 2 May 2010.\n\n World DataBank World Development Indicators Mozambique The World Bank (2013), Retrieved 5 April 2013\n \nAfrican Economic Outlook\n\n\"2015 Human Development Report\". United Nations \n\n\"Численность населения Республики Казахстан по отдельным этносам на начало 2016 года\". Archived from the original on 14 October 2017. Retrieved 24 May 2017.\n\n\"Kazakh TV – Kazakhstan enters top 50 most competitive countries\"\n\nNational Institute of Statistics of Rwanda 2015.\n\nHuman Rights Watch & Wells 2008", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec5716d2432f7c69e7c5bfb99ead3650f3960731
11,280
ipynb
Jupyter Notebook
notebook/tracker.ipynb
VincentWang25/Kaggle_TGBR
9a93d8cf75ae0a9716a72cb6da49645eac63a641
[ "Apache-2.0" ]
null
null
null
notebook/tracker.ipynb
VincentWang25/Kaggle_TGBR
9a93d8cf75ae0a9716a72cb6da49645eac63a641
[ "Apache-2.0" ]
null
null
null
notebook/tracker.ipynb
VincentWang25/Kaggle_TGBR
9a93d8cf75ae0a9716a72cb6da49645eac63a641
[ "Apache-2.0" ]
null
null
null
38.108108
175
0.523227
[ [ [ "import numpy as np\nfrom norfair import Detection, Tracker\ndef init_tracker():\n tracker = Tracker(\n distance_function=euclidean_distance, \n distance_threshold=30,\n hit_inertia_min=0,\n hit_inertia_max=3,\n initialization_delay=2,\n ) \n print(\"----init tracker-----\")\n return tracker\n\n# Helper to convert bbox in format [x_min, y_min, x_max, y_max, score] to norfair.Detection class\ndef to_norfair(detects, frame_id):\n result = []\n for x_min, y_min, x_max, y_max, score in detects:\n xc, yc = (x_min + x_max) / 2, (y_min + y_max) / 2\n w, h = x_max - x_min, y_max - y_min\n result.append(Detection(points=np.array([xc, yc]), scores=np.array([score]), data=np.array([w, h, frame_id])))\n \n return result\n\ndef euclidean_distance(detection, tracked_object):\n return np.linalg.norm(detection.points - tracked_object.estimate)\n\ndef tracking_function(tracker, frame_id, bboxes, scores, best_conf, num_prev_bbox=None, reinit=False):\n \n detects = []\n predictions = []\n if reinit:\n tracker = init_tracker()\n \n if len(scores)>0:\n for i in range(len(bboxes)):\n # remember to check\n if scores[i] <= best_conf:\n continue\n box = bboxes[i]\n score = scores[i]\n x_min = int(box[0])\n y_min = int(box[1])\n bbox_width = int(box[2])\n bbox_height = int(box[3])\n detects.append([x_min, y_min, x_min+bbox_width, y_min+bbox_height, score])\n predictions.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height))\n# print(predictions[:-1])\n # Update tracks using detects from current frame\n tracked_objects = tracker.update(detections=to_norfair(detects, frame_id))\n print(\"frame_id {}, number of notrack pred {}, number of tracked_objects {}, their frame_id {}, box x, y {}\".format(frame_id, len(bboxes), len(tracked_objects),\n [o.last_detection.data[-1] for o in tracked_objects], \n [o.estimate[0] for o in tracked_objects]))\n print(\"hit counter {}\".format([o.hit_counter for o in tracked_objects ]) )\n to_add_preds = []\n for tobj in tracked_objects:\n bbox_width, bbox_height, last_detected_frame_id = tobj.last_detection.data\n if last_detected_frame_id == frame_id: # Skip objects that were detected on current frame\n continue\n xc, yc = tobj.estimate[0]\n x_min, y_min = int(round(xc - bbox_width / 2)), int(round(yc - bbox_height / 2))\n\n #exclude those in the edge\n if (x_min + bbox_width >= 1279) or (y_min + bbox_height) >= 719 or (x_min <= 1) or (y_min <= 1):\n continue\n score = tobj.last_detection.scores[0] \n track_pred = np.array([[x_min, y_min, bbox_width, bbox_height]])\n #to_add_preds.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height))\n predictions.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height))\n \n \n# if (num_prev_bbox is None or (num_prev_bbox - len(predictions) + 1) >= len(to_add_preds)):\n# predictions.extend(to_add_preds)\n \n return predictions, tracker", "_____no_output_____" ], [ "bboxes = [[[1,1,1,1]] for _ in range(5)] + [[] for _ in range(7)]\nconfs = [[1] for i in range(5)] + [[] for _ in range(7)]\n\ntracker = init_tracker()\nfor i in range(len(bboxes)):\n p, tracker = tracking_function(tracker, i, bboxes[i], confs[i], 0, num_prev_bbox=None, reinit=i%3==0)\n print(p)", "----init tracker-----\n----init tracker-----\nframe_id 0, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 1']\nframe_id 1, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 
1']\nframe_id 2, number of notrack pred 1, number of tracked_objects 1, their frame_id [2], box x, y [array([1.5, 1.5])]\nhit counter [3]\n['1.00 1 1 1 1']\n----init tracker-----\nframe_id 3, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 1']\nframe_id 4, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 1']\nframe_id 5, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\n----init tracker-----\nframe_id 6, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 7, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 8, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\n----init tracker-----\nframe_id 9, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 10, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 11, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\n" ], [ "bboxes = [[[1,1,1,1]] for _ in range(5)] + [[] for _ in range(7)]\nconfs = [[1] for i in range(5)] + [[] for _ in range(7)]\n\ntracker = init_tracker()\nfor i in range(len(bboxes)):\n f = i * 7\n p, tracker = tracking_function(tracker, f, bboxes[i], confs[i], 0, num_prev_bbox=None, reinit=False)\n print(p)", "----init tracker-----\nframe_id 0, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 1']\nframe_id 7, number of notrack pred 1, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n['1.00 1 1 1 1']\nframe_id 14, number of notrack pred 1, number of tracked_objects 1, their frame_id [14], box x, y [array([1.5, 1.5])]\nhit counter [3]\n['1.00 1 1 1 1']\nframe_id 21, number of notrack pred 1, number of tracked_objects 1, their frame_id [21], box x, y [array([1.5, 1.5])]\nhit counter [4]\n['1.00 1 1 1 1']\nframe_id 28, number of notrack pred 1, number of tracked_objects 1, their frame_id [28], box x, y [array([1.5, 1.5])]\nhit counter [3]\n['1.00 1 1 1 1']\nframe_id 35, number of notrack pred 0, number of tracked_objects 1, their frame_id [28], box x, y [array([1.5, 1.5])]\nhit counter [2]\n[]\nframe_id 42, number of notrack pred 0, number of tracked_objects 1, their frame_id [28], box x, y [array([1.5, 1.5])]\nhit counter [1]\n[]\nframe_id 49, number of notrack pred 0, number of tracked_objects 1, their frame_id [28], box x, y [array([1.5, 1.5])]\nhit counter [0]\n[]\nframe_id 56, number of notrack pred 0, number of tracked_objects 1, their frame_id [28], box x, y [array([1.5, 1.5])]\nhit counter [-1]\n[]\nframe_id 63, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 70, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\nframe_id 77, number of notrack pred 0, number of tracked_objects 0, their frame_id [], box x, y []\nhit counter []\n[]\n" ], [ "confs", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
ec5719d2e19e7b4b43e336362d6740fb8f2995c0
10,646
ipynb
Jupyter Notebook
src/lab3/030_ServerPerformance.ipynb
dimitarpg13/NVIDIA_GTC_2022_March
a16141fa5b0328246717575cb06812e006bcd540
[ "Apache-2.0" ]
null
null
null
src/lab3/030_ServerPerformance.ipynb
dimitarpg13/NVIDIA_GTC_2022_March
a16141fa5b0328246717575cb06812e006bcd540
[ "Apache-2.0" ]
null
null
null
src/lab3/030_ServerPerformance.ipynb
dimitarpg13/NVIDIA_GTC_2022_March
a16141fa5b0328246717575cb06812e006bcd540
[ "Apache-2.0" ]
null
null
null
44.919831
756
0.66316
[ [ [ "<a href=\"https://www.nvidia.com/dli\"> <img src=\"images/DLI_Header.png\" alt=\"Header\" style=\"width: 400px;\"/> </a>", "_____no_output_____" ], [ "# 3.0 Server Performance\n\nIn this notebook, you'll implement the optimization techniques you've learned, and profile the resulting model in a more formal way.\n\n**[3.1 Assessing the impact of Optimizations](#3.1-Assessing-the-impact-of-Optimizations)**<br>\n&nbsp; &nbsp; &nbsp; &nbsp; [3.1.1 Exercise: Profile the Model](#3.1.1-Exercise:-Profile-the-Model)<br>\n**[3.2 Monitoring and Responding to Performance Fluctuations](#3.2-Monitoring-and-Responding-to-Performance-Fluctuations)**<br>\n&nbsp; &nbsp; &nbsp; &nbsp; [3.2.1 Viewing Prometheus Metrics](#3.2.1-Viewing-Prometheus-Metrics)<br>\n&nbsp; &nbsp; &nbsp; &nbsp; [3.2.2 Interpreting the Metrics](#3.2.2-Interpreting-the-Metrics)<br>", "_____no_output_____" ], [ "We'll analyze the impact of our configuration changes, as well as how the nature of the request pattern affects our inferencing capability. We will generate structured reports aimed at comparing the performance of a TorchScript-based model with no advanced Triton features activated, to a TensorRT ONNX model with the key features you've learned enabled. \n\nWe will not only focus on the basic metrics that we have analyzed in the previous parts of the class (throughput and latency), but also try to understand which factors affect the latency of our solution (e.g. network communication).\n\nFinally, we will look at the tools that can be used to monitor and manage the performance of our solution in production, and look at how they can be used to implement more advanced functionality like auto-scaling.", "_____no_output_____" ], [ "# 3.1 Assessing the impact of Optimizations\nThe performance tool that we've been using has an additional feature: not only does it display the results on the screen, it also saves the data in a tabular format to the following location: \n\n<code>\"./results/${MODEL_NAME}/results${RESULTS_ID}_${TIMESTAMP}.csv\"</code>\n\nTo assess the impact of the various optimizations, let's take advantage of the previously generated log files.", "_____no_output_____" ], [ "## 3.1.1 Exercise: Profile the Model\nWe executed <code>bertQA-torchscript</code> as well as <code>bertQA-onnx-trt-dynbatch</code> earlier, so we should already have the logs from that execution saved. Let's look at the content of the appropriate log folders. If you have executed the performance tool more than once, you might see multiple log files with different time stamps created.", "_____no_output_____" ] ], [ [ "!ls ./results/bertQA-torchscript/\n!ls ./results/bertQA-onnx-trt-dynbatch", "_____no_output_____" ] ], [ [ "Please download both of the CSV files (browse in the left pane and right-click to find \"download\"). 
In order to generate the execution reports follow the steps below to import the log files of the <code>bertQA-onnx-trt-dynbatch</code>:\n\n<!-- - Open [this spreadsheet](Triton%20Inference%20Server%20Performance%20Results.xlsx) -->\n- Open <a href=\"https://docs.google.com/spreadsheets/d/1S8h0bWBBElHUoLd2SOvQPzZzRiQ55xjyqodm_9ireiw/edit#gid=1572240508\">this spreadsheet</a>\n- Make a copy from the File menu \"Make a copy…\"\n- Open the copy\n- Select the A1 cell on the \"Raw Data\" tab\n- From the File menu select \"Import…\"\n- Select \"Upload\" and upload the file\n- Select \"Replace data at selected cell\" and then select the \"Import data\" button\n\nOnce you have completed the above steps you should be presented with the following plots in the \"Components of Latency\" tab and \"Latency vs. Throughput\" tab, respectively: <br/>\n<img width=600 src=\"images/ComponentsOfLatency1.png\"/> <img width=600 src=\"images/LatencyVsThrughput1.png\"/> <br/>", "_____no_output_____" ], [ "Please repeat the above for the <code>bertQA-torchscript</code> model. (Remember that the TorchScript variant was executed at batch 8). <br>\nHow do those compare? Discuss with the instructor.\n\nImages of the analysis for the `bertQA-torchscript` model can also be found <a href=\"images/torchscript_latency1.png\">here</a> and <a href=\"images/torchscript_latency2.png\">here</a>.", "_____no_output_____" ], [ "# 3.2 Monitoring and Responding to Performance Fluctuations\n\nUnderstanding the performance of your inference server is not only critical at the initial planning stage but equally important throughout the lifetime of the application. The ability to capture metrics describing server performance is not only central to the ability to respond to issues, but also is a foundation of more advanced features like automatic scaling. The diagram below demonstrates a simplified view of the Triton deployment architecture. By combining Triton with technologies like [Kubernetes](https://kubernetes.io/docs/home/), you can, with relative ease, create a configuration that will automatically scale with the increased demand within your data center or, if necessary, burst the excess workload to the cloud/clouds. <br/>\n\n<img width=700 src=\"images/DeploymentArchitecture.png\"/>", "_____no_output_____" ], [ "## 3.2.1 Viewing Prometheus Metrics\nTriton exposes [Prometheus](https://prometheus.io/) performance metrics for monitoring on port 8002 by default. These include metrics on GPU power usage, GPU memory, request counts, and latency measures. More documentation on individual metrics can be found in the <a href=\"https://docs.nvidia.com/deeplearning/triton-inference-server/master-user-guide/docs/metrics.html\">Triton Metrics documentation</a>. 
For now, let's query the metrics captured throughout our performance runs:", "_____no_output_____" ] ], [ [ "# Set the server hostname and check it - you should get a message that \"Triton Server is ready!\"\ntritonServerHostName = \"triton\"\n!./utilities/wait_for_triton_server.sh {tritonServerHostName}", "_____no_output_____" ], [ "# Use a curl command to request the metrics\nprometheus_url = tritonServerHostName + \":8002/metrics\"\n!curl -v {prometheus_url}", "_____no_output_____" ] ], [ [ "## 3.2.2 Interpreting the Metrics\nThe Prometheus metrics output is a list of metrics, where each is provided with the form:\n\n```\n# HELP <metric_name and description>\n# TYPE <metric_name and type>\nmetric_name{gpu_uuid=\"GPU-xxxxxx\",...} <data>\n```\n\nFor example, if the inference server models includes two models, you should see among the list some metrics that are specific to each model, and other metrics that are more general about the GPU they both share.<br>\n\n#### Count Example\nThe following example indicates that the inference count for the `bertQA-onnx-trt-dynbatch` model is 10,105 so far, while the inference count for `bertQA-torchscript` model is 717.<br>What do your results show?\n```\n# HELP nv_inference_count Number of inferences performed\n# TYPE nv_inference_count counter\nnv_inference_count{gpu_uuid=\"GPU-640c6e00-43dd-9fae-9f9a-cb6af82df8e9\",model=\"bertQA-onnx-trt-dynbatch\",version=\"1\"} 10105.000000\nnv_inference_count{gpu_uuid=\"GPU-640c6e00-43dd-9fae-9f9a-cb6af82df8e9\",model=\"bertQA-torchscript\",version=\"1\"} 717.000000\n```\n\n#### GPU Power Example\nThe following example indicates that current GPU power usage is about 40 watts.<br>What do your results show?\n```\n# HELP nv_gpu_power_usage GPU power usage in watts\n# TYPE nv_gpu_power_usage gauge\nnv_gpu_power_usage{gpu_uuid=\"GPU-640c6e00-43dd-9fae-9f9a-cb6af82df8e9\"} 39.958000\n```", "_____no_output_____" ], [ "#### What Do Your Results Indicate?\n\n* Can you identify the current utilization rate? \n* Why is it zero? \n* How much memory are we using? \n* Why do you think we are using the GPU memory even though there are no requests executed against our server? \n\nDiscuss with the instructor.", "_____no_output_____" ], [ "<h3 style=\"color:green;\">Congratulations!</h3><br>\nYou've successfully configured optimizations and learned how to profile the model.<br>\n\nPlease move to the last part of the class to learn how to build custom applications that take advantage of Triton features:<br>\n[4.0 Using the Model](040_UsingTheModel.ipynb)", "_____no_output_____" ], [ "<a href=\"https://www.nvidia.com/dli\"> <img src=\"images/DLI_Header.png\" alt=\"Header\" style=\"width: 400px;\"/> </a>", "_____no_output_____" ] ] ]
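The cells above query the metrics endpoint with `curl`; the text format described in 3.2.2 (`metric_name{labels} value`) is also easy to read programmatically. A minimal sketch (not part of the original notebook) that pulls one counter per model, assuming the `requests` package is available and the same `triton:8002` endpoint used above:

```python
import requests

def get_metric(name, host="triton", port=8002):
    """Return {label-set: value} for one Prometheus metric exposed by Triton."""
    text = requests.get(f"http://{host}:{port}/metrics").text
    values = {}
    for line in text.splitlines():
        if line.startswith(name + "{"):           # skip the "# HELP" / "# TYPE" lines
            labels, value = line.rsplit(" ", 1)   # e.g. nv_inference_count{model="..."} 717.0
            values[labels] = float(value)
    return values

for labels, value in get_metric("nv_inference_count").items():
    print(labels, "->", value)
```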
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
ec572737a5af499ffc2b657d2cc7923cf1aba2e0
9,208
ipynb
Jupyter Notebook
notebooks/SSSN/.ipynb_checkpoints/analyze_drdh-checkpoint.ipynb
cunningham-lab/epi
38febae7035ca921334a616b0f396b3767bf18d4
[ "Apache-2.0" ]
8
2020-02-28T21:22:42.000Z
2021-02-02T18:44:13.000Z
notebooks/SSSN/.ipynb_checkpoints/analyze_drdh-checkpoint.ipynb
cunningham-lab/epi
38febae7035ca921334a616b0f396b3767bf18d4
[ "Apache-2.0" ]
8
2020-01-24T22:50:35.000Z
2021-05-10T23:53:33.000Z
notebooks/SSSN/.ipynb_checkpoints/analyze_drdh-checkpoint.ipynb
cunningham-lab/epi
38febae7035ca921334a616b0f396b3767bf18d4
[ "Apache-2.0" ]
2
2020-04-01T15:40:39.000Z
2021-07-14T21:56:15.000Z
30.490066
107
0.462967
[ [ [ "%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport scipy\n#import matplotlib.colors\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom epi.models import Parameter, Model\nfrom epi.normalizing_flows import NormalizingFlow\nfrom epi.util import pairplot, AugLagHPs\nimport os\nfrom epi.example_eps import load_W\n\nneuron_inds = {'E':0, 'P':1, 'S':2, 'V':3}", "_____no_output_____" ], [ "D = 4\nlb = -10.*np.ones((D,))\nub = 10.*np.ones((D,))\n\ndh = Parameter(\"dh\", D, lb=lb, ub=ub)\nparameters = [dh]\n\nalphas = ['E', 'P', 'S', 'V']\nnum_alphas = len(alphas)\n\nblack = np.array([0, 0, 0])\nblue = '#4769A0'\nred = '#AF3A31'\ngreen = '#287C5A'\ncolors = [black, blue, red, green]", "_____no_output_____" ], [ "purple = '#4C0099'\n\ndef plot_T_x(T_x, T_x_sim, bins=30, xmin=None, xmax=None, xlabel=None, ylim=None, fontsize=14):\n if xmin is not None and xmax is not None:\n _range = (xmin, xmax)\n else:\n _range = None\n plt.figure()\n if T_x is None:\n plt.hist(T_x_sim, bins=bins, range=_range, color=purple, alpha=0.5)\n else:\n n, bins, patches = plt.hist(T_x, bins=bins, color='k', range=_range, alpha=0.5)\n plt.hist(T_x_sim, bins=bins, color=purple, alpha=0.5)\n if ylim is not None:\n plt.ylim(ylim)\n if xlabel is not None:\n plt.xlabel(xlabel, fontsize=fontsize)\n plt.ylabel('count', fontsize=fontsize)\n return None", "_____no_output_____" ], [ "import tensorflow as tf\n\ndef euler_sim_stoch(f, x_init, dt, T):\n x = x_init\n for t in range(T):\n x = x + f(x) * dt\n return x[:, :, :, 0]\n\ndef euler_sim_stoch_traj(f, x_init, dt, T):\n x = x_init\n xs = [x_init]\n for t in range(T):\n x = x + f(x) * dt\n xs.append(x)\n return tf.concat(xs, axis=3)\n\nV_INIT = tf.constant(-65.*np.ones((1,4,1)), dtype=np.float32)\n\nk = 0.3\nn = 2.\nv_rest = -70.\n\ndt = 0.005\n\nN = 5\nT = 100\n\ndef f_r(v):\n return k*(tf.nn.relu(v-v_rest)**n)\n\ndef SSSN_sim(h):\n h = h[:,None,:,None]\n\n W = load_W()\n sigma_eps = 0.2*np.array([1., 0.5, 0.5, 0.5])\n tau = np.array([0.02, 0.01, 0.01, 0.01])\n tau_noise = np.array([0.05, 0.05, 0.05, 0.05])\n\n W = W[None,:,:,:]\n sigma_eps = sigma_eps[None,None,:,None]\n tau = tau[None,None,:,None]\n tau_noise = tau_noise[None,None,:,None]\n\n _v_shape = tf.ones((h.shape[0], N, 4, 1), dtype=tf.float32)\n v_init = _v_shape*V_INIT\n eps_init = 0.*_v_shape\n y_init = tf.concat((v_init, eps_init), axis=2)\n\n def f(y):\n v = y[:,:,:4,:]\n eps = y[:,:,4:,:]\n B = tf.random.normal(eps.shape, 0., np.sqrt(dt))\n\n dv = (-v + v_rest + h + eps + tf.matmul(W, f_r(v))) / tau\n deps = (-eps + (np.sqrt(2.*tau_noise)*sigma_eps*B/dt)) / tau_noise\n\n return tf.concat((dv, deps), axis=2)\n\n v_ss = euler_sim_stoch(f, y_init, dt, T)\n return v_ss", "_____no_output_____" ], [ "M = 1000\n\n# Define model\nalphas = ['E', 'P', 'S', 'V']\n\nnpzfile = np.load('SV_mode.npz')\nz_mode1 = npzfile['z_mode1'][0]\nDH = z_mode1[4]\nH = z_mode1[:4] + DH*np.array([1., 1., 0., 0.])\n\nfigdir = \"/Users/seanbittner/Documents/epi/written/V1_model/figs/FigSX/\"\nmu = np.array([0., 1.])\n\nnus = {'E':0.5, 'P':0.5, 'S':0.5, 'V':0.5}\n\nfor alpha in alphas:\n neuron_ind = neuron_inds[alpha]\n nu = nus[alpha]\n name = \"SSSN_drdh_%s\" % alpha\n print(name)\n dh = Parameter(\"dh\", D, lb=lb, ub=ub)\n parameters = [dh]\n model = Model(name, parameters)\n # Define eps\n def dr(dh):\n h = tf.constant(H[None,:], dtype=tf.float32)\n\n x1 = f_r(SSSN_sim(h)[:,:,neuron_ind])\n x2 = f_r(SSSN_sim(h + dh)[:,:,neuron_ind])\n\n diff = tf.reduce_mean(x2 - x1, axis=1, 
keepdims=True)\n T_x = tf.concat((diff, diff ** 2), axis=1)\n\n return T_x\n model.set_eps(dr)\n\n epi_df = model.get_epi_df()\n epi_df['c0'] = [al_hps['c0'] for al_hps in epi_df['AL_hps']]\n paths = sorted(epi_df['path'].unique())\n \n if len(paths) == 0:\n continue\n\n best_Hs = []\n convergeds = []\n best_ks = []\n for i, path in enumerate(paths):\n #if (alpha == 'E'):\n # if i in [0,3]:\n # best_Hs.append(np.nan)\n # convergeds.append(False)\n # best_ks.append(0)\n # continue\n epi_df2 = epi_df[epi_df['path'] == path]\n df_row = epi_df2.iloc[0]\n init = df_row['init']\n init_params = {\"mu\":init[\"mu\"], \"Sigma\":init[\"Sigma\"]}\n nf = model._df_row_to_nf(df_row)\n aug_lag_hps = model._df_row_to_al_hps(df_row)\n best_k, converged, best_H = model.get_convergence_epoch(init_params, nf, mu, aug_lag_hps, \n alpha=0.05, nu=nu)\n best_Hs.append(best_H)\n convergeds.append(converged)\n best_ks.append(best_k)\n\n bestHs = np.array(best_Hs)\n best_ks = np.array(best_ks)\n\n best_Hs = np.array([x if x is not None else np.nan for x in best_Hs])\n ind = np.nanargmax(best_Hs)\n\n path = paths[ind]\n best_k = int(best_ks[ind])\n best_H = best_Hs[ind]\n\n print('ind', ind, path)\n print('best k', best_k)\n\n epi_df2 = epi_df[epi_df['path'] == path]\n df_row = epi_df2.iloc[0]\n init = df_row['init']\n init_params = {\"mu\":init[\"mu\"], \"Sigma\":init[\"Sigma\"]}\n nf = model._df_row_to_nf(df_row)\n aug_lag_hps = model._df_row_to_al_hps(df_row)\n\n dist = model._get_epi_dist(best_k, init_params, nf, mu, aug_lag_hps)\n\n z, log_q_z = dist.nf(M)\n T_x = model.eps(z).numpy()\n z = z.numpy()\n log_q_z = log_q_z.numpy()\n\n neurons = [alpha for alpha in neuron_inds.keys()]\n labels = [r'$dh_%s$' % alpha for alpha in neurons]\n fig, axs, = pairplot(z, range(4), labels, s=25, figsize=(6,6),\n fontsize=18, lb=dist.nf.lb, ub=dist.nf.ub,\n outlier_stds=3,\n c=log_q_z, c_label='')\n \n plt.savefig(figdir + name + '_z.png')\n\n plot_T_x(None, T_x[:,0], xmin=-3., xmax=3., xlabel=r'$dx_%s$' % alpha, fontsize=20)\n plt.tight_layout()\n plt.savefig(figdir + name + '_Tx.png')\n\n\n\n\n", "SSSN_drdh_E\n0.00_0.29\r" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec572bfad471a86b2df7099dc500fc8d6b13290b
23,340
ipynb
Jupyter Notebook
model/rnn_glove.ipynb
windsuzu/analysis-and-classification-of-thesis
b84c4427528490e1feb8e914f0cb47d7398033bf
[ "MIT" ]
null
null
null
model/rnn_glove.ipynb
windsuzu/analysis-and-classification-of-thesis
b84c4427528490e1feb8e914f0cb47d7398033bf
[ "MIT" ]
null
null
null
model/rnn_glove.ipynb
windsuzu/analysis-and-classification-of-thesis
b84c4427528490e1feb8e914f0cb47d7398033bf
[ "MIT" ]
2
2019-11-08T06:34:21.000Z
2020-05-20T15:43:04.000Z
23,340
23,340
0.628449
[ [ [ "# Add Frameworks and Libraries", "_____no_output_____" ] ], [ [ "# Import libraries\nimport os\nimport pandas as pd\nimport pickle\nimport nltk\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils.class_weight import compute_sample_weight\nfrom multiprocessing import Pool\nfrom nltk.tokenize import word_tokenize\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, DataLoader\nfrom google.colab import drive", "_____no_output_____" ] ], [ [ "# Setup Environments", "_____no_output_____" ] ], [ [ "# nltk, plt setup\nnltk.download('punkt')\n%matplotlib inline\n\n# drive setup\ndrive.mount('/content/drive')\ndataset_url = \"/content/drive/My Drive/NCKUDMPH2/task1/dataset/\"\nprogram_url = \"/content/drive/My Drive/NCKUDMPH2/task1/program/\"\n\n# setup gpu\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\nDrive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ "# Setup Hyperparameters", "_____no_output_____" ] ], [ [ "embedding_dim = 300\nhidden_dim = 512\nlearning_rate = 1e-4\nmax_epoch = 15\nbatch_size = 64", "_____no_output_____" ] ], [ [ "# Load Data and split it into Train/Dev/Test", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv(dataset_url+'task1_trainset.csv', dtype=str)\n\n# Drop unuse columns\ndataset.drop('Title',axis=1,inplace=True)\ndataset.drop('Categories',axis=1,inplace=True)\ndataset.drop('Created Date',axis=1, inplace=True)\ndataset.drop('Authors',axis=1,inplace=True)\n\n# Data Partition\ntrainset, validset = train_test_split(dataset, test_size=0.1, random_state=2020)\n\ntrainset.to_csv(dataset_url+'trainset.csv',index=False)\nvalidset.to_csv(dataset_url+'validset.csv',index=False)\n\n# Test Data\ndataset = pd.read_csv(dataset_url+'task1_private_testset.csv')\ndataset.drop('Title',axis=1,inplace=True)\ndataset.drop('Categories',axis=1,inplace=True)\ndataset.drop('Created Date',axis=1, inplace=True)\ndataset.drop('Authors',axis=1,inplace=True)\n\ndataset.to_csv(dataset_url+'testset.csv',index=False)", "_____no_output_____" ] ], [ [ "# Helpers", "_____no_output_____" ], [ "## Tokenize Helpers", "_____no_output_____" ] ], [ [ "# tokenize the words\ndef collect_words(data_path, n_workers=4):\n df = pd.read_csv(data_path, dtype=str)\n \n sent_list = []\n for i in df.iterrows():\n sent_list += i[1]['Abstract'].split('$$$')\n\n chunks = [\n ' '.join(sent_list[i:i + len(sent_list) // n_workers])\n for i in range(0, len(sent_list), len(sent_list) // n_workers)\n ]\n with Pool(n_workers) as pool:\n chunks = pool.map_async(word_tokenize, chunks)\n words = set(sum(chunks.get(), []))\n\n return words", "_____no_output_____" ] ], [ [ "## Data Formatting Helpers", "_____no_output_____" ] ], [ [ "def label_to_onehot(labels):\n label_dict = {'BACKGROUND': 0, 'OBJECTIVES':1, 'METHODS':2, 'RESULTS':3, 'CONCLUSIONS':4, 'OTHERS':5}\n onehot = [0,0,0,0,0,0]\n for l in labels.split('/'):\n onehot[label_dict[l]] = 1\n return onehot\n\ndef sentence_to_indices(sentence, word_dict):\n return [word_dict.get(word,UNK_TOKEN) for word in word_tokenize(sentence)]\n\ndef get_dataset(data_path, word_dict, n_workers=4):\n dataset = pd.read_csv(data_path, dtype=str)\n\n results = 
[None] * n_workers\n with Pool(processes=n_workers) as pool:\n for i in range(n_workers):\n batch_start = (len(dataset) // n_workers) * i\n if i == n_workers - 1:\n batch_end = len(dataset)\n else:\n batch_end = (len(dataset) // n_workers) * (i + 1)\n \n batch = dataset[batch_start: batch_end]\n results[i] = pool.apply_async(preprocess_samples, args=(batch,word_dict))\n\n pool.close()\n pool.join()\n\n processed = []\n for result in results:\n processed += result.get()\n return processed\n\ndef preprocess_samples(dataset, word_dict):\n processed = []\n for sample in tqdm(dataset.iterrows(), total=len(dataset)):\n processed.append(preprocess_sample(sample[1], word_dict))\n\n return processed\n\ndef preprocess_sample(data, word_dict):\n processed = {}\n processed['Abstract'] = [sentence_to_indices(sent, word_dict) for sent in data['Abstract'].split('$$$')]\n if 'Task 1' in data:\n processed['Label'] = [label_to_onehot(label) for label in data['Task 1'].split(' ')]\n \n return processed", "_____no_output_____" ] ], [ [ "## Data Packing Helpers", "_____no_output_____" ] ], [ [ "# Data Packing\nclass AbstractDataset(Dataset):\n def __init__(self, data, pad_idx, max_len = 500):\n self.data = data\n self.pad_idx = pad_idx\n self.max_len = max_len\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n return self.data[index]\n \n def collate_fn(self, datas):\n # get max length in this batch\n max_sent = max([len(data['Abstract']) for data in datas])\n max_len = max([min(len(sentence), self.max_len) for data in datas for sentence in data['Abstract']])\n batch_abstract = []\n batch_label = []\n sent_len = []\n for data in datas:\n # padding abstract to make them in same length\n pad_abstract = []\n for sentence in data['Abstract']:\n if len(sentence) > max_len:\n pad_abstract.append(sentence[:max_len])\n else:\n pad_abstract.append(sentence+[self.pad_idx]*(max_len-len(sentence)))\n sent_len.append(len(pad_abstract))\n pad_abstract.extend([[self.pad_idx]*max_len]*(max_sent-len(pad_abstract)))\n batch_abstract.append(pad_abstract)\n # gather labels\n if 'Label' in data:\n pad_label = data['Label']\n pad_label.extend([[0]*6]*(max_sent-len(pad_label)))\n \n batch_label.append(pad_label)\n return torch.LongTensor(batch_abstract), torch.FloatTensor(batch_label), sent_len", "_____no_output_____" ] ], [ [ "## Score Helpers", "_____no_output_____" ] ], [ [ "# Score methods\nclass F1():\n def __init__(self):\n self.threshold = 0.4\n self.n_precision = 0\n self.n_recall = 0\n self.n_corrects = 0\n self.name = 'F1'\n\n def reset(self):\n self.n_precision = 0\n self.n_recall = 0\n self.n_corrects = 0\n\n def update(self, predicts, groundTruth):\n predicts = predicts > self.threshold\n self.n_precision += torch.sum(predicts).data.item()\n self.n_recall += torch.sum(groundTruth).data.item()\n self.n_corrects += torch.sum(groundTruth.type(torch.uint8) * predicts).data.item()\n\n def get_score(self):\n recall = self.n_corrects / self.n_recall\n precision = self.n_corrects / (self.n_precision + 1e-20) #prevent divided by zero\n return 2 * (recall * precision) / (recall + precision + 1e-20)\n\n def print_score(self):\n score = self.get_score()\n return '{:.5f}'.format(score)", "_____no_output_____" ] ], [ [ "## Train Helpers", "_____no_output_____" ] ], [ [ "# Train helpers\ndef _run_epoch(epoch, training):\n model.train(training)\n if training:\n description = 'Train'\n dataset = trainData\n shuffle = True\n else:\n description = 'Valid'\n dataset = validData\n shuffle = False\n\n dataloader 
= DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=shuffle, collate_fn=dataset.collate_fn, num_workers=8)\n\n trange = tqdm(enumerate(dataloader), total=len(dataloader), desc=description)\n loss = 0\n f1_score = F1()\n for i, (x, y, sent_len) in trange:\n o_labels, batch_loss = _run_iter(x,y)\n if training:\n opt.zero_grad()\n batch_loss.backward()\n opt.step()\n\n loss += batch_loss.item()\n f1_score.update(o_labels.cpu(), y)\n\n trange.set_postfix(\n loss=loss / (i + 1), f1=f1_score.print_score())\n del o_labels, batch_loss # Flush memory\n\n if training:\n history['train'].append({'f1':f1_score.get_score(), 'loss':loss/ len(trange)})\n else:\n history['valid'].append({'f1':f1_score.get_score(), 'loss':loss/ len(trange)})\n \n \n\ndef _run_iter(x,y):\n abstract = x.to(device)\n labels = y.to(device)\n o_labels = model(abstract)\n l_loss = criteria(o_labels, labels)\n l_loss = (l_loss * sample_weights).mean()\n del abstract, labels # flush\n return o_labels, l_loss\n\ndef save(epoch):\n if not os.path.exists(program_url+'model'):\n os.makedirs(program_url+'model')\n torch.save(model.state_dict(), program_url+'model/model.pkl.'+str(epoch))\n with open(program_url+'model/history.json', 'w') as f:\n json.dump(history, f, indent=4)", "_____no_output_____" ] ], [ [ "## Submit Helpers", "_____no_output_____" ] ], [ [ "def SubmitGenerator(prediction, sampleFile, public=True, filename='prediction.csv'):\n sample = pd.read_csv(sampleFile)\n submit = {}\n submit['order_id'] = list(sample.order_id.values)\n redundant = len(sample) - prediction.shape[0]\n if public:\n submit['BACKGROUND'] = list(prediction[:,0]) + [0]*redundant\n submit['OBJECTIVES'] = list(prediction[:,1]) + [0]*redundant\n submit['METHODS'] = list(prediction[:,2]) + [0]*redundant\n submit['RESULTS'] = list(prediction[:,3]) + [0]*redundant\n submit['CONCLUSIONS'] = list(prediction[:,4]) + [0]*redundant\n submit['OTHERS'] = list(prediction[:,5]) + [0]*redundant\n else:\n submit['BACKGROUND'] = [0]*redundant + list(prediction[:,0])\n submit['OBJECTIVES'] = [0]*redundant + list(prediction[:,1])\n submit['METHODS'] = [0]*redundant + list(prediction[:,2])\n submit['RESULTS'] = [0]*redundant + list(prediction[:,3])\n submit['CONCLUSIONS'] = [0]*redundant + list(prediction[:,4])\n submit['OTHERS'] = [0]*redundant + list(prediction[:,5])\n df = pd.DataFrame.from_dict(submit) \n df.to_csv(filename,index=False)", "_____no_output_____" ] ], [ [ "# Data-Preprocessing", "_____no_output_____" ] ], [ [ "# Create a word dictionary\nPAD_TOKEN = 0\nUNK_TOKEN = 1\n\nif os.path.exists(program_url+'dicitonary.pkl'):\n with open(program_url+'dicitonary.pkl','rb') as f:\n word_dict = pickle.load(f)\nelse:\n words = set()\n words |= collect_words(dataset_url+'trainset.csv')\n\n word_dict = {'<pad>':PAD_TOKEN,'<unk>':UNK_TOKEN}\n for word in words:\n word_dict[word]=len(word_dict)\n\n with open(program_url+'dicitonary.pkl','wb') as f:\n pickle.dump(word_dict, f)", "_____no_output_____" ], [ "# Get 3 datasets\nprint('[INFO] Start processing trainset...')\ntrain = get_dataset(dataset_url+'trainset.csv', word_dict, n_workers=8)\nprint('[INFO] Start processing validset...')\nvalid = get_dataset(dataset_url+'validset.csv', word_dict, n_workers=8)\nprint('[INFO] Start processing testset...')\ntest = get_dataset(dataset_url+'testset.csv', word_dict, n_workers=8)", "_____no_output_____" ], [ "trainData = AbstractDataset(train, PAD_TOKEN, max_len = 128)\nvalidData = AbstractDataset(valid, PAD_TOKEN, max_len = 128)\ntestData = AbstractDataset(test, 
PAD_TOKEN, max_len = 128)\ndel trainset, validset, dataset, train, valid, test", "_____no_output_____" ] ], [ [ "# handle data imbalance problem in training\n", "_____no_output_____" ] ], [ [ "def squeeze_sample_weights(sample_weights):\n sample_weights = np.array(sample_weights)\n return np.mean(sample_weights, axis=0)\n\nsample_weights = [compute_sample_weight(class_weight='balanced', y=abstract_labels) for data in trainData.data for abstract_labels in data['Label']]\nsample_weights = np.array(sample_weights)\nsample_weights = squeeze_sample_weights(sample_weights)\nsample_weights = torch.tensor(sample_weights).to(device)", "_____no_output_____" ] ], [ [ "# Create GloVe Embedding vectors", "_____no_output_____" ] ], [ [ "glove_name = 'glove.840B.300d'\n\nif os.path.exists(program_url+'embedding_matrix_{}'.format(glove_name)):\n with open(program_url+'embedding_matrix_{}'.format(glove_name),'rb') as f:\n embedding_matrix = pickle.load(f)\nelse:\n # Parse the unzipped file (a .txt file) to build an index that maps \n # words (as strings) to their vector representation (as number vectors)\n wordvector_path = dataset_url+'glove/{}.txt'.format(glove_name)\n embeddings_index = {}\n f = open(wordvector_path, encoding='utf8')\n for line in f:\n values = line.split()\n word = ''.join(values[:-300])\n coefs = np.asarray(values[-300:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found %s word vectors.' % len(embeddings_index))\n\n # Preparing the GloVe word-embeddings matrix\n max_words = len(word_dict)\n embedding_matrix = np.zeros((max_words, embedding_dim))\n for word, i in word_dict.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_matrix = torch.FloatTensor(embedding_matrix)\n with open(program_url+'embedding_matrix_{}'.format(glove_name),'wb') as f:\n pickle.dump(embedding_matrix, f)", "_____no_output_____" ] ], [ [ "# Create Nets", "_____no_output_____" ] ], [ [ "class GloVeNet(nn.Module):\n def __init__(self, vocabulary_size):\n super(GloVeNet, self).__init__()\n self.embedding_size = embedding_dim\n self.hidden_dim = hidden_dim\n self.embedding = nn.Embedding(vocabulary_size, self.embedding_size)\n self.embedding.weight = torch.nn.Parameter(embedding_matrix)\n self.word_rnn = nn.GRU(self.embedding_size,\n self.hidden_dim,\n bidirectional=True,\n batch_first=True)\n self.sent_rnn = nn.GRU(self.hidden_dim*2, self.hidden_dim, bidirectional=True, batch_first=True)\n \n # 1024 => 512\n self.l1 = nn.Linear(self.hidden_dim*2, self.hidden_dim)\n torch.nn.init.kaiming_normal_(self.l1.weight)\n # 512 => 6\n self.l2 = nn.Linear(self.hidden_dim, 6)\n \n\n def forward(self, x):\n # input 32, 11, 64\n x = self.embedding(x) # emb 32, 11, 64, 300\n b,s,w,e = x.shape\n x = x.view(b,s*w,e) # sent*word 32, 704, 300\n x, __ = self.word_rnn(x) # rnn 32, 704, 1024\n x = x.view(b,s,w,-1) # unwrap sw 32, 11, 64, 1024\n # 32 batch, 11 sentence, each 64 words\n x = torch.max(x,dim=2)[0] # 32, 11, 1024\n x, __ = self.sent_rnn(x) # 32, 11, 1024\n x = torch.relu(self.l1(x)) # 32, 11, 512\n x = torch.sigmoid(self.l2(x)) # 32, 11, 6\n return x", "_____no_output_____" ] ], [ [ "# Start Learning", "_____no_output_____" ] ], [ [ "model = GloVeNet(len(word_dict))\nopt = torch.optim.RMSprop(model.parameters(), lr=learning_rate)\ncriteria = torch.nn.BCELoss(reduction='none')\nmodel.to(device)\nhistory = {'train':[],'valid':[]}\n\nfor epoch in range(max_epoch):\n print('Epoch: {}'.format(epoch))\n 
_run_epoch(epoch, True)\n _run_epoch(epoch, False)\n save(epoch)", "_____no_output_____" ] ], [ [ "## Plot Learning Result", "_____no_output_____" ] ], [ [ "with open(program_url+'model/history.json', 'r') as f:\n history = json.loads(f.read())\n \ntrain_loss = [l['loss'] for l in history['train']]\nvalid_loss = [l['loss'] for l in history['valid']]\ntrain_f1 = [l['f1'] for l in history['train']]\nvalid_f1 = [l['f1'] for l in history['valid']]\n\nplt.figure(figsize=(7,5))\nplt.title('Loss')\nplt.plot(train_loss, label='train')\nplt.plot(valid_loss, label='valid')\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(7,5))\nplt.title('F1 Score')\nplt.plot(train_f1, label='train')\nplt.plot(valid_f1, label='valid')\nplt.legend()\nplt.show()\n\nprint('Best F1 score ', max([[l['f1'], idx] for idx, l in enumerate(history['valid'])]))", "_____no_output_____" ] ], [ [ "# Start Predicting", "_____no_output_____" ] ], [ [ "best_model = 709652\nmodel.load_state_dict(state_dict=torch.load(os.path.join(program_url,'model/model.pkl.{}'.format(best_model))))\n_run_epoch(1, False)\n\n# Use trained model to predict\nmodel.train(False)\ndataloader = DataLoader(dataset=testData,\n batch_size=64,\n shuffle=False,\n collate_fn=testData.collate_fn,\n num_workers=8)\ntrange = tqdm(enumerate(dataloader), total=len(dataloader), desc='Predict')\nprediction = []\nfor i, (x, y, sent_len) in trange:\n o_labels = model(x.to(device))\n result = o_labels>0.4\n for idx, o_label in enumerate(result):\n prediction.append(o_label[:sent_len[idx]].to('cpu'))\nprediction = torch.cat(prediction).detach().numpy().astype(int)", "_____no_output_____" ] ], [ [ "## Submit Prediction Results", "_____no_output_____" ] ], [ [ "# SubmitGenerator(prediction,\n# dataset_url+'task1_sample_submission.csv', \n# True, \n# dataset_url+'task1_submission.csv')", "_____no_output_____" ], [ "SubmitGenerator(prediction,\n dataset_url+'task1_sample_submission.csv', \n False,\n dataset_url+'task1_submission.csv')", "_____no_output_____" ] ] ]
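A small worked example (not in the original notebook) of the two formatting helpers defined earlier in this notebook, `label_to_onehot` and `sentence_to_indices`; the toy `word_dict` below is an assumption used only for illustration:

```python
import nltk
nltk.download("punkt", quiet=True)   # tokenizer data used by word_tokenize

UNK_TOKEN = 1
word_dict = {"<pad>": 0, "<unk>": 1, "the": 2, "model": 3, "works": 4}

print(label_to_onehot("BACKGROUND/METHODS"))                    # -> [1, 0, 1, 0, 0, 0]
print(sentence_to_indices("the model works well", word_dict))   # -> [2, 3, 4, 1]  ('well' maps to <unk>)
```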
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec574283a7caa4a247765bb76336e7b12f00c03e
20,176
ipynb
Jupyter Notebook
3-project/project-management.ipynb
sed-pro-inria/tools-training
4b9bdd71d60107935ea5e1e5e7ab428df702bac0
[ "CC-BY-4.0" ]
1
2015-03-25T09:26:40.000Z
2015-03-25T09:26:40.000Z
3-project/project-management.ipynb
sed-pro-inria/tools-training
4b9bdd71d60107935ea5e1e5e7ab428df702bac0
[ "CC-BY-4.0" ]
null
null
null
3-project/project-management.ipynb
sed-pro-inria/tools-training
4b9bdd71d60107935ea5e1e5e7ab428df702bac0
[ "CC-BY-4.0" ]
null
null
null
26.901333
202
0.508277
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec5744b750dc675d93c0352e5a429a351b4bb6d7
73,306
ipynb
Jupyter Notebook
LDSSurveyAnalysis.ipynb
jlc42/ReligiousMembership
b0087f3f870791a6d7c47788283503354675e8fb
[ "CC0-1.0" ]
null
null
null
LDSSurveyAnalysis.ipynb
jlc42/ReligiousMembership
b0087f3f870791a6d7c47788283503354675e8fb
[ "CC0-1.0" ]
null
null
null
LDSSurveyAnalysis.ipynb
jlc42/ReligiousMembership
b0087f3f870791a6d7c47788283503354675e8fb
[ "CC0-1.0" ]
null
null
null
40.953073
282
0.411617
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "#Make Results DF for Use Later\nresultsdf=pd.DataFrame(columns = ['year','RLSLDSIdentifying','RLSMormonIdentifying'])\nresultsdf.set_index('year', inplace=True)", "_____no_output_____" ], [ "# Read in PEW surveys\nrls2007df = pd.read_spss('dataset_Religious_Landscape_Survey_Data/Religious Landscape Survey Data - Continental US.sav')\nrls2014df = pd.read_spss('Pew-Research-Center-2014-U.S.-Religious-Landscape-Study/Dataset - Pew Research Center 2014 Religious Landscape Study National Telephone Survey - Version 1.1 - December 1 2016.sav')", "_____no_output_____" ], [ "#Read in CCES Data:\n#ccesCUMdf = pd.read_stata('CCESData/cumulative_2006-2019.dta')\n\n#The religion question was asked in 2005, but was not helpful in its wording for finding Mormons\n#cces2005df = pd.read_excel('CCESData/CCES05.xls')\n#2006 won't read because it has duplicated categoricals that are not unique.\n#cces2006df = pd.read_stata('CCESData/CCES06_common.dta')\n#2007 won't read because it has bad encoding, can't read it in R either. \n#cces2007df = pd.read_stata('CCESData/CCES07_OUTPUT.sav')\nprint(\"reading CCES 2008\")\ncces2008df = pd.read_stata('CCESData/CCES08_common.dta')\ncces2009df = pd.read_stata('CCESData/cces09_cmn_output_2.dta')\nprint(\"reading CCES 2010\")\ncces2010df = pd.read_stata('CCESData/CCES10_common_validated.dta')\ncces2011df = pd.read_stata('CCESData/CCES11_Common_OUTPUT.dta')\nprint(\"reading CCES 2012\")\ncces2012df = pd.read_stata('CCESData/CCES12_Common_VV.dta')\ncces2013df = pd.read_stata('CCESData/CCES13_Common_Content_Data.dta')\nprint(\"reading CCES 2014\")\ncces2014df = pd.read_stata('CCESData/CCES14_Common_Content_Validated.dta')\ncces2015df = pd.read_stata('CCESData/CCES15_Common_OUTPUT_Jan2016.dta')\nprint(\"reading CCES 2016\")\ncces2016df = pd.read_stata('CCESData/CCES16_Common_OUTPUT_Feb2018_VV.dta')\ncces2017df = pd.read_stata('CCESData/CCES17_Common_Content_Data.dta')\nprint(\"reading CCES 2018\")\ncces2018df = pd.read_stata('CCESData/cces18_common_vv.dta')\ncces2019df = pd.read_stata('CCESData/CCES19_Common_OUTPUT.dta')\nprint(\"reading CCES 2020\")\ncces2020df = pd.read_stata('CCESData/CCES20_Common_OUTPUT.dta')\n", "reading CCES 2008\nreading CCES 2010\nreading CCES 2012\nreading CCES 2014\nreading CCES 2016\nreading CCES 2018\nreading CCES 2020\n" ], [ "#Interactive Read one at a time:\n#cces2020df = pd.read_stata('CCESData/CCES20_Common_OUTPUT.dta')\n#cces2016df = pd.read_stata('CCESData/CCES16_Common_OUTPUT_Feb2018_VV.dta')\n#cces2015df = pd.read_stata('CCESData/CCES15_Common_OUTPUT_Jan2016.dta')\n#cces2013df = pd.read_stata('CCESData/CCES13_Common_Content_Data.dta')\n#cces2012df = pd.read_stata('CCESData/CCES12_Common_VV.dta')\n#cces2011df = pd.read_stata('CCESData/CCES11_Common_OUTPUT.dta')\n#cces2010df = pd.read_stata('CCESData/cces_2010_common_validated.dta')\n#cces2009df = pd.read_stata('CCESData/cces09_cmn_output_2.dta')\n#cces2008df = pd.read_stata('CCESData/cces08_common.dta')\n#cces2007df = pd.read_spss(\"CCESData/CCES07_OUTPUT.sav\")\n\n#cces2006df = pd.read_stata('CCESData/CCES06_common.dta')\n#cces2006df\ncces2005df = pd.read_excel('CCESData/CCES05.xls')\ncces2005df", "_____no_output_____" ], [ "#Process CCES Data\nccesYears=[]\nccesdfs={}\n\nprint('Processing CCES Year 2008')\ncces2008df['year']=2008\ncces2008df.rename(columns={\"V201\":\"commonweight\"}, inplace=True)\ncces2008df.rename(columns={\"V217\":\"pew_churatd\"}, 
inplace=True)\ncces2008df.rename(columns={\"V235\":\"religpew_mormon\"}, inplace=True)\ncces2008WeightTotal=cces2008df['commonweight'].sum()\nresultsdf.loc[2008,'ccesWeightTotal']=cces2008WeightTotal\nresultsdf.loc[2008,'ccesN']=len(cces2008df)\nccesYears.append(2008)\nccesdfs[2008]=cces2008df\n\nprint('Processing CCES Year 2009')\ncces2009df['year']=2009\ncces2009df.rename(columns={\"v200\":\"commonweight\"}, inplace=True)\ncces2009df.rename(columns={\"v217\":\"pew_churatd\"}, inplace=True)\ncces2009df.rename(columns={\"v235\":\"religpew_mormon\"}, inplace=True)\ncces2009WeightTotal=cces2009df['commonweight'].sum()\nresultsdf.loc[2009,'ccesWeightTotal']=cces2009WeightTotal\nresultsdf.loc[2009,'ccesN']=len(cces2009df)\nccesYears.append(2009)\nccesdfs[2009]=cces2009df\n\nprint('Processing CCES Year 2010')\ncces2010df['year']=2010\ncces2010df.rename(columns={\"V101\":\"commonweight\"}, inplace=True)\ncces2010df.rename(columns={\"V217\":\"pew_churatd\"}, inplace=True)\ncces2010df.rename(columns={\"V235\":\"religpew_mormon\"}, inplace=True)\ncces2010WeightTotal=cces2010df['commonweight'].sum()\nresultsdf.loc[2010,'ccesWeightTotal']=cces2010WeightTotal\nresultsdf.loc[2010,'ccesN']=len(cces2010df)\nccesYears.append(2010)\nccesdfs[2010]=cces2010df\n\nprint('Processing CCES Year 2011')\ncces2011df['year']=2011\ncces2011df.rename(columns={\"V101\":\"commonweight\"}, inplace=True)\ncces2011df.rename(columns={\"V217\":\"pew_churatd\"}, inplace=True)\ncces2011df.rename(columns={\"V235\":\"religpew_mormon\"}, inplace=True)\ncces2011WeightTotal=cces2011df['commonweight'].sum()\nresultsdf.loc[2011,'ccesWeightTotal']=cces2011WeightTotal\nresultsdf.loc[2011,'ccesN']=len(cces2011df)\nccesYears.append(2011)\nccesdfs[2011]=cces2011df\n\nprint('Processing CCES Year 2012')\ncces2012df['year']=2012\ncces2012df.rename(columns={\"weight_vv\":\"commonweight\"}, inplace=True)\ncces2012WeightTotal=cces2012df['commonweight'].sum()\nresultsdf.loc[2012,'ccesWeightTotal']=cces2012WeightTotal\nresultsdf.loc[2012,'ccesN']=len(cces2012df)\nccesYears.append(2012)\nccesdfs[2012]=cces2012df\n\nprint('Processing CCES Year 2013')\ncces2013df['year']=2013\ncces2013df.rename(columns={\"weight\":\"commonweight\"}, inplace=True)\ncces2013WeightTotal=cces2013df['commonweight'].sum()\nresultsdf.loc[2013,'ccesWeightTotal']=cces2013WeightTotal\nresultsdf.loc[2013,'ccesN']=len(cces2013df)\nccesYears.append(2013)\nccesdfs[2013]=cces2013df\n\nprint('Processing CCES Year 2014')\ncces2014df['year']=2014\ncces2014df.rename(columns={\"weight\":\"commonweight\"}, inplace=True)\ncces2014WeightTotal=cces2014df['commonweight'].sum()\nresultsdf.loc[2014,'ccesWeightTotal']=cces2014WeightTotal\nresultsdf.loc[2014,'ccesN']=len(cces2014df)\nccesYears.append(2014)\nccesdfs[2014]=cces2014df\n\nprint('Processing CCES Year 2015')\ncces2015df['year']=2015\ncces2015df.rename(columns={\"weight\":\"commonweight\"}, inplace=True)\ncces2015WeightTotal=cces2015df['commonweight'].sum()\nresultsdf.loc[2015,'ccesWeightTotal']=cces2015WeightTotal\nresultsdf.loc[2015,'ccesN']=len(cces2015df)\nccesYears.append(2015)\nccesdfs[2015]=cces2015df\n\nprint('Processing CCES Year 2016')\ncces2016df['year']=2016\ncces2016WeightTotal=cces2016df['commonweight'].sum()\nresultsdf.loc[2016,'ccesWeightTotal']=cces2016WeightTotal\nresultsdf.loc[2016,'ccesN']=len(cces2016df)\nccesYears.append(2016)\nccesdfs[2016]=cces2016df\n\nprint('Processing CCES Year 2017')\ncces2017df['year']=2017\ncces2017df.rename(columns={\"weights_common\":\"commonweight\"}, 
inplace=True)\ncces2017WeightTotal=cces2017df['commonweight'].sum()\nresultsdf.loc[2017,'ccesWeightTotal']=cces2017WeightTotal\nresultsdf.loc[2017,'ccesN']=len(cces2017df)\nccesYears.append(2017)\nccesdfs[2017]=cces2017df\n\nprint('Processing CCES Year 2018')\ncces2018df['year']=2018\ncces2018WeightTotal=cces2018df['commonweight'].sum()\nresultsdf.loc[2018,'ccesWeightTotal']=cces2018WeightTotal\nresultsdf.loc[2018,'ccesN']=len(cces2018df)\nccesYears.append(2018)\nccesdfs[2018]=cces2018df\n\nprint('Processing CCES Year 2019')\ncces2019df['year']=2019\ncces2019WeightTotal=cces2019df['commonweight'].sum()\nresultsdf.loc[2019,'ccesWeightTotal']=cces2019WeightTotal\nresultsdf.loc[2019,'ccesN']=len(cces2019df)\nccesYears.append(2019)\nccesdfs[2019]=cces2019df\n\nprint('Processing CCES Year 2020')\ncces2020df['year']=2020\ncces2020WeightTotal=cces2020df['commonweight'].sum()\nresultsdf.loc[2020,'ccesWeightTotal']=cces2020WeightTotal\nresultsdf.loc[2020,'ccesN']=len(cces2020df)\nccesYears.append(2020)\nccesdfs[2020]=cces2020df\n\nresultsdf.sort_index(inplace=True)", "Processing CCES Year 2008\nProcessing CCES Year 2009\nProcessing CCES Year 2010\nProcessing CCES Year 2011\nProcessing CCES Year 2012\nProcessing CCES Year 2013\nProcessing CCES Year 2014\nProcessing CCES Year 2015\nProcessing CCES Year 2016\nProcessing CCES Year 2017\nProcessing CCES Year 2018\nProcessing CCES Year 2019\nProcessing CCES Year 2020\n" ], [ "#I don't do this anymore, because it was takign up WAY too much memory\n#ccesdf=pd.concat([cces2014df, cces2017df, cces2018df, cces2019df,cces2020df],keys=ccesYears)\n#resultsdf.sort_index(inplace=True)", "_____no_output_____" ], [ "#Do All Years\nyearsToAnalize=ccesYears\n#Do Just Some Years\n#yearsToAnalize=[2018]\n\nfor y in yearsToAnalize:\n print(\"Analyzing CCES for year \" + str(y) + \".\")\n df=ccesdfs[y]\n LDSfilter = (df['religpew_mormon']=='The Church of Jesus Christ of Latter-day Saints') | (df['religpew_mormon']=='the church of jesus christ of latter-day saints')\n LDSdf=df.loc[LDSfilter]\n resultsdf.loc[y,'ccesLDSWeightTotal']=LDSdf['commonweight'].sum()\n resultsdf.loc[y,'ccesLDSN']=len(LDSdf)\nresultsdf['ccesLDSWeightedFrac'] = resultsdf['ccesLDSWeightTotal']/resultsdf['ccesWeightTotal']\nresultsdf['ccesLDSNFrac'] = resultsdf['ccesLDSN']/resultsdf['ccesN']", "Analyzing CCES for year 2008.\nAnalyzing CCES for year 2009.\nAnalyzing CCES for year 2010.\nAnalyzing CCES for year 2011.\nAnalyzing CCES for year 2012.\nAnalyzing CCES for year 2013.\nAnalyzing CCES for year 2014.\nAnalyzing CCES for year 2015.\nAnalyzing CCES for year 2016.\nAnalyzing CCES for year 2017.\nAnalyzing CCES for year 2018.\nAnalyzing CCES for year 2019.\nAnalyzing CCES for year 2020.\n" ], [ "resultsdf", "_____no_output_____" ], [ "print(\"2019\")\nprint(ccesdf.loc[2019])\nprint(\"2020\")\nprint(ccesdf.loc[2020])", "2019\n caseid commonweight CCEStake birthyr gender \\\n0 1.028710e+09 1.073482 Yes 1958.0 Male \n1 1.028710e+09 1.117033 Yes 1951.0 Male \n2 1.028408e+09 0.784682 Yes 1968.0 Female \n3 1.028710e+09 0.462042 Yes 1945.0 Female \n4 1.028458e+09 1.266609 Yes 1971.0 Male \n... ... ... ... ... ... 
\n17995 1.041001e+09 0.778497 Yes 1990.0 Female \n17996 1.044119e+09 0.687052 Yes 1968.0 Male \n17997 1.041016e+09 1.569546 Yes 1985.0 Male \n17998 1.044119e+09 0.180218 Yes 1962.0 Female \n17999 1.041006e+09 3.336704 Yes 1960.0 Female \n\n educ race race_other hispanic multrace_1 \\\n0 High school graduate Hispanic __NA__ NaN selected \n1 Post-grad White __NA__ No selected \n2 4-year White __NA__ No selected \n3 4-year White __NA__ No NaN \n4 4-year White __NA__ No NaN \n... ... ... ... ... ... \n17995 4-year White __NA__ No NaN \n17996 Some college Black __NA__ No NaN \n17997 Some college Hispanic __NA__ NaN NaN \n17998 2-year Black __NA__ No NaN \n17999 High school graduate Native American __NA__ No selected \n\n ... page_CC20_433_rep_timing page_CC20_433b_timing \\\n0 ... NaN NaN \n1 ... NaN NaN \n2 ... NaN NaN \n3 ... NaN NaN \n4 ... NaN NaN \n... ... ... ... \n17995 ... NaN NaN \n17996 ... NaN NaN \n17997 ... NaN NaN \n17998 ... NaN NaN \n17999 ... NaN NaN \n\n page_CC20_440_grid_timing page_CC20_441_grid_timing \\\n0 NaN NaN \n1 NaN NaN \n2 NaN NaN \n3 NaN NaN \n4 NaN NaN \n... ... ... \n17995 NaN NaN \n17996 NaN NaN \n17997 NaN NaN \n17998 NaN NaN \n17999 NaN NaN \n\n page_CC20_442_grid_timing page_CC20_443_timing page_gunown_timing \\\n0 NaN NaN NaN \n1 NaN NaN NaN \n2 NaN NaN NaN \n3 NaN NaN NaN \n4 NaN NaN NaN \n... ... ... ... \n17995 NaN NaN NaN \n17996 NaN NaN NaN \n17997 NaN NaN NaN \n17998 NaN NaN NaN \n17999 NaN NaN NaN \n\n page_numchildren_timing page_edloan_timing page_student_timing \n0 NaN NaN NaN \n1 NaN NaN NaN \n2 NaN NaN NaN \n3 NaN NaN NaN \n4 NaN NaN NaN \n... ... ... ... \n17995 NaN NaN NaN \n17996 NaN NaN NaN \n17997 NaN NaN NaN \n17998 NaN NaN NaN \n17999 NaN NaN NaN \n\n[18000 rows x 848 columns]\n2020\n caseid commonweight CCEStake birthyr gender \\\n0 1.232319e+09 0.7830 Yes 1966.0 Male \n1 1.231395e+09 1.3436 Yes 1955.0 Female \n2 1.232452e+09 0.4044 Yes 1946.0 Female \n3 1.232495e+09 0.9580 Yes 1962.0 Female \n4 1.232495e+09 0.1950 Yes 1967.0 Male \n... ... ... ... ... ... \n60995 1.199587e+09 1.7661 Yes 1978.0 Male \n60996 1.261250e+09 1.1583 Yes 1993.0 Male \n60997 1.261248e+09 1.5717 Yes 1959.0 Male \n60998 1.257274e+09 1.0110 Yes 1996.0 Male \n60999 1.261243e+09 0.7377 Yes 1985.0 Male \n\n educ race race_other hispanic multrace_1 \\\n0 2-year White __NA__ No NaN \n1 Post-grad White __NA__ No NaN \n2 4-year White __NA__ No NaN \n3 4-year White __NA__ No selected \n4 4-year White __NA__ No selected \n... ... ... ... ... ... \n60995 High school graduate White __NA__ No NaN \n60996 Some college White __NA__ No NaN \n60997 High school graduate White __NA__ No selected \n60998 Some college Two or more races __NA__ Yes selected \n60999 High school graduate White __NA__ No NaN \n\n ... page_CC20_433_rep_timing page_CC20_433b_timing \\\n0 ... 11.844 0.000 \n1 ... 0.000 0.000 \n2 ... 0.000 2.372 \n3 ... 0.000 5.508 \n4 ... 0.000 3.025 \n... ... ... ... \n60995 ... NaN NaN \n60996 ... 0.000 0.000 \n60997 ... 0.000 0.000 \n60998 ... NaN NaN \n60999 ... 0.000 3.324 \n\n page_CC20_440_grid_timing page_CC20_441_grid_timing \\\n0 59.339 28.464 \n1 32.828 22.774 \n2 21.405 17.800 \n3 58.590 56.963 \n4 17.174 18.891 \n... ... ... \n60995 NaN NaN \n60996 16.539 8.668 \n60997 38.750 25.433 \n60998 NaN NaN \n60999 25.375 53.363 \n\n page_CC20_442_grid_timing page_CC20_443_timing page_gunown_timing \\\n0 97.403 339.340 94.145 \n1 52.309 17.867 8.863 \n2 25.163 39.397 12.507 \n3 55.885 64.907 15.360 \n4 28.124 21.330 8.807 \n... ... ... ... 
\n60995 NaN NaN NaN \n60996 43.093 47.014 9.518 \n60997 163.518 79.075 4.301 \n60998 NaN NaN NaN \n60999 37.885 30.868 4.422 \n\n page_numchildren_timing page_edloan_timing page_student_timing \n0 23.653 55.331 0.0 \n1 5.130 4.504 0.0 \n2 8.621 3.623 0.0 \n3 11.832 28.634 0.0 \n4 5.805 3.252 0.0 \n... ... ... ... \n60995 NaN NaN NaN \n60996 13.807 3.317 0.0 \n60997 18.032 4.457 0.0 \n60998 NaN NaN NaN \n60999 3.827 9.019 0.0 \n\n[61000 rows x 848 columns]\n" ] ], [ [ "Calculate the Weighted Totals for PEW:", "_____no_output_____" ] ], [ [ "rls2007WeightTotal=rls2007df['weight'].sum()\nprint(\"2007 weight total: \" + str(rls2007WeightTotal))\nresultsdf.loc[2007,'rlsWeightTotal']=rls2007WeightTotal\nrls2014WeightTotal=rls2014df['WEIGHT'].sum()\nprint(\"2014 weight total: \" + str(rls2014WeightTotal))\nresultsdf.loc[2014,'rlsWeightTotal']=rls2014WeightTotal\n\n", "2007 weight total: 88292.49782861538\n2014 weight total: 35070.98000337468\n" ], [ "rls2014df\n", "_____no_output_____" ] ], [ [ "For the RLS, Calculate Fraction of Samples that are Mormon, and LDS:", "_____no_output_____" ] ], [ [ "#RLS 2007\nrls2007LDSdf=rls2007df.loc[rls2007df['denom']==' Church of Jesus Christ of Latter Day Saints']\nrls2007LDSWeightTotal=rls2007LDSdf['weight'].sum()\nrls2007Mormondf=rls2007df.loc[rls2007df['reltrad']==' Mormon']\nrls2007MormonWeightTotal=rls2007Mormondf['weight'].sum()\n\n#LDS\nrls2007LDSIdentifying=rls2007LDSWeightTotal/rls2007WeightTotal\nprint('2007RLS: % LDS = ' + str(rls2007LDSIdentifying))\nresultsdf.loc[2007,'RLSLDSIdentifying']=rls2007LDSIdentifying\n#Mormon\nrls2007MormonIdentifying=rls2007MormonWeightTotal/rls2007WeightTotal\nprint('2007RLS: % Mormon = ' + str(rls2007MormonWeightTotal/rls2007WeightTotal))\nresultsdf.loc[2007,'RLSMormonIdentifying']=rls2007MormonIdentifying\n\n#RLS 2014\nrls2014LDSdf=rls2014df.loc[rls2014df['DENOM']=='Church of Jesus Christ of Latter Day Saints']\nrls2014LDSWeightTotal=rls2014LDSdf['WEIGHT'].sum()\nrls2014Mormondf=rls2014df.loc[rls2014df['RELTRAD']=='Mormon']\nrls2014MormonWeightTotal=rls2014Mormondf['WEIGHT'].sum()\n\n#LDS\nrls2014LDSIdentifying=rls2014LDSWeightTotal/rls2014WeightTotal\nprint('2014RLS: % LDS = ' + str(rls2014LDSWeightTotal/rls2014WeightTotal))\nresultsdf.loc[2014,'RLSLDSIdentifying']=rls2014LDSIdentifying\n#Mormon\nrls2014MormonIdentifying=rls2014MormonWeightTotal/rls2014WeightTotal\nprint('2014RLS: % Mormon = ' + str(rls2014MormonWeightTotal/rls2014WeightTotal))\nresultsdf.loc[2014,'RLSMormonIdentifying']=rls2014MormonIdentifying", "_____no_output_____" ] ], [ [ "Calculate the Attendance Rates for LDS in 2007", "_____no_output_____" ] ], [ [ "#Don't Know\nrls2007LDSDontKnowAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']==\"Don't know/Refused (VOL.)\"]\nrls2007LDSDontKnowAttendWeightTotal=rls2007LDSDontKnowAttenddf['weight'].sum()\nrls2007LDSDontKnowAttendWeightTotal\nrls2007LDSDontKnowAttendFrac=rls2007LDSDontKnowAttendWeightTotal/rls2007LDSWeightTotal\n\n#Never\nrls2007LDSNeverAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='Never']\nrls2007LDSNeverAttendWeightTotal=rls2007LDSNeverAttenddf['weight'].sum()\nrls2007LDSNeverAttendWeightTotal\nrls2007LDSNeverAttendFrac=rls2007LDSNeverAttendWeightTotal/rls2007LDSWeightTotal\n\n\n#Seldom\nrls2007LDSSeldomAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='Seldom']\nrls2007LDSSeldomAttendWeightTotal=rls2007LDSSeldomAttenddf['weight'].sum()\nrls2007LDSSeldomAttendWeightTotal\nrls2007LDSSeldomAttendFrac=rls2007LDSSeldomAttendWeightTotal/rls2007LDSWeightTotal\n\n\n#A few times a 
year\nrls2007LDSYearAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='A few times a year']\nrls2007LDSYearAttendWeightTotal=rls2007LDSYearAttenddf['weight'].sum()\nrls2007LDSYearAttendWeightTotal\nrls2007LDSYearAttendFrac=rls2007LDSYearAttendWeightTotal/rls2007LDSWeightTotal\n\n\n#Once or twice a month\nrls2007LDSMonthAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='Once or twice a month']\nrls2007LDSMonthAttendWeightTotal=rls2007LDSMonthAttenddf['weight'].sum()\nrls2007LDSMonthAttendWeightTotal\nrls2007LDSMonthAttendFrac=rls2007LDSMonthAttendWeightTotal/rls2007LDSWeightTotal\n\n\n#Once a week\nrls2007LDSWeekAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='Once a week']\nrls2007LDSWeekAttendWeightTotal=rls2007LDSWeekAttenddf['weight'].sum()\nrls2007LDSWeekAttendWeightTotal\nrls2007LDSWeekAttendFrac=rls2007LDSWeekAttendWeightTotal/rls2007LDSWeightTotal\n\n\n#More than once a week\nrls2007LDSMoreAttenddf=rls2007LDSdf.loc[rls2007LDSdf['q20']=='More than once a week']\nrls2007LDSMoreAttendWeightTotal=rls2007LDSMoreAttenddf['weight'].sum()\nrls2007LDSMoreAttendWeightTotal\nrls2007LDSMoreAttendFrac=rls2007LDSMoreAttendWeightTotal/rls2007LDSWeightTotal\n\n\n\nrls2007LDSDontKnowAttendFrac\nrls2007LDSNeverAttendFrac\nrls2007LDSSeldomAttendFrac\nrls2007LDSYearAttendFrac\nrls2007LDSMonthAttendFrac\nrls2007LDSWeekAttendFrac\nrls2007LDSMoreAttendFrac\n\n\nrls2007AttendFracCheck=rls2007LDSDontKnowAttendFrac + rls2007LDSNeverAttendFrac + rls2007LDSSeldomAttendFrac + rls2007LDSYearAttendFrac + rls2007LDSMonthAttendFrac + rls2007LDSWeekAttendFrac + rls2007LDSMoreAttendFrac\nrls2007AttendFracCheck\n\n\nrls2007AttendCheckWeightTotal=rls2007LDSDontKnowAttendWeightTotal + rls2007LDSNeverAttendWeightTotal + rls2007LDSSeldomAttendWeightTotal + rls2007LDSYearAttendWeightTotal + rls2007LDSMonthAttendWeightTotal + rls2007LDSWeekAttendWeightTotal + rls2007LDSMoreAttendWeightTotal\nrls2007AttendCheckWeightTotal\n\n\nrls2007LDSActiveFrac=(rls2007LDSMoreAttendFrac + rls2007LDSWeekAttendFrac + rls2007LDSMonthAttendFrac)\nprint(\"RLS 2007 LDS % Active (monthly or more) = \" +str(rls2007LDSActiveFrac))\n\n\n", "_____no_output_____" ] ], [ [ "Calculate the Attendance Rates for LDS in 2014", "_____no_output_____" ] ], [ [ "rls2014LDSdf['attend']", "_____no_output_____" ], [ "#Don't Know\nrls2014LDSDontKnowAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']==\"Don't know/Refused (VOL.)\"]\nrls2014LDSDontKnowAttendWeightTotal=rls2014LDSDontKnowAttenddf['WEIGHT'].sum()\nrls2014LDSDontKnowAttendWeightTotal\nrls2014LDSDontKnowAttendFrac=rls2014LDSDontKnowAttendWeightTotal/rls2014LDSWeightTotal\n\n#Never\nrls2014LDSNeverAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='Never']\nrls2014LDSNeverAttendWeightTotal=rls2014LDSNeverAttenddf['WEIGHT'].sum()\nrls2014LDSNeverAttendWeightTotal\nrls2014LDSNeverAttendFrac=rls2014LDSNeverAttendWeightTotal/rls2014LDSWeightTotal\n\n#Seldom\nrls2014LDSSeldomAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='Seldom']\nrls2014LDSSeldomAttendWeightTotal=rls2014LDSSeldomAttenddf['WEIGHT'].sum()\nrls2014LDSSeldomAttendWeightTotal\nrls2014LDSSeldomAttendFrac=rls2014LDSSeldomAttendWeightTotal/rls2014LDSWeightTotal\n\n#A few times a year\nrls2014LDSYearAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='A few times a year']\nrls2014LDSYearAttendWeightTotal=rls2014LDSYearAttenddf['WEIGHT'].sum()\nrls2014LDSYearAttendWeightTotal\nrls2014LDSYearAttendFrac=rls2014LDSYearAttendWeightTotal/rls2014LDSWeightTotal\n\n#Once or twice a month\nrls2014LDSMonthAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='Once or twice 
a month']\nrls2014LDSMonthAttendWeightTotal=rls2014LDSMonthAttenddf['WEIGHT'].sum()\nrls2014LDSMonthAttendWeightTotal\nrls2014LDSMonthAttendFrac=rls2014LDSMonthAttendWeightTotal/rls2014LDSWeightTotal\n\n#Once a week\nrls2014LDSWeekAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='Once a week']\nrls2014LDSWeekAttendWeightTotal=rls2014LDSWeekAttenddf['WEIGHT'].sum()\nrls2014LDSWeekAttendWeightTotal\nrls2014LDSWeekAttendFrac=rls2014LDSWeekAttendWeightTotal/rls2014LDSWeightTotal\n\n#More than once a week\nrls2014LDSMoreAttenddf=rls2014LDSdf.loc[rls2014LDSdf['attend']=='More than once a week']\nrls2014LDSMoreAttendWeightTotal=rls2014LDSMoreAttenddf['WEIGHT'].sum()\nrls2014LDSMoreAttendWeightTotal\nrls2014LDSMoreAttendFrac=rls2014LDSMoreAttendWeightTotal/rls2014LDSWeightTotal\n\nprint('RLS 2014 LDS % \"Dont know\" attend ' + str(rls2014LDSDontKnowAttendFrac))\nprint('RLS 2014 LDS % \"Never\" attend ' + str(rls2014LDSNeverAttendFrac))\nprint('RLS 2014 LDS % \"Seldom\" attend ' + str(rls2014LDSSeldomAttendFrac))\nprint('RLS 2014 LDS % \"Year\" attend ' + str(rls2014LDSYearAttendFrac))\nprint('RLS 2014 LDS % \"Month\" attend ' + str(rls2014LDSMonthAttendFrac))\nprint('RLS 2014 LDS % \"Week\" attend ' + str(rls2014LDSWeekAttendFrac))\nprint('RLS 2014 LDS % \"More\" attend ' + str(rls2014LDSMoreAttendFrac))\n\nrls2014AttendFracCheck=rls2014LDSDontKnowAttendFrac + rls2014LDSNeverAttendFrac + rls2014LDSSeldomAttendFrac + rls2014LDSYearAttendFrac + rls2014LDSMonthAttendFrac + rls2014LDSWeekAttendFrac + rls2014LDSMoreAttendFrac\nprint('RLS 2014 LDS Attendance Frac Check ' + str(rls2007AttendFracCheck))\n\n\nrls2014AttendCheckWeightTotal=rls2014LDSDontKnowAttendWeightTotal + rls2014LDSNeverAttendWeightTotal + rls2014LDSSeldomAttendWeightTotal + rls2014LDSYearAttendWeightTotal + rls2014LDSMonthAttendWeightTotal + rls2014LDSWeekAttendWeightTotal + rls2014LDSMoreAttendWeightTotal\nprint('RLS 2014 LDS Attendance Weight Total Check ' + str(rls2014AttendCheckWeightTotal))\n\n\nrls2014LDSActiveFrac=(rls2014LDSMoreAttendFrac + rls2014LDSWeekAttendFrac + rls2014LDSMonthAttendFrac)\nprint(\"RLS 2014 LDS % Active (monthly or more) = \" + str(rls2014LDSActiveFrac))", "_____no_output_____" ], [ "resultsdf", "_____no_output_____" ] ] ]
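The per-category attendance shares above are computed one block at a time; the same weighted fractions can be obtained in a single groupby (alternative sketch only, not part of the original notebook, assuming the `attend` and `WEIGHT` columns of `rls2014LDSdf`):

```python
attend_shares = (
    rls2014LDSdf.groupby("attend")["WEIGHT"].sum()
    / rls2014LDSdf["WEIGHT"].sum()
)
print(attend_shares)

active = ["Once or twice a month", "Once a week", "More than once a week"]
print("RLS 2014 LDS % Active (monthly or more) =", attend_shares.reindex(active).sum())
```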
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec57560be2c2d1a09f8881fa6680467b8feb213a
223,209
ipynb
Jupyter Notebook
logistic_regression_for_credit_card_fraud_detection.ipynb
prabhatk579/Fraud_Detection
bf58ffb318a84575d85e82534fe9447e0ae55d2d
[ "Unlicense" ]
1
2022-01-02T10:42:51.000Z
2022-01-02T10:42:51.000Z
logistic_regression_for_credit_card_fraud_detection.ipynb
prabhatk579/Fraud_Detection
bf58ffb318a84575d85e82534fe9447e0ae55d2d
[ "Unlicense" ]
null
null
null
logistic_regression_for_credit_card_fraud_detection.ipynb
prabhatk579/Fraud_Detection
bf58ffb318a84575d85e82534fe9447e0ae55d2d
[ "Unlicense" ]
3
2022-01-02T10:40:25.000Z
2022-03-31T16:44:13.000Z
113.304061
32,250
0.797598
[ [ [ "# Logistic Regression", "_____no_output_____" ] ], [ [ "import pandas as pd \r\nimport numpy as np\r\nfrom sklearn import preprocessing\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import svm\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.mlab as mlab\r\nimport seaborn\r\n%matplotlib inline", "_____no_output_____" ], [ "from google.colab import drive\r\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "data = pd.read_csv('drive/MyDrive/creditcard.csv')\r\ndf = pd.DataFrame(data)", "_____no_output_____" ] ], [ [ "## Data Visualization", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df_fraud = df[df['Class'] == 1] \r\nplt.figure(figsize=(15,5))\r\nplt.scatter(df_fraud['Time'], df_fraud['Amount'])\r\nplt.title('Scratter plot amount fraud')\r\nplt.xlabel('Time')\r\nplt.ylabel('Amount')\r\nplt.xlim([0,175000])\r\nplt.ylim([0,2500])\r\nplt.show()", "_____no_output_____" ], [ "nb_big_fraud = df_fraud[df_fraud['Amount'] > 1000].shape[0] # Recovery of frauds over 1000\r\nprint('There are only {} frauds where the amount was bigger than 1000 over {} frauds'.format(str(nb_big_fraud),str(df_fraud.shape[0]) ))", "There are only 9 frauds where the amount was bigger than 1000 over 492 frauds\n" ] ], [ [ "## Unbalanced Data", "_____no_output_____" ] ], [ [ "number_fraud = len(data[data.Class == 1])\r\nnumber_no_fraud = len(data[data.Class == 0])\r\nprint('There are only {} frauds in the original dataset, even though there are {} no frauds in the dataset.'.format(str(number_fraud),str(number_no_fraud)))", "There are only 492 frauds in the original dataset, even though there are 284315 no frauds in the dataset.\n" ], [ "print(\"The accuracy of the classifier then would be : {} which is the number of good classification over the number of tuple to classify\".format(str((284315-492)/284315)))", "The accuracy of the classifier then would be : 0.998269524998681 which is the number of good classification over the number of tuple to classify\n" ] ], [ [ "## Correlation of the features", "_____no_output_____" ] ], [ [ "df_corr = df.corr() # Pearson, Standard Correlation Coefficient", "_____no_output_____" ], [ "plt.figure(figsize=(6,5))\r\nseaborn.heatmap(df_corr, cmap='Blues')\r\nseaborn.set(font_scale=2,style='white')\r\n\r\nplt.title('Heatmap correlation')\r\nplt.show()", "_____no_output_____" ] ], [ [ "## Data Selection", "_____no_output_____" ], [ "### UNDERSAMPLING", "_____no_output_____" ] ], [ [ "# We seperate ours data in two groups : a train dataset and a test dataset\r\n\r\n# First we build our train dataset\r\ndf_train_all = df[0:150000] # We divide the original dataset in two parts\r\ndf_train_1 = df_train_all[df_train_all['Class'] == 1]\r\ndf_train_0 = df_train_all[df_train_all['Class'] == 0]\r\nprint('In this dataset, we have {} frauds so we need to take a similar number of non-fraud'.format(len(df_train_1)))\r\n\r\ndf_sample=df_train_0.sample(300)\r\ndf_train = df_train_1.append(df_sample) # We gather the frauds with the no frauds. 
\r\ndf_train = df_train.sample(frac=1) # Then we mix our dataset", "In this dataset, we have 293 frauds so we need to take a similar number of non-fraud\n" ], [ "X_train = df_train.drop(['Time', 'Class'],axis=1) # We drop the features Time (useless), and the Class (label)\r\ny_train = df_train['Class'] # We create our label\r\nX_train = np.asarray(X_train)\r\ny_train = np.asarray(y_train)", "_____no_output_____" ], [ "# with all the test dataset to see if the model learn correctly\r\ndf_test_all = df[150000:]\r\n\r\nX_test_all = df_test_all.drop(['Time', 'Class'],axis=1)\r\ny_test_all = df_test_all['Class']\r\nX_test_all = np.asarray(X_test_all)\r\ny_test_all = np.asarray(y_test_all)", "_____no_output_____" ] ], [ [ "## Fucntion for Confusion Matrix", "_____no_output_____" ] ], [ [ "class_names=np.array(['0','1']) # Binary label, Class = 1 (fraud) and Class = 0 (no fraud)", "_____no_output_____" ], [ "# Function to plot the confusion Matrix\r\ndef plot_confusion_matrix(cm, classes,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = 'd' \r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "_____no_output_____" ] ], [ [ "# Applying Logistic Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "classifier = LogisticRegression()", "_____no_output_____" ], [ "classifier.fit(X_train, y_train)", "/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ], [ "print(classifier.intercept_, classifier.coef_)", "[-2.86438049] [[ 0.00908324 -0.6603154 -0.16940042 0.88268394 -0.31792477 -0.1732796\n 0.84889956 -0.8684064 -0.3717664 -0.99396007 0.57272193 -0.94655916\n -0.551261 -1.3298367 -0.05560556 -0.55778326 -0.81908262 -0.00268815\n -0.03360937 0.03830921 0.20862332 0.54646126 -0.08992324 -0.62976972\n 0.07651314 -0.34431767 0.16293138 0.47385403 -0.00490961]]\n" ], [ "pred = classifier.predict(X_train)", "_____no_output_____" ], [ "print ('Accuracy from sk-learn: {}'.format(classifier.score(X_train,y_train)))", "Accuracy from sk-learn: 0.954468802698145\n" ] ], [ [ "### Confusion Matrix", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_train, pred)\r\nplot_confusion_matrix(cm,class_names)", "_____no_output_____" ] ], [ [ "### Precision, Recall, F1-Score, Mean Absolute Error, Mean Percentage Error and Mean Squared Error", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report,mean_absolute_error,mean_squared_error,r2_score\r\nreport= classification_report(y_train,pred)\r\nprint(report)", " precision recall f1-score support\n\n 0 0.93 0.98 0.96 300\n 1 0.98 0.92 0.95 293\n\n accuracy 0.95 593\n macro avg 0.96 0.95 0.95 593\nweighted avg 0.96 0.95 0.95 593\n\n" ], [ "mean_abs_error = mean_absolute_error(y_train,pred)\r\nmean_abs_percentage_error = np.mean(np.abs((y_train - pred) // y_train))\r\nmse= mean_squared_error(y_train,pred)\r\nr_squared_error = r2_score(y_train,pred)\r\nprint(\"Mean absolute error : {} \\nMean Absolute Percentage error : {}\\nMean Squared Error : {}\\nR Squared Error: {}\".format(mean_abs_error,mean_abs_percentage_error,mse,r_squared_error))", "Mean absolute error : 0.045531197301854974 \nMean Absolute Percentage error : 0.03709949409780776\nMean Squared Error : 0.045531197301854974\nR Squared Error: 0.8178498293515358\n" ] ], [ [ "# Undersampling using Synthetic Minority Oversampling Technique (SMOTE) approach", "_____no_output_____" ] ], [ [ "from imblearn.over_sampling import SMOTE\r\noversample=SMOTE()\r\nX_train,y_train= oversample.fit_resample(X_train,y_train)", "/usr/local/lib/python3.7/dist-packages/sklearn/externals/six.py:31: FutureWarning: The module is deprecated in version 0.21 and will be removed in version 0.23 since we've dropped support for Python 2.7. Please rely on the official version of six (https://pypi.org/project/six/).\n \"(https://pypi.org/project/six/).\", FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.neighbors.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.neighbors. 
Anything that cannot be imported from sklearn.neighbors is now part of the private API.\n warnings.warn(message, FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n" ] ], [ [ "## Applying Logistic Regression", "_____no_output_____" ] ], [ [ "classifier.fit(X_train, y_train)", "/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ], [ "print(classifier.intercept_, classifier.coef_)", "[-3.04955805] [[ 0.09881579 -0.61843021 -0.08189188 0.98817601 -0.34140006 -0.14366433\n 0.79005715 -0.78762421 -0.40296671 -0.99835478 0.58143513 -1.0343642\n -0.49650436 -1.44568553 0.01833729 -0.58885464 -0.74998263 -0.06751787\n -0.00345061 -0.14034109 0.24413152 0.51893844 -0.16940419 -0.82928158\n 0.17574159 -0.36855749 0.23615798 0.65471445 -0.00394447]]\n" ], [ "pred = classifier.predict(X_train)", "_____no_output_____" ], [ "print ('Accuracy from sk-learn after applying SMOTE: {}'.format(classifier.score(X_train,y_train)))", "Accuracy from sk-learn after applying SMOTE: 0.9583333333333334\n" ] ], [ [ "## Confusion Matrix", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_train, pred)\r\nplot_confusion_matrix(cm,class_names)", "_____no_output_____" ] ], [ [ "### Precision, Recall, F1-Score, Mean Absolute Error, Mean Percentage Error and Mean Squared Error", "_____no_output_____" ] ], [ [ "report= classification_report(y_train,pred)\r\nprint(report)", " precision recall f1-score support\n\n 0 0.94 0.98 0.96 300\n 1 0.98 0.93 0.96 300\n\n accuracy 0.96 600\n macro avg 0.96 0.96 0.96 600\nweighted avg 0.96 0.96 0.96 600\n\n" ], [ "mean_abs_error = mean_absolute_error(y_train,pred)\r\nmean_abs_percentage_error = np.mean(np.abs((y_train - pred) // y_train))\r\nmse= mean_squared_error(y_train,pred)\r\nr_squared_error = r2_score(y_train,pred)\r\nprint(\"Mean absolute error : {} \\nMean Absolute Percentage error : {}\\nMean Squared Error : {}\\nR Squared Error: {}\".format(mean_abs_error,mean_abs_percentage_error,mse,r_squared_error))", "Mean absolute error : 0.041666666666666664 \nMean Absolute Percentage error : 0.03333333333333333\nMean Squared Error : 0.041666666666666664\nR Squared Error: 0.8333333333333334\n" ] ], [ [ "## Testing Error", "_____no_output_____" ] ], [ [ "pred = classifier.predict(X_test_all)", "_____no_output_____" ], [ "print ('Accuracy from sk-learn: {}'.format(classifier.score(X_test_all,y_test_all)))", "Accuracy from sk-learn: 0.952710170836826\n" ] ], [ [ "### Confusion Matrix", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_test_all, pred)\r\nplot_confusion_matrix(cm,class_names)", "_____no_output_____" ] ], [ [ "### Precision, Recall, F1-Score, Mean Absolute Error, Mean Percentage Error and Mean Squared Error", "_____no_output_____" ] ], [ [ "report= classification_report(y_test_all,pred)\r\nprint(report)", " precision recall f1-score support\n\n 0 1.00 0.95 0.98 134608\n 1 0.03 0.92 
0.05 199\n\n accuracy 0.95 134807\n macro avg 0.51 0.94 0.52 134807\nweighted avg 1.00 0.95 0.97 134807\n\n" ], [ "mean_abs_error = mean_absolute_error(y_test_all,pred)\r\nmean_abs_percentage_error = np.mean(np.abs((y_test_all - pred) // y_test_all))\r\nmse= mean_squared_error(y_test_all,pred)\r\nr_squared_error = r2_score(y_test_all,pred)\r\nprint(\"Mean absolute error : {} \\nMean Absolute Percentage error : {}\\nMean Squared Error : {}\\nR Squared Error: {}\".format(mean_abs_error,mean_abs_percentage_error,mse,np.abs(r_squared_error)/100))", "Mean absolute error : 0.04728982916317402 \nMean Absolute Percentage error : 0.0001186881986840446\nMean Squared Error : 0.04728982916317402\nR Squared Error: 0.310825356202742\n" ] ], [ [ "## Hyperparameter Tuning", "_____no_output_____" ] ], [ [ "classifier_b = LogisticRegression(class_weight={0:0.6,1:0.4})", "_____no_output_____" ], [ "classifier_b.fit(X_train,y_train)", "/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)\n" ], [ "pred_b = classifier_b.predict(X_test_all)", "_____no_output_____" ], [ "print(classifier_b.intercept_, classifier_b.coef_)", "[-3.45739348] [[ 4.35048957e-03 -2.51664252e-01 -3.61740218e-02 8.26839368e-01\n -2.50220585e-01 -2.74290597e-01 6.15419368e-01 -6.34898774e-01\n -1.82517507e-01 -6.05270468e-01 4.67967748e-01 -7.59020169e-01\n -4.98490194e-01 -1.12792340e+00 -9.47965834e-04 -3.66086059e-01\n -4.24973567e-01 -5.02473317e-02 1.24269131e-02 -1.91698161e-01\n 2.03490667e-01 3.39239798e-01 -2.17260243e-01 -7.51611739e-01\n 2.98256733e-01 -2.80065468e-01 1.67962169e-01 6.57350969e-01\n -1.26442642e-03]]\n" ], [ "print ('Accuracy from sk-learn after hyperpaarameter tuning: {}'.format(classifier_b.score(X_test_all,y_test_all)))", "Accuracy from sk-learn after hyperpaarameter tuning: 0.9706024167884457\n" ] ], [ [ "### Confusion Matrix", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_test_all, pred_b)\r\nplot_confusion_matrix(cm,class_names)", "_____no_output_____" ] ], [ [ "### Precision, Recall, F1-Score, Mean Absolute Error, Mean Percentage Error and Mean Squared Error", "_____no_output_____" ] ], [ [ "report= classification_report(y_test_all,pred_b)\r\nprint(report)", " precision recall f1-score support\n\n 0 1.00 0.97 0.99 134608\n 1 0.04 0.91 0.08 199\n\n accuracy 0.97 134807\n macro avg 0.52 0.94 0.53 134807\nweighted avg 1.00 0.97 0.98 134807\n\n" ], [ "mean_abs_error = mean_absolute_error(y_test_all,pred_b)\r\nmean_abs_percentage_error = np.mean(np.abs((y_test_all - pred_b) // y_test_all))\r\nmse= mean_squared_error(y_test_all,pred_b)\r\nr_squared_error = r2_score(y_test_all,pred_b)\r\nprint(\"Mean absolute error : {} \\nMean Absolute Percentage error : {}\\nMean Squared Error : {}\\nR Squared Error: {}\".format(mean_abs_error,mean_abs_percentage_error,mse,np.abs(r_squared_error)/100))", "Mean absolute error : 0.029397583211554296 \nMean Absolute Percentage error : 0.00013352422351955016\nMean Squared Error : 0.029397583211554296\nR Squared Error: 0.18944013907944574\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec57599cceb68d84190dd19c85a21467fef2bcb3
37,282
ipynb
Jupyter Notebook
code/evaluate.ipynb
frankaging/multimodal_emotion_detection
f289241bea518aaf28e947c0be86d2fe843b397c
[ "MIT" ]
1
2022-02-26T22:42:25.000Z
2022-02-26T22:42:25.000Z
code/evaluate.ipynb
frankaging/multimodal_emotion_detection
f289241bea518aaf28e947c0be86d2fe843b397c
[ "MIT" ]
null
null
null
code/evaluate.ipynb
frankaging/multimodal_emotion_detection
f289241bea518aaf28e947c0be86d2fe843b397c
[ "MIT" ]
null
null
null
39.368532
414
0.548629
[ [ [ "#### Evaluate Script of Trained Models", "_____no_output_____" ] ], [ [ "from run_multimodal_time_series import *\nfrom collections import OrderedDict\nimport csv", "_____no_output_____" ], [ "use_target_ratings=True", "_____no_output_____" ], [ "# loading model from saved model.\nmodel = MultimodalEmotionPrediction()\nnew_state_dict = OrderedDict()\nDEVICE = torch.device('cpu') # 'cpu' in this case\nif use_target_ratings:\n model_path = \"../target/best_ccc_pytorch_model.bin\"\nelse:\n model_path = \"../observer/best_ccc_pytorch_model.bin\"\nprint(\"loading the model from: \", model_path)\nstate_dict = torch.load(model_path, map_location=DEVICE)[\"model\"]\nfor k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\nmodel.load_state_dict(new_state_dict)\n_ = model.eval()", "Some weights of the model checkpoint at bert-base-uncased were not used when initializing LinguisticEncoderBERT: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias']\n- This IS expected if you are initializing LinguisticEncoderBERT from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing LinguisticEncoderBERT from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ], [ "if use_target_ratings:\n print(\"WARNING: use_target_ratings is setting to TRUE.\")\n modality_dir_map = {\"acoustic\": \"acoustic-egemaps\", \n \"linguistic\": \"linguistic-word-level\", # we don't load features\n \"visual\": \"image-raw\", # image is nested,\n \"target\": \"target\"}\n preprocess = {\n 'acoustic': lambda df : df.loc[:,' F0semitoneFrom27.5Hz_sma3nz_amean':' equivalentSoundLevel_dBp'],\n 'acoustic_timer': lambda df : df.loc[:,' frameTime'],\n 'linguistic': lambda df : df.loc[:,'word'],\n 'linguistic_timer': lambda df : df.loc[:,'time-offset'],\n 'target': lambda df : ((df.loc[:,' rating'] / 0.5) - 1.0),\n 'target_timer': lambda df : df.loc[:,'time'],\n }\nelse:\n modality_dir_map = {\"acoustic\": \"acoustic-egemaps\", \n \"linguistic\": \"linguistic-word-level\", # we don't load features\n \"visual\": \"image-raw\", # image is nested,\n \"target\": \"observer_EWE\"}\n preprocess = {\n 'acoustic': lambda df : df.loc[:,' F0semitoneFrom27.5Hz_sma3nz_amean':' equivalentSoundLevel_dBp'],\n 'acoustic_timer': lambda df : df.loc[:,' frameTime'],\n 'linguistic': lambda df : df.loc[:,'word'],\n 'linguistic_timer': lambda df : df.loc[:,'time-offset'],\n 'target': lambda df : ((df.loc[:,'evaluatorWeightedEstimate'] / 50.0) - 1.0),\n 'target_timer': lambda df : df.loc[:,'time'],\n }", "WARNING: use_target_ratings is setting to TRUE.\n" ], [ "if use_target_ratings:\n output_dir = \"../data-files/target/\"\nelse:\n output_dir = \"../data-files/observer/\"", "_____no_output_____" ], [ "tokenizer = AutoTokenizer.from_pretrained(\n \"bert-base-uncased\",\n use_fast=False,\n cache_dir=\"../.huggingface_cache/\"\n)", "_____no_output_____" ], [ "# Loading all the data partitions.\ndata_dir = \"../../SENDv1-data/\"\ntrain_modalities_data_dir = os.path.join(data_dir, \"features/Train/\")\ntrain_target_data_dir 
= os.path.join(data_dir, \"ratings/Train\")\ntrain_SEND_features = preprocess_SEND_files(\n train_modalities_data_dir,\n train_target_data_dir,\n use_target_ratings,\n modality_dir_map=modality_dir_map,\n preprocess=preprocess,\n linguistic_tokenizer=tokenizer,\n max_number_of_file=-1\n)\n\ndev_modalities_data_dir = os.path.join(data_dir, \"features/Valid/\")\ndev_target_data_dir = os.path.join(data_dir, \"ratings/Valid\")\ndev_SEND_features = preprocess_SEND_files(\n dev_modalities_data_dir,\n dev_target_data_dir,\n use_target_ratings,\n modality_dir_map=modality_dir_map,\n preprocess=preprocess,\n linguistic_tokenizer=tokenizer,\n max_number_of_file=-1\n)\n\ntest_modalities_data_dir = os.path.join(data_dir, \"features/Test/\")\ntest_target_data_dir = os.path.join(data_dir, \"ratings/Test\")\ntest_SEND_features = preprocess_SEND_files(\n test_modalities_data_dir,\n test_target_data_dir,\n use_target_ratings,\n modality_dir_map=modality_dir_map,\n preprocess=preprocess,\n linguistic_tokenizer=tokenizer,\n max_number_of_file=-1\n)", "_____no_output_____" ], [ "# Put dataset into correct format.\ntrain_video_id = [video_struct[\"video_id\"] for video_struct in train_SEND_features]\ntrain_input_a_feature = torch.stack([video_struct[\"a_feature\"] for video_struct in train_SEND_features]).float()\ntrain_input_l_feature = torch.stack([video_struct[\"l_feature\"] for video_struct in train_SEND_features])\ntrain_input_l_mask = torch.stack([video_struct[\"l_mask\"] for video_struct in train_SEND_features])\ntrain_input_l_segment_ids = torch.stack([video_struct[\"l_segment_ids\"] for video_struct in train_SEND_features])\ntrain_input_v_feature = torch.stack([video_struct[\"v_feature\"] for video_struct in train_SEND_features]).float()\ntrain_rating_labels = torch.stack([video_struct[\"rating\"] for video_struct in train_SEND_features]).float()\ntrain_seq_lens = torch.tensor([[video_struct[\"seq_len\"]] for video_struct in train_SEND_features]).float()\ntrain_input_mask = torch.stack([video_struct[\"input_mask\"] for video_struct in train_SEND_features])\ntrain_data = TensorDataset(\n train_input_a_feature, \n train_input_l_feature, train_input_l_mask, train_input_l_segment_ids,\n train_input_v_feature, train_rating_labels, train_seq_lens, train_input_mask\n)\ntrain_dataloader = DataLoader(train_data, batch_size=1, shuffle=False)\n\ndev_video_id = [video_struct[\"video_id\"] for video_struct in dev_SEND_features]\ndev_input_a_feature = torch.stack([video_struct[\"a_feature\"] for video_struct in dev_SEND_features]).float()\ndev_input_l_feature = torch.stack([video_struct[\"l_feature\"] for video_struct in dev_SEND_features])\ndev_input_l_mask = torch.stack([video_struct[\"l_mask\"] for video_struct in dev_SEND_features])\ndev_input_l_segment_ids = torch.stack([video_struct[\"l_segment_ids\"] for video_struct in dev_SEND_features])\ndev_input_v_feature = torch.stack([video_struct[\"v_feature\"] for video_struct in dev_SEND_features]).float()\ndev_rating_labels = torch.stack([video_struct[\"rating\"] for video_struct in dev_SEND_features]).float()\ndev_seq_lens = torch.tensor([[video_struct[\"seq_len\"]] for video_struct in dev_SEND_features]).float()\ndev_input_mask = torch.stack([video_struct[\"input_mask\"] for video_struct in dev_SEND_features])\ndev_data = TensorDataset(\n dev_input_a_feature, \n dev_input_l_feature, dev_input_l_mask, dev_input_l_segment_ids,\n dev_input_v_feature, dev_rating_labels, dev_seq_lens, dev_input_mask\n)\ndev_dataloader = DataLoader(dev_data, batch_size=1, 
shuffle=False)\n\ntest_video_id = [video_struct[\"video_id\"] for video_struct in test_SEND_features]\ntest_input_a_feature = torch.stack([video_struct[\"a_feature\"] for video_struct in test_SEND_features]).float()\ntest_input_l_feature = torch.stack([video_struct[\"l_feature\"] for video_struct in test_SEND_features])\ntest_input_l_mask = torch.stack([video_struct[\"l_mask\"] for video_struct in test_SEND_features])\ntest_input_l_segment_ids = torch.stack([video_struct[\"l_segment_ids\"] for video_struct in test_SEND_features])\ntest_input_v_feature = torch.stack([video_struct[\"v_feature\"] for video_struct in test_SEND_features]).float()\ntest_rating_labels = torch.stack([video_struct[\"rating\"] for video_struct in test_SEND_features]).float()\ntest_seq_lens = torch.tensor([[video_struct[\"seq_len\"]] for video_struct in test_SEND_features]).float()\ntest_input_mask = torch.stack([video_struct[\"input_mask\"] for video_struct in test_SEND_features])\ntest_data = TensorDataset(\n test_input_a_feature, \n test_input_l_feature, test_input_l_mask, test_input_l_segment_ids,\n test_input_v_feature, test_rating_labels, test_seq_lens, test_input_mask\n)\ntest_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)", "_____no_output_____" ], [ "def evaluate_ablation(\n video_id, dataloader, model, condition=\"A,V,L\"\n):\n ret = {}\n video_index = 0\n pbar = tqdm(dataloader, desc=\"videos\")\n for step, batch in enumerate(pbar):\n vid_id = video_id[video_index]\n ret[vid_id] = {}\n # print(f\"analyzing ablation studies on video_id={vid_id}\")\n input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids, \\\n input_v_feature, rating_labels, seq_lens, input_mask = batch\n # based one condition, we need to mask out some channels!\n if \"A\" not in condition:\n input_a_feature = torch.zeros_like(input_a_feature)\n if \"V\" not in condition:\n input_v_feature = torch.zeros_like(input_v_feature)\n if \"L\" not in condition:\n input_l_feature = torch.zeros_like(input_l_feature)\n _, output = \\\n model(input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids,\n input_v_feature, rating_labels, input_mask)\n seq_l = int(seq_lens[0].tolist()[0])\n pred = output[0][:seq_l].cpu().detach().numpy()\n true = rating_labels[0][:seq_l].cpu().detach().numpy()\n ccc = eval_ccc(pred, true)\n ret[vid_id][\"pred\"] = pred\n ret[vid_id][\"true\"] = true\n video_index += 1\n return ret", "_____no_output_____" ], [ "conditions = [\"A,V,L\", \"A,V\", \"A,L\", \"V,L\", \"A\", \"V\", \"L\"]\nmega_results = {}\nfor condition in conditions:\n print(\"analyzing results for condition: \", condition)\n train_results = evaluate_ablation(\n train_video_id, train_dataloader, model,\n condition=condition\n )\n \n dev_results = evaluate_ablation(\n dev_video_id, dev_dataloader, model,\n condition=condition\n )\n\n test_results = evaluate_ablation(\n test_video_id, test_dataloader, model,\n condition=condition\n )\n mega_results[condition] = {}\n for k,v in train_results.items():\n mega_results[condition][k] = v\n for k,v in dev_results.items():\n mega_results[condition][k] = v\n for k,v in test_results.items():\n mega_results[condition][k] = v", "\rvideos: 0%| | 0/114 [00:00<?, ?it/s]" ], [ "print(\"output dir: \", output_dir)", "output dir: ../data-files/target/\n" ], [ "# for each video, we are creating a file to save ratings for all conditions.\nfor video in mega_results[\"A,V,L\"].keys():\n with open(os.path.join(output_dir, f\"{video}.csv\"), \"w\") as csv_file:\n writer = csv.writer(csv_file, 
delimiter=',')\n headers = [c for c in conditions]\n headers += [\"actual\"]\n writer.writerow(headers)\n s_len = len(mega_results[\"A,V,L\"][video][\"pred\"])\n for i in range(s_len): # write line by line.\n row = []\n for condition in conditions:\n norm_r = (mega_results[condition][video][\"pred\"][i]+1.0)/2.0\n row.append(norm_r)\n norm_r = (mega_results[condition][video][\"true\"][i]+1.0)/2.0\n row.append(norm_r)\n writer.writerow(row)", "_____no_output_____" ], [ "with open(\"../data-files/train_ids.csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n headers = [\"vid_id\"]\n writer.writerow(headers)\n for vid_id in train_video_id:\n writer.writerow([vid_id])\nwith open(\"../data-files/dev_ids.csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n headers = [\"vid_id\"]\n writer.writerow(headers)\n for vid_id in dev_video_id:\n writer.writerow([vid_id])\nwith open(\"../data-files/test_ids.csv\", \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n headers = [\"vid_id\"]\n writer.writerow(headers)\n for vid_id in test_video_id:\n writer.writerow([vid_id])", "_____no_output_____" ] ], [ [ "#### Evaluate with Hebrew Videos", "_____no_output_____" ] ], [ [ "use_target_ratings = True", "_____no_output_____" ], [ "# loading model from saved model.\nmodel = MultimodalEmotionPrediction()\nnew_state_dict = OrderedDict()\nDEVICE = torch.device('cpu') # 'cpu' in this case\nif use_target_ratings:\n model_path = \"../target/best_ccc_pytorch_model.bin\"\nelse:\n model_path = \"../observer/best_ccc_pytorch_model.bin\"\nprint(\"loading the model from: \", model_path)\nstate_dict = torch.load(model_path, map_location=DEVICE)[\"model\"]\nfor k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\nmodel.load_state_dict(new_state_dict)\n_ = model.eval()", "Some weights of the model checkpoint at bert-base-uncased were not used when initializing LinguisticEncoderBERT: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias']\n- This IS expected if you are initializing LinguisticEncoderBERT from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing LinguisticEncoderBERT from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ], [ "if use_target_ratings:\n output_dir = \"../data-files/target_hebrew/\"\nelse:\n output_dir = \"../data-files/observer_hebrew/\"", "_____no_output_____" ], [ "def preprocess_HEBREW_files(\n data_dir, # Multitmodal X\n time_window_in_sec=4.0,\n modality_dir_map = {\"acoustic\": \"acoustic-egemaps\", \n \"linguistic\": \"linguistic-word-level\", # we don't load features\n \"visual\": \"image-raw\", # image is nested,\n \"target\": \"observer_EWE\",\n },\n preprocess= {'acoustic': lambda df : df.loc[:,' F0semitoneFrom27.5Hz_sma3nz_amean':' equivalentSoundLevel_dBp'],\n 'acoustic_timer': lambda df : df.loc[:,' frameTime'],\n 'linguistic': lambda df : df.loc[:,'word'],\n 'linguistic_timer': lambda df : df.loc[:,'time-offset'],\n 'target': lambda df : ((df.loc[:,'evaluatorWeightedEstimate'] / 50.0) - 1.0),\n 'target_timer': lambda df : df.loc[:,'time'],\n },\n pad_symbol=0,\n max_number_of_file=-1\n):\n SEND_videos = []\n \n # basically, let us gett all the video ids?\n a_ids = [f.split(\"_\")[0]+\"_\"+f.split(\"_\")[1] \n for f in listdir(os.path.join(data_dir, modality_dir_map[\"acoustic\"])) \n if isfile(os.path.join(data_dir, modality_dir_map[\"acoustic\"], f))]\n v_ids = [f.split(\"_\")[0]+\"_\"+f.split(\"_\")[1] \n for f in listdir(os.path.join(data_dir, modality_dir_map[\"visual\"])) \n if f != \".DS_Store\"]\n\n if max_number_of_file != -1:\n logger.info(f\"WARNING: Only loading #{max_number_of_file} videos.\")\n max_seq_len = -1\n video_count = 0\n for video_id in a_ids: # pick any one!\n if max_number_of_file != -1 and video_count >= max_number_of_file:\n break # we enforce!\n if video_count > 1 and video_count%100 == 0:\n logger.info(f\"Processed #{len(SEND_videos)} videos.\")\n # logger.info(SEND_videos[-1])\n \n # we need to fix this to get features aligned.\n \n # Step 1: Load rating data, and we can get window partitioned according to our interval.\n a_file = os.path.join(data_dir, modality_dir_map[\"acoustic\"], f\"{video_id}_acousticFeatures.csv\")\n a_df = pd.read_csv(a_file)\n a_features = np.array(preprocess[\"acoustic\"](a_df))\n a_timestamps = np.array(preprocess[\"acoustic_timer\"](a_df))\n windows = []\n number_of_window = int(max(a_timestamps)//time_window_in_sec)\n for i in range(0, number_of_window):\n windows += [(i*time_window_in_sec, (i+1)*time_window_in_sec)]\n if max(a_timestamps) > (i+1)*time_window_in_sec:\n windows += [((i+1)*time_window_in_sec, max(a_timestamps))]\n # [(0, 5], (5, 10], ...]\n\n # acoustic features process\n a_file = os.path.join(data_dir, modality_dir_map[\"acoustic\"], f\"{video_id}_acousticFeatures.csv\")\n a_df = pd.read_csv(a_file)\n a_features = np.array(preprocess[\"acoustic\"](a_df))\n a_timestamps = np.array(preprocess[\"acoustic_timer\"](a_df))\n a_feature_dim = a_features.shape[1]\n assert a_features.shape[0] == a_timestamps.shape[0]\n sampled_a_features_raw = [[] for i in range(len(windows))]\n for i in range(0, a_timestamps.shape[0]):\n # using mod to hash to the correct bucket.\n hash_in_window = int(a_timestamps[i]//time_window_in_sec)\n if hash_in_window >= len(windows):\n continue # we cannot predict after ratings max.\n sampled_a_features_raw[hash_in_window].append(a_features[i])\n sampled_a_features = 
[]\n for window in sampled_a_features_raw:\n # only acoustic need to consider this I think.\n if len(window) == 0:\n collate_window = np.zeros(a_feature_dim)\n else:\n collate_window = np.mean(np.array(window), axis=0)\n sampled_a_features.append(collate_window)\n\n # visual features process\n # for visual, we actually need to active control what image we load, we\n # cannot just load all images, it will below memory.\n fps=30 # We may need to dynamically figure out this number?\n frame_names = []\n for f in listdir(os.path.join(data_dir, modality_dir_map[\"visual\"], video_id)):\n if \".jpg\" in f:\n frame_names += [(int(f.split(\"_\")[0][5:])*(1.0/fps), f)]\n frame_names.sort(key=lambda x:x[0])\n sampled_v_features_raw = [[] for i in range(len(windows))]\n for f in frame_names:\n # using mod to hash to the correct bucket.\n hash_in_window = int(f[0]//time_window_in_sec)\n if hash_in_window >= len(windows):\n continue # we cannot predict after ratings max.\n sampled_v_features_raw[hash_in_window].append(f)\n\n sampled_v_features = []\n for window in sampled_v_features_raw:\n if len(window) == 0:\n f_data = np.zeros((224,224,3))\n else:\n # we collate by using the last frame in the time window.\n f = window[-1]\n f_path = os.path.join(data_dir, modality_dir_map[\"visual\"], video_id, f[1])\n f_image = Image.open(f_path)\n f_data = asarray(f_image)\n sampled_v_features.append(f_data)\n \n max_window_cutoff_a = int(max(a_timestamps)//time_window_in_sec)\n max_window_cutoff_v = int(frame_names[-1][0]//time_window_in_sec)\n max_window_cutoff = min([max_window_cutoff_a, max_window_cutoff_v])\n sampled_a_features = sampled_a_features[:max_window_cutoff]\n sampled_v_features = sampled_v_features[:max_window_cutoff]\n \n video_struct = {\n \"video_id\": video_id,\n \"a_feature\": sampled_a_features,\n \"v_feature\": sampled_v_features,\n \"seq_len\": len(sampled_a_features),\n \"input_mask\": np.ones(len(sampled_a_features)).tolist()\n }\n video_count += 1\n SEND_videos += [video_struct]\n if len(sampled_a_features) > max_seq_len:\n max_seq_len = len(sampled_a_features)\n \n # padding based on length\n for video_struct in SEND_videos:\n for i in range(max_seq_len-video_struct[\"seq_len\"]):\n video_struct[\"a_feature\"].append(np.zeros(a_feature_dim))\n video_struct[\"v_feature\"].append(np.zeros((224,224,3)))\n video_struct[\"input_mask\"].append(0)\n\n video_struct[\"a_feature\"] = torch.tensor(video_struct[\"a_feature\"])\n video_struct[\"v_feature\"] = torch.tensor(video_struct[\"v_feature\"])\n video_struct[\"input_mask\"] = torch.LongTensor(video_struct[\"input_mask\"])\n \n return SEND_videos", "_____no_output_____" ], [ "# Loading all the data partitions.\ndata_dir = \"../../SENDv1-data/\"\ntest_modalities_data_dir = os.path.join(data_dir, \"features/Test-Hebrew/\")\ntest_HEBREW_features = preprocess_HEBREW_files(\n test_modalities_data_dir,\n modality_dir_map=modality_dir_map,\n preprocess=preprocess,\n max_number_of_file=-1\n)", "_____no_output_____" ], [ "test_video_id = [video_struct[\"video_id\"] for video_struct in test_HEBREW_features]\ntest_input_a_feature = torch.stack([video_struct[\"a_feature\"] for video_struct in test_HEBREW_features]).float()\ntest_input_v_feature = torch.stack([video_struct[\"v_feature\"] for video_struct in test_HEBREW_features]).float()\ntest_seq_lens = torch.tensor([[video_struct[\"seq_len\"]] for video_struct in test_HEBREW_features]).float()\ntest_input_mask = torch.stack([video_struct[\"input_mask\"] for video_struct in 
test_HEBREW_features])\ntest_data = TensorDataset(\n test_input_a_feature, \n test_input_v_feature, \n test_seq_lens, test_input_mask\n)\ntest_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)", "_____no_output_____" ], [ "def evaluate_ablation(\n video_id, dataloader, model, condition=\"A,V\"\n):\n ret = {}\n video_index = 0\n pbar = tqdm(dataloader, desc=\"videos\")\n for step, batch in enumerate(pbar):\n vid_id = video_id[video_index]\n ret[vid_id] = {}\n # print(f\"analyzing ablation studies on video_id={vid_id}\")\n input_a_feature, input_v_feature, seq_lens, input_mask = batch \n \n # based one condition, we need to mask out some channels!\n if \"A\" not in condition:\n input_a_feature = torch.zeros_like(input_a_feature)\n if \"V\" not in condition:\n input_v_feature = torch.zeros_like(input_v_feature)\n\n # mock linguistic and rating data.\n batch = input_a_feature.shape[0]\n seq_l = input_a_feature.shape[1]\n input_l_feature = torch.zeros((batch, seq_l, 3)).long()\n input_l_mask = torch.ones((batch, seq_l, 3)).long()\n input_l_segment_ids = torch.zeros((batch, seq_l, 3)).long()\n rating_labels = torch.zeros((batch, seq_l))\n \n _, output = \\\n model(input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids,\n input_v_feature, rating_labels, input_mask)\n seq_l = int(seq_lens[0].tolist()[0])\n pred = output[0][:seq_l].cpu().detach().numpy()\n true = rating_labels[0][:seq_l].cpu().detach().numpy()\n ccc = eval_ccc(pred, true)\n ret[vid_id][\"pred\"] = pred\n ret[vid_id][\"true\"] = true\n video_index += 1\n return ret", "_____no_output_____" ], [ "mega_results = {}", "_____no_output_____" ], [ "conditions = [\"A,V\", \"A\", \"V\",]\nfor condition in conditions:\n print(\"analyzing results for condition: \", condition)\n\n test_results = evaluate_ablation(\n test_video_id, test_dataloader, model,\n condition=condition\n )\n mega_results[condition] = {}\n for k,v in test_results.items():\n mega_results[condition][k] = v", "\rvideos: 0%| | 0/9 [00:00<?, ?it/s]" ], [ "print(\"output dir: \", output_dir)", "output dir: ../data-files/target_hebrew/\n" ], [ "# for each video, we are creating a file to save ratings for all conditions.\nconditions = [\"A,V\", \"A\", \"V\",]\nfor video in mega_results[\"A,V\"].keys():\n with open(os.path.join(output_dir, f\"{video}.csv\"), \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n headers = [c for c in conditions]\n writer.writerow(headers)\n s_len = len(mega_results[\"A,V\"][video][\"pred\"])\n for i in range(s_len): # write line by line.\n row = []\n for condition in conditions:\n norm_r = (mega_results[condition][video][\"pred\"][i]+1.0)/2.0\n row.append(norm_r)\n writer.writerow(row)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5769bb00bc3d43be23e71cf611c6389cc65dbb
3,098
ipynb
Jupyter Notebook
camera_calibration.ipynb
kadway/SelfDrivingCarND-Proj2
c84f2430e5879b812b4c694f903a8c2aeaa9e514
[ "MIT" ]
null
null
null
camera_calibration.ipynb
kadway/SelfDrivingCarND-Proj2
c84f2430e5879b812b4c694f903a8c2aeaa9e514
[ "MIT" ]
null
null
null
camera_calibration.ipynb
kadway/SelfDrivingCarND-Proj2
c84f2430e5879b812b4c694f903a8c2aeaa9e514
[ "MIT" ]
null
null
null
33.673913
105
0.553906
[ [ [ "import pickle\nimport numpy as np\nimport cv2\nimport glob\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('camera_cal/cal*.jpg')\n\n# Step through the list and search for chessboard corners\nfor idx, fname in enumerate(images):\n img = cv2.imread(fname)\n img_name = fname[len('camera_cal/'):]\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6), None)\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n cv2.drawChessboardCorners(img, (9,6), corners, ret)\n write_name = 'corners_found/corners_'+ img_name\n cv2.imwrite(write_name, img)\n #cv2.imshow(img_name, img)\n #cv2.waitKey(500)\n #cv2.destroyAllWindows()\n\nimages = glob.glob('camera_cal/cal*.jpg')\nfor idx, fname in enumerate(images):\n\n img = cv2.imread(fname)\n img_name = fname[len('camera_cal/'):]\n img_size = (img.shape[1], img.shape[0])\n\n # Do camera calibration given object points and image points\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n dst = cv2.undistort(img, mtx, dist, None, mtx)\n cv2.imwrite('camera_chess_undist/chess_undist_' + img_name, dst)\n #cv2.imshow(img_name, dst)\n #cv2.waitKey(500)\n #cv2.destroyAllWindows()\n\n# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\ndist_pickle = {}\ndist_pickle[\"mtx\"] = mtx\ndist_pickle[\"dist\"] = dist\npickle.dump( dist_pickle, open( \"camera_cal/dist_pickle.p\", \"wb\" ) )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec576ee3da405ff2f028f6cd32e01fdf90677b74
302,120
ipynb
Jupyter Notebook
notebooks/paper_error_vs_slope_Fig_SI4.ipynb
ekkrym/CovidTrendModel
26f49b0e3bf18ef2de5f9ec15f85e8148618c135
[ "MIT" ]
null
null
null
notebooks/paper_error_vs_slope_Fig_SI4.ipynb
ekkrym/CovidTrendModel
26f49b0e3bf18ef2de5f9ec15f85e8148618c135
[ "MIT" ]
null
null
null
notebooks/paper_error_vs_slope_Fig_SI4.ipynb
ekkrym/CovidTrendModel
26f49b0e3bf18ef2de5f9ec15f85e8148618c135
[ "MIT" ]
null
null
null
893.846154
87,272
0.946276
[ [ [ "%load_ext autoreload\n%autoreload 2\nimport numpy as np\nfrom datetime import datetime\nimport os\nimport glob\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append('../source/')\nsys.path.append('../methods_configuration/')\nimport more_itertools\nimport datetime\nimport os\nimport json\nfrom precomputing import read_countrydata, read_countries \nfrom datetime import datetime", "_____no_output_____" ], [ "from countries_preselected import countries_preselected, countries_JHU\nfrom paper_evaluation import evaluation_AE \ncurrent_value = 'JHU'\nH, number_startcases = 7, 0\nerror_estimation, error_results = \"MAE\", \"MAE\" ", "_____no_output_____" ], [ "path_data = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\" \ndatasource, parse_column = \"JHU\", \"Country/Region\"\n \ncountries = countries_preselected\ncountries = list(set(countries_JHU)-set([\"Cases_on_an_international_conveyance_Japan\", \n \"Diamond_Princess\", \"Diamond Princess\"])) \n ", "_____no_output_____" ], [ "stl, baseline, target = evaluation_AE(countries)", "_____no_output_____" ], [ " # ============================================================================\n# EXAMPLE -- MAIN\n# ============================================================================\n\n# ------------------------------------------------------------------------\n# parameters\n# ========================================================================\n\n# data for evaluation\nDATAPATH = '../data/evaluation_slope/cases/'\n\n# forecast horizon\nH = 7\n\n# period of time to consider\nstart_date = datetime.strptime('2020-04-01', \"%Y-%m-%d\")\nend_date = datetime.strptime('2021-01-31', \"%Y-%m-%d\")\nt0 = '2020-04-01'\nT = '2021-02-01'\n\n# for this example the trend is the convolution of the raw signal with a cubic spline\nw_cubic = np.convolve(np.convolve(np.convolve(np.ones(H),np.ones(H)),np.ones(H)),np.ones(H))\nw_cubic = w_cubic/np.sum(w_cubic)\n\n# ------------------------------------------------------------------------\n# variables to store estimates\n# ========================================================================\ne_list = [] # list to store errors\nt_list = [] # list to store trend\nstl_e_list = []\n\n# ------------------------------------------------------------------------\n# data loading\n# ========================================================================\n\n# get list of files\nfilelist = sorted(glob.glob(DATAPATH + \"*.csv\"))\n\n# get country/zone names\nall_zones = [os.path.splitext(os.path.basename(f))[0] for f in filelist] \n\nall_zones = list(set(countries_preselected).intersection(set(all_zones)))\n# ------------------------------------------------------------------------\n# loop over zones (region/country/sovereignty)\n# ========================================================================\nfor zz, zone in enumerate(all_zones):\n\n # read file\n df = pd.read_csv(DATAPATH+zone+'.csv')#filelist[zz])\n\n # get time series\n dates = [datetime.strptime(date, \"%Y-%m-%d\") for date in df['dates']]\n \n # get daily cases \n # start and end indices for the considered dates \n # target weekly forecast and baseline prediction\n target_ = target[zone] \n baseline_ = baseline[zone] \n stl_ = stl[zone]\n \n w_target = target_[(target_.index>=t0)&(target_.index<=T)]\n w_base = baseline_[(baseline_.index>=t0)&(baseline_.index<=T)]\n w_method = 
stl_[(stl_.index>=t0)&(stl_.index<=T)].sort_index() \n \n # get trend\n dy_raw = df['cases'].values\n dy_raw = np.maximum(dy_raw,0) \n dates = df['dates'] \n # trend is smooth weekly target\n ts = np.where(target_.index >= t0)[0][0]\n te = np.where(target_.index <= T)[0][-1]\n \n trend = np.convolve(target_.values[:,0],w_cubic,mode='same')[ts:te+1]\n \n w_base = w_base.to_numpy().squeeze()\n w_target = w_target.to_numpy().squeeze()\n w_method = w_method.to_numpy().squeeze()\n \n if zz<1:\n #checking the alignment of forecasts and the target\n plt.subplots(1,1,figsize=(24,4))\n plt.plot(w_target)\n plt.plot(w_base)\n plt.plot(w_method)\n plt.plot(trend)\n plt.grid()\n plt.title(zone)\n plt.legend([\"target\", \"base\", \"method\",\"trend\"])\n \n # normalization\n norm = (w_target)\n\n # relative absolute error\n bench_rel_err = np.abs(w_target-w_base)/(5+norm)\n stl_rel_err = np.abs(w_target-w_method)/(5+norm)\n #bench_rel_err = np.abs(w_target-w_base)/(1+trend[1:])\n #stl_rel_err = np.abs(w_target-w_method)/(1+trend[1:])\n \n # store trend and error \n e_list.append(np.ravel(bench_rel_err).copy())\n stl_e_list.append(np.ravel(stl_rel_err).copy())\n t_list.append(np.ravel(trend).copy())", "_____no_output_____" ] ], [ [ "## Fig SI 4 ", "_____no_output_____" ] ], [ [ "# plot aggregating multiple zones\n# ============================================================================\nbinn='auto'\n# average error w.r.t. slope\navg_err, bin_edges, bound_upper, bound_lower = error_slope(e_list,t_list,bins=binn)\nx_center = bin_edges[:-1] + np.diff(bin_edges)/2\n\navg_stl_err, bin_edges, bound_upper_stl, bound_lower_stl = error_slope(stl_e_list,t_list,bins=binn)\nstl_x_center = bin_edges[:-1] + np.diff(bin_edges)/2\n \n# display error\nfig,ax = plt.subplots(figsize=(12,6))\nplt.plot(x_center,avg_err,linewidth=2,label='baseline')\nplt.plot(stl_x_center,avg_stl_err,linewidth=2,label='stl') \n\nplt.xlabel('relative slope')\nplt.ylabel('average relative absolute error')\nplt.grid(); plt.legend(fontsize=14)\nplt.title('Aggregated Zones') \nfor item in [ax.title,ax.xaxis.label,ax.yaxis.label]+ax.get_xticklabels()+ax.get_yticklabels():\n item.set_fontsize(18)\n \n\n# restrict domain\nfig,ax = plt.subplots(figsize=(12,6))\nplt.plot(x_center,avg_err,linewidth=2,label='baseline')\nplt.plot(stl_x_center,avg_stl_err,linewidth=2,label='STL')\nplt.fill_between(stl_x_center, bound_lower_stl, bound_upper_stl ,linewidth=0,color=\"orange\",alpha=0.3)\nplt.fill_between(stl_x_center, bound_lower, bound_upper,linewidth=0,color=\"b\",alpha=0.3)\n#\nplt.xlim([-0.1,0.1])\nplt.ylim([0,1.2])\n#\nplt.xlabel('growth rate')\nplt.ylabel('MAPE')\nplt.grid(True,which=\"both\",ls=\"--\",c='gray') ; plt.legend(fontsize=14) \nfor item in [ax.title,ax.xaxis.label,ax.yaxis.label]+ax.get_xticklabels()+ax.get_yticklabels():\n item.set_fontsize(18)\n \n# smooth data\nL = 7\nh_filter = np.convolve(np.ones(L),np.ones(L)); h_filter = h_filter/np.sum(h_filter)\n#\nfig,ax = plt.subplots(figsize=(12,6))\nplt.plot(x_center,np.convolve(avg_err,h_filter,mode='same'),linewidth=2,label='baseline')\nplt.plot(stl_x_center,np.convolve(avg_stl_err,h_filter,mode='same'),linewidth=2,label='STL')\nplt.fill_between(stl_x_center, np.convolve(bound_lower_stl,h_filter,mode='same'), \n np.convolve(bound_upper_stl,h_filter,mode='same') ,linewidth=0,color=\"orange\",alpha=0.3)\nplt.fill_between(stl_x_center, np.convolve(bound_lower,h_filter,mode='same'),\n 
np.convolve(bound_upper,h_filter,mode='same'),linewidth=0,color=\"b\",alpha=0.3)\n#\nplt.xlim([-0.1,0.1])\nplt.ylim([0,0.7])\n#\nplt.xlabel('growth rate')\nplt.ylabel('average relative absolute error')\nplt.grid(); plt.legend(fontsize=14)\nplt.title('Aggregated Zones') \nfor item in [ax.title,ax.xaxis.label,ax.yaxis.label]+ax.get_xticklabels()+ax.get_yticklabels():\n item.set_fontsize(18)", "6.410256410256411\n12.055352236566128\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec577241c803a2aa582387b9fda1125cec7e6bc2
116,913
ipynb
Jupyter Notebook
Python Lectures/Coding 2/Coding 2.ipynb
amichuda/are106-python
99f66ebb0a7689c1b01ce7f448bdce86c869131d
[ "MIT" ]
1
2020-11-26T18:03:45.000Z
2020-11-26T18:03:45.000Z
Python Lectures/Coding 2/Coding 2.ipynb
amichuda/are106-python
99f66ebb0a7689c1b01ce7f448bdce86c869131d
[ "MIT" ]
null
null
null
Python Lectures/Coding 2/Coding 2.ipynb
amichuda/are106-python
99f66ebb0a7689c1b01ce7f448bdce86c869131d
[ "MIT" ]
null
null
null
39.834072
1,998
0.38166
[ [ [ "# Data and Regression in Python", "_____no_output_____" ], [ "## Non-standard Libraries\n\n- Python is good a lot of stuff\n - The standard library has lists, tuples, dictionaries, loops, many more things\n- But sometimes you need to use libraries outside of the standard library for whatever you're doing. For instance:\n - `BeautifulSoup` for web scraping\n - `numpy` for math/matrices\n - `itertools` for permutations/combinations and looping tools\n - `jupyter` comes with its own library for creating notebooks on the fly and converting notebooks to other filetypes\n - `matplotlib` for making nice graphs\n - `nltk` for natural language processing\n- In this class, we're going to be using two libraries:\n - `pandas` for data loading/cleaning/visualization\n - `statsmodels` for running regression\n", "_____no_output_____" ], [ "## What is an import?\n\n- An import tells Python that you want to use a particular library that isn't a standard one \n- Usually you put all import statements in the beginning of your script \n - Best practice is to put all imports in the beginning of the script to make it more readable and to allow any part of your code to call all your libraries\n - Since Python runs your script in order, it loads libraries first and understands when you call it afterwards.", "_____no_output_____" ] ], [ [ "numpy.array([1,2,3])", "_____no_output_____" ], [ "import numpy\n\nnumpy.array([1,2,3])", "_____no_output_____" ], [ "## Import statements\n\nimport pandas\nimport numpy\nimport statsmodels.api ", "_____no_output_____" ] ], [ [ "## Using Imported Libraries\n\n- Once you've imported your libraries, you can call the functions from those libraries using....\n\n- Periods! \n- The period after the library means that you're calling a \"method\" (function) or attribute (kind of like a characterizing feature) from within numpy", "_____no_output_____" ] ], [ [ "numpy.array([1,2,34]).var()\n\nmy_array = numpy.array([1,2,34])\n\nmy_array.var()", "_____no_output_____" ] ], [ [ "## Using import abbreviations\n\n- Sometimes we don't really want to keep writing numpy, pandas statsmodels.api in front of every command\n- We can use abbreviations in order to give our library a short name we can use", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport statsmodels.formula.api as sm", "_____no_output_____" ] ], [ [ "- Now we can call everything like so:\n\n`my_array = np.array([1,2,34])`", "_____no_output_____" ], [ "## Before embarking on Pandas and statsmodels\n\n- Before we learn about dataframes, we need to quickly learn about lambda functions\n- Lambda functions are a nice feature of Python as they allow you a way to write a function that you'll only really use once.\n- `lambda variable: expression`", "_____no_output_____" ] ], [ [ "sum_func = lambda x: x+1\n\nsum_func(3)", "_____no_output_____" ] ], [ [ "- As we'll see in a bit, we can use lambda functions to make new variables in a pandas dataframe.\n- Only in this case, the `x` would refer to an observation in a variable.\n - It will make more sense in a bit.", "_____no_output_____" ], [ "## Pandas ", "_____no_output_____" ], [ "- Pandas is a data library for Python, which is based on the idea of \"dataframes\" \n- You can think of a dataframe as a souped up dictionary or list\n- As in, a pandas dataframe has methods that are statistics specific\n - Calling a subset yields another dataframe\n - means, variances, sums\n - data merging\n - checking and filling in missing values\n - plotting data\n - Creating 
more \"columns\" using already existing columns", "_____no_output_____" ], [ "## What is a dataframe?\n\n- A Pandas dataframe is made up of: columns, rows and an index\n- column = variable\n- row = obervation\n- index is how we count the observations\n - we can use indexes in more complicated ways later\n - Foreshadowing: panel data\n - Household, year data\n- A \"vector\" of data is called a pandas \"Series\"\n - It's still part of pandas, but doesn't have exactly the same capabilities\n - A full dataframe is made up of many series put together\n ", "_____no_output_____" ] ], [ [ "## Load in raw_data\nraw_df = pd.read_csv(\"https://raw.githubusercontent.com/lordflaron/ARE106data/master/lawsch85.csv\")\nraw_df", "_____no_output_____" ], [ "raw_df.columns", "_____no_output_____" ], [ "raw_df.index", "_____no_output_____" ] ], [ [ "- To get just a snippet of data, we can use the `head()` and `tail()` methods\n - `head()` gets us the first observations\n - `tail()` get us the last observations\n - The default is 5, but can we change that by using the `n` option", "_____no_output_____" ] ], [ [ "raw_df.head()", "_____no_output_____" ], [ "raw_df.tail(n=10)", "_____no_output_____" ] ], [ [ "- To get a look at just one variable, you can do it one of two ways:\n - `raw_df.variable`\n - `raw_df['variable']`\n - Sort of like a dictionary!\n - if there are spaces in the variable name (there usually shouldn't be though), use the second\n \n", "_____no_output_____" ] ], [ [ "raw_df.salary.head()", "_____no_output_____" ], [ "raw_df['salary'].head()", "_____no_output_____" ] ], [ [ "- If you wanted to get just some variables (a subset), you can use a sort of slicing notation", "_____no_output_____" ] ], [ [ "subset = ['salary', 'GPA']\nraw_df[subset].head()", "_____no_output_____" ] ], [ [ "## Assigning a new column (variable)\n\n- Now let's think about how to make a new variable\n- Sometimes we might need to do that\n - Like in the homework, for example\n- How would we do the $1000*x$ operation in `pandas`?\n- Note: remember about *inplace* operations\n- assigning a new variable won't actually change the raw data itself", "_____no_output_____" ] ], [ [ "#raw_df.assign(salary_times_thousand = lambda x: x['salary']*1000, inplace=True)\nraw_df.assign(salary_times_thousand = lambda x: x['salary']*1000)", "_____no_output_____" ], [ "raw_df['salary_times_thousand']", "_____no_output_____" ], [ "df = raw_df.assign(salary_times_thousand = lambda x: x['salary']*1000)\n\ndf[['salary', 'salary_times_thousand']].head()", "_____no_output_____" ] ], [ [ "## Running Regressions\n\n- To run regressions, we need to set up a regression \"object\"\n- Regression comes in steps:\n - Setting up the object\n - Fitting the object\n - Looking at the results", "_____no_output_____" ] ], [ [ "mod = sm.ols('salary ~ GPA ', data=df) ## Like writing down the equation\nresults = mod.fit() ## Like doing the minimization problem \nresults.summary() ## Computing the numbers and showing in a table", "_____no_output_____" ] ], [ [ "## A Note on Patsy Regression Writing\n\n- The formula above `salary ~ GPA` is actually part of another package that `pandas` uses to write regression formulas, `patsy`\n- `patsy` is great for creating dataframes based on formulas\n- For more information on what you can do, check:\n [https://patsy.readthedocs.io/en/latest/formulas.html](https://patsy.readthedocs.io/en/latest/formulas.html)\n \n- As we'll see, it might be necessary to make more complicated formulas with our variables\n- `patsy` formulas are 
perfect for combining variables together, but if you need\n- You can also use numpy functions:\n - `np.power()` raising something to a power\n - ex. `np.power(salary,2)`\n - `np.log()` logging a variable\n - ex. `np.log(salary)`", "_____no_output_____" ] ], [ [ "### Let's take our variable from before, 1000*salary\nmod = sm.ols('GPA ~ salary_times_thousand', data=df) ## Like writing down the equation\nresults = mod.fit() ## Like doing the minimization problem \nresults.summary() ## Computing the numbers and showing in a table", "_____no_output_____" ] ], [ [ "## Appendix\n\nPatsy Formula language (from link above):\n\n~\n\nSeparates the left-hand side and right-hand side of a formula. Optional. If not present, then the formula is considered to contain a right-hand side only.\n\n\n+\n\nTakes the set of terms given on the left and the set of terms given on the right, and returns a set of terms that combines both (i.e., it computes a set union). Note that this means that a + a is just a.\n\n-\n\nTakes the set of terms given on the left and removes any terms which are given on the right (i.e., it computes a set difference).\n\n*\n\na * b is short-hand for a + b + a:b, and is useful for the common case of wanting to include all interactions between a set of variables while partitioning their variance between lower- and higher-order interactions. Standard ANOVA models are of the form a * b * c * ....\n\n/\n\nThis one is a bit quirky. a / b is shorthand for a + a:b, and is intended to be useful in cases where you want to fit a standard sort of ANOVA model, but b is nested within a, so a*b doesn’t make sense. So far so good. Also, if you have multiple terms on the right, then the obvious thing happens: a / (b + c) is equivalent to a + a:b + a:c (/ is rightward distributive over +). But, if you have multiple terms on the left, then there is a surprising special case: (a + b)/c is equivalent to a + b + a:b:c (and note that this is different from what you’d get out of a/c + b/c – / is not leftward distributive over +). Again, this is motivated by the idea of using this for nested variables. It doesn’t make sense for c to be nested within both a and b separately, unless b is itself nested in a – but if that were true, then you’d write a/b/c instead. So if we see (a + b)/c, we decide that a and b must be independent factors, but that c is nested within each combination of levels of a and b, which is what a:b:c gives us. If this is confusing, then my apologies… S has been working this way for >20 years, so it’s a bit late to change it now.\n\n:\n\nThis takes two sets of terms, and computes the interaction between each term on the left and each term on the right. So, for example, (a + b):(c + d) is the same as a:c + a:d + b:c + b:d. Calculating the interaction between two terms is also a kind of set union operation, but : takes the union of factors within two terms, while + takes the union of two sets of terms. Note that this means that a:a is just a, and (a:b):(a:c) is the same as a:b:c.\n\n**\n\nThis takes a set of terms on the left, and an integer n on the right, and computes the * of that set of terms with itself n times. This is useful if you want to compute all interactions up to order n, but no further.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5788bb44c92de6e479e0254ee42e151a378a5c
2,760
ipynb
Jupyter Notebook
01_core.ipynb
Liucd0520/test2
8302cd9be08da17359e7bfbb7db1b42840eac8bd
[ "Apache-2.0" ]
null
null
null
01_core.ipynb
Liucd0520/test2
8302cd9be08da17359e7bfbb7db1b42840eac8bd
[ "Apache-2.0" ]
null
null
null
01_core.ipynb
Liucd0520/test2
8302cd9be08da17359e7bfbb7db1b42840eac8bd
[ "Apache-2.0" ]
null
null
null
16.428571
49
0.46087
[ [ [ "# default_exp core2", "_____no_output_____" ] ], [ [ "## say hello module\n> this is a function for say hi to a person", "_____no_output_____" ], [ "the first code", "_____no_output_____" ] ], [ [ "# hide\n\nfrom nbdev.showdoc import *\nprint('hello')", "hello\n" ], [ "# export \ndef say_hi(to):\n return 'hello ' + to", "_____no_output_____" ], [ "say_hi('liucd')", "_____no_output_____" ], [ "# hide \nsay_hi('xiaoming')", "_____no_output_____" ], [ "# export \nsay_hi('xiaohong')", "_____no_output_____" ], [ "from nbdev.export import *\nnotebook2script('01_core.ipynb')", "Converted 01_core.ipynb.\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec579e8fd101b0d0cbe7d9152310df0e069c2894
9,467
ipynb
Jupyter Notebook
Google Analytics/Google_Analytics_Get_bounce_rate.ipynb
krajai/testt
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
[ "BSD-3-Clause" ]
1
2022-03-24T07:46:45.000Z
2022-03-24T07:46:45.000Z
Google Analytics/Google_Analytics_Get_bounce_rate.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
Google Analytics/Google_Analytics_Get_bounce_rate.ipynb
PZawieja/awesome-notebooks
8ae86e5689749716e1315301cecdad6f8843dcf8
[ "BSD-3-Clause" ]
null
null
null
22.702638
305
0.521284
[ [ [ "<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>", "_____no_output_____" ], [ "# Google Analytics - Get bounce rate\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Google%20Analytics/Google_Analytics_Get_bounce_rate.ipynb\" target=\"_parent\"><img src=\"https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg\"/></a>", "_____no_output_____" ], [ "**Tags:** #googleanalytics #bouncerate #plotly #linechart #naas_drivers #scheduler #asset #naas #marketing #analytics #automation #image #csv #html", "_____no_output_____" ], [ "**Author:** [Charles Demontigny](https://www.linkedin.com/in/charles-demontigny/)", "_____no_output_____" ], [ "Pre-requisite: Create your own <a href=\"\">Google API JSON credential</a>", "_____no_output_____" ], [ "## Input", "_____no_output_____" ], [ "### Import library", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport plotly.graph_objects as go\nimport naas\nfrom naas_drivers import googleanalytics", "_____no_output_____" ] ], [ [ "### Get your credential from Google Cloud Platform", "_____no_output_____" ] ], [ [ "json_path = 'naas-googleanalytics.json'", "_____no_output_____" ] ], [ [ "### Get view id from google analytics", "_____no_output_____" ] ], [ [ "view_id = \"228952707\"", "_____no_output_____" ] ], [ [ "### Setup your output paths", "_____no_output_____" ] ], [ [ "csv_output = \"googleanalytics_bounce_rate.csv\"\nhtml_output = \"googleanalytics_bounce_rate.html\"", "_____no_output_____" ] ], [ [ "### Schedule your notebook", "_____no_output_____" ] ], [ [ "naas.scheduler.add(cron=\"0 8 * * *\")\nnaas.dependency.add(json_path)\n\n#-> Uncomment the line below (by removing the hashtag) to remove your scheduler\n# naas.scheduler.delete()", "_____no_output_____" ] ], [ [ "## Model", "_____no_output_____" ], [ "### Bounce Rate", "_____no_output_____" ] ], [ [ "df_bounce_rate = googleanalytics.connect(json_path=json_path).views.get_bounce_rate(view_id=view_id)\ndf_bounce_rate", "_____no_output_____" ] ], [ [ "## Output", "_____no_output_____" ], [ "### Save dataframe in csv", "_____no_output_____" ] ], [ [ "df_bounce_rate.to_csv(csv_output, index=False)", "_____no_output_____" ] ], [ [ "### Bounce Rate Plot", "_____no_output_____" ] ], [ [ "def plot_bounce_rate(df: pd.DataFrame):\n \"\"\"\n Plot bounce rate as an area chart in Plotly.\n \"\"\"\n # Prep dataframe\n df[\"Date\"] = pd.to_datetime(df['Year Month'] + \"01\")\n \n # Get total views\n value = \"{:,.0%}\".format(df[\"Bounce Rate\"].mean())\n \n # Create data\n data = go.Scatter(\n x=df[\"Date\"],\n y=df['Bounce Rate'],\n stackgroup=\"one\"\n )\n \n # Create layout\n layout = go.Layout(\n yaxis={\"tickformat\": ',.0%'},\n title=f\"<b>Bounce Rate</b><br><span style='font-size: 13px;'>Average bounce rate: {value}</span>\",\n title_font=dict(family=\"Arial\", size=18, color=\"black\"),\n yaxis_title=\"Bounce rate %\",\n yaxis_title_font=dict(family=\"Arial\", size=11, color=\"black\"),\n xaxis_title=\"Mounths\",\n xaxis_title_font=dict(family=\"Arial\", size=11, color=\"black\"),\n plot_bgcolor=\"#ffffff\",\n width=1200,\n height=800,\n margin_pad=10,\n )\n fig = go.Figure(data=data, layout=layout)\n fig.update_traces(mode='lines+markers')\n return fig\n\nfig = plot_bounce_rate(df_bounce_rate)\nfig", "_____no_output_____" ] ], [ [ "### Export and share graph", "_____no_output_____" ] ], [ [ "# Export in 
HTML\nfig.write_html(html_output)\n\n# Share with naas\n#-> Uncomment the line below (by removing the hashtag) to share your asset with naas\n# naas.asset.add(html_output, params={\"inline\": True})\n\n#-> Uncomment the line below (by removing the hashtag) to delete your asset\n# naas.asset.delete(html_output)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec57a3c65c12709ddba3bef4d037904fac537c2f
10,253
ipynb
Jupyter Notebook
wip/nn2.ipynb
ArenasGuerreroJulian/kglab
5c34ff586890b56d25bda6cc2bce3f11ce09065f
[ "MIT" ]
388
2020-11-06T23:35:04.000Z
2022-03-30T06:59:56.000Z
wip/nn2.ipynb
ArenasGuerreroJulian/kglab
5c34ff586890b56d25bda6cc2bce3f11ce09065f
[ "MIT" ]
76
2020-11-23T19:59:19.000Z
2022-03-30T10:44:28.000Z
wip/nn2.ipynb
ArenasGuerreroJulian/kglab
5c34ff586890b56d25bda6cc2bce3f11ce09065f
[ "MIT" ]
45
2020-11-23T19:20:10.000Z
2022-03-27T10:44:37.000Z
31.164134
1,217
0.446406
[ [ [ "# Word2Vec Skipgram impl. in PyTorch\n\n[\"Implementing word2vec in PyTorch (skip-gram model)\"](https://towardsdatascience.com/implementing-word2vec-in-pytorch-skip-gram-model-e6bae040d2fb) \nMateusz Bednarski (2018-03-06)\n\nsee: [w2v.py gist](https://gist.github.com/mbednarski/da08eb297304f7a66a3840e857e060a0)", "_____no_output_____" ] ], [ [ "import torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.functional as F\nimport torch.nn.functional as F", "_____no_output_____" ], [ "corpus = [\n 'he is a king',\n 'she is a queen',\n 'he is a man',\n 'she is a woman',\n 'warsaw is poland capital',\n 'berlin is germany capital',\n 'paris is france capital', \n]", "_____no_output_____" ], [ "def tokenize_corpus (corpus):\n tokens = [x.split() for x in corpus]\n return tokens\n\ntokenized_corpus = tokenize_corpus(corpus)\ntokenized_corpus", "_____no_output_____" ], [ "vocabulary = []\n\nfor sentence in tokenized_corpus:\n for token in sentence:\n if token not in vocabulary:\n vocabulary.append(token)\n\nword2idx = {w: idx for (idx, w) in enumerate(vocabulary)}\nidx2word = {idx: w for (idx, w) in enumerate(vocabulary)}\n\nvocabulary_size = len(vocabulary)\nvocabulary", "_____no_output_____" ], [ "window_size = 2\nidx_pairs = []\n\n# for each sentence\nfor sentence in tokenized_corpus:\n indices = [word2idx[word] for word in sentence]\n \n # for each word, threated as center word\n for center_word_pos in range(len(indices)):\n \n # for each window position\n for w in range(-window_size, window_size + 1):\n context_word_pos = center_word_pos + w\n \n # make sure not jump out sentence\n if context_word_pos < 0 or context_word_pos >= len(indices) or center_word_pos == context_word_pos:\n continue\n \n context_word_idx = indices[context_word_pos]\n idx_pairs.append((indices[center_word_pos], context_word_idx))\n\nidx_pairs = np.array(idx_pairs) # it will be useful to have this as numpy array\nidx_pairs", "_____no_output_____" ], [ "def get_input_layer (word_idx):\n x = torch.zeros(vocabulary_size).float()\n x[word_idx] = 1.0\n \n return x", "_____no_output_____" ], [ "embedding_dims = 5\n\nW1 = Variable(torch.randn(embedding_dims, vocabulary_size).float(), requires_grad=True)\nW2 = Variable(torch.randn(vocabulary_size, embedding_dims).float(), requires_grad=True)", "_____no_output_____" ], [ "num_epochs = 101\nlearning_rate = 0.001\n\nfor epo in range(num_epochs):\n loss_val = 0\n \n for data, target in idx_pairs:\n x = Variable(get_input_layer(data)).float()\n y_true = Variable(torch.from_numpy(np.array([target])).long())\n\n z1 = torch.matmul(W1, x)\n z2 = torch.matmul(W2, z1)\n \n log_softmax = F.log_softmax(z2, dim=0)\n\n loss = F.nll_loss(log_softmax.view(1,-1), y_true)\n loss_val += loss.data[0]\n loss.backward()\n \n W1.data -= learning_rate * W1.grad.data\n W2.data -= learning_rate * W2.grad.data\n\n W1.grad.data.zero_()\n W2.grad.data.zero_()\n\n if epo % 10 == 0: \n print(f'Loss at epo {epo}: {loss_val/len(idx_pairs)}')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec57bdd3e359e7310b5ed9cae3ca51f6a8d3715f
31,259
ipynb
Jupyter Notebook
nbs/vendor.tfcompat.hparam.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
167
2021-10-18T22:04:17.000Z
2022-03-21T19:44:21.000Z
nbs/vendor.tfcompat.hparam.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
18
2021-10-19T02:33:57.000Z
2022-03-28T17:25:52.000Z
nbs/vendor.tfcompat.hparam.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
24
2021-10-22T02:16:53.000Z
2022-03-30T18:22:43.000Z
48.614308
99
0.53319
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"></ul></div>", "_____no_output_____" ] ], [ [ "# default_exp vendor.tfcompat.hparam", "_____no_output_____" ], [ "# export\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hyperparameter values.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# export\nimport json\nimport numbers\nimport re\n\nimport six\n\n# Define the regular expression for parsing a single clause of the input\n# (delimited by commas). A legal clause looks like:\n# <variable name>[<index>]? = <rhs>\n# where <rhs> is either a single token or [] enclosed list of tokens.\n# For example: \"var[1] = a\" or \"x = [1,2,3]\"\nPARAM_RE = re.compile(\n r\"\"\"\n (?P<name>[a-zA-Z][\\w\\.]*) # variable name: \"var\" or \"x\"\n (\\[\\s*(?P<index>\\d+)\\s*\\])? # (optional) index: \"1\" or None\n \\s*=\\s*\n ((?P<val>[^,\\[]*) # single value: \"a\" or None\n |\n \\[(?P<vals>[^\\]]*)\\]) # list of values: None or \"1,2,3\"\n ($|,\\s*)\"\"\",\n re.VERBOSE,\n)\n\n\ndef _parse_fail(name, var_type, value, values):\n \"\"\"Helper function for raising a value error for bad assignment.\"\"\"\n raise ValueError(\n \"Could not parse hparam '%s' of type '%s' with value '%s' in %s\"\n % (name, var_type.__name__, value, values)\n )\n\n\ndef _reuse_fail(name, values):\n \"\"\"Helper function for raising a value error for reuse of name.\"\"\"\n raise ValueError(\"Multiple assignments to variable '%s' in %s\" % (name, values))\n\n\ndef _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary):\n \"\"\"Update results_dictionary with a scalar value.\n Used to update the results_dictionary to be returned by parse_values when\n encountering a clause with a scalar RHS (e.g. 
\"s=5\" or \"arr[0]=5\".)\n Mutates results_dictionary.\n Args:\n name: Name of variable in assignment (\"s\" or \"arr\").\n parse_fn: Function for parsing the actual value.\n var_type: Type of named variable.\n m_dict: Dictionary constructed from regex parsing.\n m_dict['val']: RHS value (scalar)\n m_dict['index']: List index value (or None)\n values: Full expression being parsed\n results_dictionary: The dictionary being updated for return by the parsing\n function.\n Raises:\n ValueError: If the name has already been used.\n \"\"\"\n try:\n parsed_value = parse_fn(m_dict[\"val\"])\n except ValueError:\n _parse_fail(name, var_type, m_dict[\"val\"], values)\n\n # If no index is provided\n if not m_dict[\"index\"]:\n if name in results_dictionary:\n _reuse_fail(name, values)\n results_dictionary[name] = parsed_value\n else:\n if name in results_dictionary:\n # The name has already been used as a scalar, then it\n # will be in this dictionary and map to a non-dictionary.\n if not isinstance(results_dictionary.get(name), dict):\n _reuse_fail(name, values)\n else:\n results_dictionary[name] = {}\n\n index = int(m_dict[\"index\"])\n # Make sure the index position hasn't already been assigned a value.\n if index in results_dictionary[name]:\n _reuse_fail(\"{}[{}]\".format(name, index), values)\n results_dictionary[name][index] = parsed_value\n\n\ndef _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary):\n \"\"\"Update results_dictionary from a list of values.\n Used to update results_dictionary to be returned by parse_values when\n encountering a clause with a list RHS (e.g. \"arr=[1,2,3]\".)\n Mutates results_dictionary.\n Args:\n name: Name of variable in assignment (\"arr\").\n parse_fn: Function for parsing individual values.\n var_type: Type of named variable.\n m_dict: Dictionary constructed from regex parsing.\n m_dict['val']: RHS value (scalar)\n values: Full expression being parsed\n results_dictionary: The dictionary being updated for return by the parsing\n function.\n Raises:\n ValueError: If the name has an index or the values cannot be parsed.\n \"\"\"\n if m_dict[\"index\"] is not None:\n raise ValueError(\"Assignment of a list to a list index.\")\n elements = filter(None, re.split(\"[ ,]\", m_dict[\"vals\"]))\n # Make sure the name hasn't already been assigned a value\n if name in results_dictionary:\n raise _reuse_fail(name, values)\n try:\n results_dictionary[name] = [parse_fn(e) for e in elements]\n except ValueError:\n _parse_fail(name, var_type, m_dict[\"vals\"], values)\n\n\ndef _cast_to_type_if_compatible(name, param_type, value):\n \"\"\"Cast hparam to the provided type, if compatible.\n Args:\n name: Name of the hparam to be cast.\n param_type: The type of the hparam.\n value: The value to be cast, if compatible.\n Returns:\n The result of casting `value` to `param_type`.\n Raises:\n ValueError: If the type of `value` is not compatible with param_type.\n * If `param_type` is a string type, but `value` is not.\n * If `param_type` is a boolean, but `value` is not, or vice versa.\n * If `param_type` is an integer type, but `value` is not.\n * If `param_type` is a float type, but `value` is not a numeric type.\n \"\"\"\n fail_msg = \"Could not cast hparam '%s' of type '%s' from value %r\" % (\n name,\n param_type,\n value,\n )\n\n # Some callers use None, for which we can't do any casting/checking. 
:(\n if issubclass(param_type, type(None)):\n return value\n\n # Avoid converting a non-string type to a string.\n if issubclass(param_type, (six.string_types, six.binary_type)) and not isinstance(\n value, (six.string_types, six.binary_type)\n ):\n raise ValueError(fail_msg)\n\n # Avoid converting a number or string type to a boolean or vice versa.\n if issubclass(param_type, bool) != isinstance(value, bool):\n raise ValueError(fail_msg)\n\n # Avoid converting float to an integer (the reverse is fine).\n if issubclass(param_type, numbers.Integral) and not isinstance(\n value, numbers.Integral\n ):\n raise ValueError(fail_msg)\n\n # Avoid converting a non-numeric type to a numeric type.\n if issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number):\n raise ValueError(fail_msg)\n\n return param_type(value)\n\n\ndef parse_values(values, type_map):\n \"\"\"Parses hyperparameter values from a string into a python map.\n `values` is a string containing comma-separated `name=value` pairs.\n For each pair, the value of the hyperparameter named `name` is set to\n `value`.\n If a hyperparameter name appears multiple times in `values`, a ValueError\n is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').\n If a hyperparameter name in both an index assignment and scalar assignment,\n a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1').\n The hyperparameter name may contain '.' symbols, which will result in an\n attribute name that is only accessible through the getattr and setattr\n functions. (And must be first explicit added through add_hparam.)\n WARNING: Use of '.' in your variable names is allowed, but is not well\n supported and not recommended.\n The `value` in `name=value` must follows the syntax according to the\n type of the parameter:\n * Scalar integer: A Python-parsable integer point value. E.g.: 1,\n 100, -12.\n * Scalar float: A Python-parsable floating point value. E.g.: 1.0,\n -.54e89.\n * Boolean: Either true or false.\n * Scalar string: A non-empty sequence of characters, excluding comma,\n spaces, and square brackets. E.g.: foo, bar_1.\n * List: A comma separated list of scalar values of the parameter type\n enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].\n When index assignment is used, the corresponding type_map key should be the\n list name. E.g. for \"arr[1]=0\" the type_map must have the key \"arr\" (not\n \"arr[1]\").\n Args:\n values: String. Comma separated list of `name=value` pairs where\n 'value' must follow the syntax described above.\n type_map: A dictionary mapping hyperparameter names to types. Note every\n parameter name in values must be a key in type_map. The values must\n conform to the types indicated, where a value V is said to conform to a\n type T if either V has type T, or V is a list of elements of type T.\n Hence, for a multidimensional parameter 'x' taking float values,\n 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.\n Returns:\n A python map mapping each name to either:\n * A scalar value.\n * A list of scalar values.\n * A dictionary mapping index numbers to scalar values.\n (e.g. \"x=5,L=[1,2],arr[1]=3\" results in {'x':5,'L':[1,2],'arr':{1:3}}\")\n Raises:\n ValueError: If there is a problem with input.\n * If `values` cannot be parsed.\n * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').\n * If the same rvalue is assigned two different values (e.g. 
'a=1,a=2',\n 'a[1]=1,a[1]=2', or 'a=1,a=[1]')\n \"\"\"\n results_dictionary = {}\n pos = 0\n while pos < len(values):\n m = PARAM_RE.match(values, pos)\n if not m:\n raise ValueError(\"Malformed hyperparameter value: %s\" % values[pos:])\n # Check that there is a comma between parameters and move past it.\n pos = m.end()\n # Parse the values.\n m_dict = m.groupdict()\n name = m_dict[\"name\"]\n if name not in type_map:\n raise ValueError(\"Unknown hyperparameter type for %s\" % name)\n type_ = type_map[name]\n\n # Set up correct parsing function (depending on whether type_ is a bool)\n if type_ == bool:\n\n def parse_bool(value):\n if value in [\"true\", \"True\"]:\n return True\n elif value in [\"false\", \"False\"]:\n return False\n else:\n try:\n return bool(int(value))\n except ValueError:\n _parse_fail(name, type_, value, values)\n\n parse = parse_bool\n else:\n parse = type_\n\n # If a singe value is provided\n if m_dict[\"val\"] is not None:\n _process_scalar_value(\n name, parse, type_, m_dict, values, results_dictionary\n )\n\n # If the assigned value is a list:\n elif m_dict[\"vals\"] is not None:\n _process_list_value(name, parse, type_, m_dict, values, results_dictionary)\n\n else: # Not assigned a list or value\n _parse_fail(name, type_, \"\", values)\n\n return results_dictionary\n\n\nclass HParams(object):\n \"\"\"Class to hold a set of hyperparameters as name-value pairs.\n A `HParams` object holds hyperparameters used to build and train a model,\n such as the number of hidden units in a neural net layer or the learning rate\n to use when training.\n You first create a `HParams` object by specifying the names and values of the\n hyperparameters.\n To make them easily accessible the parameter names are added as direct\n attributes of the class. A typical usage is as follows:\n ```python\n # Create a HParams object specifying names and values of the model\n # hyperparameters:\n hparams = HParams(learning_rate=0.1, num_hidden_units=100)\n # The hyperparameter are available as attributes of the HParams object:\n hparams.learning_rate ==> 0.1\n hparams.num_hidden_units ==> 100\n ```\n Hyperparameters have type, which is inferred from the type of their value\n passed at construction type. The currently supported types are: integer,\n float, boolean, string, and list of integer, float, boolean, or string.\n You can override hyperparameter values by calling the\n [`parse()`](#HParams.parse) method, passing a string of comma separated\n `name=value` pairs. This is intended to make it possible to override\n any hyperparameter values from a single command-line flag to which\n the user passes 'hyper-param=value' pairs. 
It avoids having to define\n one flag for each hyperparameter.\n The syntax expected for each value depends on the type of the parameter.\n See `parse()` for a description of the syntax.\n Example:\n ```python\n # Define a command line flag to pass name=value pairs.\n # For example using argparse:\n import argparse\n parser = argparse.ArgumentParser(description='Train my model.')\n parser.add_argument('--hparams', type=str,\n help='Comma separated list of \"name=value\" pairs.')\n args = parser.parse_args()\n ...\n def my_program():\n # Create a HParams object specifying the names and values of the\n # model hyperparameters:\n hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100,\n activations=['relu', 'tanh'])\n # Override hyperparameters values by parsing the command line\n hparams.parse(args.hparams)\n # If the user passed `--hparams=learning_rate=0.3` on the command line\n # then 'hparams' has the following attributes:\n hparams.learning_rate ==> 0.3\n hparams.num_hidden_units ==> 100\n hparams.activations ==> ['relu', 'tanh']\n # If the hyperparameters are in json format use parse_json:\n hparams.parse_json('{\"learning_rate\": 0.3, \"activations\": \"relu\"}')\n ```\n \"\"\"\n\n _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.\n\n def __init__(self, hparam_def=None, model_structure=None, **kwargs):\n \"\"\"Create an instance of `HParams` from keyword arguments.\n The keyword arguments specify name-values pairs for the hyperparameters.\n The parameter types are inferred from the type of the values passed.\n The parameter names are added as attributes of `HParams` object, so they\n can be accessed directly with the dot notation `hparams._name_`.\n Example:\n ```python\n # Define 3 hyperparameters: 'learning_rate' is a float parameter,\n # 'num_hidden_units' an integer parameter, and 'activation' a string\n # parameter.\n hparams = tf.HParams(\n learning_rate=0.1, num_hidden_units=100, activation='relu')\n hparams.activation ==> 'relu'\n ```\n Note that a few names are reserved and cannot be used as hyperparameter\n names. If you use one of the reserved name the constructor raises a\n `ValueError`.\n Args:\n hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef\n protocol buffer. If provided, this object is initialized by\n deserializing hparam_def. Otherwise **kwargs is used.\n model_structure: An instance of ModelStructure, defining the feature\n crosses to be used in the Trial.\n **kwargs: Key-value pairs where the key is the hyperparameter name and\n the value is the value for the parameter.\n Raises:\n ValueError: If both `hparam_def` and initialization values are provided,\n or if one of the arguments is invalid.\n \"\"\"\n # Register the hyperparameters and their type in _hparam_types.\n # This simplifies the implementation of parse().\n # _hparam_types maps the parameter name to a tuple (type, bool).\n # The type value is the type of the parameter for scalar hyperparameters,\n # or the type of the list elements for multidimensional hyperparameters.\n # The bool value is True if the value is a list, False otherwise.\n self._hparam_types = {}\n self._model_structure = model_structure\n if hparam_def:\n raise ValueError(\"hparam_def has been disabled in this version\")\n else:\n for name, value in six.iteritems(kwargs):\n self.add_hparam(name, value)\n\n def add_hparam(self, name, value):\n \"\"\"Adds {name, value} pair to hyperparameters.\n Args:\n name: Name of the hyperparameter.\n value: Value of the hyperparameter. 
Can be one of the following types:\n int, float, string, int list, float list, or string list.\n Raises:\n ValueError: if one of the arguments is invalid.\n \"\"\"\n # Keys in kwargs are unique, but 'name' could the name of a pre-existing\n # attribute of this object. In that case we refuse to use it as a\n # hyperparameter name.\n if getattr(self, name, None) is not None:\n raise ValueError(\"Hyperparameter name is reserved: %s\" % name)\n if isinstance(value, (list, tuple)):\n if not value:\n raise ValueError(\n \"Multi-valued hyperparameters cannot be empty: %s\" % name\n )\n self._hparam_types[name] = (type(value[0]), True)\n else:\n self._hparam_types[name] = (type(value), False)\n setattr(self, name, value)\n\n def set_hparam(self, name, value):\n \"\"\"Set the value of an existing hyperparameter.\n This function verifies that the type of the value matches the type of the\n existing hyperparameter.\n Args:\n name: Name of the hyperparameter.\n value: New value of the hyperparameter.\n Raises:\n ValueError: If there is a type mismatch.\n \"\"\"\n param_type, is_list = self._hparam_types[name]\n if isinstance(value, list):\n if not is_list:\n raise ValueError(\n \"Must not pass a list for single-valued parameter: %s\" % name\n )\n setattr(\n self,\n name,\n [_cast_to_type_if_compatible(name, param_type, v) for v in value],\n )\n else:\n if is_list:\n raise ValueError(\n \"Must pass a list for multi-valued parameter: %s.\" % name\n )\n setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))\n\n def del_hparam(self, name):\n \"\"\"Removes the hyperparameter with key 'name'.\n Args:\n name: Name of the hyperparameter.\n \"\"\"\n if hasattr(self, name):\n delattr(self, name)\n del self._hparam_types[name]\n\n def parse(self, values):\n \"\"\"Override hyperparameter values, parsing new values from a string.\n See parse_values for more detail on the allowed format for values.\n Args:\n values: String. Comma separated list of `name=value` pairs where\n 'value' must follow the syntax described above.\n Returns:\n The `HParams` instance.\n Raises:\n ValueError: If `values` cannot be parsed.\n \"\"\"\n type_map = dict()\n for name, t in self._hparam_types.items():\n param_type, _ = t\n type_map[name] = param_type\n\n values_map = parse_values(values, type_map)\n return self.override_from_dict(values_map)\n\n def override_from_dict(self, values_dict):\n \"\"\"Override hyperparameter values, parsing new values from a dictionary.\n Args:\n values_dict: Dictionary of name:value pairs.\n Returns:\n The `HParams` instance.\n Raises:\n ValueError: If `values_dict` cannot be parsed.\n \"\"\"\n for name, value in values_dict.items():\n self.set_hparam(name, value)\n return self\n\n ## @deprecation.deprecated(None, 'Use `override_from_dict`.')\n def set_from_map(self, values_map):\n \"\"\"DEPRECATED. Use override_from_dict.\"\"\"\n return self.override_from_dict(values_dict=values_map)\n\n def set_model_structure(self, model_structure):\n self._model_structure = model_structure\n\n def get_model_structure(self):\n return self._model_structure\n\n def to_json(self, indent=None, separators=None, sort_keys=False):\n \"\"\"Serializes the hyperparameters into JSON.\n Args:\n indent: If a non-negative integer, JSON array elements and object members\n will be pretty-printed with that indent level. An indent level of 0, or\n negative, will only insert newlines. `None` (the default) selects the\n most compact representation.\n separators: Optional `(item_separator, key_separator)` tuple. 
Default is\n `(', ', ': ')`.\n sort_keys: If `True`, the output dictionaries will be sorted by key.\n Returns:\n A JSON string.\n \"\"\"\n return json.dumps(\n self.values(), indent=indent, separators=separators, sort_keys=sort_keys\n )\n\n def parse_json(self, values_json):\n \"\"\"Override hyperparameter values, parsing new values from a json object.\n Args:\n values_json: String containing a json object of name:value pairs.\n Returns:\n The `HParams` instance.\n Raises:\n ValueError: If `values_json` cannot be parsed.\n \"\"\"\n values_map = json.loads(values_json)\n return self.override_from_dict(values_map)\n\n def values(self):\n \"\"\"Return the hyperparameter values as a Python dictionary.\n Returns:\n A dictionary with hyperparameter names as keys. The values are the\n hyperparameter values.\n \"\"\"\n return {n: getattr(self, n) for n in self._hparam_types.keys()}\n\n def get(self, key, default=None):\n \"\"\"Returns the value of `key` if it exists, else `default`.\"\"\"\n if key in self._hparam_types:\n # Ensure that default is compatible with the parameter type.\n if default is not None:\n param_type, is_param_list = self._hparam_types[key]\n type_str = \"list<%s>\" % param_type if is_param_list else str(param_type)\n fail_msg = (\n \"Hparam '%s' of type '%s' is incompatible with \"\n \"default=%s\" % (key, type_str, default)\n )\n\n is_default_list = isinstance(default, list)\n if is_param_list != is_default_list:\n raise ValueError(fail_msg)\n\n try:\n if is_default_list:\n for value in default:\n _cast_to_type_if_compatible(key, param_type, value)\n else:\n _cast_to_type_if_compatible(key, param_type, default)\n except ValueError as e:\n raise ValueError(\"%s. %s\" % (fail_msg, e))\n\n return getattr(self, key)\n\n return default\n\n def __contains__(self, key):\n return key in self._hparam_types\n\n def __str__(self):\n return str(sorted(self.values().items()))\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, self.__str__())\n\n @staticmethod\n def _get_kind_name(param_type, is_list):\n \"\"\"Returns the field name given parameter type and is_list.\n Args:\n param_type: Data type of the hparam.\n is_list: Whether this is a list.\n Returns:\n A string representation of the field name.\n Raises:\n ValueError: If parameter type is not recognized.\n \"\"\"\n if issubclass(param_type, bool):\n # This check must happen before issubclass(param_type, six.integer_types),\n # since Python considers bool to be a subclass of int.\n typename = \"bool\"\n elif issubclass(param_type, six.integer_types):\n # Setting 'int' and 'long' types to be 'int64' to ensure the type is\n # compatible with both Python2 and Python3.\n typename = \"int64\"\n elif issubclass(param_type, (six.string_types, six.binary_type)):\n # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is\n # compatible with both Python2 and Python3.\n typename = \"bytes\"\n elif issubclass(param_type, float):\n typename = \"float\"\n else:\n raise ValueError(\"Unsupported parameter type: %s\" % str(param_type))\n\n suffix = \"list\" if is_list else \"value\"\n return \"_\".join([typename, suffix])", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
ec57cc054f12c03ad3223daa50979e42a58caa5a
28,538
ipynb
Jupyter Notebook
DCGAN_Face_Generator.ipynb
alex-parisi/DCGAN-Face-Generator
49d55f668f10df46ac28ab5d65ec05f117356201
[ "MIT" ]
1
2022-01-06T23:04:03.000Z
2022-01-06T23:04:03.000Z
DCGAN_Face_Generator.ipynb
alex-parisi/DCGAN-Face-Generator
49d55f668f10df46ac28ab5d65ec05f117356201
[ "MIT" ]
null
null
null
DCGAN_Face_Generator.ipynb
alex-parisi/DCGAN-Face-Generator
49d55f668f10df46ac28ab5d65ec05f117356201
[ "MIT" ]
null
null
null
35.806775
596
0.52106
[ [ [ "<a href=\"https://colab.research.google.com/github/alex-parisi/DCGAN-Face-Generator/blob/main/DCGAN_Face_Generator.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Generating Faces with a Deep Convolutional Generative Adversarial Network (DCGAN)\n\nA DCGAN uses two networks (discriminator and generator) working against one another in attempt to generate images that could pass as \"authentic\". A discriminator network is trained to determine whether or not an inputted image is a genuine image or an image generated by the generator network - which is attempting to generate images that will deceive the discriminator.\n\n\n ", "_____no_output_____" ], [ "The discriminator has a relatively standard layout in image recognition, and consists of an input layer, three convolution layers, a dropout layer, and then a fully connected layer. The convolution layers use a leakly ReLu activation function, and the fully connected layer uses a sigmoid activation function.\n\n<img src='https://drive.google.com/uc?id=1JOLpN18ANTYuiz6c5An_T5body47yFZw'>\n\nThe above image shows the layout of the generator in this DCGAN. A vector of random noise is upscaled through convolution layers until the appropriate image size is reached.", "_____no_output_____" ], [ "# Imports", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport matplotlib.pyplot as plt\nimport os\nimport gdown\nfrom zipfile import ZipFile\nimport random\nimport glob\nimport imageio\nimport cv2\nfrom google.colab import auth\nfrom google.cloud import storage", "_____no_output_____" ] ], [ [ "# Authenticate and Initiate TPU's\nIn order to connect to Google Cloud Services (GCS) to load the dataset, you must authenticate your Google account. Run the snippet below and follow the link, then paste the access key into the input box and press Enter.", "_____no_output_____" ] ], [ [ "auth.authenticate_user()", "_____no_output_____" ] ], [ [ "Before running this, ensure the Google Colab notebook is set to use TPU's. Go to \"Edit\", then \"Notebook settings\", and set the Hardware Accelerator to \"TPU\".\n\n<br>This will initiate the TPU's, which will run this program almost 8x faster than running locally on a GTX 970.", "_____no_output_____" ] ], [ [ "resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])\n\ntf.config.experimental_connect_to_cluster(resolver)\ntf.tpu.experimental.initialize_tpu_system(resolver)\n\nstrategy = tf.distribute.experimental.TPUStrategy(resolver)", "_____no_output_____" ] ], [ [ "# Prepare dataset\n\nIn order to use TPU's on Google Colab, you cannot use a local filesystem for data - you **must** use a GCS bucket to hold your images. Note that also when using Keras, the dataset object cannot be used to load images directly from a GCS bucket. Therefore, we must first convert the dataset to a .tfrecords format file and upload that to the GCS bucket, which Keras can then convert to a dataset object and use for training. This only needs to be performed once, as once the .tfrecords file is uploaded to the GCS bucket, we can refer to it as long as the training set doesn't change.\n\nFirst, you must sign up for GCS [here](https://cloud.google.com/). 
There are free options available - personally I am using the 90 day free trial.\n\nThen, access the \"Storage\" section [here](https://cloud.google.com/storage) and create a bucket. This is where you will upload your .tfrecords file.\n\nRemember the name that you use for the bucket, as the link you will use throughout is \"gs://< bucket_name>\"", "_____no_output_____" ], [ "Download celeb-a dataset", "_____no_output_____" ] ], [ [ "os.makedirs(\"celeba_gan\")\nurl = \"https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684\"\noutput = \"celeba_gan/data.zip\"\ngdown.download(url, output, quiet=True)", "_____no_output_____" ] ], [ [ "Extract to local filesystem in runtime environment", "_____no_output_____" ] ], [ [ "with ZipFile(\"celeba_gan/data.zip\", \"r\") as zipobj:\n zipobj.extractall(\"celeba_gan\")", "_____no_output_____" ] ], [ [ "Assemble list of filenames in celeb-a dataset", "_____no_output_____" ] ], [ [ "in_pics = []\nfor path, subdirs, files in os.walk(os.path.join(os.getcwd(), 'celeba_gan')):\n for name in files:\n if name.startswith('.'):\n continue\n if '.png' in name or '.jpg' in name:\n in_pics.append(os.path.join(path, name))", "_____no_output_____" ] ], [ [ "Shuffle filenames", "_____no_output_____" ] ], [ [ "random.shuffle(in_pics)\nprint('enumerated pics: ', len(in_pics))", "_____no_output_____" ] ], [ [ "Write image data in each filename to a .tfrecords file", "_____no_output_____" ] ], [ [ "TFRecord_write_file = os.path.join(os.getcwd(), 'celeba_gan.tfrecords')\nprint('Writing TFRecord', TFRecord_write_file)\nwith tf.io.TFRecordWriter(TFRecord_write_file) as writer:\n for i in range(len(in_pics)):\n with tf.io.gfile.GFile(in_pics[i], 'rb') as fid:\n img = fid.read()\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image': tf.train.Feature(bytes_list = tf.train.BytesList(value=[img])),\n }))\n writer.write(example.SerializeToString())\nprint('Writing TFRecord done')", "_____no_output_____" ] ], [ [ "Upload .tfrecords file to the GCS bucket you created.\n<br>Replace: ```gs://celeba-alexp/celeba_gan.tfrecords```\n<br>With: ```gs://< bucket_name>/celeba_gan.tfrecords```\n", "_____no_output_____" ] ], [ [ "!gsutil cp /content/celeba_gan.tfrecords gs://celeba-alexp/celeba_gan.tfrecords", "_____no_output_____" ] ], [ [ "# Assemble dataset from GCS bucket", "_____no_output_____" ], [ "Define the extract function to parse the .tfrecords file and load the dataset within the TPU scope", "_____no_output_____" ], [ "Replace: ```gs://celeba-alexp/celeba_gan.tfrecords```\n<br> With: ```gs://< bucket_name>/celeba_gan.tfrecords```", "_____no_output_____" ] ], [ [ "def TFRecord_extract_fn(data_record):\n features = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n }\n sample = tf.io.parse_single_example(data_record, features)\n sample = tf.image.decode_jpeg(sample['image'], channels=3)\n sample = tf.image.convert_image_dtype(sample, tf.float32)\n sample = tf.image.resize(sample, [64, 64])\n return sample\n\nwith strategy.scope():\n dataset = tf.data.TFRecordDataset('gs://celeba-alexp/celeba_gan.tfrecords')\n dataset = dataset.map(TFRecord_extract_fn)\n dataset = dataset.batch(32)", "_____no_output_____" ] ], [ [ "# Define Models\n\nAs stated above, the DCGAN is split into two competing neural networks: a discriminator and a generator. The discriminator attempts to determine whether or not an inputted image is authentic, i.e. a member of the original dataset, or is a fake generated by the generator. 
The generator attempts to create an image authentic enough to trick the discriminator into making an incorrect classification.", "_____no_output_____" ], [ "The discriminator has the shape:\n\n* (None, 64, 64, 3)\n* (None, 32, 32, 64)\n* (None, 16, 16, 128)\n* (None, 8, 8, 128)\n* (None, 8192)\n* (None, 1)\n\nThe generator has the shape:\n\n* (None, 128)\n* (None, 4, 4, 1024)\n* (None, 8, 8, 512)\n* (None, 16, 16, 256)\n* (None, 32, 32, 128)\n* (None, 64, 64, 3)\n\n<br>Note that \"None\" is the batch size, which in this case is 32\n\n\n", "_____no_output_____" ] ], [ [ "with strategy.scope():\n discriminator = keras.Sequential(\n [\n keras.Input(shape=(64, 64, 3)),\n layers.Conv2D(128, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2D(256, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2D(512, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2D(1024, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Flatten(),\n layers.Dropout(0.2),\n layers.Dense(1, activation=\"sigmoid\"),\n ],\n name=\"discriminator\",\n )\n\n latent_dim = 128\n\n generator = keras.Sequential(\n [\n keras.Input(shape=(latent_dim,)),\n layers.Dense(4 * 4 * 1024),\n layers.Reshape((4, 4, 1024)),\n layers.Conv2DTranspose(512, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2DTranspose(256, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2DTranspose(128, kernel_size=5, strides=2, padding=\"same\"),\n layers.LeakyReLU(alpha=0.2),\n layers.Conv2DTranspose(3, kernel_size=5, strides=2, padding=\"same\", activation=\"sigmoid\"),\n ],\n name=\"generator\",\n )", "_____no_output_____" ] ], [ [ "Create the GAN Network. This step is complicated, as I am using a custom training loop. 
This is a necessary step because we need to establish a shared loss function - this ensures that as the discriminator gets better at discriminating, the generator will concurrently get better at generating.", "_____no_output_____" ] ], [ [ "with strategy.scope(): \n class GAN(keras.Model):\n def __init__(self, discriminator, generator, latent_dim):\n super(GAN, self).__init__()\n self.discriminator = discriminator\n self.generator = generator\n self.latent_dim = latent_dim\n\n def compile(self, d_optimizer, g_optimizer, loss_fn):\n super(GAN, self).compile()\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n self.loss_fn = loss_fn\n self.d_loss_metric = keras.metrics.Mean(name=\"d_loss\")\n self.g_loss_metric = keras.metrics.Mean(name=\"g_loss\")\n\n @property\n def metrics(self):\n return [self.d_loss_metric, self.g_loss_metric]\n\n def train_step(self, real_images):\n # Sample random points in the latent space\n batch_size = tf.shape(real_images)[0]\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n\n # Decode them to fake images\n generated_images = self.generator(random_latent_vectors)\n\n # Combine them with real images\n combined_images = tf.concat([generated_images, real_images], axis=0)\n\n # Assemble labels discriminating real from fake images\n labels = tf.concat(\n [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0\n )\n # Add random noise to the labels\n labels += 0.05 * tf.random.uniform(tf.shape(labels))\n\n # Train the discriminator\n with tf.GradientTape() as tape:\n predictions = self.discriminator(combined_images)\n d_loss = self.loss_fn(labels, predictions)\n grads = tape.gradient(d_loss, self.discriminator.trainable_weights)\n self.d_optimizer.apply_gradients(\n zip(grads, self.discriminator.trainable_weights)\n )\n\n # Sample random points in the latent space\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n\n # Assemble labels that say \"all real images\"\n misleading_labels = tf.zeros((batch_size, 1))\n\n # Train the generator\n with tf.GradientTape() as tape:\n predictions = self.discriminator(self.generator(random_latent_vectors))\n g_loss = self.loss_fn(misleading_labels, predictions)\n grads = tape.gradient(g_loss, self.generator.trainable_weights)\n self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))\n\n # Update metrics\n self.d_loss_metric.update_state(d_loss)\n self.g_loss_metric.update_state(g_loss)\n return {\n \"d_loss\": self.d_loss_metric.result(),\n \"g_loss\": self.g_loss_metric.result(),\n }", "_____no_output_____" ] ], [ [ "Define some functions to upload and download the generated images and checkpoint files from the GCS bucket", "_____no_output_____" ] ], [ [ "def upload_blob(bucket_name, source_file_name, destination_blob_name):\n \"\"\"Uploads a file to the bucket.\"\"\"\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n # The path to your file to upload\n # source_file_name = \"local/path/to/file\"\n # The ID of your GCS object\n # destination_blob_name = \"storage-object-name\"\n\n os.environ[\"GCLOUD_PROJECT\"] = \"DCGAN-Faces\"\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(source_file_name)\n\n print(\n \"File {} uploaded to {}.\".format(\n source_file_name, destination_blob_name\n )\n )\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n \"\"\"Downloads a blob from the 
bucket.\"\"\"\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n os.environ[\"GCLOUD_PROJECT\"] = \"DCGAN-Faces\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name\n )\n )", "_____no_output_____" ] ], [ [ "Create a monitor function that executes at the end of each epoch. This will save a checkpoint of the model (if the epoch is a multiple of 5) and will generate 10 images from the generator and save them. The seed used to generate these images is saved alongside model information when a checkpoint occurs.", "_____no_output_____" ] ], [ [ "with strategy.scope(): \n class GANMonitor(keras.callbacks.Callback):\n def __init__(self, num_img=3, latent_dim=128):\n self.num_img = num_img\n self.latent_dim = latent_dim\n\n def on_epoch_end(self, epoch, logs=None):\n if (epoch + 1) % 5 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix, options=local_device_option)\n upload_blob('celeba-alexp', tf.train.latest_checkpoint(checkpoint_dir), tf.train.latest_checkpoint(checkpoint_dir))\n generated_images = self.model.generator(random_latent_vectors_monitor)\n generated_images *= 255\n generated_images.numpy()\n for i in range(self.num_img):\n img = keras.preprocessing.image.array_to_img(generated_images[i])\n img.save(\"generated_img_%03d_%d.png\" % (epoch + 0, i))\n upload_blob('celeba-alexp', \"generated_img_%03d_%d.png\" % (epoch + 0, i), \"generated_images/generated_img_%03d_%d.png\" % (epoch + 0, i))", "_____no_output_____" ] ], [ [ "Finally, create the GAN model", "_____no_output_____" ] ], [ [ "with strategy.scope():\n gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)", "_____no_output_____" ] ], [ [ "# Training", "_____no_output_____" ], [ "Setup checkpoints that will save the model information and generator seed for later use, or in case training gets interrupted.", "_____no_output_____" ] ], [ [ "random_latent_vectors_monitor = tf.random.normal(shape=(10, latent_dim))\ngen_var = tf.Variable(random_latent_vectors_monitor)\n\ndiscriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0001)\ngenerator_optimizer = keras.optimizers.Adam(learning_rate=0.0001)\n\ncheckpoint_dir = './training_checkpoints'\n#checkpoint_dir = 'gs://celeba-alexp/training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator,\n gen_var=gen_var,\n gan=gan)\n\nlocal_device_option = tf.train.CheckpointOptions(experimental_io_device=\"/job:localhost\")\nckpt_manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3)", "_____no_output_____" ] ], [ [ "Begin training", "_____no_output_____" ] ], [ [ "epochs = 100 # In practice, use ~100 epochs\n\nwith 
strategy.scope():\n random_latent_vectors_monitor = tf.constant(gen_var.numpy())\n\n if ckpt_manager.latest_checkpoint:\n print('Checkpoint loaded!')\n checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir), options=local_device_option)\n gan.compile(\n d_optimizer=discriminator_optimizer,\n g_optimizer=generator_optimizer,\n loss_fn=keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.SUM),\n )\n\n gan.fit(\n dataset, epochs=epochs, callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)]\n )", "_____no_output_____" ] ], [ [ "# Additional Tools\n", "_____no_output_____" ], [ "Convert to GIF", "_____no_output_____" ] ], [ [ "anim_file = 'dcgan0.gif'\n\nwith imageio.get_writer(anim_file, mode='I') as writer:\n filenames = glob.glob('generated_img*_0.png')\n filenames = sorted(filenames)\n for filename in filenames:\n image = imageio.imread(filename)\n image = cv2.resize(image, dsize=(512, 512), interpolation=cv2.INTER_CUBIC)\n writer.append_data(image)\n image = imageio.imread(filename)\n writer.append_data(image)", "_____no_output_____" ] ], [ [ "Load checkpoints from GCS bucket", "_____no_output_____" ] ], [ [ "download_blob('celeba-alexp', '/training_checkpoints/ckpt-1.data-00000-of-00001', '/training_checkpoints/ckpt-1.data-00000-of-00001')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec57ce4f35d1d5f96daffaba4b4a7d65768969ff
5,331
ipynb
Jupyter Notebook
examples/animations.ipynb
rkube/mpl-interactions
b08e3933dcd60db43551c4c6b90055d64566b56e
[ "BSD-3-Clause" ]
null
null
null
examples/animations.ipynb
rkube/mpl-interactions
b08e3933dcd60db43551c4c6b90055d64566b56e
[ "BSD-3-Clause" ]
null
null
null
examples/animations.ipynb
rkube/mpl-interactions
b08e3933dcd60db43551c4c6b90055d64566b56e
[ "BSD-3-Clause" ]
null
null
null
28.356383
492
0.580191
[ [ [ "# Saving Animations\n\nSince the controls object knows how to update figures as the sliders change their values it is also able to save an animation (e.g. `.gif` or `.mp4` by updating the slider values for you. Under the hood this makes use of [FuncAnimation](https://matplotlib.org/api/_as_gen/matplotlib.animation.FuncAnimation.html?highlight=funcanimation#matplotlib.animation.FuncAnimation) and you can pass any relevant kwargs in via `func_anim_kwargs`. Other `kwargs` will passed to `animation.save`.\n\nSaving animations will work with either ipywidgets Sliders or with matplotlib Sliders. However, it will not work with other widgets. (This is an potential area of improvement, PRs welcome)", "_____no_output_____" ] ], [ [ "%matplotlib ipympl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport mpl_interactions.ipyplot as iplt", "_____no_output_____" ] ], [ [ "## Basic Usage", "_____no_output_____" ] ], [ [ "x = np.linspace(0, 2 * np.pi, 200)\n\n\ndef f(x, amp, freq):\n return amp * np.sin(x * freq)\n\n\n# Create the plot as normal\nfig, ax = plt.subplots()\ncontrols = iplt.plot(x, f, freq=(0.05, 10, 250), amp=(1,10))\n_ = iplt.title(\"the Frequency is: {freq:.2f}\", controls=controls[\"freq\"])", "_____no_output_____" ], [ "# save as a gif\nanim = controls.save_animation(\"freq-plot-1.gif\", fig, \"freq\", interval=35)", "_____no_output_____" ] ], [ [ "### Which Generates this GIF\n\n![gif of sin animated over frequency](freq-plot-1.gif)", "_____no_output_____" ], [ "## Embeding the animation in a noteook.\n\nTo embed the animation you can do:\n\n1. Link to it in markdown cell with `![alt-text](path/to/image)`\n2. Drag the file into a markdown cell\n3. Embed `anim.to_html5_video()` using IPython.display.Video:\n```python\nfrom IPython.display import Video\nVideo(anim.to_html5_video(), embed=True)\n```\n\n4. Use IPython to display the saved gif\n\nYou can also read more in this excellent blog post: http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-as-interactive-javascript-widgets/", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nfrom IPython.display import Image\n\nImage(\"freq-plot-1.gif\")", "_____no_output_____" ] ], [ [ "## Matplotlib Sliders with `valstep=None`\n\nMatplotlib sliders have an optional attribute `valstep` that allows for discrete slider steps. `mpl-interactions` uses this for all sliders that it creates, however if you passed a custom made slider in as a kwarg you may not have used `valstep` if this is the case then the `save_animation` function cannot infer how many frames it should render, so you can specify this with the `N_frames` arguments.", "_____no_output_____" ] ], [ [ "from matplotlib.widgets import Slider\n\nimport mpl_interactions.ipyplot as iplt\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(bottom=0.25)\nx = np.linspace(0, 2 * np.pi, 200)\n\n\ndef f(x, freq):\n return np.sin(x * freq)\n\n\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03])\nslider = Slider(axfreq, label=\"freq\", valmin=0.05, valmax=10) # note the lack of valstep\ncontrols2 = iplt.plot(x, f, freq=slider, ax=ax)\n_ = iplt.title(\"the Frequency is: {freq:.2f}\", controls=controls2[\"freq\"])", "_____no_output_____" ], [ "# save as a gif\nanim2 = controls2.save_animation(\"freq-plot-2.gif\", fig, \"freq\", interval=35, N_frames=100)", "_____no_output_____" ] ], [ [ "### Gives this GIF:\n\n![freq-plot-2.gif](freq-plot-2.gif)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec57cff87e9699db9eeb388af9724b451a8c07b1
72,661
ipynb
Jupyter Notebook
examples/dev/test_smirnoff.ipynb
SalomeRonja/PyGromosTools
5a17740a0ec634b8b591ef74d8a420e3fd3e38ba
[ "MIT" ]
13
2021-03-17T09:29:37.000Z
2022-01-14T20:42:16.000Z
examples/dev/test_smirnoff.ipynb
SchroederB/PyGromosTools
c31c38455a849c864241a962efee9e6575f27b06
[ "MIT" ]
185
2021-03-03T14:24:55.000Z
2022-03-31T18:39:29.000Z
examples/dev/test_smirnoff.ipynb
SchroederB/PyGromosTools
c31c38455a849c864241a962efee9e6575f27b06
[ "MIT" ]
13
2021-03-03T14:18:06.000Z
2022-02-17T09:48:55.000Z
78.893594
7,243
0.689393
[ [ [ "import pygromos\nfrom pygromos import files\nfrom pygromos.files.topology import *\nfrom pygromos.files.blocks.topology_blocks import *\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom openforcefield.topology import Molecule, Topology\nfrom openforcefield.typing.engines import smirnoff\nfrom pygromos import data\nfrom pygromos.files.gromos_system import *\nfrom simtk.unit import *\n", "Warning: Unable to load toolkit 'OpenEye Toolkit'. The Open Force Field Toolkit does not require the OpenEye Toolkits, and can use RDKit/AmberTools instead. However, if you have a valid license for the OpenEye Toolkits, consider installing them for faster performance and additional file format support: https://docs.eyesopen.com/toolkits/python/quickstart-python/linuxosx.html OpenEye offers free Toolkit licenses for academics: https://www.eyesopen.com/academic-licensing\n" ], [ "ff = data.data_dir + \"/ff/SMIRNOFF/frosst_12-12-18.xml\"\nforcefield = smirnoff.ForceField(ff)", "0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Bonds' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Angles' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'ProperTorsions' tag. The SMIRNOFF spec converter is assuming it has a value of 'charmm'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'vdW' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lennard-Jones-12-6'\n0.1 SMIRNOFF spec did not allow the 'Electrostatics' tag. Adding it in 0.2 spec conversion, and assuming the following values:\n\tmethod: PME\n\tscale12: 0.0\n\tscale13: 0.0\n\tscale15: 1.0\n\tcutoff: 9.0\n\tcutoff_unit: angstrom\n0.1 SMIRNOFF spec file does not contain 'method' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'cutoff'\n0.1 SMIRNOFF spec file does not contain 'combining_rules' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lorentz-Berthelot'\n0.1 SMIRNOFF spec file does not contain 'scale12' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale13' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale15' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1'\n0.1 SMIRNOFF spec file does not contain 'switch_width' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1.0'\n0.1 SMIRNOFF spec file does not contain 'switch_width_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n0.1 SMIRNOFF spec file does not contain 'cutoff' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '9.0'\n0.1 SMIRNOFF spec file does not contain 'cutoff_unit' attribute for 'NonBondedMethod/vdW'' tag. 
The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n" ], [ "rdkmol=Chem.MolFromMolFile(\"/home/mlehner/Documents/dev/molecule_174.sdf\", removeHs = False)", "_____no_output_____" ], [ "moleculeFF = Molecule.from_rdkit(rdkmol)\ntopology = Topology.from_molecules(moleculeFF)", "_____no_output_____" ], [ "moleculeFF.name", "_____no_output_____" ], [ "molecule_force_list = forcefield.label_molecules(topology)", "_____no_output_____" ], [ "top = Top(input=data.data_dir+\"/topology_templates/blank_template.top\")", "TITLE\nPHYSICALCONSTANTS\nTOPVERSION\nSOLUTEMOLECULES\nTEMPERATUREGROUPS\nPRESSUREGROUPS\nLJEXCEPTIONS\nSOLVENTATOM\nSOLVENTCONSTR\n" ], [ "for molecule in molecule_force_list:\n for key in molecule[\"Bonds\"]:\n force = molecule[\"Bonds\"][key]\n #hQ = topology.atom(force[0]).atomic_number == 1 or topology.atom(force[1]).atomic_number == 1\n hQ = not all([topology.atom(x).atomic_number != 1 for x in key]) \n top.add_new_bond(k=force.k.value_in_unit(kilojoule / (mole * nanometer ** 2)), b0=force.length.value_in_unit(nanometer), atomI=key[0]+1, atomJ=key[1]+1, includesH=hQ)", "_____no_output_____" ], [ "for molecule in molecule_force_list:\n for key in molecule[\"Angles\"]:\n force = molecule[\"Angles\"][key]\n hQ = not all([topology.atom(x).atomic_number != 1 for x in key])\n top.add_new_angle(k=force.k.value_in_unit(kilojoule / (mole * radian ** 2)), b0=force.angle.value_in_unit(degree), atomI=key[0]+1, atomJ=key[1]+1, atomK=key[2]+1, includesH=hQ)", "_____no_output_____" ], [ "for molecule in molecule_force_list:\n for key in molecule[\"ProperTorsions\"]:\n force = molecule[\"ProperTorsions\"][key]\n hQ = not all([topology.atom(x).atomic_number != 1 for x in key])\n atomI=key[0]+1\n atomJ=key[1]+1\n atomK=key[2]+1\n atomL=key[3]+1\n k_list = force.k\n phase_list = force.phase\n per_list = force.periodicity\n for t in range(len(k_list)):\n CP=k_list[t].value_in_unit(kilojoule/mole)\n PD=phase_list[t].value_in_unit(degree)\n NP=per_list[t]\n top.add_new_torsiondihedral(CP=CP, PD=PD, NP=NP, atomI=atomI, atomJ=atomJ, atomK=atomK, atomL=atomL, includesH=hQ)", "_____no_output_____" ], [ "for molecule in molecule_force_list:\n for key in molecule[\"ImproperTorsions\"]:\n force = molecule[\"ImproperTorsions\"][key]\n hQ = not all([topology.atom(x).atomic_number != 1 for x in key])\n atomI=key[0]+1\n atomJ=key[1]+1\n atomK=key[2]+1\n atomL=key[3]+1\n k_list = force.k\n phase_list = force.phase\n per_list = force.periodicity\n for t in range(len(k_list)):\n CP=k_list[t].value_in_unit(kilojoule/mole)\n PD=phase_list[t].value_in_unit(degree)\n NP=per_list[t]\n top.add_new_torsiondihedral(CP=CP, PD=PD, NP=NP, atomI=atomI, atomJ=atomJ, atomK=atomK, atomL=atomL, includesH=hQ)\nif not hasattr(top, \"IMPDIHEDRALTYPE\"):\n top.add_block(blocktitle=\"IMPDIHEDRALTYPE\", content=[])\nif not hasattr(top, \"IMPDIHEDRALH\"):\n top.add_block(blocktitle=\"IMPDIHEDRALH\", content=[])\nif not hasattr(top, \"IMPDIHEDRAL\"):\n top.add_block(blocktitle=\"IMPDIHEDRAL\", content=[])", "_____no_output_____" ], [ "exclusionlist=dict()", "_____no_output_____" ], [ "exclusionlist", "_____no_output_____" ], [ "exclusionlist=dict()\nfor molecule in molecule_force_list:\n for key in molecule[\"Bonds\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n for key in molecule[\"Angles\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n 
exclusionlist[str(key[0])].add(key[2])\n for key in molecule[\"ProperTorsions\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n exclusionlist[str(key[0])].add(key[2])\n exclusionlist[str(key[0])].add(key[3])\n for key in molecule[\"ImproperTorsions\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n exclusionlist[str(key[0])].add(key[2])\n exclusionlist[str(key[0])].add(key[3])", "_____no_output_____" ], [ "exclusionlist", "_____no_output_____" ], [ "moleculeFF.atoms[0].mass.value_in_unit(dalton)", "_____no_output_____" ], [ "moleculeFF.atoms[0].atomic_number", "_____no_output_____" ], [ "moleculeItr = 1\nfor molecule in molecule_force_list:\n for key in molecule[\"vdW\"]:\n force = molecule[\"vdW\"][key]\n ATNM = int(key[0]) + 1\n MRES = moleculeItr\n IAC = 0\n MASS = moleculeFF.atoms[int(key[0])].mass.value_in_unit(dalton)\n CG = 0\n CGC = 0\n if str(key[0]) in exclusionlist:\n INE = list(exclusionlist[str(key[0])])\n else:\n INE = list()\n INE14 = list()\n epsilon = float(force.epsilon.value_in_unit(kilojoule_per_mole))\n rmin = 2 * force.rmin_half.value_in_unit(nanometer)\n C6 = 2 * epsilon * (rmin**6)\n C12 = epsilon * (rmin**12)\n IACname = force.id\n print(force.smirks)\n top.add_new_SOLUTEATOM(ATNM=ATNM, MRES=MRES, IAC=IAC, MASS=MASS, CG=CG, CGC=CGC, INE=INE, INE14=INE14, C6=C6, C12=C12, IACname=IACname)\n moleculeItr += 1 ", "[#6X4:1]\n[#6X4:1]\n[#6X4:1]\n[#6X4:1]\n[#6X4:1]\n[#6X4:1]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n[#1:1]-[#6X4]\n" ], [ "len(moleculeFF.name)", "_____no_output_____" ], [ "if len(moleculeFF.name) >= 1:\n top.add_new_resname(moleculeFF.name)\nelse:\n top.add_new_resname(moleculeFF.hill_formula)", "_____no_output_____" ], [ "top.TEMPERATUREGROUPS.content", "_____no_output_____" ], [ "moleculeFF.n_atoms", "_____no_output_____" ], [ "top.make_ordered()", "_____no_output_____" ], [ "print(top)", "TITLE\nBlank Template created with Pygromos\n\n\t >>> Generated with python lib function_libs utilities. 
(riniker group)\n\t >>> line_seperator: '\\n'\t field_seperator: '\\t'\nEND\nPHYSICALCONSTANTS\n# FPEPSI: 1.0/(4.0*PI*EPS0) (EPS0 is the permittivity of vacuum)\n138.9354\n# HBAR: Planck's constant HBAR = H/(2* PI)\n0.0635078\n# SPDL: Speed of light (nm/ps)\n299792.458\n# BOLTZ: Boltzmann's constant kB\n0.00831441\nEND\nTOPVERSION\n2.0\nEND\nATOMTYPENAME\n2\nn16\nn2\nEND\nRESNAME\n1\nC6H12\nEND\nSOLUTEATOM\n# NRP: number of solute atoms\n \t 18\n# ATNM: atom number\n# MRES: residue number\n# PANM: atom name of solute atom\n# IAC: integer (van der Waals) atom type code\n# MASS: mass of solute atom\n# CG: charge of solute atom\n# CGC: charge group code (0 or 1)\n# INE: number of excluded atoms\n# INE14: number of 1-4 interactions\n# ATNM MRES PANM IAC MASS CG CGC INE\n# INE14\n\t1\t1\t_\t1\t12.01078\t0\t0\t15\t1\t2\t3\t4\t5\t6\n\t\t\t\t\t\t\t\t\t7\t8\t9\t10\t11\t14\n\t\t\t\t\t\t\t\t\t15\t16\t17\n\t\t\t\t\t\t\t\t0\n\t2\t1\t_\t1\t12.01078\t0\t0\t15\t0\t2\t3\t4\t5\t6\n\t\t\t\t\t\t\t\t\t7\t8\t9\t10\t11\t12\n\t\t\t\t\t\t\t\t\t13\t16\t17\n\t\t\t\t\t\t\t\t0\n\t3\t1\t_\t1\t12.01078\t0\t0\t15\t0\t1\t3\t4\t5\t6\n\t\t\t\t\t\t\t\t\t7\t8\t9\t10\t11\t12\n\t\t\t\t\t\t\t\t\t13\t14\t15\n\t\t\t\t\t\t\t\t0\n\t4\t1\t_\t1\t12.01078\t0\t0\t14\t1\t2\t4\t5\t8\t9\n\t\t\t\t\t\t\t\t\t10\t11\t12\t13\t14\t15\n\t\t\t\t\t\t\t\t\t16\t17\n\t\t\t\t\t\t\t\t0\n\t5\t1\t_\t1\t12.01078\t0\t0\t14\t0\t2\t3\t5\t6\t7\n\t\t\t\t\t\t\t\t\t10\t11\t12\t13\t14\t15\n\t\t\t\t\t\t\t\t\t16\t17\n\t\t\t\t\t\t\t\t0\n\t6\t1\t_\t1\t12.01078\t0\t0\t14\t0\t1\t3\t4\t6\t7\n\t\t\t\t\t\t\t\t\t8\t9\t12\t13\t14\t15\n\t\t\t\t\t\t\t\t\t16\t17\n\t\t\t\t\t\t\t\t0\n\t7\t1\t_\t2\t1.007947\t0\t0\t8\t0\t1\t5\t7\t8\t9\n\t\t\t\t\t\t\t\t\t16\t17\n\t\t\t\t\t\t\t\t0\n\t8\t1\t_\t2\t1.007947\t0\t0\t7\t0\t1\t5\t8\t9\t16\n\t\t\t\t\t\t\t\t\t17\n\t\t\t\t\t\t\t\t0\n\t9\t1\t_\t2\t1.007947\t0\t0\t5\t1\t2\t9\t10\t11\n\t\t\t\t\t\t\t\t0\n\t10\t1\t_\t2\t1.007947\t0\t0\t4\t11\t1\t2\t10\n\t\t\t\t\t\t\t\t0\n\t11\t1\t_\t2\t1.007947\t0\t0\t5\t2\t3\t11\t12\t13\n\t\t\t\t\t\t\t\t0\n\t12\t1\t_\t2\t1.007947\t0\t0\t4\t2\t3\t12\t13\n\t\t\t\t\t\t\t\t0\n\t13\t1\t_\t2\t1.007947\t0\t0\t5\t3\t4\t13\t14\t15\n\t\t\t\t\t\t\t\t0\n\t14\t1\t_\t2\t1.007947\t0\t0\t4\t3\t4\t14\t15\n\t\t\t\t\t\t\t\t0\n\t15\t1\t_\t2\t1.007947\t0\t0\t5\t4\t5\t15\t16\t17\n\t\t\t\t\t\t\t\t0\n\t16\t1\t_\t2\t1.007947\t0\t0\t4\t16\t17\t4\t5\n\t\t\t\t\t\t\t\t0\n\t17\t1\t_\t2\t1.007947\t0\t0\t2\t17\t5\n\t\t\t\t\t\t\t\t0\n\t18\t1\t_\t2\t1.007947\t0\t0\t0\n\t\t\t\t\t\t\t\t0\nEND\nBONDSTRETCHTYPE\n# \t NBTY: number of covalent bond types\n \t 2\n# \t CB \t CHB \t B0\n\t5.56986e+06\t2.59408e+05\t1.52600e-01\n\t1.19734e+07\t2.84512e+05\t1.09000e-01\nEND\nBONDH\n# \t NBONH: number of bonds involving H atoms in solute\n \t 12\n# \t IBH \t JBH \t ICBH\n\t1\t7\t2\n\t1\t8\t2\n\t2\t9\t2\n\t2\t10\t2\n\t3\t11\t2\n\t3\t12\t2\n\t4\t13\t2\n\t4\t14\t2\n\t5\t15\t2\n\t5\t16\t2\n\t6\t17\t2\n\t6\t18\t2\nEND\nBOND\n# \t NBON: number of bonds NOT involving H atoms in solute\n \t 6\n# \t IB \t JB \t ICB\n\t1\t2\t1\n\t1\t6\t1\n\t2\t3\t1\n\t3\t4\t1\n\t4\t5\t1\n\t5\t6\t1\nEND\nBONDANGLEBENDTYPE\n# \t NBTY: number of angle types\n \t 2\n# \t CT \t CHT \t T0\n\t0.00000e+00\t4.18400e+02\t1.09500e+02\n\t0.00000e+00\t2.92880e+02\t1.09500e+02\nEND\nBONDANGLEH\n# \t NTHEH: number of bondangles involving a H\n \t 30\n# \t ITH \t JTH \t KTH \t 
ICTH\n\t1\t2\t9\t1\n\t1\t2\t10\t1\n\t1\t6\t17\t1\n\t1\t6\t18\t1\n\t2\t1\t7\t1\n\t2\t1\t8\t1\n\t2\t3\t11\t1\n\t2\t3\t12\t1\n\t3\t2\t9\t1\n\t3\t2\t10\t1\n\t3\t4\t13\t1\n\t3\t4\t14\t1\n\t4\t3\t11\t1\n\t4\t3\t12\t1\n\t4\t5\t15\t1\n\t4\t5\t16\t1\n\t5\t4\t13\t1\n\t5\t4\t14\t1\n\t5\t6\t17\t1\n\t5\t6\t18\t1\n\t6\t1\t7\t1\n\t6\t1\t8\t1\n\t6\t5\t15\t1\n\t6\t5\t16\t1\n\t7\t1\t8\t2\n\t9\t2\t10\t2\n\t11\t3\t12\t2\n\t13\t4\t14\t2\n\t15\t5\t16\t2\n\t17\t6\t18\t2\nEND\nBONDANGLE\n# \t NTHE: number of angles\n \t 6\n# \t IT \t JT \t KT \t ICT\n\t1\t2\t3\t1\n\t1\t6\t5\t1\n\t2\t1\t6\t1\n\t2\t3\t4\t1\n\t3\t4\t5\t1\n\t4\t5\t6\t1\nEND\nIMPDIHEDRALTYPE\n# \t NQTY: number of improper dihedrals\n \t 0\n# \t CQ \t Q0\nEND\nIMPDIHEDRALH\n# \t NQHIH: number of improper dihedrals involving H atoms\n \t 0\n# \t IQH \t JQH \t KQH \t LQH \t ICQH\nEND\nIMPDIHEDRAL\n# \t # NQHI: number of improper dihedrals\n \t 0\n# \t IQ \t JQ \t KQ \t LQ \t ICQ\nEND\nTORSDIHEDRALTYPE\n# \t NPTY: number of torsion dihedrals\n \t 5\n# \t CP \t PD \t NP\n\t7.53120e-01\t0.00000e+00\t3\n\t1.04600e+00\t1.80000e+02\t2\n\t8.36800e-01\t1.80000e+02\t1\n\t6.69440e-01\t0.00000e+00\t3\n\t6.27600e-01\t0.00000e+00\t3\nEND\nDIHEDRALH\n# \t NPHIH: number of torsion dihedrals involving H atoms\n \t 48\n# \t IPH \t JPH \t KPH \t LPH \t ICPH\n\t1\t2\t3\t11\t4\n\t1\t2\t3\t12\t4\n\t1\t6\t5\t15\t4\n\t1\t6\t5\t16\t4\n\t2\t1\t6\t17\t4\n\t2\t1\t6\t18\t4\n\t2\t3\t4\t13\t4\n\t2\t3\t4\t14\t4\n\t3\t2\t1\t7\t4\n\t3\t2\t1\t8\t4\n\t3\t4\t5\t15\t4\n\t3\t4\t5\t16\t4\n\t4\t3\t2\t9\t4\n\t4\t3\t2\t10\t4\n\t4\t5\t6\t17\t4\n\t4\t5\t6\t18\t4\n\t5\t4\t3\t11\t4\n\t5\t4\t3\t12\t4\n\t5\t6\t1\t7\t4\n\t5\t6\t1\t8\t4\n\t6\t1\t2\t9\t4\n\t6\t1\t2\t10\t4\n\t6\t5\t4\t13\t4\n\t6\t5\t4\t14\t4\n\t7\t1\t2\t9\t5\n\t7\t1\t2\t10\t5\n\t7\t1\t6\t17\t5\n\t7\t1\t6\t18\t5\n\t8\t1\t2\t9\t5\n\t8\t1\t2\t10\t5\n\t8\t1\t6\t17\t5\n\t8\t1\t6\t18\t5\n\t9\t2\t3\t11\t5\n\t9\t2\t3\t12\t5\n\t10\t2\t3\t11\t5\n\t10\t2\t3\t12\t5\n\t11\t3\t4\t13\t5\n\t11\t3\t4\t14\t5\n\t12\t3\t4\t13\t5\n\t12\t3\t4\t14\t5\n\t13\t4\t5\t15\t5\n\t13\t4\t5\t16\t5\n\t14\t4\t5\t15\t5\n\t14\t4\t5\t16\t5\n\t15\t5\t6\t17\t5\n\t15\t5\t6\t18\t5\n\t16\t5\t6\t17\t5\n\t16\t5\t6\t18\t5\nEND\nDIHEDRAL\n# \t NPHI: number of dihedrals NOT involving H atoms\n \t 18\n# \t IP \t JP \t KP \t LP \t ICP\n\t1\t2\t3\t4\t1\n\t1\t2\t3\t4\t2\n\t1\t2\t3\t4\t3\n\t1\t6\t5\t4\t1\n\t1\t6\t5\t4\t2\n\t1\t6\t5\t4\t3\n\t2\t1\t6\t5\t1\n\t2\t1\t6\t5\t2\n\t2\t1\t6\t5\t3\n\t2\t3\t4\t5\t1\n\t2\t3\t4\t5\t2\n\t2\t3\t4\t5\t3\n\t3\t2\t1\t6\t1\n\t3\t2\t1\t6\t2\n\t3\t2\t1\t6\t3\n\t3\t4\t5\t6\t1\n\t3\t4\t5\t6\t2\n\t3\t4\t5\t6\t3\nEND\nLJPARAMETERS\n# \t NRATT2: number of LJ interaction types = NRATT*(NRATT+1)/2\n \t 3\n# \t IAC \t JAC \t C12 \t C6 \t CS12 \t CS6\n\t1\t1\t4.364248e-06\t2.826762e-03\t0.000000e+00\t0.000000e+00\n\t1\t2\t3.704641e-07\t5.069068e-04\t0.000000e+00\t0.000000e+00\n\t2\t2\t3.144727e-08\t9.090068e-05\t0.000000e+00\t0.000000e+00\nEND\nSOLUTEMOLECULES\n1\n1\nEND\nTEMPERATUREGROUPS\n1\n1\nEND\nPRESSUREGROUPS\n1\n1\nEND\nLJEXCEPTIONS\n# This block defines special LJ-interactions based on atom numbers \n# This overrules the normal LJ-parameters (including 1-4 interactions)\n# \t NEX: number of exceptions\n \t 0\n# \t AT1 \t AT2 \t C12 \t C6\nEND\nSOLVENTATOM\n# \t NRAM: number of atoms per solvent molecule\n \t 3\n# \t I \t ANMS \t IACS \t MASS \t CGS\n\t1\tOW\t999\t1.59994e+01\t-8.20000e-01\n\t2\tHW1\t998\t1.00800e+00\t4.10000e-01\n\t3\tHW2\t998\t1.00800e+00\t4.10000e-01\nEND\nSOLVENTCONSTR\n# \t NCONS: number of constraints\n \t 3\n# \t ICONS \t JCONS \t 
CONS\n\t1\t2\t0.1\n\t1\t3\t0.1\n\t2\t3\t0.163299\nEND\n\n" ], [ "top2 = Top(input=data.data_dir+\"/topology_templates/blank_template.top\")", "TITLE\nPHYSICALCONSTANTS\nTOPVERSION\nSOLUTEMOLECULES\nTEMPERATUREGROUPS\nPRESSUREGROUPS\nLJEXCEPTIONS\nSOLVENTATOM\nSOLVENTCONSTR\n" ], [ "top3 = openforcefield2gromos.openforcefield2gromos(moleculeFF, gromosTop=top2, forcefield_name=data.data_dir + \"/ff/SMIRNOFF/frosst_12-12-18.xml\").convert_return()", "0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Bonds' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Angles' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'ProperTorsions' tag. The SMIRNOFF spec converter is assuming it has a value of 'charmm'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'vdW' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lennard-Jones-12-6'\n0.1 SMIRNOFF spec did not allow the 'Electrostatics' tag. Adding it in 0.2 spec conversion, and assuming the following values:\n\tmethod: PME\n\tscale12: 0.0\n\tscale13: 0.0\n\tscale15: 1.0\n\tcutoff: 9.0\n\tcutoff_unit: angstrom\n0.1 SMIRNOFF spec file does not contain 'method' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'cutoff'\n0.1 SMIRNOFF spec file does not contain 'combining_rules' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lorentz-Berthelot'\n0.1 SMIRNOFF spec file does not contain 'scale12' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale13' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale15' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1'\n0.1 SMIRNOFF spec file does not contain 'switch_width' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1.0'\n0.1 SMIRNOFF spec file does not contain 'switch_width_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n0.1 SMIRNOFF spec file does not contain 'cutoff' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '9.0'\n0.1 SMIRNOFF spec file does not contain 'cutoff_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n" ], [ "top3.make_ordered()", "_____no_output_____" ], [ "print(top3)", "TITLE\nBlank Template created with Pygromos\n\n\tname: \t hill_formula: C6H12\n\t----------------------------------------\n\t| created from OpenForceField topology |\n\t| use Amber Block for OpenFF topology! |\n\t----------------------------------------\n\n\t >>> Generated with python lib function_libs utilities. 
(riniker group)\n\t >>> line_seperator: '\\n'\t field_seperator: '\\t'\nEND\nPHYSICALCONSTANTS\n# FPEPSI: 1.0/(4.0*PI*EPS0) (EPS0 is the permittivity of vacuum)\n138.9354\n# HBAR: Planck's constant HBAR = H/(2* PI)\n0.0635078\n# SPDL: Speed of light (nm/ps)\n299792.458\n# BOLTZ: Boltzmann's constant kB\n0.00831441\nEND\nTOPVERSION\n2.0\nEND\nATOMTYPENAME\n2\nn16\nn2\nEND\nRESNAME\n1\nC6H12\nEND\nSOLUTEATOM\n# NRP: number of solute atoms\n \t 18\n# ATNM: atom number\n# MRES: residue number\n# PANM: atom name of solute atom\n# IAC: integer (van der Waals) atom type code\n# MASS: mass of solute atom\n# CG: charge of solute atom\n# CGC: charge group code (0 or 1)\n# INE: number of excluded atoms\n# INE14: number of 1-4 interactions\n# ATNM MRES PANM IAC MASS CG CGC INE\n# INE14\n\t1\t1\tC1\t1\t12.01078\t0\t1\t10\t2\t3\t5\t6\t7\t8\n\t\t\t\t\t\t\t\t\t9\t10\t17\t18\n\t\t\t\t\t\t\t\t5\t4\t11\t12\t15\t16\n\t2\t1\tC2\t1\t12.01078\t0\t1\t9\t3\t4\t6\t7\t8\t9\n\t\t\t\t\t\t\t\t\t10\t11\t12\n\t\t\t\t\t\t\t\t5\t5\t13\t14\t17\t18\n\t3\t1\tC3\t1\t12.01078\t0\t1\t8\t4\t5\t9\t10\t11\t12\n\t\t\t\t\t\t\t\t\t13\t14\n\t\t\t\t\t\t\t\t5\t6\t7\t8\t15\t16\n\t4\t1\tC4\t1\t12.01078\t0\t1\t8\t5\t6\t11\t12\t13\t14\n\t\t\t\t\t\t\t\t\t15\t16\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t5\t1\tC5\t1\t12.01078\t0\t1\t7\t6\t13\t14\t15\t16\t17\n\t\t\t\t\t\t\t\t\t18\n\t\t\t\t\t\t\t\t4\t7\t8\t11\t12\n\t6\t1\tC6\t1\t12.01078\t0\t1\t6\t7\t8\t15\t16\t17\t18\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t4\t9\t10\t13\t14\n\t7\t1\tH1\t2\t1.007947\t0\t1\t1\t8\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t8\t1\tH2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t9\t1\tH3\t2\t1.007947\t0\t1\t1\t10\n\t\t\t\t\t\t\t\t2\t11\t12\n\t10\t1\tH4\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t11\t12\n\t11\t1\tH5\t2\t1.007947\t0\t1\t1\t12\n\t\t\t\t\t\t\t\t2\t13\t14\n\t12\t1\tH6\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t13\t14\n\t13\t1\tH7\t2\t1.007947\t0\t1\t1\t14\n\t\t\t\t\t\t\t\t2\t15\t16\n\t14\t1\tH8\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t15\t16\n\t15\t1\tH9\t2\t1.007947\t0\t1\t1\t16\n\t\t\t\t\t\t\t\t2\t17\t18\n\t16\t1\tH10\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t17\t18\n\t17\t1\tH11\t2\t1.007947\t0\t1\t1\t18\n\t\t\t\t\t\t\t\t0\n\t18\t1\tH12\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t0\nEND\nBONDSTRETCHTYPE\n# \t NBTY: number of covalent bond types\n \t 2\n# \t CB \t CHB \t B0\n\t5.56986e+06\t2.59408e+05\t1.52600e-01\n\t1.19734e+07\t2.84512e+05\t1.09000e-01\nEND\nBONDH\n# \t NBONH: number of bonds involving H atoms in solute\n \t 12\n# \t IBH \t JBH \t ICBH\n\t1\t7\t2\n\t1\t8\t2\n\t2\t9\t2\n\t2\t10\t2\n\t3\t11\t2\n\t3\t12\t2\n\t4\t13\t2\n\t4\t14\t2\n\t5\t15\t2\n\t5\t16\t2\n\t6\t17\t2\n\t6\t18\t2\nEND\nBOND\n# \t NBON: number of bonds NOT involving H atoms in solute\n \t 6\n# \t IB \t JB \t ICB\n\t1\t2\t1\n\t1\t6\t1\n\t2\t3\t1\n\t3\t4\t1\n\t4\t5\t1\n\t5\t6\t1\nEND\nBONDANGLEBENDTYPE\n# \t NBTY: number of angle types\n \t 2\n# \t CT \t CHT \t T0\n\t0.00000e+00\t4.18400e+02\t1.09500e+02\n\t0.00000e+00\t2.92880e+02\t1.09500e+02\nEND\nBONDANGLEH\n# \t NTHEH: number of bondangles involving a H\n \t 30\n# \t ITH \t JTH \t KTH \t 
ICTH\n\t1\t2\t9\t1\n\t1\t2\t10\t1\n\t1\t6\t17\t1\n\t1\t6\t18\t1\n\t2\t1\t7\t1\n\t2\t1\t8\t1\n\t2\t3\t11\t1\n\t2\t3\t12\t1\n\t3\t2\t9\t1\n\t3\t2\t10\t1\n\t3\t4\t13\t1\n\t3\t4\t14\t1\n\t4\t3\t11\t1\n\t4\t3\t12\t1\n\t4\t5\t15\t1\n\t4\t5\t16\t1\n\t5\t4\t13\t1\n\t5\t4\t14\t1\n\t5\t6\t17\t1\n\t5\t6\t18\t1\n\t6\t1\t7\t1\n\t6\t1\t8\t1\n\t6\t5\t15\t1\n\t6\t5\t16\t1\n\t7\t1\t8\t2\n\t9\t2\t10\t2\n\t11\t3\t12\t2\n\t13\t4\t14\t2\n\t15\t5\t16\t2\n\t17\t6\t18\t2\nEND\nBONDANGLE\n# \t NTHE: number of angles\n \t 6\n# \t IT \t JT \t KT \t ICT\n\t1\t2\t3\t1\n\t1\t6\t5\t1\n\t2\t1\t6\t1\n\t2\t3\t4\t1\n\t3\t4\t5\t1\n\t4\t5\t6\t1\nEND\nIMPDIHEDRALTYPE\n# \t NQTY: number of improper dihedrals\n \t 0\n# \t CQ \t Q0\nEND\nIMPDIHEDRALH\n# \t NQHIH: number of improper dihedrals involving H atoms\n \t 0\n# \t IQH \t JQH \t KQH \t LQH \t ICQH\nEND\nIMPDIHEDRAL\n# \t # NQHI: number of improper dihedrals\n \t 0\n# \t IQ \t JQ \t KQ \t LQ \t ICQ\nEND\nTORSDIHEDRALTYPE\n# \t NPTY: number of torsion dihedrals\n \t 5\n# \t CP \t PD \t NP\n\t7.53120e-01\t0.00000e+00\t3\n\t1.04600e+00\t1.80000e+02\t2\n\t8.36800e-01\t1.80000e+02\t1\n\t6.69440e-01\t0.00000e+00\t3\n\t6.27600e-01\t0.00000e+00\t3\nEND\nDIHEDRALH\n# \t NPHIH: number of torsion dihedrals involving H atoms\n \t 48\n# \t IPH \t JPH \t KPH \t LPH \t ICPH\n\t1\t2\t3\t11\t4\n\t1\t2\t3\t12\t4\n\t1\t6\t5\t15\t4\n\t1\t6\t5\t16\t4\n\t2\t1\t6\t17\t4\n\t2\t1\t6\t18\t4\n\t2\t3\t4\t13\t4\n\t2\t3\t4\t14\t4\n\t3\t2\t1\t7\t4\n\t3\t2\t1\t8\t4\n\t3\t4\t5\t15\t4\n\t3\t4\t5\t16\t4\n\t4\t3\t2\t9\t4\n\t4\t3\t2\t10\t4\n\t4\t5\t6\t17\t4\n\t4\t5\t6\t18\t4\n\t5\t4\t3\t11\t4\n\t5\t4\t3\t12\t4\n\t5\t6\t1\t7\t4\n\t5\t6\t1\t8\t4\n\t6\t1\t2\t9\t4\n\t6\t1\t2\t10\t4\n\t6\t5\t4\t13\t4\n\t6\t5\t4\t14\t4\n\t7\t1\t2\t9\t5\n\t7\t1\t2\t10\t5\n\t7\t1\t6\t17\t5\n\t7\t1\t6\t18\t5\n\t8\t1\t2\t9\t5\n\t8\t1\t2\t10\t5\n\t8\t1\t6\t17\t5\n\t8\t1\t6\t18\t5\n\t9\t2\t3\t11\t5\n\t9\t2\t3\t12\t5\n\t10\t2\t3\t11\t5\n\t10\t2\t3\t12\t5\n\t11\t3\t4\t13\t5\n\t11\t3\t4\t14\t5\n\t12\t3\t4\t13\t5\n\t12\t3\t4\t14\t5\n\t13\t4\t5\t15\t5\n\t13\t4\t5\t16\t5\n\t14\t4\t5\t15\t5\n\t14\t4\t5\t16\t5\n\t15\t5\t6\t17\t5\n\t15\t5\t6\t18\t5\n\t16\t5\t6\t17\t5\n\t16\t5\t6\t18\t5\nEND\nDIHEDRAL\n# \t NPHI: number of dihedrals NOT involving H atoms\n \t 18\n# \t IP \t JP \t KP \t LP \t ICP\n\t1\t2\t3\t4\t1\n\t1\t2\t3\t4\t2\n\t1\t2\t3\t4\t3\n\t1\t6\t5\t4\t1\n\t1\t6\t5\t4\t2\n\t1\t6\t5\t4\t3\n\t2\t1\t6\t5\t1\n\t2\t1\t6\t5\t2\n\t2\t1\t6\t5\t3\n\t2\t3\t4\t5\t1\n\t2\t3\t4\t5\t2\n\t2\t3\t4\t5\t3\n\t3\t2\t1\t6\t1\n\t3\t2\t1\t6\t2\n\t3\t2\t1\t6\t3\n\t3\t4\t5\t6\t1\n\t3\t4\t5\t6\t2\n\t3\t4\t5\t6\t3\nEND\nLJPARAMETERS\n# \t NRATT2: number of LJ interaction types = NRATT*(NRATT+1)/2\n \t 3\n# \t IAC \t JAC \t C12 \t C6 \t CS12 \t CS6\n\t1\t1\t4.364248e-06\t2.826762e-03\t0.000000e+00\t0.000000e+00\n\t1\t2\t3.704641e-07\t5.069068e-04\t0.000000e+00\t0.000000e+00\n\t2\t2\t3.144727e-08\t9.090068e-05\t0.000000e+00\t0.000000e+00\nEND\nSOLUTEMOLECULES\n1\n18\nEND\nTEMPERATUREGROUPS\n1\n18\nEND\nPRESSUREGROUPS\n1\n18\nEND\nLJEXCEPTIONS\n# This block defines special LJ-interactions based on atom numbers \n# This overrules the normal LJ-parameters (including 1-4 interactions)\n# \t NEX: number of exceptions\n \t 0\n# \t AT1 \t AT2 \t C12 \t C6\nEND\nSOLVENTATOM\n# \t NRAM: number of atoms per solvent molecule\n \t 3\n# \t I \t ANMS \t IACS \t MASS \t CGS\n\t1\tOW\t999\t1.59994e+01\t-8.20000e-01\n\t2\tHW1\t998\t1.00800e+00\t4.10000e-01\n\t3\tHW2\t998\t1.00800e+00\t4.10000e-01\nEND\nSOLVENTCONSTR\n# \t NCONS: number of constraints\n \t 3\n# \t ICONS \t JCONS \t 
CONS\n\t1\t2\t0.1\n\t1\t3\t0.1\n\t2\t3\t0.163299\nEND\n\n" ], [ "from pygromos.gromos import gromosPP, gromosXX", "_____no_output_____" ], [ "gromosPP.GromosPP(bin_path=\"/home/mlehner/gromosPlsPls/gromos++/BUILD/programs\")", "_____no_output_____" ], [ "top3._orig_file_path = \"/home/mlehner/Documents/dev/temp19/top3.top\"", "_____no_output_____" ], [ "top3.write(\"/home/mlehner/Documents/dev/temp19/top3.top\")", "_____no_output_____" ], [ "top4 = openforcefield2gromos(moleculeFF).convert_return()", "0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Bonds' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Angles' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'ProperTorsions' tag. The SMIRNOFF spec converter is assuming it has a value of 'charmm'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'vdW' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lennard-Jones-12-6'\n0.1 SMIRNOFF spec did not allow the 'Electrostatics' tag. Adding it in 0.2 spec conversion, and assuming the following values:\n\tmethod: PME\n\tscale12: 0.0\n\tscale13: 0.0\n\tscale15: 1.0\n\tcutoff: 9.0\n\tcutoff_unit: angstrom\n0.1 SMIRNOFF spec file does not contain 'method' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'cutoff'\n0.1 SMIRNOFF spec file does not contain 'combining_rules' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lorentz-Berthelot'\n0.1 SMIRNOFF spec file does not contain 'scale12' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale13' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale15' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1'\n0.1 SMIRNOFF spec file does not contain 'switch_width' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1.0'\n0.1 SMIRNOFF spec file does not contain 'switch_width_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n0.1 SMIRNOFF spec file does not contain 'cutoff' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '9.0'\n0.1 SMIRNOFF spec file does not contain 'cutoff_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\nTITLE\nPHYSICALCONSTANTS\nTOPVERSION\nSOLUTEMOLECULES\nTEMPERATUREGROUPS\nPRESSUREGROUPS\nLJEXCEPTIONS\nSOLVENTATOM\nSOLVENTCONSTR\n" ], [ "top4.make_ordered()", "_____no_output_____" ], [ "print(top4)", "TITLE\nBlank Template created with Pygromos\n\n\tname: \t hill_formula: C6H12\n\t----------------------------------------\n\t| created from OpenForceField topology |\n\t| use Amber Block for OpenFF topology! |\n\t----------------------------------------\n\n\t >>> Generated with python lib function_libs utilities. 
(riniker group)\n\t >>> line_seperator: '\\n'\t field_seperator: '\\t'\nEND\nPHYSICALCONSTANTS\n# FPEPSI: 1.0/(4.0*PI*EPS0) (EPS0 is the permittivity of vacuum)\n138.9354\n# HBAR: Planck's constant HBAR = H/(2* PI)\n0.0635078\n# SPDL: Speed of light (nm/ps)\n299792.458\n# BOLTZ: Boltzmann's constant kB\n0.00831441\nEND\nTOPVERSION\n2.0\nEND\nATOMTYPENAME\n2\nn16\nn2\nEND\nRESNAME\n1\nC6H12\nEND\nSOLUTEATOM\n# NRP: number of solute atoms\n \t 18\n# ATNM: atom number\n# MRES: residue number\n# PANM: atom name of solute atom\n# IAC: integer (van der Waals) atom type code\n# MASS: mass of solute atom\n# CG: charge of solute atom\n# CGC: charge group code (0 or 1)\n# INE: number of excluded atoms\n# INE14: number of 1-4 interactions\n# ATNM MRES PANM IAC MASS CG CGC INE\n# INE14\n\t1\t1\tn16\t1\t12.01078\t0\t1\t10\t2\t3\t5\t6\t7\t8\n\t\t\t\t\t\t\t\t\t9\t10\t17\t18\n\t\t\t\t\t\t\t\t5\t4\t11\t12\t15\t16\n\t2\t1\tn16\t1\t12.01078\t0\t1\t9\t3\t4\t6\t7\t8\t9\n\t\t\t\t\t\t\t\t\t10\t11\t12\n\t\t\t\t\t\t\t\t5\t5\t13\t14\t17\t18\n\t3\t1\tn16\t1\t12.01078\t0\t1\t8\t4\t5\t9\t10\t11\t12\n\t\t\t\t\t\t\t\t\t13\t14\n\t\t\t\t\t\t\t\t5\t6\t7\t8\t15\t16\n\t4\t1\tn16\t1\t12.01078\t0\t1\t8\t5\t6\t11\t12\t13\t14\n\t\t\t\t\t\t\t\t\t15\t16\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t5\t1\tn16\t1\t12.01078\t0\t1\t7\t6\t13\t14\t15\t16\t17\n\t\t\t\t\t\t\t\t\t18\n\t\t\t\t\t\t\t\t4\t7\t8\t11\t12\n\t6\t1\tn16\t1\t12.01078\t0\t1\t6\t7\t8\t15\t16\t17\t18\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t4\t9\t10\t13\t14\n\t7\t1\tn2\t2\t1.007947\t0\t1\t1\t8\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t8\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t9\t1\tn2\t2\t1.007947\t0\t1\t1\t10\n\t\t\t\t\t\t\t\t2\t11\t12\n\t10\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t11\t12\n\t11\t1\tn2\t2\t1.007947\t0\t1\t1\t12\n\t\t\t\t\t\t\t\t2\t13\t14\n\t12\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t13\t14\n\t13\t1\tn2\t2\t1.007947\t0\t1\t1\t14\n\t\t\t\t\t\t\t\t2\t15\t16\n\t14\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t15\t16\n\t15\t1\tn2\t2\t1.007947\t0\t1\t1\t16\n\t\t\t\t\t\t\t\t2\t17\t18\n\t16\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t17\t18\n\t17\t1\tn2\t2\t1.007947\t0\t1\t1\t18\n\t\t\t\t\t\t\t\t0\n\t18\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t0\nEND\nBONDSTRETCHTYPE\n# \t NBTY: number of covalent bond types\n \t 2\n# \t CB \t CHB \t B0\n\t5.56986e+06\t2.59408e+05\t1.52600e-01\n\t1.19734e+07\t2.84512e+05\t1.09000e-01\nEND\nBONDH\n# \t NBONH: number of bonds involving H atoms in solute\n \t 12\n# \t IBH \t JBH \t ICBH\n\t1\t7\t2\n\t1\t8\t2\n\t2\t9\t2\n\t2\t10\t2\n\t3\t11\t2\n\t3\t12\t2\n\t4\t13\t2\n\t4\t14\t2\n\t5\t15\t2\n\t5\t16\t2\n\t6\t17\t2\n\t6\t18\t2\nEND\nBOND\n# \t NBON: number of bonds NOT involving H atoms in solute\n \t 6\n# \t IB \t JB \t ICB\n\t1\t2\t1\n\t1\t6\t1\n\t2\t3\t1\n\t3\t4\t1\n\t4\t5\t1\n\t5\t6\t1\nEND\nBONDANGLEBENDTYPE\n# \t NBTY: number of angle types\n \t 2\n# \t CT \t CHT \t T0\n\t0.00000e+00\t4.18400e+02\t1.09500e+02\n\t0.00000e+00\t2.92880e+02\t1.09500e+02\nEND\nBONDANGLEH\n# \t NTHEH: number of bondangles involving a H\n \t 30\n# \t ITH \t JTH \t KTH \t 
ICTH\n\t1\t2\t9\t1\n\t1\t2\t10\t1\n\t1\t6\t17\t1\n\t1\t6\t18\t1\n\t2\t1\t7\t1\n\t2\t1\t8\t1\n\t2\t3\t11\t1\n\t2\t3\t12\t1\n\t3\t2\t9\t1\n\t3\t2\t10\t1\n\t3\t4\t13\t1\n\t3\t4\t14\t1\n\t4\t3\t11\t1\n\t4\t3\t12\t1\n\t4\t5\t15\t1\n\t4\t5\t16\t1\n\t5\t4\t13\t1\n\t5\t4\t14\t1\n\t5\t6\t17\t1\n\t5\t6\t18\t1\n\t6\t1\t7\t1\n\t6\t1\t8\t1\n\t6\t5\t15\t1\n\t6\t5\t16\t1\n\t7\t1\t8\t2\n\t9\t2\t10\t2\n\t11\t3\t12\t2\n\t13\t4\t14\t2\n\t15\t5\t16\t2\n\t17\t6\t18\t2\nEND\nBONDANGLE\n# \t NTHE: number of angles\n \t 6\n# \t IT \t JT \t KT \t ICT\n\t1\t2\t3\t1\n\t1\t6\t5\t1\n\t2\t1\t6\t1\n\t2\t3\t4\t1\n\t3\t4\t5\t1\n\t4\t5\t6\t1\nEND\nIMPDIHEDRALTYPE\n# \t NQTY: number of improper dihedrals\n \t 0\n# \t CQ \t Q0\nEND\nIMPDIHEDRALH\n# \t NQHIH: number of improper dihedrals involving H atoms\n \t 0\n# \t IQH \t JQH \t KQH \t LQH \t ICQH\nEND\nIMPDIHEDRAL\n# \t # NQHI: number of improper dihedrals\n \t 0\n# \t IQ \t JQ \t KQ \t LQ \t ICQ\nEND\nTORSDIHEDRALTYPE\n# \t NPTY: number of torsion dihedrals\n \t 5\n# \t CP \t PD \t NP\n\t7.53120e-01\t0.00000e+00\t3\n\t1.04600e+00\t1.80000e+02\t2\n\t8.36800e-01\t1.80000e+02\t1\n\t6.69440e-01\t0.00000e+00\t3\n\t6.27600e-01\t0.00000e+00\t3\nEND\nDIHEDRALH\n# \t NPHIH: number of torsion dihedrals involving H atoms\n \t 48\n# \t IPH \t JPH \t KPH \t LPH \t ICPH\n\t1\t2\t3\t11\t4\n\t1\t2\t3\t12\t4\n\t1\t6\t5\t15\t4\n\t1\t6\t5\t16\t4\n\t2\t1\t6\t17\t4\n\t2\t1\t6\t18\t4\n\t2\t3\t4\t13\t4\n\t2\t3\t4\t14\t4\n\t3\t2\t1\t7\t4\n\t3\t2\t1\t8\t4\n\t3\t4\t5\t15\t4\n\t3\t4\t5\t16\t4\n\t4\t3\t2\t9\t4\n\t4\t3\t2\t10\t4\n\t4\t5\t6\t17\t4\n\t4\t5\t6\t18\t4\n\t5\t4\t3\t11\t4\n\t5\t4\t3\t12\t4\n\t5\t6\t1\t7\t4\n\t5\t6\t1\t8\t4\n\t6\t1\t2\t9\t4\n\t6\t1\t2\t10\t4\n\t6\t5\t4\t13\t4\n\t6\t5\t4\t14\t4\n\t7\t1\t2\t9\t5\n\t7\t1\t2\t10\t5\n\t7\t1\t6\t17\t5\n\t7\t1\t6\t18\t5\n\t8\t1\t2\t9\t5\n\t8\t1\t2\t10\t5\n\t8\t1\t6\t17\t5\n\t8\t1\t6\t18\t5\n\t9\t2\t3\t11\t5\n\t9\t2\t3\t12\t5\n\t10\t2\t3\t11\t5\n\t10\t2\t3\t12\t5\n\t11\t3\t4\t13\t5\n\t11\t3\t4\t14\t5\n\t12\t3\t4\t13\t5\n\t12\t3\t4\t14\t5\n\t13\t4\t5\t15\t5\n\t13\t4\t5\t16\t5\n\t14\t4\t5\t15\t5\n\t14\t4\t5\t16\t5\n\t15\t5\t6\t17\t5\n\t15\t5\t6\t18\t5\n\t16\t5\t6\t17\t5\n\t16\t5\t6\t18\t5\nEND\nDIHEDRAL\n# \t NPHI: number of dihedrals NOT involving H atoms\n \t 18\n# \t IP \t JP \t KP \t LP \t ICP\n\t1\t2\t3\t4\t1\n\t1\t2\t3\t4\t2\n\t1\t2\t3\t4\t3\n\t1\t6\t5\t4\t1\n\t1\t6\t5\t4\t2\n\t1\t6\t5\t4\t3\n\t2\t1\t6\t5\t1\n\t2\t1\t6\t5\t2\n\t2\t1\t6\t5\t3\n\t2\t3\t4\t5\t1\n\t2\t3\t4\t5\t2\n\t2\t3\t4\t5\t3\n\t3\t2\t1\t6\t1\n\t3\t2\t1\t6\t2\n\t3\t2\t1\t6\t3\n\t3\t4\t5\t6\t1\n\t3\t4\t5\t6\t2\n\t3\t4\t5\t6\t3\nEND\nLJPARAMETERS\n# \t NRATT2: number of LJ interaction types = NRATT*(NRATT+1)/2\n \t 3\n# \t IAC \t JAC \t C12 \t C6 \t CS12 \t CS6\n\t1\t1\t4.364248e-06\t2.826762e-03\t0.000000e+00\t0.000000e+00\n\t1\t2\t3.704641e-07\t5.069068e-04\t0.000000e+00\t0.000000e+00\n\t2\t2\t3.144727e-08\t9.090068e-05\t0.000000e+00\t0.000000e+00\nEND\nSOLUTEMOLECULES\n1\n18\nEND\nTEMPERATUREGROUPS\n1\n18\nEND\nPRESSUREGROUPS\n1\n18\nEND\nLJEXCEPTIONS\n# This block defines special LJ-interactions based on atom numbers \n# This overrules the normal LJ-parameters (including 1-4 interactions)\n# \t NEX: number of exceptions\n \t 0\n# \t AT1 \t AT2 \t C12 \t C6\nEND\nSOLVENTATOM\n# \t NRAM: number of atoms per solvent molecule\n \t 0\n# \t I \t ANMS \t IACS \t MASS \t CGS\nEND\nSOLVENTCONSTR\n# \t NCONS: number of constraints\n \t 0\n# \t ICONS \t JCONS \t CONS\nEND\n\n" ], [ "top5 = openforcefield2gromos(moleculeFF, gromosTop=Top(input=None)).convert_return()", "0.1 SMIRNOFF spec 
file does not contain 'potential' attribute for 'Bonds' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'Angles' tag. The SMIRNOFF spec converter is assuming it has a value of 'harmonic'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'ProperTorsions' tag. The SMIRNOFF spec converter is assuming it has a value of 'charmm'\n0.1 SMIRNOFF spec file does not contain 'potential' attribute for 'vdW' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lennard-Jones-12-6'\n0.1 SMIRNOFF spec did not allow the 'Electrostatics' tag. Adding it in 0.2 spec conversion, and assuming the following values:\n\tmethod: PME\n\tscale12: 0.0\n\tscale13: 0.0\n\tscale15: 1.0\n\tcutoff: 9.0\n\tcutoff_unit: angstrom\n0.1 SMIRNOFF spec file does not contain 'method' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'cutoff'\n0.1 SMIRNOFF spec file does not contain 'combining_rules' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'Lorentz-Berthelot'\n0.1 SMIRNOFF spec file does not contain 'scale12' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale13' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '0.0'\n0.1 SMIRNOFF spec file does not contain 'scale15' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1'\n0.1 SMIRNOFF spec file does not contain 'switch_width' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '1.0'\n0.1 SMIRNOFF spec file does not contain 'switch_width_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n0.1 SMIRNOFF spec file does not contain 'cutoff' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of '9.0'\n0.1 SMIRNOFF spec file does not contain 'cutoff_unit' attribute for 'NonBondedMethod/vdW'' tag. The SMIRNOFF spec converter is assuming it has a value of 'angstrom'\n" ], [ "top5.make_ordered()", "_____no_output_____" ], [ "print(top5)", "TITLE\n\n\tname: \t hill_formula: C6H12\n\t----------------------------------------\n\t| created from OpenForceField topology |\n\t| use Amber Block for OpenFF topology! |\n\t----------------------------------------\n\n\t >>> Generated with python lib function_libs utilities. 
(riniker group)\n\t >>> line_seperator: '\\n'\t field_seperator: '\\t'\nEND\nPHYSICALCONSTANTS\n# FPEPSI: 1.0/(4.0*PI*EPS0) (EPS0 is the permittivity of vacuum)\n138.9354\n# HBAR: Planck's constant HBAR = H/(2* PI)\n0.0635078\n# SPDL: Speed of light (nm/ps)\n299792.458\n# BOLTZ: Boltzmann's constant kB\n0.00831441\nEND\nTOPVERSION\n2.0\nEND\nATOMTYPENAME\n2\nn16\nn2\nEND\nRESNAME\n1\nC6H12\nEND\nSOLUTEATOM\n# NRP: number of solute atoms\n \t 18\n# ATNM: atom number\n# MRES: residue number\n# PANM: atom name of solute atom\n# IAC: integer (van der Waals) atom type code\n# MASS: mass of solute atom\n# CG: charge of solute atom\n# CGC: charge group code (0 or 1)\n# INE: number of excluded atoms\n# INE14: number of 1-4 interactions\n# ATNM MRES PANM IAC MASS CG CGC INE\n# INE14\n\t1\t1\tn16\t1\t12.01078\t0\t1\t10\t2\t3\t5\t6\t7\t8\n\t\t\t\t\t\t\t\t\t9\t10\t17\t18\n\t\t\t\t\t\t\t\t5\t4\t11\t12\t15\t16\n\t2\t1\tn16\t1\t12.01078\t0\t1\t9\t3\t4\t6\t7\t8\t9\n\t\t\t\t\t\t\t\t\t10\t11\t12\n\t\t\t\t\t\t\t\t5\t5\t13\t14\t17\t18\n\t3\t1\tn16\t1\t12.01078\t0\t1\t8\t4\t5\t9\t10\t11\t12\n\t\t\t\t\t\t\t\t\t13\t14\n\t\t\t\t\t\t\t\t5\t6\t7\t8\t15\t16\n\t4\t1\tn16\t1\t12.01078\t0\t1\t8\t5\t6\t11\t12\t13\t14\n\t\t\t\t\t\t\t\t\t15\t16\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t5\t1\tn16\t1\t12.01078\t0\t1\t7\t6\t13\t14\t15\t16\t17\n\t\t\t\t\t\t\t\t\t18\n\t\t\t\t\t\t\t\t4\t7\t8\t11\t12\n\t6\t1\tn16\t1\t12.01078\t0\t1\t6\t7\t8\t15\t16\t17\t18\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t4\t9\t10\t13\t14\n\t7\t1\tn2\t2\t1.007947\t0\t1\t1\t8\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t8\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t4\t9\t10\t17\t18\n\t9\t1\tn2\t2\t1.007947\t0\t1\t1\t10\n\t\t\t\t\t\t\t\t2\t11\t12\n\t10\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t11\t12\n\t11\t1\tn2\t2\t1.007947\t0\t1\t1\t12\n\t\t\t\t\t\t\t\t2\t13\t14\n\t12\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t13\t14\n\t13\t1\tn2\t2\t1.007947\t0\t1\t1\t14\n\t\t\t\t\t\t\t\t2\t15\t16\n\t14\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t15\t16\n\t15\t1\tn2\t2\t1.007947\t0\t1\t1\t16\n\t\t\t\t\t\t\t\t2\t17\t18\n\t16\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t2\t17\t18\n\t17\t1\tn2\t2\t1.007947\t0\t1\t1\t18\n\t\t\t\t\t\t\t\t0\n\t18\t1\tn2\t2\t1.007947\t0\t1\t0\n\t\t\t\t\t\t\t\t0\nEND\nBONDSTRETCHTYPE\n# \t NBTY: number of covalent bond types\n \t 2\n# \t CB \t CHB \t B0\n\t5.56986e+06\t2.59408e+05\t1.52600e-01\n\t1.19734e+07\t2.84512e+05\t1.09000e-01\nEND\nBONDH\n# \t NBONH: number of bonds involving H atoms in solute\n \t 12\n# \t IBH \t JBH \t ICBH\n\t1\t7\t2\n\t1\t8\t2\n\t2\t9\t2\n\t2\t10\t2\n\t3\t11\t2\n\t3\t12\t2\n\t4\t13\t2\n\t4\t14\t2\n\t5\t15\t2\n\t5\t16\t2\n\t6\t17\t2\n\t6\t18\t2\nEND\nBOND\n# \t NBON: number of bonds NOT involving H atoms in solute\n \t 6\n# \t IB \t JB \t ICB\n\t1\t2\t1\n\t1\t6\t1\n\t2\t3\t1\n\t3\t4\t1\n\t4\t5\t1\n\t5\t6\t1\nEND\nBONDANGLEBENDTYPE\n# \t NBTY: number of angle types\n \t 2\n# \t CT \t CHT \t T0\n\t0.00000e+00\t4.18400e+02\t1.09500e+02\n\t0.00000e+00\t2.92880e+02\t1.09500e+02\nEND\nBONDANGLEH\n# \t NTHEH: number of bondangles involving a H\n \t 30\n# \t ITH \t JTH \t KTH \t 
ICTH\n\t1\t2\t9\t1\n\t1\t2\t10\t1\n\t1\t6\t17\t1\n\t1\t6\t18\t1\n\t2\t1\t7\t1\n\t2\t1\t8\t1\n\t2\t3\t11\t1\n\t2\t3\t12\t1\n\t3\t2\t9\t1\n\t3\t2\t10\t1\n\t3\t4\t13\t1\n\t3\t4\t14\t1\n\t4\t3\t11\t1\n\t4\t3\t12\t1\n\t4\t5\t15\t1\n\t4\t5\t16\t1\n\t5\t4\t13\t1\n\t5\t4\t14\t1\n\t5\t6\t17\t1\n\t5\t6\t18\t1\n\t6\t1\t7\t1\n\t6\t1\t8\t1\n\t6\t5\t15\t1\n\t6\t5\t16\t1\n\t7\t1\t8\t2\n\t9\t2\t10\t2\n\t11\t3\t12\t2\n\t13\t4\t14\t2\n\t15\t5\t16\t2\n\t17\t6\t18\t2\nEND\nBONDANGLE\n# \t NTHE: number of angles\n \t 6\n# \t IT \t JT \t KT \t ICT\n\t1\t2\t3\t1\n\t1\t6\t5\t1\n\t2\t1\t6\t1\n\t2\t3\t4\t1\n\t3\t4\t5\t1\n\t4\t5\t6\t1\nEND\nIMPDIHEDRALTYPE\n# \t NQTY: number of improper dihedrals\n \t 0\n# \t CQ \t Q0\nEND\nIMPDIHEDRALH\n# \t NQHIH: number of improper dihedrals involving H atoms\n \t 0\n# \t IQH \t JQH \t KQH \t LQH \t ICQH\nEND\nIMPDIHEDRAL\n# \t # NQHI: number of improper dihedrals\n \t 0\n# \t IQ \t JQ \t KQ \t LQ \t ICQ\nEND\nTORSDIHEDRALTYPE\n# \t NPTY: number of torsion dihedrals\n \t 5\n# \t CP \t PD \t NP\n\t7.53120e-01\t0.00000e+00\t3\n\t1.04600e+00\t1.80000e+02\t2\n\t8.36800e-01\t1.80000e+02\t1\n\t6.69440e-01\t0.00000e+00\t3\n\t6.27600e-01\t0.00000e+00\t3\nEND\nDIHEDRALH\n# \t NPHIH: number of torsion dihedrals involving H atoms\n \t 48\n# \t IPH \t JPH \t KPH \t LPH \t ICPH\n\t1\t2\t3\t11\t4\n\t1\t2\t3\t12\t4\n\t1\t6\t5\t15\t4\n\t1\t6\t5\t16\t4\n\t2\t1\t6\t17\t4\n\t2\t1\t6\t18\t4\n\t2\t3\t4\t13\t4\n\t2\t3\t4\t14\t4\n\t3\t2\t1\t7\t4\n\t3\t2\t1\t8\t4\n\t3\t4\t5\t15\t4\n\t3\t4\t5\t16\t4\n\t4\t3\t2\t9\t4\n\t4\t3\t2\t10\t4\n\t4\t5\t6\t17\t4\n\t4\t5\t6\t18\t4\n\t5\t4\t3\t11\t4\n\t5\t4\t3\t12\t4\n\t5\t6\t1\t7\t4\n\t5\t6\t1\t8\t4\n\t6\t1\t2\t9\t4\n\t6\t1\t2\t10\t4\n\t6\t5\t4\t13\t4\n\t6\t5\t4\t14\t4\n\t7\t1\t2\t9\t5\n\t7\t1\t2\t10\t5\n\t7\t1\t6\t17\t5\n\t7\t1\t6\t18\t5\n\t8\t1\t2\t9\t5\n\t8\t1\t2\t10\t5\n\t8\t1\t6\t17\t5\n\t8\t1\t6\t18\t5\n\t9\t2\t3\t11\t5\n\t9\t2\t3\t12\t5\n\t10\t2\t3\t11\t5\n\t10\t2\t3\t12\t5\n\t11\t3\t4\t13\t5\n\t11\t3\t4\t14\t5\n\t12\t3\t4\t13\t5\n\t12\t3\t4\t14\t5\n\t13\t4\t5\t15\t5\n\t13\t4\t5\t16\t5\n\t14\t4\t5\t15\t5\n\t14\t4\t5\t16\t5\n\t15\t5\t6\t17\t5\n\t15\t5\t6\t18\t5\n\t16\t5\t6\t17\t5\n\t16\t5\t6\t18\t5\nEND\nDIHEDRAL\n# \t NPHI: number of dihedrals NOT involving H atoms\n \t 18\n# \t IP \t JP \t KP \t LP \t ICP\n\t1\t2\t3\t4\t1\n\t1\t2\t3\t4\t2\n\t1\t2\t3\t4\t3\n\t1\t6\t5\t4\t1\n\t1\t6\t5\t4\t2\n\t1\t6\t5\t4\t3\n\t2\t1\t6\t5\t1\n\t2\t1\t6\t5\t2\n\t2\t1\t6\t5\t3\n\t2\t3\t4\t5\t1\n\t2\t3\t4\t5\t2\n\t2\t3\t4\t5\t3\n\t3\t2\t1\t6\t1\n\t3\t2\t1\t6\t2\n\t3\t2\t1\t6\t3\n\t3\t4\t5\t6\t1\n\t3\t4\t5\t6\t2\n\t3\t4\t5\t6\t3\nEND\nLJPARAMETERS\n# \t NRATT2: number of LJ interaction types = NRATT*(NRATT+1)/2\n \t 3\n# \t IAC \t JAC \t C12 \t C6 \t CS12 \t CS6\n\t1\t1\t4.364248e-06\t2.826762e-03\t0.000000e+00\t0.000000e+00\n\t1\t2\t3.704641e-07\t5.069068e-04\t0.000000e+00\t0.000000e+00\n\t2\t2\t3.144727e-08\t9.090068e-05\t0.000000e+00\t0.000000e+00\nEND\nSOLUTEMOLECULES\n1\n18\nEND\nTEMPERATUREGROUPS\n1\n18\nEND\nPRESSUREGROUPS\n1\n18\nEND\nLJEXCEPTIONS\n# This block defines special LJ-interactions based on atom numbers \n# This overrules the normal LJ-parameters (including 1-4 interactions)\n# \t NEX: number of exceptions\n \t 0\n# \t AT1 \t AT2 \t C12 \t C6\nEND\nSOLVENTATOM\n# \t NRAM: number of atoms per solvent molecule\n \t 0\n# \t I \t ANMS \t IACS \t MASS \t CGS\nEND\nSOLVENTCONSTR\n# \t NCONS: number of constraints\n \t 0\n# \t ICONS \t JCONS \t CONS\nEND\n\n" ], [ "top5.write(\"/home/mlehner/Documents/dev/test2.top\")", "_____no_output_____" ], [ 
"mol2=AllChem.MolFromSmarts(\"[C:1]=C(C)C(=O)OC\")", "_____no_output_____" ], [ "mol2", "_____no_output_____" ], [ "for a in mol2.GetAtoms():\n print(a.GetAtomicNum())\n print(a.GetIdx())\n print(a.GetTotalNumHs())\n print(a.GetAtomMapNum())\n print(\"----------------\")", "6\n0\n2\n1\n----------------\n6\n1\n0\n0\n----------------\n6\n2\n3\n0\n----------------\n6\n3\n0\n0\n----------------\n8\n4\n0\n0\n----------------\n8\n5\n0\n0\n----------------\n6\n6\n3\n0\n----------------\n" ], [ "AllChem.MolFromSmarts(\"[#16][#6]=[#6:1]([#6])[#8][#6]\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec57d01c957c91c4e01778abd43cbe1d84176f3b
295,030
ipynb
Jupyter Notebook
notebooks/05_three_prime_nanopore_vs_helicos.ipynb
elifesciences-publications/Simpson_Barton_Nanopore_1
5081c895aa0682d17c1331a00c2ec9e5805e2a0e
[ "MIT" ]
8
2020-01-21T15:41:46.000Z
2021-11-30T10:03:13.000Z
notebooks/05_three_prime_nanopore_vs_helicos.ipynb
elifesciences-publications/Simpson_Barton_Nanopore_1
5081c895aa0682d17c1331a00c2ec9e5805e2a0e
[ "MIT" ]
6
2020-02-24T16:29:25.000Z
2021-05-20T09:09:25.000Z
notebooks/05_three_prime_nanopore_vs_helicos.ipynb
bartongroup/Simpson_Barton_Nanopore_1
1b509454a9e25a8c81be5092f8e525ca00e7b5a5
[ "MIT" ]
5
2019-12-19T13:56:15.000Z
2021-11-30T10:03:42.000Z
642.766885
114,160
0.939803
[ [ [ "# Comparing 3' ends for ONT DRS and Helicos Bio\n\nCode for making Figure 2 in the Nanopore DRS paper *Native long-read RNA sequencing of the Arabidopsis thaliana transcriptome*. For this figure we're focussing on two sets of data; first, four biological replicates of WT col-0 tissue sequenced with ONT Direct RNA Sequencing and second, the combined data from a set of three biological replicates of WT col-0 tissue sequenced with Helicos Bio poly-A data that was used to identify poly-A positions with ~bp resolution in Sherstnev et. al. 2012.\n\nThis figure is here to illuminate the capability of ONT DRS data to reliably identify the 3' ends and poly-A position of mRNAs.", "_____no_output_____" ] ], [ [ "import re, os, sys, pysam, numpy, json, matplotlib, shutil\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom parsing_routines.wig_tools import wigData, mathWigs", "_____no_output_____" ], [ "# ONT Datasets\nONT_DRS_fwd = {\"col0_rep1\":{\"bigwig\":\"201901_col0_2916_fwd_three-prime.bigwig\",\n \"path\":\"../../datasets/20180201_1617_20180201_FAH45730_WT_Col0_2916_regular_seq\"},\n \"col0_rep2a\":{\"bigwig\":\"201903_col0_2917_exp2_fwd_three-prime.bigwig\",\n \"path\":\"../../datasets/20180405_FAH59362_WT_Col0_2917\"},\n \"col0_rep2b\":{\"bigwig\":\"201901_col0_2917_fwd_three-prime.bigwig\",\n \"path\":\"../../datasets/20180413_1558_20180413_FAH77434_mRNA_WT_Col0_2917\"},\n \"col0_rep3\":{\"bigwig\":\"201901_col0_2918_fwd_three-prime.bigwig\",\n \"path\":\"../../datasets/20180416_1534_20180415_FAH83697_mRNA_WT_Col0_2918\"},\n \"col0_rep4\":{\"bigwig\":\"201901_col0_2919_fwd_three-prime.bigwig\",\n \"path\":\"../../datasets/20180418_1428_20180418_FAH83552_mRNA_WT_Col0_2919\"}}\n\nONT_DRS_rev = {\"col0_rep1\":{\"bigwig\":\"201901_col0_2916_rev_three-prime.bigwig\",\n \"path\":\"../../datasets/20180201_1617_20180201_FAH45730_WT_Col0_2916_regular_seq\"},\n \"col0_rep2a\":{\"bigwig\":\"201903_col0_2917_exp2_rev_three-prime.bigwig\",\n \"path\":\"../../datasets/20180405_FAH59362_WT_Col0_2917\"},\n \"col0_rep2b\":{\"bigwig\":\"201901_col0_2917_rev_three-prime.bigwig\",\n \"path\":\"../../datasets/20180413_1558_20180413_FAH77434_mRNA_WT_Col0_2917\"},\n \"col0_rep3\":{\"bigwig\":\"201901_col0_2918_rev_three-prime.bigwig\",\n \"path\":\"../../datasets/20180416_1534_20180415_FAH83697_mRNA_WT_Col0_2918\"},\n \"col0_rep4\":{\"bigwig\":\"201901_col0_2919_rev_three-prime.bigwig\",\n \"path\":\"../../datasets/20180418_1428_20180418_FAH83552_mRNA_WT_Col0_2919\"}}\n\n# Helicos Bio datasets\nHB_data_root = \"../../supporting_datasets/Sherstnev2012_Helicos_Bio/\"\nHB_DRS_fwd = \"fwd_wt1_HQ2.wig.gz\"\nHB_DRS_rev = \"rev_wt1_HQ2.wig.gz\"\n\ndatadic = {\"fwd\":{\"Helicos DRS\":wigData(os.path.join(HB_data_root, HB_DRS_fwd))},\n \"rev\":{\"Helicos DRS\":wigData(os.path.join(HB_data_root, HB_DRS_rev))}}\n\nbigWigToWig_binary = '../../pipeline/external_tools/bigWigToWig'\nfwd_ONT_wigdata = None\nfor keystr in ONT_DRS_fwd.keys():\n fullpath = os.path.join(ONT_DRS_fwd[keystr][\"path\"], \"aligned_data/TAIR10/\",\n ONT_DRS_fwd[keystr][\"bigwig\"])\n print(\"Reading {}...\".format(fullpath))\n thisdata = wigData(fullpath, isBigWig=True, bigWigToWig_binary=bigWigToWig_binary)\n if fwd_ONT_wigdata is None:\n fwd_ONT_wigdata = thisdata\n else:\n fwd_ONT_wigdata = mathWigs(fwd_ONT_wigdata, thisdata)\n\ndatadic[\"fwd\"][\"ONT DRS\"] = fwd_ONT_wigdata\n\nrev_ONT_wigdata = None\nfor keystr in ONT_DRS_rev.keys():\n fullpath = os.path.join(ONT_DRS_rev[keystr][\"path\"], \"aligned_data/TAIR10/\",\n 
ONT_DRS_rev[keystr][\"bigwig\"])\n print(\"Reading {}...\".format(fullpath))\n thisdata = wigData(fullpath, isBigWig=True, bigWigToWig_binary=bigWigToWig_binary)\n if rev_ONT_wigdata is None:\n rev_ONT_wigdata = thisdata\n else:\n rev_ONT_wigdata = mathWigs(rev_ONT_wigdata, thisdata)\n\ndatadic[\"rev\"][\"ONT DRS\"] = rev_ONT_wigdata\nos.unlink(os.path.join(os.getcwd(),\"out.wig\")) # remove irritating intermediate file ;)", "Reading ../../datasets/20180201_1617_20180201_FAH45730_WT_Col0_2916_regular_seq/aligned_data/TAIR10/201901_col0_2916_fwd_three-prime.bigwig...\nReading ../../datasets/2020180405_FAH59362_WT_Col0_2917/aligned_data/TAIR10/201903_col0_2917_exp2_fwd_three-prime.bigwig...\nReading ../../datasets/20180413_1558_20180413_FAH77434_mRNA_WT_Col0_2917/aligned_data/TAIR10/201901_col0_2917_fwd_three-prime.bigwig...\nReading ../../datasets/20180418_1428_20180418_FAH83552_mRNA_WT_Col0_2919/aligned_data/TAIR10/201901_col0_2919_fwd_three-prime.bigwig...\nReading ../../datasets/20180201_1617_20180201_FAH45730_WT_Col0_2916_regular_seq/aligned_data/TAIR10/201901_col0_2916_rev_three-prime.bigwig...\nReading ../../datasets/2020180405_FAH59362_WT_Col0_2917/aligned_data/TAIR10/201903_col0_2917_exp2_rev_three-prime.bigwig...\nReading ../../datasets/20180416_1534_20180415_FAH83697_mRNA_WT_Col0_2918/aligned_data/TAIR10/201901_col0_2918_rev_three-prime.bigwig...\nReading ../../datasets/20180418_1428_20180418_FAH83552_mRNA_WT_Col0_2919/aligned_data/TAIR10/201901_col0_2919_rev_three-prime.bigwig...\n" ], [ "def notallzeroIndex(x, y):\n x_index = x==0\n y_index = y==0\n index = ~(x_index*y_index)\n return(index)\n\ndef findNearestDRS(positions, npdata, drsdata, npcountthreshold=0, drscountthreshold=0):\n np_index = numpy.where(npdata>npcountthreshold)[0]\n drs_index = numpy.where(drsdata>drscountthreshold)[0]\n nearest_drs_deltas=[]\n for pos in np_index:\n exact = numpy.where(drs_index==pos)\n if len(exact[0])==1:\n nearest_drs_deltas.append(0)\n else:\n nextup = numpy.where(drs_index>pos)[0]\n nextdown = numpy.where(drs_index<pos)[0]\n \n up_delta = None\n if len(nextup)>0:\n up_delta = drs_index[nextup[0]]-pos\n \n down_delta = None\n if len(nextdown)>0:\n down_delta = drs_index[nextdown[-1]]-pos\n \n if up_delta is not None and down_delta is not None:\n if abs(up_delta)>abs(down_delta):\n nearest_drs_deltas.append(down_delta)\n #if abs(down_delta)>1500:\n #print(\"called down: \", pos, positions[pos], up_delta, positions[pos+up_delta], down_delta, positions[pos+down_delta], npdata[pos], drsdata[pos+up_delta])\n else:\n nearest_drs_deltas.append(up_delta)\n #if abs(up_delta)>1500:\n #print(\"called down: \", pos, positions[pos], up_delta, positions[pos+up_delta], down_delta, positions[pos+down_delta], npdata[pos], drsdata[pos+up_delta])\n elif up_delta is not None:\n nearest_drs_deltas.append(up_delta)\n elif down_delta is not None:\n nearest_drs_deltas.append(down_delta)\n return(numpy.array(nearest_drs_deltas))", "_____no_output_____" ], [ "deltas = None\ncthresh=2\nfor strand in datadic.keys():\n thisONTdata = datadic[strand][\"ONT DRS\"]\n thisHELdata = datadic[strand][\"Helicos DRS\"]\n for chr in thisONTdata.tracks.keys():\n if \"Chr{}\".format(chr) in thisHELdata.tracks.keys():\n print(\"Analysing deltas for {s} strand, {ch} and Chr{ch}...\".format(ch=chr, s=strand))\n thisONTdata.set_region(chr)\n thisHELdata.set_region(\"Chr{}\".format(chr))\n x=thisONTdata.get_region_data()\n y=thisHELdata.get_region_data()\n full_y = numpy.zeros(len(x[1]))\n full_y[y[0]]=y[1]\n index = 
notallzeroIndex(x[1], full_y)\n            if deltas is None:\n                deltas = findNearestDRS(x[0], x[1], full_y, npcountthreshold=cthresh, drscountthreshold=cthresh)\n            else:\n                deltas = numpy.append(deltas,findNearestDRS(x[0], x[1], full_y, npcountthreshold=cthresh, drscountthreshold=cthresh))", "Analysing deltas for fwd strand, 1 and Chr1...\nAnalysing deltas for fwd strand, 2 and Chr2...\nAnalysing deltas for fwd strand, 3 and Chr3...\nAnalysing deltas for fwd strand, 4 and Chr4...\nAnalysing deltas for fwd strand, 5 and Chr5...\nAnalysing deltas for rev strand, 1 and Chr1...\nAnalysing deltas for rev strand, 2 and Chr2...\nAnalysing deltas for rev strand, 3 and Chr3...\nAnalysing deltas for rev strand, 4 and Chr4...\nAnalysing deltas for rev strand, 5 and Chr5...\n" ], [ "fig = plt.figure(figsize=(10,6), dpi=150)\nx=plt.hist(deltas, bins=2000, cumulative=False, log=True)\nplt.xlabel(\"Separation between ONT 3' ends and Helicos Bio poly-A peaks (bp)\")\nplt.ylabel(\"Count\")", "_____no_output_____" ], [ "ind = (deltas>-5000)*(deltas<5000)\nfig = plt.figure(figsize=(10,6), dpi=150)\nx=plt.hist(deltas[ind], bins=500, cumulative=False, log=True)\nplt.xlabel(\"Separation between ONT 3' ends and Helicos Bio poly-A peaks (bp)\")\nplt.ylabel(\"Count\")", "_____no_output_____" ] ], [ [ "# Thresholding\n\nPositions that are undetected in one dataset but detected in the other will naturally result in massive distances upstream or downstream to their 'nearest' matching signal in the other dataset. Conversely, well-detected 3' ends should cluster more tightly together. One hopes that the clustered ends will form a distribution (of sorts) with a characteristic standard deviation, whereas in the missing-data scenario the distance distribution is essentially uniform and will thus have a characteristic standard deviation that grows 1-to-1 with the width of the selected region. So, what we'll look for is how the standard deviation of the distribution changes with threshold distance and use the gradient of this curve to find a sensible transition point.", "_____no_output_____" ] ], [ [ "d=2000\nstep=1\nstds=[]\nds=[]\nwhile d>0:\n    ind = (deltas>-1*d)*(deltas<d)\n    stds.append(numpy.std(deltas[ind]))\n    ds.append(d)\n    d=d-step\n\nstds = numpy.array(stds)[::-1]\nds=numpy.array(ds)[::-1]\ndstds = numpy.gradient(stds)\n# use the standard deviation of the gradients as the threshold rather than exactly zero \n# to account for noise\ngrad_threshold = numpy.std(dstds)\nthreshold = ds[numpy.where(dstds<grad_threshold)[0][0]] \n\nfig = plt.figure(figsize=(10,6), dpi=150)\nx=plt.plot(ds, stds)\nplt.xlabel(\"Separation between ONT 3' ends and Helicos Bio poly-A peaks (bp)\")\nplt.ylabel(\"standard deviation\")\nplt.plot([threshold,threshold],[0,70])", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10,6), dpi=150)\nx=plt.plot(ds, dstds)\nplt.xlabel(\"Separation between ONT 3' ends and Helicos Bio poly-A peaks (bp)\")\nplt.ylabel(\"d(Standard deviation)/d(Separation)\")\nplt.plot([threshold,threshold],[0, 0.5])", "_____no_output_____" ], [ "ind = (deltas>-threshold)*(deltas<threshold)\nfig = plt.figure(figsize=(10,6), dpi=150)\nx=plt.hist(deltas[ind], bins=(2*threshold)-1, cumulative=False,log=False)\nplt.xlabel(\"Separation between ONT 3' ends and Helicos Bio poly-A peaks (bp)\")\nplt.ylabel(\"Count\")\nplt.savefig(\"../figures/Figure_05.png\", dpi=300, transparent=True, format='png')\nplt.savefig(\"../figures/Figure_05.svg\", format='svg')", "_____no_output_____" ], [ "print(\"Mean separation:\\t{:>4.1f}\\nMedian separation:\\t{:>4.1f}\\nStandard deviation:\\t{:>4.1f}\".format(numpy.mean(deltas[ind]),\n                                                              numpy.median(deltas[ind]),\n                                                              numpy.std(deltas[ind])))", "Mean separation:\t 0.4\nMedian separation:\t 0.0\nStandard deviation:\t12.5\n" ], [ "nz = len(numpy.where(deltas[ind]==0)[0])\nnnz = len(numpy.where(deltas[ind]!=0)[0])\nprint(\"Zero separation:\\t{:>6d} ({:.2f}%)\".format(nz, 100*nz/(nz+nnz)))\nprint(\"Non-zero separation:\\t{:>6d} ({:.2f}%)\".format(nnz, 100*nnz/(nz+nnz)))", "Zero separation:\t 74466 (37.28%)\nNon-zero separation:\t125290 (62.72%)\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec57d8359674ce0f782656209891216fd3928408
18,733
ipynb
Jupyter Notebook
Velocity Histograms/.ipynb_checkpoints/Testing box shell (Code works!!)-checkpoint.ipynb
Benard117/AnacondaProjects
8921618e2e7bd5552a56b4edf6363ae08cfd0f3f
[ "MIT" ]
1
2019-08-05T17:06:06.000Z
2019-08-05T17:06:06.000Z
Velocity Histograms/.ipynb_checkpoints/Testing box shell (Code works!!)-checkpoint.ipynb
Benard117/AnacondaProjects
8921618e2e7bd5552a56b4edf6363ae08cfd0f3f
[ "MIT" ]
null
null
null
Velocity Histograms/.ipynb_checkpoints/Testing box shell (Code works!!)-checkpoint.ipynb
Benard117/AnacondaProjects
8921618e2e7bd5552a56b4edf6363ae08cfd0f3f
[ "MIT" ]
null
null
null
39.688559
5,220
0.585918
[ [ [ "from astropy.table import Table, Column\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport os\nimport urllib.request", "_____no_output_____" ], [ "os.chdir(\"/Users/Roberto Benard Orci/Documents/UNI/Verano Investigacion/Simulation data\")\nos.getcwd()", "_____no_output_____" ], [ "DMP = np.loadtxt(\"/Users/Roberto Benard Orci/Documents/UNI/Verano Investigacion/Simulation data/Testing box shell.txt\")\nHalos = np.loadtxt(\"/Users/Roberto Benard Orci/Documents/UNI/Verano Investigacion/Simulation data/Halos Testing.txt\")", "_____no_output_____" ], [ "DMP = np.array(DMP)\nDMPCopy = DMP\nDMPCopy = np.array(DMPCopy)\n\nDMP[:,1] = DMP[:,1] + 1 \nDMP[:,2] = DMP[:,2] + 1 \nDMP[:,3] = DMP[:,3] + 50\n\nNewDMPBoxShell = []\n\nfor m, x, y, z, Vz in zip(DMPCopy[:,0], DMPCopy[:,1], DMPCopy[:,2], DMPCopy[:,3], DMPCopy[:,4]) :\n if x < 1 :\n if y < 1 :\n if z < 50 :\n NewDMPBoxShell.append([m, x+1051, y+1051, z+1100, Vz]) #PUNTO\n print(\"a\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1051, y+1051, z-1000, Vz]) #PUNTO\n print(\"b\",m)\n else : \n NewDMPBoxShell.append([m, x+1051, y+1051, z+50, Vz]) #ARISTA\n print(\"c\",m)\n \n elif y > 1049 :\n if z < 50 :\n NewDMPBoxShell.append([m, x+1051, y-1049, z+1100, Vz]) #PUNTO\n print(\"d\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1051, y-1049, z-1000, Vz]) #PUNTO\n print(\"e\",m)\n else : \n NewDMPBoxShell.append([m, x+1051, y-1049, z+50, Vz]) #ARISTA\n print(\"f\",m)\n \n elif z < 50 :\n NewDMPBoxShell.append([m, x+1051, y+1, z+1100, Vz]) #ARISTA\n print(\"g\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1051, y+1, z-1000, Vz]) #ARISTA\n print(\"h\",m) \n \n else :\n NewDMPBoxShell.append([m, x+1051, y+1, z+50, Vz]) #CARA\n print(\"i\",m)\n \n elif x > 1049 :\n if y < 1 :\n if z < 50 :\n NewDMPBoxShell.append([m, x-1049, y+1051, z+1100, Vz]) #PUNTO\n print(\"j\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x-1049, y+1051, z-1000, Vz]) #PUNTO\n print(\"k\",m)\n else : \n NewDMPBoxShell.append([m, x-1049, y+1051, z+50, Vz]) #ARISTA\n print(\"l\",m)\n \n elif y > 1049 :\n if z < 50 :\n NewDMPBoxShell.append([m, x-1049, y-1049, z+1100, Vz]) #PUNTO\n print(\"m\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x-1049, y-1049, z-1000, Vz]) #PUNTO\n print(\"n\",m)\n else : \n NewDMPBoxShell.append([m, x-1049, y-1049, z+50, Vz]) #ARISTA\n print(\"o\",m)\n \n elif z < 50 :\n NewDMPBoxShell.append([m, x-1049, y+1, z+1100, Vz]) #ARISTA\n print(\"p\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x-1049, y+1, z-1000, Vz]) #ARISTA\n print(\"q\",m)\n \n else :\n NewDMPBoxShell.append([m, x-1049, y+1, z+50, Vz]) #CARA\n print(\"r\",m)\n \n elif y < 1 :\n if z < 50 :\n NewDMPBoxShell.append([m, x+1, y+1051, z+1100, Vz]) #ARISTA\n print(\"s\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1, y+1051, z-1000, Vz]) #ARISTA\n print(\"t\",m)\n else :\n NewDMPBoxShell.append([m, x+1, y+1051, z+50, Vz]) #CARA\n print(\"u\",m)\n \n elif y > 1049 :\n if z < 50 :\n NewDMPBoxShell.append([m, x+1, y-1049, z+1100, Vz]) #ARISTA\n print(\"v\",m)\n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1, y-1049, z-1000, Vz]) #ARISTA\n print(\"w\",m)\n else :\n NewDMPBoxShell.append([m, x+1, y-1049, z+50, Vz]) #CARA\n print(\"x\",m)\n \n elif z < 50 :\n NewDMPBoxShell.append([m, x+1, y+1, z+1100, Vz]) #CARA\n print(\"y\",m) \n \n elif z > 1000 :\n NewDMPBoxShell.append([m, x+1, y+1, z-1000, Vz]) #CARA\n print(\"z\",m)", "a 1.0\nc 2.0\nc 3.0\nb 4.0\nd 5.0\ng 6.0\ne 7.0\nf 8.0\nh 9.0\nj 10.0\nm 11.0\nv 
12.0\ns 13.0\np 14.0\ni 15.0\ny 16.0\nk 17.0\nt 18.0\nl 19.0\nu 20.0\nn 21.0\nq 22.0\nw 23.0\no 24.0\nz 25.0\nx 26.0\nr 27.0\n" ], [ "NewDMPBoxShell", "_____no_output_____" ], [ "NewDMPBox = np.concatenate((DMP, NewDMPBoxShell))", "_____no_output_____" ], [ "MostMassiveHalo = Halos[:,0].max()\nIDMostMassiveHalo = 0\n\nfor x in Halos[:,0]:\n if x == MostMassiveHalo:\n break\n IDMostMassiveHalo = IDMostMassiveHalo+1", "_____no_output_____" ], [ "x0 = Halos[IDMostMassiveHalo,1].item() + 1\ny0 = Halos[IDMostMassiveHalo,2].item() + 1\nz0 = Halos[IDMostMassiveHalo,3].item() +50\n\nx0SupLim = x0+10\nx0InfLim = x0-10 #I added 10 instead of 1 because of the Testing file.\ny0SupLim = y0+10\ny0InfLim = y0-10\nz0SupLim = z0+50\nz0InfLim = z0-50", "_____no_output_____" ], [ "Box = []\nCylinder = []\n\nfor x, y, z ,Vz in zip(NewDMPBox[:,1], NewDMPBox[:,2], NewDMPBox[:,3], NewDMPBox[:,4]) :\n if (x <= x0SupLim) & (x >= x0InfLim) & (y <= y0SupLim) & (y >= y0InfLim) & (z <= z0SupLim) & (z >= z0InfLim) :\n Box.append([x, y, z, Vz])\n \nBox = np.array(Box)\n\nDelta = np.zeros((np.size(Box[:,0]),2))\n\nDelta[:,0]= Box[:,0]-x0\nDelta[:,1]= Box[:,1]-y0\n\nDistanceSquared = Delta[:,0]**2 + Delta[:,1]**2\n\n# np.where(DistanceSquared<=25)\n\nNewDMPBoxIndex = np.zeros((np.size(np.where(DistanceSquared<=100)),1))\nIndexNumbers = np.array(np.where(DistanceSquared<=100))\nNewDMPBoxIndex[:,0] = IndexNumbers\n\nfor x in NewDMPBoxIndex[:,0] :\n w = int(x)\n Cylinder.append([Box[w,0], Box[w,1], Box[w,2], Box[w,3]])", "_____no_output_____" ], [ "print(Box,Cylinder)", "[[501. 501. 600. 362.]] [[501.0, 501.0, 600.0, 362.0]]\n" ], [ "MMHVelocity = Halos[IDMostMassiveHalo,4]\n\nCylinder = np.array(Cylinder)", "_____no_output_____" ], [ "FinalVelocities = []\n\nfor x in Cylinder[:,3] :\n VelocityOfDMPRelativeToTheMMH = x - MMHVelocity\n FinalVelocities.append(VelocityOfDMPRelativeToTheMMH)", "_____no_output_____" ], [ "print(FinalVelocities)", "[16.0]\n" ], [ "plt.hist(FinalVelocities, density=True, bins=5)\nplt.ylabel('Probability');", "_____no_output_____" ], [ "JustChecking = [[]]\n\nLOL=0\nfor QQ in Halos[:,0] :\n \n Newlist = []\n \n for x in Cylinder[:,3] :\n Newlist.append(x - MMHVelocity)\n \n FinalVelocities.append(Newlist)\n \n \n JustChecking.append(Newlist)\n \n LOL=LOL+1", "_____no_output_____" ], [ "print(JustChecking)", "[[], [16.0], [16.0], [16.0], [16.0], [16.0], [16.0], [16.0], [16.0], [16.0], [16.0]]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec57faaab0563aa6ddfe3f157805f3070530e9d4
34,590
ipynb
Jupyter Notebook
2_python_introduction.ipynb
philuttley/basic_linux_and_coding
6ad288d1a6ae5f0fe97fe36c527ff5c74a29085a
[ "MIT" ]
1
2018-08-08T15:18:14.000Z
2018-08-08T15:18:14.000Z
2_python_introduction.ipynb
philuttley/basic_linux_and_coding
6ad288d1a6ae5f0fe97fe36c527ff5c74a29085a
[ "MIT" ]
null
null
null
2_python_introduction.ipynb
philuttley/basic_linux_and_coding
6ad288d1a6ae5f0fe97fe36c527ff5c74a29085a
[ "MIT" ]
null
null
null
22.301741
7,218
0.50211
[ [ [ "# Python Introduction", "_____no_output_____" ], [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#History\" data-toc-modified-id=\"History-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>History</a></span><ul class=\"toc-item\"><li><span><a href=\"#Versions\" data-toc-modified-id=\"Versions-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Versions</a></span></li></ul></li><li><span><a href=\"#Programming-terms\" data-toc-modified-id=\"Programming-terms-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Programming terms</a></span></li><li><span><a href=\"#Running-Python\" data-toc-modified-id=\"Running-Python-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Running Python</a></span><ul class=\"toc-item\"><li><span><a href=\"#Interactive-shell\" data-toc-modified-id=\"Interactive-shell-3.1\"><span class=\"toc-item-num\">3.1&nbsp;&nbsp;</span>Interactive shell</a></span></li><li><span><a href=\"#Running-files\" data-toc-modified-id=\"Running-files-3.2\"><span class=\"toc-item-num\">3.2&nbsp;&nbsp;</span>Running files</a></span></li><li><span><a href=\"#IDEs\" data-toc-modified-id=\"IDEs-3.3\"><span class=\"toc-item-num\">3.3&nbsp;&nbsp;</span>IDEs</a></span><ul class=\"toc-item\"><li><span><a href=\"#For-instance,-from-autocompleting-code-...\" data-toc-modified-id=\"For-instance,-from-autocompleting-code-...-3.3.1\"><span class=\"toc-item-num\">3.3.1&nbsp;&nbsp;</span>For instance, from autocompleting code ...</a></span></li><li><span><a href=\"#...-to-check-for-mistakes-...\" data-toc-modified-id=\"...-to-check-for-mistakes-...-3.3.2\"><span class=\"toc-item-num\">3.3.2&nbsp;&nbsp;</span>... to check for mistakes ...</a></span></li><li><span><a href=\"#...-and-interactive-coding\" data-toc-modified-id=\"...-and-interactive-coding-3.3.3\"><span class=\"toc-item-num\">3.3.3&nbsp;&nbsp;</span>... 
and interactive coding</a></span></li></ul></li><li><span><a href=\"#Jupyter-notebook\" data-toc-modified-id=\"Jupyter-notebook-3.4\"><span class=\"toc-item-num\">3.4&nbsp;&nbsp;</span>Jupyter notebook</a></span><ul class=\"toc-item\"><li><span><a href=\"#Magic-commands\" data-toc-modified-id=\"Magic-commands-3.4.1\"><span class=\"toc-item-num\">3.4.1&nbsp;&nbsp;</span>Magic commands</a></span></li></ul></li></ul></li><li><span><a href=\"#Variables-and-basic-data-types\" data-toc-modified-id=\"Variables-and-basic-data-types-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Variables and basic data types</a></span><ul class=\"toc-item\"><li><span><a href=\"#Preface\" data-toc-modified-id=\"Preface-4.1\"><span class=\"toc-item-num\">4.1&nbsp;&nbsp;</span>Preface</a></span><ul class=\"toc-item\"><li><span><a href=\"#Some-terminology\" data-toc-modified-id=\"Some-terminology-4.1.1\"><span class=\"toc-item-num\">4.1.1&nbsp;&nbsp;</span>Some terminology</a></span></li></ul></li></ul></li><li><span><a href=\"#Numbers\" data-toc-modified-id=\"Numbers-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Numbers</a></span><ul class=\"toc-item\"><li><span><a href=\"#Integer-(int)\" data-toc-modified-id=\"Integer-(int)-5.1\"><span class=\"toc-item-num\">5.1&nbsp;&nbsp;</span>Integer (<code>int</code>)</a></span></li><li><span><a href=\"#Float-(float)\" data-toc-modified-id=\"Float-(float)-5.2\"><span class=\"toc-item-num\">5.2&nbsp;&nbsp;</span>Float (<code>float</code>)</a></span></li><li><span><a href=\"#Arithmetic\" data-toc-modified-id=\"Arithmetic-5.3\"><span class=\"toc-item-num\">5.3&nbsp;&nbsp;</span>Arithmetic</a></span></li></ul></li><li><span><a href=\"#Words\" data-toc-modified-id=\"Words-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Words</a></span><ul class=\"toc-item\"><li><span><a href=\"#Strings-(str)\" data-toc-modified-id=\"Strings-(str)-6.1\"><span class=\"toc-item-num\">6.1&nbsp;&nbsp;</span>Strings (<code>str</code>)</a></span><ul class=\"toc-item\"><li><span><a href=\"#String-manipulations\" data-toc-modified-id=\"String-manipulations-6.1.1\"><span class=\"toc-item-num\">6.1.1&nbsp;&nbsp;</span>String manipulations</a></span></li><li><span><a href=\"#f-strings\" data-toc-modified-id=\"f-strings-6.1.2\"><span class=\"toc-item-num\">6.1.2&nbsp;&nbsp;</span>f-strings</a></span></li><li><span><a href=\"#Escape-sequences\" data-toc-modified-id=\"Escape-sequences-6.1.3\"><span class=\"toc-item-num\">6.1.3&nbsp;&nbsp;</span>Escape sequences</a></span></li></ul></li><li><span><a href=\"#Indexing-and-slicing\" data-toc-modified-id=\"Indexing-and-slicing-6.2\"><span class=\"toc-item-num\">6.2&nbsp;&nbsp;</span>Indexing and slicing</a></span><ul class=\"toc-item\"><li><span><a href=\"#Indexing\" data-toc-modified-id=\"Indexing-6.2.1\"><span class=\"toc-item-num\">6.2.1&nbsp;&nbsp;</span>Indexing</a></span></li><li><span><a href=\"#Slicing\" data-toc-modified-id=\"Slicing-6.2.2\"><span class=\"toc-item-num\">6.2.2&nbsp;&nbsp;</span>Slicing</a></span></li></ul></li><li><span><a href=\"#User-Input\" data-toc-modified-id=\"User-Input-6.3\"><span class=\"toc-item-num\">6.3&nbsp;&nbsp;</span>User Input</a></span></li></ul></li><li><span><a href=\"#Collections\" data-toc-modified-id=\"Collections-7\"><span class=\"toc-item-num\">7&nbsp;&nbsp;</span>Collections</a></span><ul class=\"toc-item\"><li><span><a href=\"#Lists-(list)\" data-toc-modified-id=\"Lists-(list)-7.1\"><span class=\"toc-item-num\">7.1&nbsp;&nbsp;</span>Lists (<code>list</code>)</a></span></li><li><span><a 
href=\"#Dictionaries-(dict)\" data-toc-modified-id=\"Dictionaries-(dict)-7.2\"><span class=\"toc-item-num\">7.2&nbsp;&nbsp;</span>Dictionaries (<code>dict</code>)</a></span></li><li><span><a href=\"#Tuples-(tuple)\" data-toc-modified-id=\"Tuples-(tuple)-7.3\"><span class=\"toc-item-num\">7.3&nbsp;&nbsp;</span>Tuples (<code>tuple</code>)</a></span></li><li><span><a href=\"#Sets--(set)\" data-toc-modified-id=\"Sets--(set)-7.4\"><span class=\"toc-item-num\">7.4&nbsp;&nbsp;</span>Sets (<code>set</code>)</a></span></li><li><span><a href=\"#None-vs.-NaN\" data-toc-modified-id=\"None-vs.-NaN-7.5\"><span class=\"toc-item-num\">7.5&nbsp;&nbsp;</span>None vs. NaN</a></span></li><li><span><a href=\"#Some-terminology\" data-toc-modified-id=\"Some-terminology-7.6\"><span class=\"toc-item-num\">7.6&nbsp;&nbsp;</span>Some terminology</a></span></li></ul></li><li><span><a href=\"#Booleans\" data-toc-modified-id=\"Booleans-8\"><span class=\"toc-item-num\">8&nbsp;&nbsp;</span>Booleans</a></span><ul class=\"toc-item\"><li><span><a href=\"#if-statements\" data-toc-modified-id=\"if-statements-8.1\"><span class=\"toc-item-num\">8.1&nbsp;&nbsp;</span>if-statements</a></span></li></ul></li><li><span><a href=\"#Loops-and-iteration\" data-toc-modified-id=\"Loops-and-iteration-9\"><span class=\"toc-item-num\">9&nbsp;&nbsp;</span>Loops and iteration</a></span><ul class=\"toc-item\"><li><span><a href=\"#while-loop\" data-toc-modified-id=\"while-loop-9.1\"><span class=\"toc-item-num\">9.1&nbsp;&nbsp;</span><code>while</code>-loop</a></span></li><li><span><a href=\"#for-loop\" data-toc-modified-id=\"for-loop-9.2\"><span class=\"toc-item-num\">9.2&nbsp;&nbsp;</span><code>for</code>-loop</a></span></li><li><span><a href=\"#Zipping\" data-toc-modified-id=\"Zipping-9.3\"><span class=\"toc-item-num\">9.3&nbsp;&nbsp;</span>Zipping</a></span></li></ul></li></ul></div>", "_____no_output_____" ], [ "## History\n*Locally sourced*", "_____no_output_____" ], [ "* Est. 
in 1989\n* Guido van Rossum at CWI, in the Netherlands\n* Named after the comedy series 'Monty Python'\n* Developed for general purpose programming\n* Modular\n* Extremely popular", "_____no_output_____" ], [ "### Versions\n* Python 3.x versus Python 2.x\n* Libraries have already switched to Python 3.x\n* Difference in performance\n* Difference in syntax", "_____no_output_____" ], [ "## Programming terms\n*Learning the jargon*", "_____no_output_____" ], [ "* *Machine language* | Code executed directly by hardware\n* *Source code* | Instructions in a language before converting it to machine language\n* *Low level programming language* | Requiring knowledge of hardware\n* *High level programming language* | The opposite (eg `python`)", "_____no_output_____" ], [ "* *Compilation* | The act of converting source code to machine language in one go (eg `C`)\n* *Compiler* | Program doing the compiling\n* *Interpreted* | Source code translated line-by-line into machine language (eg `python`)\n* *Interpreter* | Program doing the interpreting\n * In Linux/Max this is often referred to as `CPython`, as it's written in `C`", "_____no_output_____" ], [ "## Running Python\n*Skipping the walking*", "_____no_output_____" ], [ "* Simply start the interpreter with\n ```bash\n python3\n ```\n* Older versions (if installed) can be accessed with eg `python2.7`", "_____no_output_____" ], [ "### Interactive shell\n* Annoying having to do things per line, eg\n ```python\n def function(x):\n a = 2\n c = 10\n return a*x + c\n ```", "_____no_output_____" ], [ "* So instead use...\n ```bash\n ipython3\n ```", "_____no_output_____" ], [ "### Running files\n* Or, run a program with\n ```bash\n python3 <filename.py>\n ```", "_____no_output_____" ], [ "### IDEs\n\n* You can edit python code in a 'notebook'-like program\n* But, Integrated Development Environments (IDEs) can save you a *lot* of time\n* Code a lot in Python? Try out `Atom`, `Pycharm`, `Spyder` etc.", "_____no_output_____" ], [ "#### For instance, from autocompleting code ...\n![auto_complete](media/auto_complete.gif)", "_____no_output_____" ], [ "#### ... to check for mistakes ...\n![linter](media/linter.gif)", "_____no_output_____" ], [ "#### ... and interactive coding\n![hydrogen](media/hydrogen.gif)", "_____no_output_____" ], [ "### Jupyter notebook\n* Exploring data? `jupyter notebook` provides an easy way to interactively run just the parts of the code you want to change. \n* While they don't have the functionality of an IDE, for purely exploratory or tutorial reasons they can be pretty convenient as they encourage text amongst code.\n* A.k.a. `ipython notebook` for historical reasons", "_____no_output_____" ], [ "#### Magic commands\nPrefaced with a `%`, ipython magic commands allow for quick settings to be applied eg", "_____no_output_____" ] ], [ [ "%timeit [i for i in range(1000)] # Long running line of code", "_____no_output_____" ] ], [ [ "## Variables and basic data types\n*Essential knowledge. 
Nothing variable about it*", "_____no_output_____" ], [ "### Preface\nImportant commands to know before diving in", "_____no_output_____" ] ], [ [ "# The equals-sign sets a variable\na = 5", "_____no_output_____" ] ], [ [ "#### Some terminology\n* There are several standard data types\n* *Dynamic types* | A variable can refer to any data type\n* *Static types* | A variable is defined only once", "_____no_output_____" ] ], [ [ "# This is a comment", "_____no_output_____" ], [ "print(3) # Prints a variable", "_____no_output_____" ], [ "type(2) # Gives the datatype of a variable", "_____no_output_____" ], [ "# Examples where it's convenient to set a variable\nparsec = 648000 / 3.14 # A.U.\nau = 149597870700 # meters", "_____no_output_____" ] ], [ [ "## Numbers\n*Do you know what's odd? Every other number*", "_____no_output_____" ], [ "### Integer (`int`)\nNice and simple numbers eg", "_____no_output_____" ] ], [ [ "4", "_____no_output_____" ], [ "type(4)", "_____no_output_____" ] ], [ [ "### Float (`float`)\n\n* Floating point numbers\n* Note there's a limit to the accuracy", "_____no_output_____" ] ], [ [ "4.4", "_____no_output_____" ], [ "4e4", "_____no_output_____" ], [ "4*4e4", "_____no_output_____" ], [ "float(4)", "_____no_output_____" ] ], [ [ "### Arithmetic\n\n| Operator | Meaning | Example |\n| ---------- | --------- | --------- |\n| + | Plus | 1 + 2 |\n| - | Minus | 1 - 2 | \n| * | Multiply | 2*3 |\n| ** | Power | 2 ** 3 |\n| % | Modulus (Remainder of left/right) | 4 % 3 |\n| // | Floor division | 2 // 3 |", "_____no_output_____" ], [ "## Words\n*You can't escape them* ", "_____no_output_____" ], [ "### Strings (`str`)\n~~The violin, viola, cello and double-bass section of an orchestra~~\n\nText of any length eg", "_____no_output_____" ] ], [ [ "a = 'space alpacas'\nb = '🐱'\nc = \"It's a small world\"", "_____no_output_____" ] ], [ [ "#### String manipulations", "_____no_output_____" ] ], [ [ "a.title()", "_____no_output_____" ], [ "a.upper()", "_____no_output_____" ], [ "a.startswith('b')", "_____no_output_____" ], [ "a.isdigit()", "_____no_output_____" ] ], [ [ "#### f-strings", "_____no_output_____" ] ], [ [ "a = 'something amazing'\nb = f'watch {a} right now'\nprint(b)", "_____no_output_____" ] ], [ [ "#### Escape sequences", "_____no_output_____" ] ], [ [ "s = '\\n' # A newline", "_____no_output_____" ], [ "s = '\\t' # A tab", "_____no_output_____" ] ], [ [ "### Indexing and slicing\n\nAccess data within a string", "_____no_output_____" ] ], [ [ "s = 'abcdefg'", "_____no_output_____" ] ], [ [ "#### Indexing", "_____no_output_____" ] ], [ [ "s[0] # Returns the first element", "_____no_output_____" ], [ "s[1]", "_____no_output_____" ], [ "s[100]", "_____no_output_____" ] ], [ [ "#### Slicing", "_____no_output_____" ] ], [ [ "s[-1]", "_____no_output_____" ], [ "s[3:]", "_____no_output_____" ], [ "s[:3]", "_____no_output_____" ] ], [ [ "### User Input\n\n* Often this is not what you want to use\n* But if necessary...", "_____no_output_____" ] ], [ [ "name = input('Your name please:')\nprint(f'Hello {name}')", "_____no_output_____" ] ], [ [ "## Collections\n*The hottest of this season's trends*", "_____no_output_____" ], [ "### Lists (`list`)\nA way to gather values of any type together", "_____no_output_____" ] ], [ [ "l = [2, 4, 8]", "_____no_output_____" ], [ "l = [2, 'a', 8]", "_____no_output_____" ], [ "l.append('b')", "_____no_output_____" ], [ "l.pop(0)", "_____no_output_____" ], [ "l.sort()", "_____no_output_____" ], [ "l[2:]", "_____no_output_____" ], [ "l[0]", 
"_____no_output_____" ] ], [ [ "### Dictionaries (`dict`)\nFor storing values linked together", "_____no_output_____" ] ], [ [ "d = {'a':2, 'b':5.0, 1:6, 5.0:'something'}", "_____no_output_____" ], [ "d['b'] # Give the key to obtain the value", "_____no_output_____" ], [ "d['unknown'] # An unknown key will give an error", "_____no_output_____" ], [ "d['c'] = [0, 2, 'a'] # Adding a key", "_____no_output_____" ] ], [ [ "### Tuples (`tuple`)", "_____no_output_____" ] ], [ [ "t_1 = (1, 2, 3, 'abc')", "_____no_output_____" ], [ "t_2 = 1, 2,", "_____no_output_____" ], [ "t_3 = (1)", "_____no_output_____" ], [ "t_1[0]", "_____no_output_____" ], [ "t_1[1:]", "_____no_output_____" ], [ "t_1[0] = 'error' # You can't reassign a value", "_____no_output_____" ] ], [ [ "### Sets (`set`)\nLists where order doesn't matter", "_____no_output_____" ] ], [ [ "s = set()\ns.add(1)\ns.add(2) # You can't assume this is ordered", "_____no_output_____" ], [ "s.add(2) # Elements are not repeated\ns", "_____no_output_____" ] ], [ [ "### None vs. NaN\n* Missing a value? ", "_____no_output_____" ] ], [ [ "a = None", "_____no_output_____" ] ], [ [ "* Not a number, but you want to perform operations on a range of values?", "_____no_output_____" ] ], [ [ "a = [1., 2., 3., float('NaN')]\na[-1]*2", "_____no_output_____" ] ], [ [ "### Some terminology\n* Mutable values | Values which can change\n* Immutable values | Values which do not change", "_____no_output_____" ] ], [ [ "# Immutable\na = 'hello world'\na", "_____no_output_____" ], [ "a.title()", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "# Mutable\nl = [8, 2, 4]\nl", "_____no_output_____" ], [ "l.sort()", "_____no_output_____" ], [ "l", "_____no_output_____" ] ], [ [ "## Booleans\n*True or False?*", "_____no_output_____" ] ], [ [ "b = True", "_____no_output_____" ], [ "1 > 2", "_____no_output_____" ], [ "2 > 1", "_____no_output_____" ], [ "a = 10\nb = 5", "_____no_output_____" ], [ "a >= b", "_____no_output_____" ], [ "t = True\nf = False", "_____no_output_____" ], [ "t and t", "_____no_output_____" ], [ "t and f", "_____no_output_____" ], [ "t or f", "_____no_output_____" ], [ "t or t", "_____no_output_____" ], [ "not t", "_____no_output_____" ], [ "not f", "_____no_output_____" ], [ "l_1 = []\nl_2 = [1, 2, 3, 4]", "_____no_output_____" ], [ "bool(l_1)", "_____no_output_____" ], [ "bool(l_2)", "_____no_output_____" ] ], [ [ "### if-statements\nNote indents should be 4 spaces wide", "_____no_output_____" ] ], [ [ "if True:\n print('This will be printed')\nif False:\n print('This will never be printed')", "_____no_output_____" ], [ "x = 10\ny = 20\n\nif x > y:\n print('x is greater than y')\nelse:\n print('x is smaller or equal to y')", "_____no_output_____" ], [ "x = 10\n\nif x > 0:\n print('x is positive')\nelif x == 0:\n print('x is zero')\nelse:\n print('x is negative')", "_____no_output_____" ] ], [ [ "## Loops and iteration\n*Over and over again*", "_____no_output_____" ], [ "### `while`-loop", "_____no_output_____" ] ], [ [ "i = 0\n\nwhile i < 10:\n print(i)\n i += 1 # Fancy way of saying i = i + 1\nelse:\n print('i is equal or larger than 10')", "_____no_output_____" ], [ "detected = False\ni = 1\n\nwhile not detected:\n i *= 2\n \n if i == 8:\n print('Halfway')\n continue # Skips the rest, starts with the next loop\n \n print(i)\n \n if i == 16:\n break # or detected = True", "_____no_output_____" ] ], [ [ "### `for`-loop", "_____no_output_____" ] ], [ [ "s = 'abcdefg'\n\nfor character in s:\n if character == 'a':\n continue\n elif character == 
'z':\n break\n else:\n print(character)\nelse:\n print('No z to be found')", "_____no_output_____" ], [ "l = [0 ,1, 2, 4]\n\nfor element in l:\n print(l)", "_____no_output_____" ], [ "l = ['a', 'b', 'c']\n\nfor i, element in enumerate(l):\n print(i, element)", "_____no_output_____" ], [ "for i in range(5):\n print(i)", "_____no_output_____" ], [ "l = [('a', 'b'), ('x', 'y'), ('p', 'q')]\n\nfor c_1, c_2 in l:\n print(c_1, c_2) ", "_____no_output_____" ], [ "d = {1:'a', 2:'b', 3:'c'}\n\nfor key in d:\n print(key)", "_____no_output_____" ], [ "for value in d.values():\n print(value)", "_____no_output_____" ], [ "for key, value in d.items():\n print(key, value)", "_____no_output_____" ], [ "for key in d:\n print(key, d[key])", "_____no_output_____" ] ], [ [ "### Zipping", "_____no_output_____" ] ], [ [ "l_1 = ['a', 'b', 'c']\nl_2 = [0 , 1, 2]\n\nresult = list(zip(l_1, l_2))\nresult", "_____no_output_____" ], [ "a, b = zip(*result)\na, b", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec57fc6e85daf738d6f708e9828124183184f372
1,174
ipynb
Jupyter Notebook
notebooks/06.00-Layout-and-Styling-Overview.ipynb
Martazzz/jupytertutorial
37039964b0826c3ba31b3cf4d6732a5d53cf950a
[ "BSD-3-Clause" ]
2
2021-02-13T05:52:05.000Z
2022-02-08T09:52:35.000Z
notebooks/06.00-Layout-and-Styling-Overview.ipynb
SteveWang1992/tutorial
37039964b0826c3ba31b3cf4d6732a5d53cf950a
[ "BSD-3-Clause" ]
null
null
null
notebooks/06.00-Layout-and-Styling-Overview.ipynb
SteveWang1992/tutorial
37039964b0826c3ba31b3cf4d6732a5d53cf950a
[ "BSD-3-Clause" ]
null
null
null
29.35
176
0.629472
[ [ [ "# Layout and Styling of Jupyter widgets\n\nThis section of the tutorial describes \n\n+ how to [lay out Jupyter interactive widgets](06.01-Widget_Layout.ipynb) using the Flexbox and Grid CSS models to build rich and *reactive* widget-based applications.\n+ some new [high-level widgets for laying out widget-based applications](06.10-higher-level-containers.ipynb).\n+ [layout and styling of widget labels](06.07-widget-labels.ipynb) (please read this on your own).\n+ styling available for [some individual widgets](06.09-Widget-Styling.ipynb) (please read this on your own).", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
ec57ffa1b2418d366777dc9237e192fbf1b0e57f
26,238
ipynb
Jupyter Notebook
ETL_function_test.ipynb
eric-spoerner/movies_ETL
f9f293f8921ded468b609422ff0a2fcc0ef55816
[ "Apache-2.0" ]
null
null
null
ETL_function_test.ipynb
eric-spoerner/movies_ETL
f9f293f8921ded468b609422ff0a2fcc0ef55816
[ "Apache-2.0" ]
null
null
null
ETL_function_test.ipynb
eric-spoerner/movies_ETL
f9f293f8921ded468b609422ff0a2fcc0ef55816
[ "Apache-2.0" ]
null
null
null
38.081277
94
0.379983
[ [ [ "import json\nimport pandas as pd\nimport numpy as np\n\nimport re\n\nfrom sqlalchemy import create_engine\nimport psycopg2\n\nfrom config import db_password\n\nimport time", "_____no_output_____" ], [ "def import_source_files(wiki_file: str,\n kaggle_file: str,\n ratings_file: str):\n \"\"\"\n Function takes three arguments, each corresponding to the name\n of a specific source csv or json file for the three types of data \n objects we are importing. Returns all three objects as unique\n pandas DataFrames.\n \"\"\"\n kaggle_metadata = pd.read_csv(kaggle_file, low_memory=False)\n ratings = pd.read_csv(ratings_file)\n\n with open(wiki_file, mode='r') as file:\n wiki_movies_json = json.load(file)\n \n wiki_movies_df = pd.DataFrame(wiki_movies_json)\n \n return wiki_movies_df, kaggle_metadata, ratings", "_____no_output_____" ], [ "file_dir = './data'\nwiki_file = f'{file_dir}/wikipedia.movies.json'\nkaggle_file = f'{file_dir}/movies_metadata.csv'\nratings_file = f'{file_dir}/ratings.csv'\n\n# 7. Set the three variables in Step 6 equal to the function created in Step 1.\nwiki_file, kaggle_file, ratings_file = import_source_files(wiki_file=wiki_file, \n kaggle_file=kaggle_file, \n ratings_file=ratings_file)", "_____no_output_____" ], [ "# 8. Set the DataFrames from the return statement equal to the file names in Step 6. \nwiki_movies_df = wiki_file\nkaggle_metadata = kaggle_file\nratings = ratings_file", "_____no_output_____" ], [ "# 9. Check the wiki_movies_df DataFrame.\nwiki_movies_df.head()", "_____no_output_____" ], [ "# 10. Check the kaggle_metadata DataFrame.\nkaggle_metadata.head()", "_____no_output_____" ], [ "# 11. Check the ratings DataFrame.\nratings.head()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec580bb606ffb460566c57473686f6ae19382c81
357,714
ipynb
Jupyter Notebook
_docs/nbs/reco-tut-mll-03-eda.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
_docs/nbs/reco-tut-mll-03-eda.ipynb
sparsh-ai/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
1
2022-01-12T05:40:57.000Z
2022-01-12T05:40:57.000Z
_docs/nbs/reco-tut-mll-03-eda.ipynb
RecoHut-Projects/recohut
4121f665761ffe38c9b6337eaa9293b26bee2376
[ "Apache-2.0" ]
null
null
null
357,714
357,714
0.93731
[ [ [ "import os\nproject_name = \"reco-tut-mll\"; branch = \"main\"; account = \"sparsh-ai\"\nproject_path = os.path.join('/content', project_name)", "_____no_output_____" ], [ "if not os.path.exists(project_path):\n !cp /content/drive/MyDrive/mykeys.py /content\n import mykeys\n !rm /content/mykeys.py\n path = \"/content/\" + project_name; \n !mkdir \"{path}\"\n %cd \"{path}\"\n import sys; sys.path.append(path)\n !git config --global user.email \"[email protected]\"\n !git config --global user.name \"reco-tut\"\n !git init\n !git remote add origin https://\"{mykeys.git_token}\":[email protected]/\"{account}\"/\"{project_name}\".git\n !git pull origin \"{branch}\"\n !git checkout main\nelse:\n %cd \"{project_path}\"", "/content/reco-tut-mll\nInitialized empty Git repository in /content/reco-tut-mll/.git/\nremote: Enumerating objects: 18, done.\u001b[K\nremote: Counting objects: 100% (18/18), done.\u001b[K\nremote: Compressing objects: 100% (13/13), done.\u001b[K\nremote: Total 18 (delta 1), reused 16 (delta 1), pack-reused 0\u001b[K\nUnpacking objects: 100% (18/18), done.\nFrom https://github.com/sparsh-ai/reco-tut-mll\n * branch main -> FETCH_HEAD\n * [new branch] main -> origin/main\nBranch 'main' set up to track remote branch 'main' from 'origin'.\nSwitched to a new branch 'main'\n" ], [ "!git status", "_____no_output_____" ], [ "!git pull --rebase origin main", "_____no_output_____" ], [ "!git add . && git commit -m 'commit' && git push origin \"{branch}\"", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nplt.style.use('ggplot')\n%matplotlib inline", "_____no_output_____" ], [ "ratings = pd.read_parquet('./data/silver/ratings.parquet.gzip')\nratings.head()", "_____no_output_____" ], [ "movies = pd.read_parquet('./data/silver/movies.parquet.gzip')\nmovies.head()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "# Number of Ratings Per Year\nratings_per_year = ratings[['rating', 'timestamp']].groupby('timestamp').count()\nratings_per_year.columns = ['Rating Count']\nax1 = ratings_per_year.plot(kind='line',figsize=(12,8))\nax1.set_xlabel('Year')\nax1.set_ylabel('Number of ratings given')\nplt.title('Number of Movies Rated Per Year')\nplt.show()", "_____no_output_____" ], [ "ratings_df = ratings[['rating', 'timestamp']].groupby('timestamp').count().sort_values(by=\"rating\", ascending=False)\nratings_df.head()", "_____no_output_____" ], [ "# Movies Released per Year\ndftmp = movies[['movieId', 'year']].groupby('year')\n\nfig, ax1 = plt.subplots(figsize=(10,5))\nax1.plot(dftmp.year.first(), dftmp.movieId.nunique(), \"g-o\")\nax1.grid(None)\nax1.set_ylim(0,)\nax1.set_xlabel('Year')\nax1.set_ylabel('Number of movies released')\nplt.title('Movies per year')\nplt.show()", "_____no_output_____" ], [ "insights = []\ninsights.append('Most Movies are released in between 1980 and 2020')", "_____no_output_____" ], [ "# Ratings per Year\ndftmp = ratings[['rating', 'timestamp']].groupby('timestamp')\n\nfig, ax1 = plt.subplots(figsize=(10,5))\nax1.plot(dftmp.timestamp.first(), dftmp.rating.count(), \"r-o\")\nax1.grid(None)\nax1.set_ylim(0,)\nax1.set_xlabel('Year')\nax1.set_ylabel('Number of ratings given')\nplt.title('Ratings per year')\nplt.show()", "_____no_output_____" ], [ "insights.append('Ratings given vary in different years. 
Most Ratings are given around year 2000')", "_____no_output_____" ], [ "# Average Movie Rating\ndftmp = movies[['movieId', 'year']].set_index('movieId').join(\n ratings[['movieId','rating']].groupby('movieId').mean())\ndftmp.rating.hist(bins=25, grid=False, edgecolor='b',figsize=(10,5))\nplt.xlim(0,5)\nplt.xlabel('Average Movie rating')\nplt.ylabel('Number of Movies')\nplt.title('Number of Movies Vs Average Rating')\nplt.show()", "_____no_output_____" ], [ "insights.append('Average movie ratings creates normal distrubition peaked at about 3.5')", "_____no_output_____" ], [ "# Average Movie Ratings Per Year\ndftmp = movies[['movieId', 'year']].set_index('movieId')\ndftmp = dftmp.join(ratings[ratings.set_index('movieId').index.isin(dftmp.index)][['movieId', 'rating']]\n .groupby('movieId').mean())\ndftmp = dftmp.groupby('year').mean()\n\nplt.figure(figsize=(10,5))\nplt.plot(dftmp, \"r-o\", label='All genres', color='black')\nplt.xlabel('Release Year')\nplt.ylabel('Average Rating')\nplt.title('Average Movie Ratings Per Release Year')\nplt.ylim(0,)\nplt.show()", "_____no_output_____" ], [ "insights.append('While movies released after 1960s have more stable average rating, old movies have huge variation in consecutive years')", "_____no_output_____" ], [ "# Average Movie Ratings Per Year In Detail\nfirst_rating_timestamp = ratings['timestamp'].min()\ndftmp = movies[['movieId', 'year']].set_index('movieId')\ndftmp = dftmp[ (dftmp['year'] >= first_rating_timestamp.year) ]\ndftmp = dftmp.join(ratings[ratings.set_index('movieId').index.isin(dftmp.index)][['movieId', 'rating']]\n .groupby('movieId').mean())\ndftmp = dftmp.groupby('year').mean()\n\nplt.figure(figsize=(10,5))\nplt.plot(dftmp, \"r-o\", label='All genres', color='black')\nplt.xlabel('Release Year')\nplt.ylabel('Average Rating')\nplt.title('Average Movie Ratings Per Year For Movies Released After First Rating Given')\nplt.show()", "_____no_output_____" ], [ "insights.append('Users bias changes in different years, as we can see in 2010 users gave average of 3.35 to movies and about 2.5 in 2015')\ninsights.append('When we take a closer look at the average rating of the movies that has been released after first rating given in the dataset, average ratings seem to change a lot as the years pass by. And, average ratings tend to go down. 
This raises questions like, does the movies released in adjacent years changes a lot, or the users having a different trend after the first trend and new movies that has been released on the adjacent years tend to be similar with old trend, which results in lower averages.')", "_____no_output_____" ], [ "# Average Rating Per User\ndftmp = ratings[['userId','rating']].groupby('userId').mean()\ndftmp.rating.hist(bins=100, grid=False, edgecolor='b',figsize=(10,5))\n\nplt.xlim(1,5)\nplt.xlabel ('Average movie rating')\nplt.ylabel ('Number of users')\nplt.title ('Average ratings per user')\nplt.show()", "_____no_output_____" ], [ "insights.append('Users on average gives 3.7 to movies but different users have different average which shows us some of the users are inclined to give low rating and some of them inclined to give high ratings.')", "_____no_output_____" ], [ "# Ratings Per User\ndftmp = ratings[['userId', 'movieId']].groupby('userId').count()\ndftmp.columns=['num_ratings']\ndftmp.sort_values(by='num_ratings', inplace=True, ascending=False)\n\nplt.figure(figsize=(15,5))\nplt.scatter(dftmp.index, dftmp.num_ratings, edgecolor='black')\nplt.xlim(0,len(dftmp.index))\nplt.ylim(0,)\nplt.title('Number of Ratings per user')\nplt.xlabel('userId')\nplt.ylabel('Number of ratings given')\nplt.show()", "_____no_output_____" ], [ "# Histogram of ratings counts.\nplt.figure(figsize=(10,5))\nplt.hist(dftmp.num_ratings, bins=100, edgecolor='black', log=True)\nplt.title('Number of Ratings per user')\nplt.xlabel('Number of ratings given')\nplt.ylabel('Number of users')\nplt.xlim(0,)\nplt.show()", "_____no_output_____" ], [ "insights.append('while high active users are tend to rate 200-500 movies, most of the users gave only few ratings almost 0. Dataset is quite sparse.')", "_____no_output_____" ], [ "# Rating Per Movie\ndftmp = ratings[['userId', 'movieId']].groupby('movieId').count()\ndftmp.columns=['num_ratings']\n\nplt.figure(figsize=(15,5))\nplt.scatter(dftmp.index, dftmp.num_ratings, edgecolor='black')\nplt.xlim(0,dftmp.index.max())\nplt.ylim(0,)\nplt.title('Ratings per movie')\nplt.xlabel('movieId')\nplt.ylabel('Number of ratings received')\nplt.show()", "_____no_output_____" ], [ "# Histogram of ratings counts.\nplt.figure(figsize=(15,5))\nplt.hist(dftmp.num_ratings, bins=100, edgecolor='black', log=True)\nplt.title('Ratings per movie')\nplt.xlabel('Number of ratings received')\nplt.ylabel('Number of movieIds')\nplt.xlim(0,)\nplt.show()", "_____no_output_____" ], [ "insights.append('Almost %99 percent of the movies taken less than 150 ratings.')", "_____no_output_____" ], [ "# Let's check those movies with +150 reviews, those should be pretty popular movies!\nmovies.set_index('movieId').loc[dftmp.index[dftmp.num_ratings>150]]['title'][:10]", "_____no_output_____" ], [ "# Let's check the average rating too, those should be pretty good movies!\nratings.set_index('movieId').loc[dftmp.index[dftmp.num_ratings>150]].groupby('movieId').mean().rating.plot(style='o')\nplt.ylabel('Average rating')\nplt.title('Most rated movies')\nplt.show()", "_____no_output_____" ], [ "insights.append('Most rated movies also tend to be most liked movies.')", "_____no_output_____" ], [ "# Which is the best most popular movie ever??\ntmp = ratings.set_index('movieId').loc[dftmp.index[dftmp.num_ratings>100]].groupby('movieId').mean()\nbest = movies.set_index('movieId').loc[tmp.rating.idxmax].title\nprint ('Best most popular movie ever is...%s' %best)", "Best most popular movie ever is...Shawshank Redemption, The\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec58214132fa9b07a4933ae33fa826b5a49e70ff
15,816
ipynb
Jupyter Notebook
4_inspection_paradox.ipynb
brianspiering/ComputationalStatistics
b70792b285612e566903e13bedd42a8f5a3f9249
[ "Apache-2.0" ]
4
2018-11-22T18:52:48.000Z
2021-10-07T18:26:29.000Z
4_inspection_paradox.ipynb
brianspiering/ComputationalStatistics
b70792b285612e566903e13bedd42a8f5a3f9249
[ "Apache-2.0" ]
null
null
null
4_inspection_paradox.ipynb
brianspiering/ComputationalStatistics
b70792b285612e566903e13bedd42a8f5a3f9249
[ "Apache-2.0" ]
6
2019-03-10T20:20:22.000Z
2021-12-13T10:36:42.000Z
35.068736
6,436
0.707638
[ [ [ "<center><h1>Computational Statistics:</h1></center>\n<center><h1>Inspection Paradox</h1></center>", "_____no_output_____" ], [ "<center>We'll try to answer the eternal question:</center>\n<center>Why are your friends are more popular than you?</center>", "_____no_output_____" ], [ "Preview\n-----\n\n- Summary statistics do not always capture people's experience", "_____no_output_____" ], [ "<center><h2>School advertise small class sizes, <br>but students complain about how large their classes are.</h2></center>\n\n<center><h2>Who is right?</h2></center>", "_____no_output_____" ] ], [ [ "reset -fs", "_____no_output_____" ], [ "from random import expovariate", "_____no_output_____" ], [ "class_size = 30\nsizes = [int(expovariate(1/class_size)) for _ in range(1_000)]\nsizes", "_____no_output_____" ], [ "import seaborn as sns\n\npalette = \"Dark2\"\n%matplotlib inline\n\nsns.distplot(sizes, kde=False);", "_____no_output_____" ], [ "# What is the average class size?\nfrom statistics import mean\n\nmean_class_size = mean(sizes)\nprint(f\"{mean_class_size:.2f}\")", "29.56\n" ], [ "# What is the student's experience?\nfrom random import choices\n\nstudent_experience = choices(sizes, weights=sizes, k=len(sizes))\nmean_student_experience = mean(student_experience)\nprint(f\"{mean_student_experience:.2f}\")", "57.54\n" ] ], [ [ "<center><h2>What is happening?</h2></center>", "_____no_output_____" ], [ "Inspection paradox - Larger classes are more likely be sampled by students than smaller classes.", "_____no_output_____" ], [ "This is also why __your__ bus wait is more likely to be longer than the __average__ bus weight time.", "_____no_output_____" ], [ "<center><h2>Did I cheat by sampling from the exponential. <br>What about other distributions?</h2></center>", "_____no_output_____" ] ], [ [ "def actual_vs_perception(class_size, distribution_name, **distrubtion_params):\n \n sizes = [distribution_name(**distrubtion_params) for _ in range(10_000)]\n mean_class_size = mean(sizes)\n print(f\"The actual mean class size is: {mean_class_size:>8.2f}\")\n\n student_experience = choices(sizes, weights=sizes, k=len(sizes))\n mean_student_experience = mean(student_experience)\n print(f\"The perceived mean class size is: {mean_student_experience:>5.2f}\")", "_____no_output_____" ] ], [ [ "The software engineer in me wants to write tests.", "_____no_output_____" ] ], [ [ "actual_vs_perception(class_size=30, distribution_name=expovariate, lambd=1/class_size)", "The actual mean class size is: 30.40\nThe perceived mean class size is: 60.37\n" ], [ "from random import gauss\n\nactual_vs_perception(class_size=30, distribution_name=gauss, mu=class_size, sigma=10)", "The actual mean class size is: 29.90\nThe perceived mean class size is: 33.06\n" ], [ "from random import uniform\n\nactual_vs_perception(class_size=30, distribution_name=uniform, a=15, b=45)", "The actual mean class size is: 29.99\nThe perceived mean class size is: 32.55\n" ] ], [ [ "The effect is more pronounced with the exponential but present in most distributions.", "_____no_output_____" ], [ "<center><h2>Statistics are a defense against human cognitive biases.</h2></center>", "_____no_output_____" ], [ "<center><h2>Student activity: Answer the following question …</h2></center>\n<center><h2>Why are you less popular than your friends?</h2></center>\n<br>\n\n<center><h2>Think, pair, share</h2></center>", "_____no_output_____" ], [ "Review\n-----\n\n- Summary statistics can be misleading. 
\n- We use simulation to understand the human experience.\n- Statistics and programming can help structure reasoning.\n", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "Sources\n-----\n\nRenewal theory https://en.wikipedia.org/wiki/Renewal_theory\n\nhttp://ben-israel.rutgers.edu/711/Ross-Inspection.pdf\n\nhttp://allendowney.blogspot.com/2015/08/the-inspection-paradox-is-everywhere.html\n\nhttps://twitter.com/raymondh/status/1056774933471145984\n\nhttps://www.scientificamerican.com/article/why-youre-probably-less-popular/\n\nhttp://jakevdp.github.io/blog/2018/09/13/waiting-time-paradox/\n\nBook: [Mindware: Tools for Smart Thinking](https://www.amazon.com/Mindware-Tools-Thinking-Richard-Nisbett/dp/1511357193)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec5823f454eacae806669a8befc894ee1a2cdfa3
13,526
ipynb
Jupyter Notebook
My_notebooks/road_following/data_collection_gamepad.ipynb
geoc1234/jetbot
f2a8ab3bf581db53d22d788f98b7dd7ebde8bcb0
[ "MIT" ]
null
null
null
My_notebooks/road_following/data_collection_gamepad.ipynb
geoc1234/jetbot
f2a8ab3bf581db53d22d788f98b7dd7ebde8bcb0
[ "MIT" ]
null
null
null
My_notebooks/road_following/data_collection_gamepad.ipynb
geoc1234/jetbot
f2a8ab3bf581db53d22d788f98b7dd7ebde8bcb0
[ "MIT" ]
null
null
null
36.956284
436
0.619474
[ [ [ "# Road Following - Data Collection (using Gamepad)\n\nIf you've run through the collision avoidance sample, your should be familiar following three steps\n\n1. Data collection\n2. Training\n3. Deployment\n\nIn this notebook, we'll do the same exact thing! Except, instead of classification, you'll learn a different fundamental technique, **regression**, that we'll use to\nenable JetBot to follow a road (or really, any path or target point). \n\n1. Place the JetBot in different positions on a path (offset from center, different angles, etc)\n\n> Remember from collision avoidance, data variation is key!\n\n2. Display the live camera feed from the robot\n3. Using a gamepad controller, place a 'green dot', which corresponds to the target direction we want the robot to travel, on the image.\n4. Store the X, Y values of this green dot along with the image from the robot's camera\n\nThen, in the training notebook, we'll train a neural network to predict the X, Y values of our label. In the live demo, we'll use\nthe predicted X, Y values to compute an approximate steering value (it's not 'exactly' an angle, as\nthat would require image calibration, but it's roughly proportional to the angle so our controller will work fine).\n\nSo how do you decide exactly where to place the target for this example? Here is a guide we think may help\n\n1. Look at the live video feed from the camera\n2. Imagine the path that the robot should follow (try to approximate the distance it needs to avoid running off road etc.)\n3. Place the target as far along this path as it can go so that the robot could head straight to the target without 'running off' the road.\n\n> For example, if we're on a very straight road, we could place it at the horizon. If we're on a sharp turn, it may need to be placed closer to the robot so it doesn't run out of boundaries.\n\nAssuming our deep learning model works as intended, these labeling guidelines should ensure the following:\n\n1. The robot can safely travel directly towards the target (without going out of bounds etc.)\n2. The target will continuously progress along our imagined path\n\nWhat we get, is a 'carrot on a stick' that moves along our desired trajectory. Deep learning decides where to place the carrot, and JetBot just follows it :)", "_____no_output_____" ], [ "### Labeling example video\n\nExecute the block of code to see an example of how to we labeled the images. This model worked after only 123 images :)", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML('<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/FW4En6LejhI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>')", "/usr/local/lib/python3.6/dist-packages/IPython/core/display.py:717: UserWarning: Consider using IPython.display.IFrame instead\n warnings.warn(\"Consider using IPython.display.IFrame instead\")\n" ] ], [ [ "### Import Libraries", "_____no_output_____" ], [ "So lets get started by importing all the required libraries for \"data collection\" purpose. We will mainly use OpenCV to visualize and save image with labels. Libraries such as uuid, datetime are used for image naming. 
", "_____no_output_____" ] ], [ [ "# IPython Libraries for display and widgets\nimport traitlets\nimport ipywidgets.widgets as widgets\nfrom IPython.display import display\n\n# Camera and Motor Interface for JetBot\nfrom jetbot import Robot, Camera, bgr8_to_jpeg\n\n# Basic Python packages for image annotation\nfrom uuid import uuid1\nimport os\nimport json\nimport glob\nimport datetime\nimport numpy as np\nimport cv2\nimport time", "_____no_output_____" ] ], [ [ "### Display Live Camera Feed", "_____no_output_____" ], [ "First, let's initialize and display our camera like we did in the teleoperation notebook. \n\nWe use Camera Class from JetBot to enable CSI MIPI camera. Our neural network takes a 224x224 pixel image as input. We'll set our camera to that size to minimize the filesize of our dataset (we've tested that it works for this task). In some scenarios it may be better to collect data in a larger image size and downscale to the desired size later.", "_____no_output_____" ] ], [ [ "camera = Camera()\n\nwidget_width = camera.width\nwidget_height = camera.height\n\nimage_widget = widgets.Image(format='jpeg', width=widget_width, height=widget_height)\ntarget_widget = widgets.Image(format='jpeg', width=widget_width, height=widget_height)\n\nx_slider = widgets.FloatSlider(min=-1.0, max=1.0, step=0.001, description='x')\ny_slider = widgets.FloatSlider(min=-1.0, max=1.0, step=0.001, description='y')\n\ndef display_xy(camera_image):\n image = np.copy(camera_image)\n x = x_slider.value\n y = y_slider.value\n x = int(x * widget_width / 2 + widget_width / 2)\n y = int(y * widget_height / 2 + widget_height / 2)\n image = cv2.circle(image, (x, y), 8, (0, 255, 0), 3)\n image = cv2.circle(image, (widget_width / 2, widget_height), 8, (0, 0,255), 3)\n image = cv2.line(image, (x,y), (widget_width / 2, widget_height), (255,0,0), 3)\n jpeg_image = bgr8_to_jpeg(image)\n return jpeg_image\n\ntime.sleep(1)\ntraitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)\ntraitlets.dlink((camera, 'value'), (target_widget, 'value'), transform=display_xy)\n\ndisplay(widgets.HBox([image_widget, target_widget]), x_slider, y_slider)", "_____no_output_____" ] ], [ [ "### Create Gamepad Controller\n\nThis step is similar to \"Teleoperation\" task. In this task, we will use gamepad controller to label images.\n\nThe first thing we want to do is create an instance of the Controller widget, which we'll use to label images with \"x\" and \"y\" values as mentioned in introduction. The Controller widget takes a index parameter, which specifies the number of the controller. This is useful in case you have multiple controllers attached, or some gamepads appear as multiple controllers. To determine the index of the controller you're using,\n\nVisit http://html5gamepad.com.\nPress buttons on the gamepad you're using\nRemember the index of the gamepad that is responding to the button presses\nNext, we'll create and display our controller using that index.", "_____no_output_____" ] ], [ [ "controller = widgets.Controller(index=0)\n\ndisplay(controller)", "_____no_output_____" ] ], [ [ "### Connect Gamepad Controller to Label Images\n\nNow, even though we've connected our gamepad, we haven't yet attached the controller to label images! We'll connect that to the left and right vertical axes using the dlink function. The dlink function, unlike the link function, allows us to attach a transform between the source and target. 
", "_____no_output_____" ] ], [ [ "widgets.jsdlink((controller.axes[2], 'value'), (x_slider, 'value'))\nwidgets.jsdlink((controller.axes[3], 'value'), (y_slider, 'value'))", "_____no_output_____" ] ], [ [ "### Collect data\n\nThe following block of code will display the live image feed, as well as the number of images we've saved. We store\nthe target X, Y values by\n\n1. Place the green dot on the target\n2. Press 'down' on the DPAD to save\n\nThis will store a file in the ``dataset_xy`` folder with files named\n\n``xy_<x value>_<y value>_<uuid>.jpg``\n\nwhere `<x value>` and `<y value>` are the coordinates **in pixel (not in percentage)** (count from the top left corner).\n\nWhen we train, we load the images and parse the x, y values from the filename", "_____no_output_____" ] ], [ [ "DATASET_DIR = 'dataset_xy'\n\n# we have this \"try/except\" statement because these next functions can throw an error if the directories exist already\ntry:\n os.makedirs(DATASET_DIR)\nexcept FileExistsError:\n print('Directories not created because they already exist')\n\nfor b in controller.buttons:\n b.unobserve_all()\n\ncount_widget = widgets.IntText(description='count', value=len(glob.glob(os.path.join(DATASET_DIR, '*.jpg'))))\n\ndef xy_uuid(x, y):\n return 'xy_%03d_%03d_%s' % (x * widget_width / 2 + widget_width / 2, y * widget_height / 2 + widget_height / 2, uuid1())\n\ndef save_snapshot(change):\n if change['new']:\n uuid = xy_uuid(x_slider.value, y_slider.value)\n image_path = os.path.join(DATASET_DIR, uuid + '.jpg')\n with open(image_path, 'wb') as f:\n f.write(image_widget.value)\n count_widget.value = len(glob.glob(os.path.join(DATASET_DIR, '*.jpg')))\n\ncontroller.buttons[13].observe(save_snapshot, names='value')\n\ndisplay(widgets.VBox([\n target_widget,\n count_widget\n]))", "_____no_output_____" ] ], [ [ "Again, let's close the camera conneciton properly so that we can use the camera in other notebooks.", "_____no_output_____" ] ], [ [ "camera.stop()", "_____no_output_____" ] ], [ [ "### Next", "_____no_output_____" ], [ "Once you've collected enough data, we'll need to copy that data to our GPU desktop or cloud machine for training. First, we can call the following terminal command to compress our dataset folder into a single zip file. \n\n> If you're training on the JetBot itself, you can skip this step!", "_____no_output_____" ], [ "The ! prefix indicates that we want to run the cell as a shell (or terminal) command.\n\nThe -r flag in the zip command below indicates recursive so that we include all nested files, the -q flag indicates quiet so that the zip command doesn't print any output", "_____no_output_____" ] ], [ [ "def timestr():\n return str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n\n!zip -r -q road_following_{DATASET_DIR}_{timestr()}.zip {DATASET_DIR}", "_____no_output_____" ] ], [ [ "You should see a file named road_following_<Date&Time>.zip in the Jupyter Lab file browser. You should download the zip file using the Jupyter Lab file browser by right clicking and selecting Download.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
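The JetBot data-collection record above saves each steering label directly in the image file name as `xy_<x value>_<y value>_<uuid>.jpg` and notes that training code later parses those values back out. The sketch below shows one possible way to do that parsing and recover the normalized [-1, 1] coordinates; the helper name and the 224x224 default size are illustrative assumptions, not part of the original notebook.

```python
import re

def parse_xy_from_filename(filename, width=224, height=224):
    """Recover normalized (x, y) in [-1, 1] from a name like 'xy_112_200_<uuid>.jpg'.

    Assumes pixel coordinates were encoded as value * size/2 + size/2,
    mirroring the xy_uuid() helper in the collection notebook above.
    """
    match = re.match(r"xy_(\d+)_(\d+)_", filename)
    if match is None:
        raise ValueError("filename does not follow the xy_<x>_<y>_<uuid> pattern")
    x_pix, y_pix = int(match.group(1)), int(match.group(2))
    x = (x_pix - width / 2) / (width / 2)
    y = (y_pix - height / 2) / (height / 2)
    return x, y

# A label saved at the image centre decodes back to (0.0, 0.0).
print(parse_xy_from_filename("xy_112_112_0a1b2c.jpg"))
```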
ec582e9b81920cbed6d92f475e02b4eb48cb9da4
14,822
ipynb
Jupyter Notebook
Toronto_DataFrame_with_Location.ipynb
aaliashraf/Toronto_Neigborhood_Clustering
4616fec5ffd38e1e9360910af4851cb73ed307b1
[ "MIT" ]
null
null
null
Toronto_DataFrame_with_Location.ipynb
aaliashraf/Toronto_Neigborhood_Clustering
4616fec5ffd38e1e9360910af4851cb73ed307b1
[ "MIT" ]
null
null
null
Toronto_DataFrame_with_Location.ipynb
aaliashraf/Toronto_Neigborhood_Clustering
4616fec5ffd38e1e9360910af4851cb73ed307b1
[ "MIT" ]
null
null
null
30.943633
432
0.4494
[ [ [ "## 1.Scraping the Dataset of Toronto Neighborhood\n", "_____no_output_____" ], [ "The dataset of the Toronto neighborhood is not available in csv form for manipulation, therefore its data should be scraped from wikipedia here is the technique with step by step guide of how to scrap the data.\nThe Beautiful Soup package is used for scraping the data.", "_____no_output_____" ], [ "Before getting start lets download the following libraries", "_____no_output_____" ] ], [ [ "from bs4 import BeautifulSoup # to scraping the site and getting required data\nimport requests\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ " Getting the request and parsing ", "_____no_output_____" ], [ "<!DOCTYPE html>\n<html class=\"client-nojs\" dir=\"ltr\" lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <title>\n List of postal codes of Canada: M - Wikipedia\n </title>\n <script>\n document.documentElement.className=\"client-js\";RLCONF={\"wgBreakFrames\":!1,\"wgSeparatorTransformTable\":[\"\",\"\"],\"wgDigitTransformTable\":[\"\",\"\"],\"wgDefaultDateFormat\":\"dmy\",\"wgMonthNames\":[\"\",\"January\",\"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\",\"November\",\"December\"],\"wgRequestId\":\"XpVnAwpAAEYAAEsuV0QAAADD\",\"wgCSPNonce\":!1,\"wgCanonica", "_____no_output_____" ] ], [ [ "url='https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'\nsource=requests.get(url).text\nsoup=BeautifulSoup(source,'lxml')\n", "_____no_output_____" ] ], [ [ "If you carefully inspect the HTML script all the table contents i.e. postalcode,borough and neighborhood of the toronto which we intend to extract is under class Wikitable .", "_____no_output_____" ] ], [ [ "my_table=soup.find('table',{'class':'wikitable'})#find the class where required data is place\n", "_____no_output_____" ] ], [ [ "Now after examing the data we sure our required data lies in a step from 'tbody','tr' to the element of 'td'.Creating a list for storing our data.", "_____no_output_____" ] ], [ [ "\ndata=[]\ntable_body=my_table.find('tbody')\nsub_table=table_body.find_all('tr')\nfor row in sub_table:\n sub=row.find_all('td') # data resides in the element of td\n sub=[ele.text.strip() for ele in sub]\n data.append(sub)\n\n", "_____no_output_____" ] ], [ [ "### Transform the data into Pandas dataframe ", "_____no_output_____" ], [ "From the links, we have to extract our required data which is postalcode,borough and neighborhood.\nNow Convert the following data into Pandas DataFrame to work in python.\n", "_____no_output_____" ] ], [ [ "arr=np.array(data)\narr_refine=[x for x in arr if x] #removing the empty list\n\npostal_code=[index[0] for index in arr_refine] #getting the postalcode\nborough=[index[1] for index in arr_refine] #getting the borough\nneighbor=[index[2] for index in arr_refine] #getting the neighorhood\n\n\n \ndf = pd.DataFrame(list(zip(postal_code,borough,neighbor)), #creating a table of following columns\n columns =['Postal Code','Borough','Neighborhood'])\n\n\ndf_refine=df[df.Borough !='Not assigned'].reset_index(drop=True) # as required ignore the Not assigned cells\n\ntoronto_data=df_refine.apply(lambda x: x.str.replace('/',',')) #as required add comma\n\ntoronto_data.head()", "_____no_output_____" ] ], [ [ "For calculating row lenght of our data", "_____no_output_____" ] ], [ [ "toronto_data.shape[0]", "_____no_output_____" ] ], [ [ "## 2. 
Adding the Location (Latitude & Longitude) to the Data Set", "_____no_output_____" ], [ "Here the location data set in csv form is used; you can download it from this link: http://cocl.us/Geospatial_data", "_____no_output_____" ] ], [ [ "location=pd.read_csv('Geospatial_Coordinates.csv')\n# Note: the merge key column ('Postal Code') must have the same name in both dataframes\ntoronto_data_location=pd.merge(toronto_data,location, on=['Postal Code'])\ntoronto_data_location", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
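The Toronto record above builds the postal-code table by walking `tbody`/`tr`/`td` tags with Beautiful Soup. As a hedged cross-check, pandas can usually pull the same wikitable in a single call; the sketch below assumes the postal-code table is the first table on the page and that its column headers match the ones used in the notebook, which may change if the page layout changes.

```python
import pandas as pd

url = "https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M"

# read_html returns every <table> on the page as a DataFrame;
# the postal-code wikitable is assumed to be the first one.
raw = pd.read_html(url)[0]

# Mirror the notebook's cleaning step: drop unassigned boroughs.
toronto = raw[raw["Borough"] != "Not assigned"].reset_index(drop=True)
print(toronto.head())
```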
ec5833297bfd2da8df959fd52f2c725ea7d9103d
9,752
ipynb
Jupyter Notebook
components/gcp/dataproc/submit_spark_job/sample.ipynb
adamjm/pipelines
442c80449d4253a66fb247ea345f5f82422c6632
[ "Apache-2.0" ]
null
null
null
components/gcp/dataproc/submit_spark_job/sample.ipynb
adamjm/pipelines
442c80449d4253a66fb247ea345f5f82422c6632
[ "Apache-2.0" ]
null
null
null
components/gcp/dataproc/submit_spark_job/sample.ipynb
adamjm/pipelines
442c80449d4253a66fb247ea345f5f82422c6632
[ "Apache-2.0" ]
null
null
null
35.461818
307
0.602646
[ [ [ "# Name\n\nData preparation using Spark on YARN with Cloud Dataproc\n\n\n# Label\n\nCloud Dataproc, GCP, Cloud Storage, Spark, Kubeflow, pipelines, components, YARN\n\n\n# Summary\n\nA Kubeflow Pipeline component to prepare data by submitting a Spark job on YARN to Cloud Dataproc.\n\n# Details\n\n## Intended use\n\nUse the component to run an Apache Spark job as one preprocessing step in a Kubeflow Pipeline.\n\n## Runtime arguments\nArgument | Description | Optional | Data type | Accepted values | Default |\n:--- | :---------- | :--- | :------- | :------| :------| \nproject_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to.|No | GCPProjectID | | |\nregion | The Cloud Dataproc region to handle the request. | No | GCPRegion | | | \ncluster_name | The name of the cluster to run the job. | No | String | | |\nmain_jar_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the JAR file that contains the main class. | No | GCSPath | | |\nmain_class | The name of the driver's main class. The JAR file that contains the class must be either in the default CLASSPATH or specified in `spark_job.jarFileUris`.| No | | | | \nargs | The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.| Yes | | | |\nspark_job | The payload of a [SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob).| Yes | | | |\njob | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | | | |\nwait_interval | The number of seconds to wait between polling the operation. | Yes | | | 30 |\n\n## Output\nName | Description | Type\n:--- | :---------- | :---\njob_id | The ID of the created job. | String\n\n## Cautions & requirements\n\nTo use the component, you must:\n\n\n\n* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n\n ```\n component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n ```\n\n\n* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n\n\n## Detailed description\n\nThis component creates a Spark job from [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).\n\nFollow these steps to use the component in a pipeline:\n\n\n\n1. Install the Kubeflow Pipeline SDK:", "_____no_output_____" ] ], [ [ "%%capture --no-stderr\n\nKFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'\n!pip3 install $KFP_PACKAGE --upgrade", "_____no_output_____" ] ], [ [ "2. 
Load the component using KFP SDK", "_____no_output_____" ] ], [ [ "import kfp.components as comp\n\ndataproc_submit_spark_job_op = comp.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/eb830cd73ca148e5a1a6485a9374c2dc068314bc/components/gcp/dataproc/submit_spark_job/component.yaml')\nhelp(dataproc_submit_spark_job_op)", "_____no_output_____" ] ], [ [ "### Sample\nNote: The following sample code works in an IPython notebook or directly in Python code.\n\n\n#### Set up a Dataproc cluster\n[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.\n\n\n#### Prepare a Spark job\nUpload your Spark JAR file to a Cloud Storage bucket. In the sample, we use a JAR file that is preinstalled in the main cluster: `file:///usr/lib/spark/examples/jars/spark-examples.jar`.\n\nHere is the [source code of the sample](https://github.com/apache/spark/blob/master/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java).\n\nTo package a self-contained Spark application, follow these [instructions](https://spark.apache.org/docs/latest/quick-start.html#self-contained-applications).\n\n\n#### Set sample parameters", "_____no_output_____" ] ], [ [ "PROJECT_ID = '<Please put your project ID here>'\nCLUSTER_NAME = '<Please put your existing cluster name here>'\nREGION = 'us-central1'\nSPARK_FILE_URI = 'file:///usr/lib/spark/examples/jars/spark-examples.jar'\nMAIN_CLASS = 'org.apache.spark.examples.SparkPi'\nARGS = ['1000']\nEXPERIMENT_NAME = 'Dataproc - Submit Spark Job'", "_____no_output_____" ] ], [ [ "#### Example pipeline that uses the component", "_____no_output_____" ] ], [ [ "import kfp.dsl as dsl\nimport kfp.gcp as gcp\nimport json\[email protected](\n name='Dataproc submit Spark job pipeline',\n description='Dataproc submit Spark job pipeline'\n)\ndef dataproc_submit_spark_job_pipeline(\n project_id = PROJECT_ID, \n region = REGION,\n cluster_name = CLUSTER_NAME,\n main_jar_file_uri = '',\n main_class = MAIN_CLASS,\n args = json.dumps(ARGS), \n spark_job=json.dumps({ 'jarFileUris': [ SPARK_FILE_URI ] }), \n job='{}', \n wait_interval='30'\n):\n dataproc_submit_spark_job_op(\n project_id=project_id, \n region=region, \n cluster_name=cluster_name, \n main_jar_file_uri=main_jar_file_uri, \n main_class=main_class,\n args=args, \n spark_job=spark_job, \n job=job, \n wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n ", "_____no_output_____" ] ], [ [ "#### Compile the pipeline", "_____no_output_____" ] ], [ [ "pipeline_func = dataproc_submit_spark_job_pipeline\npipeline_filename = pipeline_func.__name__ + '.zip'\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(pipeline_func, pipeline_filename)", "_____no_output_____" ] ], [ [ "#### Submit the pipeline for execution", "_____no_output_____" ] ], [ [ "#Specify pipeline argument values\narguments = {}\n\n#Get or create an experiment and submit a pipeline run\nimport kfp\nclient = kfp.Client()\nexperiment = client.create_experiment(EXPERIMENT_NAME)\n\n#Submit a pipeline run\nrun_name = pipeline_func.__name__ + ' run'\nrun_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)", "_____no_output_____" ] ], [ [ "## References\n\n* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/component_sdk/python/kfp_component/google/dataproc/_submit_spark_job.py)\n* [Component Docker 
file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)\n* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_spark_job/sample.ipynb)\n* [Dataproc SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)\n\n## License\nBy deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
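The Dataproc component in the record above takes `args` and `spark_job` as JSON strings rather than Python objects, which is easy to miss when wiring the pipeline together. The small sketch below simply shows how those payloads can be assembled and inspected before being passed to `dataproc_submit_spark_job_op`; the values are the same placeholders used in the notebook and carry no special meaning.

```python
import json

spark_file_uri = "file:///usr/lib/spark/examples/jars/spark-examples.jar"

# The component consumes JSON strings, so serialize the Python structures first.
args_payload = json.dumps(["1000"])
spark_job_payload = json.dumps({"jarFileUris": [spark_file_uri]})

print(args_payload)       # -> ["1000"]
print(spark_job_payload)  # -> {"jarFileUris": ["file:///usr/lib/spark/..."]}
```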
ec583d862502adf8187e97633156dc9d7cb0cf4f
20,691
ipynb
Jupyter Notebook
optimize_expert_identifier-model.ipynb
Mphasis-ML-Marketplace/Mphasis-Optimize.AI-Expert-Identifier
0e665347d98e33a8a1b37ddda9a9bb0853cb9d0b
[ "Apache-2.0" ]
null
null
null
optimize_expert_identifier-model.ipynb
Mphasis-ML-Marketplace/Mphasis-Optimize.AI-Expert-Identifier
0e665347d98e33a8a1b37ddda9a9bb0853cb9d0b
[ "Apache-2.0" ]
null
null
null
optimize_expert_identifier-model.ipynb
Mphasis-ML-Marketplace/Mphasis-Optimize.AI-Expert-Identifier
0e665347d98e33a8a1b37ddda9a9bb0853cb9d0b
[ "Apache-2.0" ]
null
null
null
32.584252
613
0.510463
[ [ [ "## Deploy Mphasis Optimize.AI Expert Identifier Model Package from AWS Marketplace \n\n\nThis sample notebook shows you how to deploy Expert Identifier is machine learning based model that uses information present in any incident/ticket management data such as: Ticket ID, Ticket Solver Id, Ticket Priority, Ticket Category, Ticket Submission and Resolved date and identifies the right expert to be assigned to a specific ticket or incident request. It can optimise ticket allocation, decreases the ticket resolution time and improve KPIs (Key Performance Indicators) such as customer satisfaction, adherence to SLA (Service Level Agreement), MTTR (Mean Time to Resolve), cost to company, etc.\n using Amazon SageMaker.\n\n> **Note**: This is a reference notebook and it cannot run unless you make changes suggested in the notebook.\n\n#### Pre-requisites:\n1. **Note**: This notebook contains elements which render correctly in Jupyter interface. Open this notebook from an Amazon SageMaker Notebook Instance or Amazon SageMaker Studio.\n1. Ensure that IAM role used has **AmazonSageMakerFullAccess**\n1. To deploy this ML model successfully, ensure that:\n 1. Either your IAM role has these three permissions and you have authority to make AWS Marketplace subscriptions in the AWS account used: \n 1. **aws-marketplace:ViewSubscriptions**\n 1. **aws-marketplace:Unsubscribe**\n 1. **aws-marketplace:Subscribe** \n 2. or your AWS account has a subscription to Mphasis Optimize.AI Expert Identifier. If so, skip step: [Subscribe to the model package](#1.-Subscribe-to-the-model-package)\n\n#### Contents:\n1. [Subscribe to the model package](#1.-Subscribe-to-the-model-package)\n2. [Create an endpoint and perform real-time inference](#2.-Create-an-endpoint-and-perform-real-time-inference)\n 1. [Create an endpoint](#A.-Create-an-endpoint)\n 2. [Create input payload](#B.-Create-input-payload)\n 3. [Perform real-time inference](#C.-Perform-real-time-inference)\n 4. [Visualize output](#D.-Visualize-output)\n 5. [Delete the endpoint](#E.-Delete-the-endpoint)\n3. [Perform batch inference](#3.-Perform-batch-inference) \n4. [Clean-up](#4.-Clean-up)\n 1. [Delete the model](#A.-Delete-the-model)\n 2. [Unsubscribe to the listing (optional)](#B.-Unsubscribe-to-the-listing-(optional))\n \n\n#### Usage instructions\nYou can run this notebook one cell at a time (By using Shift+Enter for running a cell).", "_____no_output_____" ], [ "### 1. Subscribe to the model package", "_____no_output_____" ], [ "To subscribe to the model package:\n1. Open the model package listing page Mphasis Optimize.AI Expert Identifier.\n1. On the AWS Marketplace listing, click on the **Continue to subscribe** button.\n1. On the **Subscribe to this software** page, review and click on **\"Accept Offer\"** if you and your organization agrees with EULA, pricing, and support terms. \n1. Once you click on **Continue to configuration button** and then choose a **region**, you will see a **Product Arn** displayed. This is the model package ARN that you need to specify while creating a deployable model using Boto3. 
Copy the ARN corresponding to your region and specify the same in the following cell.", "_____no_output_____" ] ], [ [ "model_package_arn='arn:aws:sagemaker:us-east-2:786796469737:model-package/marketplace-expert'", "_____no_output_____" ], [ "import base64\nimport json \nimport uuid\nfrom sagemaker import ModelPackage\nimport sagemaker as sage\nfrom sagemaker import get_execution_role\nfrom sagemaker import ModelPackage\nfrom urllib.parse import urlparse\nimport boto3\nfrom IPython.display import Image\nfrom PIL import Image as ImageEdit\nimport urllib.request\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "role = get_execution_role()\n\nsagemaker_session = sage.Session()\n\nbucket=sagemaker_session.default_bucket()\nbucket", "_____no_output_____" ] ], [ [ "### 2. Create an endpoint and perform real-time inference", "_____no_output_____" ], [ "If you want to understand how real-time inference with Amazon SageMaker works, see [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html).", "_____no_output_____" ] ], [ [ "model_name='expert'\n\ncontent_type='text/csv'\n\nreal_time_inference_instance_type='ml.t2.medium'\nbatch_transform_inference_instance_type='ml.m5.large'", "_____no_output_____" ] ], [ [ "#### A. Create an endpoint", "_____no_output_____" ] ], [ [ "\ndef predict_wrapper(endpoint, session):\n return sage.predictor.Predictor(endpoint, session,content_type)\n\n#create a deployable model from the model package.\nmodel = ModelPackage(role=role,\n model_package_arn=model_package_arn,\n sagemaker_session=sagemaker_session,\n predictor_cls=predict_wrapper)\n\n#Deploy the model\npredictor = model.deploy(1, real_time_inference_instance_type, endpoint_name=model_name)", "_____no_output_____" ] ], [ [ "Once endpoint has been created, you would be able to perform real-time inference.", "_____no_output_____" ], [ "#### B. Create input payload", "_____no_output_____" ] ], [ [ "sample_input = \"Input_file.csv\"", "_____no_output_____" ] ], [ [ "<Add code snippet that shows the payload contents>", "_____no_output_____" ] ], [ [ "input_df = pd.read_csv(sample_input)\ninput_df.head()", "_____no_output_____" ], [ "file_name = sample_input\noutput_file_name = \"sample_output\"", "_____no_output_____" ] ], [ [ "#### C. Perform real-time inference", "_____no_output_____" ] ], [ [ "!aws sagemaker-runtime invoke-endpoint \\\n --endpoint-name $model_name \\\n --body fileb://$file_name \\\n --content-type $content_type \\\n --region $sagemaker_session.boto_region_name \\\n $output_file_name", "{\r\n \"ContentType\": \"text/csv; charset=utf-8\",\r\n \"InvokedProductionVariant\": \"AllTraffic\"\r\n}\r\n" ] ], [ [ "#### D. Visualize output", "_____no_output_____" ] ], [ [ "out_df = pd.read_csv(output_file_name)\nout_df.head()", "_____no_output_____" ] ], [ [ "#### E. Delete the endpoint", "_____no_output_____" ], [ "Now that you have successfully performed a real-time inference, you do not need the endpoint any more. You can terminate the endpoint to avoid being charged.", "_____no_output_____" ] ], [ [ "predictor=sage.predictor.Predictor(model_name, sagemaker_session,content_type)\npredictor.delete_endpoint(delete_endpoint_config=True)", "_____no_output_____" ] ], [ [ "### 3. Perform batch inference", "_____no_output_____" ], [ "In this section, you will perform batch inference using multiple input payloads together. If you are not familiar with batch transform, and want to learn more, see these links:\n1. 
[How it works](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-batch-transform.html)\n2. [How to run a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)", "_____no_output_____" ] ], [ [ "#upload the batch-transform job input files to S3\ntransform_input_folder = \"data/input/batch\"\ntransform_input = sagemaker_session.upload_data(transform_input_folder, key_prefix=model_name) \nprint(\"Transform input uploaded to \" + transform_input)", "_____no_output_____" ], [ "#Run the batch-transform job\ntransformer = model.transformer(1, batch_transform_inference_instance_type)\ntransformer.transform(transform_input, content_type=content_type)\ntransformer.wait()", "_____no_output_____" ], [ "#output is available on following path\ntransformer.output_path", "_____no_output_____" ] ], [ [ "### 4. Clean-up", "_____no_output_____" ], [ "#### A. Delete the model", "_____no_output_____" ] ], [ [ "model.delete_model()", "_____no_output_____" ] ], [ [ "#### B. Unsubscribe to the listing (optional)", "_____no_output_____" ], [ "If you would like to unsubscribe to the model package, follow these steps. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model. \n\n**Steps to unsubscribe to product from AWS Marketplace**:\n1. Navigate to __Machine Learning__ tab on [__Your Software subscriptions page__](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=mlmp_gitdemo_indust)\n2. Locate the listing that you want to cancel the subscription for, and then choose __Cancel Subscription__ to cancel the subscription.\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
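The real-time inference step in the record above shells out to the AWS CLI (`aws sagemaker-runtime invoke-endpoint`). An equivalent call can be made from Python with boto3's SageMaker runtime client; the sketch below reuses the endpoint name and CSV input file from the notebook, but it is an untested illustration of the API rather than a verified replacement for the CLI cell.

```python
import boto3

runtime = boto3.client("sagemaker-runtime")

# Post the same CSV payload the notebook sends through the CLI.
with open("Input_file.csv", "rb") as f:
    response = runtime.invoke_endpoint(
        EndpointName="expert",   # model_name used in the notebook
        ContentType="text/csv",
        Body=f.read(),
    )

# The prediction comes back as a streaming body; decode it to text.
print(response["Body"].read().decode("utf-8"))
```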
ec584008e5f54e9f342d2bc1bbe9508d868a0e34
2,266
ipynb
Jupyter Notebook
data/energy/v5/3_fixing_index_name.ipynb
theoneandonlywoj/RTE
1d17cb7288c7816362b028d84b6edac76909badd
[ "Apache-2.0" ]
3
2018-02-01T00:14:34.000Z
2019-04-29T00:40:13.000Z
data/energy/v5/.ipynb_checkpoints/3_fixing_index_name-checkpoint.ipynb
arcyfelix/RTE
1d17cb7288c7816362b028d84b6edac76909badd
[ "Apache-2.0" ]
null
null
null
data/energy/v5/.ipynb_checkpoints/3_fixing_index_name-checkpoint.ipynb
arcyfelix/RTE
1d17cb7288c7816362b028d84b6edac76909badd
[ "Apache-2.0" ]
3
2017-07-11T21:53:48.000Z
2018-09-08T04:48:17.000Z
23.360825
73
0.417476
[ [ [ "import pandas as pd\nfrom tqdm import tqdm", "_____no_output_____" ], [ "stations = ['Auvergne-Rhone-Alpes', \n 'Bourgogne-Franche-Comte', \n 'Bretagne', \n 'Centre-Val de Loire', \n 'Grand-Est', \n 'Hauts-de-France',\n 'Ile-de-France', \n 'Normandie', \n 'Nouvelle-Aquitaine', \n 'Occitanie',\n 'PACA', \n 'Pays-de-la-Loire']\n\nyears = ['2013', '2014', '2015', '2016+2017-05', '2017-06']\nfeatures = ['Consumption',\n 'Thermal',\n 'Nuclear',\n 'Wind',\n 'Solar',\n 'Hydraulic',\n 'Pumping',\n 'Bioenergies',\n 'Physical exchanges']", "_____no_output_____" ], [ "for year in tqdm(years):\n for feature in features:\n file = './' + year + '/' + feature + '_' + year + '.csv'\n df = pd.read_csv(file)\n\n df = df.rename(columns={'Unnamed: 0': 'Time'})\n\n df.index = df['Time']\n del df['Time']\n df.to_csv(file) ", "100%|██████████| 5/5 [00:07<00:00, 1.24s/it]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
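The loop in the record above renames the unnamed first column to `Time`, promotes it to the index, and rewrites each CSV in place. The same per-file fix can be expressed more compactly, as sketched below; it assumes the first column of every file is the timestamp, exactly as the original loop does, and the example path is illustrative only.

```python
import pandas as pd

def fix_index(path):
    # Read the first column as the index, name it 'Time', and write back in place.
    df = pd.read_csv(path, index_col=0)
    df.index.name = "Time"
    df.to_csv(path)

# Example usage (hypothetical path following the notebook's layout):
# fix_index("./2013/Consumption_2013.csv")
```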
ec5841b8309cbee05e285e451793f0d1d5d2098f
4,989
ipynb
Jupyter Notebook
public-notebooks/2-compute-features-and-labels.ipynb
TomDecroos/atomic-spadl
b7ffbcb73b5fce00275491dc8cc32ca96849dc25
[ "MIT" ]
7
2019-11-18T15:49:36.000Z
2021-09-17T09:44:03.000Z
public-notebooks/2-compute-features-and-labels.ipynb
TomDecroos/atomic-spadl
b7ffbcb73b5fce00275491dc8cc32ca96849dc25
[ "MIT" ]
null
null
null
public-notebooks/2-compute-features-and-labels.ipynb
TomDecroos/atomic-spadl
b7ffbcb73b5fce00275491dc8cc32ca96849dc25
[ "MIT" ]
null
null
null
26.967568
116
0.526558
[ [ [ "%load_ext autoreload\n%autoreload 2\nimport os; import sys; sys.path.insert(0,'../')\nimport pandas as pd\nimport tqdm\nimport warnings\nwarnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n\nimport socceraction.classification.features as fs\nimport socceraction.classification.labels as lab", "_____no_output_____" ], [ "## Configure file and folder names\ndatafolder = \"../data\"\nspadl_h5 = os.path.join(datafolder,\"spadl-statsbomb.h5\")\nfeatures_h5 = os.path.join(datafolder,\"features.h5\")\nlabels_h5 = os.path.join(datafolder,\"labels.h5\")", "_____no_output_____" ], [ "games = pd.read_hdf(spadl_h5,\"games\")\ngames = games[games.competition_name == \"FIFA World Cup\"]\nprint(\"nb of games:\", len(games))\n\nactiontypes = pd.read_hdf(spadl_h5, \"actiontypes\")\nbodyparts = pd.read_hdf(spadl_h5, \"bodyparts\")\nresults = pd.read_hdf(spadl_h5, \"results\")", "nb of games: 64\n" ], [ "xfns = [fs.actiontype,\n fs.actiontype_onehot,\n fs.bodypart,\n fs.bodypart_onehot,\n fs.result,\n fs.result_onehot,\n fs.goalscore,\n fs.startlocation,\n fs.endlocation,\n fs.movement,\n fs.space_delta,\n fs.startpolar,\n fs.endpolar,\n fs.team,\n fs.time,\n fs.time_delta\n ]\n\nfor game in tqdm.tqdm(list(games.itertuples()),desc=f\"Generating and storing features in {features_h5}\"):\n actions = pd.read_hdf(spadl_h5,f\"actions/game_{game.game_id}\")\n actions = (\n actions.merge(actiontypes,how=\"left\")\n .merge(results,how=\"left\")\n .merge(bodyparts,how=\"left\")\n .reset_index(drop=True)\n )\n gamestates = fs.gamestates(actions,3)\n gamestates = fs.play_left_to_right(gamestates,game.home_team_id)\n \n X = pd.concat([fn(gamestates) for fn in xfns],axis=1)\n X.to_hdf(features_h5,f\"game_{game.game_id}\")", "Generating and storing features in ../data/features.h5: 100%|██████████| 64/64 [00:29<00:00, 2.14it/s]\n" ], [ "yfns = [lab.scores,lab.concedes,lab.goal_from_shot]\n\nfor game in tqdm.tqdm(list(games.itertuples()),desc=f\"Computing and storing labels in {labels_h5}\"):\n actions = pd.read_hdf(spadl_h5,f\"actions/game_{game.game_id}\")\n actions = (\n actions.merge(actiontypes,how=\"left\")\n .merge(results,how=\"left\")\n .merge(bodyparts,how=\"left\")\n .reset_index(drop=True)\n )\n \n Y = pd.concat([fn(actions) for fn in yfns],axis=1)\n Y.to_hdf(labels_h5,f\"game_{game.game_id}\")", "Computing and storing labels in ../data/labels.h5: 100%|██████████| 64/64 [00:12<00:00, 5.28it/s]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
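The record above stores one feature frame and one label frame per game under `game_<id>` keys in the two HDF5 files. A natural next step is to stack those per-game frames into a single training matrix; the sketch below assumes the same file names and key convention and is not taken from the original notebook.

```python
import pandas as pd

features_h5 = "../data/features.h5"
labels_h5 = "../data/labels.h5"

def load_training_data(game_ids):
    # Concatenate the per-game feature and label frames, keeping rows aligned.
    X = pd.concat([pd.read_hdf(features_h5, f"game_{gid}") for gid in game_ids])
    Y = pd.concat([pd.read_hdf(labels_h5, f"game_{gid}") for gid in game_ids])
    return X.reset_index(drop=True), Y.reset_index(drop=True)

# Example with hypothetical game ids:
# X, Y = load_training_data([7525, 7526])
```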
ec5842a6e5c38c2c58024a774ed192b3831c262a
54,346
ipynb
Jupyter Notebook
ETL Pipeline Preparation.ipynb
kaienn/DST2_Disaster-Response-Pipeline
24e17f2f29ebd81b10d00315521f9f25394b1eff
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
ETL Pipeline Preparation.ipynb
kaienn/DST2_Disaster-Response-Pipeline
24e17f2f29ebd81b10d00315521f9f25394b1eff
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
ETL Pipeline Preparation.ipynb
kaienn/DST2_Disaster-Response-Pipeline
24e17f2f29ebd81b10d00315521f9f25394b1eff
[ "FTL", "CNRI-Python", "blessing" ]
null
null
null
36.062376
292
0.3914
[ [ [ "# ETL Pipeline Preparation\nFollow the instructions below to help you create your ETL pipeline.\n### 1. Import libraries and load datasets.\n- Import Python libraries\n- Load `messages.csv` into a dataframe and inspect the first few lines.\n- Load `categories.csv` into a dataframe and inspect the first few lines.", "_____no_output_____" ] ], [ [ "# import libraries\nimport pandas as pd\nfrom sqlalchemy import create_engine", "_____no_output_____" ], [ "# load messages dataset\nmessages = pd.read_csv('messages.csv')\nmessages.head()", "_____no_output_____" ], [ "# load categories dataset\ncategories = pd.read_csv('categories.csv')\ncategories.head()", "_____no_output_____" ] ], [ [ "### 2. Merge datasets.\n- Merge the messages and categories datasets using the common id\n- Assign this combined dataset to `df`, which will be cleaned in the following steps", "_____no_output_____" ] ], [ [ "# merge datasets\ndf = messages.merge(categories, on='id')\ndf.head()", "_____no_output_____" ] ], [ [ "### 3. Split `categories` into separate category columns.\n- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.\n- Use the first row of categories dataframe to create column names for the categories data.\n- Rename columns of `categories` with new column names.", "_____no_output_____" ] ], [ [ "# create a dataframe of the 36 individual category columns\ncategories = df['categories'].str.split(';', expand = True)\ncategories.head()", "_____no_output_____" ], [ "# select the first row of the categories dataframe\nrow = categories.iloc[0]\n\n# use this row to extract a list of new column names for categories.\n# one way is to apply a lambda function that takes everything \n# up to the second to last character of each string with slicing\ncategory_colnames = row.str[:-2]\nprint(category_colnames)", "0 related\n1 request\n2 offer\n3 aid_related\n4 medical_help\n5 medical_products\n6 search_and_rescue\n7 security\n8 military\n9 child_alone\n10 water\n11 food\n12 shelter\n13 clothing\n14 money\n15 missing_people\n16 refugees\n17 death\n18 other_aid\n19 infrastructure_related\n20 transport\n21 buildings\n22 electricity\n23 tools\n24 hospitals\n25 shops\n26 aid_centers\n27 other_infrastructure\n28 weather_related\n29 floods\n30 storm\n31 fire\n32 earthquake\n33 cold\n34 other_weather\n35 direct_report\nName: 0, dtype: object\n" ], [ "# rename the columns of `categories`\ncategories.columns = category_colnames\ncategories.head()", "_____no_output_____" ] ], [ [ "### 4. Convert category values to just numbers 0 or 1.\n- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. Convert the string to a numeric value.\n- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.html#indexing-with-str), like indexing, by including `.str` after the Series. 
You may need to first convert the Series to be of type string, which you can do with `astype(str)`.", "_____no_output_____" ] ], [ [ "for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n \n # convert column from string to numeric\n categories[column] = categories[column].astype(int)\ncategories.head()", "_____no_output_____" ] ], [ [ "### 5. Replace `categories` column in `df` with new category columns.\n- Drop the categories column from the df dataframe since it is no longer needed.\n- Concatenate df and categories data frames.", "_____no_output_____" ] ], [ [ "# drop the original categories column from `df`\ndf.drop('categories', axis=1, inplace=True)\ndf.head()", "_____no_output_____" ], [ "# concatenate the original dataframe with the new `categories` dataframe\ndf = pd.concat([df, categories], axis=1)\ndf.head()", "_____no_output_____" ] ], [ [ "### 6. Remove duplicates.\n- Check how many duplicates are in this dataset.\n- Drop the duplicates.\n- Confirm duplicates were removed.", "_____no_output_____" ] ], [ [ "# drop duplicates\ndf.drop_duplicates(inplace=True)", "_____no_output_____" ], [ "# check number of duplicates\ndf.duplicated().sum()", "_____no_output_____" ] ], [ [ "### 7. Save the clean dataset into an sqlite database.\nYou can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.", "_____no_output_____" ] ], [ [ "engine = create_engine('sqlite:///DisasterResponse.db')\ndf.to_sql('DisasterResponse', engine, index=False)", "_____no_output_____" ] ], [ [ "### 8. Use this notebook to complete `etl_pipeline.py`\nUse the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
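Step 4 of the ETL record above converts every category string such as `related-1` into an integer by looping over the columns one at a time. The same transformation can be applied to all columns in one pass; the sketch below reproduces that logic on a tiny stand-in frame so it stays self-contained.

```python
import pandas as pd

# Tiny stand-in for the 'categories' frame produced in step 3.
categories = pd.DataFrame({
    "related": ["related-1", "related-0"],
    "request": ["request-0", "request-1"],
})

# One-pass equivalent of the column loop in step 4:
# keep the last character of each cell and cast it to int.
categories = categories.apply(lambda col: col.str[-1].astype(int))
print(categories)
```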
ec584a2ab6b574093f57bd461d83f9c64a8aafc7
25,475
ipynb
Jupyter Notebook
applications/notebooks/romulo/co_clustering_dist.ipynb
phenology/infrastructure
ef4398786984cd8bb652df5883348ce54aafb831
[ "Apache-2.0" ]
2
2017-06-21T15:35:37.000Z
2018-03-29T09:04:46.000Z
applications/notebooks/romulo/co_clustering_dist.ipynb
phenology/infrastructure
ef4398786984cd8bb652df5883348ce54aafb831
[ "Apache-2.0" ]
50
2017-04-10T09:03:25.000Z
2018-04-06T07:11:51.000Z
applications/notebooks/romulo/co_clustering_dist.ipynb
phenology/infrastructure
ef4398786984cd8bb652df5883348ce54aafb831
[ "Apache-2.0" ]
1
2021-06-29T17:24:09.000Z
2021-06-29T17:24:09.000Z
58.833718
13,382
0.729107
[ [ [ "Sys.setenv(SPARK_HOME='/usr/lib/spark')\n.libPaths(c(file.path(Sys.getenv('SPARK_HOME'), 'R', 'lib'), .libPaths()))", "_____no_output_____" ], [ "library(SparkR)", "\nAttaching package: ‘SparkR’\n\nThe following objects are masked from ‘package:stats’:\n\n cov, filter, lag, na.omit, predict, sd, var, window\n\nThe following objects are masked from ‘package:base’:\n\n as.data.frame, colnames, colnames<-, drop, endsWith, intersect,\n rank, rbind, sample, startsWith, subset, summary, transform, union\n\n" ], [ "appName <- 'co_clustering'\nmasterURL <- 'spark://pheno0.phenovari-utwente.surf-hosted.nl:7077'\n\nsc <- sparkR.session(master=masterURL, appName=appName)\n", "Spark package found in SPARK_HOME: /usr/lib/spark\n" ] ], [ [ "## Bregman block average co-clustering algorithm.\n\nThe implementation of Bregman block average co-clustering algorithm (Banerjee et al., 2007) is inpired in the [single node impementation](https://github.com/fnyanez/bbac), [Copyright (c) 2016 Felipe Yanez](https://github.com/fnyanez/bbac/blob/master/LICENSE.md).", "_____no_output_____" ], [ "### Support functions", "_____no_output_____" ], [ "```\ncoCavg <- function(dist, row_col, R, Z, C, W, epsilon) {\n CoCavg <- calculate_average(R, Z, C, W, epsilon)\n if (row_col==\"row\") {\n return(list(Zrowc = array(dist, dim(Z)), Zrowv = CoCavg %*% t(C)))\n } else if (row_col==\"col\") {\n return(list(Zcolc = array(dist, dim(Z)), Zcolv = R %*% CoCavg))\n }\n}\n```", "_____no_output_____" ] ], [ [ "coCavg <- function(dist, row_col, R, matrixDF, C, W, epsilon) {\n CoCavg <- calculate_average(R, matrixDF, C, W, epsilon)\n if (row_col==\"row\") {\n return(list(Zrowc = array(dist, dim(matrixDF)), Zrowv = CoCavg %*% t(C)))\n } else if (row_col==\"col\") {\n return(list(Zcolc = array(dist, dim(matrixDF)), Zcolv = R %*% CoCavg))\n }\n}", "_____no_output_____" ] ], [ [ "```\ncalculate_average <- function(Left, Z, Right, W, epsilon) {\n if (is.null(W)) {W <- array(1, dim(Z))} else {Z <- W * Z}\n numerator <- t(Left) %*% Z %*% Right + mean(Z) * epsilon\n denominator <- t(Left) %*% W %*% Right + epsilon\n return(numerator/denominator)\n}\n```", "_____no_output_____" ] ], [ [ "calculate_average <- function(Left, matrixDF, Right, W, epsilon) {\n if (is.null(W)) {\n #W <- array(1, dim(matrixDF))\n W <- spark.createDataFrame(sc.emptyRDD[Row], schema)\n } else {\n matrixDF <- W * matrixDF\n }\n numerator <- t(Left) %*% matrixDF %*% Right + mean(matrixDF) * epsilon\n denominator <- t(Left) %*% W %*% Right + epsilon\n return(numerator/denominator)\n}", "_____no_output_____" ] ], [ [ "```\nsimilarity_measure <- function(dist, Z, X, Y, W, epsilon) {\n if (is.null(W)) W <- array(1, dim(Z))\n if (dist==0) {\n euc <- function(i) rowSums(W * (Z - X - rep(Y[i,], each = dim(Z)[1]))^2)\n return(sapply(1:dim(Y)[1], euc))\n } else if (dist==1) {\n return((W * X) %*% t(Y + epsilon) - (W * Z) %*% log(t(Y + epsilon)))\n }\n}\n```", "_____no_output_____" ] ], [ [ "similarity_measure <- function(dist, matrixDF, X, Y, W, epsilon) {\n if (is.null(W))\n W <- array(1, dim(matrixDF))\n if (dist==0) {\n euc <- function(i) rowSums(W * (matrixDF - X - rep(Y[i,], each = dim(matrixDF)[1]))^2)\n return(sapply(1:dim(Y)[1], euc))\n } else if (dist==1) {\n return((W * X) %*% t(Y + epsilon) - (W * Z) %*% log(t(Y + epsilon)))\n }\n}", "_____no_output_____" ] ], [ [ "```\nassign_cluster <- function(dist, Z, X, Y, W, epsilon) {\n D <- similarity_measure(dist, Z, X, Y, W, epsilon)\n id <- sapply(1:dim(D)[1], function(i) sort(D[i,], index.return = TRUE)$ix[1])\n res <- 
sapply(1:dim(D)[1], function(i) sort(D[i,])[1]^(2-dist))\n return(list(Cluster = diag(dim(Y)[1])[id,], Error = sum(res)))\n}\n```", "_____no_output_____" ] ], [ [ "assign_cluster <- function(dist, matrixDF, X, Y, W, epsilon) {\n D <- similarity_measure(dist, matrixDF, X, Y, W, epsilon)\n id <- sapply(1:dim(D)[1], function(i) sort(D[i,], index.return = TRUE)$ix[1])\n res <- sapply(1:dim(D)[1], function(i) sort(D[i,])[1]^(2-dist))\n return(list(Cluster = diag(dim(Y)[1])[id,], Error = sum(res)))\n}", "_____no_output_____" ] ], [ [ "### BBAC\n```\nZ - m x n data matrix, \nW - m x n measure matrix, \nk - num row clusters,\nl - num col clusters\n```", "_____no_output_____" ] ], [ [ "bbac <- function(matrixDF, numRowC, numColC, W = NULL, distance = \"euclidean\", errobj = 1e-6, niters = 100, nruns = 5, epsilon = 1e-8) {\n \n error <- Inf\n error_now <- Inf\n dist <- pmatch(tolower(distance), c(\"euclidean\",\"divergence\")) - 1\n \n for (r in 1:nruns) {\n \n # Initialization of R and C\n R <- diag(numRowC)[base::sample(numRowC, dim(matrixDF)[1], replace = TRUE),]\n C <- diag(numColC)[base::sample(numColC, dim(matrixDF)[2], replace = TRUE),]\n \n for (s in 1:niters) {\n \n # Row estimation\n rs <- coCavg(dist, \"row\", R, matrixDF, C, W, epsilon)\n ra <- assign_cluster(dist, matrixDF, rs$Zrowc, rs$Zrowv, W, epsilon)\n R <- ra$Cluster\n \n # Column estimation\n cs <- coCavg(dist, \"col\", R, matrixDF, C, W, epsilon)\n ca <- assign_cluster(dist, t(matrixDF), t(cs$Zcolc), t(cs$Zcolv), W, epsilon)\n C <- ca$Cluster\n \n # \n if (abs(ca$Error - error_now) < errobj) {\n status <- paste(\"converged in\",s,\"iterations\")\n return(list(R = R, C = C, status = status))\n }\n \n # Update objective value\n error_now <- ca$Error\n \n }\n \n # Keep pair with min error\n if (error_now < error) {\n R_star <- R\n C_star <- C\n error <- error_now\n }\n \n }\n \n status <- paste(\"reached maximum of\", niters, \"iterations\")\n return(list(R = R_star, C = C_star, status = status))\n \n}", "_____no_output_____" ] ], [ [ "### Load Data\n\nThe data will be loaded from a matrix create in Scala and saved as Parquet file.", "_____no_output_____" ] ], [ [ "set.seed(1)\ninput_matrix <- matrix(rep(1:4, 25), 10, 10)", "_____no_output_____" ] ], [ [ "### Run ", "_____no_output_____" ] ], [ [ "# Run co-clustering algorithm\nbbac_res <- bbac(matrixDF, k = 2, l = 2, distance = \"e\")", "_____no_output_____" ], [ "dim(matrixDF)", "_____no_output_____" ] ], [ [ "### Plot results", "_____no_output_____" ] ], [ [ "plot_coclusters <- function(Z, R, C) {\n # Sort matrix\n Y <- t(Z[(R * (1:nrow(R)))[R != 0], (C * (1:nrow(C)))[C != 0]])\n \n # Plot sorted matrix\n image(seq(0, 1, length.out = dim(Y)[1]), seq(0, 1, length.out = dim(Y)[2]),\n Y, col = grey((0:12)/12), axes = FALSE, xlab = \"\", ylab = \"\")\n \n # Print row clusters\n row_clust <- (head(cumsum(colSums(R)), -1) - 0.5)/(ncol(Y) - 1)\n invisible(sapply(1:length(row_clust), function(i) \n segments(-0.5, row_clust[i], 1.5, row_clust[i], col = 2, lwd = 2)))\n \n # Print column clusters\n col_clust <- (head(cumsum(colSums(C)), -1) - 0.5)/(nrow(Y) - 1)\n invisible(sapply(1:length(col_clust), function(i) \n segments(col_clust[i], -0.5, col_clust[i], 1.5, col = 2, lwd = 2)))\n}\n \n# Show co-clusters\npar(mfrow=c(1, 1))\nplot_coclusters(input_matrix, bbac_res$R, bbac_res$C)\ntitle(paste(\"CoCavg algorithm\", bbac_res$status))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
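The core of `assign_cluster` in the record above is picking, for every row, the cluster prototype with the smallest squared-Euclidean distance and summing the resulting error. The snippet below illustrates that assignment step in plain NumPy on a toy matrix; it is a simplified sketch of the idea only (no weight matrix W, Euclidean case only) and not a port of the SparkR implementation.

```python
import numpy as np

rng = np.random.default_rng(1)
Z = rng.random((10, 4))          # toy data matrix (rows = objects)
prototypes = rng.random((2, 4))  # two row-cluster prototypes

# Squared Euclidean distance from every row of Z to every prototype.
D = ((Z[:, None, :] - prototypes[None, :, :]) ** 2).sum(axis=2)

labels = D.argmin(axis=1)    # index of the closest prototype per row
error = D.min(axis=1).sum()  # total within-cluster error

# One-hot membership matrix, analogous to the Cluster output above.
R = np.eye(prototypes.shape[0])[labels]
print(labels, error)
```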
ec584a8a653cd63a8277fc8151894c9fb0525685
3,240
ipynb
Jupyter Notebook
Python Crash Course/Module 2 - Numpy/Numpy L4.ipynb
lars-olsson/deep-machine-learning
0823628a44a7123d7dfdbd2670f48de37d7bece7
[ "MIT" ]
49
2017-09-07T19:56:45.000Z
2022-02-17T11:19:14.000Z
python-crash-course/module2-numpy/Numpy L4.ipynb
sondrec/deep-machine-learning
5f96c500119bd5ee66c641822cd5bf0d7240d88a
[ "MIT" ]
16
2018-09-05T08:10:21.000Z
2021-09-06T11:47:54.000Z
python-crash-course/module2-numpy/Numpy L4.ipynb
sondrec/deep-machine-learning
5f96c500119bd5ee66c641822cd5bf0d7240d88a
[ "MIT" ]
103
2017-09-19T13:37:32.000Z
2021-11-03T14:09:59.000Z
17.513514
162
0.508951
[ [ [ "# 4. Random numbers\n---", "_____no_output_____" ], [ "In this final lecture about Numpy, we'll learn how to generate pseudo-random numbers with it, and how to find more information about Numpy's functionalities.", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "Numpy has a very handy module called `random` to generate pseudo-random numbers.", "_____no_output_____" ] ], [ [ "np.random.rand(3,3)", "_____no_output_____" ], [ "# random integers between 1 and 19 (the upper bound, 20, is exclusive)\nnp.random.randint(1,20,[3,3])", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "To allow for reproducibility in our programs, we can adjust the seed of the pseudo-random number generator.", "_____no_output_____" ] ], [ [ "np.random.rand(1,4)", "_____no_output_____" ], [ "np.random.seed(10)", "_____no_output_____" ], [ "np.random.rand(1,4)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "More functions for pseudo-random number generation can be found using the `help` command.", "_____no_output_____" ] ], [ [ "help(np.random)", "_____no_output_____" ], [ "help(np.random.normal)", "_____no_output_____" ] ], [ [ "Or by checking [the docs](https://docs.scipy.org/doc/numpy/).", "_____no_output_____" ], [ "For example:", "_____no_output_____" ] ], [ [ "np.random.geometric(0.2)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
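The NumPy lecture above seeds the legacy global generator with `np.random.seed`. Newer NumPy versions (1.17+) also provide a `Generator` object that carries its own seed, which avoids hidden global state; the sketch below shows the same reproducibility idea with that API and is an addition for context, not part of the original lecture.

```python
import numpy as np

# A seeded generator reproduces the same draws on every run.
rng = np.random.default_rng(10)
print(rng.random((1, 4)))
print(rng.integers(1, 20, size=(3, 3)))  # integers in [1, 20), like randint above

# Re-creating the generator with the same seed gives identical output.
rng2 = np.random.default_rng(10)
print(rng2.random((1, 4)))
```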
ec58572dd3de95231a71bbf4591fa3939d051e99
289,435
ipynb
Jupyter Notebook
SciPy2016/MTwork/inv3d_HPK1/Make data figures.ipynb
simpeg/simpegExamples
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
1
2021-08-07T13:46:54.000Z
2021-08-07T13:46:54.000Z
SciPy2016/MTwork/inv3d_HPK1/Make data figures.ipynb
simpeg/simpegExamples
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
1
2016-07-27T22:20:36.000Z
2016-07-27T22:20:36.000Z
SciPy2016/MTwork/inv3d_HPK1/Make data figures.ipynb
simpeg/presentations
38b8064fb854d809f72b7f1ca8b8096bca696af1
[ "MIT" ]
null
null
null
898.86646
161,524
0.945038
[ [ [ "# Notebook to make data figures for the NSEM scipy poster./", "_____no_output_____" ], [ "from SimPEG.NSEM.Utils import plotDataTypes as pDt\nfrom SimPEG.NSEM.Utils import skindepth\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# Make phusdo sections ~ 7133627.5\n# Load the data\nmtRecData = np.load('../ForwardModeling_noExtension_Coarse/MTdataStArr_nsmesh_HKPK1Coarse_noExtension.npy')\n# Find the section coords\nnp.unique(mtRecData['y']) ", "_____no_output_____" ], [ "def psudoSect_OffDiagTip_RealImag(dataArray,sectDict,colBarMode='single',cLevel=None):\n '''\n Function that plots psudo sections of difference of real, imaginary and abs of the MT impedance\n '''\n from mpl_toolkits.axes_grid1 import ImageGrid\n\n fig = plt.figure(1,(15., 9.))\n axes = ImageGrid(fig, (0.05,0.05,0.875,0.875),aspect=False,nrows_ncols = (2, 4),\n axes_pad = 0.25,add_all=True,share_all=True,label_mode = \"L\",\n cbar_mode=colBarMode,cbar_location='right',cbar_pad=0.005)\n\n [ax.set_yscale('log') for ax in axes]\n n,v = sectDict.items()[0]\n fig.text(0.5,0.96,'Data section at {:.1f} m Northing '.format(v),fontsize=18,ha='center')\n # Plot data\n comps = ['zxy','zxy','zyx','zyx','tzx','tzx','tzy','tzy']\n cTypes = ['real','imag','real','imag','real','imag','real','imag']\n colBs = [True]*8 #[False,False,False,True,False,False,False,True] #\n cLevels = [[1e-1,1e2],[1e-1,1e2],[1e-1,1e2],[1e-1,1e2],\n [1e-3,1e0],[1e-3,1e0],[1e-3,1e0],[1e-3,1e0]]\n csList = []\n for ax, comp, ctype, colB, cLevel in zip(axes,comps,cTypes,colBs,cLevels):\n csList.append(pDt.plotPsudoSectNSimpedance(ax,sectDict,dataArray,comp,ctype,cLevel=cLevel,colorbar=colB))\n\n return (fig, axes, csList)\n\ndef psudoSect_FullImpTip_RealImag(dataArray,sectDict,colBarMode='single',cLevel=None):\n '''\n Function that plots psudo sections of difference of real, imaginary and abs of the MT impedance\n '''\n from mpl_toolkits.axes_grid1 import ImageGrid\n\n fig = plt.figure(1,(15., 13.5))\n axes = ImageGrid(fig, (0.05,0.05,0.875,0.875),aspect=False,nrows_ncols = (3, 4),\n axes_pad = 0.25,add_all=True,share_all=True,label_mode = \"L\",\n cbar_mode=colBarMode,cbar_location='right',cbar_pad=0.005)\n\n [ax.set_yscale('log') for ax in axes]\n n,v = sectDict.items()[0]\n fig.text(0.5,0.96,'Data section at {:.1f} m Northing '.format(v),fontsize=18,ha='center')\n # Plot data\n comps = ['zxx','zxx','zxy','zxy','zyx','zyx','zyy','zyy','tzx','tzx','tzy','tzy']\n cTypes = ['real','imag','real','imag','real','imag','real','imag','real','imag','real','imag']\n colBs = [True]*12 #[False,False,False,True,False,False,False,True] #\n cLevels = [[1e-1,1e2],[1e-1,1e2],[1e-1,1e2],[1e-1,1e2],\n [1e-1,1e2],[1e-1,1e2],[1e-1,1e2],[1e-1,1e2],\n [1e-3,1e0],[1e-3,1e0],[1e-3,1e0],[1e-3,1e0]]\n csList = []\n for ax, comp, ctype, colB, cLevel in zip(axes,comps,cTypes,colBs,cLevels):\n csList.append(pDt.plotPsudoSectNSimpedance(ax,sectDict,dataArray,comp,ctype,cLevel=cLevel,colorbar=colB))\n\n return (fig, axes, csList)", "_____no_output_____" ], [ "%matplotlib inline\n# Make the plot\nfig, axes, csList = psudoSect_OffDiagTip_RealImag(mtRecData,{'y':7133627.5},colBarMode='each')\n[csList[i][1].remove() for i in [0,1,2,4,5,6]]\nax1 = axes[4]\nax1.set_xticklabels(np.round((np.array(ax1.get_xticks().tolist(),dtype=int)/100).tolist())/10.)\n# ax1.get_xticklabels().rotation=45 \naxes[0].set_ylabel('Frequency [Hz]')\nax1.set_ylabel('Frequency [Hz]')\nfig.savefig('NSEM_OffDiagTip_exampleData.png',dpi=300,transparent=True)", "_____no_output_____" ], [ 
"ax1.get_xticklabels()", "_____no_output_____" ], [ "ax = axes[0]\nax.set_xticks([])\n \n# ax1.set_xlabel('Easting UTM km')\n# ax1.set_xticklabels(np.round((np.array(ax1.get_xticks().tolist(),dtype=int)/1000).tolist()))\n# ax1.set_ylabel('Northing UTM km')\n# ax1.set_yticklabels(np.round((np.array(ax1.get_yticks().tolist(),dtype=int)/1000).tolist()))\n# ax1.set_aspect('equal')", "_____no_output_____" ], [ "%matplotlib inline\n# Make the plot\nfig, axes, csList = psudoSect_FullImpTip_RealImag(mtRecData,{'y':7133627.5},colBarMode='each')\n[csList[i][1].remove() for i in [0,1,2,4,5,6,8,9,10]]\nax1 = axes[4]\nax1.set_xticklabels(np.round((np.array(ax1.get_xticks().tolist(),dtype=int)/100).tolist())/10.)\n# ax1.get_xticklabels().rotation=45 \naxes[0].set_ylabel('Frequency [Hz]')\naxes[4].set_ylabel('Frequency [Hz]')\naxes[8].set_ylabel('Frequency [Hz]')\nfig.savefig('NSEM_FullImpTip_exampleData.png',dpi=300,transparent=True)", "_____no_output_____" ], [ "pwd", "_____no_output_____" ], [ "plt.savefig?", "_____no_output_____" ], [ "skindepth(100,100)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
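The last cell of the record above evaluates `skindepth(100, 100)` from SimPEG's utilities. As a rough sanity check, the standard plane-wave skin-depth approximation (about 503 * sqrt(resistivity / frequency) metres) can be computed directly; the helper below is a generic sketch and its argument order is not meant to mirror SimPEG's function signature.

```python
import numpy as np

def approx_skin_depth(resistivity_ohm_m, frequency_hz):
    """Plane-wave skin depth in metres: ~503 * sqrt(rho / f)."""
    return 503.0 * np.sqrt(resistivity_ohm_m / frequency_hz)

# For 100 ohm-m ground at 100 Hz the skin depth is roughly 503 m.
print(approx_skin_depth(100.0, 100.0))
```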
ec586321fc2daf55232f0f655d3fb3e2336342b2
11,952
ipynb
Jupyter Notebook
python/The Guardian Data ingestion and wrangling/.ipynb_checkpoints/The_Guardian_biotec - Incyte-checkpoint.ipynb
georgetown-analytics/Economic-Events
413765c40b8d7dcfcaae21e38dd5dc5c063f5c2c
[ "MIT" ]
1
2021-03-26T15:51:31.000Z
2021-03-26T15:51:31.000Z
python/The Guardian Data ingestion and wrangling/The_Guardian_biotec - Incyte.ipynb
georgetown-analytics/Economic-Events
413765c40b8d7dcfcaae21e38dd5dc5c063f5c2c
[ "MIT" ]
null
null
null
python/The Guardian Data ingestion and wrangling/The_Guardian_biotec - Incyte.ipynb
georgetown-analytics/Economic-Events
413765c40b8d7dcfcaae21e38dd5dc5c063f5c2c
[ "MIT" ]
null
null
null
30.724936
151
0.586931
[ [ [ "# Code to download The Guardian UK data and clean data for text analysis\n@Jorge de Leon \n\nThis script allows you to download news articles that match your parameters from the Guardian newspaper, https://www.theguardian.com/us.", "_____no_output_____" ], [ "## Set-up", "_____no_output_____" ] ], [ [ "import os\nimport re \nimport glob\nimport json\nimport requests\nimport pandas as pd \n\nfrom glob import glob\nfrom os import makedirs\nfrom textblob import TextBlob\nfrom os.path import join, exists\nfrom datetime import date, timedelta\n\nos.chdir(\"..\")\n\nimport nltk\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('stopwords')\nfrom nltk import sent_tokenize, word_tokenize\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.corpus import stopwords", "_____no_output_____" ] ], [ [ "## API and news articles requests\n\nThis section contains the code that will be used to download articles from the Guardian website. \nthe initial variables will be determined as user-defined parameters.", "_____no_output_____" ] ], [ [ "#Enter API and parameters - these parameters can be obtained by playing around with the Guardian API tool:\n# https://open-platform.theguardian.com/explore/\n\n# Set up initial and end date \n\nstart_date_global = date(2000, 1, 1)\nend_date_global = date(2020, 5, 17)\nquery = \"Incyte\"\nterm = ('stock')\n\n#Enter API key, endpoint and parameters\nmy_api_key = open(\"..\\\\input files\\\\creds_guardian.txt\").read().strip()\napi_endpoint = \"http://content.guardianapis.com/search?\"\nmy_params = {\n 'from-date': '',\n 'to-date': '',\n 'show-fields': 'bodyText',\n 'q': query,\n 'page-size': 200,\n 'api-key': my_api_key\n}", "_____no_output_____" ], [ "articles_dir = join('theguardian','incyte')\nmakedirs(articles_dir, exist_ok=True)", "_____no_output_____" ], [ "# day iteration from here:\n# http://stackoverflow.com/questions/7274267/print-all-day-dates-between-two-dates\nstart_date = start_date_global\nend_date = end_date_global\ndayrange = range((end_date - start_date).days + 1)\nfor daycount in dayrange:\n dt = start_date + timedelta(days=daycount)\n datestr = dt.strftime('%Y-%m-%d')\n fname = join(articles_dir, datestr + '.json')\n if not exists(fname):\n # then let's download it\n print(\"Downloading\", datestr)\n all_results = []\n my_params['from-date'] = datestr\n my_params['to-date'] = datestr\n current_page = 1\n total_pages = 1\n while current_page <= total_pages:\n print(\"...page\", current_page)\n my_params['page'] = current_page\n resp = requests.get(api_endpoint, my_params)\n data = resp.json()\n all_results.extend(data['response']['results'])\n # if there is more than one page\n current_page += 1\n total_pages = data['response']['pages']\n\n with open(fname, 'w') as f:\n print(\"Writing to\", fname)\n\n # re-serialize it for pretty indentation\n f.write(json.dumps(all_results, indent=2))", "_____no_output_____" ], [ "#Read all json files that will be concatenated\ntest_files = sorted(glob('theguardian/incyte/*.json'))", "_____no_output_____" ], [ "#intialize empty list that we will append dataframes to\nall_files = []\n \n#write a for loop that will go through each of the file name through globbing and the end result will be the list \n#of dataframes\nfor file in test_files:\n try:\n articles = pd.read_json(file)\n all_files.append(articles)\n except pd.errors.EmptyDataError:\n print('Note: filename.csv ws empty. 
Skipping')\n continue #will skip the rest of the bloc and move to next file\n\n#create dataframe with data from json files\ntheguardian_rawdata = pd.concat(all_files, axis=0, ignore_index=True) ", "_____no_output_____" ] ], [ [ "## Text Analysis", "_____no_output_____" ] ], [ [ "#Drop empty columns\ntheguardian_rawdata = theguardian_rawdata.iloc[:,0:12]", "_____no_output_____" ], [ "#show types of media that was downloaded by type\ntheguardian_rawdata['type'].unique()", "_____no_output_____" ], [ "#filter only for articles\ntheguardian_rawdata = theguardian_rawdata[theguardian_rawdata['type'].str.match('article',na=False)]", "_____no_output_____" ], [ "#remove columns that do not contain relevant information for analysis\ntheguardian_dataset = theguardian_rawdata.drop(['apiUrl','id', 'isHosted', 'pillarId', 'pillarName',\n 'sectionId', 'sectionName', 'type','webTitle', 'webUrl'], axis=1)", "_____no_output_____" ], [ "#Modify the column webPublicationDate to Date and the fields to string and lower case\ntheguardian_dataset[\"date\"] = pd.to_datetime(theguardian_dataset[\"webPublicationDate\"]).dt.strftime('%Y-%m-%d')\ntheguardian_dataset['fields'] = theguardian_dataset['fields'].astype(str).str.lower()", "_____no_output_____" ], [ "#Clean the articles from URLS, remove punctuaction and numbers. \ntheguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('<.*?>','') # remove HTML tags\ntheguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('[^\\w\\s]','') # remove punc.", "_____no_output_____" ], [ "#Generate sentiment analysis for each article\n#Using TextBlob obtain polarity\ntheguardian_dataset['sentiment_polarity'] = theguardian_dataset['fields'].apply(lambda row: TextBlob(row).sentiment.polarity)\n#Using TextBlob obtain subjectivity\ntheguardian_dataset['sentiment_subjectivity'] = theguardian_dataset['fields'].apply(lambda row: TextBlob(row).sentiment.subjectivity)", "_____no_output_____" ], [ "#Remove numbers from text\ntheguardian_dataset['fields'] = theguardian_dataset['fields'].str.replace('\\d+','') # remove numbers\n\n#Then I will tokenize each word and remover stop words\ntheguardian_dataset['tokenized_fields'] = theguardian_dataset.apply(lambda row: nltk.word_tokenize(row['fields']), axis=1)", "_____no_output_____" ], [ "#Stop words\nstop_words=set(stopwords.words(\"english\"))", "_____no_output_____" ], [ "#Remove stop words\ntheguardian_dataset['tokenized_fields'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [item for item in x if item not in stop_words])", "_____no_output_____" ], [ "#Count number of words and create a column with the most common 5 words per article\nfrom collections import Counter\ntheguardian_dataset['high_recurrence'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [k for k, v in Counter(x).most_common(5)])", "_____no_output_____" ], [ "#Create a word count for the word \"stock\"\ntheguardian_dataset['word_ocurrence'] = theguardian_dataset['tokenized_fields'].apply(lambda x: [w for w in x if re.search(term, w)])\ntheguardian_dataset['word_count'] = theguardian_dataset['word_ocurrence'].apply(len)", "_____no_output_____" ], [ "#Create a count of the total number of words\ntheguardian_dataset['total_words'] = theguardian_dataset['tokenized_fields'].apply(len)", "_____no_output_____" ], [ "#Create new table with average polarity, subjectivity, count of the word \"stock\" per day\nguardian_microsoft = 
theguardian_dataset.groupby('date')['sentiment_polarity','sentiment_subjectivity','word_count','total_words'].agg('mean')", "_____no_output_____" ], [ "#Create a variable for the number of articles per day\ncount_articles = theguardian_dataset\ncount_articles['no_articles'] = count_articles.groupby(['date'])['fields'].transform('count')\ncount_articles = count_articles[[\"date\",\"no_articles\"]]\ncount_articles_df = count_articles.drop_duplicates(subset = \"date\", \n keep = \"first\", inplace=False) ", "_____no_output_____" ], [ "#Join tables by date\nguardian_microsoft = guardian_microsoft.merge(count_articles_df, on='date', how ='left')", "_____no_output_____" ], [ "#Save dataframes into CSV\ntheguardian_dataset.to_csv('theguardian/incyte/theguardian_incyte_text.csv', encoding='utf-8')\nguardian_microsoft.to_csv('theguardian/incyte/theguardian_incyte_data.csv', encoding='utf-8')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5863348efd241aea9ffdfb48262c3f75a24ef5
43,313
ipynb
Jupyter Notebook
notebooks/Day10_SQL_EngineeringTasks.ipynb
tomaztk/Azure-Databricks
01ae99b58abf207800096dc74b8f7679a196942c
[ "MIT" ]
28
2020-12-02T19:24:29.000Z
2022-03-23T20:52:28.000Z
notebooks/Day10_SQL_EngineeringTasks.ipynb
DonnyZhao/Azure-Databricks
db1456d581327c106e757c081a56eea3fe3d6419
[ "MIT" ]
null
null
null
notebooks/Day10_SQL_EngineeringTasks.ipynb
DonnyZhao/Azure-Databricks
db1456d581327c106e757c081a56eea3fe3d6419
[ "MIT" ]
22
2020-12-06T16:43:21.000Z
2022-03-08T08:23:36.000Z
21,656.5
43,312
0.722254
[ [ [ "# Using Azure Databricks Notebooks with SQL for Data engineering tasks", "_____no_output_____" ], [ "## Exploring tables and Databases", "_____no_output_____" ] ], [ [ "%sql\nSHOW TABLES;", "_____no_output_____" ], [ "%sql \nSHOW TABLES FROM default;", "_____no_output_____" ], [ "%sql \nSHOW TABLES IN default LIKE 'day6*'", "_____no_output_____" ] ], [ [ "## Creating database and tables", "_____no_output_____" ] ], [ [ "%sql \nCREATE DATABASE IF NOT EXISTS Day10 COMMENT 'This is a sample database for day10' LOCATION '/user';", "_____no_output_____" ] ], [ [ "Check the information about database", "_____no_output_____" ] ], [ [ "%sql\nDESCRIBE DATABASE EXTENDED Day10;", "_____no_output_____" ] ], [ [ "For the underlying CSV we will create a table.\nWe will be using CSV file from Day 6, and it should be still available on location dbfs:/FileStore/Day6_data_dbfs.csv.\nThis dataset has three columns (Date, Temperature and City) and it should be good starting example.", "_____no_output_____" ] ], [ [ "%sql\nUSE Day10;\n\nDROP TABLE IF EXISTS temperature;\nCREATE TABLE temperature (date STRING, mean_daily_temp STRING, city STRING)", "_____no_output_____" ], [ "%sql\nUSE Day10;\n\nSELECT * FROM temperature", "_____no_output_____" ], [ "%sql\nSHOW TABLES IN Day10;", "_____no_output_____" ], [ "%sql\nUSE Day10;\n\nDROP VIEW IF EXISTS temp_view2;\nCREATE TEMPORARY VIEW temp_view2\nUSING CSV\nOPTIONS (path \"/FileStore/Day6Data_dbfs.csv\", header \"true\", mode \"FAILFAST\")", "_____no_output_____" ], [ "%sql\nUSE Day10;\nSELECT * FROM temp_view2", "_____no_output_____" ], [ "%sql\nUSE Day10;\nINSERT INTO temperature TABLE temp_view2;", "_____no_output_____" ] ], [ [ "Check the data types using DESCRIBE clause and most likely we will need to change data type on temp_view2, column temperature from STRING to INT", "_____no_output_____" ] ], [ [ "%sql\nDESCRIBE temperature", "_____no_output_____" ], [ "%sql\nDESCRIBE temp_view2", "_____no_output_____" ], [ "%sql\nUSE Day10;\n\nALTER TABLE temperature CHANGE COLUMN mean_daily_temp STRING\n", "_____no_output_____" ], [ "%sql\nDESCRIBE temperature", "_____no_output_____" ] ], [ [ "### Now create two tables and crete a join", "_____no_output_____" ] ], [ [ "%sql\nUSE Day10;\n\nDROP TABLE IF EXISTS temp1;\nDROP TABLE IF EXISTS temp2;\n\n\nCREATE TABLE temp1 (id_t1 INT, name STRING, temperature INT);\nCREATE TABLE temp2 (id_t2 INT, name STRING, temperature INT);", "_____no_output_____" ], [ "%sql\nUSE Day10;\n\nINSERT INTO temp1 VALUES (2, 'Ljubljana', 1);\nINSERT INTO temp1 VALUES (3, 'Seattle', 5);\nINSERT INTO temp2 VALUES (1, 'Ljubljana', -3);\nINSERT INTO temp2 VALUES (2, 'Seattle`', 3);\n\n", "_____no_output_____" ], [ "%sql\nUSE Day10;\n\nSELECT \nt1.Name as City1\n,t2.Name AS City2\n,t1.temperature*t2.Temperature AS MultipliedTemperature\n\nFROM temp1 AS t1\nJOIN temp2 AS t2\nON t1.id_t1 = t2.id_t2\nWHERE \nt1.name <> t2.name\nLIMIT 1", "_____no_output_____" ], [ "%sql\nSHOW COLUMNS IN temp1;", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec586e5854fcd1ad6b5b6d469056126c179b5914
3,975
ipynb
Jupyter Notebook
examples/notebook/contrib/coins_grid_mip.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2020-07-18T16:24:09.000Z
2020-07-18T16:24:09.000Z
examples/notebook/contrib/coins_grid_mip.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2021-02-23T10:22:55.000Z
2021-02-23T13:57:14.000Z
examples/notebook/contrib/coins_grid_mip.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2021-03-16T14:30:59.000Z
2021-03-16T14:30:59.000Z
34.868421
85
0.553962
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec58710b0e6a489ac2a913732aa58818ae3ad930
2,516
ipynb
Jupyter Notebook
notebooks/minio_setup.ipynb
gvashishtha/seldon-core
425431b90a99576a6dfeb65927bca41e740fca0f
[ "Apache-2.0" ]
4
2019-08-29T19:36:55.000Z
2021-12-20T00:37:08.000Z
notebooks/minio_setup.ipynb
gvashishtha/seldon-core
425431b90a99576a6dfeb65927bca41e740fca0f
[ "Apache-2.0" ]
153
2020-02-03T10:48:40.000Z
2021-08-02T10:31:51.000Z
notebooks/minio_setup.ipynb
YikSanChan/seldon-core
1b94454c137e41ce6e38a320667d140352805cc7
[ "Apache-2.0" ]
7
2020-09-07T09:10:57.000Z
2021-11-25T02:59:02.000Z
19.811024
139
0.533386
[ [ [ "# Install MinIO in cluster", "_____no_output_____" ], [ "## Helm install minio", "_____no_output_____" ] ], [ [ "%%bash\nkubectl create ns minio-system\nhelm repo add minio https://helm.min.io/\nhelm install minio minio/minio \\\n --set accessKey=minioadmin \\\n --set secretKey=minioadmin \\\n --namespace minio-system", "_____no_output_____" ], [ "!kubectl rollout status deployment -n minio-system minio ", "_____no_output_____" ] ], [ [ "## port-forward Minio to localhost\n\nin separate terminal:\n\n```bash\nkubectl port-forward -n minio-system svc/minio 8090:9000\n```\n\nor follow instructions printed by helm", "_____no_output_____" ], [ "## Install MinIO CLI client tool", "_____no_output_____" ], [ "Install minio using `go get`:", "_____no_output_____" ] ], [ [ "%%bash\nGO111MODULE=on go get github.com/minio/mc", "_____no_output_____" ] ], [ [ "Or follow steps relevant to your platform from official [documentation](https://docs.min.io/docs/minio-client-quickstart-guide.html).", "_____no_output_____" ], [ "## Configure mc client to talk to your cluster", "_____no_output_____" ] ], [ [ "%%bash\nmc config host add minio-seldon http://localhost:8090 minioadmin minioadmin", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec5880adc89658c4fcfc0b06cc11b50f41dc7ee4
17,188
ipynb
Jupyter Notebook
11 Regression Algorithms/6. Decision Tree Regression/Python/decision_tree_regression.ipynb
IshmaelAsabere/Machine_Learning-Various-Topics
2c663ab73e2631522dac0fa1ec49042aa2088da4
[ "MIT" ]
389
2021-06-13T13:57:13.000Z
2022-03-30T07:49:47.000Z
11 Regression Algorithms/6. Decision Tree Regression/Python/decision_tree_regression.ipynb
IshmaelAsabere/Machine_Learning-Various-Topics
2c663ab73e2631522dac0fa1ec49042aa2088da4
[ "MIT" ]
23
2020-07-21T04:54:58.000Z
2022-03-08T23:30:06.000Z
11 Regression Algorithms/6. Decision Tree Regression/Python/decision_tree_regression.ipynb
IshmaelAsabere/Machine_Learning-Various-Topics
2c663ab73e2631522dac0fa1ec49042aa2088da4
[ "MIT" ]
109
2021-06-13T14:26:21.000Z
2022-03-29T11:55:27.000Z
80.694836
11,926
0.809111
[ [ [ "# Decision Tree Regression", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Importing the dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, -1].values", "_____no_output_____" ] ], [ [ "## Training the Decision Tree Regression model on the whole dataset", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)", "_____no_output_____" ] ], [ [ "## Predicting a new result", "_____no_output_____" ] ], [ [ "regressor.predict([[6.5]])", "_____no_output_____" ] ], [ [ "## Visualising the Decision Tree Regression results (higher resolution)", "_____no_output_____" ] ], [ [ "X_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Truth or Bluff (Decision Tree Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec5889a0859fa8005ef35adab2ecaa1c9d211119
24,810
ipynb
Jupyter Notebook
notebooks/synthetic_data/Step3_train_text_model.ipynb
sreeshnair/octopod
c4d26c19735dff7c386338324a7ba1fd56ffbdab
[ "BSD-3-Clause" ]
27
2020-04-13T20:07:31.000Z
2020-06-11T09:08:32.000Z
notebooks/synthetic_data/Step3_train_text_model.ipynb
sreeshnair/octopod
c4d26c19735dff7c386338324a7ba1fd56ffbdab
[ "BSD-3-Clause" ]
24
2020-07-09T15:43:10.000Z
2022-03-08T18:24:25.000Z
notebooks/synthetic_data/Step3_train_text_model.ipynb
sreeshnair/octopod
c4d26c19735dff7c386338324a7ba1fd56ffbdab
[ "BSD-3-Clause" ]
9
2020-11-02T16:33:12.000Z
2022-03-05T00:21:40.000Z
28.648961
186
0.468763
[ [ [ "As the third step of this tutorial, we will train a text model. This step can be run in parallel with Step 2 (training the image model).\n\nThis notebook was run on an AWS p3.2xlarge", "_____no_output_____" ], [ "# Octopod Text Model Training Pipeline", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n\n%autoreload 2", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "import sys\nsys.path.append('../../')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import AdamW, BertTokenizer, get_cosine_schedule_with_warmup", "_____no_output_____" ] ], [ [ "Note: for text, we use the MultiTaskLearner since we will only have one input, the text.", "_____no_output_____" ] ], [ [ "from octopod import MultiTaskLearner, MultiDatasetLoader\nfrom octopod.text.dataset import OctopodTextDataset\nfrom octopod.text.models.multi_task_bert import BertForMultiTaskClassification", "_____no_output_____" ] ], [ [ "For our Bert model, we need a tokenizer. We'll use the one from huggingface's `transformers` library.", "_____no_output_____" ] ], [ [ "bert_tok = BertTokenizer.from_pretrained(\n 'bert-base-uncased',\n do_lower_case=True\n)", "_____no_output_____" ] ], [ [ "## Load in train and validation datasets", "_____no_output_____" ], [ "First we load in the csv's we created in Step 1.\nRemember to change the path if you stored your data somewhere other than the default.", "_____no_output_____" ] ], [ [ "TRAIN_COLOR_DF = pd.read_csv('data/color_swatches/color_train.csv')", "_____no_output_____" ], [ "VALID_COLOR_DF = pd.read_csv('data/color_swatches/color_valid.csv')", "_____no_output_____" ], [ "TRAIN_PATTERN_DF = pd.read_csv('data/pattern_swatches/pattern_train.csv')", "_____no_output_____" ], [ "VALID_PATTERN_DF = pd.read_csv('data/pattern_swatches/pattern_valid.csv')", "_____no_output_____" ] ], [ [ "You will most likely have to alter this to however big your batches can be on your machine", "_____no_output_____" ] ], [ [ "batch_size = 16", "_____no_output_____" ] ], [ [ "We use the `OctopodTextDataSet` class to create train and valid datasets for each task.\n\nCheck out the documentation for infomation about the `tokenizer` and `max_seq_length` arguments.", "_____no_output_____" ] ], [ [ "max_seq_length = 128", "_____no_output_____" ], [ "color_train_dataset = OctopodTextDataset(\n x=TRAIN_COLOR_DF['complex_color'],\n y=TRAIN_COLOR_DF['simple_color_cat'],\n tokenizer=bert_tok,\n max_seq_length=max_seq_length\n)\ncolor_valid_dataset = OctopodTextDataset(\n x=VALID_COLOR_DF['complex_color'],\n y=VALID_COLOR_DF['simple_color_cat'],\n tokenizer=bert_tok,\n max_seq_length=max_seq_length\n)\n\npattern_train_dataset = OctopodTextDataset(\n x=TRAIN_PATTERN_DF['fake_text'],\n y=TRAIN_PATTERN_DF['pattern_type_cat'],\n tokenizer=bert_tok,\n max_seq_length=max_seq_length\n)\npattern_valid_dataset = OctopodTextDataset(\n x=VALID_PATTERN_DF['fake_text'],\n y=VALID_PATTERN_DF['pattern_type_cat'],\n tokenizer=bert_tok,\n max_seq_length=max_seq_length\n)", "_____no_output_____" ] ], [ [ "We then put the datasets into a dictionary of dataloaders.\n\nEach task is a key.", "_____no_output_____" ] ], [ [ "train_dataloaders_dict = {\n 'color': DataLoader(color_train_dataset, batch_size=batch_size, shuffle=True, num_workers=2),\n 'pattern': DataLoader(pattern_train_dataset, 
batch_size=batch_size, shuffle=True, num_workers=2),\n}\nvalid_dataloaders_dict = {\n 'color': DataLoader(color_valid_dataset, batch_size=batch_size, shuffle=False, num_workers=2),\n 'pattern': DataLoader(pattern_valid_dataset, batch_size=batch_size, shuffle=False, num_workers=2),\n}", "_____no_output_____" ] ], [ [ "The dictionary of dataloaders is then put into an instance of the Octopod `MultiDatasetLoader` class.", "_____no_output_____" ] ], [ [ "TrainLoader = MultiDatasetLoader(loader_dict=train_dataloaders_dict)\nlen(TrainLoader)", "_____no_output_____" ], [ "ValidLoader = MultiDatasetLoader(loader_dict=valid_dataloaders_dict, shuffle=False)\nlen(ValidLoader)", "_____no_output_____" ] ], [ [ "We need to create a dictionary of the tasks and the number of unique values so that we can create our model.", "_____no_output_____" ] ], [ [ "new_task_dict = {\n 'color': TRAIN_COLOR_DF['simple_color_cat'].nunique(),\n 'pattern': TRAIN_PATTERN_DF['pattern_type_cat'].nunique(),\n}", "_____no_output_____" ], [ "new_task_dict", "_____no_output_____" ], [ "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint(device)", "cuda:0\n" ] ], [ [ "Create Model and Learner\n===", "_____no_output_____" ], [ "These are completely new tasks so we use `new_task_dict`. If we had already trained a model on some tasks, we would use `pretrained_task_dict`.\n\nWe are using the trained bert weights from the `transformers` library.", "_____no_output_____" ] ], [ [ "model = BertForMultiTaskClassification.from_pretrained(\n 'bert-base-uncased',\n new_task_dict=new_task_dict\n)", "_____no_output_____" ] ], [ [ "You will likely need to explore different values in this section to find some that work\nfor your particular model.", "_____no_output_____" ] ], [ [ "lr = 1e-5\nnum_total_steps = len(TrainLoader)\nnum_warmup_steps = int(len(TrainLoader) * 0.1)\n\noptimizer = AdamW(model.parameters(), lr=lr, correct_bias=True)\n\nscheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_total_steps\n)", "_____no_output_____" ], [ "loss_function_dict = {'color': 'categorical_cross_entropy', 'pattern': 'categorical_cross_entropy'}\nmetric_function_dict = {'color': 'multi_class_acc', 'pattern': 'multi_class_acc'}", "_____no_output_____" ], [ "learn = MultiTaskLearner(model, TrainLoader, ValidLoader, new_task_dict, loss_function_dict, metric_function_dict)", "_____no_output_____" ] ], [ [ "Train Model\n===", "_____no_output_____" ], [ "As your model trains, you can see some output of how the model is performing overall and how it is doing on each individual task.", "_____no_output_____" ] ], [ [ "learn.fit(\n num_epochs=10,\n scheduler=scheduler,\n step_scheduler_on_batch=False,\n optimizer=optimizer,\n device=device,\n best_model=True\n)", "_____no_output_____" ] ], [ [ "Validate Model\n===", "_____no_output_____" ], [ "We provide a method on the learner called `get_val_preds`, which makes predictions on the validation data. 
You can then use this to analyze your model's performance in more detail.", "_____no_output_____" ] ], [ [ "pred_dict = learn.get_val_preds(device)", "_____no_output_____" ], [ "pred_dict", "_____no_output_____" ] ], [ [ "Save/Export Model\n===", "_____no_output_____" ], [ "Once we are happy with our training we can save (or export) our model, using the `save` method (or `export`).\n\nSee the docs for the difference between `save` and `export`.\n\nWe will need the saved model later to use in the ensemble model", "_____no_output_____" ] ], [ [ "model.save(folder='models/', model_id='TEXT_MODEL1')", "_____no_output_____" ], [ "model.export(folder='models/', model_id='TEXT_MODEL1')", "_____no_output_____" ] ], [ [ "Now that we have an image model and a text model, we can move to `Step4_train_ensemble_model`.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec58ae4509ffb43e2903180003ba47e1ad7f3179
200,459
ipynb
Jupyter Notebook
plots/.ipynb_checkpoints/contour_kycd-checkpoint.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
plots/.ipynb_checkpoints/contour_kycd-checkpoint.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
plots/.ipynb_checkpoints/contour_kycd-checkpoint.ipynb
leedtan/SparklesSunshinePuppies
ab208c627081ae2c2654a3620d2061629d5636f4
[ "MIT" ]
null
null
null
164.176085
81,020
0.84963
[ [ [ "import os\n# import folium\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\nimport plotly.tools as tls\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# print(folium.__version__)", "_____no_output_____" ], [ "dfloc = pd.read_csv('kycd_loc.csv', header=9)\ndfloc.head()", "_____no_output_____" ], [ "dfloc.columns = ['lat', 'lon', 'kycd', 'cnt']\ndfloc.head()", "_____no_output_____" ], [ "dfloc['lat'] = np.array([float(str(i)[1:]) for i in dfloc.lat.values])\ndfloc['lon'] = np.array([float(str(i)[:-1]) for i in dfloc.lon.values])\ndfloc.head()", "_____no_output_____" ], [ "print( sum(dfloc.isnull().values) )\nprint( dfloc.shape )\ndfloc.describe()", "[0 0 0 0]\n(557461, 4)\n" ], [ "# dfloc = dfloc[(dfloc.year <= 2015) & (dfloc.year >= 2006)]", "_____no_output_____" ], [ "dfloc[dfloc.kycd == 351].lat.values", "_____no_output_____" ], [ "f, axes = plt.subplots(3, 3, figsize=(10, 10), sharex=True, sharey=True)\n\ns = np.linspace(0, 3, 10)\ncmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)\n\n\n# 341: Petit Larceny\nx = dfloc[dfloc.kycd == 341].lon.values\ny = dfloc[dfloc.kycd == 341].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])\naxes[0,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Petit Larceny')\n\ncmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)\n\n# 578: Harrassment 2\nx = dfloc[dfloc.kycd == 578].lon.values\ny = dfloc[dfloc.kycd == 578].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,1])\naxes[0,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Harrassment 2')\n\ncmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)\n\n# 344: Assault 3 & Related Offenses\nx = dfloc[dfloc.kycd == 344].lon.values\ny = dfloc[dfloc.kycd == 344].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,2])\naxes[0,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Assault 3 & Related Offenses')\n\ncmap = sns.cubehelix_palette(start=1.0, light=1, as_cmap=True)\n\n# 351: Criminal Mischief\nx = dfloc[dfloc.kycd == 351].lon.values\ny = dfloc[dfloc.kycd == 351].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,0])\naxes[1,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Criminal Mischief')\n\ncmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)\n\n# 109: Grand Larceny\nx = dfloc[dfloc.kycd == 109].lon.values\ny = dfloc[dfloc.kycd == 109].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,1])\naxes[1,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Grand Larceny')\n\ncmap = sns.cubehelix_palette(start=1.666666666667, light=1, as_cmap=True)\n\n# 235: Dangerous Drugs\nx = dfloc[dfloc.kycd == 235].lon.values\ny = dfloc[dfloc.kycd == 235].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,2])\naxes[1,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Dangerous Drugs')\n\ncmap = sns.cubehelix_palette(start=2.0, light=1, as_cmap=True)\n\n# 361: Offense Against Public Order Sensibility\nx = dfloc[dfloc.kycd == 361].lon.values\ny = dfloc[dfloc.kycd == 361].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,0])\naxes[2,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, 
-73.700316), title = 'Offense Against Public Order Sensibility')\n\ncmap = sns.cubehelix_palette(start=2.333333333333, light=1, as_cmap=True)\n\n# 105: Robbery\nx = dfloc[dfloc.kycd == 105].lon.values\ny = dfloc[dfloc.kycd == 105].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,1])\naxes[2,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Robbery')\n\ncmap = sns.cubehelix_palette(start=2.666666666667, light=1, as_cmap=True)\n\n# 107: Burglary\nx = dfloc[dfloc.kycd == 107].lon.values\ny = dfloc[dfloc.kycd == 107].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,2])\naxes[2,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Burglary')", "_____no_output_____" ], [ "dfloc", "_____no_output_____" ], [ "\n\n\n\n\n\nf, axes = plt.subplots(3, 3, figsize=(10, 10), sharex=True, sharey=True)\n\ns = np.linspace(0, 3, 10)\ncmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)\n\n\n# 341: Petit Larceny\nx = dfloc[dfloc.kycd == 341].lon.values\ny = dfloc[dfloc.kycd == 341].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])\naxes[0,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Petit Larceny')\n\ncmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)\n\n# 578: Harrassment 2\nx = dfloc[dfloc.kycd == 578].lon.values\ny = dfloc[dfloc.kycd == 578].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,1])\naxes[0,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Harrassment 2')\n\ncmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)\n\n# 344: Assault 3 & Related Offenses\nx = dfloc[dfloc.kycd == 344].lon.values\ny = dfloc[dfloc.kycd == 344].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,2])\naxes[0,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Assault 3 & Related Offenses')\n\ncmap = sns.cubehelix_palette(start=1.0, light=1, as_cmap=True)\n\n# 351: Criminal Mischief\nx = dfloc[dfloc.kycd == 351].lon.values\ny = dfloc[dfloc.kycd == 351].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,0])\naxes[1,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Criminal Mischief')\n\ncmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)\n\n# 109: Grand Larceny\nx = dfloc[dfloc.kycd == 109].lon.values\ny = dfloc[dfloc.kycd == 109].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,1])\naxes[1,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Grand Larceny')\n\ncmap = sns.cubehelix_palette(start=1.666666666667, light=1, as_cmap=True)\n\n# 235: Dangerous Drugs\nx = dfloc[dfloc.kycd == 235].lon.values\ny = dfloc[dfloc.kycd == 235].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[1,2])\naxes[1,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Dangerous Drugs')\n\ncmap = sns.cubehelix_palette(start=2.0, light=1, as_cmap=True)\n\n# 361: Offense Against Public Order Sensibility\nx = dfloc[dfloc.kycd == 361].lon.values\ny = dfloc[dfloc.kycd == 361].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,0])\naxes[2,0].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Offense Against Public Order Sensibility')\n\ncmap = sns.cubehelix_palette(start=2.333333333333, light=1, as_cmap=True)\n\n# 105: Robbery\nx = dfloc[dfloc.kycd == 105].lon.values\ny = 
dfloc[dfloc.kycd == 105].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,1])\naxes[2,1].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Robbery')\n\ncmap = sns.cubehelix_palette(start=2.666666666667, light=1, as_cmap=True)\n\n# 107: Burglary\nx = dfloc[dfloc.kycd == 107].lon.values\ny = dfloc[dfloc.kycd == 107].lat.values\nsns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[2,2])\naxes[2,2].set(ylim=(40.498061, 40.912723), xlim=(-74.255076, -73.700316), title = 'Burglary')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec58af06ceb927badc97fcdf18087f303473c85b
5,125
ipynb
Jupyter Notebook
1_2_Convolutional_Filters_Edge_Detection/7. Haar Cascade, Face Detection.ipynb
davidoort/CVND_Exercises
9989a0475983ad6b828bb9d0b0828b2e85d8d230
[ "MIT" ]
33
2019-11-08T19:36:38.000Z
2022-03-30T23:41:54.000Z
1_2_Convolutional_Filters_Edge_Detection/7. Haar Cascade, Face Detection.ipynb
davidoort/CVND_Exercises
9989a0475983ad6b828bb9d0b0828b2e85d8d230
[ "MIT" ]
5
2021-03-19T01:13:24.000Z
2022-03-11T23:49:57.000Z
1_2_Convolutional_Filters_Edge_Detection/7. Haar Cascade, Face Detection.ipynb
davidoort/CVND_Exercises
9989a0475983ad6b828bb9d0b0828b2e85d8d230
[ "MIT" ]
34
2019-12-25T05:15:28.000Z
2022-02-26T17:38:56.000Z
32.643312
401
0.618537
[ [ [ "## Face detection using OpenCV\n\nOne older (from around 2001), but still popular scheme for face detection is a Haar cascade classifier; these classifiers in the OpenCV library and use feature-based classification cascades that learn to isolate and detect faces in an image. You can read [the original paper proposing this approach here](https://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/viola-cvpr-01.pdf).\n\nLet's see how face detection works on an exampe in this notebook.", "_____no_output_____" ] ], [ [ "# import required libraries for this section\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 ", "_____no_output_____" ], [ "# load in color image for face detection\nimage = cv2.imread('images/multi_faces.jpg')\n\n# convert to RBG\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nplt.figure(figsize=(20,10))\nplt.imshow(image)", "_____no_output_____" ] ], [ [ "To use a face detector, we'll first convert the image from color to grayscale. For face detection this is perfectly fine to do as there is plenty non-color specific structure in the human face for our detector to learn on.", "_____no_output_____" ] ], [ [ "# convert to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) \n\nplt.figure(figsize=(20,10))\nplt.imshow(gray, cmap='gray')", "_____no_output_____" ] ], [ [ "Next we load in the fully trained architecture of the face detector, found in the file `detector_architectures/ haarcascade_frontalface_default.xml`,and use it on our image to find faces!\n\n**A note on parameters** \n\nHow many faces are detected is determined by the function, `detectMultiScale` which aims to detect faces of varying sizes. The inputs to this function are: `(image, scaleFactor, minNeighbors)`; you will often detect more faces with a smaller scaleFactor, and lower value for minNeighbors, but raising these values often produces better matches. Modify these values depending on your input image.", "_____no_output_____" ] ], [ [ "# load in cascade classifier\nface_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')\n\n# run the detector on the grayscale image\nfaces = face_cascade.detectMultiScale(gray, 4, 6)", "_____no_output_____" ] ], [ [ "The output of the classifier is an array of detections; coordinates that define the dimensions of a bounding box around each face. Note that this always outputs a bounding box that is square in dimension.", "_____no_output_____" ] ], [ [ "# print out the detections found\nprint ('We found ' + str(len(faces)) + ' faces in this image')\nprint (\"Their coordinates and lengths/widths are as follows\")\nprint ('=============================')\nprint (faces)", "_____no_output_____" ] ], [ [ "Let's plot the corresponding detection boxes on our original image to see how well we've done. ", "_____no_output_____" ] ], [ [ "img_with_detections = np.copy(image) # make a copy of the original image to plot rectangle detections ontop of\n\n# loop over our detections and draw their corresponding boxes on top of our original image\nfor (x,y,w,h) in faces:\n # draw next detection as a red rectangle on top of the original image. \n # Note: the fourth element (255,0,0) determines the color of the rectangle, \n # and the final argument (here set to 5) determines the width of the drawn rectangle\n cv2.rectangle(img_with_detections,(x,y),(x+w,y+h),(255,0,0),5) \n\n# display the result\nplt.figure(figsize=(20,10))\nplt.imshow(img_with_detections)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec58bb55c82484b6e3fcd1c0b888c655b7a32911
12,349
ipynb
Jupyter Notebook
examples/api.ipynb
BaptisteCecconi/python-webgeocalc
be3ed33ae7d509766d90fc61bc0dd557ad635e87
[ "MIT" ]
6
2019-01-30T00:21:11.000Z
2021-11-26T15:18:00.000Z
examples/api.ipynb
BaptisteCecconi/python-webgeocalc
be3ed33ae7d509766d90fc61bc0dd557ad635e87
[ "MIT" ]
8
2019-09-19T14:09:22.000Z
2022-02-16T13:10:48.000Z
examples/api.ipynb
BaptisteCecconi/python-webgeocalc
be3ed33ae7d509766d90fc61bc0dd557ad635e87
[ "MIT" ]
5
2019-10-18T07:47:11.000Z
2022-02-04T21:36:00.000Z
20.930508
166
0.501903
[ [ [ "Python package for NAIF WebGeoCalc API\n======================================\n\nIn december 2018, [JPL/NAIF](https://naif.jpl.nasa.gov/naif/webgeocalc.html)\nannounced an **experimental**\n[API RESTful interface](https://naif.jpl.nasa.gov/naif/WebGeocalc_announcement.pdf)\nfor their new \n[WebGeocalc server](https://wgc2.jpl.nasa.gov:8443/webgeocalc)\n(which make online SPICE calculations).\n[Documentation](https://wgc2.jpl.nasa.gov:8443/webgeocalc/documents/api-info.html)\nand [JavaScript examples](https://wgc2.jpl.nasa.gov:8443/webgeocalc/example/perform-calculation.html)\nare already available.\n\nThis package is an **early attempt** to provide a Python interface to\nmake SPICE calculation through this API.\n\n\nDisclaimer\n----------\nThis project is not supported or endorsed by either JPL, NAIF or NASA.\nThe code is provided *\"as is\"*, use at your own risk.", "_____no_output_____" ], [ "# Use WebGeoCalc API", "_____no_output_____" ] ], [ [ "from webgeocalc import API\n\nAPI.url", "_____no_output_____" ] ], [ [ "The default API endpoint can be defined with the `WGC_URL` global environment variable.\nIf it is not present, the `API` will fallback to the [JPL endpoint](https://wgc2.jpl.nasa.gov:8443/webgeocalc/api/).\n\nYou can also use the ESA webgeocalc server:", "_____no_output_____" ] ], [ [ "from webgeocalc import ESA_API\n\nESA_API.url", "_____no_output_____" ] ], [ [ "## Get kernel sets", "_____no_output_____" ] ], [ [ "kernel_sets = API.kernel_sets() # /kernel-sets\nkernel_sets", "_____no_output_____" ] ], [ [ "### Kernel set object", "_____no_output_____" ] ], [ [ "kernel_set = kernel_sets[0]\nint(kernel_set) # kernelSetId", "_____no_output_____" ], [ "str(kernel_set) # Caption", "_____no_output_____" ], [ "kernel_set.description # Get kernel attribute", "_____no_output_____" ], [ "kernel_set.keys()", "_____no_output_____" ], [ "kernel_set.values()", "_____no_output_____" ], [ "dict(kernel_set.items())", "_____no_output_____" ] ], [ [ "### Get a kernel set by it's `id` or `caption name`:", "_____no_output_____" ] ], [ [ "# By ID\nAPI.kernel_set(1)", "_____no_output_____" ], [ "# By full caption name\nAPI.kernel_set('Solar System Kernels')", "_____no_output_____" ], [ "# Not case sensitive\nAPI.kernel_set('solar system kernels')", "_____no_output_____" ], [ "# Search by partial name\nAPI.kernel_set('Solar')", "_____no_output_____" ] ], [ [ "#### Handling errors:", "_____no_output_____" ] ], [ [ "from webgeocalc.errors import TooManyKernelSets, KernelSetNotFound\n\n# More than one kernel found\ntry:\n API.kernel_set('Cassini')\nexcept TooManyKernelSets as err:\n print(err)", "Too many kernel sets contains 'Cassini' in their names:\n - Cassini Huygens\n - SPICE Class -- CASSINI Remote Sensing Lesson Kernels\n" ], [ "# Kernel not found\ntry:\n API.kernel_set('Missing kernel')\nexcept KernelSetNotFound as err:\n print(err)", "Kernel set 'Missing kernel' not found\n" ] ], [ [ "## Get bodies", "_____no_output_____" ] ], [ [ "bodies = API.bodies(5) # /kernel-set/{kernelSetId}/bodies\n# or\nAPI.bodies('Cassini Huygens')", "_____no_output_____" ], [ "body = bodies[0]\nprint(f\"Body `id`: {int(body)} and `name`: {str(body)}\")", "Body `id`: -82 and `name`: CASSINI\n" ] ], [ [ "## Get frames", "_____no_output_____" ] ], [ [ "frames = API.frames(5) # /kernel-set/{kernelSetId}/frames\n# or\nAPI.frames('Cassini Huygens')", "_____no_output_____" ], [ "frames[58].items()", "_____no_output_____" ] ], [ [ "## Get Instruments", "_____no_output_____" ] ], [ [ "instruments = 
API.instruments(5) # /kernel-set/{kernelSetId}/intruments\n# or\nAPI.instruments('Cassini Huygens')", "_____no_output_____" ], [ "print(f\"Body `id`: {int(instruments[0])} and `name`: {str(instruments[0])}\")", "Body `id`: -82898 and `name`: CASSINI_CIRS_RAD\n" ] ], [ [ "---\n**Next:** [Make a calculation](calculation.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec58c4fa31f1b91ed38e669df071eb7b5e8b35e4
891,787
ipynb
Jupyter Notebook
examples/xarray_seasonal_means.ipynb
yutiansut/xarray
78665873990de4189258223e359bec81c60f4f2d
[ "Apache-2.0" ]
51
2019-02-01T19:43:37.000Z
2022-03-16T09:07:03.000Z
examples/xarray_seasonal_means.ipynb
yutiansut/xarray
78665873990de4189258223e359bec81c60f4f2d
[ "Apache-2.0" ]
3
2016-10-26T22:52:19.000Z
2019-05-02T23:40:52.000Z
examples/xarray_seasonal_means.ipynb
yutiansut/xarray
78665873990de4189258223e359bec81c60f4f2d
[ "Apache-2.0" ]
35
2019-02-08T02:00:31.000Z
2022-03-01T23:17:00.000Z
2,083.614486
876,344
0.958712
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Calculating-Seasonal-Averages-from-Timeseries-of-Monthly-Means-\" data-toc-modified-id=\"Calculating-Seasonal-Averages-from-Timeseries-of-Monthly-Means--1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Calculating Seasonal Averages from Timeseries of Monthly Means </a></span><ul class=\"toc-item\"><li><ul class=\"toc-item\"><li><ul class=\"toc-item\"><li><span><a href=\"#Some-calendar-information-so-we-can-support-any-netCDF-calendar.\" data-toc-modified-id=\"Some-calendar-information-so-we-can-support-any-netCDF-calendar.-1.0.0.1\"><span class=\"toc-item-num\">1.0.0.1&nbsp;&nbsp;</span>Some calendar information so we can support any netCDF calendar.</a></span></li><li><span><a href=\"#A-few-calendar-functions-to-determine-the-number-of-days-in-each-month\" data-toc-modified-id=\"A-few-calendar-functions-to-determine-the-number-of-days-in-each-month-1.0.0.2\"><span class=\"toc-item-num\">1.0.0.2&nbsp;&nbsp;</span>A few calendar functions to determine the number of days in each month</a></span></li><li><span><a href=\"#Open-the-Dataset\" data-toc-modified-id=\"Open-the-Dataset-1.0.0.3\"><span class=\"toc-item-num\">1.0.0.3&nbsp;&nbsp;</span>Open the <code>Dataset</code></a></span></li><li><span><a href=\"#Now-for-the-heavy-lifting:\" data-toc-modified-id=\"Now-for-the-heavy-lifting:-1.0.0.4\"><span class=\"toc-item-num\">1.0.0.4&nbsp;&nbsp;</span>Now for the heavy lifting:</a></span></li></ul></li></ul></li></ul></li></ul></div>", "_____no_output_____" ], [ "Calculating Seasonal Averages from Timeseries of Monthly Means \n=====\n\nAuthor: [Joe Hamman](https://github.com/jhamman/)\n\nThe data used for this example can be found in the [xray-data](https://github.com/xray/xray-data) repository. You may need to change the path to `rasm.nc` below.\n\nSuppose we have a netCDF or xray Dataset of monthly mean data and we want to calculate the seasonal average. To do this properly, we need to calculate the weighted average considering that each month has a different number of days.\n\nSuppose we have a netCDF or `xarray.Dataset` of monthly mean data and we want to calculate the seasonal average. To do this properly, we need to calculate the weighted average considering that each month has a different number of days.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom netCDF4 import num2date\nimport matplotlib.pyplot as plt \n\nprint(\"numpy version : \", np.__version__)\nprint(\"pandas version : \", pd.__version__)\nprint(\"xarray version : \", xr.__version__)", "numpy version : 1.14.3\npandas version : 0.23.4\nxarray version : 0.11.0+10.gc01767ce\n" ] ], [ [ "#### Some calendar information so we can support any netCDF calendar. 
", "_____no_output_____" ] ], [ [ "dpm = {'noleap': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '365_day': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'standard': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'proleptic_gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'all_leap': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '366_day': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '360_day': [0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30]} ", "_____no_output_____" ] ], [ [ "#### A few calendar functions to determine the number of days in each month\nIf you were just using the standard calendar, it would be easy to use the `calendar.month_range` function.", "_____no_output_____" ] ], [ [ "def leap_year(year, calendar='standard'):\n \"\"\"Determine if year is a leap year\"\"\"\n leap = False\n if ((calendar in ['standard', 'gregorian',\n 'proleptic_gregorian', 'julian']) and\n (year % 4 == 0)):\n leap = True\n if ((calendar == 'proleptic_gregorian') and\n (year % 100 == 0) and\n (year % 400 != 0)):\n leap = False\n elif ((calendar in ['standard', 'gregorian']) and\n (year % 100 == 0) and (year % 400 != 0) and\n (year < 1583)):\n leap = False\n return leap\n\ndef get_dpm(time, calendar='standard'):\n \"\"\"\n return a array of days per month corresponding to the months provided in `months`\n \"\"\"\n month_length = np.zeros(len(time), dtype=np.int)\n \n cal_days = dpm[calendar]\n \n for i, (month, year) in enumerate(zip(time.month, time.year)):\n month_length[i] = cal_days[month]\n if leap_year(year, calendar=calendar):\n month_length[i] += 1\n return month_length", "_____no_output_____" ] ], [ [ "#### Open the `Dataset`", "_____no_output_____" ] ], [ [ "ds = xr.tutorial.open_dataset('rasm').load()\nprint(ds)", "<xarray.Dataset>\nDimensions: (time: 36, x: 275, y: 205)\nCoordinates:\n * time (time) object 1980-09-16 12:00:00 ... 1983-08-17 00:00:00\n xc (y, x) float64 189.2 189.4 189.6 189.7 ... 17.65 17.4 17.15 16.91\n yc (y, x) float64 16.53 16.78 17.02 17.27 ... 28.26 28.01 27.76 27.51\nDimensions without coordinates: x, y\nData variables:\n Tair (time, y, x) float64 nan nan nan nan nan ... 29.8 28.66 28.19 28.21\nAttributes:\n title: /workspace/jhamman/processed/R1002RBRxaaa01a/l...\n institution: U.W.\n source: RACM R1002RBRxaaa01a\n output_frequency: daily\n output_mode: averaged\n convention: CF-1.4\n references: Based on the initial model of Liang et al., 19...\n comment: Output from the Variable Infiltration Capacity...\n nco_openmp_thread_number: 1\n NCO: \"4.6.0\"\n history: Tue Dec 27 14:15:22 2016: ncatted -a dimension...\n" ] ], [ [ "#### Now for the heavy lifting:\nWe first have to come up with the weights,\n- calculate the month lengths for each monthly data record\n- calculate weights using `groupby('time.season')`\n\nFinally, we just need to multiply our weights by the `Dataset` and sum allong the time dimension. 
", "_____no_output_____" ] ], [ [ "# Make a DataArray with the number of days in each month, size = len(time)\nmonth_length = xr.DataArray(get_dpm(ds.time.to_index(), calendar='noleap'),\n coords=[ds.time], name='month_length')\n\n# Calculate the weights by grouping by 'time.season'.\n# Conversion to float type ('astype(float)') only necessary for Python 2.x\nweights = month_length.groupby('time.season') / month_length.astype(float).groupby('time.season').sum()\n\n# Test that the sum of the weights for each season is 1.0\nnp.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))\n\n# Calculate the weighted average\nds_weighted = (ds * weights).groupby('time.season').sum(dim='time')", "_____no_output_____" ], [ "print(ds_weighted)", "<xarray.Dataset>\nDimensions: (season: 4, x: 275, y: 205)\nCoordinates:\n xc (y, x) float64 189.2 189.4 189.6 189.7 ... 17.65 17.4 17.15 16.91\n yc (y, x) float64 16.53 16.78 17.02 17.27 ... 28.26 28.01 27.76 27.51\n * season (season) object 'DJF' 'JJA' 'MAM' 'SON'\nDimensions without coordinates: x, y\nData variables:\n Tair (season, y, x) float64 0.0 0.0 0.0 0.0 ... 23.15 22.08 21.73 21.96\n" ], [ "# only used for comparisons\nds_unweighted = ds.groupby('time.season').mean('time')\nds_diff = ds_weighted - ds_unweighted", "/home/deepak/work/python/xarray/xarray/core/nanops.py:161: RuntimeWarning: Mean of empty slice\n return np.nanmean(a, axis=axis, dtype=dtype)\n" ], [ "# Quick plot to show the results\nnotnull = pd.notnull(ds_unweighted['Tair'][0])\n\nfig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14,12))\nfor i, season in enumerate(('DJF', 'MAM', 'JJA', 'SON')):\n ds_weighted['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 0], vmin=-30, vmax=30, cmap='Spectral_r', \n add_colorbar=True, extend='both')\n \n ds_unweighted['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 1], vmin=-30, vmax=30, cmap='Spectral_r', \n add_colorbar=True, extend='both')\n\n ds_diff['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 2], vmin=-0.1, vmax=.1, cmap='RdBu_r',\n add_colorbar=True, extend='both')\n\n axes[i, 0].set_ylabel(season)\n axes[i, 1].set_ylabel('')\n axes[i, 2].set_ylabel('')\n\nfor ax in axes.flat:\n ax.axes.get_xaxis().set_ticklabels([])\n ax.axes.get_yaxis().set_ticklabels([])\n ax.axes.axis('tight')\n ax.set_xlabel('')\n \naxes[0, 0].set_title('Weighted by DPM')\naxes[0, 1].set_title('Equal Weighting')\naxes[0, 2].set_title('Difference')\n \nplt.tight_layout()\n\nfig.suptitle('Seasonal Surface Air Temperature', fontsize=16, y=1.02)", "_____no_output_____" ], [ "# Wrap it into a simple function\ndef season_mean(ds, calendar='standard'):\n # Make a DataArray of season/year groups\n year_season = xr.DataArray(ds.time.to_index().to_period(freq='Q-NOV').to_timestamp(how='E'),\n coords=[ds.time], name='year_season')\n\n # Make a DataArray with the number of days in each month, size = len(time)\n month_length = xr.DataArray(get_dpm(ds.time.to_index(), calendar=calendar),\n coords=[ds.time], name='month_length')\n # Calculate the weights by grouping by 'time.season'\n weights = month_length.groupby('time.season') / month_length.groupby('time.season').sum()\n\n # Test that the sum of the weights for each season is 1.0\n np.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))\n\n # Calculate the weighted average\n return (ds * weights).groupby('time.season').sum(dim='time')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec58d0ab562c010afaa9ee24da72cca20b65a716
12,411
ipynb
Jupyter Notebook
notebooks/evaluate_reconstruction_brain.ipynb
NikEfth/STIR-exercises
48c18a3e8c1bfa5eaa4e04d744967f2e31cafc96
[ "Apache-2.0" ]
13
2017-10-04T18:06:04.000Z
2022-03-05T07:45:14.000Z
notebooks/evaluate_reconstruction_brain.ipynb
356255531/STIR_examples
72ae861f33daa6f47a928e861e674804730b1caf
[ "Apache-2.0" ]
13
2018-10-26T14:17:42.000Z
2021-04-29T16:24:27.000Z
notebooks/evaluate_reconstruction_brain.ipynb
356255531/STIR_examples
72ae861f33daa6f47a928e861e674804730b1caf
[ "Apache-2.0" ]
9
2017-10-24T13:08:15.000Z
2021-11-20T06:50:27.000Z
23.198131
106
0.541697
[ [ [ "# -*- coding: utf-8 -*-\n\"\"\"\nExample script to serve as starting point for displaying the results of the brain reconstruction.\n\nThe current script reads results from the simulation and displays them.\n\nPrerequisite:\nYou should have executed the following on your command prompt\n ./run_simulation_brain.sh\n ./run_reconstruction_brain.sh\n\nAuthor: Kris Thielemans\n\"\"\"", "_____no_output_____" ], [ "%matplotlib notebook", "_____no_output_____" ] ], [ [ "# Initial imports", "_____no_output_____" ] ], [ [ "import numpy\nimport matplotlib.pyplot as plt\nimport stir\nfrom stirextra import *\nimport os", "_____no_output_____" ] ], [ [ "# go to directory with input files", "_____no_output_____" ] ], [ [ "# adapt this path to your situation (or start everything in the exercises directory)\nos.chdir(os.getenv('STIR_exercises_PATH'))", "_____no_output_____" ] ], [ [ "# change directory to where the output files are", "_____no_output_____" ] ], [ [ "os.chdir('working_folder/brain')", "_____no_output_____" ] ], [ [ "# Read in images", "_____no_output_____" ] ], [ [ "FBP=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('fbp_recon.hv'));\nEMML240=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('EMML_240.hv'));\nOSEM240=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('OSEM_240.hv'));\n#OSEMPSF240=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('OSEMPSF_240.hv'));\n\nfilteredEMML240=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('filtered_EMML_240.hv'));\nfilteredOSEM240=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('filtered_OSEM_240.hv'));", "_____no_output_____" ] ], [ [ "# find max and slice number for plots", "_____no_output_____" ] ], [ [ "maxforplot=EMML240.max();\n# pick central slice\nslice=numpy.int(EMML240.shape[0]/2);", "_____no_output_____" ] ], [ [ "# bitmap display of images FBP vs EMML", "_____no_output_____" ] ], [ [ "fig=plt.figure();\nax=plt.subplot(1,2,1);\nplt.imshow(EMML240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('EMML240');\n\nax=plt.subplot(1,2,2);\nplt.imshow(FBP[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('FBP');\n\nfig.savefig('EMML_vs_FBP.png')", "_____no_output_____" ] ], [ [ "# bitmap display of images EMML vs OSEM", "_____no_output_____" ] ], [ [ "fig=plt.figure();\nax=plt.subplot(1,3,1);\nplt.imshow(EMML240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('EMML240');\n\nax=plt.subplot(1,3,2);\nplt.imshow(OSEM240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('OSEM240');\n\ndiff=EMML240-OSEM240;\nax=plt.subplot(1,3,3);\nplt.imshow(diff[slice,:,:,]);\nplt.clim(-maxforplot/50,maxforplot/50)\nplt.colorbar();\nplt.axis('off');\nax.set_title('diff');\n\nfig.savefig('EMML_vs_OSEM_bitmaps.png')", "_____no_output_____" ] ], [ [ "# Display central horizontal profiles through the image", "_____no_output_____" ] ], [ [ "# pick central line\nrow=numpy.int(OSEM240.shape[1]/2);\n\nfig=plt.figure()\nplt.plot(EMML240[slice,row,:],'b');\n#plt.hold(True)\nplt.plot(OSEM240[slice,row,:],'c');\nplt.legend(('EMML240','OSEM240'));\n\nfig.savefig('EMM_vs_OSEM_profiles.png')", "_____no_output_____" ] ], [ [ "# bitmap display of images EMML vs OSEM after postfiltering", "_____no_output_____" ] ], [ [ "# note: check postfilter_Gaussian.par for parameters used for the postfilter\nmaxforplot=filteredEMML240.max();\n# pick central 
slice\nslice=numpy.int(EMML240.shape[0]/2);\n\nfig=plt.figure();\nax=plt.subplot(1,3,1);\nplt.imshow(filteredEMML240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('EMML240\\nfiltered');\n\nax=plt.subplot(1,3,2);\nplt.imshow(filteredOSEM240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('OSEM240\\nfiltered');\n\ndiff=filteredEMML240-filteredOSEM240;\nax=plt.subplot(1,3,3);\nplt.imshow(diff[slice,:,:,]);\nplt.clim(-maxforplot/50,maxforplot/50)\nplt.colorbar();\nplt.axis('off');\nax.set_title('diff');\n\nfig.savefig('EMML_vs_OSEM_postfiltered_bitmaps.png')", "_____no_output_____" ] ], [ [ "# example code for seeing evaluation over (sub)iterations with EMML and OSEM", "_____no_output_____" ] ], [ [ "# The reconstruction script runs EMML and OSEM for 240 (sub)iterations, saving \n# after every 24 (sub)iterations, i.e. image-updates.\n# We can see what the difference is between after one image-update, or for OSEM\n# after when full iteration (using all 8 subsets)\n#\n# First read in extra images\nOSEM241=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('OSEM_240_continued_1.hv'));\nOSEM242=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('OSEM_240_continued_2.hv'));\nOSEM248=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('OSEM_240_continued_8.hv'));\nEMML241=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('EMML_240_continued_1.hv'));\nEMML242=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('EMML_240_continued_2.hv'));\nEMML248=to_numpy(stir.FloatVoxelsOnCartesianGrid.read_from_file('EMML_240_continued_8.hv'));", "_____no_output_____" ] ], [ [ "# bitmaps showing images and differences", "_____no_output_____" ] ], [ [ "maxforplot=EMML240.max();\n\n# pick central slice\nslice=numpy.int(EMML240.shape[0]/2);\n\nfig=plt.figure();\nax=plt.subplot(1,3,1);\nplt.imshow(OSEM240[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('OSEM240');\n\nax=plt.subplot(1,3,2);\nplt.imshow(OSEM241[slice,:,:,]);\nplt.clim(0,maxforplot)\nplt.colorbar();\nplt.axis('off');\nax.set_title('OSEM241');\n\ndiff=OSEM241-OSEM240;\nax=plt.subplot(1,3,3);\nplt.imshow(diff[slice,:,:,]);\nplt.clim(-maxforplot/50,maxforplot/50)\nplt.colorbar();\nplt.axis('off');\nax.set_title('diff');\n\nfig.savefig('EMML_vs_OSEM_update_bitmaps.png')", "_____no_output_____" ] ], [ [ "# Display central horizontal profiles through the image for EMML", "_____no_output_____" ] ], [ [ "# pick central line\nrow=numpy.int(EMML240.shape[1]/2);\n\nfig=plt.figure()\nplt.subplot(1,2,1)\n#plt.hold(True)\nplt.plot(EMML241[slice,row,:],'b');\nplt.plot(EMML240[slice,row,:],'c');\nplt.legend(('EMML241','EMML240'));\n\nplt.subplot(1,2,2)\n#plt.hold(True);\nplt.plot((EMML241-EMML240)[slice,row,:],'b');\nplt.plot((EMML242-EMML241)[slice,row,:],'k');\nplt.plot((EMML248-EMML240)[slice,row,:],'r');\nplt.legend(('iter 241 - iter 240', 'iter 242 - iter 241', 'iter 248 - iter 240'));", "_____no_output_____" ] ], [ [ "# Display central horizontal profiles through the image for OSEM", "_____no_output_____" ] ], [ [ "fig=plt.figure()\nplt.subplot(1,2,1)\n#plt.hold(True)\nplt.plot(OSEM241[slice,row,:],'b');\nplt.plot(OSEM240[slice,row,:],'c');\nplt.legend(('OSEM241','OSEM240'));\n\nplt.subplot(1,2,2)\n#plt.hold(True);\nplt.plot((OSEM241-OSEM240)[slice,row,:],'b');\nplt.plot((OSEM242-OSEM241)[slice,row,:],'k');\nplt.plot((OSEM248-OSEM240)[slice,row,:],'r');\nplt.legend(('subiter 241 - subiter 240', \n 'subiter 242 - subiter 
241', 'subiter 248 - subiter 240'));", "_____no_output_____" ] ], [ [ "# close all plots", "_____no_output_____" ] ], [ [ "plt.close('all')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec58db6653fe01d6310945f2658a68e9ac0d0594
43,750
ipynb
Jupyter Notebook
Projects/6-Sentiment-Analysis/project_6_starter.ipynb
scumabo/AI4Trading
9a36e18fc25e849b80718c3a462637b086089945
[ "Apache-2.0" ]
98
2020-05-22T00:41:23.000Z
2022-03-24T12:57:15.000Z
Projects/6-Sentiment-Analysis/project_6_starter.ipynb
kevingoh/AI-for-Trading
9d8e85c0753e41fec6b55b5803cdfd34668d8f71
[ "Apache-2.0" ]
1
2020-01-04T05:32:35.000Z
2020-01-04T18:22:21.000Z
Projects/6-Sentiment-Analysis/project_6_starter.ipynb
kevingoh/AI-for-Trading
9d8e85c0753e41fec6b55b5803cdfd34668d8f71
[ "Apache-2.0" ]
74
2020-05-05T16:44:42.000Z
2022-03-23T06:59:09.000Z
35.743464
1,481
0.554034
[ [ [ "# Project 6: Analyzing Stock Sentiment from Twits\n## Instructions\nEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment.\n\n## Packages\nWhen you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.\n\n### Load Packages", "_____no_output_____" ] ], [ [ "import json\nimport nltk\nimport os\nimport random\nimport re\nimport torch\nfrom tqdm import tqdm\n\nfrom torch import nn, optim\nimport torch.nn.functional as F", "_____no_output_____" ] ], [ [ "## Introduction\nWhen deciding the value of a company, it's important to follow the news. For example, a product recall or natural disaster in a company's product chain. You want to be able to turn this information into a signal. Currently, the best tool for the job is a Neural Network. \n\nFor this project, you'll use posts from the social media site [StockTwits](https://en.wikipedia.org/wiki/StockTwits). The community on StockTwits is full of investors, traders, and entrepreneurs. Each message posted is called a Twit. This is similar to Twitter's version of a post, called a Tweet. You'll build a model around these twits that generate a sentiment score.\n\nWe've collected a bunch of twits, then hand labeled the sentiment of each. To capture the degree of sentiment, we'll use a five-point scale: very negative, negative, neutral, positive, very positive. Each twit is labeled -2 to 2 in steps of 1, from very negative to very positive respectively. You'll build a sentiment analysis model that will learn to assign sentiment to twits on its own, using this labeled data.\n\nThe first thing we should to do, is load the data.\n\n## Import Twits \n### Load Twits Data \nThis JSON file contains a list of objects for each twit in the `'data'` field:\n\n```\n{'data':\n {'message_body': 'Neutral twit body text here',\n 'sentiment': 0},\n {'message_body': 'Happy twit body text here',\n 'sentiment': 1},\n ...\n}\n```\n\nThe fields represent the following:\n\n* `'message_body'`: The text of the twit.\n* `'sentiment'`: Sentiment score for the twit, ranges from -2 to 2 in steps of 1, with 0 being neutral.\n\n\nTo see what the data look like by printing the first 10 twits from the list. ", "_____no_output_____" ] ], [ [ "with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'twits.json'), 'r') as f:\n twits = json.load(f)\n\nprint(twits['data'][:10])", "[{'message_body': '$FITB great buy at 26.00...ill wait', 'sentiment': 2, 'timestamp': '2018-07-01T00:00:09Z'}, {'message_body': '@StockTwits $MSFT', 'sentiment': 1, 'timestamp': '2018-07-01T00:00:42Z'}, {'message_body': '#STAAnalystAlert for $TDG : Jefferies Maintains with a rating of Hold setting target price at USD 350.00. Our own verdict is Buy http://www.stocktargetadvisor.com/toprating', 'sentiment': 2, 'timestamp': '2018-07-01T00:01:24Z'}, {'message_body': '$AMD I heard there’s a guy who knows someone who thinks somebody knows something - on StockTwits.', 'sentiment': 1, 'timestamp': '2018-07-01T00:01:47Z'}, {'message_body': '$AMD reveal yourself!', 'sentiment': 0, 'timestamp': '2018-07-01T00:02:13Z'}, {'message_body': '$AAPL Why the drop? 
I warren Buffet taking out his position?', 'sentiment': 1, 'timestamp': '2018-07-01T00:03:10Z'}, {'message_body': '$BA bears have 1 reason on 06-29 to pay more attention https://dividendbot.com?s=BA', 'sentiment': -2, 'timestamp': '2018-07-01T00:04:09Z'}, {'message_body': '$BAC ok good we&#39;re not dropping in price over the weekend, lol', 'sentiment': 1, 'timestamp': '2018-07-01T00:04:17Z'}, {'message_body': '$AMAT - Daily Chart, we need to get back to above 50.', 'sentiment': 2, 'timestamp': '2018-07-01T00:08:01Z'}, {'message_body': '$GME 3% drop per week after spike... if no news in 3 months, back to 12s... if BO, then bingo... what is the odds?', 'sentiment': -2, 'timestamp': '2018-07-01T00:09:03Z'}]\n" ] ], [ [ "### Length of Data\nNow let's look at the number of twits in dataset. Print the number of twits below.", "_____no_output_____" ] ], [ [ "\"\"\"print out the number of twits\"\"\"\n\n# TODO Implement \nprint(len(twits['data']))", "1548010\n" ] ], [ [ "### Split Message Body and Sentiment Score", "_____no_output_____" ] ], [ [ "messages = [twit['message_body'] for twit in twits['data']]\n# Since the sentiment scores are discrete, we'll scale the sentiments to 0 to 4 for use in our network\nsentiments = [twit['sentiment'] + 2 for twit in twits['data']]", "_____no_output_____" ] ], [ [ "## Preprocessing the Data\nWith our data in hand we need to preprocess our text. These twits are collected by filtering on ticker symbols where these are denoted with a leader $ symbol in the twit itself. For example,\n\n`{'message_body': 'RT @google Our annual look at the year in Google blogging (and beyond) http://t.co/sptHOAh8 $GOOG',\n 'sentiment': 0}`\n\nThe ticker symbols don't provide information on the sentiment, and they are in every twit, so we should remove them. This twit also has the `@google` username, again not providing sentiment information, so we should also remove it. We also see a URL `http://t.co/sptHOAh8`. Let's remove these too.\n\nThe easiest way to remove specific words or phrases is with regex using the `re` module. You can sub out specific patterns with a space:\n\n```python\nre.sub(pattern, ' ', text)\n```\nThis will substitute a space with anywhere the pattern matches in the text. Later when we tokenize the text, we'll split appropriately on those spaces.", "_____no_output_____" ], [ "### Pre-Processing", "_____no_output_____" ] ], [ [ "nltk.download('wordnet')\nnltk.download('punkt')\n\ndef preprocess(message):\n \"\"\"\n This function takes a string as input, then performs these operations: \n - lowercase\n - remove URLs\n - remove ticker symbols \n - removes punctuation\n - tokenize by splitting the string on whitespace \n - removes any single character tokens\n \n Parameters\n ----------\n message : The text message to be preprocessed.\n \n Returns\n -------\n tokens: The preprocessed text into tokens.\n \"\"\" \n #TODO: Implement \n \n # Lowercase the twit message\n text = message.lower()\n \n # Replace URLs with a space in the message\n text = re.sub(r'https?://\\S+', ' ', text)\n \n # Replace ticker symbols with a space. The ticker symbols are any stock symbol that starts with $.\n text = re.sub(r'\\$\\S+', ' ', text)\n \n # Replace StockTwits usernames with a space. 
The usernames are any word that starts with @.\n text = re.sub(r'\\@\\S+', ' ', text)\n\n # Replace everything not a letter with a space\n text = re.sub(r'[^a-zA-Z]', ' ', text)\n \n # Tokenize by splitting the string on whitespace into a list of words\n tokens = nltk.tokenize.word_tokenize(text)\n\n # Lemmatize words using the WordNetLemmatizer. You can ignore any word that is not longer than one character.\n wnl = nltk.stem.WordNetLemmatizer()\n tokens = [wnl.lemmatize(token) for token in tokens if len(token) > 1]\n \n return tokens", "[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ] ], [ [ "### Preprocess All the Twits \nNow we can preprocess each of the twits in our dataset. Apply the function `preprocess` to all the twit messages.", "_____no_output_____" ] ], [ [ "# TODO Implement\nimport pickle\nimport os.path\n\ntokenized_twits = []\nfilename = 'tokenList.txt'\nif os.path.exists(filename):\n print('Load preprocessed twits')\n with open(filename, 'rb') as fp:\n tokenized_twits = pickle.load(fp)\nelse:\n print('Preprocessing twits')\n tokenized_twits = [preprocess(message) for message in messages]\n with open(filename, 'wb') as fp:\n pickle.dump(tokenized_twits, fp)\n\nprint(f'raw text: {messages[0]}')\nprint(f'tokenized_twits: {tokenized_twits[0]}')", "Load preprocessed twits\nraw text: $FITB great buy at 26.00...ill wait\ntokenized_twits: ['great', 'buy', 'at', 'ill', 'wait']\n" ] ], [ [ "### Bag of Words\nNow with all of our messages tokenized, we want to create a vocabulary and count up how often each word appears in our entire corpus. Use the [`Counter`](https://docs.python.org/3.1/library/collections.html#collections.Counter) function to count up all the tokens.", "_____no_output_____" ] ], [ [ "from collections import Counter\nimport numpy as np\n\n\"\"\"\nCreate a vocabulary by using Bag of words\n\"\"\"\n\n# TODO: Implement \ntokenized_words = [word for twit in tokenized_twits for word in twit]\n\nbow = Counter(tokenized_words)", "_____no_output_____" ], [ "print(bow.most_common(3))", "[('the', 398754), ('to', 379487), ('is', 284865)]\n" ] ], [ [ "### Frequency of Words Appearing in Message\nWith our vocabulary, now we'll remove some of the most common words such as 'the', 'and', 'it', etc. These words don't contribute to identifying sentiment and are really common, resulting in a lot of noise in our input. If we can filter these out, then our network should have an easier time learning.\n\nWe also want to remove really rare words that show up in a only a few twits. Here you'll want to divide the count of each word by the number of messages. Then remove words that only appear in some small fraction of the messages.", "_____no_output_____" ] ], [ [ "\"\"\"\nSet the following variables:\n freqs\n low_cutoff\n high_cutoff\n K_most_common\n\"\"\"\n\n# TODO Implement \n\n# Dictionart that contains the Frequency of words appearing in messages.\n# The key is the token and the value is the frequency of that word in the corpus.\ntotal_cnt = sum(bow.values())\nfreqs = dict(bow)\nfor key in freqs:\n freqs[key] = freqs[key]/total_cnt\n\n# Float that is the frequency cutoff. Drop words with a frequency that is lower or equal to this number.\nlow_cutoff = 5e-6\n\n# Integer that is the cut off for most common words. 
Drop words that are the `high_cutoff` most common words.\nhigh_cutoff = 15\n\n# The k most common words in the corpus. Use `high_cutoff` as the k.\nK_most_common = [word[0] for word in bow.most_common(high_cutoff)]\n\n\nfiltered_words = [word for word in freqs if (freqs[word] > low_cutoff and word not in K_most_common)]\nprint(K_most_common)\nlen(filtered_words) ", "['the', 'to', 'is', 'for', 'on', 'of', 'and', 'in', 'this', 'it', 'at', 'will', 'up', 'are', 'you']\n" ] ], [ [ "### Updating Vocabulary by Removing Filtered Words\nLet's creat three variables that will help with our vocabulary.", "_____no_output_____" ] ], [ [ "\"\"\"\nSet the following variables:\n vocab\n id2vocab\n filtered\n\"\"\"\n\n#TODO Implement\n\n# A dictionary for the `filtered_words`. The key is the word and value is an id that represents the word. \nvocab = {word:idx+1 for idx, word in enumerate(filtered_words)}\n# Reverse of the `vocab` dictionary. The key is word id and value is the word. \nid2vocab = {idx:word for word, idx in vocab.items()}\n\n# tokenized with the words not in `filtered_words` removed.\nfiltered = []\nfilename = 'filteredTwits.txt'\nif os.path.exists(filename):\n print('Load preprocessed filteredTwits')\n with open(filename, 'rb') as fp:\n filtered = pickle.load(fp)\nelse:\n print('Filtering twits')\n for twit in tokenized_twits:\n filtered.append([word for word in twit if word in filtered_words])\n with open(filename, 'wb') as fp:\n pickle.dump(filtered, fp)", "Load preprocessed filteredTwits\n" ] ], [ [ "### Balancing the classes\nLet's do a few last pre-processing steps. If we look at how our twits are labeled, we'll find that 50% of them are neutral. This means that our network will be 50% accurate just by guessing 0 every single time. To help our network learn appropriately, we'll want to balance our classes.\nThat is, make sure each of our different sentiment scores show up roughly as frequently in the data.\n\nWhat we can do here is go through each of our examples and randomly drop twits with neutral sentiment. What should be the probability we drop these twits if we want to get around 20% neutral twits starting at 50% neutral? 
We should also take this opportunity to remove messages with length 0.", "_____no_output_____" ] ], [ [ "balanced = {'messages': [], 'sentiments':[]}\n\nn_neutral = sum(1 for each in sentiments if each == 2)\nN_examples = len(sentiments)\nkeep_prob = (N_examples - n_neutral)/4/n_neutral\n\nfor idx, sentiment in enumerate(sentiments):\n message = filtered[idx]\n if len(message) == 0:\n # skip this message because it has length zero\n continue\n elif sentiment != 2 or random.random() < keep_prob:\n balanced['messages'].append(message)\n balanced['sentiments'].append(sentiment) ", "_____no_output_____" ] ], [ [ "If you did it correctly, you should see the following result ", "_____no_output_____" ] ], [ [ "n_neutral = sum(1 for each in balanced['sentiments'] if each == 2)\nN_examples = len(balanced['sentiments'])\nn_neutral/N_examples", "_____no_output_____" ] ], [ [ "Finally let's convert our tokens into integer ids which we can pass to the network.", "_____no_output_____" ] ], [ [ "token_ids = [[vocab[word] for word in message] for message in balanced['messages']]\nsentiments = balanced['sentiments']", "_____no_output_____" ], [ "print(token_ids[:10])\nprint(sentiments[:10])", "[[1, 2, 3, 4], [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 2], [18, 19, 20, 21, 22, 23, 21, 24, 25, 22, 26, 27], [30, 31, 32, 33, 34, 35, 36, 37], [38, 39, 40, 41, 42, 43], [44, 45, 46, 47, 48, 49, 13, 50, 51, 52], [53, 54, 46, 55, 56, 57, 58], [31, 59, 60, 61, 62, 63, 64, 65, 66, 57, 63, 67, 68, 69, 70, 71], [72, 2], [73, 74, 73, 75, 76]]\n[4, 4, 3, 3, 0, 3, 4, 0, 4, 0]\n" ] ], [ [ "## Neural Network\nNow we have our vocabulary which means we can transform our tokens into ids, which are then passed to our network. So, let's define the network now!\n\nHere is a nice diagram showing the network we'd like to build: \n\n#### Embed -> RNN -> Dense -> Softmax\n### Implement the text classifier\nBefore we build text classifier, if you remember from the other network that you built in \"Sentiment Analysis with an RNN\" exercise - which there, the network called \" SentimentRNN\", here we named it \"TextClassifer\" - consists of three main parts: 1) init function `__init__` 2) forward pass `forward` 3) hidden state `init_hidden`. \n\nThis network is pretty similar to the network you built expect in the `forward` pass, we use softmax instead of sigmoid. The reason we are not using sigmoid is that the output of NN is not a binary. In our network, sentiment scores have 5 possible outcomes. 
We are looking for an outcome with the highest probability thus softmax is a better choice.", "_____no_output_____" ] ], [ [ "train_on_gpu = torch.cuda.is_available()\n\nprint(\"Train on GPU\") if train_on_gpu else print(\"Train on CPU\")", "Train on GPU\n" ], [ "class TextClassifier(nn.Module):\n def __init__(self, vocab_size, embed_size, lstm_size, output_size, lstm_layers=1, dropout=0.1):\n \"\"\"\n Initialize the model by setting up the layers.\n \n Parameters\n ----------\n vocab_size : The vocabulary size.\n embed_size : The embedding layer size.\n lstm_size : The LSTM layer size.\n output_size : The output size.\n lstm_layers : The number of LSTM layers.\n dropout : The dropout probability.\n \"\"\"\n \n super().__init__()\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.lstm_size = lstm_size\n self.output_size = output_size\n self.lstm_layers = lstm_layers\n self.dropout = dropout\n \n # TODO Implement\n \n # Setup embedding layer\n # The input to the module is a list of indices, \n # and the output is the corresponding word embeddings.\n self.embedding = nn.Embedding(vocab_size, embed_size)\n \n # Setup LSTM (hidden) layers\n self.lstm = nn.LSTM(embed_size, lstm_size, lstm_layers, \n dropout = dropout, batch_first = True)\n \n # Fully-connected layer\n self.fc = nn.Linear(lstm_size, output_size)\n self.dropout = nn.Dropout(p=0.3)\n self.log_softmax = nn.LogSoftmax(dim = 1)\n\n\n def init_hidden(self, batch_size):\n \"\"\" \n Initializes hidden state\n \n Parameters\n ----------\n batch_size : The size of batches.\n \n Returns\n -------\n hidden_state\n \n \"\"\"\n \n # TODO Implement \n \n # Create two new tensors with sizes n_layers x batch_size x hidden_dim,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n if train_on_gpu:\n hidden = (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_().cuda(),\n weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_().cuda())\n else:\n hidden = (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_(),\n weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_())\n return hidden\n\n\n def forward(self, nn_input, hidden_state):\n \"\"\"\n Perform a forward pass of our model on nn_input.\n \n Parameters\n ----------\n nn_input : The batch of input to the NN.\n hidden_state : The LSTM hidden state.\n\n Returns\n -------\n logps: log softmax output\n hidden_state: The new hidden state.\n\n \"\"\"\n \n # TODO Implement\n \n # nn_input(batch_size, seq_length)\n embedding_out = self.embedding(nn_input)\n \n # embedding_out(batch_size, seq_length, embed_size)\n # hidden_state(n_layers x batch_size x hidden_dim)\n lstm_out, hidden_state = self.lstm(embedding_out, hidden_state)\n # lstm_out(batch_size, seq_length, lstm_size)\n \n # For each batch, get the last lstm output in the sequence\n lstm_out = lstm_out[:, -1, :]\n \n fc_out = self.dropout(self.fc(lstm_out))\n \n # fc_out(batch_size*seq_length, output_size)\n logps = self.log_softmax(fc_out);\n \n return logps, hidden_state", "_____no_output_____" ], [ "vocab_size = len(vocab)\nembed_size = 10\nlstm_size = 6\noutput_size = 5\nlstm_layers= 2\n\nmodel = TextClassifier(vocab_size, embed_size, lstm_size, output_size, dropout=0.1, lstm_layers = lstm_layers)\nprint(model)", "TextClassifier(\n (embedding): Embedding(6612, 10)\n (lstm): LSTM(10, 6, num_layers=2, batch_first=True, dropout=0.1)\n (fc): Linear(in_features=6, out_features=5, bias=True)\n (dropout): Dropout(p=0.3)\n (log_softmax): 
LogSoftmax()\n)\n" ] ], [ [ "### View Model", "_____no_output_____" ] ], [ [ "vocab_size = len(vocab)\nembed_size = 10\nlstm_size = 6\noutput_size = 5\nlstm_layers= 2\n\nmodel = TextClassifier(vocab_size, embed_size, lstm_size, output_size, dropout=0.1, lstm_layers = lstm_layers)\nmodel.embedding.weight.data.uniform_(-1, 1)\n\nbatch_size = 4\nseq_length = 5\ninput = torch.randint(0, 1000, (batch_size, seq_length), dtype=torch.int64)\nif train_on_gpu:\n input = input.cuda()\n model.cuda()\n \nhidden = model.init_hidden(batch_size)\n\nlogps, _ = model.forward(input, hidden)\nprint(logps)", "tensor([[-2.1172, -1.1270, -1.9864, -1.5644, -1.5644],\n [-2.0450, -1.1264, -1.9436, -1.5337, -1.6739],\n [-2.0721, -1.5504, -1.9691, -1.0842, -1.6918],\n [-2.2199, -1.2164, -2.0770, -1.1794, -1.8183]], device='cuda:0')\n" ] ], [ [ "## Training\n### DataLoaders and Batching\nNow we should build a generator that we can use to loop through our data. It'll be more efficient if we can pass our sequences in as batches. Our input tensors should look like `(batch_size, sequence_length)`. So if our sequences are 40 tokens long and we pass in 25 sequences, then we'd have an input size of `(25, 40)`.\n\nIf we set our sequence length to 40, what do we do with messages that are more or less than 40 tokens? For messages with fewer than 40 tokens, we will pad the empty spots with zeros. We should be sure to **left** pad so that the RNN starts from nothing before going through the data. If the message has 20 tokens, then the first 20 spots of our 40 long sequence will be 0. If a message has more than 40 tokens, we'll just keep the first 40 tokens.", "_____no_output_____" ] ], [ [ "def dataloader(messages, labels, sequence_length=30, batch_size=32, shuffle=False):\n \"\"\" \n Build a dataloader.\n \"\"\"\n if shuffle:\n indices = list(range(len(messages)))\n random.shuffle(indices)\n messages = [messages[idx] for idx in indices]\n labels = [labels[idx] for idx in indices]\n\n total_sequences = len(messages)\n\n for ii in range(0, total_sequences, batch_size):\n batch_messages = messages[ii: ii+batch_size]\n \n # First initialize a tensor of all zeros\n batch = torch.zeros((len(batch_messages), sequence_length), dtype=torch.int64)\n for batch_num, tokens in enumerate(batch_messages):\n token_tensor = torch.tensor(tokens)\n # Left pad!\n start_idx = max(sequence_length - len(token_tensor), 0)\n batch[batch_num, start_idx:] = token_tensor[:sequence_length]\n \n label_tensor = torch.tensor(labels[ii: ii+len(batch_messages)])\n \n yield batch, label_tensor", "_____no_output_____" ] ], [ [ "### Training and Validation\nWith our data in nice shape, we'll split it into training and validation sets.", "_____no_output_____" ] ], [ [ "\"\"\"\nSplit data into training and validation datasets. 
Use an appropriate split size.\nThe features are the `token_ids` and the labels are the `sentiments`.\n\"\"\" \n\n# TODO Implement \n\nn_training = np.int(0.8 * len(token_ids))\nn_validation = np.int(0.1 * len(token_ids))\n\ntrain_features = token_ids[:n_training]\nvalid_features = token_ids[n_training:n_training + n_validation]\ntrain_labels = sentiments[:n_training]\nvalid_labels = sentiments[n_training:n_training + n_validation]\nprint(len(train_features), len(valid_labels))", "821712 102714\n" ], [ "text_batch, labels = next(iter(dataloader(train_features, train_labels, sequence_length=20, batch_size=64)))\nmodel = TextClassifier(len(vocab)+1, 200, 128, 5, dropout=0.)\nhidden = model.init_hidden(64)\nif train_on_gpu:\n text_batch = text_batch.cuda()\n model.cuda()\nlogps, hidden = model.forward(text_batch, hidden)", "_____no_output_____" ] ], [ [ "### Training\nIt's time to train the neural network!", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = TextClassifier(len(vocab)+1, 1024, 512, 5, lstm_layers=2, dropout=0.2)\nmodel.embedding.weight.data.uniform_(-1, 1)\nmodel.to(device)", "_____no_output_____" ], [ "\"\"\"\nTrain your model with dropout. Make sure to clip your gradients.\nPrint the training loss, validation loss, and validation accuracy for every 100 steps.\n\"\"\"\n\nepochs = 1\nbatch_size = 512\nsequence_length = 40\nlearning_rate = 0.001\nclip = 5\n\nprint_every = 100\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nmodel.train()\n\nfor epoch in range(epochs):\n print('Starting epoch {}'.format(epoch + 1))\n \n hidden = model.init_hidden(batch_size)\n \n steps = 0\n for text_batch, labels in dataloader(\n train_features, train_labels, batch_size=batch_size, sequence_length = sequence_length, shuffle=True):\n \n if text_batch.shape != torch.Size([batch_size, sequence_length]):\n continue\n \n steps += 1\n hidden = tuple([each.data for each in hidden])\n \n # Set Device\n text_batch, labels = text_batch.to(device), labels.to(device)\n for each in hidden:\n each.to(device)\n \n # TODO Implement: Train Model\n model.zero_grad()\n \n logps, hidden = model.forward(text_batch, hidden)\n \n loss = criterion(logps.squeeze(), labels)\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n \n if steps % print_every == 0:\n model.eval()\n \n valid_hidden = model.init_hidden(batch_size)\n valid_loss = []\n # TODO Implement: Print metrics\n for valid_texts, labels in dataloader(\n valid_features, valid_labels, batch_size=batch_size, sequence_length = sequence_length, shuffle=True):\n \n if valid_texts.shape != torch.Size([batch_size, sequence_length]):\n continue\n \n valid_hidden = tuple([each.data for each in valid_hidden])\n valid_texts, labels = valid_texts.to(device), labels.to(device)\n for each in valid_hidden:\n each.to(device)\n \n valid_logps, valid_hidden = model.forward(valid_texts, valid_hidden)\n \n loss = criterion(valid_logps.squeeze(), labels)\n valid_loss.append(loss.item())\n \n print(f'Epoch: ({epoch+1}/{epochs}...)',\n f'Step: {steps}...',\n f'Validation loss: {np.mean(valid_loss)}')\n model.train()", "Starting epoch 1\nEpoch: (1/1...) Step: 100... Validation loss: 1.0021054914593697\nEpoch: (1/1...) Step: 200... Validation loss: 0.9045261889696121\nEpoch: (1/1...) Step: 300... Validation loss: 0.8630593079328537\nEpoch: (1/1...) Step: 400... Validation loss: 0.8263770627975464\nEpoch: (1/1...) Step: 500... 
Validation loss: 0.8017379134893418\nEpoch: (1/1...) Step: 600... Validation loss: 0.7890897753834725\nEpoch: (1/1...) Step: 700... Validation loss: 0.7752371823787689\nEpoch: (1/1...) Step: 800... Validation loss: 0.7584137234091759\nEpoch: (1/1...) Step: 900... Validation loss: 0.7579039126634598\nEpoch: (1/1...) Step: 1000... Validation loss: 0.7444847962260246\nEpoch: (1/1...) Step: 1100... Validation loss: 0.744362855553627\nEpoch: (1/1...) Step: 1200... Validation loss: 0.739463067650795\nEpoch: (1/1...) Step: 1300... Validation loss: 0.7347753727436066\nEpoch: (1/1...) Step: 1400... Validation loss: 0.7320591765642166\nEpoch: (1/1...) Step: 1500... Validation loss: 0.7173329085111618\nEpoch: (1/1...) Step: 1600... Validation loss: 0.7176626390218734\n" ] ], [ [ "## Making Predictions\n### Prediction \nOkay, now that you have a trained model, try it on some new twits and see if it works appropriately. Remember that for any new text, you'll need to preprocess it first before passing it to the network. Implement the `predict` function to generate the prediction vector from a message.", "_____no_output_____" ] ], [ [ "def predict(text, model, vocab):\n \"\"\" \n Make a prediction on a single sentence.\n\n Parameters\n ----------\n text : The string to make a prediction on.\n model : The model to use for making the prediction.\n vocab : Dictionary for word to word ids. The key is the word and the value is the word id.\n\n Returns\n -------\n pred : Prediction vector\n \"\"\" \n \n # TODO Implement\n \n tokens = preprocess(text)\n \n # Filter non-vocab words\n tokens = [word for word in tokens if word in filtered_words]\n # Convert words to ids\n tokens = [vocab[word] for word in tokens] \n\n # Adding a batch dimension\n text_input = torch.tensor(tokens).unsqueeze(0)\n \n if train_on_gpu:\n text_input = text_input.cuda()\n model.cuda()\n # Get the NN output\n hidden = model.init_hidden(text_input.size(0))\n logps, _ = model.forward(text_input, hidden)\n # Take the exponent of the NN output to get a range of 0 to 1 for each label.\n pred = torch.exp(logps)\n \n return pred", "_____no_output_____" ], [ "text = \"Google is working on self driving cars, I'm bullish on $goog\"\nmodel.eval()\nmodel.to(\"cpu\")\npredict(text, model, vocab)", "_____no_output_____" ] ], [ [ "### Questions: What is the prediction of the model? What is the uncertainty of the prediction?\nGoogle has sentiment $3$ with probability $0.7671$", "_____no_output_____" ], [ "Now we have a trained model and we can make predictions. We can use this model to track the sentiments of various stocks by predicting the sentiments of twits as they are coming in. Now we have a stream of twits. For each of those twits, pull out the stocks mentioned in them and keep track of the sentiments. Remember that in the twits, ticker symbols are encoded with a dollar sign as the first character, all caps, and 2-4 letters, like $AAPL. 
Ideally, you'd want to track the sentiments of the stocks in your universe and use this as a signal in your larger model(s).\n\n## Testing\n### Load the Data ", "_____no_output_____" ] ], [ [ "with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'test_twits.json'), 'r') as f:\n test_data = json.load(f)", "_____no_output_____" ] ], [ [ "### Twit Stream", "_____no_output_____" ] ], [ [ "def twit_stream():\n for twit in test_data['data']:\n yield twit\n\nnext(twit_stream())", "_____no_output_____" ] ], [ [ "Using the `prediction` function, let's apply it to a stream of twits.", "_____no_output_____" ] ], [ [ "def score_twits(stream, model, vocab, universe):\n \"\"\" \n Given a stream of twits and a universe of tickers, return sentiment scores for tickers in the universe.\n \"\"\"\n for twit in stream:\n\n # Get the message text\n text = twit['message_body']\n symbols = re.findall('\\$[A-Z]{2,4}', text)\n score = predict(text, model, vocab)\n\n for symbol in symbols:\n if symbol in universe:\n yield {'symbol': symbol, 'score': score, 'timestamp': twit['timestamp']}", "_____no_output_____" ], [ "universe = {'$BBRY', '$AAPL', '$AMZN', '$BABA', '$YHOO', '$LQMT', '$FB', '$GOOG', '$BBBY', '$JNUG', '$SBUX', '$MU'}\nscore_stream = score_twits(twit_stream(), model, vocab, universe)\n\nnext(score_stream)", "_____no_output_____" ] ], [ [ "That's it. You have successfully built a model for sentiment analysis! ", "_____no_output_____" ], [ "## Submission\nNow that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec58ebf68cfe475cb50291967cc2e066014ee8b0
47,841
ipynb
Jupyter Notebook
content/ch-ex/ex3.ipynb
luke7mm/qiskit-textbook
463cf2f13529c7ac7f6dc09d7808f2731edcb2ef
[ "Apache-2.0" ]
526
2020-06-21T16:38:52.000Z
2022-03-30T00:42:43.000Z
content/ch-ex/ex3.ipynb
luke7mm/qiskit-textbook
463cf2f13529c7ac7f6dc09d7808f2731edcb2ef
[ "Apache-2.0" ]
602
2020-06-19T17:09:23.000Z
2022-03-31T08:54:55.000Z
content/ch-ex/ex3.ipynb
luke7mm/qiskit-textbook
463cf2f13529c7ac7f6dc09d7808f2731edcb2ef
[ "Apache-2.0" ]
512
2020-06-19T20:29:13.000Z
2022-03-31T11:49:39.000Z
49.938413
11,507
0.516565
[ [ [ "# Building the Best AND Gate\n\nLet's import everything:", "_____no_output_____" ] ], [ [ "from qiskit import *\nfrom qiskit.tools.visualization import plot_histogram\nfrom qiskit.providers.aer import noise\nimport numpy as np", "_____no_output_____" ] ], [ [ "In Problem Set 1, you made an AND gate with quantum gates. This time you'll do the same again, but for a real device. Using real devices gives you two major constraints to deal with. One is the connectivity, and the other is noise.\n\nThe connectivity tells you what `cx` gates it is possible to do perform directly. For example, the device `ibmq_5_tenerife` has five qubits numbered from 0 to 4. It has a connectivity defined by", "_____no_output_____" ] ], [ [ "coupling_map = [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], [4, 2]]", "_____no_output_____" ] ], [ [ "Here the `[1,0]` tells us that we can implement a `cx` with qubit 1 as control and qubit 0 as target, the `[2,0]` tells us we can have qubit 2 as control and 0 as target, and so on. These are the `cx` gates that the device can implement directly.\n\nThe 'noise' of a device is the collective effects of all the things that shouldn't happen, but nevertheless do happen. Noise results in the output not always having the result we expect. There is noise associated with all processes in a quantum circuit: preparing the initial states, applying gates and measuring the output. For the gates, noise levels can vary between different gates and between different qubits. The `cx` gates are typically more noisy than any single qubit gate.\n\nWe can also simulate noise using a noise model. And we can set the noise model based on measurements of the noise for a real device. The following noise model is based on `ibmq_5_tenerife`.\n\n", "_____no_output_____" ] ], [ [ "noise_dict = {'errors': [{'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0004721766167523067, 0.0004721766167523067, 0.0004721766167523067, 0.9985834701497431], 'gate_qubits': [[0]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0005151090708174488, 0.0005151090708174488, 0.0005151090708174488, 0.9984546727875476], 'gate_qubits': [[1]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0005151090708174488, 0.0005151090708174488, 0.0005151090708174488, 0.9984546727875476], 'gate_qubits': [[2]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.000901556048412383, 0.000901556048412383, 0.000901556048412383, 0.9972953318547628], 'gate_qubits': [[3]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0011592423249461303, 0.0011592423249461303, 0.0011592423249461303, 0.9965222730251616], 'gate_qubits': [[4]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 
'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0009443532335046134, 0.0009443532335046134, 0.0009443532335046134, 0.9971669402994862], 'gate_qubits': [[0]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0010302181416348977, 0.0010302181416348977, 0.0010302181416348977, 0.9969093455750953], 'gate_qubits': [[1]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0010302181416348977, 0.0010302181416348977, 0.0010302181416348977, 0.9969093455750953], 'gate_qubits': [[2]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.001803112096824766, 0.001803112096824766, 0.001803112096824766, 0.9945906637095256], 'gate_qubits': [[3]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0023184846498922607, 0.0023184846498922607, 0.0023184846498922607, 0.9930445460503232], 'gate_qubits': [[4]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.9672573379090872], 'gate_qubits': [[1, 0]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': 
[0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.9699888805021712], 'gate_qubits': [[2, 0]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.9627184072576159], 'gate_qubits': [[2, 1]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.9437457618579164], 'gate_qubits': [[3, 2]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 
'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.9339816349935997], 'gate_qubits': [[3, 4]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.9307167621063416], 'gate_qubits': [[4, 2]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9372499999999999, 0.06275000000000008], [0.06275000000000008, 0.9372499999999999]], 'gate_qubits': [[0]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9345, 0.0655], [0.0655, 0.9345]], 'gate_qubits': [[1]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.97075, 0.029249999999999998], [0.029249999999999998, 0.97075]], 'gate_qubits': [[2]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9742500000000001, 0.02574999999999994], [0.02574999999999994, 0.9742500000000001]], 'gate_qubits': [[3]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.8747499999999999, 0.12525000000000008], [0.12525000000000008, 0.8747499999999999]], 'gate_qubits': [[4]]}], 'x90_gates': []}\nnoise_model = noise.noise_model.NoiseModel.from_dict( noise_dict )", "_____no_output_____" ] ], [ [ "Running directly on the device requires you to have an IBMQ account, and for you to sign in to it within your program. In order to not worry about all this, we'll instead use a simulation of the 5 qubit device defined by the constraints set above.", "_____no_output_____" ] ], [ [ "qr = QuantumRegister(5, 'qr')\ncr = ClassicalRegister(1, 'cr')\nbackend = Aer.get_backend('aer_simulator')", "_____no_output_____" ] ], [ [ "We now define the `AND` function. This has a few differences to the version in Exercise 1. Firstly, it is defined on a 5 qubit circuit, so you'll need to decide which of the 5 qubits are used to encode `input1`, `input2` and the output. 
Secondly, the output is a histogram of the number of times that each output is found when the process is repeated over 10000 samples.", "_____no_output_____" ] ], [ [ "def AND (input1,input2, q_1=0,q_2=1,q_out=2):\n # The keyword q_1 specifies the qubit used to encode input1\n # The keyword q_2 specifies qubit used to encode input2\n # The keyword q_out specifies qubit to be as output\n \n qc = QuantumCircuit(qr, cr)\n \n # prepare input on qubits q1 and q2\n if input1=='1':\n qc.x( qr[ q_1 ] )\n if input2=='1':\n qc.x( qr[ q_2 ] )\n \n qc.ccx(qr[ q_1 ],qr[ q_2 ],qr[ q_out ]) # the AND just needs a c\n qc.measure(qr[ q_out ],cr[0]) # output from qubit 1 is measured\n \n # the circuit is run on a simulator, but we do it so that the noise and connectivity of Tenerife are also reproduced \n job = execute(qc, backend, shots=10000, noise_model=noise_model,\n coupling_map=coupling_map,\n basis_gates=noise_model.basis_gates)\n output = job.result().get_counts()\n \n return output", "_____no_output_____" ] ], [ [ "For example, here are the results when both inputs are `0`.", "_____no_output_____" ] ], [ [ "result = AND('0','0')\nprint( result )\nplot_histogram( result )", "{'1': 991, '0': 9009}\n" ] ], [ [ "We'll compare across all results to find the most unreliable.\n\n", "_____no_output_____" ] ], [ [ "worst = 1\nfor input1 in ['0','1']:\n for input2 in ['0','1']:\n print('\\nProbability of correct answer for inputs',input1,input2)\n prob = AND(input1,input2, q_1=0,q_2=1,q_out=2)[str(int( input1=='1' and input2=='1' ))]/10000\n print( prob )\n worst = min(worst,prob)\nprint('\\nThe lowest of these probabilities was',worst)", "\nProbability of correct answer for inputs 0 0\n0.9035\n\nProbability of correct answer for inputs 0 1\n0.8978\n\nProbability of correct answer for inputs 1 0\n0.8995\n\nProbability of correct answer for inputs 1 1\n0.9046\n\nThe lowest of these probabilities was 0.8978\n" ] ], [ [ "The `AND` function above uses the `ccx` gate the implement the required operation. But you now know how to make your own. Find a way to implement an `AND` for which the lowest of the above probabilities is better than for a simple `ccx`.", "_____no_output_____" ] ], [ [ "import qiskit.tools.jupyter\n%qiskit_version_table", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec58ee4192939a4ab832b82df83eef9d4a7053bc
41,851
ipynb
Jupyter Notebook
Course 1 - Natural Language Processing with Classification and Vector Spaces/Week2/C1_W2_Assignment.ipynb
TRoboto/Natural-Language-Processing-Specialization
9c22e94738fac49dcb02859a837e5b0dddd99d41
[ "MIT" ]
null
null
null
Course 1 - Natural Language Processing with Classification and Vector Spaces/Week2/C1_W2_Assignment.ipynb
TRoboto/Natural-Language-Processing-Specialization
9c22e94738fac49dcb02859a837e5b0dddd99d41
[ "MIT" ]
1
2020-08-23T18:04:58.000Z
2020-08-23T18:04:58.000Z
Course 1 - Natural Language Processing with Classification and Vector Spaces/Week2/C1_W2_Assignment.ipynb
TRoboto/Natural-Language-Processing-Specialization
9c22e94738fac49dcb02859a837e5b0dddd99d41
[ "MIT" ]
null
null
null
38.011807
345
0.563117
[ [ [ "# Assignment 2: Naive Bayes\nWelcome to week two of this specialization. You will learn about Naive Bayes. Concretely, you will be using Naive Bayes for sentiment analysis on tweets. Given a tweet, you will decide if it has a positive sentiment or a negative one. Specifically you will: \n\n* Train a naive bayes model on a sentiment analysis task\n* Test using your model\n* Compute ratios of positive words to negative words\n* Do some error analysis\n* Predict on your own tweet\n\nYou may already be familiar with Naive Bayes and its justification in terms of conditional probabilities and independence.\n* In this week's lectures and assignments we used the ratio of probabilities between positive and negative sentiments.\n* This approach gives us simpler formulas for these 2-way classification tasks.\n\nLoad the cell below to import some packages.\nYou may want to browse the documentation of unfamiliar libraries and functions.", "_____no_output_____" ] ], [ [ "from utils import process_tweet, lookup\nimport pdb\nfrom nltk.corpus import stopwords, twitter_samples\nimport numpy as np\nimport pandas as pd\nimport nltk\nimport string\nfrom nltk.tokenize import TweetTokenizer\nfrom os import getcwd", "_____no_output_____" ] ], [ [ "If you are running this notebook in your local computer,\ndon't forget to download the twitter samples and stopwords from nltk.\n\n```\nnltk.download('stopwords')\nnltk.download('twitter_samples')\n```", "_____no_output_____" ] ], [ [ "# add folder, tmp2, from our local workspace containing pre-downloaded corpora files to nltk's data path\nfilePath = f\"{getcwd()}/../tmp2/\"\nnltk.data.path.append(filePath)", "_____no_output_____" ], [ "# get the sets of positive and negative tweets\nall_positive_tweets = twitter_samples.strings('positive_tweets.json')\nall_negative_tweets = twitter_samples.strings('negative_tweets.json')\n\n# split the data into two pieces, one for training and one for testing (validation set)\ntest_pos = all_positive_tweets[4000:]\ntrain_pos = all_positive_tweets[:4000]\ntest_neg = all_negative_tweets[4000:]\ntrain_neg = all_negative_tweets[:4000]\n\ntrain_x = train_pos + train_neg\ntest_x = test_pos + test_neg\n\n# avoid assumptions about the length of all_positive_tweets\ntrain_y = np.append(np.ones(len(train_pos)), np.zeros(len(train_neg)))\ntest_y = np.append(np.ones(len(test_pos)), np.zeros(len(test_neg)))", "_____no_output_____" ] ], [ [ "# Part 1: Process the Data\n\nFor any machine learning project, once you've gathered the data, the first step is to process it to make useful inputs to your model.\n- **Remove noise**: You will first want to remove noise from your data -- that is, remove words that don't tell you much about the content. These include all common words like 'I, you, are, is, etc...' that would not give us enough information on the sentiment.\n- We'll also remove stock market tickers, retweet symbols, hyperlinks, and hashtags because they can not tell you a lot of information on the sentiment.\n- You also want to remove all the punctuation from a tweet. The reason for doing this is because we want to treat words with or without the punctuation as the same word, instead of treating \"happy\", \"happy?\", \"happy!\", \"happy,\" and \"happy.\" as different words.\n- Finally you want to use stemming to only keep track of one variation of each word. 
In other words, we'll treat \"motivation\", \"motivated\", and \"motivate\" similarly by grouping them within the same stem of \"motiv-\".\n\nWe have given you the function `process_tweet()` that does this for you.", "_____no_output_____" ] ], [ [ "custom_tweet = \"RT @Twitter @chapagain Hello There! Have a great day. :) #good #morning http://chapagain.com.np\"\n\n# print cleaned tweet\nprint(process_tweet(custom_tweet))", "['hello', 'great', 'day', ':)', 'good', 'morn']\n" ] ], [ [ "## Part 1.1 Implementing your helper functions\n\nTo help train your naive bayes model, you will need to build a dictionary where the keys are a (word, label) tuple and the values are the corresponding frequency. Note that the labels we'll use here are 1 for positive and 0 for negative.\n\nYou will also implement a `lookup()` helper function that takes in the `freqs` dictionary, a word, and a label (1 or 0) and returns the number of times that word and label tuple appears in the collection of tweets.\n\nFor example: given a list of tweets `[\"i am rather excited\", \"you are rather happy\"]` and the label 1, the function will return a dictionary that contains the following key-value pairs:\n\n{\n (\"rather\", 1): 2\n (\"happi\", 1) : 1\n (\"excit\", 1) : 1\n}\n\n- Notice how for each word in the given string, the same label 1 is assigned to each word.\n- Notice how the words \"i\" and \"am\" are not saved, since it was removed by process_tweet because it is a stopword.\n- Notice how the word \"rather\" appears twice in the list of tweets, and so its count value is 2.\n\n#### Instructions\nCreate a function `count_tweets()` that takes a list of tweets as input, cleans all of them, and returns a dictionary.\n- The key in the dictionary is a tuple containing the stemmed word and its class label, e.g. (\"happi\",1).\n- The value the number of times this word appears in the given collection of tweets (an integer).", "_____no_output_____" ], [ "<details>\n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Hints</b></font>\n</summary>\n<p>\n<ul>\n <li>Please use the `process_tweet` function that was imported above, and then store the words in their respective dictionaries and sets.</li>\n <li>You may find it useful to use the `zip` function to match each element in `tweets` with each element in `ys`.</li>\n <li>Remember to check if the key in the dictionary exists before adding that key to the dictionary, or incrementing its value.</li>\n <li>Assume that the `result` dictionary that is input will contain clean key-value pairs (you can assume that the values will be integers that can be incremented). 
It is good practice to check the datatype before incrementing the value, but it's not required here.</li>\n</ul>\n</p>", "_____no_output_____" ] ], [ [ "# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef count_tweets(result, tweets, ys):\n '''\n Input:\n result: a dictionary that will be used to map each pair to its frequency\n tweets: a list of tweets\n ys: a list corresponding to the sentiment of each tweet (either 0 or 1)\n Output:\n result: a dictionary mapping each pair to its frequency\n '''\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n for y, tweet in zip(ys, tweets):\n for word in process_tweet(tweet):\n # define the key, which is the word and label tuple\n pair = (word, y)\n\n # if the key exists in the dictionary, increment the count\n if pair in result:\n result[pair] += 1\n\n # else, if the key is new, add it to the dictionary and set the count to 1\n else:\n result[pair] = 1\n ### END CODE HERE ###\n\n return result", "_____no_output_____" ], [ "# Testing your function\n\n\nresult = {}\ntweets = ['i am happy', 'i am tricked', 'i am sad', 'i am tired', 'i am tired']\nys = [1, 0, 0, 0, 0]\ncount_tweets(result, tweets, ys)", "_____no_output_____" ] ], [ [ "**Expected Output**: {('happi', 1): 1, ('trick', 0): 1, ('sad', 0): 1, ('tire', 0): 2}", "_____no_output_____" ], [ "# Part 2: Train your model using Naive Bayes\n\nNaive bayes is an algorithm that could be used for sentiment analysis. It takes a short time to train and also has a short prediction time.\n\n#### So how do you train a Naive Bayes classifier?\n- The first part of training a naive bayes classifier is to identify the number of classes that you have.\n- You will create a probability for each class.\n$P(D_{pos})$ is the probability that the document is positive.\n$P(D_{neg})$ is the probability that the document is negative.\nUse the formulas as follows and store the values in a dictionary:\n\n$$P(D_{pos}) = \\frac{D_{pos}}{D}\\tag{1}$$\n\n$$P(D_{neg}) = \\frac{D_{neg}}{D}\\tag{2}$$\n\nWhere $D$ is the total number of documents, or tweets in this case, $D_{pos}$ is the total number of positive tweets and $D_{neg}$ is the total number of negative tweets.", "_____no_output_____" ], [ "#### Prior and Logprior\n\nThe prior probability represents the underlying probability in the target population that a tweet is positive versus negative. In other words, if we had no specific information and blindly picked a tweet out of the population set, what is the probability that it will be positive versus that it will be negative? That is the \"prior\".\n\nThe prior is the ratio of the probabilities $\\frac{P(D_{pos})}{P(D_{neg})}$.\nWe can take the log of the prior to rescale it, and we'll call this the logprior\n\n$$\\text{logprior} = log \\left( \\frac{P(D_{pos})}{P(D_{neg})} \\right) = log \\left( \\frac{D_{pos}}{D_{neg}} \\right)$$.\n\nNote that $log(\\frac{A}{B})$ is the same as $log(A) - log(B)$. So the logprior can also be calculated as the difference between two logs:\n\n$$\\text{logprior} = \\log (P(D_{pos})) - \\log (P(D_{neg})) = \\log (D_{pos}) - \\log (D_{neg})\\tag{3}$$", "_____no_output_____" ], [ "#### Positive and Negative Probability of a Word\nTo compute the positive probability and the negative probability for a specific word in the vocabulary, we'll use the following inputs:\n\n- $freq_{pos}$ and $freq_{neg}$ are the frequencies of that specific word in the positive or negative class. 
In other words, the positive frequency of a word is the number of times the word is counted with the label of 1.\n- $N_{pos}$ and $N_{neg}$ are the total number of positive and negative words for all documents (for all tweets), respectively.\n- $V$ is the number of unique words in the entire set of documents, for all classes, whether positive or negative.\n\nWe'll use these to compute the positive and negative probability for a specific word using this formula:\n\n$$ P(W_{pos}) = \\frac{freq_{pos} + 1}{N_{pos} + V}\\tag{4} $$\n$$ P(W_{neg}) = \\frac{freq_{neg} + 1}{N_{neg} + V}\\tag{5} $$\n\nNotice that we add the \"+1\" in the numerator for additive smoothing. This [wiki article](https://en.wikipedia.org/wiki/Additive_smoothing) explains more about additive smoothing.", "_____no_output_____" ], [ "#### Log likelihood\nTo compute the loglikelihood of that very same word, we can implement the following equations:\n\n$$\\text{loglikelihood} = \\log \\left(\\frac{P(W_{pos})}{P(W_{neg})} \\right)\\tag{6}$$", "_____no_output_____" ], [ "##### Create `freqs` dictionary\n- Given your `count_tweets()` function, you can compute a dictionary called `freqs` that contains all the frequencies.\n- In this `freqs` dictionary, the key is the tuple (word, label)\n- The value is the number of times it has appeared.\n\nWe will use this dictionary in several parts of this assignment.", "_____no_output_____" ] ], [ [ "# Build the freqs dictionary for later uses\n\nfreqs = count_tweets({}, train_x, train_y)", "_____no_output_____" ] ], [ [ "#### Instructions\nGiven a freqs dictionary, `train_x` (a list of tweets) and a `train_y` (a list of labels for each tweet), implement a naive bayes classifier.\n\n##### Calculate $V$\n- You can then compute the number of unique words that appear in the `freqs` dictionary to get your $V$ (you can use the `set` function).\n\n##### Calculate $freq_{pos}$ and $freq_{neg}$\n- Using your `freqs` dictionary, you can compute the positive and negative frequency of each word $freq_{pos}$ and $freq_{neg}$.\n\n##### Calculate $N_{pos}$ and $N_{neg}$\n- Using `freqs` dictionary, you can also compute the total number of positive words and total number of negative words $N_{pos}$ and $N_{neg}$.\n\n##### Calculate $D$, $D_{pos}$, $D_{neg}$\n- Using the `train_y` input list of labels, calculate the number of documents (tweets) $D$, as well as the number of positive documents (tweets) $D_{pos}$ and number of negative documents (tweets) $D_{neg}$.\n- Calculate the probability that a document (tweet) is positive $P(D_{pos})$, and the probability that a document (tweet) is negative $P(D_{neg})$\n\n##### Calculate the logprior\n- the logprior is $log(D_{pos}) - log(D_{neg})$\n\n##### Calculate log likelihood\n- Finally, you can iterate over each word in the vocabulary, use your `lookup` function to get the positive frequencies, $freq_{pos}$, and the negative frequencies, $freq_{neg}$, for that specific word.\n- Compute the positive probability of each word $P(W_{pos})$, negative probability of each word $P(W_{neg})$ using equations 4 & 5.\n\n$$ P(W_{pos}) = \\frac{freq_{pos} + 1}{N_{pos} + V}\\tag{4} $$\n$$ P(W_{neg}) = \\frac{freq_{neg} + 1}{N_{neg} + V}\\tag{5} $$\n\n**Note:** We'll use a dictionary to store the log likelihoods for each word. 
The key is the word, the value is the log likelihood of that word.\n\n- You can then compute the loglikelihood: $log \\left( \\frac{P(W_{pos})}{P(W_{neg})} \\right)\\tag{6}$.", "_____no_output_____" ] ], [ [ "# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef train_naive_bayes(freqs, train_x, train_y):\n '''\n Input:\n freqs: dictionary from (word, label) to how often the word appears\n train_x: a list of tweets\n train_y: a list of labels corresponding to the tweets (0,1)\n Output:\n logprior: the log prior. (equation 3 above)\n loglikelihood: the log likelihood of your Naive bayes equation. (equation 6 above)\n '''\n loglikelihood = {}\n logprior = 0\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n\n # calculate V, the number of unique words in the vocabulary\n vocab = set([pair[0] for pair in freqs.keys()])\n V = len(vocab)\n\n # calculate N_pos and N_neg\n N_pos = N_neg = 0\n for pair in freqs.keys():\n # if the label is positive (greater than zero)\n if pair[1] > 0:\n\n # Increment the number of positive words by the count for this (word, label) pair\n N_pos += freqs[pair]\n\n # else, the label is negative\n else:\n\n # increment the number of negative words by the count for this (word,label) pair\n N_neg += freqs[pair]\n\n # Calculate D, the number of documents\n D = len(train_x)\n\n # Calculate D_pos, the number of positive documents (*hint: use sum(<np_array>))\n D_pos = np.sum(train_y == 1)\n\n # Calculate D_neg, the number of negative documents (*hint: compute using D and D_pos)\n D_neg = D - D_pos\n\n # Calculate logprior\n logprior = np.log(D_pos) - np.log(D_neg)\n\n # For each word in the vocabulary...\n for word in vocab:\n # get the positive and negative frequency of the word\n freq_pos = freqs[(word, 1)] if (word, 1) in freqs else 0\n freq_neg = freqs[(word, 0)] if (word, 0) in freqs else 0\n\n # calculate the probability that each word is positive, and negative\n p_w_pos = (freq_pos + 1)/(N_pos + V)\n p_w_neg = (freq_neg + 1)/(N_neg + V)\n\n # calculate the log likelihood of the word\n loglikelihood[word] = np.log(p_w_pos/p_w_neg)\n\n ### END CODE HERE ###\n\n return logprior, loglikelihood\n", "_____no_output_____" ], [ "# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\n# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything\nlogprior, loglikelihood = train_naive_bayes(freqs, train_x, train_y)\nprint(logprior)\nprint(len(loglikelihood))", "0.0\n9089\n" ] ], [ [ "**Expected Output**:\n\n0.0\n\n9089", "_____no_output_____" ], [ "# Part 3: Test your naive bayes\n\nNow that we have the `logprior` and `loglikelihood`, we can test the naive bayes function by making predictions on some tweets!\n\n#### Implement `naive_bayes_predict`\n**Instructions**:\nImplement the `naive_bayes_predict` function to make predictions on tweets.\n* The function takes in the `tweet`, `logprior`, `loglikelihood`.\n* It returns the probability that the tweet belongs to the positive or negative class.\n* For each tweet, sum up loglikelihoods of each word in the tweet.\n* Also add the logprior to this sum to get the predicted sentiment of that tweet.\n\n$$ p = logprior + \\sum_i^N (loglikelihood_i)$$\n\n#### Note\nNote we calculate the prior from the training data, and that the training data is evenly split between positive and negative labels (4000 positive and 4000 negative tweets). 
This means that the ratio of positive to negative tweets is 1, and the logprior is 0.\n\nThe value of 0.0 means that when we add the logprior to the log likelihood, we're just adding zero to the log likelihood. However, please remember to include the logprior, because whenever the data is not perfectly balanced, the logprior will be a non-zero value.", "_____no_output_____" ] ], [ [ "# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef naive_bayes_predict(tweet, logprior, loglikelihood):\n '''\n Input:\n tweet: a string\n logprior: a number\n loglikelihood: a dictionary of words mapping to numbers\n Output:\n p: the sum of all the loglikelihoods of each word in the tweet (if found in the dictionary) + logprior (a number)\n\n '''\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n # process the tweet to get a list of words\n word_l = process_tweet(tweet)\n\n # initialize probability to zero\n p = 0\n\n # add the logprior\n p += logprior\n\n for word in word_l:\n\n # check if the word exists in the loglikelihood dictionary\n if word in loglikelihood:\n # add the log likelihood of that word to the probability\n p += loglikelihood[word]\n\n ### END CODE HERE ###\n\n return p\n", "_____no_output_____" ], [ "# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\n# You do not have to input any code in this cell, but it is relevant to grading, so please do not change anything\n\n# Experiment with your own tweet.\nmy_tweet = 'She smiled.'\np = naive_bayes_predict(my_tweet, logprior, loglikelihood)\nprint('The expected output is', p)", "The expected output is 1.5740278623499175\n" ] ], [ [ "**Expected Output**:\n- The expected output is around 1.57\n- The sentiment is positive.", "_____no_output_____" ], [ "#### Implement test_naive_bayes\n**Instructions**:\n* Implement `test_naive_bayes` to check the accuracy of your predictions.\n* The function takes in your `test_x`, `test_y`, logprior, and loglikelihood\n* It returns the accuracy of your model.\n* First, use `naive_bayes_predict` function to make predictions for each tweet in test_x.", "_____no_output_____" ] ], [ [ "# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef test_naive_bayes(test_x, test_y, logprior, loglikelihood):\n \"\"\"\n Input:\n test_x: A list of tweets\n test_y: the corresponding labels for the list of tweets\n logprior: the logprior\n loglikelihood: a dictionary with the loglikelihoods for each word\n Output:\n accuracy: (# of tweets classified correctly)/(total # of tweets)\n \"\"\"\n accuracy = 0 # return this properly\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n y_hats = []\n for tweet in test_x:\n # if the prediction is > 0\n if naive_bayes_predict(tweet, logprior, loglikelihood) > 0:\n # the predicted class is 1\n y_hat_i = 1\n else:\n # otherwise the predicted class is 0\n y_hat_i = 0\n\n # append the predicted class to the list y_hats\n y_hats.append(y_hat_i)\n\n # error is the average of the absolute values of the differences between y_hats and test_y\n error = np.mean(np.abs(y_hats - test_y))\n\n # Accuracy is 1 minus the error\n accuracy = 1 - error\n\n ### END CODE HERE ###\n\n return accuracy\n", "_____no_output_____" ], [ "print(\"Naive Bayes accuracy = %0.4f\" %\n (test_naive_bayes(test_x, test_y, logprior, loglikelihood)))", "Naive Bayes accuracy = 0.9940\n" ] ], [ [ "**Expected Accuracy**:\n\n0.9940", "_____no_output_____" ] ], [ [ "# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\n# You do not have to input any code in this cell, but it is relevant to grading, so please do not 
change anything\n\n# Run this cell to test your function\nfor tweet in ['I am happy', 'I am bad', 'this movie should have been great.', 'great', 'great great', 'great great great', 'great great great great']:\n # print( '%s -> %f' % (tweet, naive_bayes_predict(tweet, logprior, loglikelihood)))\n p = naive_bayes_predict(tweet, logprior, loglikelihood)\n# print(f'{tweet} -> {p:.2f} ({p_category})')\n print(f'{tweet} -> {p:.2f}')", "I am happy -> 2.15\nI am bad -> -1.29\nthis movie should have been great. -> 2.14\ngreat -> 2.14\ngreat great -> 4.28\ngreat great great -> 6.41\ngreat great great great -> 8.55\n" ] ], [ [ "**Expected Output**:\n- I am happy -> 2.15\n- I am bad -> -1.29\n- this movie should have been great. -> 2.14\n- great -> 2.14\n- great great -> 4.28\n- great great great -> 6.41\n- great great great great -> 8.55", "_____no_output_____" ] ], [ [ "# Feel free to check the sentiment of your own tweet below\nmy_tweet = 'you are bad :('\nnaive_bayes_predict(my_tweet, logprior, loglikelihood)", "_____no_output_____" ] ], [ [ "# Part 4: Filter words by Ratio of positive to negative counts\n\n- Some words have more positive counts than others, and can be considered \"more positive\". Likewise, some words can be considered more negative than others.\n- One way for us to define the level of positiveness or negativeness, without calculating the log likelihood, is to compare the positive to negative frequency of the word.\n - Note that we can also use the log likelihood calculations to compare relative positivity or negativity of words.\n- We can calculate the ratio of positive to negative frequencies of a word.\n- Once we're able to calculate these ratios, we can also filter a subset of words that have a minimum ratio of positivity / negativity or higher.\n- Similarly, we can also filter a subset of words that have a maximum ratio of positivity / negativity or lower (words that are at least as negative, or even more negative than a given threshold).\n\n#### Implement `get_ratio()`\n- Given the `freqs` dictionary of words and a particular word, use `lookup(freqs,word,1)` to get the positive count of the word.\n- Similarly, use the `lookup()` function to get the negative count of that word.\n- Calculate the ratio of positive divided by negative counts\n\n$$ ratio = \\frac{\\text{pos_words} + 1}{\\text{neg_words} + 1} $$\n\nWhere pos_words and neg_words correspond to the frequency of the words in their respective classes. 
\n<table>\n <tr>\n <td>\n <b>Words</b>\n </td>\n <td>\n Positive word count\n </td>\n <td>\n Negative Word Count\n </td>\n </tr>\n <tr>\n <td>\n glad\n </td>\n <td>\n 41\n </td>\n <td>\n 2\n </td>\n </tr>\n <tr>\n <td>\n arriv\n </td>\n <td>\n 57\n </td>\n <td>\n 4\n </td>\n </tr>\n <tr>\n <td>\n :(\n </td>\n <td>\n 1\n </td>\n <td>\n 3663\n </td>\n </tr>\n <tr>\n <td>\n :-(\n </td>\n <td>\n 0\n </td>\n <td>\n 378\n </td>\n </tr>\n</table>", "_____no_output_____" ] ], [ [ "# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef get_ratio(freqs, word):\n '''\n Input:\n freqs: dictionary containing the words\n word: string to lookup\n\n Output: a dictionary with keys 'positive', 'negative', and 'ratio'.\n Example: {'positive': 10, 'negative': 20, 'ratio': 0.5}\n '''\n pos_neg_ratio = {'positive': 0, 'negative': 0, 'ratio': 0.0}\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n # use lookup() to find positive counts for the word (denoted by the integer 1)\n pos_neg_ratio['positive'] = lookup(freqs,word,1)\n\n # use lookup() to find negative counts for the word (denoted by integer 0)\n pos_neg_ratio['negative'] = lookup(freqs,word,0)\n\n # calculate the ratio of positive to negative counts for the word\n pos_neg_ratio['ratio'] = (pos_neg_ratio['positive'] + 1) / (pos_neg_ratio['negative'] + 1)\n ### END CODE HERE ###\n return pos_neg_ratio\n", "_____no_output_____" ], [ "get_ratio(freqs, 'happi')", "_____no_output_____" ] ], [ [ "#### Implement `get_words_by_threshold(freqs,label,threshold)`\n\n* If we set the label to 1, then we'll look for all words whose threshold of positive/negative is at least as high as that threshold, or higher.\n* If we set the label to 0, then we'll look for all words whose threshold of positive/negative is at most as low as the given threshold, or lower.\n* Use the `get_ratio()` function to get a dictionary containing the positive count, negative count, and the ratio of positive to negative counts.\n* Append a dictionary to a list, where the key is the word, and the dictionary is the dictionary `pos_neg_ratio` that is returned by the `get_ratio()` function.\nAn example key-value pair would have this structure:\n```\n{'happi':\n {'positive': 10, 'negative': 20, 'ratio': 0.5}\n}\n```", "_____no_output_____" ] ], [ [ "# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)\ndef get_words_by_threshold(freqs, label, threshold):\n '''\n Input:\n freqs: dictionary of words\n label: 1 for positive, 0 for negative\n threshold: ratio that will be used as the cutoff for including a word in the returned dictionary\n Output:\n word_set: dictionary containing the word and information on its positive count, negative count, and ratio of positive to negative counts.\n example of a key value pair:\n {'happi':\n {'positive': 10, 'negative': 20, 'ratio': 0.5}\n }\n '''\n word_list = {}\n\n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n for key in freqs.keys():\n word, _ = key\n\n # get the positive/negative ratio for a word\n pos_neg_ratio = get_ratio(freqs, word)\n\n # if the label is 1 and the ratio is greater than or equal to the threshold...\n if label == 1 and pos_neg_ratio['ratio'] >= threshold:\n\n # Add the pos_neg_ratio to the dictionary\n word_list[word] = pos_neg_ratio\n\n # If the label is 0 and the pos_neg_ratio is less than or equal to the threshold...\n elif label == 0 and pos_neg_ratio['ratio'] <= threshold:\n\n # Add the pos_neg_ratio to the dictionary\n word_list[word] = pos_neg_ratio\n\n # otherwise, do not include this word in the 
list (do nothing)\n\n ### END CODE HERE ###\n return word_list\n", "_____no_output_____" ], [ "# Test your function: find negative words at or below a threshold\nget_words_by_threshold(freqs, label=0, threshold=0.05)", "_____no_output_____" ], [ "# Test your function; find positive words at or above a threshold\nget_words_by_threshold(freqs, label=1, threshold=10)", "_____no_output_____" ] ], [ [ "Notice the difference between the positive and negative ratios. Emojis like :( and words like 'me' tend to have a negative connotation. Other words like 'glad', 'community', and 'arrives' tend to be found in the positive tweets.", "_____no_output_____" ], [ "# Part 5: Error Analysis\n\nIn this part you will see some tweets that your model missclassified. Why do you think the misclassifications happened? Were there any assumptions made by the naive bayes model?", "_____no_output_____" ] ], [ [ "# Some error analysis done for you\nprint('Truth Predicted Tweet')\nfor x, y in zip(test_x, test_y):\n y_hat = naive_bayes_predict(x, logprior, loglikelihood)\n if y != (np.sign(y_hat) > 0):\n print('%d\\t%0.2f\\t%s' % (y, np.sign(y_hat) > 0, ' '.join(\n process_tweet(x)).encode('ascii', 'ignore')))", "Truth Predicted Tweet\n1\t0.00\tb''\n1\t0.00\tb'truli later move know queen bee upward bound movingonup'\n1\t0.00\tb'new report talk burn calori cold work harder warm feel better weather :p'\n1\t0.00\tb'harri niall 94 harri born ik stupid wanna chang :D'\n1\t0.00\tb''\n1\t0.00\tb''\n1\t0.00\tb'park get sunlight'\n1\t0.00\tb'uff itna miss karhi thi ap :p'\n0\t1.00\tb'hello info possibl interest jonatha close join beti :( great'\n0\t1.00\tb'u prob fun david'\n0\t1.00\tb'pat jay'\n0\t1.00\tb'whatev stil l young >:-('\n" ] ], [ [ "# Part 6: Predict with your own tweet\n\nIn this part you can predict the sentiment of your own tweet.", "_____no_output_____" ] ], [ [ "# Test with your own tweet - feel free to modify `my_tweet`\nmy_tweet = 'I am happy because I am learning :)'\n\np = naive_bayes_predict(my_tweet, logprior, loglikelihood)\nprint(p)", "9.574768961173339\n" ] ], [ [ "Congratulations on completing this assignment. See you next week!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
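The assignment record above leans on equations (1)–(6): the logprior and the Laplace-smoothed word likelihoods. As a compact illustration of how those pieces fit together, here is a hedged sketch that re-derives them on a tiny toy corpus; it assumes a plain whitespace tokenizer in place of the course's `process_tweet` helper, and the `tokenize`/`train`/`predict` names are invented for the example, so this is a sketch of the method rather than the graded solution.

```python
# Minimal sketch of the Naive Bayes equations referenced above: logprior (3),
# Laplace-smoothed word probabilities (4)-(5), loglikelihood (6), and scoring.
# Assumption: a simple lowercase whitespace tokenizer stands in for process_tweet.
import numpy as np
from collections import defaultdict

def tokenize(text):
    # assumption: plain whitespace split instead of the course tokenizer
    return text.lower().split()

def train(tweets, labels):
    freqs = defaultdict(int)                  # (word, label) -> count
    for tweet, y in zip(tweets, labels):
        for word in tokenize(tweet):
            freqs[(word, y)] += 1

    vocab = {word for word, _ in freqs}
    V = len(vocab)
    N_pos = sum(c for (w, y), c in freqs.items() if y == 1)
    N_neg = sum(c for (w, y), c in freqs.items() if y == 0)
    D_pos = sum(1 for y in labels if y == 1)
    D_neg = len(labels) - D_pos

    logprior = np.log(D_pos) - np.log(D_neg)              # equation (3)
    loglikelihood = {}
    for word in vocab:
        p_pos = (freqs[(word, 1)] + 1) / (N_pos + V)      # equation (4)
        p_neg = (freqs[(word, 0)] + 1) / (N_neg + V)      # equation (5)
        loglikelihood[word] = np.log(p_pos / p_neg)       # equation (6)
    return logprior, loglikelihood

def predict(tweet, logprior, loglikelihood):
    # score = logprior + sum of loglikelihoods of the words we have seen in training
    return logprior + sum(loglikelihood.get(w, 0.0) for w in tokenize(tweet))

tweets = ["i am happy", "great day", "i am sad", "bad day"]
labels = [1, 1, 0, 0]
logprior, loglikelihood = train(tweets, labels)
print(predict("happy great", logprior, loglikelihood))    # > 0 -> positive
print(predict("sad bad", logprior, loglikelihood))        # < 0 -> negative
```

With a balanced toy corpus the logprior comes out as 0, which mirrors the note in the notebook about the evenly split training set; with unbalanced data it would shift every score toward the majority class.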
ec58f6126f9a36cb22ef9d917e0a7c3741c8ec9c
18,630
ipynb
Jupyter Notebook
doc/introduction.ipynb
rdarie/seaborn
1e804404818e60e815fc0a0ba5c7ee8d3565cd88
[ "MIT", "BSD-3-Clause" ]
1
2021-08-18T12:41:29.000Z
2021-08-18T12:41:29.000Z
doc/introduction.ipynb
rdarie/seaborn
1e804404818e60e815fc0a0ba5c7ee8d3565cd88
[ "MIT", "BSD-3-Clause" ]
null
null
null
doc/introduction.ipynb
rdarie/seaborn
1e804404818e60e815fc0a0ba5c7ee8d3565cd88
[ "MIT", "BSD-3-Clause" ]
1
2021-07-06T07:48:33.000Z
2021-07-06T07:48:33.000Z
38.333333
646
0.628717
[ [ [ ".. _introduction:\n\n.. currentmodule:: seaborn\n\nAn introduction to seaborn\n==========================\n\n.. raw:: html\n\n <div class=col-md-9>\n\nSeaborn is a library for making statistical graphics in Python. It builds on top of `matplotlib <https://matplotlib.org/>`_ and integrates closely with `pandas <https://pandas.pydata.org/>`_ data structures.\n\nSeaborn helps you explore and understand your data. Its plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots. Its dataset-oriented, declarative API lets you focus on what the different elements of your plots mean, rather than on the details of how to draw them.\n\nOur first seaborn plot\n----------------------\n\nHere's an example of what seaborn can do:", "_____no_output_____" ] ], [ [ "# Import seaborn\nimport seaborn as sns\n\n# Apply the default theme\nsns.set_theme()\n\n# Load an example dataset\ntips = sns.load_dataset(\"tips\")\n\n# Create a visualization\nsns.relplot(\n data=tips,\n x=\"total_bill\", y=\"tip\", col=\"time\",\n hue=\"smoker\", style=\"smoker\", size=\"size\",\n)", "_____no_output_____" ] ], [ [ "A few things have happened here. Let's go through them one by one:", "_____no_output_____" ] ], [ [ "# Import seaborn\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "Seaborn is the only library we need to import for this simple example. By convention, it is imported with the shorthand ``sns``.\n\nBehind the scenes, seaborn uses matplotlib to draw its plots. For interactive work, it's recommended to use a Jupyter/IPython interface in `matplotlib mode <https://ipython.readthedocs.io/en/stable/interactive/plotting.html>`_, or else you'll have to call :func:`matplotlib.pyplot.show` when you want to see the plot.", "_____no_output_____" ] ], [ [ "# Apply the default theme\nsns.set_theme()", "_____no_output_____" ] ], [ [ "This uses the :ref:`matplotlib rcParam system <matplotlib:matplotlib-rcparams>` and will affect how all matplotlib plots look, even if you don't make them with seaborn. Beyond the default theme, there are :doc:`several other options <tutorial/aesthetics>`, and you can independently control the style and scaling of the plot to quickly translate your work between presentation contexts (e.g., making a version of your figure that will have readable fonts when projected during a talk). If you like the matplotlib defaults or prefer a different theme, you can skip this step and still use the seaborn plotting functions.", "_____no_output_____" ] ], [ [ "# Load an example dataset\ntips = sns.load_dataset(\"tips\")", "_____no_output_____" ] ], [ [ "Most code in the docs will use the :func:`load_dataset` function to get quick access to an example dataset. There's nothing special about these datasets: they are just pandas dataframes, and we could have loaded them with :func:`pandas.read_csv` or built them by hand. Most of the examples in the documentation will specify data using pandas dataframes, but seaborn is very flexible about the :doc:`data structures <tutorial/data_structure>` that it accepts.", "_____no_output_____" ] ], [ [ "# Create a visualization\nsns.relplot(\n data=tips,\n x=\"total_bill\", y=\"tip\", col=\"time\",\n hue=\"smoker\", style=\"smoker\", size=\"size\",\n)", "_____no_output_____" ] ], [ [ "This plot shows the relationship between five variables in the tips dataset using a single call to the seaborn function :func:`relplot`. 
Notice how we provided only the names of the variables and their roles in the plot. Unlike when using matplotlib directly, it wasn't necessary to specify attributes of the plot elements in terms of the color values or marker codes. Behind the scenes, seaborn handled the translation from values in the dataframe to arguments that matplotlib understands. This declarative approach lets you stay focused on the questions that you want to answer, rather than on the details of how to control matplotlib.\n\n.. _intro_api_abstraction:\n\nAPI abstraction across visualizations\n-------------------------------------\n\nThere is no universally best way to visualize data. Different questions are best answered by different plots. Seaborn makes it easy to switch between different visual representations by using a consistent dataset-oriented API.\n\nThe function :func:`relplot` is named that way because it is designed to visualize many different statistical *relationships*. While scatter plots are often effective, relationships where one variable represents a measure of time are better represented by a line. The :func:`relplot` function has a convenient ``kind`` parameter that lets you easily switch to this alternate representation:", "_____no_output_____" ] ], [ [ "dots = sns.load_dataset(\"dots\")\nsns.relplot(\n data=dots, kind=\"line\",\n x=\"time\", y=\"firing_rate\", col=\"align\",\n hue=\"choice\", size=\"coherence\", style=\"choice\",\n facet_kws=dict(sharex=False),\n)", "_____no_output_____" ] ], [ [ "Notice how the ``size`` and ``style`` parameters are used in both the scatter and line plots, but they affect the two visualizations differently: changing the marker area and symbol in the scatter plot vs the line width and dashing in the line plot. We did not need to keep those details in mind, letting us focus on the overall structure of the plot and the information we want it to convey.\n\n.. _intro_stat_estimation:\n\nStatistical estimation and error bars\n-------------------------------------\n\nOften, we are interested in the *average* value of one variable as a function of other variables. Many seaborn functions will automatically perform the statistical estimation that is necessary to answer these questions:", "_____no_output_____" ] ], [ [ "fmri = sns.load_dataset(\"fmri\")\nsns.relplot(\n data=fmri, kind=\"line\",\n x=\"timepoint\", y=\"signal\", col=\"region\",\n hue=\"event\", style=\"event\",\n)", "_____no_output_____" ] ], [ [ "When statistical values are estimated, seaborn will use bootstrapping to compute confidence intervals and draw error bars representing the uncertainty of the estimate.\n\nStatistical estimation in seaborn goes beyond descriptive statistics. For example, it is possible to enhance a scatterplot by including a linear regression model (and its uncertainty) using :func:`lmplot`:", "_____no_output_____" ] ], [ [ "sns.lmplot(data=tips, x=\"total_bill\", y=\"tip\", col=\"time\", hue=\"smoker\")", "_____no_output_____" ] ], [ [ ".. _intro_distributions:\n\n\nInformative distributional summaries\n------------------------------------\n\nStatistical analyses require knowledge about the distribution of variables in your dataset. The seaborn function :func:`displot` supports several approaches to visualizing distributions. 
These include classic techniques like histograms and computationally-intensive approaches like kernel density estimation:", "_____no_output_____" ] ], [ [ "sns.displot(data=tips, x=\"total_bill\", col=\"time\", kde=True)", "_____no_output_____" ] ], [ [ "Seaborn also tries to promote techniques that are powerful but less familiar, such as calculating and plotting the empirical cumulative distribution function of the data:", "_____no_output_____" ] ], [ [ "sns.displot(data=tips, kind=\"ecdf\", x=\"total_bill\", col=\"time\", hue=\"smoker\", rug=True)", "_____no_output_____" ] ], [ [ ".. _intro_categorical:\n\nSpecialized plots for categorical data\n--------------------------------------\n\nSeveral specialized plot types in seaborn are oriented towards visualizing categorical data. They can be accessed through :func:`catplot`. These plots offer different levels of granularity. At the finest level, you may wish to see every observation by drawing a \"swarm\" plot: a scatter plot that adjusts the positions of the points along the categorical axis so that they don't overlap:", "_____no_output_____" ] ], [ [ "sns.catplot(data=tips, kind=\"swarm\", x=\"day\", y=\"total_bill\", hue=\"smoker\")", "_____no_output_____" ] ], [ [ "Alternately, you could use kernel density estimation to represent the underlying distribution that the points are sampled from:", "_____no_output_____" ] ], [ [ "sns.catplot(data=tips, kind=\"violin\", x=\"day\", y=\"total_bill\", hue=\"smoker\", split=True)", "_____no_output_____" ] ], [ [ "Or you could show only the mean value and its confidence interval within each nested category:", "_____no_output_____" ] ], [ [ "sns.catplot(data=tips, kind=\"bar\", x=\"day\", y=\"total_bill\", hue=\"smoker\")", "_____no_output_____" ] ], [ [ ".. _intro_dataset_funcs:\n\nComposite views onto multivariate datasets\n------------------------------------------\n\nSome seaborn functions combine multiple kinds of plots to quickly give informative summaries of a dataset. One, :func:`jointplot`, focuses on a single relationship. It plots the joint distribution between two variables along with each variable's marginal distribution:", "_____no_output_____" ] ], [ [ "penguins = sns.load_dataset(\"penguins\")\nsns.jointplot(data=penguins, x=\"flipper_length_mm\", y=\"bill_length_mm\", hue=\"species\")", "_____no_output_____" ] ], [ [ "The other, :func:`pairplot`, takes a broader view: it shows joint and marginal distributions for all pairwise relationships and for each variable, respectively:", "_____no_output_____" ] ], [ [ "sns.pairplot(data=penguins, hue=\"species\")", "_____no_output_____" ] ], [ [ ".. _intro_figure_classes:\n\nClasses and functions for making complex graphics\n-------------------------------------------------\n\nThese tools work by combining :doc:`axes-level <tutorial/function_overview>` plotting functions with objects that manage the layout of the figure, linking the structure of a dataset to a :doc:`grid of axes <tutorial/axis_grids>`. Both elements are part of the public API, and you can use them directly to create complex figures with only a few more lines of code:", "_____no_output_____" ] ], [ [ "g = sns.PairGrid(penguins, hue=\"species\", corner=True)\ng.map_lower(sns.kdeplot, hue=None, levels=5, color=\".2\")\ng.map_lower(sns.scatterplot, marker=\"+\")\ng.map_diag(sns.histplot, element=\"step\", linewidth=0, kde=True)\ng.add_legend(frameon=True)\ng.legend.set_bbox_to_anchor((.61, .6))", "_____no_output_____" ] ], [ [ ".. 
_intro_defaults:\n\nOpinionated defaults and flexible customization\n-----------------------------------------------\n\nSeaborn creates complete graphics with a single function call: when possible, its functions will automatically add informative axis labels and legends that explain the semantic mappings in the plot.\n\nIn many cases, seaborn will also choose default values for its parameters based on characteristics of the data. For example, the :doc:`color mappings <tutorial/color_palettes>` that we have seen so far used distinct hues (blue, orange, and sometimes green) to represent different levels of the categorical variables assigned to ``hue``. When mapping a numeric variable, some functions will switch to a continuous gradient:", "_____no_output_____" ] ], [ [ "sns.relplot(\n data=penguins,\n x=\"bill_length_mm\", y=\"bill_depth_mm\", hue=\"body_mass_g\"\n)", "_____no_output_____" ] ], [ [ "When you're ready to share or publish your work, you'll probably want to polish the figure beyond what the defaults achieve. Seaborn allows for several levels of customization. It defines multiple built-in :doc:`themes <tutorial/aesthetics>` that apply to all figures, its functions have standardized parameters that can modify the semantic mappings for each plot, and additional keyword arguments are passed down to the underlying matplotlib artists, allowing even more control. Once you've created a plot, its properties can be modified through both the seaborn API and by dropping down to the matplotlib layer for fine-grained tweaking:", "_____no_output_____" ] ], [ [ "sns.set_theme(style=\"ticks\", font_scale=1.25)\ng = sns.relplot(\n data=penguins,\n x=\"bill_length_mm\", y=\"bill_depth_mm\", hue=\"body_mass_g\",\n palette=\"crest\", marker=\"x\", s=100,\n)\ng.set_axis_labels(\"Bill length (mm)\", \"Bill depth (mm)\", labelpad=10)\ng.legend.set_title(\"Body mass (g)\")\ng.fig.set_size_inches(6.5, 4.5)\ng.ax.margins(.15)\ng.despine(trim=True)", "_____no_output_____" ] ], [ [ ".. _intro_matplotlib:\n\nRelationship to matplotlib\n--------------------------\n\nSeaborn's integration with matplotlib allows you to use it across the many environments that matplotlib supports, including exploratory analysis in notebooks, real-time interaction in GUI applications, and archival output in a number of raster and vector formats.\n\nWhile you can be productive using only seaborn functions, full customization of your graphics will require some knowledge of matplotlib's concepts and API. One aspect of the learning curve for new users of seaborn will be knowing when dropping down to the matplotlib layer is necessary to achieve a particular customization. On the other hand, users coming from matplotlib will find that much of their knowledge transfers.\n\nMatplotlib has a comprehensive and powerful API; just about any attribute of the figure can be changed to your liking. A combination of seaborn's high-level interface and matplotlib's deep customizability will allow you both to quickly explore your data and to create graphics that can be tailored into a `publication quality <https://github.com/wagnerlabpapers/Waskom_PNAS_2017>`_ final product.", "_____no_output_____" ], [ ".. _intro_next_steps:\n\nNext steps\n----------\n\nYou have a few options for where to go next. You might first want to learn how to :doc:`install seaborn <installing>`. Once that's done, you can browse the :doc:`example gallery <examples/index>` to get a broader sense for what kind of graphics seaborn can produce. 
Or you can read through the :doc:`user guide and tutorial <tutorial>` for a deeper discussion of the different tools and what they are designed to accomplish. If you have a specific plot in mind and want to know how to make it, you could check out the :doc:`API reference <api>`, which documents each function's parameters and shows many examples to illustrate usage.", "_____no_output_____" ], [ ".. raw:: html\n \n </div>", "_____no_output_____" ] ] ]
[ "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw" ]
[ [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code" ], [ "raw", "raw", "raw" ] ]
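The introduction record above stresses two points: seaborn's dataset-oriented functions accept any tidy pandas DataFrame, not just `load_dataset` output, and the figure-level objects they return expose matplotlib handles for further tuning. Below is a small hedged sketch of both ideas; the `dose`/`response`/`group` columns and their values are invented for illustration, and it assumes seaborn >= 0.11 and matplotlib >= 3.3.

```python
# Sketch: a hand-built tidy DataFrame driven through a figure-level seaborn
# function, then tweaked via the underlying matplotlib objects.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_theme()

rng = np.random.default_rng(0)
dose = np.tile([1, 2, 4, 8], 25)                       # 100 invented observations
group = np.tile(["control", "treated"], 50)
response = 0.4 * np.log2(dose) + 0.5 * (group == "treated") + rng.normal(0, 0.2, dose.size)
df = pd.DataFrame({"dose": dose, "response": response, "group": group})

# Figure-level function: the semantic mapping is declared by column name only.
g = sns.relplot(data=df, x="dose", y="response", hue="group", kind="line")

# Drop down to the matplotlib layer for details seaborn does not manage itself.
g.set_axis_labels("Dose (mg)", "Response (a.u.)")
g.ax.set_xscale("log", base=2)     # 'base' keyword assumes matplotlib >= 3.3
g.fig.suptitle("Hand-built DataFrame, seaborn mapping, matplotlib tweaks", y=1.02)
plt.show()
```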
ec590ae9c488c5b14c4f6be0ddb4a43695c2b275
8,742
ipynb
Jupyter Notebook
Homework notebooks/(HW notebooks) netology Mathematics and Python/5. dz5 (A.Sib).ipynb
Alex110117/data_analysis
3cac3aac63d617b9fbd862788c778c2858445622
[ "MIT" ]
2
2020-07-22T07:33:19.000Z
2020-09-01T12:53:28.000Z
Homework notebooks/(HW notebooks) netology Mathematics and Python/5. dz5 (A.Sib).ipynb
sibalex/data_analysis
3cac3aac63d617b9fbd862788c778c2858445622
[ "MIT" ]
null
null
null
Homework notebooks/(HW notebooks) netology Mathematics and Python/5. dz5 (A.Sib).ipynb
sibalex/data_analysis
3cac3aac63d617b9fbd862788c778c2858445622
[ "MIT" ]
null
null
null
21.964824
217
0.45962
[ [ [ "### Homework for the lesson «Python for data analysis: numpy and scipy»", "_____no_output_____" ], [ "### **Task 1**\nCreate a numpy array with elements from the number N down to 0 (for example, for N = 10 this would be \n\narray([9, 8, 7, 6, 5, 4, 3, 2, 1, 0]))", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nrev_array = lambda n: np.arange(n)[::-1]\nrev_array(10)", "_____no_output_____" ], [ "def func(N):\n res = np.flip(np.arange(N))\n return res\n\nfunc(10)", "_____no_output_____" ], [ "print(np.flip(np.arange(10)))", "[9 8 7 6 5 4 3 2 1 0]\n" ] ], [ [ "### **Task 2**\nCreate a diagonal matrix with elements from N down to 0. Compute the sum of its values on the diagonal.", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "np.diag(rev_array(10))", "_____no_output_____" ], [ "np.diag(rev_array(10)).sum()", "_____no_output_____" ] ], [ [ "### **Task 3**\nDownload a dataset of any size from https://grouplens.org/datasets/movielens/. Determine which movie received the most 5.0 ratings.", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "ratings = pd.read_csv('/Users/aleksandr/Downloads/ml-20m/ratings.csv')\nmovies = pd.read_csv('/Users/aleksandr/Downloads/ml-20m/movies.csv')", "_____no_output_____" ], [ "evaluation = ratings[ratings['rating'] == 5.0]\nres = evaluation.groupby('movieId').count().sort_values('rating', ascending=False).index.tolist()[0]\n\nmovies[movies['movieId'] == res]", "_____no_output_____" ] ], [ [ "### **Task 4**\nUsing the data in power.csv, compute the total consumption of the Baltic countries (Latvia, Lithuania and Estonia) for categories 4, 12 and 21 over the period from 2005 to 2010. Do not include negative quantity values in the calculation.", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "df = pd.read_csv('/Users/aleksandr/Downloads/Python_5_pandas/power.csv')", "_____no_output_____" ], [ "baltic = ['Latvia', 'Lithuania', 'Estonia']\ncategory = [4, 12, 21]\nperiod = range(2005, 2011)", "_____no_output_____" ], [ "# https://www.geeksforgeeks.org/python-pandas-dataframe-isin/\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.isin.html\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.isin.html\n\n# parentheses around the comparison are required: & binds tighter than > in Python\nres = df[\n df['country'].isin(baltic) & \n df['category'].isin(category) & \n df['year'].isin(period) & \n (df['quantity'] > 0)]", "_____no_output_____" ], [ "res['quantity'].sum()", "_____no_output_____" ] ], [ [ "### **Task 5**\nSolve the system of equations:\n\n4x + 2y + z = 4\n\nx + 3y = 12\n\n5y + 4z = -3", "_____no_output_____" ], [ "### Solution", "_____no_output_____" ] ], [ [ "equation = [[4, 2, 1], [1, 3, 0], [0, 5, 4]]\nvalue = [4, 12, -3]", "_____no_output_____" ], [ "# https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.solve.html\n# https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.linalg.html#matrix-and-vector-products\n\nnp.linalg.solve(equation, value)", "_____no_output_____" ], [ "pass", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
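Two details from the homework record above are worth a quick check: the `np.linalg.solve` answer can be verified by substituting it back into the system, and pandas boolean masks need parentheses around comparisons because `&` binds tighter than `>` in Python. The hedged sketch below shows both on a small invented DataFrame (the `toy` table stands in for the real power.csv data, which is not reproduced here).

```python
# Verify the linear-system solution and demonstrate the parenthesized boolean mask.
import numpy as np
import pandas as pd

A = np.array([[4, 2, 1], [1, 3, 0], [0, 5, 4]], dtype=float)
b = np.array([4, 12, -3], dtype=float)
x = np.linalg.solve(A, b)
print(x, np.allclose(A @ x, b))          # True: the solution satisfies all three equations

toy = pd.DataFrame({
    "country": ["Latvia", "Latvia", "Estonia", "Finland"],
    "year": [2005, 2011, 2007, 2006],
    "quantity": [10.0, 5.0, -2.0, 7.0],
})
mask = (
    toy["country"].isin(["Latvia", "Lithuania", "Estonia"])
    & toy["year"].between(2005, 2010)
    & (toy["quantity"] > 0)              # parentheses keep the comparison together
)
print(toy.loc[mask, "quantity"].sum())   # 10.0: only the first row passes every filter
```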