| Field | Dtype | Range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 6 to 14.9M |
| ext | string | 1 class |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 6 to 260 |
| max_stars_repo_name | string | lengths 6 to 119 |
| max_stars_repo_head_hexsha | string | lengths 40 to 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 6 to 260 |
| max_issues_repo_name | string | lengths 6 to 119 |
| max_issues_repo_head_hexsha | string | lengths 40 to 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 6 to 260 |
| max_forks_repo_name | string | lengths 6 to 119 |
| max_forks_repo_head_hexsha | string | lengths 40 to 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| avg_line_length | float64 | 2 to 1.04M |
| max_line_length | int64 | 2 to 11.2M |
| alphanum_fraction | float64 | 0 to 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
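Each row that follows serializes one Jupyter notebook with the fields above in this order: repository metadata first, then `cells`, `cell_types`, and `cell_type_groups`, which (as the rows suggest) hold groups of `[source, output]` cell pairs, one type per group, and the per-cell types within each group, respectively. As a quick orientation, below is a minimal sketch of how rows with this schema could be loaded and walked; the file name `notebooks.parquet`, the Parquet format, and the use of pandas are illustrative assumptions rather than anything specified by this listing.

```python
# Minimal sketch, assuming the rows are available as a Parquet file named
# "notebooks.parquet" (the file name and storage format are assumptions).
import pandas as pd

df = pd.read_parquet("notebooks.parquet")

# Flat repository metadata columns.
print(df[["hexsha", "size", "max_stars_repo_name", "max_stars_count"]].head())

# Walk the notebook content of the first row.
row = df.iloc[0]
for group, types in zip(row["cells"], row["cell_type_groups"]):
    for cell, cell_type in zip(group, types):
        source = cell[0]  # cell[1] appears to hold the captured output, if any
        print(f"{cell_type:>8}: {source[:60]!r}")
```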
ecdaade98929e94992fd146968ccf4c31770ed1c | 2,420 | ipynb | Jupyter Notebook | notebooks/requests.ipynb | caiquefilipini/mlops | 0f972763efaf2c04e3b28ac2d7788e925802e6a9 | [
"MIT"
] | null | null | null | notebooks/requests.ipynb | caiquefilipini/mlops | 0f972763efaf2c04e3b28ac2d7788e925802e6a9 | [
"MIT"
] | null | null | null | notebooks/requests.ipynb | caiquefilipini/mlops | 0f972763efaf2c04e3b28ac2d7788e925802e6a9 | [
"MIT"
] | null | null | null | 17.042254 | 59 | 0.467355 | [
[
[
"import requests",
"_____no_output_____"
],
[
"url = 'http://127.0.0.1:5000/cotacao/'",
"_____no_output_____"
],
[
"dados = {\n \"tamanho\": 120,\n \"ano\": 2001,\n \"garagem\": 2\n}",
"_____no_output_____"
],
[
"auth = requests.auth.HTTPBasicAuth('caique', 'alura')",
"_____no_output_____"
],
[
"response = requests.post(url, json=dados, auth=auth)",
"_____no_output_____"
],
[
"response.status_code",
"_____no_output_____"
],
[
"response.text",
"_____no_output_____"
],
[
"response.json()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdabf8d219048664721b46f642d0e5469db64fe | 205,439 | ipynb | Jupyter Notebook | Nasa2021/CNN_523embed_noisy3k.ipynb | ShepherdCode/ShepherdML | fd8d71c63f7bd788ea0052294d93e43246254a12 | [
"MIT"
] | null | null | null | Nasa2021/CNN_523embed_noisy3k.ipynb | ShepherdCode/ShepherdML | fd8d71c63f7bd788ea0052294d93e43246254a12 | [
"MIT"
] | 4 | 2020-03-24T18:05:09.000Z | 2020-12-22T17:42:54.000Z | Nasa2021/CNN_523embed_noisy3k.ipynb | ShepherdCode/ShepherdML | fd8d71c63f7bd788ea0052294d93e43246254a12 | [
"MIT"
] | null | null | null | 208.144883 | 33,186 | 0.845117 | [
[
[
"<a href=\"https://colab.research.google.com/github/ShepherdCode/ShepherdML/blob/master/Nasa2021/CNN_523embed_noisy3k.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"<a href=\"https://colab.research.google.com/github/ShepherdCode/ShepherdML/blob/master/Nasa2021/CNN_523.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# CNN 523 3k noisy\n\n1. Four layers of CNNS and MaxPooling\n2. Drop out at 0.2\n3. Variable filters and dense neurons\n4. 10% Noise, datasets of 3k coding and noncoding from 3k to 192k\n\n\n\n\n\n\n\n\n\n\n\n\n",
"_____no_output_____"
]
],
[
[
"#NC_FILENAME='ncRNA.tiny50.fasta'\n#PC_FILENAME='pcRNA.tiny50.fasta'\n#NC_FILENAME='ncRNA.gc34.processed.fasta'\n#PC_FILENAME='pcRNA.gc34.processed.fasta'\nNC_FILENAME='noisy_noncod_3000_10.fasta' # CHANGE THIS TO 1000, 2000, 4000, etc.\nPC_FILENAME='noisy_coding_3000_10.fasta'\nNC_VAL_FILE='noncod_validation.fasta' # 'noncod_validation.fasta' # CHANGE THIS TO THE UNIFORM VALIDATION FILE\nPC_VAL_FILE='coding_validation.fasta' # 'coding_validation.fasta'\n\nMODEL_FILE='JUNK1' # CHANGE THIS IF YOU WANT TO SAVE THE MODEL!\nDATAPATH=''\n\ntry:\n from google.colab import drive\n IN_COLAB = True\n PATH='/content/drive/'\n drive.mount(PATH)\n DATAPATH=PATH+'My Drive/data/' # must end in \"/\"\nexcept:\n IN_COLAB = False\n DATAPATH='data/' # must end in \"/\"\nNC_FILENAME = DATAPATH+NC_FILENAME\nPC_FILENAME = DATAPATH+PC_FILENAME\nNC_VAL_FILE = DATAPATH+NC_VAL_FILE\nPC_VAL_FILE = DATAPATH+PC_VAL_FILE\nMODEL_FILE=DATAPATH+MODEL_FILE\n\nEPOCHS=20 # DECIDE ON SOME AMOUNT AND STICK WITH IT\nSPLITS=5\nK=1\nVOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'\nEMBED_DIMEN=2\nFILTERS=32\nKERNEL=3\nNEURONS=24\nDROP=0.2\nMINLEN=200\nMAXLEN=1000 # THIS HAS TO MATCH THE SIMULATION DATA\nDENSE_LEN = 1000\nACT=\"tanh\"",
"Mounted at /content/drive/\n"
],
[
"# Load our own tools\n# TO DO: don't go to GitHub if the file is already local.\nGITHUB = True\nif GITHUB:\n #!pip install requests # Uncomment this if necessary. Seems to be pre-installed.\n import requests\n r = requests.get('https://raw.githubusercontent.com/ShepherdCode/ShepherdML/master/Strings/tools_fasta.py')\n with open('tools_fasta.py', 'w') as f:\n f.write(r.text)\n # TO DO: delete the file after import\nimport tools_fasta as tools\ntools.yahoo() # If this prints \"Yahoo!\" the the import was successful.\n\nTOOLS_CHANGED = False # set to True to re-run with a new version of tools\nif TOOLS_CHANGED:\n from importlib import reload \n tools=reload(tools)\n print(dir(tools)) # run this to see EVERYTHING in the tools module",
"Yahoo!\n"
],
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.model_selection import StratifiedKFold\nimport tensorflow as tf\nfrom tensorflow import keras\nimport time\ndt='float32'\ntf.keras.backend.set_floatx(dt)",
"_____no_output_____"
]
],
[
[
"Build model",
"_____no_output_____"
]
],
[
[
"def compile_model(model):\n print(\"COMPILE...\")\n bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)\n model.compile(loss=bc, optimizer=\"adam\", metrics=[\"accuracy\"])\n print(\"...COMPILED\")\n return model\n\ndef build_model():\n #SHAPE=(MAXLEN,5) \n SHAPE=(MAXLEN,4) \n # 4 input letters, 4 output dimensions, 1000 letters/RNA\n elayer = keras.layers.Embedding(4,4,input_length=1000) \n\n clayer1 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\",\n input_shape=SHAPE)\n clayer2 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer3 = keras.layers.MaxPooling1D(2)\n clayer4 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer5 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer6 = keras.layers.MaxPooling1D(2)\n clayer7 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer8 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer9 = keras.layers.MaxPooling1D(2)\n clayer10 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer11 = keras.layers.Conv1D(FILTERS,KERNEL,activation=ACT,padding=\"same\")\n clayer12 = keras.layers.MaxPooling1D(2)\n\n clayer13 = keras.layers.Flatten()\n\n dlayer1 = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt, input_shape=[DENSE_LEN])\n dlayer2 = keras.layers.Dropout(DROP)\n dlayer3 = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)\n dlayer4 = keras.layers.Dropout(DROP)\n output_layer = keras.layers.Dense(1, activation=\"sigmoid\", dtype=dt)\n\n cnn = keras.models.Sequential()\n cnn.add(elayer)\n cnn.add(clayer1)\n cnn.add(clayer2)\n cnn.add(clayer3)\n cnn.add(clayer4)\n cnn.add(clayer5)\n cnn.add(clayer6)\n cnn.add(clayer7)\n cnn.add(clayer8)\n cnn.add(clayer9)\n cnn.add(clayer10)\n cnn.add(clayer11)\n cnn.add(clayer12)\n cnn.add(clayer13)\n cnn.add(dlayer1)\n cnn.add(dlayer2)\n cnn.add(dlayer3)\n cnn.add(dlayer4)\n cnn.add(output_layer)\n mlpc = compile_model(cnn)\n return mlpc",
"_____no_output_____"
]
],
[
[
"Cross validation",
"_____no_output_____"
]
],
[
[
"def do_cross_validation(X,y,given_model,X_VALID,Y_VALID):\n cv_scores = []\n fold=0\n splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1, random_state=37863)\n for train_index,valid_index in splitter.split(X):\n fold += 1\n X_train=X[train_index] # use iloc[] for dataframe\n y_train=y[train_index]\n X_valid=X[valid_index]\n y_valid=y[valid_index] \n # Avoid continually improving the same model.\n model = compile_model(keras.models.clone_model(given_model))\n bestname=MODEL_FILE+\".cv.\"+str(fold)+\".best\"\n mycallbacks = [keras.callbacks.ModelCheckpoint(\n filepath=bestname, save_best_only=True, \n monitor='val_accuracy', mode='max')] \n print(\"FIT\")\n start_time=time.time()\n history=model.fit(X_train, y_train, \n epochs=EPOCHS, verbose=1, callbacks=mycallbacks,\n validation_data=(X_valid,y_valid))\n # THE VALIDATION ABOVE IS JUST FOR SHOW\n end_time=time.time()\n elapsed_time=(end_time-start_time) \n print(\"Fold %d, %d epochs, %d sec\"%(fold,EPOCHS,elapsed_time))\n pd.DataFrame(history.history).plot(figsize=(8,5))\n plt.grid(True)\n plt.gca().set_ylim(0,1)\n plt.show()\n best_model=keras.models.load_model(bestname)\n # THE VALIDATION BELOW IS FOR KEEPS\n scores = best_model.evaluate(X_VALID, Y_VALID, verbose=0)\n print(\"%s: %.2f%%\" % (best_model.metrics_names[1], scores[1]*100))\n cv_scores.append(scores[1] * 100) \n print()\n print(\"%d-way Cross Validation mean %.2f%% (+/- %.2f%%)\" % (fold, np.mean(cv_scores), np.std(cv_scores)))",
"_____no_output_____"
]
],
[
[
"## Train on RNA lengths 200-1Kb",
"_____no_output_____"
]
],
[
[
"print (\"Compile the model\")\nmodel=build_model()\nprint (\"Summarize the model\")\nprint(model.summary()) # Print this only once\n#model.save(MODEL_FILE+'.model')\n",
"Compile the model\nCOMPILE...\n...COMPILED\nSummarize the model\nModel: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, 1000, 4) 16 \n_________________________________________________________________\nconv1d (Conv1D) (None, 1000, 32) 416 \n_________________________________________________________________\nconv1d_1 (Conv1D) (None, 1000, 32) 3104 \n_________________________________________________________________\nmax_pooling1d (MaxPooling1D) (None, 500, 32) 0 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 500, 32) 3104 \n_________________________________________________________________\nconv1d_3 (Conv1D) (None, 500, 32) 3104 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 250, 32) 0 \n_________________________________________________________________\nconv1d_4 (Conv1D) (None, 250, 32) 3104 \n_________________________________________________________________\nconv1d_5 (Conv1D) (None, 250, 32) 3104 \n_________________________________________________________________\nmax_pooling1d_2 (MaxPooling1 (None, 125, 32) 0 \n_________________________________________________________________\nconv1d_6 (Conv1D) (None, 125, 32) 3104 \n_________________________________________________________________\nconv1d_7 (Conv1D) (None, 125, 32) 3104 \n_________________________________________________________________\nmax_pooling1d_3 (MaxPooling1 (None, 62, 32) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 1984) 0 \n_________________________________________________________________\ndense (Dense) (None, 24) 47640 \n_________________________________________________________________\ndropout (Dropout) (None, 24) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 24) 600 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 24) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 25 \n=================================================================\nTotal params: 70,425\nTrainable params: 70,425\nNon-trainable params: 0\n_________________________________________________________________\nNone\n"
],
[
"def load_data_from_files(nc_filename,pc_filename):\n FREEMEM=True # use False for debugging, True for production\n print(\"Load data from files.\")\n nc_seq=tools.load_fasta(nc_filename,0)\n pc_seq=tools.load_fasta(pc_filename,1)\n train_set=pd.concat((nc_seq,pc_seq),axis=0)\n print(\"Ready: train_set\")\n subset=tools.make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y\n if FREEMEM:\n nc_seq=None\n pc_seq=None\n train_set=None\n (X1,y_train)=tools.separate_X_and_y(subset)\n # X1 is pandas df of (\"list\" of one sequence)\n X2=X1.to_numpy() # numpy ndarray of (\"list\" of one sequence)\n X3=[elem[0] for elem in X2] # numpy ndarray of ACGT-str\n # X3? It might be faster to use int-array than char-array. Come back to this.\n X4=X3 # no-op\n print(\"X4\",type(X4))\n #print(X4[0])\n if FREEMEM:\n X1=None\n X2=None\n X3=None\n X5=[]\n dna_to_int = {'A':0,'C':1,'G':2,'T':3}\n for x in X4:\n a=[]\n for c in x:\n i = dna_to_int[c]\n a.append(i)\n X5.append(a)\n X5=np.asarray(X5)\n print(\"X5\",type(X5))\n print(X5.shape)\n if FREEMEM:\n X4=None\n X_train=X5\n if FREEMEM:\n X5=None\n print(\"X_train\",type(X_train))\n y_train=y_train.to_numpy()\n print(X_train.shape)\n print(X_train[0].shape)\n print(X_train[0])\n return X_train,y_train\n",
"_____no_output_____"
],
[
"print(\"Loading training data...\")\nX_train,y_train = load_data_from_files(NC_FILENAME,PC_FILENAME)",
"Loading training data...\nLoad data from files.\nReady: train_set\nX4 <class 'list'>\nX5 <class 'numpy.ndarray'>\n(6000, 1000)\nX_train <class 'numpy.ndarray'>\n(6000, 1000)\n(1000,)\n[3 1 1 0 2 3 2 2 3 2 1 1 1 0 2 1 2 2 0 0 0 1 0 3 3 3 3 3 1 0 2 2 3 3 2 3 2\n 2 1 3 2 3 2 3 0 2 2 1 0 0 0 1 2 2 0 2 1 2 0 3 2 0 1 3 3 2 2 0 2 2 3 3 3 3\n 3 0 3 3 1 3 2 3 1 2 3 1 3 0 1 1 3 1 3 0 2 1 2 3 3 0 3 2 0 0 2 2 3 2 3 1 2\n 3 1 2 2 2 1 1 1 1 2 3 3 1 3 1 1 2 1 3 1 3 1 2 0 2 1 3 2 3 3 3 2 1 1 2 3 3\n 1 2 2 1 1 0 2 0 1 3 0 1 2 2 1 2 0 0 3 0 0 2 1 3 2 3 3 0 1 1 0 3 2 3 0 1 3\n 0 1 0 3 1 3 0 2 0 1 0 3 0 0 2 1 0 0 3 1 2 1 2 0 0 1 1 2 0 3 2 2 3 0 2 2 1\n 3 3 1 2 3 2 3 1 2 0 1 3 2 0 3 2 0 3 0 2 3 2 2 1 0 1 2 2 1 0 2 3 1 0 0 0 0\n 2 1 0 0 0 1 2 2 2 1 2 0 1 0 3 3 1 1 3 1 1 2 2 3 0 1 1 1 3 3 3 1 1 3 0 1 3\n 0 1 0 3 1 2 2 2 0 2 0 0 2 0 1 1 1 3 1 2 3 2 2 3 1 0 1 0 0 2 3 2 2 2 2 2 3\n 3 2 1 0 3 1 0 0 3 2 3 2 2 3 3 1 2 0 0 1 3 0 0 3 0 3 0 1 3 2 1 1 3 0 1 0 0\n 2 3 0 3 1 2 1 0 1 0 2 0 1 2 1 1 3 0 2 2 0 1 0 2 3 3 3 1 2 1 2 2 2 3 3 0 2\n 2 3 1 0 1 2 2 1 2 2 1 3 2 1 0 2 1 3 0 0 3 2 3 1 1 0 0 0 3 1 0 2 0 2 3 3 1\n 3 0 3 0 2 3 2 2 3 1 1 2 0 0 0 1 1 2 2 3 2 3 0 1 0 3 1 0 0 0 3 2 3 2 1 1 2\n 2 1 0 3 1 3 1 1 0 1 0 2 2 1 2 3 1 0 2 3 3 1 2 2 2 2 0 2 3 3 1 0 1 3 1 2 2\n 3 0 2 2 1 0 0 3 2 1 0 2 0 1 1 1 2 0 0 3 2 2 1 3 0 2 3 1 1 1 3 2 0 1 0 3 3\n 3 0 1 1 2 3 3 1 0 0 2 1 3 3 3 1 0 1 2 2 2 3 3 3 1 0 3 3 3 2 3 0 1 3 3 3 0\n 1 0 2 1 2 1 3 2 0 0 2 2 2 3 3 3 1 1 2 3 3 2 3 2 1 0 3 1 0 0 2 0 1 3 2 1 1\n 1 0 3 1 0 3 3 0 3 2 2 0 1 3 2 2 0 0 1 0 0 1 3 3 2 2 2 1 3 0 3 1 2 0 2 3 1\n 1 0 2 2 3 3 1 3 1 3 1 1 2 1 3 0 1 1 0 0 3 0 3 1 1 3 2 2 2 3 0 0 2 2 2 1 1\n 0 2 1 2 3 3 2 2 1 1 2 1 0 1 0 0 0 1 1 3 0 1 1 0 3 0 3 1 1 0 3 2 1 0 2 0 0\n 2 3 3 0 2 1 2 2 1 3 3 0 2 3 2 0 2 3 0 1 0 2 2 2 2 1 2 0 2 0 0 2 0 1 0 1 3\n 1 0 1 1 2 1 2 1 1 1 0 2 2 3 0 1 2 0 1 2 1 1 0 3 1 0 3 2 3 0 3 0 3 0 2 1 3\n 0 2 1 0 1 1 2 1 2 0 2 0 0 3 2 2 1 0 2 2 2 3 1 0 0 0 0 3 1 2 0 3 0 0 0 2 2\n 2 3 1 1 2 2 2 3 0 0 3 2 2 2 2 0 3 3 3 1 1 2 2 0 1 3 0 0 1 2 1 2 0 0 0 2 2\n 1 0 3 3 2 3 2 1 0 1 0 2 1 0 3 0 2 1 0 0 1 2 3 1 0 3 2 0 0 1 1 0 2 2 1 0 2\n 1 1 0 0 0 0 1 3 2 0 3 1 3 2 0 1 3 1 1 3 3 3 1 3 1 0 2 1 1 3 0 0 0 1 2 3 3\n 3 0 0 2 0 0 2 2 2 1 0 1 3 2 3 3 2 3 1 1 3 0 3 2 2 0 1 1 1 1 2 1 2 3 3 1 2\n 1]\n"
],
[
"print(\"Loading validation data...\")\nX_VALID,Y_VALID = load_data_from_files(NC_VAL_FILE,PC_VAL_FILE)",
"Loading validation data...\nLoad data from files.\nReady: train_set\nX4 <class 'list'>\nX5 <class 'numpy.ndarray'>\n(100000, 1000)\nX_train <class 'numpy.ndarray'>\n(100000, 1000)\n(1000,)\n[0 2 0 0 3 3 1 2 2 3 1 2 2 0 2 0 2 2 2 1 2 0 3 2 0 3 0 0 3 3 2 3 0 3 3 1 0\n 1 1 0 2 1 1 1 1 2 3 3 0 2 1 1 3 2 3 2 1 2 3 3 0 0 1 2 2 2 3 2 0 1 2 2 2 2\n 2 1 3 3 0 1 2 0 1 2 0 3 2 3 2 3 1 3 3 1 3 0 2 1 1 1 2 3 2 2 2 1 0 3 3 2 3\n 3 3 1 1 3 3 2 1 1 2 1 3 2 2 1 3 1 3 1 0 0 3 2 2 2 1 2 0 0 1 0 2 0 1 3 2 1\n 3 2 2 0 3 1 2 0 3 3 3 2 3 2 1 0 0 3 0 2 0 2 1 2 1 1 0 2 3 0 1 2 0 1 2 2 0\n 2 1 2 3 1 1 1 2 3 3 3 2 1 1 0 2 3 1 1 2 1 0 0 3 2 1 0 2 2 3 2 0 0 3 1 3 0\n 2 3 0 2 3 1 2 3 1 1 2 3 3 3 3 3 0 0 1 0 0 0 2 0 2 2 0 3 3 0 0 0 0 2 2 2 1\n 0 0 1 1 1 0 2 0 3 1 2 0 0 3 3 1 1 2 1 3 2 3 2 1 2 2 0 0 1 0 1 0 3 3 3 0 0\n 0 2 3 0 3 2 3 0 1 1 3 0 1 1 1 3 2 1 3 3 1 0 0 0 1 2 3 0 2 3 1 1 1 3 0 3 1\n 2 0 1 2 1 2 2 3 1 1 1 2 0 2 0 0 3 3 0 2 1 0 0 1 0 1 3 3 2 3 3 1 0 3 2 3 0\n 2 3 3 2 0 1 0 0 3 1 1 2 2 1 3 2 0 1 0 0 3 2 3 1 0 3 0 3 3 1 1 3 0 0 0 1 2\n 1 1 1 3 1 2 2 1 1 1 2 1 0 1 1 2 3 2 1 2 1 3 3 2 3 2 2 2 3 3 0 3 1 0 3 3 0\n 3 2 2 0 1 2 1 1 1 0 3 3 0 1 1 2 3 2 1 2 2 2 0 0 0 2 1 2 1 3 0 3 1 0 0 1 3\n 2 3 3 2 1 0 3 3 0 1 1 0 0 3 3 1 0 2 2 1 2 0 0 0 2 0 1 2 2 3 1 0 0 2 0 0 1\n 3 3 2 0 1 0 3 1 3 2 2 0 1 1 0 2 0 0 0 3 2 3 2 0 0 3 3 2 1 3 0 0 2 1 2 1 2\n 1 0 2 2 2 3 2 3 3 2 1 2 0 0 2 3 1 0 1 2 1 3 2 3 1 0 2 0 2 1 0 2 1 1 1 1 3\n 1 2 3 3 0 3 0 2 2 1 2 3 2 0 2 0 2 3 2 3 3 0 0 1 0 3 1 1 0 0 2 0 1 3 3 2 1\n 3 3 3 0 2 2 3 1 2 1 3 2 3 0 2 3 1 0 2 0 2 2 0 1 3 3 0 0 0 3 0 1 1 0 0 3 0\n 0 1 2 0 2 3 1 1 2 3 2 2 1 2 1 0 1 0 3 0 3 1 1 1 3 3 3 2 2 3 3 3 1 3 3 1 3\n 0 2 3 0 2 2 3 0 3 3 1 3 1 1 2 3 0 3 2 0 1 1 3 1 1 1 1 3 0 1 3 2 1 0 2 3 2\n 3 2 3 2 2 1 2 2 1 1 2 3 1 2 0 1 3 0 1 3 1 0 1 3 3 3 3 1 1 1 1 2 2 2 0 0 2\n 1 0 0 3 1 0 2 2 2 2 1 2 3 2 2 0 3 1 1 1 1 3 0 0 2 1 3 0 2 2 0 3 3 0 2 2 2\n 3 2 1 3 3 1 3 3 3 2 0 1 3 2 0 2 2 1 0 2 3 1 0 1 3 0 1 1 1 0 1 3 2 3 2 2 3\n 2 3 1 0 1 0 3 0 1 1 2 0 3 1 0 2 1 1 2 0 0 1 3 2 1 3 1 3 0 2 3 1 0 1 0 2 2\n 0 1 3 2 3 1 2 0 2 2 3 3 3 2 0 3 1 1 1 1 0 2 3 2 1 0 3 3 1 3 2 1 0 0 1 2 2\n 3 2 1 0 0 2 2 3 0 2 0 1 2 3 3 1 0 3 2 3 0 2 1 2 3 0 1 0 2 3 1 0 1 0 1 0 1\n 0 0 3 1 3 0 1 2 0 0 3 0 2 2 1 1 1 3 2 0 2 2 1 2 0 0 1 1 2 2 1 1 1 1 1 2 0\n 3]\n"
],
[
"print (\"Cross validation\")\ndo_cross_validation(X_train,y_train,model,X_VALID,Y_VALID) \nprint (\"Done\")",
"Cross validation\nCOMPILE...\n...COMPILED\nFIT\nEpoch 1/20\n169/169 [==============================] - 6s 10ms/step - loss: 0.6958 - accuracy: 0.5135 - val_loss: 0.6922 - val_accuracy: 0.5450\nINFO:tensorflow:Assets written to: /content/drive/My Drive/data/JUNK1.cv.1.best/assets\nEpoch 2/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.6944 - accuracy: 0.5209 - val_loss: 0.6721 - val_accuracy: 0.6083\nINFO:tensorflow:Assets written to: /content/drive/My Drive/data/JUNK1.cv.1.best/assets\nEpoch 3/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.6839 - accuracy: 0.5663 - val_loss: 0.6535 - val_accuracy: 0.6200\nINFO:tensorflow:Assets written to: /content/drive/My Drive/data/JUNK1.cv.1.best/assets\nEpoch 4/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.6512 - accuracy: 0.6125 - val_loss: 0.6415 - val_accuracy: 0.6483\nINFO:tensorflow:Assets written to: /content/drive/My Drive/data/JUNK1.cv.1.best/assets\nEpoch 5/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.6208 - accuracy: 0.6681 - val_loss: 0.6478 - val_accuracy: 0.6217\nEpoch 6/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.5651 - accuracy: 0.7185 - val_loss: 0.6732 - val_accuracy: 0.6233\nEpoch 7/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.5042 - accuracy: 0.7609 - val_loss: 0.7150 - val_accuracy: 0.6217\nEpoch 8/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.4614 - accuracy: 0.7958 - val_loss: 0.7561 - val_accuracy: 0.6050\nEpoch 9/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.3846 - accuracy: 0.8425 - val_loss: 0.8151 - val_accuracy: 0.6150\nEpoch 10/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.3291 - accuracy: 0.8636 - val_loss: 0.9421 - val_accuracy: 0.6033\nEpoch 11/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.2704 - accuracy: 0.8886 - val_loss: 1.0095 - val_accuracy: 0.6083\nEpoch 12/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.2121 - accuracy: 0.9196 - val_loss: 1.1611 - val_accuracy: 0.5817\nEpoch 13/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.1893 - accuracy: 0.9250 - val_loss: 1.2142 - val_accuracy: 0.5817\nEpoch 14/20\n169/169 [==============================] - 1s 5ms/step - loss: 0.1605 - accuracy: 0.9383 - val_loss: 1.3505 - val_accuracy: 0.6050\nEpoch 15/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.1566 - accuracy: 0.9398 - val_loss: 1.3819 - val_accuracy: 0.6250\nEpoch 16/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.1145 - accuracy: 0.9587 - val_loss: 1.4849 - val_accuracy: 0.6150\nEpoch 17/20\n169/169 [==============================] - 1s 5ms/step - loss: 0.1416 - accuracy: 0.9505 - val_loss: 1.5617 - val_accuracy: 0.5917\nEpoch 18/20\n169/169 [==============================] - 1s 5ms/step - loss: 0.0954 - accuracy: 0.9631 - val_loss: 1.5868 - val_accuracy: 0.6067\nEpoch 19/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.0867 - accuracy: 0.9677 - val_loss: 1.6941 - val_accuracy: 0.6033\nEpoch 20/20\n169/169 [==============================] - 1s 6ms/step - loss: 0.1140 - accuracy: 0.9536 - val_loss: 1.5402 - val_accuracy: 0.5983\nFold 1, 20 epochs, 33 sec\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdac0d68f4aa865d89488d5cdbc891d1a2689b6 | 3,126 | ipynb | Jupyter Notebook | 180726_calculate/examples/plot.ipynb | newlawrence/Talks | d4f4e6b6ecf8f1aaf32ca9a2364c99ee23612745 | [
"CC-BY-4.0"
] | null | null | null | 180726_calculate/examples/plot.ipynb | newlawrence/Talks | d4f4e6b6ecf8f1aaf32ca9a2364c99ee23612745 | [
"CC-BY-4.0"
] | 1 | 2019-01-15T14:21:34.000Z | 2019-01-15T15:19:41.000Z | 180726_calculate/examples/plot.ipynb | newlawrence/Talks | d4f4e6b6ecf8f1aaf32ca9a2364c99ee23612745 | [
"CC-BY-4.0"
] | null | null | null | 22.985294 | 89 | 0.493282 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdac4100c4bfc3ac018057a9ce90d58186f6d49 | 123,653 | ipynb | Jupyter Notebook | techjam_label_transformation.ipynb | KBTG-TechJam/techjam2019-datatrack-code-example | 9c0c1f9ab23fe4150798a36e243ce724c1c3b3f2 | [
"Apache-2.0"
] | 4 | 2019-12-21T12:20:37.000Z | 2020-09-25T09:37:05.000Z | techjam_label_transformation.ipynb | KBTG-TechJam/techjam2019-datatrack-code-example | 9c0c1f9ab23fe4150798a36e243ce724c1c3b3f2 | [
"Apache-2.0"
] | null | null | null | techjam_label_transformation.ipynb | KBTG-TechJam/techjam2019-datatrack-code-example | 9c0c1f9ab23fe4150798a36e243ce724c1c3b3f2 | [
"Apache-2.0"
] | 1 | 2020-01-30T20:26:42.000Z | 2020-01-30T20:26:42.000Z | 85.336784 | 31,608 | 0.819632 | [
[
[
"import os\nimport datetime\nimport gc\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport lightgbm as lgb\nimport numpy as np\nimport seaborn as sns\nfrom scipy import stats\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import QuantileTransformer\n\n### feature engineer part\nimport techjam_fe\n\n%matplotlib inline",
"_____no_output_____"
],
[
"def techjam_score(y_pred, y_true):\n y_pred = np.array(y_pred)\n y_true = np.array(y_true)\n \n return 100 - 100 * np.mean((y_pred-y_true) ** 2 / (np.minimum(2*y_true, y_pred) + y_true)**2)",
"_____no_output_____"
],
[
"# Edit data directory here\nDATA_DIR = \".\\\\techjam\"",
"_____no_output_____"
],
[
"X, y, test_df = techjam_fe.get_prep_data(DATA_DIR)",
"C:\\Users\\phiratath.n\\Desktop\\tj\\techjam_fe.py:28: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n label = pd.concat([train,test],axis=0,ignore_index=True)\n"
],
[
"#### Add categorical feature\ncategorical_feats = ['gender','ocp_cd','age_gnd','gnd_ocp','age_ocp']\nX[categorical_feats] = X[categorical_feats].astype('category')\ntest_df[categorical_feats] = test_df[categorical_feats].astype('category')",
"_____no_output_____"
],
[
"X.reset_index(drop=True,inplace=True)\ntest_df.reset_index(inplace=True)",
"_____no_output_____"
]
],
[
[
"# Test Train Split",
"_____no_output_____"
]
],
[
[
"### Log Transformation\ny_log = np.log(y)",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y_log, test_size=0.2, random_state=42)\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)",
"_____no_output_____"
]
],
[
[
"# Model ",
"_____no_output_____"
]
],
[
[
"lgb_train = lgb.Dataset(X_train, y_train)\nlgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train)",
"_____no_output_____"
],
[
"##### Log Transformation ",
"_____no_output_____"
],
[
"def techjam_feval(y_pred, dtrain):\n y_true = dtrain.get_label()\n return 'techjam_score', techjam_score(np.exp(y_pred), np.exp(y_true)), True",
"_____no_output_____"
],
[
"pred_score_list=[]",
"_____no_output_____"
],
[
"\nmodel_params = {\n \"objective\": \"mape\",\n 'boosting_type': 'gbdt',\n 'metric': {'mape'},\n 'num_leaves': 511,\n 'learning_rate': 0.01,\n 'feature_fraction': 0.7,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}",
"_____no_output_____"
],
[
"gbm = lgb.train(model_params,\n lgb_train,\n num_boost_round=1000,\n valid_sets=lgb_eval,\n feval= techjam_feval,\n verbose_eval=50,\n early_stopping_rounds=50)",
"Training until validation scores don't improve for 50 rounds\n[50]\tvalid_0's mape: 0.0454491\tvalid_0's techjam_score: 91.3665\n[100]\tvalid_0's mape: 0.0422578\tvalid_0's techjam_score: 92.1406\n[150]\tvalid_0's mape: 0.0407483\tvalid_0's techjam_score: 92.4009\n[200]\tvalid_0's mape: 0.04001\tvalid_0's techjam_score: 92.4645\n[250]\tvalid_0's mape: 0.0396312\tvalid_0's techjam_score: 92.4538\nEarly stopping, best iteration is:\n[219]\tvalid_0's mape: 0.0398344\tvalid_0's techjam_score: 92.4686\n"
],
[
"y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)",
"_____no_output_____"
],
[
"X_test['pred'] = np.exp(y_pred)",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"pred_score_list.append(np.exp(y_pred))",
"_____no_output_____"
],
[
"techjam_score(X_test['pred'],np.exp(y_test['income']))",
"_____no_output_____"
],
[
"#### Raw Income",
"_____no_output_____"
],
[
"def techjam_feval_no_log(y_pred, dtrain):\n y_true = dtrain.get_label()\n return 'techjam_score', techjam_score((y_pred), (y_true)), True",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, (y_train), test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"lgb_train = lgb.Dataset(X_train, y_train,free_raw_data=False)\nlgb_eval = lgb.Dataset(X_val, y_val,free_raw_data=False, reference=lgb_train)",
"_____no_output_____"
],
[
"gbm = lgb.train(model_params,\n lgb_train,\n num_boost_round=1000,\n valid_sets=lgb_eval,\n feval= techjam_feval_no_log,\n verbose_eval=50,\n early_stopping_rounds=50)",
"Training until validation scores don't improve for 50 rounds\n[50]\tvalid_0's mape: 0.403095\tvalid_0's techjam_score: 88.7412\n[100]\tvalid_0's mape: 0.388182\tvalid_0's techjam_score: 89.7564\n[150]\tvalid_0's mape: 0.380022\tvalid_0's techjam_score: 90.3626\n[200]\tvalid_0's mape: 0.375079\tvalid_0's techjam_score: 90.77\n[250]\tvalid_0's mape: 0.372082\tvalid_0's techjam_score: 91.0265\n[300]\tvalid_0's mape: 0.370303\tvalid_0's techjam_score: 91.2138\n[350]\tvalid_0's mape: 0.369347\tvalid_0's techjam_score: 91.3234\n[400]\tvalid_0's mape: 0.368815\tvalid_0's techjam_score: 91.4065\n[450]\tvalid_0's mape: 0.368535\tvalid_0's techjam_score: 91.4577\n[500]\tvalid_0's mape: 0.368326\tvalid_0's techjam_score: 91.5051\n[550]\tvalid_0's mape: 0.368207\tvalid_0's techjam_score: 91.5427\n[600]\tvalid_0's mape: 0.368149\tvalid_0's techjam_score: 91.576\n[650]\tvalid_0's mape: 0.368131\tvalid_0's techjam_score: 91.6003\n[700]\tvalid_0's mape: 0.368097\tvalid_0's techjam_score: 91.627\n[750]\tvalid_0's mape: 0.368141\tvalid_0's techjam_score: 91.6524\nEarly stopping, best iteration is:\n[719]\tvalid_0's mape: 0.36809\tvalid_0's techjam_score: 91.6361\n"
],
[
"y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)\n\n",
"_____no_output_____"
],
[
"X_test['pred'] = y_pred",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"pred_score_list.append(y_pred)",
"_____no_output_____"
],
[
"techjam_score(X_test['pred'],y_test['income'])",
"_____no_output_____"
],
[
"#### Quantile Transformer",
"_____no_output_____"
],
[
"from sklearn.preprocessing import QuantileTransformer",
"_____no_output_____"
],
[
"def techjam_feval_quantile(y_pred, dtrain):\n y_true = dtrain.get_label()\n y_pred = qt.inverse_transform(y_pred.reshape((len(y_pred), 1)))[:,0]\n y_true = qt.inverse_transform(y_true.reshape((len(y_true), 1)))[:,0]\n return 'techjam_score', techjam_score(y_pred, y_true), True",
"_____no_output_____"
],
[
"#### Leak in Validate set",
"_____no_output_____"
],
[
"qt = QuantileTransformer(n_quantiles=10000, output_distribution='uniform', random_state=42)\n#y_q = qt.fit_transform(y_train.values.reshape((len(y_q), 1)))[:,0]\n#y_val = qt.transform(y_val.values.reshape((len(y_val), 1)))[:,0]",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, (y), test_size=0.2, random_state=42)\ny_q = qt.fit_transform(y_train.values.reshape((len(y_train), 1)))[:,0]\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, (y_q), test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"lgb_train = lgb.Dataset(X_train, y_train,free_raw_data=False)\nlgb_eval = lgb.Dataset(X_val, y_val,free_raw_data=False, reference=lgb_train)",
"_____no_output_____"
],
[
"gbm = lgb.train(model_params,\n lgb_train,\n num_boost_round=1000,\n valid_sets=lgb_eval,\n feval= techjam_feval_quantile,\n verbose_eval=50,\n early_stopping_rounds=50)",
"Training until validation scores don't improve for 50 rounds\n[50]\tvalid_0's mape: 0.212844\tvalid_0's techjam_score: 91.1185\n[100]\tvalid_0's mape: 0.195744\tvalid_0's techjam_score: 91.9179\n[150]\tvalid_0's mape: 0.187604\tvalid_0's techjam_score: 92.2658\n[200]\tvalid_0's mape: 0.183451\tvalid_0's techjam_score: 92.4142\n[250]\tvalid_0's mape: 0.181352\tvalid_0's techjam_score: 92.4658\n[300]\tvalid_0's mape: 0.180217\tvalid_0's techjam_score: 92.4769\nEarly stopping, best iteration is:\n[286]\tvalid_0's mape: 0.180478\tvalid_0's techjam_score: 92.4791\n"
],
[
"y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)\ny_pred = qt.inverse_transform(y_pred.reshape((len(y_pred), 1)))[:,0]",
"_____no_output_____"
],
[
"X_test['pred'] = y_pred",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"techjam_score(X_test['pred'],y_test['income'])",
"_____no_output_____"
],
[
"#### No leak in Validate set",
"_____no_output_____"
],
[
"qt = QuantileTransformer(n_quantiles=10000, output_distribution='uniform', random_state=42)",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, (y), test_size=0.2, random_state=42)\n#y_q = qt.fit_transform(y_train.values.reshape((len(y_train), 1)))[:,0]\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, (y_train), test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"\ny_raw = y_train.copy()\n\n#### Split before use transformer\n\ny_train= qt.fit_transform(y_train.values.reshape((len(y_train), 1)))[:,0]\n\n### Only transform\n\ny_val= qt.transform(y_val.values.reshape((len(y_val), 1)))[:,0]",
"_____no_output_____"
],
[
"#### Compare distribution",
"_____no_output_____"
],
[
"y_raw.values.reshape((len(y_train), 1))[:,0]",
"_____no_output_____"
],
[
"\n\nax = sns.kdeplot(y_raw.values.reshape((len(y_train), 1))[:,0], label='raw_income')\n#ax = sns.kdeplot(y_train, label='income')\nax.set(xlabel='income', ylabel='density')\nplt.legend()",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"#ax = sns.kdeplot(y_raw, label='raw_income')\nax = sns.kdeplot(y_train, label='income')\nax.set(xlabel='income', ylabel='density')\nplt.legend()",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"lgb_train = lgb.Dataset(X_train,y_train,free_raw_data=False)\nlgb_eval = lgb.Dataset(X_val, y_val,free_raw_data=False, reference=lgb_train)",
"_____no_output_____"
],
[
"gbm = lgb.train(model_params,\n lgb_train,\n num_boost_round=1000,\n valid_sets=lgb_eval,\n feval= techjam_feval_quantile,\n verbose_eval=50,\n early_stopping_rounds=50)",
"Training until validation scores don't improve for 50 rounds\n[50]\tvalid_0's mape: 0.212269\tvalid_0's techjam_score: 91.1147\n[100]\tvalid_0's mape: 0.195202\tvalid_0's techjam_score: 91.9151\n[150]\tvalid_0's mape: 0.186972\tvalid_0's techjam_score: 92.276\n[200]\tvalid_0's mape: 0.182923\tvalid_0's techjam_score: 92.4251\n[250]\tvalid_0's mape: 0.180862\tvalid_0's techjam_score: 92.471\n[300]\tvalid_0's mape: 0.179738\tvalid_0's techjam_score: 92.4783\nEarly stopping, best iteration is:\n[291]\tvalid_0's mape: 0.179886\tvalid_0's techjam_score: 92.4882\n"
],
[
"y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)\ny_pred = qt.inverse_transform(y_pred.reshape((len(y_pred), 1)))[:,0]",
"_____no_output_____"
],
[
"X_test['pred'] = y_pred",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"pred_score_list.append(y_pred)",
"_____no_output_____"
],
[
"techjam_score(X_test['pred'],y_test['income'])",
"_____no_output_____"
],
[
"#### Clip Target",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, (y), test_size=0.2, random_state=42)\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train.clip(upper=np.percentile(y_train,95),lower=np.percentile(y_train,5)), test_size=0.2, random_state=42)",
"_____no_output_____"
],
[
"lgb_train = lgb.Dataset(X_train,y_train,free_raw_data=False)\nlgb_eval = lgb.Dataset(X_val, y_val,free_raw_data=False, reference=lgb_train)",
"_____no_output_____"
],
[
"gbm = lgb.train(model_params,\n lgb_train,\n num_boost_round=1000,\n valid_sets=lgb_eval,\n feval= techjam_feval_no_log,\n verbose_eval=50,\n early_stopping_rounds=50)",
"Training until validation scores don't improve for 50 rounds\n[50]\tvalid_0's mape: 0.388839\tvalid_0's techjam_score: 89.9901\n[100]\tvalid_0's mape: 0.374004\tvalid_0's techjam_score: 90.9528\n[150]\tvalid_0's mape: 0.365797\tvalid_0's techjam_score: 91.5199\n[200]\tvalid_0's mape: 0.360518\tvalid_0's techjam_score: 91.912\n[250]\tvalid_0's mape: 0.35778\tvalid_0's techjam_score: 92.137\n[300]\tvalid_0's mape: 0.355983\tvalid_0's techjam_score: 92.298\n[350]\tvalid_0's mape: 0.354993\tvalid_0's techjam_score: 92.3956\n[400]\tvalid_0's mape: 0.354261\tvalid_0's techjam_score: 92.4796\n[450]\tvalid_0's mape: 0.353681\tvalid_0's techjam_score: 92.5435\n[500]\tvalid_0's mape: 0.353355\tvalid_0's techjam_score: 92.5975\n[550]\tvalid_0's mape: 0.35319\tvalid_0's techjam_score: 92.6385\nEarly stopping, best iteration is:\n[541]\tvalid_0's mape: 0.353158\tvalid_0's techjam_score: 92.6344\n"
],
[
"y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)",
"_____no_output_____"
],
[
"X_test['pred'] = y_pred",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"techjam_score(X_test['pred'],y_test['income'])",
"_____no_output_____"
],
[
"pred_score_list.append(y_pred)",
"_____no_output_____"
]
],
[
[
"# Predict Distribution ",
"_____no_output_____"
]
],
[
[
"bin_df = pd.DataFrame(y_test)\nfor i in range(5):\n bin_df.loc[(bin_df['income'] > 20000*i)&(bin_df['income'] <= 20000*(i+1)),'bin']=i\n if i==4:\n bin_df.loc[(bin_df['income'] > 100000),'bin']=i",
"_____no_output_____"
],
[
"ax = sns.kdeplot(pred_score_list[0], label='loged_income')\nax = sns.kdeplot(pred_score_list[1], label='income')\nax = sns.kdeplot(pred_score_list[-1], label='clipped_income')\nax = sns.kdeplot(pred_score_list[2], label='quantile_income')\nax.set(xlabel='income', ylabel='density')\nplt.legend()",
"C:\\Users\\phiratath.n\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"bin_df['loged_income']=pred_score_list[0]\nbin_df['raw_income']=pred_score_list[1]\nbin_df['clipped_income']=pred_score_list[3]\nbin_df['quantile_income']=pred_score_list[2]",
"_____no_output_____"
],
[
"bin_df.head()",
"_____no_output_____"
],
[
"y_test =bin_df['income'].values",
"_____no_output_____"
],
[
"score_summary = pd.DataFrame(bin_df.apply(lambda x:techjam_score(x,y_test)),columns=['techjam_score'])",
"_____no_output_____"
],
[
"score_summary.iloc[2:,:]",
"_____no_output_____"
],
[
"df_log=pd.DataFrame(bin_df.groupby('bin').apply(lambda x: techjam_score(x['loged_income'],x['income'])))\ndf_raw=pd.DataFrame(bin_df.groupby('bin').apply(lambda x: techjam_score(x['raw_income'],x['income'])))\ndf_clip=pd.DataFrame(bin_df.groupby('bin').apply(lambda x: techjam_score(x['clipped_income'],x['income'])))\ndf_quantile=pd.DataFrame(bin_df.groupby('bin').apply(lambda x: techjam_score(x['quantile_income'],x['income'])))\nbin_summary = pd.concat([df_raw,df_clip,df_log,df_quantile],axis=1)",
"_____no_output_____"
],
[
"bin_summary.columns = ['raw_income','clipped_income','loged_income','quantile_income']",
"_____no_output_____"
],
[
"bin_summary['income bin'] = ['0-20,000','20,000-40,000','40,000-60,000','60,000-80,000','100,000+']",
"_____no_output_____"
],
[
"bin_summary",
"_____no_output_____"
],
[
"fig_dims = (12, 9)\ndata = bin_summary.melt('income bin', var_name='group_income', value_name='techjam_score')\nfig, ax = plt.subplots(figsize=fig_dims)\nax=sns.barplot(x='income bin', y='techjam_score', hue='group_income', data=data)\n\nax.set_xticklabels(ax.get_xticklabels())",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdad3095b2a169a8e92e8b46e64e257fb5c81c3 | 12,557 | ipynb | Jupyter Notebook | Learning Deconvolution Network for Semantic Segmentation (DeconvNet) Review/code/DeconvNet.ipynb | choco9966/Semantic-Segmentation-Review | 2518a0ca39bf9f6459f8fbf9c8dc26b6572abdf1 | [
"Apache-2.0"
] | 23 | 2020-12-15T16:11:00.000Z | 2021-12-22T03:11:31.000Z | Learning Deconvolution Network for Semantic Segmentation (DeconvNet) Review/code/DeconvNet.ipynb | hjcho12/Semantic-Segmentation-Review | 2518a0ca39bf9f6459f8fbf9c8dc26b6572abdf1 | [
"Apache-2.0"
] | null | null | null | Learning Deconvolution Network for Semantic Segmentation (DeconvNet) Review/code/DeconvNet.ipynb | hjcho12/Semantic-Segmentation-Review | 2518a0ca39bf9f6459f8fbf9c8dc26b6572abdf1 | [
"Apache-2.0"
] | 10 | 2021-02-10T10:40:51.000Z | 2021-09-22T09:14:37.000Z | 33.307692 | 111 | 0.417536 | [
[
[
"'''\nreference \nhttp://cvlab.postech.ac.kr/research/deconvnet/model/DeconvNet/DeconvNet_inference_deploy.prototxt\n'''\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\n\n\n\nclass DeconvNet(nn.Module):\n def __init__(self, num_classes=21, init_weights=True):\n super(DeconvNet, self).__init__()\n self.relu = nn.ReLU(inplace=True)\n \n def CBR(in_channels, out_channels, kernel_size=3, stride=1, padding=1):\n \n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels, \n out_channels=out_channels,\n kernel_size=kernel_size, \n stride=stride, \n padding=padding),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n \n def DCB(in_channels, out_channels, kernel_size=3, stride=1, padding=1):\n \n return nn.Sequential(\n nn.ConvTranspose2d(in_channels=in_channels, \n out_channels=out_channels,\n kernel_size=kernel_size, \n stride=stride,\n padding=padding),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()) \n \n '''\n input: \"data\"\n input_dim: 1\n input_dim: 3\n input_dim: 224\n input_dim: 224\n '''\n \n # 224 x 224\n # conv1\n self.conv1_1 = CBR(3, 64, 3, 1, 1)\n self.conv1_2 = CBR(64, 64, 3, 1, 1)\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True, return_indices=True) # 1/2\n \n # 112 x 112\n # conv2 \n self.conv2_1 = CBR(64, 128, 3, 1, 1)\n self.conv2_2 = CBR(128, 128, 3, 1, 1)\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True, return_indices=True) # 1/4\n \n # 56 x 56\n # conv3\n self.conv3_1 = CBR(128, 256, 3, 1, 1)\n self.conv3_2 = CBR(256, 256, 3, 1, 1)\n self.conv3_3 = CBR(256, 256, 3, 1, 1) \n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True, return_indices=True) # 1/8\n \n # 28 x 28\n # conv4\n self.conv4_1 = CBR(256, 512, 3, 1, 1)\n self.conv4_2 = CBR(512, 512, 3, 1, 1)\n self.conv4_3 = CBR(512, 512, 3, 1, 1) \n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True, return_indices=True) # 1/16\n \n # 14 x 14\n # conv5\n self.conv5_1 = CBR(512, 512, 3, 1, 1)\n self.conv5_2 = CBR(512, 512, 3, 1, 1)\n self.conv5_3 = CBR(512, 512, 3, 1, 1)\n self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True, return_indices=True)\n \n # 7 x 7\n # fc6\n self.fc6 = CBR(512, 4096, 7, 1, 0)\n self.drop6 = nn.Dropout2d(0.5)\n \n # 1 x 1\n # fc7\n self.fc7 = CBR(4096, 4096, 1, 1, 0)\n self.drop7 = nn.Dropout2d(0.5)\n \n # 7 x 7\n # fc6-deconv\n self.fc6_deconv = DCB(4096, 512, 7, 1, 0)\n \n # 14 x 14\n # unpool5\n self.unpool5 = nn.MaxUnpool2d(2, stride=2)\n self.deconv5_1 = DCB(512, 512, 3, 1, 1)\n self.deconv5_2 = DCB(512, 512, 3, 1, 1)\n self.deconv5_3 = DCB(512, 512, 3, 1, 1)\n \n # 28 x 28\n # unpool4\n self.unpool4 = nn.MaxUnpool2d(2, stride=2)\n self.deconv4_1 = DCB(512, 512, 3, 1, 1)\n self.deconv4_2 = DCB(512, 512, 3, 1, 1)\n self.deconv4_3 = DCB(512, 256, 3, 1, 1) \n\n # 56 x 56 \n # unpool3\n self.unpool3 = nn.MaxUnpool2d(2, stride=2)\n self.deconv3_1 = DCB(256, 256, 3, 1, 1)\n self.deconv3_2 = DCB(256, 256, 3, 1, 1)\n self.deconv3_3 = DCB(256, 128, 3, 1, 1) \n \n # 112 x 112 \n # unpool2\n self.unpool2 = nn.MaxUnpool2d(2, stride=2)\n self.deconv2_1 = DCB(128, 128, 3, 1, 1)\n self.deconv2_2 = DCB(128, 64, 3, 1, 1)\n\n # 224 x 224 \n # unpool1\n self.unpool1 = nn.MaxUnpool2d(2, stride=2)\n self.deconv1_1 = DCB(64, 64, 3, 1, 1)\n self.deconv1_2 = DCB(64, 64, 3, 1, 1)\n \n # Score\n self.score_fr = nn.Conv2d(64, num_classes, 1, 1, 0, 1)\n \n if init_weights:\n self._initialize_weights()\n \n def forward(self, x):\n \n h = self.conv1_1(x)\n h = self.conv1_2(h)\n h, pool1_indices = self.pool1(h)\n \n h = 
self.conv2_1(h)\n h = self.conv2_2(h)\n h, pool2_indices = self.pool2(h)\n \n h = self.conv3_1(h)\n h = self.conv3_2(h)\n h = self.conv3_3(h) \n h, pool3_indices = self.pool3(h)\n \n h = self.conv4_1(h)\n h = self.conv4_2(h)\n h = self.conv4_3(h) \n h, pool4_indices = self.pool4(h) \n \n h = self.conv5_1(h)\n h = self.conv5_2(h)\n h = self.conv5_3(h) \n h, pool5_indices = self.pool5(h)\n \n h = self.fc6(h)\n h = self.drop6(h)\n \n h = self.fc7(h)\n h = self.drop7(h)\n \n h = self.fc6_deconv(h) \n \n h = self.unpool5(h, pool5_indices)\n h = self.deconv5_1(h) \n h = self.deconv5_2(h) \n h = self.deconv5_3(h) \n\n h = self.unpool4(h, pool4_indices)\n h = self.deconv4_1(h) \n h = self.deconv4_2(h) \n h = self.deconv4_3(h) \n\n h = self.unpool3(h, pool3_indices)\n h = self.deconv3_1(h) \n h = self.deconv3_2(h) \n h = self.deconv3_3(h) \n \n h = self.unpool2(h, pool2_indices)\n h = self.deconv2_1(h) \n h = self.deconv2_2(h) \n\n h = self.unpool1(h, pool1_indices)\n h = self.deconv1_1(h) \n h = self.deconv1_2(h) \n \n \n h = self.score_fr(h) \n \n return h\n \n def _initialize_weights(self):\n for m in self.named_modules():\n if isinstance(m[1], nn.Conv2d):\n if m[0] == 'features.38':\n nn.init.normal_(m[1].weight.data, mean=0, std=0.01)\n nn.init.constant_(m[1].bias.data, 0.0)",
"_____no_output_____"
],
[
"# model 선언\nmodel = DeconvNet(num_classes=21, init_weights=True)",
"_____no_output_____"
],
[
"x = torch.rand(2, 3, 224, 224)\nx.shape",
"_____no_output_____"
],
[
"output = model(x)\noutput.shape",
"conv1(block) : torch.Size([2, 64, 112, 112])\nconv2(block) : torch.Size([2, 128, 56, 56])\nconv3(block) : torch.Size([2, 256, 28, 28])\nconv4(block) : torch.Size([2, 512, 14, 14])\nconv5(block) : torch.Size([2, 512, 7, 7])\nfc6 : torch.Size([2, 4096, 1, 1])\nfc7 : torch.Size([2, 4096, 1, 1])\nfc6_deconv : torch.Size([2, 512, 7, 7])\ndeconv5_3 : torch.Size([2, 512, 14, 14])\ndeconv4_3 : torch.Size([2, 256, 28, 28])\ndeconv3_3 : torch.Size([2, 128, 56, 56])\ndeconv2_3 : torch.Size([2, 64, 112, 112])\ndeconv1_3 : torch.Size([2, 64, 224, 224])\nscore_fr : torch.Size([2, 21, 224, 224])\n"
],
[
"def forward(x):\n \n h = self.conv1_1(x)\n h = self.conv1_2(h)\n h, pool1_indices = self.pool1(h)\n \n ... (중략) ...\n \n h = self.conv5_1(h)\n h = self.conv5_2(h)\n h = self.conv5_3(h) \n h, pool5_indices = self.pool5(h)\n \n ... (중략) ...\n \n h = self.unpool5(h, pool5_indices)\n h = self.deconv5_1(h) \n h = self.deconv5_2(h) \n h = self.deconv5_3(h) \n \n ... (중략) ...\n \n h = self.unpool1(h, pool1_indices)\n h = self.deconv1_1(h) \n h = self.deconv1_2(h) \n \n output = self.score_fr(h)\n \n return output",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
ecdae0abf84339a6b89eaf1f4dc0cc93fc1488f4 | 1,774 | ipynb | Jupyter Notebook | tests/test_region_data.ipynb | CitizenScienceInAstronomyWorkshop/P4_sandbox | d04f688945848c0627f67496a55a654d661fb758 | [
"0BSD"
] | 1 | 2016-09-29T21:12:51.000Z | 2016-09-29T21:12:51.000Z | tests/test_region_data.ipynb | CitizenScienceInAstronomyWorkshop/P4_sandbox | d04f688945848c0627f67496a55a654d661fb758 | [
"0BSD"
] | 47 | 2015-08-10T05:57:27.000Z | 2020-06-12T21:21:20.000Z | tests/test_region_data.ipynb | CitizenScienceInAstronomyWorkshop/P4_sandbox | d04f688945848c0627f67496a55a654d661fb758 | [
"0BSD"
] | 1 | 2015-08-16T19:20:48.000Z | 2015-08-16T19:20:48.000Z | 18.102041 | 86 | 0.516911 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\" style=\"margin-top: 1em;\"><ul class=\"toc-item\"></ul></div>",
"_____no_output_____"
]
],
[
[
"from planet4 import region_data",
"_____no_output_____"
],
[
"p = region_data.Manhattan()",
"_____no_output_____"
],
[
"p.seasons",
"_____no_output_____"
],
[
"p.season1",
"_____no_output_____"
],
[
"p.available_seasons",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
ecdae1c8ce27cf236818b307822025aa1df3ea86 | 523,727 | ipynb | Jupyter Notebook | SpyTorchTutorial1-Flux.jl.ipynb | briandepasquale/spytorch-flux.jl | b79906a0cdbb5c0687f6b1f4600f3c815beabf10 | [
"MIT"
] | 1 | 2022-03-15T08:30:15.000Z | 2022-03-15T08:30:15.000Z | SpyTorchTutorial1-Flux.jl.ipynb | briandepasquale/spytorch-flux.jl | b79906a0cdbb5c0687f6b1f4600f3c815beabf10 | [
"MIT"
] | null | null | null | SpyTorchTutorial1-Flux.jl.ipynb | briandepasquale/spytorch-flux.jl | b79906a0cdbb5c0687f6b1f4600f3c815beabf10 | [
"MIT"
] | null | null | null | 348.454424 | 121,082 | 0.933171 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdae6cd816f8e1f5b956d578ea807eea4079794 | 463,197 | ipynb | Jupyter Notebook | examples/dev/exampleThermostats.ipynb | philthiel/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 39 | 2020-05-19T08:45:27.000Z | 2022-03-17T16:58:34.000Z | examples/dev/exampleThermostats.ipynb | SchroederB/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 38 | 2020-06-18T13:02:18.000Z | 2022-02-25T14:29:17.000Z | examples/dev/exampleThermostats.ipynb | SchroederB/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 13 | 2020-05-19T08:45:57.000Z | 2022-03-10T16:18:20.000Z | 162.183824 | 105,657 | 0.757347 | [
[
[
"import os, sys\npath = os.getcwd()+\"/..\"\nprint(path)\nsys.path.append(path) #give here the path to the folder containing the package,default does not work!!\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\nfrom ensembler.system import system\nfrom ensembler.potentials.OneD import harmonicOscillator\nfrom ensembler.potentials.ND import envelopedPotential\n\nfrom ensembler.integrator import monteCarloIntegrator, metropolisMonteCarloIntegrator, positionVerletIntegrator, velocityVerletIntegrator\nfrom ensembler.conditions.thermostats import berendsenThermostate\nfrom ensembler.visualisation.plotSimulations import static_sim_plots\nfrom ensembler.visualisation.animationSimulation import animation_trajectory\n",
"c:\\Users\\benja\\OneDrive - ETHZ\\PhD\\Code\\ensembler_riniker_develop\\examples/..\n"
],
[
"from scipy.stats import maxwell, boltzmann\nfrom scipy import constants as const\nmean, var, skew, kurt = maxwell.stats(moments='mvsk')\nr = maxwell.rvs(loc=0, scale=5, size=1000)",
"_____no_output_____"
],
[
"plt.hist(r, bins=10)",
"_____no_output_____"
],
[
"r = boltzmann.rvs(298, const.Avogadro, size=1000) \nplt.hist(r, bins=100)",
"_____no_output_____"
]
],
[
[
"# Plots of Simulations",
"_____no_output_____"
],
[
"## Newtonian Integrator",
"_____no_output_____"
]
],
[
[
"#Simple Verlet integration simulation:\n#UNDER DEVELOPMENT!\n# The temperature is not constant!!!\n#settings\nsim_steps = 200\npot=harmonicOscillator()\nthermo = berendsenThermostate(tau=3,dt=0.01)\nthermo.verbose = False\nintegrator = positionVerletIntegrator(dt=0.01)#maxStepSize=max_step_size,\nsys=system(potential=pot, integrator=integrator, conditions=[thermo], position=-10)\n\n#simulate\ncur_state = sys.simulate(sim_steps, withdrawTraj=True, initSystem=True)\n\nprint(sys.trajectory)\n#plot\n#static_sim_plots(sys, title=\"Thermostat\", x_range=list(range(-10,11)))",
"position temperature totEnergy totPotEnergy totKinEnergy \\\n0 -10.000000 298.0 50.301544 50.000000 0.301544 \n1 -10.000000 298.0 53.945426 50.000000 3.945426 \n2 -9.928091 298.0 75.138258 49.283492 25.854766 \n3 -9.646471 298.0 443.074451 46.527205 396.547246 \n4 -9.262478 298.0 780.152873 42.896745 737.256128 \n5 -8.811941 298.0 1053.740933 38.825152 1014.915781 \n6 -8.317469 298.0 1257.100977 34.590149 1222.510828 \n7 -7.795203 298.0 1394.193069 30.382596 1363.810473 \n8 -7.257206 298.0 1473.537515 26.333520 1447.203995 \n9 -6.712659 298.0 1505.188164 22.529895 1482.658269 \n10 -6.168576 298.0 1499.159219 19.025662 1480.133557 \n11 -5.630286 298.0 1464.628054 15.850060 1448.777993 \n12 -5.101786 298.0 1409.576815 13.014109 1396.562706 \n13 -4.586001 298.0 1340.685778 10.515702 1330.170076 \n14 -4.084997 298.0 1263.368405 8.343600 1255.024806 \n15 -3.600146 298.0 1181.881488 6.480526 1175.400962 \n16 -3.132265 298.0 1099.469739 4.905542 1094.564197 \n17 -2.681727 298.0 1018.520336 3.595829 1014.924508 \n18 -2.248556 298.0 940.713099 2.528001 938.185097 \n19 -1.832508 298.0 867.158467 1.679042 865.479425 \n20 -1.433135 298.0 798.519527 1.026938 797.492589 \n21 -1.049843 298.0 735.116868 0.551085 734.565783 \n22 -0.681934 298.0 677.016575 0.232517 676.784058 \n23 -0.328650 298.0 624.102504 0.054005 624.048498 \n24 0.010801 298.0 576.134427 0.000058 576.134368 \n25 0.337217 298.0 532.793811 0.056858 532.736954 \n26 0.651385 298.0 493.718988 0.212151 493.506837 \n27 0.954065 298.0 458.531402 0.455120 458.076282 \n28 1.245982 298.0 426.854505 0.776236 426.078270 \n29 1.527819 298.0 398.326710 1.167115 397.159594 \n.. ... ... ... ... ... \n172 14.531741 298.0 109.874829 105.585749 4.289080 \n173 14.560699 298.0 110.199771 106.006977 4.192794 \n174 14.589330 298.0 110.523085 106.424281 4.098804 \n175 14.617640 298.0 110.844744 106.837694 4.007050 \n176 14.645631 298.0 111.164721 107.247248 3.917474 \n177 14.673307 298.0 111.482994 107.652974 3.830019 \n178 14.700674 298.0 111.799539 108.054906 3.744633 \n179 14.727734 298.0 112.114336 108.453075 3.661261 \n180 14.754492 298.0 112.427364 108.847512 3.579852 \n181 14.780951 298.0 112.738605 109.238249 3.500356 \n182 14.807114 298.0 113.048042 109.625317 3.422725 \n183 14.832987 298.0 113.355659 110.008748 3.346911 \n184 14.858571 298.0 113.661440 110.388572 3.272868 \n185 14.883872 298.0 113.965372 110.764820 3.200553 \n186 14.908891 298.0 114.267443 111.137522 3.129920 \n187 14.933634 298.0 114.567640 111.506710 3.060929 \n188 14.958102 298.0 114.865952 111.872413 2.993539 \n189 14.982300 298.0 115.162370 112.234662 2.927709 \n190 15.006231 298.0 115.456886 112.593485 2.863400 \n191 15.029898 298.0 115.749490 112.948914 2.800575 \n192 15.053304 298.0 116.040175 113.300977 2.739198 \n193 15.076452 298.0 116.328936 113.649705 2.679232 \n194 15.099346 298.0 116.615767 113.995125 2.620642 \n195 15.121988 298.0 116.900663 114.337267 2.563396 \n196 15.144382 298.0 117.183619 114.676160 2.507460 \n197 15.166531 298.0 117.464633 115.011832 2.452801 \n198 15.188437 298.0 117.743701 115.344311 2.399390 \n199 15.210104 298.0 118.020822 115.673627 2.347195 \n200 15.231534 298.0 118.295993 115.999806 2.296187 \n201 15.252729 298.0 118.569214 116.322878 2.246337 \n\n dhdpos velocity \n0 0.000000 0.776587 \n1 0.000000 -2.809066 \n2 -10.000000 18.233843 \n3 -9.928091 28.752907 \n4 -9.646471 35.791174 \n5 -9.262478 40.635219 \n6 -8.811941 43.909161 \n7 -8.317469 46.004499 \n8 -7.795203 47.197514 \n9 -7.257206 47.695678 \n10 -6.712659 47.660376 \n11 -6.168576 
47.219742 \n12 -5.630286 46.476700 \n13 -5.101786 45.514394 \n14 -4.586001 44.400072 \n15 -4.084997 43.187976 \n16 -3.600146 41.921581 \n17 -3.132265 40.635362 \n18 -2.681727 39.356238 \n19 -2.248556 38.104758 \n20 -1.832508 36.896117 \n21 -1.433135 35.741027 \n22 -1.049843 34.646482 \n23 -0.681934 33.616434 \n24 -0.328650 32.652399 \n25 0.010801 31.753991 \n26 0.337217 30.919397 \n27 0.651385 30.145785 \n28 0.954065 29.429651 \n29 1.245982 28.767101 \n.. ... ... \n172 14.502453 17.427529 \n173 14.531741 17.423846 \n174 14.560699 17.420249 \n175 14.589330 17.416737 \n176 14.617640 17.413308 \n177 14.645631 17.409960 \n178 14.673307 17.406690 \n179 14.700674 17.403496 \n180 14.727734 17.400377 \n181 14.754492 17.397331 \n182 14.780951 17.394356 \n183 14.807114 17.391450 \n184 14.832987 17.388612 \n185 14.858571 17.385839 \n186 14.883872 17.383131 \n187 14.908891 17.380484 \n188 14.933634 17.377899 \n189 14.958102 17.375374 \n190 14.982300 17.372906 \n191 15.006231 17.370495 \n192 15.029898 17.368139 \n193 15.053304 17.365837 \n194 15.076452 17.363588 \n195 15.099346 17.361390 \n196 15.121988 17.359242 \n197 15.144382 17.357143 \n198 15.166531 17.355091 \n199 15.188437 17.353086 \n200 15.210104 17.351126 \n201 15.231534 17.349211 \n\n[202 rows x 7 columns]\n"
],
[
"static_sim_plots(sys, title=\"Thermostat\", x_range=list(range(-10,11)))",
"_____no_output_____"
]
],
[
[
"# Animations of Simulations",
"_____no_output_____"
]
],
[
[
"#Simple Verlet integration simulation:\n#UNDER DEVELOPMENT!The temperature is not constant!!!\n#settings\nsim_steps = 10\nx_min, x_max = (-10, 10)\nmax_step_size = 2.0\npot=harmonicOsc()\n#thermo = berendsenThermostate(tau=3, dt=1, MConstraintsDims=-1)\n#thermo.verbose = True\nintegrator = positionVerletIntegrator(dt=1)#maxStepSize=max_step_size,\nintegrator.verbose = True\nsys=system(potential=pot, integrator=integrator, position=-10)\n\n#simulate\ncur_state = sys.simulate(sim_steps, withdrawTraj=True, initSystem=True)\nprint(\"init_state\", sys.trajectory[0], \"\\n\")\nprint(\"last_state: \", sys.trajectory[-1], \"\\n\")\nprint(\"Trajectory length: \",len(sys.trajectory))\n\n#animation\nani, out_path = animation_trajectory(sys, [x_min, x_max])\n\nimport tempfile\nos.chdir(tempfile.gettempdir())\nfrom IPython.display import HTML\nHTML(ani.to_jshtml())",
"Ensembler.src.potentials._baseclassesin _set_singlePos_mode 1\nINTEGRATOR: current forces\t -10.0\nINTEGRATOR: current Velocities\t 3.10010795360161\nINTEGRATOR: current_position\t -10\nINTEGRATOR: newVel\t 13.10010795360161\nINTEGRATOR: newPosition\t 3.1001079536016096\n\n\nINTEGRATOR: current forces\t 3.1001079536016096\nINTEGRATOR: current Velocities\t 13.10010795360161\nINTEGRATOR: current_position\t 3.1001079536016096\nINTEGRATOR: newVel\t 10.0\nINTEGRATOR: newPosition\t 13.10010795360161\n\n\nINTEGRATOR: current forces\t 13.10010795360161\nINTEGRATOR: current Velocities\t 10.0\nINTEGRATOR: current_position\t 13.10010795360161\nINTEGRATOR: newVel\t -3.1001079536016096\nINTEGRATOR: newPosition\t 10.0\n\n\nINTEGRATOR: current forces\t 10.0\nINTEGRATOR: current Velocities\t -3.1001079536016096\nINTEGRATOR: current_position\t 10.0\nINTEGRATOR: newVel\t -13.10010795360161\nINTEGRATOR: newPosition\t -3.1001079536016096\n\n\nINTEGRATOR: current forces\t -3.1001079536016096\nINTEGRATOR: current Velocities\t -13.10010795360161\nINTEGRATOR: current_position\t -3.1001079536016096\nINTEGRATOR: newVel\t -10.0\nINTEGRATOR: newPosition\t -13.10010795360161\n\n\nINTEGRATOR: current forces\t -13.10010795360161\nINTEGRATOR: current Velocities\t -10.0\nINTEGRATOR: current_position\t -13.10010795360161\nINTEGRATOR: newVel\t 3.1001079536016096\nINTEGRATOR: newPosition\t -10.0\n\n\nINTEGRATOR: current forces\t -10.0\nINTEGRATOR: current Velocities\t 3.1001079536016096\nINTEGRATOR: current_position\t -10.0\nINTEGRATOR: newVel\t 13.10010795360161\nINTEGRATOR: newPosition\t 3.1001079536016096\n\n\nINTEGRATOR: current forces\t 3.1001079536016096\nINTEGRATOR: current Velocities\t 13.10010795360161\nINTEGRATOR: current_position\t 3.1001079536016096\nINTEGRATOR: newVel\t 10.0\nINTEGRATOR: newPosition\t 13.10010795360161\n\n\nINTEGRATOR: current forces\t 13.10010795360161\nINTEGRATOR: current Velocities\t 10.0\nINTEGRATOR: current_position\t 13.10010795360161\nINTEGRATOR: newVel\t -3.1001079536016096\nINTEGRATOR: newPosition\t 10.0\n\n\nINTEGRATOR: current forces\t 10.0\nINTEGRATOR: current Velocities\t -3.1001079536016096\nINTEGRATOR: current_position\t 10.0\nINTEGRATOR: newVel\t -13.10010795360161\nINTEGRATOR: newPosition\t -3.1001079536016096\n\n\ninit_state State(position=-10, temperature=298.0, totEnergy=54.80533466199198, totPotEnergy=50.0, totKinEnergy=4.8053346619919814, dhdpos=nan, velocity=3.10010795360161) \n\nlast_state: State(position=10.0, temperature=298.0, totEnergy=54.80533466199198, totPotEnergy=50.0, totKinEnergy=4.80533466199198, dhdpos=13.10010795360161, velocity=-3.1001079536016096) \n\nTrajectory length: 10\n"
],
[
"#Simple Verlet integration simulation:\n#UNDER DEVELOPMENT!The temperature is not constant!!!\n#settings\nsim_steps = 100\nx_min, x_max = (-10, 10)\nmax_step_size = 2.0\npot=harmonicOsc()\nthermo = berendsenThermostate(tau=20, dt=1)\n#thermo.verbose = True\nintegrator = velocityVerletIntegrator(dt=1)#maxStepSize=max_step_size,\n#integrator.verbose = True\nsys=system(potential=pot, integrator=integrator, conditions=[thermo], position=-5, temperature=0)\n\n#simulate\ncur_state = sys.simulate(sim_steps, withdrawTraj=True, initSystem=True)\nprint(\"init_state\", sys.trajectory[0], \"\\n\")\nprint(\"last_state: \", sys.trajectory[-1], \"\\n\")\nprint(\"Trajectory length: \",len(sys.trajectory))\n\n#animation\nani, out_path = animation_trajectory(sys, [x_min, x_max])\n\nimport tempfile\nos.chdir(tempfile.gettempdir())\nfrom IPython.display import HTML\nHTML(ani.to_jshtml())",
"Ensembler.src.potentials._baseclassesin _set_singlePos_mode 1\n"
],
[
"from scipy.stats import boltzmann\n \nboltzmann()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ecdaf1f555d691a6334aaaa03edcd5233aceb405 | 23,439 | ipynb | Jupyter Notebook | 04_Applied Text Mining in Python/Week_1/Regex+with+Pandas+and+Named+Groups.ipynb | vblacklion/03_Applied-Data-Science-with-Python-Specialization | 7880eaa7f4042ff3f0b4a690d09efba9f34a02cd | [
"MIT"
] | null | null | null | 04_Applied Text Mining in Python/Week_1/Regex+with+Pandas+and+Named+Groups.ipynb | vblacklion/03_Applied-Data-Science-with-Python-Specialization | 7880eaa7f4042ff3f0b4a690d09efba9f34a02cd | [
"MIT"
] | null | null | null | 04_Applied Text Mining in Python/Week_1/Regex+with+Pandas+and+Named+Groups.ipynb | vblacklion/03_Applied-Data-Science-with-Python-Specialization | 7880eaa7f4042ff3f0b4a690d09efba9f34a02cd | [
"MIT"
] | null | null | null | 26.072303 | 294 | 0.350271 | [
[
[
"---\n\n_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._\n\n---",
"_____no_output_____"
],
[
"# Working with Text Data in pandas",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ntime_sentences = [\"Monday: The doctor's appointment is at 2:45pm.\", \n \"Tuesday: The dentist's appointment is at 11:30 am.\",\n \"Wednesday: At 7:00pm, there is a basketball game!\",\n \"Thursday: Be back home by 11:15 pm at the latest.\",\n \"Friday: Take the train at 08:10 am, arrive at 09:00am.\"]\n\ndf = pd.DataFrame(time_sentences, columns=['text'])\ndf",
"_____no_output_____"
],
[
"# find the number of characters for each string in df['text']\ndf['text'].str.len()",
"_____no_output_____"
],
[
"# find the number of tokens for each string in df['text']\ndf['text'].str.split().str.len()",
"_____no_output_____"
],
[
"# find which entries contain the word 'appointment'\ndf['text'].str.contains('appointment')",
"_____no_output_____"
],
[
"# find how many times a digit occurs in each string\ndf['text'].str.count(r'\\d')",
"_____no_output_____"
],
[
"# find all occurances of the digits\ndf['text'].str.findall(r'\\d')",
"_____no_output_____"
],
[
"# group and find the hours and minutes\ndf['text'].str.findall(r'(\\d?\\d):(\\d\\d)')",
"_____no_output_____"
],
[
"# group and find the hours and minutes\ndf['text'].str.findall(r'(\\d?\\d:\\d\\d)')",
"_____no_output_____"
],
[
"# replace weekdays with '???'\ndf['text'].str.replace(r'\\w+day\\b', '???')",
"_____no_output_____"
],
[
"# replace weekdays with 3 letter abbrevations\ndf['text'].str.replace(r'(\\w+day\\b)', lambda x: x.groups()[0][:3])",
"_____no_output_____"
],
[
"# create new columns from first match of extracted groups\ndf['text'].str.extract(r'(\\d?\\d):(\\d\\d)')",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:2: FutureWarning: currently extract(expand=None) means expand=False (return Index/Series/DataFrame) but in a future version of pandas this will be changed to expand=True (return DataFrame)\n \n"
],
[
"# extract the entire time, the hours, the minutes, and the period\ndf['text'].str.extractall(r'((\\d?\\d):(\\d\\d) ?([ap]m))')",
"_____no_output_____"
],
[
"# extract the entire time, the hours, the minutes, and the period with group names\na = df['text'].str.extractall(r'(?P<time>(?P<hour>\\d?\\d):(?P<minute>\\d\\d) ?(?P<period>[ap]m))')",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"a = a.reset_index()\na",
"_____no_output_____"
],
[
"a = a.drop([\"level_0\", \"match\", \"time\"], axis=1)\na",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdaf1f8f7af7a72bee9352f9eceeb10088a1040 | 132 | ipynb | Jupyter Notebook | notebooks/001_cartoonify.ipynb | bilha-analytics/read_with_me | 4073750d4a23fb800c8bb8abd5252e021036879b | [
"MIT"
] | null | null | null | notebooks/001_cartoonify.ipynb | bilha-analytics/read_with_me | 4073750d4a23fb800c8bb8abd5252e021036879b | [
"MIT"
] | null | null | null | notebooks/001_cartoonify.ipynb | bilha-analytics/read_with_me | 4073750d4a23fb800c8bb8abd5252e021036879b | [
"MIT"
] | null | null | null | 33 | 75 | 0.886364 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdaf5235d8d0cd903ad85e2120552c3f1abdc05 | 17,844 | ipynb | Jupyter Notebook | Titanic_prediction.ipynb | Mananhina/Titanic-Machine_Learning | 18273fef4833226209cc2260f1a3debdd5d5afb8 | [
"BSD-2-Clause"
] | 1 | 2021-11-27T07:53:31.000Z | 2021-11-27T07:53:31.000Z | Titanic_prediction.ipynb | Mananhina/Titanic-Machine_Learning | 18273fef4833226209cc2260f1a3debdd5d5afb8 | [
"BSD-2-Clause"
] | null | null | null | Titanic_prediction.ipynb | Mananhina/Titanic-Machine_Learning | 18273fef4833226209cc2260f1a3debdd5d5afb8 | [
"BSD-2-Clause"
] | 1 | 2021-11-27T07:53:47.000Z | 2021-11-27T07:53:47.000Z | 40.189189 | 6,984 | 0.678996 | [
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score",
"_____no_output_____"
]
],
[
[
"# Getting the hang of the data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('titanic.csv')\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n"
]
],
[
[
"This dataframe contains information only about survived passengers",
"_____no_output_____"
],
[
"Do some data analysis",
"_____no_output_____"
]
],
[
[
"print(df.pivot_table(index = 'Survived',\n columns = 'Pclass',\n values = 'Age',\n aggfunc = 'mean'))",
"Pclass 1 2 3\nSurvived \n0 43.695312 33.544444 26.555556\n1 35.368197 25.901566 20.646118\n"
]
],
[
[
"The richer the class was, the more likely the older passenger was to survive",
"_____no_output_____"
],
[
"Сount the number of people who survived and died in each class. Use the 'Sex' column, because the sex is indicated for each passenger",
"_____no_output_____"
]
],
[
[
"d = df.pivot_table(index = 'Pclass',\n columns = 'Survived',\n values = 'Sex',\n aggfunc = 'count')\nd.plot(kind = 'barh', grid = True)",
"_____no_output_____"
]
],
[
[
"The richer the class, the more likely it is to survive",
"_____no_output_____"
]
],
[
[
"print(df.pivot_table(index = 'Survived',\n columns = 'Sex',\n values = 'Name',\n aggfunc = 'count'))",
"Sex female male\nSurvived \n0 81 468\n1 233 109\n"
]
],
[
[
"More women survived than men",
"_____no_output_____"
],
[
"# Data cleansing",
"_____no_output_____"
],
[
"Removing unnecessary columns",
"_____no_output_____"
]
],
[
[
"df.drop(['PassengerId','Name','Ticket','Cabin'], axis = 1, inplace = True)\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Survived 891 non-null int64 \n 1 Pclass 891 non-null int64 \n 2 Sex 891 non-null object \n 3 Age 714 non-null float64\n 4 SibSp 891 non-null int64 \n 5 Parch 891 non-null int64 \n 6 Fare 891 non-null float64\n 7 Embarked 889 non-null object \ndtypes: float64(2), int64(4), object(2)\nmemory usage: 55.8+ KB\n"
]
],
[
[
"Columns 'PassengerId', 'Name', 'Ticket' contain useless data. The Cabin number could be useful, but most of this data is missing.",
"_____no_output_____"
],
[
"Replace the null values in 'Embarked' column with the most popular value.",
"_____no_output_____"
]
],
[
[
"df['Embarked'].fillna(df['Embarked'].mode()[0], inplace = True)",
"_____no_output_____"
]
],
[
[
"Replace the null values in 'Age' column with madian age of passengers in corresponding class",
"_____no_output_____"
]
],
[
[
"age_1 = df[df['Pclass'] == 1]['Age'].median()\nage_2 = df[df['Pclass'] == 2]['Age'].median()\nage_3 = df[df['Pclass'] == 3]['Age'].median()\n\ndef fill_age(row):\n if pd.isnull(row['Age']):\n if row['Pclass'] == 1:\n return age_1\n elif row['Pclass'] == 2:\n return age_2\n else:\n return age_3\n return row['Age']\n\ndf['Age'] = df.apply(fill_age, axis = 1)",
"_____no_output_____"
]
],
[
[
"Convert values in 'Sex' column to Integer type",
"_____no_output_____"
]
],
[
[
"df['Sex'] = df['Sex'].map({'male': 1, 'female': 0})",
"_____no_output_____"
]
],
[
[
"Create 3 columns with names according to the first letters of the name of each port. For each passenger fill 1 in the column corresponding to his port, fill 0 in other columns ",
"_____no_output_____"
]
],
[
[
"df[list(pd.get_dummies(df['Embarked']).columns)] = pd.get_dummies(df['Embarked'])\ndf.drop('Embarked', axis = 1, inplace = True)",
"_____no_output_____"
]
],
[
[
"Now dataframe contains 10 columns with 891 non-null numerical values",
"_____no_output_____"
],
[
"# Prediction",
"_____no_output_____"
],
[
"Separation the dataframe into training data (X) and target variable (y)",
"_____no_output_____"
]
],
[
[
"X = df.drop('Survived', axis = 1)\ny = df['Survived']",
"_____no_output_____"
]
],
[
[
"Separation of data into training (train) and test (test)",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 42)",
"_____no_output_____"
]
],
[
[
"Data scaling",
"_____no_output_____"
]
],
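[
[
"(Added note, not in the original notebook:) `StandardScaler` standardizes each feature as $z = \\frac{x - \\mu}{\\sigma}$, where $\\mu$ and $\\sigma$ are computed on the training set only and the same transform is then applied to the test set.",
"_____no_output_____"
]
],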
[
[
"sc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)",
"_____no_output_____"
]
],
[
[
"Create a model with a specified number of neighbors and train.\n Find the optimal number of neighbors for maximum accuracy",
"_____no_output_____"
]
],
[
[
"ks = []\nfor k in range(1, 50, 2):\n classifier = KNeighborsClassifier(n_neighbors = k)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n ks.append((accuracy_score(y_test, y_pred)* 100, k))\n\nmax_accuracy = ks[0][0]\nmax_k = ks[0][1]\nfor i in range(1, len(ks)):\n if ks[i][0] > max_accuracy:\n max_accuracy = ks[i][0]\n max_k = ks[i][1]\n \nprint('Optimal number of nearest neighbors: ', max_k, '\\n' + 'Maximum accuracy: ', max_accuracy)",
"Optimal number of nearest neighbors: 15 \nMaximum accuracy: 80.71748878923766\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdaff23ba0a99035bf19a60a927baff6babe36e | 35,037 | ipynb | Jupyter Notebook | docs/notebooks/tutorials/geometry.ipynb | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 142 | 2020-06-12T17:01:58.000Z | 2022-03-16T23:21:37.000Z | docs/notebooks/tutorials/geometry.ipynb | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 35 | 2020-04-15T15:34:54.000Z | 2022-03-19T20:26:47.000Z | docs/notebooks/tutorials/geometry.ipynb | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 14 | 2020-06-23T18:56:46.000Z | 2022-03-31T15:54:56.000Z | 35,037 | 35,037 | 0.911094 | [
[
[
"# Working with Rectangles in Coldtype",
"_____no_output_____"
]
],
[
[
"!pip install coldtype[notebook]\n#!pip install -q \"coldtype[notebook] @ git+https://github.com/goodhertz/coldtype\"\n\nfrom coldtype.notebook import *\nr = Rect(700, 300)",
"_____no_output_____"
]
],
[
[
"## Dividing Rect(angles)\n\nOne of the core concepts of Coldtype is the use of the `coldtype.geometry.Rect` class to encapsulate rectangles and methods for slicing & dicing them.\n\nThe most basic rectangle is the one passed to a `renderable`, i.e. the `r` variable you get when you define a renderable function, like `def r1(r)` below. So to fill the entire canvas with a single random color, you can do something like this:",
"_____no_output_____"
]
],
[
[
"@renderable((700, 300))\ndef r1(r):\n return P(r).f(hsl(random()))",
"_____no_output_____"
]
],
[
[
"## Inset, offset, take, divide, subdivide...\n\nAll @renderables have a rectangle associated with them (the full rectangle of the artifact canvas), and all rendering functions are passed rectangles, either via the first and only argument, or as a property of the first argument, as is the case with @animation renderables, which pass a Frame argument that makes the rectangle accessible via f.a.r (where f is the Frame).\n\nBut we’re getting ahead of ourselves.\n\nA Rect has lots of methods, though the most useful ones are `inset`, `offset`, `take`, `divide`, and `subdivide`.\n\nHere’s a simple example that insets, offsets, subtracts, and then subtracts again. (Probably not something I’d write in reality, but good for demonstration purposes.)",
"_____no_output_____"
]
],
[
[
"@renderable((700, 300))\ndef r2(r):\n return (P().rect(r\n .take(0.5, \"W\") # \"W\" for \"West\"\n .inset(20, 20)\n .offset(0, 10)\n .subtract(20, \"E\")\n .subtract(10, \"N\"))\n .f(hsl(0.5)))",
"_____no_output_____"
]
],
[
[
"## More complex slicing & dicing\n\nYou may have noticed that the rect functions take a mix of float and int arguments. That’s because a value less than `1.0` will be treated, by the dividing-series of rect functions, as percentages of the dimension implied by the edge argument. So in that `take(0.5, \"W\")` above, the `0.5` specifies 50% of the width of the rectangle (width because of the `W` edge argument).\n\nHere’s an example that divides a rectangle into left and right rectangles, and shows another useful method, `square` (which takes the largest square possible from the center of the given rectangle).",
"_____no_output_____"
]
],
[
[
"@renderable((700, 300))\ndef lr(r):\n ri = r.inset(50, 50)\n left, right = ri.divide(0.5, \"W\")\n return PS([\n (P().rect(ri)\n .f(None)\n .s(0.75)\n .sw(2)),\n (P().oval(left\n .square()\n .offset(100, 0))\n .f(hsl(0.6, a=0.5))),\n (P().oval(right\n .square()\n .inset(-50))\n .f(hsl(0, a=0.5)))])",
"_____no_output_____"
]
],
[
[
"Here’s an example using subdivide to subdivide a larger rectangle into smaller pieces, essentially columns.",
"_____no_output_____"
]
],
[
[
"@renderable((700, 300))\ndef columns(r):\n cs = r.inset(10).subdivide(5, \"W\")\n return PS.Enumerate(cs, lambda x:\n P(x.el.inset(10)).f(hsl(random())))",
"_____no_output_____"
]
],
[
[
"Of course, columns like that aren’t very typographic. Here’s an example using subdivide_with_leading, a useful method for quickly getting standard rows or columns with classic spacing.\n\n",
"_____no_output_____"
]
],
[
[
"@renderable((700, 500))\ndef columns_leading(r):\n cs = r.subdivide_with_leading(5, 20, \"N\")\n return PS.Enumerate(cs, lambda x:\n P(x.el).f(hsl(random())))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdb0b09879577c5c2f455787237f13e1a20c3a0 | 22,401 | ipynb | Jupyter Notebook | wp/notebooks/model/mc_dropout/.ipynb_checkpoints/debug-checkpoint.ipynb | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/notebooks/model/mc_dropout/.ipynb_checkpoints/debug-checkpoint.ipynb | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/notebooks/model/mc_dropout/.ipynb_checkpoints/debug-checkpoint.ipynb | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | 40.803279 | 1,503 | 0.609973 | [
[
[
"## Check if model converges at all",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload",
"_____no_output_____"
],
[
"import os, sys, importlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\n\nBASE_PATH = os.path.join(os.getcwd(), \"..\", \"..\", \"..\")\nMODULE_PATH = os.path.join(BASE_PATH, \"modules\")\nDATASETS_PATH = os.path.join(BASE_PATH, \"datasets\")\n\nsys.path.append(MODULE_PATH)\n\nfrom active_learning import TrainConfig\nfrom data import BenchmarkData, DataSetType\nfrom models import setup_growth, default_model\nfrom bayesian import BayesModel, McDropout, MomentPropagation\n\nimport mp.MomentPropagation as mp",
"_____no_output_____"
],
[
"setup_growth()",
"1 Physical GPU's, 1 Logical GPU's\n"
],
[
"output_classes = 5",
"_____no_output_____"
],
[
"benchmark = BenchmarkData(DataSetType.MNIST, os.path.join(DATASETS_PATH, \"mnist\"), classes=output_classes)",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(benchmark.inputs, benchmark.targets)",
"_____no_output_____"
],
[
"print(\"Datapoints: {}\".format(len(benchmark.targets)))\nprint(\"Unique labels: {}\".format(np.unique(benchmark.targets)))",
"Datapoints: 35735\nUnique labels: [0 1 2 3 4]\n"
],
[
"model = default_model(output_classes=output_classes)\nmodel.compile(optimizer=\"adadelta\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])",
"_____no_output_____"
],
[
"x_test.shape",
"_____no_output_____"
],
[
"y_test.shape",
"_____no_output_____"
],
[
"model.fit(x_train, y_train, epochs=10, batch_size=100)",
"Epoch 1/10\n269/269 [==============================] - 5s 19ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 2/10\n269/269 [==============================] - 2s 6ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 3/10\n269/269 [==============================] - 2s 6ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 4/10\n269/269 [==============================] - 2s 6ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 5/10\n269/269 [==============================] - 2s 6ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 6/10\n269/269 [==============================] - 2s 6ms/step - loss: 10.1622 - accuracy: 0.2204\nEpoch 7/10\n 1/269 [..............................] - ETA: 0s - loss: 10.6864 - accuracy: 0.1700"
],
[
"model.evaluate(x_test, y_test)",
"280/280 [==============================] - 4s 15ms/step - loss: nan - accuracy: 0.1934\n"
]
],
[
[
"# Model Debug",
"_____no_output_____"
]
],
[
[
"mc_model = McDropout(model)\nmc_model.evaluate(x_test, y_test)",
"_____no_output_____"
],
[
"mc_model.get_model().evaluate(x_test[:100], y_test[:100])",
"4/4 [==============================] - 3s 728ms/step - loss: 5.3774e-05 - binary_accuracy: 1.0000\n"
],
[
"mc_model.evaluate(x_test[:100], y_test[:100], sample_size=100)",
"_____no_output_____"
],
[
"new_model = mc_model.get_model()",
"_____no_output_____"
],
[
"new_mc = McDropout(new_model)",
"_____no_output_____"
],
[
"new_mc.evaluate(x_test[:100], y_test[:100], sample_size=1)",
"_____no_output_____"
],
[
"new_mc.evaluate(x_test, y_test)",
"_____no_output_____"
],
[
"predictions = new_mc(x_test[:100], sample_size=10)",
"_____no_output_____"
],
[
"predictions",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdb0daf320e339f30c491c1d7397239a1be21ef | 6,480 | ipynb | Jupyter Notebook | solutions/solutions_first_part.ipynb | non87/tools-of-the-trade | e7db3bd2ae753b065e6596092d0aac4e6cc3cc3d | [
"MIT"
] | 4 | 2021-02-08T16:47:36.000Z | 2021-04-26T13:56:37.000Z | solutions/solutions_first_part.ipynb | non87/tools-of-the-trade | e7db3bd2ae753b065e6596092d0aac4e6cc3cc3d | [
"MIT"
] | null | null | null | solutions/solutions_first_part.ipynb | non87/tools-of-the-trade | e7db3bd2ae753b065e6596092d0aac4e6cc3cc3d | [
"MIT"
] | 2 | 2021-02-09T19:39:51.000Z | 2021-04-06T18:11:04.000Z | 35.409836 | 256 | 0.555556 | [
[
[
"### Solutions, First Part\n\nThis notebook contains the solutions the exercise of the First part of the workshop. To see the solutions in action *copy-paste* the code in the code block of the exercise. Notice, this code will not run as is, you have to copy-paste before running. ",
"_____no_output_____"
],
[
"#### First Exercise",
"_____no_output_____"
]
],
[
[
"def whole_shuffler(M, rng):\n '''\n Your function here. Notice, we must pass the rng to ensure reproducibility\n \n '''\n shape = M.shape\n N = shape[0] * shape[1]\n M = M.flatten()\n inds = rng.choice(M, size = N, replace=False)\n # Fancy indexing\n shuffled = M[inds].reshape(shape)\n return(shuffled)\n \n# Shuffle a matrix using the function you wrote\nseed = 101112\nrng = np.random.default_rng(seed)\n\nto_be_shuffled = np.arange(10).reshape(5,2)\nshuffled = whole_shuffler(to_be_shuffled, rng)\n\nprint(f\"This is the non-shuffled matrix:\\n{to_be_shuffled}\\n\")\nprint(f\"This is the wholly-shuffled matrix:\\n{shuffled}\")",
"_____no_output_____"
]
],
[
[
"#### Second Exercise\n\nFirst block:",
"_____no_output_____"
]
],
[
[
"# Instantiate a rng\nseed = 1122334455\nrng = np.random.default_rng()\n\n# Instantiate two non spherical bivariate normal distribution.\nmvn_0 = ss.multivariate_normal(mean=np.array([3,3]), cov=np.array([[1.5,0.8],[0.8, 1.5]]))\nmvn_0.random_state = rng\nmvn_1 = ss.multivariate_normal(mean=np.array([-1, 2]), cov=np.array([[1.5,-0.8],[-0.8, 1.5]]))\nmvn_1.random_state = rng\n# Put those in a list, it will be useful to sample the variates\nnormals = [mvn_0, mvn_1]\n\n# mixture for the two distributions\nmixture = [0.3, 0.7]\n# number of variates in the sample\nN = 350\n\n# Data generation process\n# First Randomly pick one of the two normal distribution following the mixture probabilities\n# ultimately we want a random vector of 0s and 1s (e.g. [0,1,1,1,0, etc.]) based on the mixture probability. \n# This is the object of our inference\nselected_norm = rng.choice([0,1], size=N, p=mixture)\n\n# Second, generate the normal variates depending on the selected_norm vector\nsample = np.zeros((N,2))\nfor i,j in enumerate(selected_norm):\n # Notice, we are generating a random *vector*, so we will store it as a row in a matrix\n sample[i,:] = normals[j].rvs(size=1)\n\nprint(f\"This is the start of our sample:\\n{np.round(sample[:10,:], 3)}\")",
"_____no_output_____"
]
],
[
[
"Second block:",
"_____no_output_____"
]
],
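[
[
"(Added for reference, since the exercise's statement of the formula is not reproduced in this solutions notebook:) the function below computes the posterior probability that a point $x$ came from component $k$, $$P(k \\mid x) = \\frac{\\pi_k \\mathcal{N}(x \\mid \\mu_k, \\Sigma_k)}{\\sum_j \\pi_j \\mathcal{N}(x \\mid \\mu_j, \\Sigma_j)},$$ with $\\pi$ the prior (mixture) vector and $\\mathcal{N}$ the bivariate normal density.",
"_____no_output_____"
]
],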
[
[
"# Implement the formula above in a function\ndef which_norm(samp, mvnorm_0, mvnorm_1, prior=[0.5, 0.5]):\n '''\n Implement Bayes rule to calculate the probability that samp was generated by the multivariate normals\n mvnorm_0 and mvnorm_1.\n\n :param samp: The data point as a np.array\n :param mvnorm_0: The first multivariate normal, a scipy.stats.multivariate_normal frozen distribution\n :param mvnorm_1: The second multivariate normal, a scipy.stats.multivariate_normal frozen distribution\n :param prior: Your prior belief about which mvn generated the datapoint. A vector or list\n :return: A np.array containing the probability of samp being generated by mvnorm_0 or mvnorm_1\n '''\n \n pdf_0 = mvnorm_0 # Get the pdf of the first mvnorm at samp. Use the .pdf() method #.pdf(samp)\n pdf_1 = mvnorm_1 # Get the pdf of the second mvnorm at samp. Use the .pdf() method #.pdf(samp)\n # Write the numerator from the formula above using the prior vector.\n numerator = np.array([pdf_0*prior[0]\n , pdf_1*prior[1]\n ])\n denominator = numerator.sum()\n return numerator/denominator\n\n# apply along axis\nPs = np.apply_along_axis(func1d=which_norm, \n arr=sample, \n # Which mvnorm should we pass?\n mvnorm_0=mvn_0, \n mvnorm_1=mvn_1, \n # What is our prior? \n prior=[0.5,0.5], \n # What axis are we applying this to?\n axis=1\n )\nprint(f\"These are the first 10 inferred assignemnts:\\n{np.round(Ps[:10,:],3)}\\n\")\n\n# Extra:\n# can you reproduce the plot above with the calculate probability (Ps) instead of the real values (selected_norm)?\n# Hint: Check the c argument of ax.plot() OR ax.scatter() from matplotlib\nfig, ax = plt.subplots()\nax.scatter(x=sample[:,0], y=sample[:,1], c=Ps[:,1])\nax.set_aspect('equal')\n_ = ax.set_title(\"Normal Mixture Inference\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdb120d4b9bce5c58a1cd2eaffe29579b8e9aed | 248,487 | ipynb | Jupyter Notebook | python/notebooks/01_XOR_Keras.ipynb | choas/iotconf2019 | 5eef4f9d2c1974148f0a47b6c1ae9c79b37aa364 | [
"Apache-2.0"
] | null | null | null | python/notebooks/01_XOR_Keras.ipynb | choas/iotconf2019 | 5eef4f9d2c1974148f0a47b6c1ae9c79b37aa364 | [
"Apache-2.0"
] | 5 | 2020-01-28T22:49:27.000Z | 2022-02-10T00:21:22.000Z | python/notebooks/01_XOR_Keras.ipynb | choas/iotconf2019 | 5eef4f9d2c1974148f0a47b6c1ae9c79b37aa364 | [
"Apache-2.0"
] | null | null | null | 55.540232 | 243 | 0.377348 | [
[
[
"# 01 - XOR model with Keras",
"_____no_output_____"
]
],
[
[
"import sys\nimport numpy as np\nimport tensorflow as tf\nimport keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.activations import sigmoid\nfrom keras.activations import relu\nfrom keras.activations import tanh\nfrom keras.losses import MSE\nfrom keras.losses import mean_squared_error\nfrom keras.losses import binary_crossentropy\nfrom keras.optimizers import SGD, Adam\nfrom keras.metrics import binary_accuracy\n\nfrom tensorflow.python.framework.graph_util import convert_variables_to_constants\n\nimport keras\nprint keras.__name__, keras.__version__\nprint tf.__name__, tf.__version__",
"keras 2.2.2\ntensorflow 1.12.0\n"
]
],
[
[
"Note: Keras should be Version 2.2.2 and TensorFlow 1.12.0",
"_____no_output_____"
],
[
"#### Tensorflow and Keras session",
"_____no_output_____"
]
],
[
[
"sess=tf.Session()\nK.set_session(sess)",
"_____no_output_____"
]
],
[
[
"#### training and test data",
"_____no_output_____"
]
],
[
[
"training_data = np.array([[0,0], [0,1], [1,0], [1,1]])\ntarget_data = np.array([ [0], [1], [1], [0]])",
"_____no_output_____"
]
],
[
[
"#### XOR model\n\n (credit: https://commons.wikimedia.org/wiki/File:Neural_network.svg)",
"_____no_output_____"
]
],
[
[
"model = Sequential()\nmodel.add(Dense(5, input_dim=2, activation='sigmoid', name='hiddenlayer'))\nmodel.add(Dense(1, activation='sigmoid', name='outputlayer'))",
"_____no_output_____"
]
],
[
[
"input layer with hidden layer\n- [dense](https://keras.io/layers/core/#dense)\n- 5 hidden nodes\n- 2 input values (training data)\n\noutput Layer\n- 1 node",
"_____no_output_____"
],
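[
"Concretely (added note, not part of the original notebook): each `Dense` layer computes $\\mathbf{y} = f(W\\mathbf{x} + \\mathbf{b})$, so the hidden layer holds a $2 \\times 5$ weight matrix plus 5 biases (15 parameters) and the output layer a $5 \\times 1$ weight matrix plus 1 bias (6 parameters), i.e. 21 trainable parameters in total.",
"_____no_output_____"
],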
[
"activation method: [sigmoid function](https://en.wikipedia.org/wiki/Sigmoid_function)\n (credit: https://commons.wikimedia.org/wiki/File:Logistic-curve.svg)",
"_____no_output_____"
],
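[
"Written out (added note, not part of the original notebook): $\\sigma(x) = \\frac{1}{1 + e^{-x}}$, which squashes any input into $(0, 1)$.\n\n```python\n# quick numeric check, illustrative only (np is already imported above)\nsigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))\nprint(sigmoid(np.array([-4.0, 0.0, 4.0]))) # approx. [0.018 0.5 0.982]\n```",
"_____no_output_____"
],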
[
"#### layers\n (credit: https://commons.wikimedia.org/wiki/File:ArtificialNeuronModel_english.png)",
"_____no_output_____"
]
],
[
[
"model.compile(loss='mean_squared_error', optimizer=SGD(lr=1), metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"loss function: [mean_squared_error](https://keras.io/losses/)\n\noptimizer: [SGD](https://keras.io/optimizers/) \n- [Stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent)\n- [XOR tutorial with TensorFlow](https://martin-thoma.com/tf-xor-tutorial/)",
"_____no_output_____"
],
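[
"For reference (added note, not in the original notebook): the mean squared error loss is $\\mathrm{MSE} = \\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2$, and plain SGD updates each weight as $w \\leftarrow w - \\eta \\nabla_w L$, here with learning rate $\\eta = 1$ (the `lr=1` passed to `SGD` above).",
"_____no_output_____"
],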
[
"#### TensorBoard\n[TensorBoard Callback](https://keras.io/callbacks/#tensorboard)",
"_____no_output_____"
]
],
[
[
"from keras.callbacks import TensorBoard\nfrom tensorflow.contrib.tensorboard.plugins import projector\ntensorboard = TensorBoard(log_dir='./logs/xor_keras', histogram_freq=0,\n write_graph=True, write_images=False)",
"_____no_output_____"
]
],
[
[
"start TensorBoard with:\n\n```sh\ntensorboard --logdir ./notebooks/logs\n```\n\nopen TensorBoard:\n\n[TensorBoard at localhost:6006](http://localhost:6006)",
"_____no_output_____"
],
[
"#### start training",
"_____no_output_____"
]
],
[
[
"epochs = 2000\nmodel.fit(training_data, target_data, epochs=epochs, callbacks=[tensorboard])",
"Epoch 1/2000\n4/4 [==============================] - 0s 12ms/step - loss: 0.2505 - acc: 0.2500\nEpoch 2/2000\n4/4 [==============================] - 0s 247us/step - loss: 0.2504 - acc: 0.2500\nEpoch 3/2000\n4/4 [==============================] - 0s 294us/step - loss: 0.2503 - acc: 0.2500\nEpoch 4/2000\n4/4 [==============================] - 0s 298us/step - loss: 0.2502 - acc: 0.5000\nEpoch 5/2000\n4/4 [==============================] - 0s 266us/step - loss: 0.2502 - acc: 0.5000\nEpoch 6/2000\n4/4 [==============================] - 0s 264us/step - loss: 0.2502 - acc: 0.5000\nEpoch 7/2000\n4/4 [==============================] - 0s 288us/step - loss: 0.2501 - acc: 0.7500\nEpoch 8/2000\n4/4 [==============================] - 0s 159us/step - loss: 0.2501 - acc: 0.7500\nEpoch 9/2000\n4/4 [==============================] - 0s 198us/step - loss: 0.2501 - acc: 0.7500\nEpoch 10/2000\n4/4 [==============================] - 0s 200us/step - loss: 0.2500 - acc: 0.7500\nEpoch 11/2000\n4/4 [==============================] - 0s 210us/step - loss: 0.2500 - acc: 0.7500\nEpoch 12/2000\n4/4 [==============================] - 0s 221us/step - loss: 0.2500 - acc: 0.7500\nEpoch 13/2000\n4/4 [==============================] - 0s 206us/step - loss: 0.2500 - acc: 0.7500\nEpoch 14/2000\n4/4 [==============================] - 0s 180us/step - loss: 0.2499 - acc: 0.7500\nEpoch 15/2000\n4/4 [==============================] - 0s 250us/step - loss: 0.2499 - acc: 0.7500\nEpoch 16/2000\n4/4 [==============================] - 0s 235us/step - loss: 0.2499 - acc: 0.7500\nEpoch 17/2000\n4/4 [==============================] - 0s 329us/step - loss: 0.2498 - acc: 0.7500\nEpoch 18/2000\n4/4 [==============================] - 0s 215us/step - loss: 0.2498 - acc: 0.7500\nEpoch 19/2000\n4/4 [==============================] - 0s 289us/step - loss: 0.2498 - acc: 0.7500\nEpoch 20/2000\n4/4 [==============================] - 0s 232us/step - loss: 0.2498 - acc: 0.7500\nEpoch 21/2000\n4/4 [==============================] - 0s 233us/step - loss: 0.2497 - acc: 0.7500\nEpoch 22/2000\n4/4 [==============================] - 0s 250us/step - loss: 0.2497 - acc: 0.7500\nEpoch 23/2000\n4/4 [==============================] - 0s 245us/step - loss: 0.2497 - acc: 0.7500\nEpoch 24/2000\n4/4 [==============================] - 0s 263us/step - loss: 0.2496 - acc: 0.7500\nEpoch 25/2000\n4/4 [==============================] - 0s 279us/step - loss: 0.2496 - acc: 0.7500\nEpoch 26/2000\n4/4 [==============================] - 0s 183us/step - loss: 0.2496 - acc: 0.7500\nEpoch 27/2000\n4/4 [==============================] - 0s 186us/step - loss: 0.2496 - acc: 0.7500\nEpoch 28/2000\n4/4 [==============================] - 0s 200us/step - loss: 0.2495 - acc: 0.7500\nEpoch 29/2000\n4/4 [==============================] - 0s 275us/step - loss: 0.2495 - acc: 0.7500\nEpoch 30/2000\n4/4 [==============================] - 0s 230us/step - loss: 0.2495 - acc: 0.7500\nEpoch 31/2000\n4/4 [==============================] - 0s 215us/step - loss: 0.2494 - acc: 0.7500\nEpoch 32/2000\n4/4 [==============================] - 0s 314us/step - loss: 0.2494 - acc: 0.7500\nEpoch 33/2000\n4/4 [==============================] - 0s 184us/step - loss: 0.2494 - acc: 0.7500\nEpoch 34/2000\n4/4 [==============================] - 0s 229us/step - loss: 0.2494 - acc: 0.7500\nEpoch 35/2000\n4/4 [==============================] - 0s 305us/step - loss: 0.2493 - acc: 0.7500\nEpoch 36/2000\n4/4 [==============================] - 0s 253us/step - loss: 0.2493 - acc: 0.7500\nEpoch 37/2000\n4/4 
[==============================] - 0s 319us/step - loss: 0.2493 - acc: 0.7500\nEpoch 38/2000\n4/4 [==============================] - 0s 310us/step - loss: 0.2492 - acc: 0.7500\nEpoch 39/2000\n4/4 [==============================] - 0s 324us/step - loss: 0.2492 - acc: 0.5000\nEpoch 40/2000\n4/4 [==============================] - 0s 188us/step - loss: 0.2492 - acc: 0.5000\nEpoch 41/2000\n4/4 [==============================] - 0s 192us/step - loss: 0.2492 - acc: 0.5000\nEpoch 42/2000\n4/4 [==============================] - 0s 227us/step - loss: 0.2491 - acc: 0.5000\nEpoch 43/2000\n4/4 [==============================] - 0s 294us/step - loss: 0.2491 - acc: 0.5000\nEpoch 44/2000\n4/4 [==============================] - 0s 288us/step - loss: 0.2491 - acc: 0.5000\nEpoch 45/2000\n4/4 [==============================] - 0s 364us/step - loss: 0.2490 - acc: 0.5000\nEpoch 46/2000\n4/4 [==============================] - 0s 214us/step - loss: 0.2490 - acc: 0.5000\nEpoch 47/2000\n4/4 [==============================] - 0s 354us/step - loss: 0.2490 - acc: 0.5000\nEpoch 48/2000\n4/4 [==============================] - 0s 198us/step - loss: 0.2490 - acc: 0.5000\nEpoch 49/2000\n4/4 [==============================] - 0s 177us/step - loss: 0.2489 - acc: 0.5000\nEpoch 50/2000\n4/4 [==============================] - 0s 334us/step - loss: 0.2489 - acc: 0.5000\nEpoch 51/2000\n4/4 [==============================] - 0s 313us/step - loss: 0.2489 - acc: 0.5000\nEpoch 52/2000\n4/4 [==============================] - 0s 291us/step - loss: 0.2488 - acc: 0.5000\nEpoch 53/2000\n4/4 [==============================] - 0s 229us/step - loss: 0.2488 - acc: 0.5000\nEpoch 54/2000\n4/4 [==============================] - 0s 192us/step - loss: 0.2488 - acc: 0.5000\nEpoch 55/2000\n4/4 [==============================] - 0s 298us/step - loss: 0.2487 - acc: 0.5000\nEpoch 56/2000\n4/4 [==============================] - 0s 263us/step - loss: 0.2487 - acc: 0.5000\nEpoch 57/2000\n4/4 [==============================] - 0s 393us/step - loss: 0.2487 - acc: 0.5000\nEpoch 58/2000\n4/4 [==============================] - 0s 523us/step - loss: 0.2487 - acc: 0.5000\nEpoch 59/2000\n4/4 [==============================] - 0s 249us/step - loss: 0.2486 - acc: 0.5000\nEpoch 60/2000\n4/4 [==============================] - 0s 258us/step - loss: 0.2486 - acc: 0.5000\nEpoch 61/2000\n4/4 [==============================] - 0s 310us/step - loss: 0.2486 - acc: 0.5000\nEpoch 62/2000\n4/4 [==============================] - 0s 423us/step - loss: 0.2485 - acc: 0.5000\nEpoch 63/2000\n4/4 [==============================] - 0s 362us/step - loss: 0.2485 - acc: 0.5000\nEpoch 64/2000\n4/4 [==============================] - 0s 418us/step - loss: 0.2485 - acc: 0.5000\nEpoch 65/2000\n4/4 [==============================] - 0s 398us/step - loss: 0.2484 - acc: 0.5000\nEpoch 66/2000\n4/4 [==============================] - 0s 450us/step - loss: 0.2484 - acc: 0.5000\nEpoch 67/2000\n4/4 [==============================] - 0s 365us/step - loss: 0.2484 - acc: 0.5000\nEpoch 68/2000\n4/4 [==============================] - 0s 351us/step - loss: 0.2483 - acc: 0.5000\nEpoch 69/2000\n4/4 [==============================] - 0s 322us/step - loss: 0.2483 - acc: 0.5000\nEpoch 70/2000\n4/4 [==============================] - 0s 560us/step - loss: 0.2483 - acc: 0.5000\nEpoch 71/2000\n4/4 [==============================] - 0s 226us/step - loss: 0.2482 - acc: 0.5000\nEpoch 72/2000\n4/4 [==============================] - 0s 284us/step - loss: 0.2482 - acc: 0.5000\nEpoch 73/2000\n4/4 
[==============================] - 0s 211us/step - loss: 0.2482 - acc: 0.5000\nEpoch 74/2000\n4/4 [==============================] - 0s 274us/step - loss: 0.2481 - acc: 0.5000\nEpoch 75/2000\n4/4 [==============================] - 0s 225us/step - loss: 0.2481 - acc: 0.5000\nEpoch 76/2000\n4/4 [==============================] - 0s 299us/step - loss: 0.2481 - acc: 0.5000\nEpoch 77/2000\n4/4 [==============================] - 0s 212us/step - loss: 0.2480 - acc: 0.5000\nEpoch 78/2000\n4/4 [==============================] - 0s 279us/step - loss: 0.2480 - acc: 0.5000\nEpoch 79/2000\n4/4 [==============================] - 0s 335us/step - loss: 0.2480 - acc: 0.5000\nEpoch 80/2000\n4/4 [==============================] - 0s 434us/step - loss: 0.2479 - acc: 0.5000\nEpoch 81/2000\n4/4 [==============================] - 0s 408us/step - loss: 0.2479 - acc: 0.5000\nEpoch 82/2000\n4/4 [==============================] - 0s 485us/step - loss: 0.2478 - acc: 0.5000\nEpoch 83/2000\n4/4 [==============================] - 0s 264us/step - loss: 0.2478 - acc: 0.5000\nEpoch 84/2000\n4/4 [==============================] - 0s 425us/step - loss: 0.2478 - acc: 0.5000\nEpoch 85/2000\n4/4 [==============================] - 0s 220us/step - loss: 0.2477 - acc: 0.5000\nEpoch 86/2000\n4/4 [==============================] - 0s 254us/step - loss: 0.2477 - acc: 0.5000\n"
]
],
[
[
"#### result",
"_____no_output_____"
]
],
[
[
"print \"loss:\", model.evaluate(x=training_data, y=target_data, verbose=0)\nprint \"\"\nprint model.predict(training_data)",
"loss: [0.0019034724682569504, 1.0]\n\n[[0.03184402]\n [0.95542383]\n [0.95711863]\n [0.05266879]]\n"
]
],
[
[
"## ouput names",
"_____no_output_____"
]
],
[
[
"output_names = [model.outputs[0].name]\nprint output_names",
"[u'outputlayer/Sigmoid:0']\n"
]
],
[
[
"## uTensor\n\n```sh\nutensor-cli convert xor_keras.pb --output-nodes=outputlayer/Sigmoid\n```",
"_____no_output_____"
],
[
"unsupported op type in uTensor: QuantizedBiasAdd",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
ecdb15a06cdf48a5861efdc3b92ffba7953d17fe | 130 | ipynb | Jupyter Notebook | 1c_prep_meta.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | null | null | null | 1c_prep_meta.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | 2 | 2021-10-12T22:04:31.000Z | 2021-10-12T22:50:08.000Z | 1c_prep_meta.ipynb | rgilman33/obs-tower | 895faff27f0bfcd7beb3f0f53047467c567106c5 | [
"Apache-2.0"
] | null | null | null | 32.5 | 75 | 0.884615 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdb20ec20df423201a786151663903d36428885 | 7,641 | ipynb | Jupyter Notebook | slider_gabor.ipynb | kenny-co/procedural-advml | 176ed1b526fb94f7dfa6a2ed5aebef36fcc6682f | [
"MIT"
] | 52 | 2018-11-16T19:43:13.000Z | 2021-09-06T09:52:15.000Z | slider_gabor.ipynb | kenny-co/procedural-advml | 176ed1b526fb94f7dfa6a2ed5aebef36fcc6682f | [
"MIT"
] | 2 | 2019-03-20T02:11:02.000Z | 2020-06-19T06:55:13.000Z | slider_gabor.ipynb | kenny-co/procedural-advml | 176ed1b526fb94f7dfa6a2ed5aebef36fcc6682f | [
"MIT"
] | 7 | 2018-11-16T19:08:38.000Z | 2021-08-06T01:39:43.000Z | 38.396985 | 327 | 0.569035 | [
[
[
"%matplotlib inline\n#%matplotlib notebook",
"_____no_output_____"
],
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nfrom ipywidgets import interactive\nfrom ipywidgets import Dropdown, FloatSlider, IntSlider, ToggleButtons\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import decode_predictions, preprocess_input\nfrom keras.preprocessing import image\nfrom utils_attack import colorize, perturb\nfrom utils_noise import gaborN_rand, gaborN_uni\nfrom utils_noise import normalize, normalize_var",
"_____no_output_____"
]
],
[
[
"# Model & Images",
"_____no_output_____"
],
[
"<b>Model:</b> [InceptionV3](https://keras.io/applications/#inceptionv3) NN architecture using Keras weights from training on the ImageNet dataset. This achieves a top-1 accuracy of 77.9% and top-5 accuracy of 93.7%. Other models can be found [here](https://keras.io/applications/#documentation-for-individual-models). \n<br><b>Images:</b> Stock photos taken from [Pexels](https://www.pexels.com/).",
"_____no_output_____"
]
],
[
[
"img_dir = 'images/'\n\n# Specify image dimensions\nsize = 299\n\n# Load model\nmodel = InceptionV3(weights = 'imagenet')\n\n# Load images\nall_images = {}\nfor filename in os.listdir(img_dir):\n if not filename.startswith('.'):\n img = image.load_img(img_dir + filename, target_size = (size, size)) # We assume all images have the same dimensions\n img = image.img_to_array(img)\n all_images[filename.split('.')[0]] = img\n\n# Display images\nfor key, vals in all_images.items():\n fig2 = plt.figure()\n plt.axis('off')\n plt.imshow(vals.astype(np.uint8))",
"_____no_output_____"
]
],
[
[
"# Gabor Noise",
"_____no_output_____"
]
],
[
[
"grid = 23 # fixed grid size\n\n# Interactive slider\ndef PlotGaborRand(img_key, max_norm, num_kern, ksize, sigma, theta, lambd, color = 'Black-White'):\n image = all_images[img_key]\n fig = plt.figure(figsize = (18, 7.5))\n plt.subplots_adjust(wspace = 0.05)\n plt.title('Anisotropic Gabor Noise', size = 20)\n plt.axis('off')\n \n if color == 'Black-White': coloring = [1, 1, 1]\n if color == 'Red-Cyan': coloring = [1, -1, -1]\n if color == 'Green-Magenta': coloring = [-1, 1, -1]\n if color == 'Blue-Yellow': coloring = [-1, -1, 1]\n \n # Noise pattern\n ax = fig.add_subplot(1, 3, 3) \n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis = 'both', which = 'both', length = 0)\n ax.set_title('Noise Pattern', size = 16)\n \n noise = gaborN_rand(size = size, grid = grid, num_kern = num_kern, ksize = ksize, sigma = sigma, theta = theta, lambd = lambd)\n noise = normalize_var(noise)\n noise = colorize(noise, coloring)\n plt.imshow(normalize(noise))\n \n # Original image\n ax = fig.add_subplot(1, 3, 1)\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis = 'both', which = 'both', length = 0)\n ax.set_title('Original Image', size = 16)\n payload = perturb(img = image, noise = np.zeros((size, size, 3)), norm = max_norm)\n plt.imshow(payload.astype(np.uint8))\n prob = model.predict(preprocess_input(payload.astype(np.float).reshape((1, size, size, 3))))\n plt.xlabel('\\n'.join((item[1] + ' %.3f' % item[2]) for item in decode_predictions(prob)[0]), horizontalalignment = 'right', x = 1, size = 16)\n \n # Altered image\n ax = fig.add_subplot(1, 3, 2)\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis = 'both', which = 'both', length = 0)\n ax.set_title('Altered Image', size = 16)\n payload = perturb(img = image, noise = noise, norm = max_norm)\n plt.imshow(payload.astype(np.uint8))\n prob = model.predict(preprocess_input(payload.astype(np.float).reshape((1, size, size, 3))))\n plt.xlabel('\\n'.join((item[1] + ' %.3f' % item[2]) for item in decode_predictions(prob)[0]), horizontalalignment = 'right', x = 1, size = 16)\n\n# Parameter sliders\ns_img_key = Dropdown(options = list(all_images.keys()), value = 'boat', description = 'Image:')\ns_max_norm = IntSlider(min = 0, max = 64, value = 12, step = 2, continuous_update = False, description = 'Max Change:')\ns_num_kern = IntSlider(min = 1, max = 100, value = 23, step = 1, continuous_update = False, description = 'No. Kernels:')\ns_ksize = IntSlider(min = 1, max = 100, value = 23, step = 1, continuous_update = False, description = 'Kernel Size:')\ns_sigma = FloatSlider(min = 1, max = 20, value = 8, step = 0.25, continuous_update = False, description = 'Kernel Var:')\ns_theta = FloatSlider(min = 0, max = np.pi, value = np.pi / 4, step = np.pi / 24, continuous_update = False, description = 'Orientation:')\ns_lambd = FloatSlider(min = 0.25, max = 20, value = 8, step = 0.25, continuous_update = False, description = 'Bandwidth:')\ns_color = ToggleButtons(options = ['Black-White', 'Red-Cyan', 'Green-Magenta', 'Blue-Yellow'], description = 'Color:', button_style='', disabled = False)\n\ninteractive(PlotGaborRand,\n img_key = s_img_key,\n max_norm = s_max_norm,\n num_kern = s_num_kern,\n ksize = s_ksize,\n sigma = s_sigma,\n theta = s_theta,\n lambd = s_lambd,\n color = s_color)",
"_____no_output_____"
]
],
[
[
"The classifier's top 5 predictions are displayed under each image.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdb233da14e307427beecef88f344aabea5f218 | 99,842 | ipynb | Jupyter Notebook | Deliverable 3 Opt/AlphabetSoupCharity_Optimization_Add_Hidden_Layer_Add_Neurons.ipynb | jzebker/Neural_Network_Charity_Analysis | f1eb342ae68c78570bb74b9aaf14ee0ccf184c70 | [
"MIT"
] | null | null | null | Deliverable 3 Opt/AlphabetSoupCharity_Optimization_Add_Hidden_Layer_Add_Neurons.ipynb | jzebker/Neural_Network_Charity_Analysis | f1eb342ae68c78570bb74b9aaf14ee0ccf184c70 | [
"MIT"
] | null | null | null | Deliverable 3 Opt/AlphabetSoupCharity_Optimization_Add_Hidden_Layer_Add_Neurons.ipynb | jzebker/Neural_Network_Charity_Analysis | f1eb342ae68c78570bb74b9aaf14ee0ccf184c70 | [
"MIT"
] | null | null | null | 57.248853 | 16,592 | 0.542818 | [
[
[
"### Deliverable 1: Preprocessing the Data for a Neural Network",
"_____no_output_____"
]
],
[
[
"# Import our dependencies\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler,OneHotEncoder\nimport pandas as pd\nimport tensorflow as tf\n\n# Import and read the charity_data.csv.\nimport pandas as pd \napplication_df = pd.read_csv(\"charity_data.csv\")\napplication_df.head()",
"_____no_output_____"
],
[
"# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.\napplication_df = application_df.drop(['EIN', 'NAME'], axis=1)",
"_____no_output_____"
],
[
"# Determine the number of unique values in each column.\napplication_df.nunique()",
"_____no_output_____"
],
[
"# Look at APPLICATION_TYPE value counts for binning\nAPPLICATION_TYPE_counts = application_df.APPLICATION_TYPE.value_counts()\nAPPLICATION_TYPE_counts",
"_____no_output_____"
],
[
"# Visualize the value counts of APPLICATION_TYPE\nAPPLICATION_TYPE_counts.plot.density()",
"_____no_output_____"
],
[
"# Determine which values to replace if counts are less than ...?\nreplace_application = list(APPLICATION_TYPE_counts[APPLICATION_TYPE_counts < 500].index)\n\n# Replace in dataframe\nfor app in replace_application:\n application_df.APPLICATION_TYPE = application_df.APPLICATION_TYPE.replace(app,\"Other\")\n \n# Check to make sure binning was successful\napplication_df.APPLICATION_TYPE.value_counts()",
"_____no_output_____"
],
[
"# Look at CLASSIFICATION value counts for binning\nCLASSIFICATION_counts = application_df.CLASSIFICATION.value_counts()\nCLASSIFICATION_counts",
"_____no_output_____"
],
[
"# Visualize the value counts of CLASSIFICATION\nCLASSIFICATION_counts.plot.density()",
"_____no_output_____"
],
[
"# Determine which values to replace if counts are less than ..?\nreplace_class = list(CLASSIFICATION_counts[CLASSIFICATION_counts < 1800].index)\n\n# Replace in dataframe\nfor cls in replace_class:\n application_df.CLASSIFICATION = application_df.CLASSIFICATION.replace(cls,\"Other\")\n \n# Check to make sure binning was successful\napplication_df.CLASSIFICATION.value_counts()",
"_____no_output_____"
],
[
"# Generate our categorical variable lists\napplication_cat = [\"APPLICATION_TYPE\",\"AFFILIATION\",\"CLASSIFICATION\",\"USE_CASE\",\"ORGANIZATION\",\"INCOME_AMT\",\"SPECIAL_CONSIDERATIONS\"]",
"_____no_output_____"
],
[
"# Create a OneHotEncoder instance\nenc = OneHotEncoder(sparse=False)\n\n# Fit and transform the OneHotEncoder using the categorical variable list\nencode_df = pd.DataFrame(enc.fit_transform(application_df[application_cat]))\n\n# Add the encoded variable names to the dataframe\nencode_df.columns = enc.get_feature_names(application_cat)\nencode_df",
"_____no_output_____"
],
[
"# Merge one-hot encoded features and drop the originals\napplication_df = application_df.merge(encode_df,left_index=True,right_index=True).drop(application_cat,1)\napplication_df",
"_____no_output_____"
],
[
"# Split our preprocessed data into our features and target arrays\ny = application_df[\"IS_SUCCESSFUL\"].values\nX = application_df.drop([\"IS_SUCCESSFUL\"],1).values\n\n# Split the preprocessed data into a training and testing dataset\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)",
"_____no_output_____"
],
[
"# Create a StandardScaler instances\nscaler = StandardScaler()\n\n# Fit the StandardScaler\nX_scaler = scaler.fit(X_train)\n\n# Scale the data\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)",
"_____no_output_____"
]
],
[
[
"### Deliverable 2: Compile, Train and Evaluate the Model",
"_____no_output_____"
]
],
[
[
"# Import checkpoint dependencies\nimport os\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\n# Define the checkpoint path and filenames\nos.makedirs(\"checkpoints/\",exist_ok=True)\ncheckpoint_path = \"checkpoints/weights.{epoch:02d}.hdf5\"",
"_____no_output_____"
],
[
"# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.\nnumber_input_features = len(X_train[0])\nhidden_nodes_layer1 = 80\nhidden_nodes_layer2 = 75\nhidden_nodes_layer3 = 30\n\nnn = tf.keras.models.Sequential()\n\n# First hidden layer\nnn.add(\n tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation=\"relu\")\n)\n\n# Second hidden layer\nnn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation=\"relu\"))\n\n# Third hidden layer\nnn.add(tf.keras.layers.Dense(units=hidden_nodes_layer3, activation=\"relu\"))\n\n# Output layer\nnn.add(tf.keras.layers.Dense(units=1, activation=\"sigmoid\"))\n\n# Check the structure of the model\nnn.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 80) 3520 \n_________________________________________________________________\ndense_1 (Dense) (None, 75) 6075 \n_________________________________________________________________\ndense_2 (Dense) (None, 30) 2280 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 31 \n=================================================================\nTotal params: 11,906\nTrainable params: 11,906\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# Compile the model\nnn.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n# Create a callback that saves the model's weights every epoch\ncp_callback = ModelCheckpoint(\n filepath=checkpoint_path,\n verbose=1,\n save_weights_only=True,\n save_freq='epoch',\n period=5)",
"WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.\n"
],
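[
"# Editor's illustrative sketch, not part of the original notebook: the deprecation warning above\n# says `period` should be replaced by `save_freq`, which counts batches rather than epochs.\n# The training log shows 804 batches per epoch (an assumption read from the output above), so the\n# same 'checkpoint every 5 epochs' behaviour could be expressed as:\ncp_callback_alt = ModelCheckpoint(\n    filepath=checkpoint_path,\n    verbose=1,\n    save_weights_only=True,\n    save_freq=5 * 804)  # 5 epochs x 804 batches per epoch",
"_____no_output_____"
],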
[
"# Train the model\nfit_model = nn.fit(X_train_scaled,y_train,epochs=100,callbacks=[cp_callback])",
"Epoch 1/100\n804/804 [==============================] - 2s 2ms/step - loss: 0.5682 - accuracy: 0.7226\nEpoch 2/100\n804/804 [==============================] - 2s 2ms/step - loss: 0.5551 - accuracy: 0.7292\nEpoch 3/100\n804/804 [==============================] - 2s 2ms/step - loss: 0.5516 - accuracy: 0.7305\nEpoch 4/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5510 - accuracy: 0.7319\nEpoch 5/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5490 - accuracy: 0.7330\n\nEpoch 00005: saving model to checkpoints/weights.05.hdf5\nEpoch 6/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5482 - accuracy: 0.7326\nEpoch 7/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5476 - accuracy: 0.7325\nEpoch 8/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5473 - accuracy: 0.7333\nEpoch 9/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5459 - accuracy: 0.7341\nEpoch 10/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5459 - accuracy: 0.7341\n\nEpoch 00010: saving model to checkpoints/weights.10.hdf5\nEpoch 11/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5452 - accuracy: 0.7349\nEpoch 12/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5443 - accuracy: 0.7353\nEpoch 13/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5443 - accuracy: 0.7353\nEpoch 14/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5442 - accuracy: 0.7348\nEpoch 15/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5438 - accuracy: 0.7363\n\nEpoch 00015: saving model to checkpoints/weights.15.hdf5\nEpoch 16/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5431 - accuracy: 0.7364\nEpoch 17/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5434 - accuracy: 0.7358\nEpoch 18/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5427 - accuracy: 0.7367\nEpoch 19/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5425 - accuracy: 0.7371\nEpoch 20/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5423 - accuracy: 0.7369\n\nEpoch 00020: saving model to checkpoints/weights.20.hdf5\nEpoch 21/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5418 - accuracy: 0.7369\nEpoch 22/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5414 - accuracy: 0.7360\nEpoch 23/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5411 - accuracy: 0.7372\nEpoch 24/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5413 - accuracy: 0.7385\nEpoch 25/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5407 - accuracy: 0.7379\n\nEpoch 00025: saving model to checkpoints/weights.25.hdf5\nEpoch 26/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5401 - accuracy: 0.7380\nEpoch 27/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5402 - accuracy: 0.7374\nEpoch 28/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5396 - accuracy: 0.7374\nEpoch 29/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5395 - accuracy: 0.7380\nEpoch 30/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5397 - accuracy: 0.7365\n\nEpoch 00030: saving model to checkpoints/weights.30.hdf5\nEpoch 31/100\n804/804 [==============================] - 1s 1ms/step - loss: 
0.5390 - accuracy: 0.7391\nEpoch 32/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5393 - accuracy: 0.7387\nEpoch 33/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5385 - accuracy: 0.7385\nEpoch 34/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5391 - accuracy: 0.7386\nEpoch 35/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5384 - accuracy: 0.7384\n\nEpoch 00035: saving model to checkpoints/weights.35.hdf5\nEpoch 36/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5389 - accuracy: 0.7389\nEpoch 37/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5379 - accuracy: 0.7383\nEpoch 38/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5374 - accuracy: 0.7390\nEpoch 39/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5380 - accuracy: 0.7396\nEpoch 40/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5381 - accuracy: 0.7381\n\nEpoch 00040: saving model to checkpoints/weights.40.hdf5\nEpoch 41/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5371 - accuracy: 0.7390\nEpoch 42/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5372 - accuracy: 0.7403\nEpoch 43/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5374 - accuracy: 0.7394\nEpoch 44/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5369 - accuracy: 0.7391\nEpoch 45/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5369 - accuracy: 0.7388\n\nEpoch 00045: saving model to checkpoints/weights.45.hdf5\nEpoch 46/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5366 - accuracy: 0.7386\nEpoch 47/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5365 - accuracy: 0.7393\nEpoch 48/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5361 - accuracy: 0.7391\nEpoch 49/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5366 - accuracy: 0.7390\nEpoch 50/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5360 - accuracy: 0.7397\n\nEpoch 00050: saving model to checkpoints/weights.50.hdf5\nEpoch 51/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5361 - accuracy: 0.7393\nEpoch 52/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5355 - accuracy: 0.7395\nEpoch 53/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5357 - accuracy: 0.7394\nEpoch 54/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5359 - accuracy: 0.7394\nEpoch 55/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5358 - accuracy: 0.7392\n\nEpoch 00055: saving model to checkpoints/weights.55.hdf5\nEpoch 56/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5356 - accuracy: 0.7400\nEpoch 57/100\n804/804 [==============================] - 2s 2ms/step - loss: 0.5353 - accuracy: 0.7401\nEpoch 58/100\n804/804 [==============================] - 3s 4ms/step - loss: 0.5353 - accuracy: 0.7393\nEpoch 59/100\n804/804 [==============================] - 2s 3ms/step - loss: 0.5352 - accuracy: 0.7397\nEpoch 60/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5352 - accuracy: 0.7400\n\nEpoch 00060: saving model to checkpoints/weights.60.hdf5\nEpoch 61/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5356 - accuracy: 0.7389\nEpoch 62/100\n804/804 
[==============================] - 1s 2ms/step - loss: 0.5349 - accuracy: 0.7402: 0s - loss: 0.5332 - accu\nEpoch 63/100\n804/804 [==============================] - 1s 2ms/step - loss: 0.5348 - accuracy: 0.7396\nEpoch 64/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5346 - accuracy: 0.7398\nEpoch 65/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5346 - accuracy: 0.7399\n\nEpoch 00065: saving model to checkpoints/weights.65.hdf5\nEpoch 66/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5345 - accuracy: 0.7402\nEpoch 67/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5346 - accuracy: 0.7401\nEpoch 68/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5346 - accuracy: 0.7400\nEpoch 69/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5345 - accuracy: 0.7399\nEpoch 70/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5340 - accuracy: 0.7398\n\nEpoch 00070: saving model to checkpoints/weights.70.hdf5\nEpoch 71/100\n804/804 [==============================] - 1s 1ms/step - loss: 0.5344 - accuracy: 0.7402\nEpoch 72/100\n804/804 [==============================] - 2s 3ms/step - loss: 0.5344 - accuracy: 0.7404\nEpoch 73/100\n"
],
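[
"# Editor's illustrative sketch, not part of the original notebook: any of the periodic checkpoint\n# files written above (weights.05.hdf5, weights.10.hdf5, ...) can be restored into a model with the\n# same architecture via `load_weights`. The epoch number below is a hypothetical choice, and the\n# call is left commented out so it does not alter the evaluation that follows.\n# nn.load_weights(\"checkpoints/weights.50.hdf5\")",
"_____no_output_____"
],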
[
"# Evaluate the model using the test data\nmodel_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")",
"268/268 - 0s - loss: 0.5674 - accuracy: 0.7272\nLoss: 0.5674124360084534, Accuracy: 0.7272303104400635\n"
],
[
"# Export our model to HDF5 file\nnn.save(\"AlphabetSoupCharity.h5\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdb32182a4972aad0b793e0918cd595e01c15d6 | 439,214 | ipynb | Jupyter Notebook | Code/Cluster Asssessment.ipynb | wezteoh/hack_on_data | f200f90feb6a0557d3cf2d391bb037921441cc15 | [
"Apache-2.0"
] | null | null | null | Code/Cluster Asssessment.ipynb | wezteoh/hack_on_data | f200f90feb6a0557d3cf2d391bb037921441cc15 | [
"Apache-2.0"
] | null | null | null | Code/Cluster Asssessment.ipynb | wezteoh/hack_on_data | f200f90feb6a0557d3cf2d391bb037921441cc15 | [
"Apache-2.0"
] | null | null | null | 29.606606 | 53,662 | 0.400361 | [
[
[
"# Introduction\nThis `Python 2` / `PySpark` script analyzes the effectiveness of our clustering algorithm, by finding the most common word in the title of all products wihtin each cluster.",
"_____no_output_____"
],
[
"# Notebook Setup",
"_____no_output_____"
],
[
"## Initialise modules",
"_____no_output_____"
]
],
[
[
"import findspark\nimport pyspark\nimport pyspark.sql.functions as F\nimport pyspark.sql.types as T\n\nimport pymongo\nimport pandas as pd\n\nimport gzip # To parse gzip file\nimport re # Regex for text processing\nimport os # For setting up Mongo-Spark connector\nimport csv # To read/write CSV files\n\nfrom collections import Counter # To count the word frequencies\n\nimport plotly\nimport plotly.plotly as py\nimport plotly.offline as pyo\nimport plotly.graph_objs as go\nimport colorlover as cl",
"_____no_output_____"
]
],
[
[
"## Initialise PySpark session",
"_____no_output_____"
],
[
"Load `MongoDB-Spark` connector when starting up `PySpark`.",
"_____no_output_____"
]
],
[
[
"packages = 'org.mongodb.spark:mongo-spark-connector_2.11:2.2.0'\ndedicated_memory = '4g'\n\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages {} --driver-memory {} pyspark-shell' \\\n .format(packages, dedicated_memory)",
"_____no_output_____"
],
[
"# Find SPARK_HOME\nfindspark.init()\n\n# Create SparkSession\nspark = (pyspark.sql.SparkSession\n .builder.appName('ClusterAssessment')\n .getOrCreate())",
"_____no_output_____"
]
],
[
[
"## Initiate Plotly Offline notebook mode",
"_____no_output_____"
]
],
[
[
"pyo.init_notebook_mode(connected=True)",
"_____no_output_____"
]
],
[
[
"## Configure Pandas HTML display",
"_____no_output_____"
]
],
[
[
"pd.set_option('display.max_colwidth', -1)",
"_____no_output_____"
]
],
[
[
"## Define helper methods",
"_____no_output_____"
]
],
[
[
"def parse(path):\n '''\n Unzip a json.gz at `path` and returns a generator.\n '''\n g = gzip.open(path, 'rb')\n for line in g:\n yield eval(line)\n\ndef import_to_mongo(path, coll, db='hackon', create_index=True):\n '''\n Unzip and import json.gz file from `path` and loads it into mongo server.\n Create database index if `create_index` is True. \n '''\n # Obtain handle to Mongo database and collection\n client = pymongo.MongoClient()\n collection = client[db][coll]\n \n # Return prematurely if database.collection already exists\n if (collection.count() != 0):\n print '{}.{} already exists on MongoDisk server. Exiting without loading JSON data.'.format(db, coll)\n return\n \n # Insert datapoints into Mongo database\n try:\n collection.insert_many((datapoint for datapoint in parse(path)))\n print 'JSON data successfully imported to Mongo at \\'{}.{}.\\''.format(db, coll)\n except Exception as e:\n print 'Error loading data.\\n{}'.format(e)\n client.close()\n return\n \n if not create_index:\n client.close()\n return\n \n # Create database index for improved searching\n # collection.create_index([('asin', pymongo.ASCENDING), ('reviewerID', pymongo.DESCENDING)])\n\ndef load_mongo_to_spark(coll, db='hackon'):\n '''\n Load the Mongo database to a Spark Session and returns the Spark DataFrame\n '''\n try:\n return (spark\n .read\n .format('com.mongodb.spark.sql.DefaultSource')\n .option('uri', 'mongodb://127.0.0.1/{}.{}'.format(db, coll))\n .load())\n except Exception as e:\n print 'Failed to create Spark dataframe.\\n{}'.format(e)\n\ndef displayDF(sparkDF, n=10):\n '''\n Interactively displays the first n rows of a sparkDF as a pandas dataframe\n '''\n return (sparkDF\n .limit(n)\n .drop('_id', 'unixReviewTime')\n .toPandas())",
"_____no_output_____"
],
[
"import_to_mongo('../../Datasets/reviews_Baby.json.gz', coll='baby')",
"hackon.baby already exists on MongoDisk server. Exiting without loading JSON data.\n"
]
],
[
[
"## Load stopwords\nA list of stopwords is loaded as a Python list and broadcasted in PySpark.",
"_____no_output_____"
]
],
[
[
"# TODO: Get brands from rawDF instead of a new file here.\n# TODO: Replace & and '\n# Find distinct brands in the dataset\nuniqueBrands = (load_mongo_to_spark('baby_meta')\n .select('brand')\n .distinct()\n .rdd\n .map(lambda x: x[0])\n .collect())",
"_____no_output_____"
],
[
"# Load stopwords into list\nwith open('stopwords.csv', 'r') as csvFile:\n fileReader = csv.reader(csvFile)\n stopwords = []\n for word in fileReader:\n stopwords.extend(word)\n \n# Add '' to stopwords\nstopwords.append('')\n\n# Add brands into stopwords\nstopwords.extend(uniqueBrands)\n\n# Broadcast stopwords\nstopwords_broadcast = spark.sparkContext.broadcast(stopwords)\n \nprint 'First 100 stopwords:\\n\\n{}'.format(stopwords[:100])",
"First 100 stopwords:\n\n['all', 'just', 'being', 'over', 'both', 'through', 'yourselves', 'its', 'before', 'with', 'had', 'should', 'to', 'only', 'under', 'ours', 'has', 'do', 'them', 'his', 'very', 'they', 'not', 'during', 'now', 'him', 'nor', 'did', 'these', 't', 'each', 'where', 'because', 'doing', 'theirs', 'some', 'are', 'our', 'ourselves', 'out', 'what', 'for', 'below', 'does', 'above', 'between', 'she', 'be', 'we', 'after', 'here', 'hers', 'by', 'on', 'about', 'of', 'against', 's', 'or', 'own', 'into', 'yourself', 'down', 'your', 'from', 'her', 'whom', 'there', 'been', 'few', 'too', 'themselves', 'was', 'until', 'more', 'himself', 'that', 'but', 'off', 'herself', 'than', 'those', 'he', 'me', 'myself', 'this', 'up', 'will', 'while', 'can', 'were', 'my', 'and', 'then', 'is', 'in', 'am', 'it', 'an', 'as']\n"
]
],
[
[
"# Cluster Assessment Script\nThe following code analyzes how frequent words appear in product titles of a given cluster. This is used as a tentative proxy for clustering effectiveness.",
"_____no_output_____"
]
],
[
[
"# Load stopwords into list\nwith open('stopwords.csv', 'r') as csvFile:\n fileReader = csv.reader(csvFile)\n stopwords = []\n for word in fileReader:\n stopwords.extend(word)\n \n# Add '' to stopwords\nstopwords.append('')\n\n# Broadcast stopwords\nstopwords_broadcast = spark.sparkContext.broadcast(stopwords)\n\[email protected](returnType=T.ArrayType(T.StringType()))\ndef tokenize_set_and_filter_stopwords(text):\n '''\n Tokenizes a list of words, before filtering for stopwords. Return a setted list of words.\n \n Input:\n text: A string.\n Returns:\n A list of setted words with stopwords removed.\n '''\n string_set = set(re.split(r'\\W+', text.lower()))\n\n return [word for word in string_set if word not in stopwords_broadcast.value]\n\[email protected](returnType=T.IntegerType())\ndef assign_clusterID():\n return random.randint(0, 10)\n\[email protected](returnType=T.ArrayType(T.StructType([\n T.StructField('token', T.StringType()),\n T.StructField('frequency', T.FloatType())\n])))\ndef analyze_word_frequency(tokens, N=10):\n '''\n Calculate the frequency of words in title appearing in products titles of a given cluster.\n \n Inputs:\n tokens: A list of list of words.\n Returns:\n A list of tuple (token, frequency) of the top N words, sorted in decreasing frequency.\n '''\n counter = Counter(word for words in tokens for word in words)\n L = len(tokens)\n return map(lambda (k, v): (k, float(v) / L), \n counter.most_common()[:N])",
"_____no_output_____"
]
],
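[
[
"# Editor's toy illustration, not part of the original notebook: the frequency proxy defined above,\n# reproduced on plain Python data so the idea is easy to see. The token lists below are hypothetical\n# examples of one cluster's already-tokenized, stopword-filtered product titles.\ntoy_cluster_tokens = [['jogging', 'stroller'], ['stroller', 'umbrella'], ['lightweight', 'stroller']]\ntoy_counter = Counter(word for words in toy_cluster_tokens for word in words)\nprint [(word, float(count) / len(toy_cluster_tokens)) for word, count in toy_counter.most_common(3)]",
"_____no_output_____"
]
],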
[
[
"## Import clustered products metadata",
"_____no_output_____"
]
],
[
[
"titlesDF = load_mongo_to_spark('baby_meta').select('asin', 'title')\ndisplayDF(titlesDF)",
"_____no_output_____"
],
[
"clusteredProductMetaDF = (spark\n .read\n .format('com.databricks.spark.csv')\n .options(header='true', inferschema='true')\n .load('../../Datasets/baby_2000_2_cluster_df.csv')\n .drop('_c0')\n .dropna()\n .join(titlesDF, 'asin')\n )\ndisplayDF(clusteredProductMetaDF)",
"_____no_output_____"
],
[
"topWordsDF = (clusteredProductMetaDF\n .select('clusterID', tokenize_set_and_filter_stopwords('title').alias('tokens'))\n .groupBy('clusterID')\n .agg(F.count('clusterID').alias('productCount'), F.collect_list('tokens').alias('tokens'))\n .select('clusterID', 'productCount', analyze_word_frequency('tokens').alias('topWordsAndFreq'))\n )\ndisplayDF(topWordsDF)",
"_____no_output_____"
]
],
[
[
"### Clusters that have only 1 product",
"_____no_output_____"
]
],
[
[
"print '{} / {}'.format(topWordsDF.filter(F.col('productCount') == 1).count(), topWordsDF.count())",
"461 / 1500\n"
],
[
"displayDF(topWordsDF.filter(F.col('productCount') > 2))",
"_____no_output_____"
]
],
[
[
"## Visualizations",
"_____no_output_____"
]
],
[
[
"def plot_2D_histogram(DF):\n '''\n Plot a coloured histogram of top word frequency.\n '''\n def convert_to_list(singleColDF):\n '''\n Convert a PySpark DataFrame (with a single column) into a Python list.\n '''\n assert len(singleColDF.schema.names) == 1\n return singleColDF.rdd.map(lambda x: x[0]).collect()\n \n def create_colourscale():\n '''\n Create a custom colorscale.\n '''\n # Define colourscale\n colourscale = cl.scales['9']['seq']['Blues']\n \n return [[0.125 * i, colourscale[i]] for i in range(9)]\n \n \n x_val = DF.select('topWordsAndFreq').rdd.map(lambda x: x[0][0]['frequency']).collect()\n y_val = convert_to_list(DF.select('productCount'))\n \n # Define colourscale\n colourscale = cl.interp(cl.scales['9']['seq']['Blues'], 20)\n \n scatterTrace = go.Scatter(\n x = x_val, \n y = y_val,\n mode = 'markers', \n name = 'points',\n marker = dict(\n color = 'rgb(60,121,214)', \n size = 2\n )\n )\n \n hist2DTrace = go.Histogram2dcontour(\n x = x_val, \n y = y_val, \n ncontours = 20,\n colorscale = create_colourscale(), \n reversescale = False, \n showscale = True,\n histnorm = 'probability'\n )\n xHistTrace = go.Histogram(\n x = x_val, \n marker = dict(color='rgb(60,121,214)'),\n yaxis = 'y2'\n )\n yHistTrace = go.Histogram(\n y = y_val, \n marker = dict(color='rgb(60,121,214)'),\n xaxis = 'x2'\n )\n data = [scatterTrace, hist2DTrace, xHistTrace, yHistTrace]\n\n layout = go.Layout(\n showlegend=False,\n autosize=False,\n width=600,\n height=550,\n xaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n yaxis=dict(\n domain=[0, 0.85],\n showgrid=False,\n zeroline=False\n ),\n margin=dict(\n t=50\n ),\n hovermode='closest',\n bargap=0,\n xaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n ),\n yaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n )\n )\n \n figure = go.Figure(data=data, layout=layout)\n return pyo.iplot(figure)\n \n traces = []\n \n x_val = DF.select('topWordsAndFreq').rdd.map(lambda x: x[0][0]['frequency']).collect()\n y_val = convert_to_list(DF.select('productCount'))\n \n # Define Histogram traces\n horHist = go.Histogram(\n x = x_val,\n autobinx = False,\n xbins = {\n 'start': 0,\n 'end': 1,\n 'size': 50\n },\n yaxis = 'y2'\n )\n \n vertHist = go.Histogram(\n x = y_val,\n autobiny = False,\n ybins = {\n 'start': 0,\n 'end': 1,\n 'size': 50\n },\n xaxis = 'x2'\n )\n \n # Define 2D Histogram Contour trace\n contourHist =go.Histogram2dContour(\n x = x_val,\n y = y_val,\n histnorm = 'probability',\n \n )\n \n data = go.Data([horHist, vertHist, contourHist])\n \n layout = go.Layout(\n showlegend = False,\n autosize = False,\n width = 1200,\n height = 1200,\n xaxis = dict(\n domain = [0, 0.85],\n showgrid = False,\n zeroline = False\n ),\n yaxis = dict(\n domain = [0, 0.85],\n showgrid = False,\n zeroline = False\n ),\n margin=dict(\n t=50\n ),\n hovermode='closest',\n bargap=0,\n xaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False,\n showticklabels=False,\n showline = False\n ),\n yaxis2=dict(\n domain=[0.85, 1],\n showgrid=False,\n zeroline=False\n )\n )\n \n figure = go.Figure(data=data, layout=layout)\n \n return py.plot(figure, filename='Test', sharing='secret', auto_open=False)",
"_____no_output_____"
],
[
"plot_2D_histogram(topWordsDF)",
"_____no_output_____"
],
[
"plot_2D_histogram(topWordsDF)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ecdb3e28b8ec20a21e594696cb936eff42a401d3 | 25,905 | ipynb | Jupyter Notebook | notebooks/Dstripes/adversarial/basic/inference_adversarial/convolutional/AE/DstripesAAEssmi Convolutional.ipynb | Fidan13/Generative_Models | 2c700da53210a16f75c468ba521061106afa6982 | [
"MIT"
] | null | null | null | notebooks/Dstripes/adversarial/basic/inference_adversarial/convolutional/AE/DstripesAAEssmi Convolutional.ipynb | Fidan13/Generative_Models | 2c700da53210a16f75c468ba521061106afa6982 | [
"MIT"
] | null | null | null | notebooks/Dstripes/adversarial/basic/inference_adversarial/convolutional/AE/DstripesAAEssmi Convolutional.ipynb | Fidan13/Generative_Models | 2c700da53210a16f75c468ba521061106afa6982 | [
"MIT"
] | null | null | null | 24.052925 | 215 | 0.549276 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdb491e748af43d6ab10e87656ff837b679530c | 278,235 | ipynb | Jupyter Notebook | Clustering_Train1.ipynb | inesleite/Data-Science | c6249a723ef977d5abe9cc06cbc76906463d3a98 | [
"Apache-2.0"
] | null | null | null | Clustering_Train1.ipynb | inesleite/Data-Science | c6249a723ef977d5abe9cc06cbc76906463d3a98 | [
"Apache-2.0"
] | null | null | null | Clustering_Train1.ipynb | inesleite/Data-Science | c6249a723ef977d5abe9cc06cbc76906463d3a98 | [
"Apache-2.0"
] | null | null | null | 139.536108 | 33,664 | 0.838 | [
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import mixture",
"_____no_output_____"
],
[
"train_df = pd.read_csv(\"./clean_data/clean_train.csv\")\ntest_df = pd.read_csv(\"./clean_data/clean_test.csv\")",
"_____no_output_____"
],
[
"train_df.head(4)",
"_____no_output_____"
],
[
"test_df.head(4)",
"_____no_output_____"
],
[
"train_df = pd.get_dummies(train_df)\ntest_df = pd.get_dummies(test_df)",
"_____no_output_____"
],
[
"train_df.head(4)",
"_____no_output_____"
],
[
"X = train_df = train_df.drop(columns=['class_neg', 'class_pos'], axis=1)\nZ = test_df = test_df.drop(columns=['class_neg', 'class_pos'], axis=1)",
"_____no_output_____"
],
[
"print(type(X))\nX = X.as_matrix()\nprint(type(Z))\nZ = Z.as_matrix()",
"<class 'pandas.core.frame.DataFrame'>\n<class 'pandas.core.frame.DataFrame'>\n"
],
[
"print(type(X))\nprint(type(Z))",
"<class 'numpy.ndarray'>\n<class 'numpy.ndarray'>\n"
],
[
"print(X)\nprint(Z)",
"[[7.66980000e+04 7.13188501e-01 2.13070644e+09 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [3.30580000e+04 7.13188501e-01 0.00000000e+00 ... 1.50000000e+03\n 0.00000000e+00 0.00000000e+00]\n [4.10400000e+04 7.13188501e-01 2.28000000e+02 ... 5.14000000e+02\n 0.00000000e+00 0.00000000e+00]\n ...\n [1.12000000e+02 0.00000000e+00 2.13070643e+09 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [8.02920000e+04 7.13188501e-01 2.13070643e+09 ... 3.88422000e+05\n 0.00000000e+00 0.00000000e+00]\n [4.02220000e+04 7.13188501e-01 6.98000000e+02 ... 1.58000000e+02\n 0.00000000e+00 0.00000000e+00]]\n[[6.00000000e+01 0.00000000e+00 2.00000000e+01 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [8.20000000e+01 0.00000000e+00 6.80000000e+01 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [6.60020000e+04 2.00000000e+00 2.12000000e+02 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n ...\n [7.96360000e+04 7.72064889e-01 1.67000000e+03 ... 1.95480000e+04\n 0.00000000e+00 0.00000000e+00]\n [1.10000000e+02 7.72064889e-01 3.60000000e+01 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [8.00000000e+00 0.00000000e+00 6.00000000e+00 ... 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]]\n"
],
[
"X.shape\nZ.shape",
"_____no_output_____"
],
[
"print(X[1,:])\nprint(Z[1,:])",
"[3.30580000e+04 7.13188501e-01 0.00000000e+00 1.90620639e+05\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.82540000e+04 6.53294000e+05\n 1.72080000e+06 5.16724000e+05 3.16420000e+04 0.00000000e+00\n 1.39335200e+06 0.00000000e+00 6.80000000e+01 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.56089800e+06 2.12715000e+06\n 1.08459800e+06 3.38544000e+05 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 7.15100000e+04 7.72720000e+05 1.99692400e+06\n 9.95600000e+04 0.00000000e+00 7.33600000e+03 7.80800000e+03\n 1.37760000e+04 1.30860000e+04 1.01007400e+06 1.87390200e+06\n 1.47260000e+04 6.00000000e+00 0.00000000e+00 0.00000000e+00\n 1.37857600e+06 4.47166000e+05 1.99512000e+05 1.54298000e+05\n 1.37280000e+05 1.38668000e+05 1.65908000e+05 2.29652000e+05\n 8.70820000e+04 4.70800000e+03 3.64666000e+06 8.60000000e+01\n 4.54000000e+02 3.64000000e+02 3.50000000e+02 1.39335200e+06\n 4.90280000e+04 6.88314000e+05 3.92208000e+05 3.41420000e+05\n 3.59780000e+05 3.66560000e+05 4.63710834e+05 5.13147820e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 6.70000000e+03\n 3.30575100e+04 3.64666000e+06 3.64666000e+06 3.58203400e+06\n 1.77330000e+04 2.60120000e+05 1.15626000e+05 6.90000000e+03\n 2.94285000e+06 1.20960000e+06 0.00000000e+00 1.90221811e+05\n 9.15204147e+01 4.43075832e-04 2.29107936e+06 0.00000000e+00\n 6.43536960e+05 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 3.80000000e+01 9.86440000e+04 1.17950200e+06\n 1.28673600e+06 3.36388000e+05 3.62940000e+04 5.19200000e+03\n 5.60000000e+01 1.90515567e+05 0.00000000e+00 3.64666000e+06\n 3.70629800e+01 6.16000000e+03 7.96000000e+02 1.64860000e+05\n 3.50066000e+05 2.72956000e+05 1.83760000e+06 3.01242000e+05\n 9.14800000e+03 2.20000000e+01 0.00000000e+00 7.49091271e+02\n 1.22296099e+03 1.92882489e+06 3.51510236e+05 2.74179338e+02\n 1.93742905e+04 7.39379113e+00 1.34166522e+01 2.20075221e+06\n 3.31200000e+03 5.22000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.37360000e+04 3.69460000e+04\n 5.93600000e+03 0.00000000e+00 0.00000000e+00 1.03330000e+05\n 1.62540000e+04 4.51008000e+06 8.68538000e+05 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 3.47782000e+06\n 2.21176000e+03 2.33400000e+03 6.64504000e+05 8.24154000e+05\n 4.21400000e+05 1.78064000e+05 2.93306000e+05 2.45416000e+05\n 1.33654000e+05 8.11400000e+04 9.75760000e+04 1.50000000e+03\n 0.00000000e+00 0.00000000e+00]\n[8.20000000e+01 0.00000000e+00 6.80000000e+01 4.00000000e+01\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 7.48000000e+02 1.25940000e+04\n 3.63600000e+03 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 5.24400000e+03 0.00000000e+00 6.00000000e+01 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.31740000e+04 1.81660000e+04\n 2.36860000e+04 1.27000000e+03 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.20000000e+01 8.20000000e+01\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 6.92000000e+02\n 1.62860000e+04 0.00000000e+00 2.80000000e+02 4.40000000e+01\n 5.00000000e+01 1.27400000e+03 8.66000000e+02 3.36200000e+03\n 1.11020000e+04 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 1.25640000e+04 1.75600000e+03 6.38000000e+02 2.76000000e+02\n 1.72000000e+02 1.32000000e+02 8.12000000e+02 
3.08000000e+02\n 1.92000000e+02 1.28000000e+02 4.68940000e+04 4.00000000e+00\n 2.00000000e+00 3.80000000e+01 0.00000000e+00 5.24400000e+03\n 3.60000000e+02 2.05200000e+04 3.13400000e+03 2.80200505e+05\n 3.20483011e+05 4.00614716e+05 4.57347105e+05 5.03282158e+05\n 5.38685709e+05 5.67845949e+05 5.87884465e+05 2.33200000e+04\n 8.18900000e+01 4.68940000e+04 4.68940000e+04 4.83240000e+04\n 6.80000000e+01 0.00000000e+00 4.48600000e+03 4.64800000e+04\n 1.70500000e+04 1.20960000e+06 7.26000000e+02 2.00000000e+00\n 4.00000000e+00 0.00000000e+00 7.22496000e+03 0.00000000e+00\n 7.76832000e+03 0.00000000e+00 4.20000000e+01 0.00000000e+00\n 0.00000000e+00 4.00000000e+00 7.06400000e+03 6.20000000e+03\n 2.45200000e+03 1.24600000e+03 1.20000000e+01 0.00000000e+00\n 0.00000000e+00 1.40000000e+01 5.40000000e+01 4.68940000e+04\n 0.00000000e+00 2.20200000e+03 2.80000000e+01 1.14000000e+02\n 3.50000000e+02 7.00000000e+02 1.70800000e+03 9.62200000e+03\n 2.17400000e+03 8.00000000e+01 0.00000000e+00 8.00000000e+01\n 2.06000000e+02 7.80200000e+03 1.46600000e+03 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 7.91800000e+03\n 7.80000000e+01 4.00000000e+01 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.52000000e+02 0.00000000e+00\n 0.00000000e+00 3.99600000e+03 5.84000000e+02 2.00000000e+02\n 6.20000000e+01 3.75800000e+04 3.75600000e+03 6.36800000e+03\n 3.60000000e+01 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 2.86000000e+00 1.02000000e+02 1.00400000e+04 3.31000000e+03\n 1.06800000e+03 2.76000000e+02 1.62000000e+03 1.16000000e+02\n 8.60000000e+01 4.62000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n"
],
[
"print(X[:4,:])",
"[[7.66980000e+04 7.13188501e-01 2.13070644e+09 2.80000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.72500000e+04 1.43286400e+06\n 3.66415600e+06 1.00768400e+06 2.58960000e+04 0.00000000e+00\n 2.55169600e+06 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 4.93329600e+06 3.65516600e+06\n 1.76600800e+06 1.13204000e+06 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.01200000e+03 2.68000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 4.69014000e+05 4.23966000e+06 7.03300000e+05\n 7.55876000e+05 0.00000000e+00 5.37400000e+03 2.10800000e+03\n 4.11400000e+03 1.23480000e+04 6.15248000e+05 5.52627600e+06\n 2.37800000e+03 4.00000000e+00 0.00000000e+00 0.00000000e+00\n 2.32874600e+06 1.02230400e+06 4.15432000e+05 2.87230000e+05\n 3.10246000e+05 6.81504000e+05 1.11881400e+06 3.57400000e+03\n 0.00000000e+00 0.00000000e+00 6.70021400e+06 0.00000000e+00\n 1.00000000e+01 1.08000000e+02 5.00000000e+01 2.55169600e+06\n 9.75180000e+04 9.47550000e+05 7.99478000e+05 3.30760000e+05\n 3.53400000e+05 2.99160000e+05 3.05200000e+05 2.83680000e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 1.78540000e+05\n 7.66980800e+04 6.70021400e+06 6.70021400e+06 6.59989200e+06\n 4.35660000e+04 6.86560000e+04 5.40640000e+04 6.38360000e+05\n 6.16785000e+06 1.20960000e+06 2.46244000e+05 2.00000000e+00\n 9.60000000e+01 0.00000000e+00 5.24575200e+06 0.00000000e+00\n 9.16567680e+05 6.00000000e+00 1.92400000e+03 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.18196000e+05 1.30947200e+06\n 3.24718200e+06 1.38136200e+06 9.88220000e+04 1.12080000e+04\n 1.60800000e+03 2.20000000e+02 2.40000000e+02 6.70021400e+06\n 3.70629800e+01 1.04760000e+04 1.22600000e+03 2.67998000e+05\n 5.21832000e+05 4.28776000e+05 4.01585400e+06 8.95240000e+05\n 2.63300000e+04 1.18000000e+02 0.00000000e+00 5.32000000e+02\n 7.34000000e+02 4.12270400e+06 5.12880000e+04 0.00000000e+00\n 5.32572000e+05 0.00000000e+00 1.80000000e+01 5.33069000e+06\n 4.73200000e+03 1.12600000e+03 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 6.22820000e+04 8.59080000e+04\n 3.27900000e+04 0.00000000e+00 0.00000000e+00 2.02710000e+05\n 3.79280000e+04 1.47455800e+07 1.87664400e+06 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 2.80118000e+06\n 2.44580000e+03 2.71200000e+03 9.65866000e+05 1.70690800e+06\n 1.24052000e+06 4.93384000e+05 7.21044000e+05 4.69792000e+05\n 3.39156000e+05 1.57956000e+05 7.32240000e+04 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [3.30580000e+04 7.13188501e-01 0.00000000e+00 1.90620639e+05\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.82540000e+04 6.53294000e+05\n 1.72080000e+06 5.16724000e+05 3.16420000e+04 0.00000000e+00\n 1.39335200e+06 0.00000000e+00 6.80000000e+01 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.56089800e+06 2.12715000e+06\n 1.08459800e+06 3.38544000e+05 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 7.15100000e+04 7.72720000e+05 1.99692400e+06\n 9.95600000e+04 0.00000000e+00 7.33600000e+03 7.80800000e+03\n 1.37760000e+04 1.30860000e+04 1.01007400e+06 1.87390200e+06\n 1.47260000e+04 6.00000000e+00 0.00000000e+00 0.00000000e+00\n 1.37857600e+06 4.47166000e+05 1.99512000e+05 1.54298000e+05\n 1.37280000e+05 1.38668000e+05 1.65908000e+05 
2.29652000e+05\n 8.70820000e+04 4.70800000e+03 3.64666000e+06 8.60000000e+01\n 4.54000000e+02 3.64000000e+02 3.50000000e+02 1.39335200e+06\n 4.90280000e+04 6.88314000e+05 3.92208000e+05 3.41420000e+05\n 3.59780000e+05 3.66560000e+05 4.63710834e+05 5.13147820e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 6.70000000e+03\n 3.30575100e+04 3.64666000e+06 3.64666000e+06 3.58203400e+06\n 1.77330000e+04 2.60120000e+05 1.15626000e+05 6.90000000e+03\n 2.94285000e+06 1.20960000e+06 0.00000000e+00 1.90221811e+05\n 9.15204147e+01 4.43075832e-04 2.29107936e+06 0.00000000e+00\n 6.43536960e+05 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 3.80000000e+01 9.86440000e+04 1.17950200e+06\n 1.28673600e+06 3.36388000e+05 3.62940000e+04 5.19200000e+03\n 5.60000000e+01 1.90515567e+05 0.00000000e+00 3.64666000e+06\n 3.70629800e+01 6.16000000e+03 7.96000000e+02 1.64860000e+05\n 3.50066000e+05 2.72956000e+05 1.83760000e+06 3.01242000e+05\n 9.14800000e+03 2.20000000e+01 0.00000000e+00 7.49091271e+02\n 1.22296099e+03 1.92882489e+06 3.51510236e+05 2.74179338e+02\n 1.93742905e+04 7.39379113e+00 1.34166522e+01 2.20075221e+06\n 3.31200000e+03 5.22000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.37360000e+04 3.69460000e+04\n 5.93600000e+03 0.00000000e+00 0.00000000e+00 1.03330000e+05\n 1.62540000e+04 4.51008000e+06 8.68538000e+05 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 3.47782000e+06\n 2.21176000e+03 2.33400000e+03 6.64504000e+05 8.24154000e+05\n 4.21400000e+05 1.78064000e+05 2.93306000e+05 2.45416000e+05\n 1.33654000e+05 8.11400000e+04 9.75760000e+04 1.50000000e+03\n 0.00000000e+00 0.00000000e+00]\n [4.10400000e+04 7.13188501e-01 2.28000000e+02 1.00000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.64800000e+03 3.70592000e+05\n 1.88337400e+06 2.92936000e+05 1.20160000e+04 0.00000000e+00\n 1.23413200e+06 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.37199000e+06 2.17363400e+06\n 3.00796000e+05 1.53698000e+05 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.58000000e+02 1.10000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 8.70456000e+05 2.39798000e+05\n 1.45031200e+06 0.00000000e+00 1.62000000e+03 1.15600000e+03\n 1.22800000e+03 3.42500000e+04 1.81160600e+06 7.10672000e+05\n 3.40000000e+01 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 7.90690000e+05 6.72026000e+05 3.32340000e+05 2.54892000e+05\n 1.89596000e+05 1.35758000e+05 1.03552000e+05 8.16660000e+04\n 4.60000000e+01 0.00000000e+00 2.67333800e+06 1.28000000e+02\n 2.02000000e+02 5.76000000e+02 4.00000000e+00 1.23413200e+06\n 2.88040000e+04 1.60176000e+05 1.39730000e+05 1.37160000e+05\n 1.30640000e+05 3.99603168e+05 4.63710834e+05 5.13147820e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 2.80000000e+04\n 4.10400800e+04 2.67333800e+06 2.67333800e+06 2.67853400e+06\n 1.54390000e+04 7.46600000e+03 2.24360000e+04 2.48240000e+05\n 2.56056600e+06 1.20960000e+06 6.33280000e+04 0.00000000e+00\n 1.24000000e+02 0.00000000e+00 2.32269216e+06 0.00000000e+00\n 2.36099520e+05 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.32760000e+04 1.21528000e+06\n 1.10279800e+06 1.96502000e+05 1.02600000e+04 2.42200000e+03\n 2.80000000e+01 0.00000000e+00 6.00000000e+00 2.67333800e+06\n 3.70629800e+01 3.58400000e+03 5.00000000e+02 5.63620000e+04\n 1.49726000e+05 1.00326000e+05 
1.74483800e+06 4.88302000e+05\n 1.66820000e+04 2.46000000e+02 0.00000000e+00 2.30000000e+02\n 2.92000000e+02 2.18052800e+06 2.91880000e+04 2.20000000e+01\n 2.03460000e+04 0.00000000e+00 0.00000000e+00 2.34104800e+06\n 1.49400000e+03 1.52000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.38760000e+04 3.81820000e+04\n 8.13800000e+03 0.00000000e+00 0.00000000e+00 6.57720000e+04\n 1.05340000e+04 3.00240000e+05 4.80280000e+04 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 1.04012000e+06\n 1.01864000e+03 1.02000000e+03 2.62032000e+05 4.53378000e+05\n 2.77378000e+05 1.59812000e+05 4.23992000e+05 4.09564000e+05\n 3.20746000e+05 1.58022000e+05 9.51280000e+04 5.14000000e+02\n 0.00000000e+00 0.00000000e+00]\n [1.20000000e+01 0.00000000e+00 7.00000000e+01 6.60000000e+01\n 0.00000000e+00 1.00000000e+01 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 3.18000000e+02 2.21200000e+03 3.23200000e+03\n 1.87200000e+03 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 2.66800000e+03 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 6.42000000e+02 3.89400000e+03 1.01840000e+04 7.55400000e+03\n 1.07640000e+04 1.01400000e+03 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 6.00000000e+01 6.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 2.03800000e+03\n 5.59600000e+03 0.00000000e+00 6.40000000e+01 6.00000000e+00\n 6.00000000e+00 9.14000000e+02 7.60000000e+01 2.47800000e+03\n 2.39800000e+03 1.69200000e+03 0.00000000e+00 0.00000000e+00\n 6.17600000e+03 3.40000000e+02 3.04000000e+02 1.02000000e+02\n 7.40000000e+01 4.06000000e+02 2.16000000e+02 1.60000000e+01\n 0.00000000e+00 0.00000000e+00 2.16140000e+04 2.00000000e+00\n 1.20000000e+01 0.00000000e+00 0.00000000e+00 2.66800000e+03\n 1.84000000e+02 7.63200000e+03 3.09000000e+03 2.80429114e+05\n 3.21353687e+05 3.99603168e+05 4.63710834e+05 5.13147820e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 1.05800000e+04\n 1.26900000e+01 2.16140000e+04 2.16140000e+04 2.17720000e+04\n 3.20000000e+01 5.00000000e+01 1.99400000e+03 2.14000000e+04\n 7.71000000e+03 1.20960000e+06 3.02000000e+02 2.00000000e+00\n 6.00000000e+00 0.00000000e+00 2.13504000e+03 0.00000000e+00\n 4.52544000e+03 2.00000000e+00 1.60000000e+01 0.00000000e+00\n 5.20000000e+01 2.54400000e+03 1.89400000e+03 2.17000000e+03\n 8.22000000e+02 1.52000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 2.00000000e+00 2.00000000e+00 2.16140000e+04\n 0.00000000e+00 1.03200000e+03 6.00000000e+00 2.40000000e+01\n 6.56000000e+02 6.92000000e+02 4.83600000e+03 3.88000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 1.38000000e+02\n 8.00000000e+00 1.66600000e+03 7.20000000e+01 0.00000000e+00\n 1.20000000e+01 0.00000000e+00 0.00000000e+00 2.57800000e+03\n 7.60000000e+01 6.20000000e+01 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.32000000e+02 0.00000000e+00\n 0.00000000e+00 2.01400000e+03 3.70000000e+02 4.80000000e+01\n 1.80000000e+01 1.57400000e+04 1.82200000e+03 2.01740000e+04\n 4.40000000e+01 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 1.08000000e+00 5.40000000e+01 5.67000000e+03 1.56600000e+03\n 2.40000000e+02 4.60000000e+01 5.80000000e+01 4.40000000e+01\n 1.00000000e+01 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 4.00000000e+00 3.20000000e+01]]\n"
],
[
"print(X[0:2,0:2])",
"[[7.66980000e+04 7.13188501e-01]\n [3.30580000e+04 7.13188501e-01]]\n"
],
[
"print(X[0:2,:])",
"[[7.66980000e+04 7.13188501e-01 2.13070644e+09 2.80000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.72500000e+04 1.43286400e+06\n 3.66415600e+06 1.00768400e+06 2.58960000e+04 0.00000000e+00\n 2.55169600e+06 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 4.93329600e+06 3.65516600e+06\n 1.76600800e+06 1.13204000e+06 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.01200000e+03 2.68000000e+02\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 4.69014000e+05 4.23966000e+06 7.03300000e+05\n 7.55876000e+05 0.00000000e+00 5.37400000e+03 2.10800000e+03\n 4.11400000e+03 1.23480000e+04 6.15248000e+05 5.52627600e+06\n 2.37800000e+03 4.00000000e+00 0.00000000e+00 0.00000000e+00\n 2.32874600e+06 1.02230400e+06 4.15432000e+05 2.87230000e+05\n 3.10246000e+05 6.81504000e+05 1.11881400e+06 3.57400000e+03\n 0.00000000e+00 0.00000000e+00 6.70021400e+06 0.00000000e+00\n 1.00000000e+01 1.08000000e+02 5.00000000e+01 2.55169600e+06\n 9.75180000e+04 9.47550000e+05 7.99478000e+05 3.30760000e+05\n 3.53400000e+05 2.99160000e+05 3.05200000e+05 2.83680000e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 1.78540000e+05\n 7.66980800e+04 6.70021400e+06 6.70021400e+06 6.59989200e+06\n 4.35660000e+04 6.86560000e+04 5.40640000e+04 6.38360000e+05\n 6.16785000e+06 1.20960000e+06 2.46244000e+05 2.00000000e+00\n 9.60000000e+01 0.00000000e+00 5.24575200e+06 0.00000000e+00\n 9.16567680e+05 6.00000000e+00 1.92400000e+03 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.18196000e+05 1.30947200e+06\n 3.24718200e+06 1.38136200e+06 9.88220000e+04 1.12080000e+04\n 1.60800000e+03 2.20000000e+02 2.40000000e+02 6.70021400e+06\n 3.70629800e+01 1.04760000e+04 1.22600000e+03 2.67998000e+05\n 5.21832000e+05 4.28776000e+05 4.01585400e+06 8.95240000e+05\n 2.63300000e+04 1.18000000e+02 0.00000000e+00 5.32000000e+02\n 7.34000000e+02 4.12270400e+06 5.12880000e+04 0.00000000e+00\n 5.32572000e+05 0.00000000e+00 1.80000000e+01 5.33069000e+06\n 4.73200000e+03 1.12600000e+03 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 6.22820000e+04 8.59080000e+04\n 3.27900000e+04 0.00000000e+00 0.00000000e+00 2.02710000e+05\n 3.79280000e+04 1.47455800e+07 1.87664400e+06 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 2.80118000e+06\n 2.44580000e+03 2.71200000e+03 9.65866000e+05 1.70690800e+06\n 1.24052000e+06 4.93384000e+05 7.21044000e+05 4.69792000e+05\n 3.39156000e+05 1.57956000e+05 7.32240000e+04 0.00000000e+00\n 0.00000000e+00 0.00000000e+00]\n [3.30580000e+04 7.13188501e-01 0.00000000e+00 1.90620639e+05\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 1.82540000e+04 6.53294000e+05\n 1.72080000e+06 5.16724000e+05 3.16420000e+04 0.00000000e+00\n 1.39335200e+06 0.00000000e+00 6.80000000e+01 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 2.56089800e+06 2.12715000e+06\n 1.08459800e+06 3.38544000e+05 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 7.15100000e+04 7.72720000e+05 1.99692400e+06\n 9.95600000e+04 0.00000000e+00 7.33600000e+03 7.80800000e+03\n 1.37760000e+04 1.30860000e+04 1.01007400e+06 1.87390200e+06\n 1.47260000e+04 6.00000000e+00 0.00000000e+00 0.00000000e+00\n 1.37857600e+06 4.47166000e+05 1.99512000e+05 1.54298000e+05\n 1.37280000e+05 1.38668000e+05 1.65908000e+05 
2.29652000e+05\n 8.70820000e+04 4.70800000e+03 3.64666000e+06 8.60000000e+01\n 4.54000000e+02 3.64000000e+02 3.50000000e+02 1.39335200e+06\n 4.90280000e+04 6.88314000e+05 3.92208000e+05 3.41420000e+05\n 3.59780000e+05 3.66560000e+05 4.63710834e+05 5.13147820e+05\n 5.51389799e+05 5.82871323e+05 6.04886613e+05 6.70000000e+03\n 3.30575100e+04 3.64666000e+06 3.64666000e+06 3.58203400e+06\n 1.77330000e+04 2.60120000e+05 1.15626000e+05 6.90000000e+03\n 2.94285000e+06 1.20960000e+06 0.00000000e+00 1.90221811e+05\n 9.15204147e+01 4.43075832e-04 2.29107936e+06 0.00000000e+00\n 6.43536960e+05 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 3.80000000e+01 9.86440000e+04 1.17950200e+06\n 1.28673600e+06 3.36388000e+05 3.62940000e+04 5.19200000e+03\n 5.60000000e+01 1.90515567e+05 0.00000000e+00 3.64666000e+06\n 3.70629800e+01 6.16000000e+03 7.96000000e+02 1.64860000e+05\n 3.50066000e+05 2.72956000e+05 1.83760000e+06 3.01242000e+05\n 9.14800000e+03 2.20000000e+01 0.00000000e+00 7.49091271e+02\n 1.22296099e+03 1.92882489e+06 3.51510236e+05 2.74179338e+02\n 1.93742905e+04 7.39379113e+00 1.34166522e+01 2.20075221e+06\n 3.31200000e+03 5.22000000e+02 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 3.37360000e+04 3.69460000e+04\n 5.93600000e+03 0.00000000e+00 0.00000000e+00 1.03330000e+05\n 1.62540000e+04 4.51008000e+06 8.68538000e+05 0.00000000e+00\n 0.00000000e+00 0.00000000e+00 0.00000000e+00 3.47782000e+06\n 2.21176000e+03 2.33400000e+03 6.64504000e+05 8.24154000e+05\n 4.21400000e+05 1.78064000e+05 2.93306000e+05 2.45416000e+05\n 1.33654000e+05 8.11400000e+04 9.75760000e+04 1.50000000e+03\n 0.00000000e+00 0.00000000e+00]]\n"
],
[
"from sklearn.cluster import KMeans",
"_____no_output_____"
],
[
"cluster_range = range( 1, 20 )\ncluster_range1 = range( 1, 20 )\ncluster_errors = []\ncluster_errors1 = []\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX_scaled = scaler.fit_transform( X )\nZ_scaled = scaler.fit_transform( Z )\n\nfor num_clusters in cluster_range:\n clusters = KMeans( num_clusters )\n clusters.fit( X_scaled )\n print(\"Fazendo cenas\")\n \n cluster_errors.append( clusters.inertia_ )\n\nfor num_clusters1 in cluster_range1:\n clusters1 = KMeans( num_clusters1 )\n clusters1.fit( Z_scaled )\n clusters.fit( X_scaled )\n print(\"Também faço cenas\")\n \n cluster_errors1.append( clusters1.inertia_ )",
"Fazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nFazendo cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\nTambém faço cenas\n"
],
[
"clusters_df = pd.DataFrame( { \"num_clusters\":cluster_range, \"cluster_errors\": cluster_errors } )\nclusters1_df = pd.DataFrame( { \"num_clusters\":cluster_range1, \"cluster_errors\": cluster_errors1 } )",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,6))\nax.plot( clusters_df.num_clusters, clusters_df.cluster_errors, marker = \"o\" ,color = \"skyblue\", label='Train')\nax.plot( clusters1_df.num_clusters, clusters1_df.cluster_errors, marker = \"o\" ,color = \"blue\", label='Test')\n\nlegend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')\n",
"_____no_output_____"
],
[
"kmeans = KMeans(n_clusters=2, random_state=0).fit(X)",
"_____no_output_____"
],
[
"kmeans.labels_",
"_____no_output_____"
],
[
"plt.scatter(X[:, 1], X[:, 2], c=kmeans.labels_, s=40, cmap='viridis');",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler",
"_____no_output_____"
],
[
"scaler = MinMaxScaler(feature_range=[0, 1])\ndata_rescaled = scaler.fit_transform(X)\ndata_rescaled1 = scaler.fit_transform(Z)",
"_____no_output_____"
],
[
"#Fitting the PCA algorithm with our Data\npca = PCA().fit(data_rescaled)\npca1 = PCA().fit(data_rescaled1)\n#Plotting the Cumulative Summation of the Explained Variance\nplt.figure()\nfig, ax = plt.subplots(figsize=(12,6))\nax.plot(np.cumsum(pca.explained_variance_ratio_),label='Train')\nax.plot(np.cumsum(pca1.explained_variance_ratio_),label='Test')\nax.set(xlabel='Number of Components', ylabel='Variance (%)') #for each component\nax.set(title='Pulsar Dataset Explained Variance')\nlegend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### o que queremos aqui e o valor de componentes que explica entre 90-99% da variancia... esta tudo ok aqui!! eu escolhi 10 componentes por exemplo...",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=10)\ndataset = pca.fit_transform(data_rescaled)\ndataset1 = pca1.fit_transform(data_rescaled1)",
"_____no_output_____"
],
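[
"# Editor's sketch, not part of the original notebook: the '90-99% of the variance' rule from the\n# note above can also be applied programmatically instead of hand-picking 10 components.\n# scikit-learn accepts a fraction for n_components; 0.95 below is an illustrative threshold.\npca_auto = PCA(n_components=0.95).fit(data_rescaled)\nprint(pca_auto.n_components_)",
"_____no_output_____"
],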
[
"cluster_range = range( 1, 20 )\ncluster_errors = []\ncluster_range1 = range( 1, 20 )\ncluster_errors1 = []\n\nfor num_clusters in cluster_range:\n clusters = KMeans( num_clusters )\n clusters.fit( dataset )\n cluster_errors.append( clusters.inertia_ )\n\nfor num_clusters1 in cluster_range1:\n clusters1 = KMeans( num_clusters1 )\n clusters1.fit( Z_scaled )\n \n cluster_errors1.append( clusters1.inertia_)",
"_____no_output_____"
],
[
"from scipy.spatial.distance import cdist\n\ndef plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):\n labels = kmeans.fit_predict(X)\n\n # plot the input data\n ax = ax or plt.gca()\n ax.axis('equal')\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)\n\n # plot the representation of the KMeans model\n centers = kmeans.cluster_centers_\n radii = [cdist(X[labels == i], [center]).max()\n for i, center in enumerate(centers)]\n for c, r in zip(centers, radii):\n ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))",
"_____no_output_____"
],
[
"clusters_df_pca = pd.DataFrame( { \"num_clusters\":cluster_range, \"cluster_errors\": cluster_errors } )\nclusters_df_pca1 = pd.DataFrame( { \"num_clusters\":cluster_range1, \"cluster_errors\": cluster_errors1 } )",
"_____no_output_____"
],
[
"plt.figure()\nfig, ax = plt.subplots(figsize=(12,6))\nax.plot( clusters_df.num_clusters, clusters_df.cluster_errors, marker = \"o\" ,color = \"skyblue\",label='Train' )\nax.plot( clusters1_df.num_clusters, clusters1_df.cluster_errors, marker = \"o\" ,color = \"blue\",label='Test')\nax.plot( clusters_df_pca.num_clusters, clusters_df_pca.cluster_errors, marker = \"o\" ,color = \"darkred\",label='Train after PCA')\nax.plot( clusters_df_pca1.num_clusters, clusters_df_pca1.cluster_errors, marker = \"o\" ,color = \"red\" ,label='Test after PCA')\nax.set(xlabel='Number of Components')\nax.set(ylabel='Distance between clusters') #for each component\nlegend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')",
"_____no_output_____"
],
[
"kmeans = KMeans(n_clusters=4, random_state=0).fit(dataset)",
"_____no_output_____"
],
[
"plt.scatter(X[:, 1], X[:, 2], c=kmeans.labels_, s=40, cmap='viridis');",
"_____no_output_____"
],
[
"plot_kmeans(kmeans, X)",
"_____no_output_____"
],
[
"rng = np.random.RandomState(13)\nX_stretched = np.dot(X, rng.randn(170,2))",
"_____no_output_____"
],
[
"plot_kmeans(kmeans, X_stretched)",
"_____no_output_____"
]
],
[
[
"### Modelos de mistura - corrigir de forma a dar para mais componentes\n",
"_____no_output_____"
]
],
[
[
"gmm = mixture.GaussianMixture(n_components=1).fit(X)",
"_____no_output_____"
],
[
"labels = gmm.predict(X)",
"_____no_output_____"
],
[
"plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');",
"_____no_output_____"
],
[
"probs = gmm.predict_proba(X)",
"_____no_output_____"
],
[
"print(probs[:5].round(3))",
"[[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\n"
],
[
"print(probs[5:10].round(3))",
"[[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\n"
],
[
"print(probs[150:155].round(3))",
"[[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\n"
],
[
"print(probs[75:80].round(3))",
"[[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\n"
],
[
"size = 50 * probs.max(1)",
"_____no_output_____"
],
[
"plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');",
"_____no_output_____"
],
[
"gmm = mixture.GaussianMixture(n_components=3, covariance_type='full', random_state=42)",
"_____no_output_____"
],
[
"plt.scatter(X[:, 1], X[:, 2], c=labels, s=40, cmap='viridis');",
"_____no_output_____"
],
[
"from matplotlib.patches import Ellipse\n\ndef draw_ellipse(position, covariance, ax=None, **kwargs):\n \"\"\"Draw an ellipse with a given position and covariance\"\"\"\n ax = ax or plt.gca()\n \n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n \n # Draw the Ellipse\n for nsig in range(1, 4):\n ax.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))\n \ndef plot_gmm(gmm, X, label=True, ax=None):\n ax = ax or plt.gca()\n labels = gmm.fit(X).predict(X)\n if label:\n ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)\n else:\n ax.scatter(X[:, 0], X[:, 1], s=40, zorder=2)\n ax.axis('equal')\n \n w_factor = 0.2 / gmm.weights_.max()\n for pos, covar, w in zip(gmm.means_, gmm.covars_, gmm.weights_):\n draw_ellipse(pos, covar, alpha=w * w_factor)",
"_____no_output_____"
],
[
"plot_gmm(gmm, X_stretched)",
"_____no_output_____"
],
[
"plot_gmm(gmm, X, label=False)",
"_____no_output_____"
],
[
"plot_gmm(gmm, X_stretched, label=False)",
"_____no_output_____"
],
[
"gmm16 = mixture.GaussianMixture(n_components=16, covariance_type='full', random_state=0)\nplot_gmm(gmm16, X_stretched, label=False)",
"_____no_output_____"
],
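[
"# Editor's sketch, not part of the original notebook: once per-component AIC/BIC values are\n# available (as computed in the next cell), the number of mixture components can be chosen as\n# the minimiser of BIC. A small candidate range is used here purely for illustration.\ncandidate_counts = np.arange(1, 6)\ncandidate_models = [mixture.GaussianMixture(n_components=n, random_state=0).fit(X) for n in candidate_counts]\nbest_n = candidate_counts[np.argmin([m.bic(X) for m in candidate_models])]\nprint(best_n)",
"_____no_output_____"
],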
[
"reg_covar = [0.0, 1.0e-12, 1.0e-10, 1.0e-8, 1.0e-6, 1.0e-4, 1.0e-2]\nprob = np.zeros((len(reg_covar), len(X)))\nfor i in range(1, len(reg_covar)):\n\n n_components = np.arange(1, 21)\n models = [mixture.GaussianMixture(n_components=1, covariance_type='full', reg_covar = reg_covar[i]).fit(X)\n for n in n_components]\n\nplt.plot(n_components, [m.bic(X) for m in models], label='BIC')\nplt.plot(n_components, [m.aic(X) for m in models], label='AIC')\nplt.legend(loc='best')\nplt.xlabel('n_components');",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdb55d8d19371539ddf9b7b7b7e83a6148dd0c5 | 540,565 | ipynb | Jupyter Notebook | cnn/style_transfer2.ipynb | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | cnn/style_transfer2.ipynb | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | cnn/style_transfer2.ipynb | opplieam/Udemy-Lazy | 89e757152a87603d630593e13b0db4d5422222fc | [
"Apache-2.0"
] | null | null | null | 1,604.050445 | 263,455 | 0.956736 | [
[
[
"from keras.layers import AveragePooling2D, MaxPooling2D\nfrom keras.models import Model, Sequential\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.preprocessing import image\nfrom scipy.optimize import fmin_l_bfgs_b\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport keras.backend as K\n\nconfig = tf.ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nK.set_session(sess)",
"_____no_output_____"
]
],
[
[
"In this script, we will focus on generating an image\nwith the same style as the input image.\nBut NOT the same content.\nIt should capture only the essence of the style.",
"_____no_output_____"
]
],
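[
[
"# Added sketch (not part of the original script): a tiny NumPy illustration of why a Gram matrix\n# captures style but not content - it only records how feature channels co-occur, so any spatial\n# rearrangement of the feature map produces the same matrix.\nimport numpy as np\nfm = np.random.randn(4, 4, 3)                    # toy feature map (H, W, C)\nflat = fm.reshape(-1, 3)                         # (H*W, C)\ngram = flat.T.dot(flat) / flat.shape[0]          # (C, C) channel co-occurrence\nshuffled = flat[np.random.permutation(flat.shape[0])]\ngram_shuffled = shuffled.T.dot(shuffled) / shuffled.shape[0]\nprint(np.allclose(gram, gram_shuffled))          # True: spatial layout (content) is discarded",
"_____no_output_____"
]
],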
[
[
"def VGG16_AvgPool(shape):\n # we want to account for features across the entrie image\n # so we get rid of the maxpool which throws away information\n vgg = VGG16(input_shape=shape, weights='imagenet', include_top=False)\n \n new_model = Sequential()\n for layer in vgg.layers:\n if layer.__class__ == MaxPooling2D:\n # replace it with average pooling\n new_model.add(AveragePooling2D())\n else:\n new_model.add(layer)\n return new_model",
"_____no_output_____"
],
[
"def unpreprocess(img):\n # un-preprocess image from keras vgg which subtract mean\n img[..., 0] += 103.939\n img[..., 1] += 116.779\n img[..., 2] += 126.68\n img = img[..., ::-1]\n return img\n\ndef scale_img(x):\n x = x - x.min()\n x = x / x.max()\n return x",
"_____no_output_____"
],
[
"def gram_matrix(img):\n # input is (H, W, C) (C = # features)\n # we first need to convert it to (C, H*W)\n X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1)))\n # calculate the gram matrix\n # gram = XX^T / N\n # the constant is not important since we'll be weighting these\n G = K.dot(X, K.transpose(X)) / img.get_shape().num_elements()\n return G",
"_____no_output_____"
],
[
"def style_loss(y, t):\n return K.mean(K.square(gram_matrix(y) - gram_matrix(t)))",
"_____no_output_____"
],
[
"def minimize(fn, epochs, batch_shape):\n losses = []\n x = np.random.randn(np.prod(batch_shape))\n for i in range(epochs):\n x, l, _ = fmin_l_bfgs_b(\n func=fn,\n x0=x,\n maxfun=20\n )\n x = np.clip(x, -127, 127)\n print(\"iter=\", i, ',', 'loss=', l)\n losses.append(l)\n \n plt.plot(losses)\n \n newimg = x.reshape(*batch_shape)\n final_img = unpreprocess(newimg)\n return final_img[0]",
"_____no_output_____"
],
[
"path = './data/styles/starrynight.jpg'\nimg = image.load_img(path)\nplt.imshow(img)",
"_____no_output_____"
],
[
"# convert image to array and preprocess for vgg\nx = image.img_to_array(img)\nprint(x.shape)\n# make it (1, H, W, C)\nx = np.expand_dims(x, axis=0)\nprint(x.shape)\nx = preprocess_input(x)\nprint(x.shape)",
"(300, 454, 3)\n(1, 300, 454, 3)\n(1, 300, 454, 3)\n"
],
[
"batch_shape = x.shape\nshape = x.shape[1:]",
"_____no_output_____"
],
[
"# let's take the first convolution at each block of convolutions\n# to be our target outputs\nvgg = VGG16_AvgPool(shape)\nprint(vgg.summary())",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nblock1_conv1 (Conv2D) (None, 300, 454, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 300, 454, 64) 36928 \n_________________________________________________________________\naverage_pooling2d_1 (Average (None, 150, 227, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 150, 227, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 150, 227, 128) 147584 \n_________________________________________________________________\naverage_pooling2d_2 (Average (None, 75, 113, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 75, 113, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 75, 113, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 75, 113, 256) 590080 \n_________________________________________________________________\naverage_pooling2d_3 (Average (None, 37, 56, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 37, 56, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 37, 56, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 37, 56, 512) 2359808 \n_________________________________________________________________\naverage_pooling2d_4 (Average (None, 18, 28, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 18, 28, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 18, 28, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 18, 28, 512) 2359808 \n_________________________________________________________________\naverage_pooling2d_5 (Average (None, 9, 14, 512) 0 \n=================================================================\nTotal params: 14,714,688\nTrainable params: 14,714,688\nNon-trainable params: 0\n_________________________________________________________________\nNone\n"
],
[
"# Note: need to select output at index 1, since outputs at\n# index 0 correspond to the original vgg with maxpool\nsymbolic_conv_outputs = [\n layer.get_output_at(1) for layer in vgg.layers if layer.name.endswith('conv1')\n]",
"_____no_output_____"
],
[
"# pick the earlier layers for\n# a more \"localized\" representation\n# this is opposed to the content model\n# where the later layers represent a more \"global\" structure\n# symbolic_conv_outputs = symbolic_conv_outputs[:2]",
"_____no_output_____"
],
[
"# make a big model that outputs multiple layers's outputs\nmulti_output_model = Model(vgg.input, symbolic_conv_outputs)",
"_____no_output_____"
],
[
"# calculate the targets that are output at each layer\nstyle_layers_outputs = [K.variable(y) for y in\n multi_output_model.predict(x)]",
"_____no_output_____"
],
[
"# calculate the total style loss\nloss = 0\nfor symbolic, actual in zip(symbolic_conv_outputs, style_layers_outputs):\n # gram_matrix() expects a (H, W, C) as input\n loss += style_loss(symbolic[0], actual[0])",
"_____no_output_____"
],
[
"grads = K.gradients(loss, multi_output_model.input)\nget_loss_and_grads = K.function(\n inputs=[multi_output_model.input],\n outputs=[loss] + grads\n)\ndef get_loss_and_grads_wrapper(x_vec):\n l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])\n return l.astype(np.float64), g.flatten().astype(np.float64)",
"_____no_output_____"
],
[
"final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)\nplt.imshow(scale_img(final_img))",
"iter= 0 , loss= 5793.65869140625\niter= 1 , loss= 236.61898803710938\niter= 2 , loss= 97.71456146240234\niter= 3 , loss= 63.22955322265625\niter= 4 , loss= 40.59492874145508\niter= 5 , loss= 25.29944610595703\niter= 6 , loss= 14.36585807800293\niter= 7 , loss= 9.370770454406738\niter= 8 , loss= 6.301311492919922\niter= 9 , loss= 4.809230327606201\n"
],
[
"\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdb594b7d4aaa2a90d556751484536c81d92fd5 | 25,768 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 5 - Inference and Validation (Solution).ipynb | atchiasso/DLPyTorch_2ndPart | fb3c1bb3120e461214c3c5287441b6fcd77a88ac | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 5 - Inference and Validation (Solution).ipynb | atchiasso/DLPyTorch_2ndPart | fb3c1bb3120e461214c3c5287441b6fcd77a88ac | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 5 - Inference and Validation (Solution).ipynb | atchiasso/DLPyTorch_2ndPart | fb3c1bb3120e461214c3c5287441b6fcd77a88ac | [
"MIT"
] | null | null | null | 44.970332 | 1,324 | 0.587007 | [
[
[
"# Inference and Validation\n\nNow that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set. We avoid overfitting through regularization such as dropout while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch. \n\nAs usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here:\n\n```python\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\n```\n\nThe test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import datasets, transforms\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)",
"_____no_output_____"
]
],
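[
[
"# Added sketch (not part of the original lesson): the intro above mentions holding out 10-20% of the\n# data for validation. One hedged way to carve a validation split out of trainset with PyTorch:\nn_val = int(0.1 * len(trainset))\ntrain_part, val_part = torch.utils.data.random_split(trainset, [len(trainset) - n_val, n_val])\nvalloader = torch.utils.data.DataLoader(val_part, batch_size=64, shuffle=False)",
"_____no_output_____"
]
],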
[
[
"Here I'll create a model like normal, using the same one from my solution for part 4.",
"_____no_output_____"
]
],
[
[
"from torch import nn, optim\nimport torch.nn.functional as F\n\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n \n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x",
"_____no_output_____"
]
],
[
[
"The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set.",
"_____no_output_____"
]
],
[
[
"model = Classifier()\n\nimages, labels = next(iter(testloader))\n# Get the class probabilities\nps = torch.exp(model(images))\n# Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples\nprint(ps.shape)",
"torch.Size([64, 10])\n"
]
],
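[
[
"# Added sketch (not part of the original lesson): the text above also mentions top-5 error rate as an\n# alternative metric. A hedged helper for top-k accuracy, reusing the ps and labels tensors from the\n# cell above:\ndef topk_accuracy(ps, labels, k=5):\n    _, topk_idx = ps.topk(k, dim=1)                     # (batch, k) indices of the k largest probs\n    hits = (topk_idx == labels.view(-1, 1)).any(dim=1)  # True where the true label is in the top k\n    return hits.float().mean().item()\n\nprint('untrained top-5 accuracy:', topk_accuracy(ps, labels))",
"_____no_output_____"
]
],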
[
[
"With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index.",
"_____no_output_____"
]
],
[
[
"top_p, top_class = ps.topk(1, dim=1)\n# Look at the most likely classes for the first 10 examples\nprint(top_class[:10,:])",
"tensor([[1],\n [1],\n [1],\n [1],\n [1],\n [1],\n [1],\n [1],\n [1],\n [1]])\n"
]
],
[
[
"Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. To get the equality to work out the way we want, `top_class` and `labels` must have the same shape.\n\nIf we do\n\n```python\nequals = top_class == labels\n```\n\n`equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row.",
"_____no_output_____"
]
],
[
[
"equals = top_class == labels.view(*top_class.shape)",
"_____no_output_____"
]
],
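[
[
"# Added check (not part of the original lesson): a quick way to see the broadcasting issue described\n# above - comparing a (64, 1) tensor with a (64,) tensor broadcasts to (64, 64), while reshaping\n# labels keeps the comparison element-wise.\nprint((top_class == labels).shape)\nprint((top_class == labels.view(*top_class.shape)).shape)",
"_____no_output_____"
]
],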
[
[
"Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error\n\n```\nRuntimeError: mean is not implemented for type torch.ByteTensor\n```\n\nThis happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implement for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`.",
"_____no_output_____"
]
],
[
[
"accuracy = torch.mean(equals.type(torch.FloatTensor))\nprint(f'Accuracy: {accuracy.item()*100}%')",
"Accuracy: 10.9375%\n"
]
],
[
[
"The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up the by turning off gradients using `torch.no_grad()`:\n\n```python\n# turn off gradients\nwith torch.no_grad():\n # validation pass here\n for images, labels in testloader:\n ...\n```\n\n>**Exercise:** Implement the validation loop below. You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting.",
"_____no_output_____"
]
],
[
[
"model = Classifier()\ncriterion = nn.NLLLoss(reduction='sum')\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nepochs = 30\n\ntrain_losses, test_losses = [], []\nfor e in range(epochs):\n tot_train_loss = 0\n for images, labels in trainloader:\n optimizer.zero_grad()\n \n log_ps = model(images)\n loss = criterion(log_ps, labels)\n tot_train_loss += loss.item()\n \n loss.backward()\n optimizer.step()\n else:\n tot_test_loss = 0\n test_correct = 0 # Number of correct predictions on the test set\n \n # Turn off gradients for validation, saves memory and computations\n with torch.no_grad():\n for images, labels in testloader:\n log_ps = model(images)\n loss = criterion(log_ps, labels)\n tot_test_loss += loss.item()\n\n ps = torch.exp(log_ps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n test_correct += equals.sum().item()\n\n # Get mean loss to enable comparison between train and test sets\n train_loss = tot_train_loss / len(trainloader.dataset)\n test_loss = tot_test_loss / len(testloader.dataset)\n\n # At completion of epoch\n train_losses.append(train_loss)\n test_losses.append(test_loss)\n\n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(train_loss),\n \"Test Loss: {:.3f}.. \".format(test_loss),\n \"Test Accuracy: {:.3f}\".format(test_correct / len(testloader.dataset)))",
"Epoch: 1/30.. Training Loss: 0.514.. Test Loss: 0.428.. Test Accuracy: 0.847\nEpoch: 2/30.. Training Loss: 0.392.. Test Loss: 0.399.. Test Accuracy: 0.857\nEpoch: 3/30.. Training Loss: 0.349.. Test Loss: 0.392.. Test Accuracy: 0.857\nEpoch: 4/30.. Training Loss: 0.332.. Test Loss: 0.393.. Test Accuracy: 0.861\nEpoch: 5/30.. Training Loss: 0.316.. Test Loss: 0.378.. Test Accuracy: 0.869\nEpoch: 6/30.. Training Loss: 0.306.. Test Loss: 0.406.. Test Accuracy: 0.854\nEpoch: 7/30.. Training Loss: 0.291.. Test Loss: 0.367.. Test Accuracy: 0.870\nEpoch: 8/30.. Training Loss: 0.283.. Test Loss: 0.383.. Test Accuracy: 0.863\nEpoch: 9/30.. Training Loss: 0.274.. Test Loss: 0.364.. Test Accuracy: 0.871\nEpoch: 10/30.. Training Loss: 0.270.. Test Loss: 0.403.. Test Accuracy: 0.861\nEpoch: 11/30.. Training Loss: 0.256.. Test Loss: 0.411.. Test Accuracy: 0.866\nEpoch: 12/30.. Training Loss: 0.257.. Test Loss: 0.404.. Test Accuracy: 0.872\nEpoch: 13/30.. Training Loss: 0.248.. Test Loss: 0.370.. Test Accuracy: 0.879\nEpoch: 14/30.. Training Loss: 0.244.. Test Loss: 0.359.. Test Accuracy: 0.882\nEpoch: 15/30.. Training Loss: 0.235.. Test Loss: 0.370.. Test Accuracy: 0.880\nEpoch: 16/30.. Training Loss: 0.234.. Test Loss: 0.410.. Test Accuracy: 0.871\nEpoch: 17/30.. Training Loss: 0.225.. Test Loss: 0.376.. Test Accuracy: 0.878\n"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"plt.plot(train_losses, label='Training loss')\nplt.plot(test_losses, label='Validation loss')\nplt.legend(frameon=False)",
"_____no_output_____"
]
],
[
[
"## Overfitting\n\nIf we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting.\n\n<img src='assets/overfitting.png' width=450px>\n\nThe network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible. One option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called *early-stopping*. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss.\n\nThe most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing it's ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module.\n\n```python\nclass Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n \n # Dropout module with 0.2 drop probability\n self.dropout = nn.Dropout(p=0.2)\n \n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n \n # Now with dropout\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.dropout(F.relu(self.fc3(x)))\n \n # output so no dropout here\n x = F.log_softmax(self.fc4(x), dim=1)\n \n return x\n```\n\nDuring training we want to use dropout to prevent overfitting, but during inference we want to use the entire network. So, we need to turn off dropout during validation, testing, and whenever we're using the network to make predictions. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode.\n\n```python\n# turn off gradients\nwith torch.no_grad():\n \n # set model to evaluation mode\n model.eval()\n \n # validation pass here\n for images, labels in testloader:\n ...\n\n# set model back to train mode\nmodel.train()\n```",
"_____no_output_____"
],
[
"> **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss.",
"_____no_output_____"
]
],
[
[
"class Classifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 128)\n self.fc3 = nn.Linear(128, 64)\n self.fc4 = nn.Linear(64, 10)\n\n # Dropout module with 0.2 drop probability\n self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n # make sure input tensor is flattened\n x = x.view(x.shape[0], -1)\n\n # Now with dropout\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.dropout(F.relu(self.fc3(x)))\n\n # output so no dropout here\n x = F.log_softmax(self.fc4(x), dim=1)\n\n return x",
"_____no_output_____"
],
[
"model = Classifier()\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.003)\n\nepochs = 30\nsteps = 0\n\ntrain_losses, test_losses = [], []\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n \n optimizer.zero_grad()\n \n log_ps = model(images)\n loss = criterion(log_ps, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n \n else:\n test_loss = 0\n accuracy = 0\n \n # Turn off gradients for validation, saves memory and computations\n with torch.no_grad():\n model.eval()\n for images, labels in testloader:\n log_ps = model(images)\n test_loss += criterion(log_ps, labels)\n \n ps = torch.exp(log_ps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor))\n \n model.train()\n \n train_losses.append(running_loss/len(trainloader))\n test_losses.append(test_loss/len(testloader))\n\n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(train_losses[-1]),\n \"Test Loss: {:.3f}.. \".format(test_losses[-1]),\n \"Test Accuracy: {:.3f}\".format(accuracy/len(testloader)))",
"_____no_output_____"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"plt.plot(train_losses, label='Training loss')\nplt.plot(test_losses, label='Validation loss')\nplt.legend(frameon=False)",
"_____no_output_____"
]
],
[
[
"## Inference\n\nNow that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context.",
"_____no_output_____"
]
],
[
[
"# Import helper module (should be in the repo)\nimport helper\n\n# Test out your network!\n\nmodel.eval()\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[0]\n# Convert 2D image to 1D vector\nimg = img.view(1, 784)\n\n# Calculate the class probabilities (softmax) for img\nwith torch.no_grad():\n output = model.forward(img)\n\nps = torch.exp(output)\n\n# Plot the image and probabilities\nhelper.view_classify(img.view(1, 28, 28), ps, version='Fashion')",
"_____no_output_____"
]
],
[
[
"## Next Up!\n\nIn the next part, I'll show you how to save your trained models. In general, you won't want to train a model everytime you need it. Instead, you'll train once, save it, then load the model when you want to train more or use if for inference.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdb61e2977477ef1ce1ae5a66d91599927efe31 | 4,991 | ipynb | Jupyter Notebook | Tableau_PoC.ipynb | TableauStudyGroup/Data-Science-Lessons | 113dcb700405587578a235ae8862f7b2fe2d90cb | [
"MIT"
] | null | null | null | Tableau_PoC.ipynb | TableauStudyGroup/Data-Science-Lessons | 113dcb700405587578a235ae8862f7b2fe2d90cb | [
"MIT"
] | null | null | null | Tableau_PoC.ipynb | TableauStudyGroup/Data-Science-Lessons | 113dcb700405587578a235ae8862f7b2fe2d90cb | [
"MIT"
] | null | null | null | 80.5 | 1,771 | 0.6113 | [
[
[
"<a href=\"https://colab.research.google.com/github/VernonNaidoo-Toronto/COVID-19_Test/blob/master/Tableau_PoC.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"%%HTML\n<div class='tableauPlaceholder' id='viz1588427310708' style='position: relative'><noscript><a href='#'><img alt=' ' src='https://public.tableau.com/static/images/DX/DX6CPGJBC/1_rss.png' style='border: none' /></a></noscript><object class='tableauViz' style='display:none;'><param name='host_url' value='https%3A%2F%2Fpublic.tableau.com%2F' /> <param name='embed_code_version' value='3' /> <param name='path' value='shared/DX6CPGJBC' /> <param name='toolbar' value='yes' /><param name='static_image' value='https://public.tableau.com/static/images/DX/DX6CPGJBC/1.png' /> <param name='animate_transition' value='yes' /><param name='display_static_image' value='yes' /><param name='display_spinner' value='yes' /><param name='display_overlay' value='yes' /><param name='display_count' value='yes' /><param name='filter' value='publish=yes' /></object></div> <script type='text/javascript'> var divElement = document.getElementById('viz1588427310708'); var vizElement = divElement.getElementsByTagName('object')[0]; if ( divElement.offsetWidth > 800 ) { vizElement.style.width='100%';vizElement.style.height=(divElement.offsetWidth*0.75)+'px';} else if ( divElement.offsetWidth > 500 ) { vizElement.style.width='100%';vizElement.style.height=(divElement.offsetWidth*0.75)+'px';} else { vizElement.style.width='100%';vizElement.style.height='1327px';} var scriptElement = document.createElement('script'); scriptElement.src = 'https://public.tableau.com/javascripts/api/viz_v1.js'; vizElement.parentNode.insertBefore(scriptElement, vizElement); </script>",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
ecdb7610f9ca317df4fe6352d1b752c75760a0c4 | 54,138 | ipynb | Jupyter Notebook | lijin-THU:notes-python/02-python-essentials/02.01-a-tour-of-python.ipynb | Maecenas/python-getting-started | 2739444e0f4aa692123dcd0c1b9a44218281f9b6 | [
"MIT"
] | null | null | null | lijin-THU:notes-python/02-python-essentials/02.01-a-tour-of-python.ipynb | Maecenas/python-getting-started | 2739444e0f4aa692123dcd0c1b9a44218281f9b6 | [
"MIT"
] | null | null | null | lijin-THU:notes-python/02-python-essentials/02.01-a-tour-of-python.ipynb | Maecenas/python-getting-started | 2739444e0f4aa692123dcd0c1b9a44218281f9b6 | [
"MIT"
] | null | null | null | 27.963843 | 16,900 | 0.656378 | [
[
[
"# Python 入门演示",
"_____no_output_____"
],
[
"## 简单的数学运算",
"_____no_output_____"
],
[
"整数相加,得到整数:",
"_____no_output_____"
]
],
[
[
"2 + 2",
"_____no_output_____"
]
],
[
[
"浮点数相加,得到浮点数:",
"_____no_output_____"
]
],
[
[
"2.0 + 2.5",
"_____no_output_____"
]
],
[
[
"整数和浮点数相加,得到浮点数:",
"_____no_output_____"
]
],
[
[
"2 + 2.5",
"_____no_output_____"
]
],
[
[
"## 变量赋值",
"_____no_output_____"
],
[
"**Python**使用`<变量名>=<表达式>`的方式对变量进行赋值",
"_____no_output_____"
]
],
[
[
"a = 0.2",
"_____no_output_____"
]
],
[
[
"## 字符串 String",
"_____no_output_____"
],
[
"字符串的生成,单引号与双引号是等价的:",
"_____no_output_____"
]
],
[
[
"s = \"hello world\"\ns",
"_____no_output_____"
],
[
"s = 'hello world'\ns",
"_____no_output_____"
]
],
[
[
"三引号用来输入包含多行文字的字符串:",
"_____no_output_____"
]
],
[
[
"s = \"\"\"hello\nworld\"\"\"\nprint s",
"hello\nworld\n"
],
[
"s = '''hello\nworld'''\nprint s",
"hello\nworld\n"
]
],
[
[
"字符串的加法:",
"_____no_output_____"
]
],
[
[
"s = \"hello\" + \" world\"\ns",
"_____no_output_____"
]
],
[
[
"字符串索引:\n",
"_____no_output_____"
]
],
[
[
"s[0]",
"_____no_output_____"
],
[
"s[-1]",
"_____no_output_____"
],
[
"s[0:5]",
"_____no_output_____"
]
],
[
[
"字符串的分割:",
"_____no_output_____"
]
],
[
[
"s = \"hello world\"\ns.split()",
"_____no_output_____"
]
],
[
[
"查看字符串的长度:",
"_____no_output_____"
]
],
[
[
"len(s)",
"_____no_output_____"
]
],
[
[
"## 列表 List",
"_____no_output_____"
],
[
"Python用`[]`来生成列表",
"_____no_output_____"
]
],
[
[
"a = [1, 2.0, 'hello', 5 + 1.0]\na",
"_____no_output_____"
]
],
[
[
"列表加法:",
"_____no_output_____"
]
],
[
[
"a + a",
"_____no_output_____"
]
],
[
[
"列表索引:",
"_____no_output_____"
]
],
[
[
"a[1]",
"_____no_output_____"
]
],
[
[
"列表长度:",
"_____no_output_____"
]
],
[
[
"len(a)",
"_____no_output_____"
]
],
[
[
"向列表中添加元素:",
"_____no_output_____"
]
],
[
[
"a.append(\"world\")\na",
"_____no_output_____"
]
],
[
[
"## 集合 Set",
"_____no_output_____"
],
[
"Python用{}来生成集合,集合中不含有相同元素。",
"_____no_output_____"
]
],
[
[
"s = {2, 3, 4, 2}\ns",
"_____no_output_____"
]
],
[
[
"集合的长度:",
"_____no_output_____"
]
],
[
[
"len(s)",
"_____no_output_____"
]
],
[
[
"向集合中添加元素:",
"_____no_output_____"
]
],
[
[
"s.add(1)\ns",
"_____no_output_____"
]
],
[
[
"集合的交:",
"_____no_output_____"
]
],
[
[
"a = {1, 2, 3, 4}\nb = {2, 3, 4, 5}\na & b",
"_____no_output_____"
]
],
[
[
"并:",
"_____no_output_____"
]
],
[
[
"a | b",
"_____no_output_____"
]
],
[
[
"差:",
"_____no_output_____"
]
],
[
[
"a - b",
"_____no_output_____"
]
],
[
[
"对称差:",
"_____no_output_____"
]
],
[
[
"a ^ b",
"_____no_output_____"
]
],
[
[
"## 字典 Dictionary ",
"_____no_output_____"
],
[
"Python用`{key:value}`来生成Dictionary。",
"_____no_output_____"
]
],
[
[
"d = {'dogs':5, 'cats':4}\nd",
"_____no_output_____"
]
],
[
[
"字典的大小",
"_____no_output_____"
]
],
[
[
"len(d)",
"_____no_output_____"
]
],
[
[
"查看字典某个键对应的值:",
"_____no_output_____"
]
],
[
[
"d[\"dogs\"]",
"_____no_output_____"
]
],
[
[
"修改键值:",
"_____no_output_____"
]
],
[
[
"d[\"dogs\"] = 2\nd",
"_____no_output_____"
]
],
[
[
"插入键值:",
"_____no_output_____"
]
],
[
[
"d[\"pigs\"] = 7\nd",
"_____no_output_____"
]
],
[
[
"所有的键:",
"_____no_output_____"
]
],
[
[
"d.keys()",
"_____no_output_____"
]
],
[
[
"所有的值:",
"_____no_output_____"
]
],
[
[
"d.values()",
"_____no_output_____"
]
],
[
[
"所有的键值对:",
"_____no_output_____"
]
],
[
[
"d.items()",
"_____no_output_____"
]
],
[
[
"## 数组 Numpy Arrays",
"_____no_output_____"
],
[
"需要先导入需要的包,Numpy数组可以进行很多列表不能进行的运算。",
"_____no_output_____"
]
],
[
[
"from numpy import array\na = array([1, 2, 3, 4])\na",
"_____no_output_____"
]
],
[
[
"加法:",
"_____no_output_____"
]
],
[
[
"a + 2",
"_____no_output_____"
],
[
"a + a",
"_____no_output_____"
]
],
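[
[
"# Added contrast (not part of the original tutorial): the same + behaves differently for lists and\n# arrays, which is what is meant above by operations that lists cannot do.\nprint([1, 2, 3, 4] + [1, 2, 3, 4])                 # list + concatenates\nprint(array([1, 2, 3, 4]) + array([1, 2, 3, 4]))   # array + adds element-wise",
"_____no_output_____"
]
],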
[
[
"## 画图 Plot",
"_____no_output_____"
],
[
"Python提供了一个很像MATLAB的绘图接口。",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom matplotlib.pyplot import plot\nplot(a, a**2)",
"_____no_output_____"
]
],
[
[
"## 循环 Loop",
"_____no_output_____"
]
],
[
[
"line = '1 2 3 4 5'\nfields = line.split()\nfields",
"_____no_output_____"
],
[
"total = 0\nfor field in fields:\n total += int(field)\ntotal",
"_____no_output_____"
]
],
[
[
"Python中有一种叫做列表推导式(List comprehension)的用法:",
"_____no_output_____"
]
],
[
[
"numbers = [int(field) for field in fields]\nnumbers",
"_____no_output_____"
],
[
"sum(numbers)",
"_____no_output_____"
]
],
[
[
"写在一行:",
"_____no_output_____"
]
],
[
[
"sum([int(field) for field in line.split()])",
"_____no_output_____"
]
],
[
[
"## 文件操作 File IO",
"_____no_output_____"
]
],
[
[
"cd ~",
"d:\\Users\\lijin\n"
]
],
[
[
"写文件:",
"_____no_output_____"
]
],
[
[
"f = open('data.txt', 'w')\nf.write('1 2 3 4\\n')\nf.write('2 3 4 5\\n')\nf.close()",
"_____no_output_____"
]
],
[
[
"读文件:",
"_____no_output_____"
]
],
[
[
"f = open('data.txt')\ndata = []\nfor line in f:\n data.append([int(field) for field in line.split()])\nf.close()\ndata\n",
"_____no_output_____"
],
[
"for row in data:\n print row",
"[1, 2, 3, 4]\n[2, 3, 4, 5]\n"
]
],
[
[
"删除文件:",
"_____no_output_____"
]
],
[
[
"import os\nos.remove('data.txt')",
"_____no_output_____"
]
],
[
[
"## 函数 Function",
"_____no_output_____"
],
[
"Python用关键词`def`来定义函数。",
"_____no_output_____"
]
],
[
[
"def poly(x, a, b, c):\n y = a * x ** 2 + b * x + c\n return y\n\nx = 1\npoly(x, 1, 2, 3)",
"_____no_output_____"
]
],
[
[
"用Numpy数组做参数x:",
"_____no_output_____"
]
],
[
[
"x = array([1, 2, 3])\npoly(x, 1, 2, 3)",
"_____no_output_____"
]
],
[
[
"可以在定义时指定参数的默认值:",
"_____no_output_____"
]
],
[
[
"from numpy import arange\n\ndef poly(x, a = 1, b = 2, c = 3):\n y = a*x**2 + b*x + c\n return y\n\nx = arange(10)\nx\narray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])",
"_____no_output_____"
],
[
"poly(x)",
"_____no_output_____"
],
[
"poly(x, b = 1)",
"_____no_output_____"
]
],
[
[
"## 模块 Module",
"_____no_output_____"
],
[
"Python中使用`import`关键词来导入模块。",
"_____no_output_____"
]
],
[
[
"import os",
"_____no_output_____"
]
],
[
[
"当前进程号:",
"_____no_output_____"
]
],
[
[
"os.getpid()",
"_____no_output_____"
]
],
[
[
"系统分隔符:",
"_____no_output_____"
]
],
[
[
"os.sep",
"_____no_output_____"
]
],
[
[
"## - 类 Class",
"_____no_output_____"
],
[
"用`class`来定义一个类。\n`Person(object)`表示继承自`object`类;\n`__init__`函数用来初始化对象;\n`self`表示对象自身,类似于`C` `Java`里面`this`。",
"_____no_output_____"
]
],
[
[
"class Person(object):\n def __init__(self, first, last, age):\n self.first = first\n self.last = last\n self.age = age\n def full_name(self):\n return self.first + ' ' + self.last",
"_____no_output_____"
]
],
[
[
"构建新对象:",
"_____no_output_____"
]
],
[
[
"person = Person('Mertle', 'Sedgewick', 52)",
"_____no_output_____"
]
],
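[
[
"# Added note (not part of the original tutorial): self is simply the instance passed in automatically;\n# calling the method through the class makes that explicit.\nperson.full_name() == Person.full_name(person)",
"_____no_output_____"
]
],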
[
[
"调用对象的属性:",
"_____no_output_____"
]
],
[
[
"person.first",
"_____no_output_____"
]
],
[
[
"调用对象的方法:",
"_____no_output_____"
]
],
[
[
"person.full_name()",
"_____no_output_____"
]
],
[
[
"修改对象的属性:",
"_____no_output_____"
]
],
[
[
"person.last = 'Smith'",
"_____no_output_____"
]
],
[
[
"添加新属性,d是之前定义的字典:",
"_____no_output_____"
]
],
[
[
"person.critters = d\nperson.critters",
"_____no_output_____"
]
],
[
[
"## 网络数据 Data from Web",
"_____no_output_____"
]
],
[
[
"url = 'http://ichart.finance.yahoo.com/table.csv?s=GE&d=10&e=5&f=2013&g=d&a=0&b=2&c=1962&ignore=.csv'",
"_____no_output_____"
]
],
[
[
"处理后就相当于一个可读文件:",
"_____no_output_____"
]
],
[
[
"import urllib2\nge_csv = urllib2.urlopen(url)\ndata = []\nfor line in ge_csv:\n data.append(line.split(','))\ndata[:4]",
"_____no_output_____"
]
],
[
[
"使用`pandas`处理数据:",
"_____no_output_____"
]
],
[
[
"ge_csv = urllib2.urlopen(url)\nimport pandas\nge = pandas.read_csv(ge_csv, index_col=0, parse_dates=True)\nge.plot(y='Adj Close')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdb81ad4e8ae0dc1d0abb1171b002bce799b9d3 | 30,573 | ipynb | Jupyter Notebook | Software/UniSat-USK-15-20.ipynb | unisatkz/Course-Materials | d9bf3a3548ce3d2f665b90bf0a9c085e7ada23e7 | [
"MIT"
] | 4 | 2020-04-26T04:31:52.000Z | 2020-04-29T19:52:32.000Z | Software/UniSat-USK-15-20.ipynb | Zhannaspace/Course-Materials | d9bf3a3548ce3d2f665b90bf0a9c085e7ada23e7 | [
"MIT"
] | null | null | null | Software/UniSat-USK-15-20.ipynb | Zhannaspace/Course-Materials | d9bf3a3548ce3d2f665b90bf0a9c085e7ada23e7 | [
"MIT"
] | 2 | 2020-04-27T09:28:53.000Z | 2020-04-29T19:52:35.000Z | 68.395973 | 230 | 0.490891 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdb8d8eee59bd72289174fd2befb210ebad047e | 979,078 | ipynb | Jupyter Notebook | stock market.ipynb | mittshah2/Data-Analysis-Projects | ae04748f3a18ea012b78497885654e363adf753c | [
"MIT"
] | null | null | null | stock market.ipynb | mittshah2/Data-Analysis-Projects | ae04748f3a18ea012b78497885654e363adf753c | [
"MIT"
] | null | null | null | stock market.ipynb | mittshah2/Data-Analysis-Projects | ae04748f3a18ea012b78497885654e363adf753c | [
"MIT"
] | null | null | null | 843.305771 | 164,944 | 0.951541 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as p\nimport scipy.stats as stats\nimport seaborn as se\nfrom pandas_datareader import data\nfrom datetime import datetime\n%matplotlib inline\nse.set_style('whitegrid')\n\ntech_list=['AAPL','GOOG','MSFT','AMZN']\n\nend=datetime.now()\n\nstart=datetime(end.year-1,end.month,end.day)\n\nfor stock in tech_list:\n globals()[stock]=data.DataReader(stock,'yahoo',start,end)\n\naapl=pd.DataFrame(AAPL)\ngoog=pd.DataFrame(GOOG)\nmsft=pd.DataFrame(MSFT)\namzn=pd.DataFrame(AMZN)",
"c:\\users\\mitts\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\pandas_datareader\\compat\\__init__.py:7: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n from pandas.util.testing import assert_frame_equal\n"
],
[
"aapl.describe()",
"_____no_output_____"
],
[
"aapl.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 253 entries, 2019-06-10 to 2020-06-09\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 High 253 non-null float64\n 1 Low 253 non-null float64\n 2 Open 253 non-null float64\n 3 Close 253 non-null float64\n 4 Volume 253 non-null float64\n 5 Adj Close 253 non-null float64\ndtypes: float64(6)\nmemory usage: 13.8 KB\n"
],
[
"aapl['Adj Close'].plot(legend=True,figsize=(15,5))",
"_____no_output_____"
],
[
"aapl['Volume'].plot(legend=True,figsize=(18,5))",
"_____no_output_____"
],
[
"ma_day=[10,20,50]\n\nfor ma in ma_day:\n column_name=str('MA for '+str(ma)+'days')\n aapl[column_name]=aapl['Adj Close'].rolling(ma).mean()",
"_____no_output_____"
],
[
"aapl[['Adj Close','MA for 10days','MA for 20days','MA for 50days']].plot(subplots=False,figsize=(15,5))",
"_____no_output_____"
],
[
"aapl['Daily Return']=aapl['Adj Close'].pct_change()\naapl['Daily Return'].plot(legend=True,figsize=(15,5))",
"_____no_output_____"
],
[
"se.distplot(aapl['Daily Return'].dropna(),bins=100,color='indianred')",
"_____no_output_____"
],
[
"closing=data.DataReader(tech_list,'yahoo',start,end)['Adj Close']\nclosing.head()",
"_____no_output_____"
],
[
"tech_rets=closing.pct_change()\ntech_rets.head()",
"_____no_output_____"
],
[
"b=se.jointplot('GOOG','GOOG',tech_rets,kind='hex')\nb.annotate(stats.pearsonr)",
"c:\\users\\mitts\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\seaborn\\axisgrid.py:1848: UserWarning: JointGrid annotation is deprecated and will be removed in a future release.\n warnings.warn(UserWarning(msg))\n"
],
[
"a=se.jointplot('GOOG','MSFT',tech_rets,kind='hex')\na.annotate(stats.pearsonr)",
"c:\\users\\mitts\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\seaborn\\axisgrid.py:1848: UserWarning: JointGrid annotation is deprecated and will be removed in a future release.\n warnings.warn(UserWarning(msg))\n"
],
[
"se.pairplot(tech_rets.dropna())",
"_____no_output_____"
],
[
"fig=se.PairGrid(tech_rets.dropna())\n\nfig.map_upper(p.scatter,color='indianred')\nfig.map_lower(se.kdeplot,cmap='cool')\nfig.map_diag(p.hist,bins=30)",
"_____no_output_____"
],
[
"fig=se.PairGrid(closing.dropna())\n\nfig.map_upper(p.scatter,color='indianred')\nfig.map_lower(se.kdeplot,cmap='cool')\nfig.map_diag(p.hist,bins=30)",
"_____no_output_____"
],
[
"corr=tech_rets.dropna().corr()\nse.heatmap(corr,annot=True)",
"_____no_output_____"
],
[
"corr=closing.dropna().corr()\nse.heatmap(corr,annot=True)",
"_____no_output_____"
],
[
"rets=tech_rets.dropna()",
"_____no_output_____"
],
[
"area=np.pi*20\n\np.scatter(rets.mean(),rets.std(),s=area)\np.xlabel('expected return')\np.ylabel('risk')\nfor label, x, y in zip(rets.columns, rets.mean(), rets.std()):\n p.annotate(\n label, \n xy = (x, y), xytext = (100, 50),\n textcoords = 'offset points', ha = 'right', va = 'bottom',\n arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3',color='black'))",
"_____no_output_____"
],
[
"rets['AAPL'].quantile(0.05) #value at risk (95 % data wil have return greater than it)",
"_____no_output_____"
],
[
"days=365\n\ndt=1/days\n\nmu=rets['GOOG'].mean()\n\nsigma=rets['GOOG'].std()",
"_____no_output_____"
],
[
"def stock_monte_carlo(start_price,days,mu,sigma):\n \n price=np.zeros(days)\n price[0]=start_price\n shock=np.zeros(days)\n drift=np.zeros(days)\n \n for x in range(1,days):\n shock[x]=np.random.normal(loc=mu*dt,scale=sigma*np.sqrt(dt))\n \n drift[x]=mu*dt\n \n price[x]=price[x-1]+((shock[x]+drift[x])*price[x-1])\n return price ",
"_____no_output_____"
],
[
"goog.head()",
"_____no_output_____"
],
[
"start_price=1100.900024\n\nfor run in range(100):\n p.plot(stock_monte_carlo(start_price,days,mu,sigma))\np.xlabel('days')\np.ylabel('price')\np.title('monte carlo simulation for Google',weight='bold')",
"_____no_output_____"
],
[
"runs=10000\n\nsimulations=np.zeros(runs)\nfor run in range(runs):\n simulations[run]=stock_monte_carlo(start_price,days,mu,sigma)[days-1]",
"_____no_output_____"
],
[
"q=np.percentile(simulations,1)\n\np.hist(simulations,bins=200)\np.figtext(0.6,0.8,s='start price='+str(start_price))\np.figtext(0.6,0.7,s='mean final price='+str(simulations.mean()))\np.figtext(0.6,0.6,s='value at risk='+str(start_price-q))\np.figtext(0.15,0.6,s='q(0.99)='+str(q))\np.axvline(x=q,linewidth=2,color='red')\np.title(\"Final price distribution for Google Stock after \"+str(days)+' days', weight='bold')\np.show()",
"_____no_output_____"
],
[
"rets['GOOG'].plot(figsize=(15,5))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdb9f8dc34fa2e5bd054d358c2902362929c9dd | 61,486 | ipynb | Jupyter Notebook | capstone_project/notebooks/model_construction/model_06.ipynb | elbydata/flatiron_capstone | a4238fdcb92261aff361b7e2d5a290db9bee7ae1 | [
"MIT"
] | null | null | null | capstone_project/notebooks/model_construction/model_06.ipynb | elbydata/flatiron_capstone | a4238fdcb92261aff361b7e2d5a290db9bee7ae1 | [
"MIT"
] | null | null | null | capstone_project/notebooks/model_construction/model_06.ipynb | elbydata/flatiron_capstone | a4238fdcb92261aff361b7e2d5a290db9bee7ae1 | [
"MIT"
] | null | null | null | 47.960998 | 249 | 0.538903 | [
[
[
"# Model 6: Kernel Initialization",
"_____no_output_____"
],
[
"This notebook contains the construction and training of the model iterations and various experiments. The notebook is split up into four sections: training mode selection (where the model will run), set-up, model constrution, and training. \n\nEvaluation will take place in the *model_optimization_and_evaluation.ipynb* notebook found in the *notebooks* folder.",
"_____no_output_____"
]
],
[
[
"# importing necessary packages and libraries\nimport numpy as np\nimport pandas as pd\nfrom keras import layers\nfrom keras import models\nfrom keras import optimizers\nfrom keras import applications \nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nimport pickle",
"Using TensorFlow backend.\n"
]
],
[
[
"## Step 1: Training Mode Selection",
"_____no_output_____"
],
[
"In the cell below, specify the training mode for the model. This will determine the location from which the source data is drawn, and to which the trained models (and training histories) are saved. ",
"_____no_output_____"
],
[
"- **training_mode = 'floydhub'** (runs on Floydhub)\n- **training_mode = 'local'** (runs on local disk and processor)",
"_____no_output_____"
]
],
[
[
"# select training mode\ntraining_mode = 'floydhub'",
"_____no_output_____"
]
],
[
[
"## Step 2: Set-up",
"_____no_output_____"
]
],
[
[
"# directory base paths\ndata_path_local = '../../data/0002_array_data/train_data/'\nmodel_path_local = '../../notebooks/model_construction/saved_models/'\ndata_path_floydhub = '/floyd/input/capstone_mushrooms/'\nmodel_path_floydhub = '/floyd/home/'\n\n# setting directory paths based on training mode selection\nif training_mode == 'floydhub':\n data_path = data_path_floydhub\n model_path = model_path_floydhub\nelif training_mode == 'local':\n data_path = data_path_local\n model_path = model_path_local\nelse:\n raise Exception('Please choose valid training mode: \"floydhub\" or \"local\".')",
"_____no_output_____"
],
[
"# loading the training and validation data subsets\nX_train = np.load(f'{data_path}X_train_data.npy')\ny_train = np.load(f'{data_path}y_train_data.npy')\nX_val = np.load(f'{data_path}X_val_data.npy')\ny_val = np.load(f'{data_path}y_val_data.npy')",
"_____no_output_____"
],
[
"# setting training parameters\nbatch_size = 8\nn_classes = 20\nn_epochs = 30\nimg_shape = X_train.shape[1:]\nmodel_names = []\nmodel_list = []\nmodel_hists = []",
"_____no_output_____"
]
],
[
[
"## Step 3: Model Construction",
"_____no_output_____"
],
[
"### Trial 1 - Glorot Normal Initialization",
"_____no_output_____"
]
],
[
[
"# defining the model architecture\nm6_t1 = models.Sequential()\n\n# convolution/max pool stacks\nm6_t1.add(layers.Conv2D(32,(3,3), input_shape=img_shape, padding='same', kernel_initializer='glorot_normal'))\nm6_t1.add(layers.LeakyReLU(alpha=0.1))\nm6_t1.add(layers.MaxPooling2D((2,2)))\n\nm6_t1.add(layers.Conv2D(64,(3,3), padding='same', kernel_initializer='glorot_normal'))\nm6_t1.add(layers.LeakyReLU(alpha=0.1))\nm6_t1.add(layers.MaxPooling2D((2,2)))\n\nm6_t1.add(layers.Conv2D(128,(3,3), padding='same', kernel_initializer='glorot_normal'))\nm6_t1.add(layers.LeakyReLU(alpha=0.1))\nm6_t1.add(layers.MaxPooling2D((2,2)))\n\nm6_t1.add(layers.Conv2D(256,(3,3), padding='same', kernel_initializer='glorot_normal'))\nm6_t1.add(layers.LeakyReLU(alpha=0.1))\nm6_t1.add(layers.MaxPooling2D((2,2)))\n\n# fully connected layers\nm6_t1.add(layers.Flatten())\nm6_t1.add(layers.Dense(512, kernel_initializer='glorot_normal'))\nm6_t1.add(layers.LeakyReLU(alpha=0.1))\nm6_t1.add(layers.Dense(n_classes, activation='softmax'))\n\n# reviewing the model architecture and adding model and name to list\nm6_t1.summary()\nmodel_names.append('m6_t1')\nmodel_list.append(m6_t1)",
"Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 200, 200, 32) 896 \n_________________________________________________________________\nleaky_re_lu_1 (LeakyReLU) (None, 200, 200, 32) 0 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 100, 100, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 100, 100, 64) 18496 \n_________________________________________________________________\nleaky_re_lu_2 (LeakyReLU) (None, 100, 100, 64) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nleaky_re_lu_3 (LeakyReLU) (None, 50, 50, 128) 0 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nleaky_re_lu_4 (LeakyReLU) (None, 25, 25, 256) 0 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 12, 12, 256) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 36864) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 18874880 \n_________________________________________________________________\nleaky_re_lu_5 (LeakyReLU) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 20) 10260 \n=================================================================\nTotal params: 19,273,556\nTrainable params: 19,273,556\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Trial 2 - Glorot Uniform Initialization (model_05 re-run)",
"_____no_output_____"
]
],
[
[
"# defining the model architecture\nm6_t2 = models.Sequential()\n\n# convolution/max pool stacks\nm6_t2.add(layers.Conv2D(32,(3,3), input_shape=img_shape, padding='same', kernel_initializer='glorot_uniform'))\nm6_t2.add(layers.LeakyReLU(alpha=0.1))\nm6_t2.add(layers.MaxPooling2D((2,2)))\n\nm6_t2.add(layers.Conv2D(64,(3,3), padding='same', kernel_initializer='glorot_uniform'))\nm6_t2.add(layers.LeakyReLU(alpha=0.1))\nm6_t2.add(layers.MaxPooling2D((2,2)))\n\nm6_t2.add(layers.Conv2D(128,(3,3), padding='same', kernel_initializer='glorot_uniform'))\nm6_t2.add(layers.LeakyReLU(alpha=0.1))\nm6_t2.add(layers.MaxPooling2D((2,2)))\n\nm6_t2.add(layers.Conv2D(256,(3,3), padding='same', kernel_initializer='glorot_uniform'))\nm6_t2.add(layers.LeakyReLU(alpha=0.1))\nm6_t2.add(layers.MaxPooling2D((2,2)))\n\n# fully connected layers\nm6_t2.add(layers.Flatten())\nm6_t2.add(layers.Dense(512, kernel_initializer='glorot_uniform'))\nm6_t2.add(layers.LeakyReLU(alpha=0.1))\nm6_t2.add(layers.Dense(n_classes, activation='softmax'))\n\n# reviewing the model architecture and adding model and name to list\nm6_t2.summary()\nmodel_names.append('m6_t2')\nmodel_list.append(m6_t2)",
"Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_5 (Conv2D) (None, 200, 200, 32) 896 \n_________________________________________________________________\nleaky_re_lu_6 (LeakyReLU) (None, 200, 200, 32) 0 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 100, 100, 32) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 100, 100, 64) 18496 \n_________________________________________________________________\nleaky_re_lu_7 (LeakyReLU) (None, 100, 100, 64) 0 \n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nleaky_re_lu_8 (LeakyReLU) (None, 50, 50, 128) 0 \n_________________________________________________________________\nmax_pooling2d_7 (MaxPooling2 (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nleaky_re_lu_9 (LeakyReLU) (None, 25, 25, 256) 0 \n_________________________________________________________________\nmax_pooling2d_8 (MaxPooling2 (None, 12, 12, 256) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 36864) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 512) 18874880 \n_________________________________________________________________\nleaky_re_lu_10 (LeakyReLU) (None, 512) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 20) 10260 \n=================================================================\nTotal params: 19,273,556\nTrainable params: 19,273,556\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Trial 3 - He Normal Initialization",
"_____no_output_____"
]
],
[
[
"# defining the model architecture\nm6_t3 = models.Sequential()\n\n# convolution/max pool stacks\nm6_t3.add(layers.Conv2D(32,(3,3), input_shape=img_shape, padding='same', kernel_initializer='he_normal'))\nm6_t3.add(layers.LeakyReLU(alpha=0.1))\nm6_t3.add(layers.MaxPooling2D((2,2)))\n\nm6_t3.add(layers.Conv2D(64,(3,3), padding='same', kernel_initializer='he_normal'))\nm6_t3.add(layers.LeakyReLU(alpha=0.1))\nm6_t3.add(layers.MaxPooling2D((2,2)))\n\nm6_t3.add(layers.Conv2D(128,(3,3), padding='same', kernel_initializer='he_normal'))\nm6_t3.add(layers.LeakyReLU(alpha=0.1))\nm6_t3.add(layers.MaxPooling2D((2,2)))\n\nm6_t3.add(layers.Conv2D(256,(3,3), padding='same', kernel_initializer='he_normal'))\nm6_t3.add(layers.LeakyReLU(alpha=0.1))\nm6_t3.add(layers.MaxPooling2D((2,2)))\n\n# fully connected layers\nm6_t3.add(layers.Flatten())\nm6_t3.add(layers.Dense(512, kernel_initializer='he_normal'))\nm6_t3.add(layers.LeakyReLU(alpha=0.1))\nm6_t3.add(layers.Dense(n_classes, activation='softmax'))\n\n# reviewing the model architecture and adding model and name to list\nm6_t3.summary()\nmodel_names.append('m6_t3')\nmodel_list.append(m6_t3)",
"Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_9 (Conv2D) (None, 200, 200, 32) 896 \n_________________________________________________________________\nleaky_re_lu_11 (LeakyReLU) (None, 200, 200, 32) 0 \n_________________________________________________________________\nmax_pooling2d_9 (MaxPooling2 (None, 100, 100, 32) 0 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 100, 100, 64) 18496 \n_________________________________________________________________\nleaky_re_lu_12 (LeakyReLU) (None, 100, 100, 64) 0 \n_________________________________________________________________\nmax_pooling2d_10 (MaxPooling (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nleaky_re_lu_13 (LeakyReLU) (None, 50, 50, 128) 0 \n_________________________________________________________________\nmax_pooling2d_11 (MaxPooling (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nleaky_re_lu_14 (LeakyReLU) (None, 25, 25, 256) 0 \n_________________________________________________________________\nmax_pooling2d_12 (MaxPooling (None, 12, 12, 256) 0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 36864) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 512) 18874880 \n_________________________________________________________________\nleaky_re_lu_15 (LeakyReLU) (None, 512) 0 \n_________________________________________________________________\ndense_6 (Dense) (None, 20) 10260 \n=================================================================\nTotal params: 19,273,556\nTrainable params: 19,273,556\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### Trial 4 - He Uniform Initialization",
"_____no_output_____"
]
],
[
[
"# defining the model architecture\nm6_t4 = models.Sequential()\n\n# convolution/max pool stacks\nm6_t4.add(layers.Conv2D(32,(3,3), input_shape=img_shape, padding='same', kernel_initializer='he_uniform'))\nm6_t4.add(layers.LeakyReLU(alpha=0.1))\nm6_t4.add(layers.MaxPooling2D((2,2)))\n\nm6_t4.add(layers.Conv2D(64,(3,3), padding='same', kernel_initializer='he_uniform'))\nm6_t4.add(layers.LeakyReLU(alpha=0.1))\nm6_t4.add(layers.MaxPooling2D((2,2)))\n\nm6_t4.add(layers.Conv2D(128,(3,3), padding='same', kernel_initializer='he_uniform'))\nm6_t4.add(layers.LeakyReLU(alpha=0.1))\nm6_t4.add(layers.MaxPooling2D((2,2)))\n\nm6_t4.add(layers.Conv2D(256,(3,3), padding='same', kernel_initializer='he_uniform'))\nm6_t4.add(layers.LeakyReLU(alpha=0.1))\nm6_t4.add(layers.MaxPooling2D((2,2)))\n\n# fully connected layers\nm6_t4.add(layers.Flatten())\nm6_t4.add(layers.Dense(512, kernel_initializer='he_uniform'))\nm6_t4.add(layers.LeakyReLU(alpha=0.1))\nm6_t4.add(layers.Dense(n_classes, activation='softmax'))\n\n# reviewing the model architecture and adding model and name to list\nm6_t4.summary()\nmodel_names.append('m6_t4')\nmodel_list.append(m6_t4)",
"Model: \"sequential_4\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_13 (Conv2D) (None, 200, 200, 32) 896 \n_________________________________________________________________\nleaky_re_lu_16 (LeakyReLU) (None, 200, 200, 32) 0 \n_________________________________________________________________\nmax_pooling2d_13 (MaxPooling (None, 100, 100, 32) 0 \n_________________________________________________________________\nconv2d_14 (Conv2D) (None, 100, 100, 64) 18496 \n_________________________________________________________________\nleaky_re_lu_17 (LeakyReLU) (None, 100, 100, 64) 0 \n_________________________________________________________________\nmax_pooling2d_14 (MaxPooling (None, 50, 50, 64) 0 \n_________________________________________________________________\nconv2d_15 (Conv2D) (None, 50, 50, 128) 73856 \n_________________________________________________________________\nleaky_re_lu_18 (LeakyReLU) (None, 50, 50, 128) 0 \n_________________________________________________________________\nmax_pooling2d_15 (MaxPooling (None, 25, 25, 128) 0 \n_________________________________________________________________\nconv2d_16 (Conv2D) (None, 25, 25, 256) 295168 \n_________________________________________________________________\nleaky_re_lu_19 (LeakyReLU) (None, 25, 25, 256) 0 \n_________________________________________________________________\nmax_pooling2d_16 (MaxPooling (None, 12, 12, 256) 0 \n_________________________________________________________________\nflatten_4 (Flatten) (None, 36864) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 512) 18874880 \n_________________________________________________________________\nleaky_re_lu_20 (LeakyReLU) (None, 512) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 20) 10260 \n=================================================================\nTotal params: 19,273,556\nTrainable params: 19,273,556\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## Step 4: Training",
"_____no_output_____"
]
],
[
[
"# setting up standardization and augmentation parameters\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n rotation_range=40,\n width_shift_range=0.3,\n height_shift_range=0.3,\n shear_range=0.3,\n zoom_range=0.3,\n fill_mode='nearest',\n horizontal_flip=True,\n vertical_flip=True)\nval_datagen = ImageDataGenerator(rescale=1./255)",
"_____no_output_____"
],
[
"%%time\n# data standardization and augmentation\ntrain_generator = train_datagen.flow(X_train, y_train, batch_size=batch_size)\nval_generator = val_datagen.flow(X_val, y_val, batch_size=batch_size)",
"CPU times: user 403 ms, sys: 861 ms, total: 1.26 s\nWall time: 1.27 s\n"
],
[
"# compiling loss functions\nm6_t1.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['acc'])\nm6_t2.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['acc'])\nm6_t3.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['acc'])\nm6_t4.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=['acc'])",
"_____no_output_____"
],
[
"%%time\n# setting up model saving checkpoints\nm6_t1_cp = ModelCheckpoint(filepath=f'{model_path}m6_t1.h5',\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\n# fitting model\nm6_t1_history = m6_t1.fit(train_generator,\n steps_per_epoch=len(X_train)//batch_size,\n epochs=n_epochs,\n callbacks=[m6_t1_cp],\n validation_data=val_generator,\n validation_steps=len(X_val)//batch_size)\n\n# adding training history to list\nmodel_hists.append(m6_t1_history)",
"Epoch 1/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 2.6710 - acc: 0.1541 - val_loss: 2.6768 - val_acc: 0.2822\n\nEpoch 00001: val_acc improved from -inf to 0.28218, saving model to /floyd/home/m6_t1.h5\nEpoch 2/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 2.3122 - acc: 0.2578 - val_loss: 1.9666 - val_acc: 0.3392\n\nEpoch 00002: val_acc improved from 0.28218 to 0.33915, saving model to /floyd/home/m6_t1.h5\nEpoch 3/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 2.1752 - acc: 0.2953 - val_loss: 2.5922 - val_acc: 0.4239\n\nEpoch 00003: val_acc improved from 0.33915 to 0.42394, saving model to /floyd/home/m6_t1.h5\nEpoch 4/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.0961 - acc: 0.3300 - val_loss: 2.1492 - val_acc: 0.4140\n\nEpoch 00004: val_acc did not improve from 0.42394\nEpoch 5/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.9991 - acc: 0.3516 - val_loss: 1.2973 - val_acc: 0.4875\n\nEpoch 00005: val_acc improved from 0.42394 to 0.48753, saving model to /floyd/home/m6_t1.h5\nEpoch 6/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.9331 - acc: 0.3856 - val_loss: 1.2149 - val_acc: 0.5125\n\nEpoch 00006: val_acc improved from 0.48753 to 0.51247, saving model to /floyd/home/m6_t1.h5\nEpoch 7/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.8529 - acc: 0.4098 - val_loss: 1.3579 - val_acc: 0.4489\n\nEpoch 00007: val_acc did not improve from 0.51247\nEpoch 8/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.8269 - acc: 0.4207 - val_loss: 1.5919 - val_acc: 0.4738\n\nEpoch 00008: val_acc did not improve from 0.51247\nEpoch 9/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7696 - acc: 0.4369 - val_loss: 1.0412 - val_acc: 0.5249\n\nEpoch 00009: val_acc improved from 0.51247 to 0.52494, saving model to /floyd/home/m6_t1.h5\nEpoch 10/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7363 - acc: 0.4505 - val_loss: 1.6408 - val_acc: 0.5387\n\nEpoch 00010: val_acc improved from 0.52494 to 0.53865, saving model to /floyd/home/m6_t1.h5\nEpoch 11/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6957 - acc: 0.4684 - val_loss: 1.5052 - val_acc: 0.5162\n\nEpoch 00011: val_acc did not improve from 0.53865\nEpoch 12/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6536 - acc: 0.4770 - val_loss: 1.6862 - val_acc: 0.4988\n\nEpoch 00012: val_acc did not improve from 0.53865\nEpoch 13/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6083 - acc: 0.4886 - val_loss: 1.5350 - val_acc: 0.5648\n\nEpoch 00013: val_acc improved from 0.53865 to 0.56484, saving model to /floyd/home/m6_t1.h5\nEpoch 14/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5899 - acc: 0.4936 - val_loss: 1.0825 - val_acc: 0.5574\n\nEpoch 00014: val_acc did not improve from 0.56484\nEpoch 15/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5525 - acc: 0.4993 - val_loss: 0.7778 - val_acc: 0.5399\n\nEpoch 00015: val_acc did not improve from 0.56484\nEpoch 16/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5121 - acc: 0.5163 - val_loss: 1.2167 - val_acc: 0.5761\n\nEpoch 00016: val_acc improved from 0.56484 to 0.57606, saving model to /floyd/home/m6_t1.h5\nEpoch 17/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.4934 - acc: 0.5260 - val_loss: 
1.9171 - val_acc: 0.5711\n\nEpoch 00017: val_acc did not improve from 0.57606\nEpoch 18/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4592 - acc: 0.5442 - val_loss: 0.5857 - val_acc: 0.6234\n\nEpoch 00018: val_acc improved from 0.57606 to 0.62344, saving model to /floyd/home/m6_t1.h5\nEpoch 19/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4259 - acc: 0.5507 - val_loss: 1.1405 - val_acc: 0.5860\n\nEpoch 00019: val_acc did not improve from 0.62344\nEpoch 20/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4143 - acc: 0.5488 - val_loss: 1.2332 - val_acc: 0.6010\n\nEpoch 00020: val_acc did not improve from 0.62344\nEpoch 21/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3724 - acc: 0.5633 - val_loss: 1.3343 - val_acc: 0.6147\n\nEpoch 00021: val_acc did not improve from 0.62344\nEpoch 22/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3647 - acc: 0.5635 - val_loss: 2.3659 - val_acc: 0.6110\n\nEpoch 00022: val_acc did not improve from 0.62344\nEpoch 23/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3130 - acc: 0.5810 - val_loss: 1.0401 - val_acc: 0.6247\n\nEpoch 00023: val_acc improved from 0.62344 to 0.62469, saving model to /floyd/home/m6_t1.h5\nEpoch 24/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3299 - acc: 0.5794 - val_loss: 1.1893 - val_acc: 0.5885\n\nEpoch 00024: val_acc did not improve from 0.62469\nEpoch 25/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2931 - acc: 0.5821 - val_loss: 0.7516 - val_acc: 0.5810\n\nEpoch 00025: val_acc did not improve from 0.62469\nEpoch 26/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2703 - acc: 0.5975 - val_loss: 1.2669 - val_acc: 0.6596\n\nEpoch 00026: val_acc improved from 0.62469 to 0.65960, saving model to /floyd/home/m6_t1.h5\nEpoch 27/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.2550 - acc: 0.6036 - val_loss: 0.7135 - val_acc: 0.6097\n\nEpoch 00027: val_acc did not improve from 0.65960\nEpoch 28/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2350 - acc: 0.6163 - val_loss: 0.8678 - val_acc: 0.6209\n\nEpoch 00028: val_acc did not improve from 0.65960\nEpoch 29/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.2083 - acc: 0.6158 - val_loss: 0.7494 - val_acc: 0.6509\n\nEpoch 00029: val_acc did not improve from 0.65960\nEpoch 30/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2091 - acc: 0.6189 - val_loss: 1.0884 - val_acc: 0.6234\n\nEpoch 00030: val_acc did not improve from 0.65960\nCPU times: user 53min 18s, sys: 6min 8s, total: 59min 27s\nWall time: 38min 27s\n"
],
[
"%%time\n# setting up model saving checkpoints\nm6_t2_cp = ModelCheckpoint(filepath=f'{model_path}m6_t2.h5',\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\n# fitting model\nm6_t2_history = m6_t2.fit(train_generator,\n steps_per_epoch=len(X_train)//batch_size,\n epochs=n_epochs,\n callbacks=[m6_t2_cp],\n validation_data=val_generator,\n validation_steps=len(X_val)//batch_size)\n\n# adding training history to list\nmodel_hists.append(m6_t2_history)",
"Epoch 1/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.7505 - acc: 0.1386 - val_loss: 2.9185 - val_acc: 0.2153\n\nEpoch 00001: val_acc improved from -inf to 0.21535, saving model to /floyd/home/m6_t2.h5\nEpoch 2/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.3673 - acc: 0.2264 - val_loss: 1.8793 - val_acc: 0.3092\n\nEpoch 00002: val_acc improved from 0.21535 to 0.30923, saving model to /floyd/home/m6_t2.h5\nEpoch 3/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 2.2187 - acc: 0.2831 - val_loss: 1.8935 - val_acc: 0.3516\n\nEpoch 00003: val_acc improved from 0.30923 to 0.35162, saving model to /floyd/home/m6_t2.h5\nEpoch 4/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.1186 - acc: 0.3216 - val_loss: 1.9212 - val_acc: 0.4302\n\nEpoch 00004: val_acc improved from 0.35162 to 0.43017, saving model to /floyd/home/m6_t2.h5\nEpoch 5/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.0216 - acc: 0.3536 - val_loss: 1.7460 - val_acc: 0.4339\n\nEpoch 00005: val_acc improved from 0.43017 to 0.43392, saving model to /floyd/home/m6_t2.h5\nEpoch 6/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.9356 - acc: 0.3741 - val_loss: 1.3708 - val_acc: 0.4825\n\nEpoch 00006: val_acc improved from 0.43392 to 0.48254, saving model to /floyd/home/m6_t2.h5\nEpoch 7/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.8855 - acc: 0.4015 - val_loss: 1.0997 - val_acc: 0.4800\n\nEpoch 00007: val_acc did not improve from 0.48254\nEpoch 8/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.8227 - acc: 0.4258 - val_loss: 1.7412 - val_acc: 0.5000\n\nEpoch 00008: val_acc improved from 0.48254 to 0.50000, saving model to /floyd/home/m6_t2.h5\nEpoch 9/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7838 - acc: 0.4341 - val_loss: 1.5312 - val_acc: 0.4950\n\nEpoch 00009: val_acc did not improve from 0.50000\nEpoch 10/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7239 - acc: 0.4490 - val_loss: 1.6214 - val_acc: 0.4751\n\nEpoch 00010: val_acc did not improve from 0.50000\nEpoch 11/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6947 - acc: 0.4583 - val_loss: 1.9941 - val_acc: 0.5187\n\nEpoch 00011: val_acc improved from 0.50000 to 0.51870, saving model to /floyd/home/m6_t2.h5\nEpoch 12/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6172 - acc: 0.4875 - val_loss: 1.6251 - val_acc: 0.5511\n\nEpoch 00012: val_acc improved from 0.51870 to 0.55112, saving model to /floyd/home/m6_t2.h5\nEpoch 13/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6007 - acc: 0.4907 - val_loss: 1.6367 - val_acc: 0.5436\n\nEpoch 00013: val_acc did not improve from 0.55112\nEpoch 14/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.5708 - acc: 0.4981 - val_loss: 1.5721 - val_acc: 0.5324\n\nEpoch 00014: val_acc did not improve from 0.55112\nEpoch 15/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5459 - acc: 0.5121 - val_loss: 2.1472 - val_acc: 0.5299\n\nEpoch 00015: val_acc did not improve from 0.55112\nEpoch 16/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.5140 - acc: 0.5248 - val_loss: 1.3374 - val_acc: 0.6209\n\nEpoch 00016: val_acc improved from 0.55112 to 0.62095, saving model to /floyd/home/m6_t2.h5\nEpoch 17/30\n1013/1013 [==============================] - 76s 75ms/step - 
loss: 1.4840 - acc: 0.5314 - val_loss: 0.4331 - val_acc: 0.5873\n\nEpoch 00017: val_acc did not improve from 0.62095\nEpoch 18/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4658 - acc: 0.5305 - val_loss: 1.8750 - val_acc: 0.5599\n\nEpoch 00018: val_acc did not improve from 0.62095\nEpoch 19/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4264 - acc: 0.5501 - val_loss: 1.7285 - val_acc: 0.5960\n\nEpoch 00019: val_acc did not improve from 0.62095\nEpoch 20/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4129 - acc: 0.5474 - val_loss: 2.7438 - val_acc: 0.5337\n\nEpoch 00020: val_acc did not improve from 0.62095\nEpoch 21/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3816 - acc: 0.5552 - val_loss: 1.0579 - val_acc: 0.6247\n\nEpoch 00021: val_acc improved from 0.62095 to 0.62469, saving model to /floyd/home/m6_t2.h5\nEpoch 22/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3605 - acc: 0.5649 - val_loss: 0.8042 - val_acc: 0.6484\n\nEpoch 00022: val_acc improved from 0.62469 to 0.64838, saving model to /floyd/home/m6_t2.h5\nEpoch 23/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3585 - acc: 0.5670 - val_loss: 0.7826 - val_acc: 0.6621\n\nEpoch 00023: val_acc improved from 0.64838 to 0.66209, saving model to /floyd/home/m6_t2.h5\nEpoch 24/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3147 - acc: 0.5843 - val_loss: 0.9866 - val_acc: 0.6072\n\nEpoch 00024: val_acc did not improve from 0.66209\nEpoch 25/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2876 - acc: 0.5926 - val_loss: 1.5987 - val_acc: 0.6309\n\nEpoch 00025: val_acc did not improve from 0.66209\nEpoch 26/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2602 - acc: 0.5975 - val_loss: 1.2531 - val_acc: 0.6596\n\nEpoch 00026: val_acc did not improve from 0.66209\nEpoch 27/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2535 - acc: 0.5979 - val_loss: 0.6246 - val_acc: 0.6384\n\nEpoch 00027: val_acc did not improve from 0.66209\nEpoch 28/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2420 - acc: 0.6058 - val_loss: 0.5762 - val_acc: 0.6297\n\nEpoch 00028: val_acc did not improve from 0.66209\nEpoch 29/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.1823 - acc: 0.6258 - val_loss: 1.0309 - val_acc: 0.6384\n\nEpoch 00029: val_acc did not improve from 0.66209\nEpoch 30/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.1945 - acc: 0.6196 - val_loss: 0.8481 - val_acc: 0.6646\n\nEpoch 00030: val_acc improved from 0.66209 to 0.66459, saving model to /floyd/home/m6_t2.h5\nCPU times: user 52min 56s, sys: 6min 7s, total: 59min 4s\nWall time: 38min 25s\n"
],
[
"%%time\n# setting up model saving checkpoints\nm6_t3_cp = ModelCheckpoint(filepath=f'{model_path}m6_t3.h5',\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\n# fitting model\nm6_t3_history = m6_t3.fit(train_generator,\n steps_per_epoch=len(X_train)//batch_size,\n epochs=n_epochs,\n callbacks=[m6_t3_cp],\n validation_data=val_generator,\n validation_steps=len(X_val)//batch_size)\n\n# adding training history to list\nmodel_hists.append(m6_t3_history)",
"Epoch 1/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.9541 - acc: 0.1677 - val_loss: 2.9807 - val_acc: 0.2723\n\nEpoch 00001: val_acc improved from -inf to 0.27228, saving model to /floyd/home/m6_t3.h5\nEpoch 2/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.3421 - acc: 0.2572 - val_loss: 2.1329 - val_acc: 0.3566\n\nEpoch 00002: val_acc improved from 0.27228 to 0.35661, saving model to /floyd/home/m6_t3.h5\nEpoch 3/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.2194 - acc: 0.2860 - val_loss: 2.4192 - val_acc: 0.3666\n\nEpoch 00003: val_acc improved from 0.35661 to 0.36658, saving model to /floyd/home/m6_t3.h5\nEpoch 4/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.1033 - acc: 0.3270 - val_loss: 1.7728 - val_acc: 0.4501\n\nEpoch 00004: val_acc improved from 0.36658 to 0.45012, saving model to /floyd/home/m6_t3.h5\nEpoch 5/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.0029 - acc: 0.3616 - val_loss: 1.8867 - val_acc: 0.4489\n\nEpoch 00005: val_acc did not improve from 0.45012\nEpoch 6/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.9231 - acc: 0.3848 - val_loss: 2.3081 - val_acc: 0.4713\n\nEpoch 00006: val_acc improved from 0.45012 to 0.47132, saving model to /floyd/home/m6_t3.h5\nEpoch 7/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.8573 - acc: 0.4117 - val_loss: 1.4678 - val_acc: 0.4751\n\nEpoch 00007: val_acc improved from 0.47132 to 0.47506, saving model to /floyd/home/m6_t3.h5\nEpoch 8/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.8095 - acc: 0.4244 - val_loss: 1.2634 - val_acc: 0.4688\n\nEpoch 00008: val_acc did not improve from 0.47506\nEpoch 9/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7349 - acc: 0.4494 - val_loss: 0.8570 - val_acc: 0.5349\n\nEpoch 00009: val_acc improved from 0.47506 to 0.53491, saving model to /floyd/home/m6_t3.h5\nEpoch 10/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6977 - acc: 0.4542 - val_loss: 1.2531 - val_acc: 0.5511\n\nEpoch 00010: val_acc improved from 0.53491 to 0.55112, saving model to /floyd/home/m6_t3.h5\nEpoch 11/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6527 - acc: 0.4785 - val_loss: 1.1586 - val_acc: 0.4938\n\nEpoch 00011: val_acc did not improve from 0.55112\nEpoch 12/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6153 - acc: 0.4944 - val_loss: 1.5127 - val_acc: 0.5287\n\nEpoch 00012: val_acc did not improve from 0.55112\nEpoch 13/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5806 - acc: 0.4962 - val_loss: 1.0453 - val_acc: 0.5835\n\nEpoch 00013: val_acc improved from 0.55112 to 0.58354, saving model to /floyd/home/m6_t3.h5\nEpoch 14/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.5577 - acc: 0.5051 - val_loss: 0.5549 - val_acc: 0.5561\n\nEpoch 00014: val_acc did not improve from 0.58354\nEpoch 15/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5283 - acc: 0.5154 - val_loss: 1.6895 - val_acc: 0.5200\n\nEpoch 00015: val_acc did not improve from 0.58354\nEpoch 16/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4770 - acc: 0.5373 - val_loss: 1.4672 - val_acc: 0.5998\n\nEpoch 00016: val_acc improved from 0.58354 to 0.59975, saving model to /floyd/home/m6_t3.h5\nEpoch 17/30\n1013/1013 [==============================] - 76s 75ms/step - 
loss: 1.4585 - acc: 0.5362 - val_loss: 1.0987 - val_acc: 0.5885\n\nEpoch 00017: val_acc did not improve from 0.59975\nEpoch 18/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.4165 - acc: 0.5544 - val_loss: 0.9250 - val_acc: 0.6097\n\nEpoch 00018: val_acc improved from 0.59975 to 0.60973, saving model to /floyd/home/m6_t3.h5\nEpoch 19/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3962 - acc: 0.5528 - val_loss: 1.4004 - val_acc: 0.6072\n\nEpoch 00019: val_acc did not improve from 0.60973\nEpoch 20/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3541 - acc: 0.5701 - val_loss: 0.8967 - val_acc: 0.5773\n\nEpoch 00020: val_acc did not improve from 0.60973\nEpoch 21/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.3664 - acc: 0.5717 - val_loss: 1.6981 - val_acc: 0.6272\n\nEpoch 00021: val_acc improved from 0.60973 to 0.62718, saving model to /floyd/home/m6_t3.h5\nEpoch 22/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3387 - acc: 0.5759 - val_loss: 0.5856 - val_acc: 0.6234\n\nEpoch 00022: val_acc did not improve from 0.62718\nEpoch 23/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3076 - acc: 0.5921 - val_loss: 0.6289 - val_acc: 0.6496\n\nEpoch 00023: val_acc improved from 0.62718 to 0.64963, saving model to /floyd/home/m6_t3.h5\nEpoch 24/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2795 - acc: 0.5948 - val_loss: 0.6141 - val_acc: 0.6459\n\nEpoch 00024: val_acc did not improve from 0.64963\nEpoch 25/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2616 - acc: 0.6016 - val_loss: 1.0728 - val_acc: 0.5561\n\nEpoch 00025: val_acc did not improve from 0.64963\nEpoch 26/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.2494 - acc: 0.6099 - val_loss: 1.6296 - val_acc: 0.6222\n\nEpoch 00026: val_acc did not improve from 0.64963\nEpoch 27/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2268 - acc: 0.6163 - val_loss: 0.5074 - val_acc: 0.6097\n\nEpoch 00027: val_acc did not improve from 0.64963\nEpoch 28/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.1938 - acc: 0.6222 - val_loss: 1.1985 - val_acc: 0.5411\n\nEpoch 00028: val_acc did not improve from 0.64963\nEpoch 29/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.2000 - acc: 0.6231 - val_loss: 1.0994 - val_acc: 0.5786\n\nEpoch 00029: val_acc did not improve from 0.64963\nEpoch 30/30\n1013/1013 [==============================] - 75s 74ms/step - loss: 1.1700 - acc: 0.6299 - val_loss: 1.4247 - val_acc: 0.6047\n\nEpoch 00030: val_acc did not improve from 0.64963\nCPU times: user 53min 1s, sys: 6min 8s, total: 59min 10s\nWall time: 38min 21s\n"
],
[
"%%time\n# setting up model saving checkpoints\nm6_t4_cp = ModelCheckpoint(filepath=f'{model_path}m6_t4.h5',\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n\n# fitting model\nm6_t4_history = m6_t4.fit(train_generator,\n steps_per_epoch=len(X_train)//batch_size,\n epochs=n_epochs,\n callbacks=[m6_t4_cp],\n validation_data=val_generator,\n validation_steps=len(X_val)//batch_size)\n\n# adding training history to list\nmodel_hists.append(m6_t4_history)",
"Epoch 1/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.9465 - acc: 0.1719 - val_loss: 2.0469 - val_acc: 0.2995\n\nEpoch 00001: val_acc improved from -inf to 0.29950, saving model to /floyd/home/m6_t4.h5\nEpoch 2/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.3500 - acc: 0.2557 - val_loss: 1.6644 - val_acc: 0.3229\n\nEpoch 00002: val_acc improved from 0.29950 to 0.32294, saving model to /floyd/home/m6_t4.h5\nEpoch 3/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.2191 - acc: 0.2881 - val_loss: 1.2348 - val_acc: 0.3429\n\nEpoch 00003: val_acc improved from 0.32294 to 0.34289, saving model to /floyd/home/m6_t4.h5\nEpoch 4/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.0843 - acc: 0.3346 - val_loss: 2.6100 - val_acc: 0.3953\n\nEpoch 00004: val_acc improved from 0.34289 to 0.39526, saving model to /floyd/home/m6_t4.h5\nEpoch 5/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 2.0163 - acc: 0.3591 - val_loss: 2.0516 - val_acc: 0.4526\n\nEpoch 00005: val_acc improved from 0.39526 to 0.45262, saving model to /floyd/home/m6_t4.h5\nEpoch 6/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.9288 - acc: 0.3820 - val_loss: 2.2124 - val_acc: 0.4676\n\nEpoch 00006: val_acc improved from 0.45262 to 0.46758, saving model to /floyd/home/m6_t4.h5\nEpoch 7/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.8710 - acc: 0.4088 - val_loss: 1.4783 - val_acc: 0.4913\n\nEpoch 00007: val_acc improved from 0.46758 to 0.49127, saving model to /floyd/home/m6_t4.h5\nEpoch 8/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.8208 - acc: 0.4256 - val_loss: 1.0768 - val_acc: 0.4863\n\nEpoch 00008: val_acc did not improve from 0.49127\nEpoch 9/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7682 - acc: 0.4367 - val_loss: 1.0089 - val_acc: 0.4863\n\nEpoch 00009: val_acc did not improve from 0.49127\nEpoch 10/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.7058 - acc: 0.4516 - val_loss: 1.8436 - val_acc: 0.5125\n\nEpoch 00010: val_acc improved from 0.49127 to 0.51247, saving model to /floyd/home/m6_t4.h5\nEpoch 11/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6839 - acc: 0.4628 - val_loss: 2.3732 - val_acc: 0.4601\n\nEpoch 00011: val_acc did not improve from 0.51247\nEpoch 12/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6330 - acc: 0.4783 - val_loss: 1.8400 - val_acc: 0.5224\n\nEpoch 00012: val_acc improved from 0.51247 to 0.52244, saving model to /floyd/home/m6_t4.h5\nEpoch 13/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.6076 - acc: 0.4957 - val_loss: 2.1440 - val_acc: 0.5287\n\nEpoch 00013: val_acc improved from 0.52244 to 0.52868, saving model to /floyd/home/m6_t4.h5\nEpoch 14/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.5600 - acc: 0.5096 - val_loss: 0.9978 - val_acc: 0.5511\n\nEpoch 00014: val_acc improved from 0.52868 to 0.55112, saving model to /floyd/home/m6_t4.h5\nEpoch 15/30\n1013/1013 [==============================] - 75s 75ms/step - loss: 1.5300 - acc: 0.5193 - val_loss: 1.7040 - val_acc: 0.6222\n\nEpoch 00015: val_acc improved from 0.55112 to 0.62219, saving model to /floyd/home/m6_t4.h5\nEpoch 16/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4960 - acc: 0.5260 - val_loss: 1.1368 - val_acc: 0.5025\n\nEpoch 00016: val_acc did not improve 
from 0.62219\nEpoch 17/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4688 - acc: 0.5369 - val_loss: 1.5455 - val_acc: 0.5574\n\nEpoch 00017: val_acc did not improve from 0.62219\nEpoch 18/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.4349 - acc: 0.5432 - val_loss: 2.0361 - val_acc: 0.5761\n\nEpoch 00018: val_acc did not improve from 0.62219\nEpoch 19/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4255 - acc: 0.5443 - val_loss: 1.2144 - val_acc: 0.5935\n\nEpoch 00019: val_acc did not improve from 0.62219\nEpoch 20/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.4114 - acc: 0.5509 - val_loss: 1.7353 - val_acc: 0.5960\n\nEpoch 00020: val_acc did not improve from 0.62219\nEpoch 21/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3660 - acc: 0.5648 - val_loss: 2.4577 - val_acc: 0.5998\n\nEpoch 00021: val_acc did not improve from 0.62219\nEpoch 22/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3569 - acc: 0.5748 - val_loss: 1.0595 - val_acc: 0.5798\n\nEpoch 00022: val_acc did not improve from 0.62219\nEpoch 23/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.3433 - acc: 0.5846 - val_loss: 1.1161 - val_acc: 0.6496\n\nEpoch 00023: val_acc improved from 0.62219 to 0.64963, saving model to /floyd/home/m6_t4.h5\nEpoch 24/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.3175 - acc: 0.5838 - val_loss: 1.3411 - val_acc: 0.5786\n\nEpoch 00024: val_acc did not improve from 0.64963\nEpoch 25/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.2893 - acc: 0.5923 - val_loss: 0.8508 - val_acc: 0.6359\n\nEpoch 00025: val_acc did not improve from 0.64963\nEpoch 26/30\n1013/1013 [==============================] - 78s 77ms/step - loss: 1.2680 - acc: 0.6015 - val_loss: 2.6457 - val_acc: 0.6384\n\nEpoch 00026: val_acc did not improve from 0.64963\nEpoch 27/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2438 - acc: 0.6106 - val_loss: 1.2376 - val_acc: 0.6110\n\nEpoch 00027: val_acc did not improve from 0.64963\nEpoch 28/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.2427 - acc: 0.6084 - val_loss: 0.9293 - val_acc: 0.6284\n\nEpoch 00028: val_acc did not improve from 0.64963\nEpoch 29/30\n1013/1013 [==============================] - 77s 76ms/step - loss: 1.2425 - acc: 0.6044 - val_loss: 1.1389 - val_acc: 0.5773\n\nEpoch 00029: val_acc did not improve from 0.64963\nEpoch 30/30\n1013/1013 [==============================] - 76s 75ms/step - loss: 1.2136 - acc: 0.6195 - val_loss: 1.2597 - val_acc: 0.5985\n\nEpoch 00030: val_acc did not improve from 0.64963\nCPU times: user 53min 17s, sys: 6min 8s, total: 59min 25s\nWall time: 38min 30s\n"
],
[
"# creating dictionary for model names, models, and histories from respective lists\nmodels_dict = {i:[j,k] for i,j,k in zip(model_names,model_list,model_hists)}",
"_____no_output_____"
],
[
"# evaluating models on validation set\nfor key, value in models_dict.items():\n model = models.load_model(f'{model_path}{key}.h5')\n (val_loss, val_accuracy) = model.evaluate(val_generator,verbose=1)\n print(f'{key} Val Accuracy: {round((val_accuracy*100),2)}%')\n print(f'{key} Val Loss: {round(val_loss,4)}')\n print('---')",
"102/102 [==============================] - 2s 17ms/step\nm6_t1 Val Accuracy: 64.57%\nm6_t1 Val Loss: 1.0852\n---\n102/102 [==============================] - 2s 17ms/step\nm6_t2 Val Accuracy: 66.67%\nm6_t2 Val Loss: 2.523\n---\n102/102 [==============================] - 2s 17ms/step\nm6_t3 Val Accuracy: 63.46%\nm6_t3 Val Loss: 1.7824\n---\n102/102 [==============================] - 2s 17ms/step\nm6_t4 Val Accuracy: 65.31%\nm6_t4 Val Loss: 1.911\n---\n"
],
[
"# saving training histories\nfor key, value in models_dict.items():\n with open(f'{model_path}{key}_history', 'wb') as file_pi:\n pickle.dump(value[1].history, file_pi)\n print(f'{key}_history saved in {model_path}')",
"m6_t1_history saved in /floyd/home/\nm6_t2_history saved in /floyd/home/\nm6_t3_history saved in /floyd/home/\nm6_t4_history saved in /floyd/home/\n"
]
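The pickled histories saved above can be reloaded later to compare the four weight initializations without re-training. A minimal sketch of such a comparison (not part of the original run; it assumes the four `m6_t*_history` pickles written above, the `model_path` used throughout this section, and the `'val_acc'` key that Keras records when `metrics=['acc']` is used):

```python
# Sketch: reload the pickled training histories and overlay validation accuracy.
import pickle
from matplotlib import pyplot as plt

plt.figure(figsize=(10, 6))
for name in ['m6_t1', 'm6_t2', 'm6_t3', 'm6_t4']:
    with open(f'{model_path}{name}_history', 'rb') as f:
        hist = pickle.load(f)          # dict with 'acc', 'val_acc', 'loss', 'val_loss'
    plt.plot(hist['val_acc'], label=name)
plt.xlabel('epoch')
plt.ylabel('validation accuracy')
plt.title('Initializer comparison (m6 trials)')
plt.legend()
plt.show()
```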
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdba30809f2e66c30ba08f12b1c11f547a89c03 | 18,539 | ipynb | Jupyter Notebook | examples/geoclaw_test1/MakeInputFiles_test1.ipynb | clawpack/new_features_for_v5.7.0 | 70e2ef96be3f3cd492f85e935392b149606de3bf | [
"BSD-3-Clause"
] | 2 | 2021-06-24T23:22:43.000Z | 2021-10-04T02:51:58.000Z | examples/geoclaw_test1/MakeInputFiles_test1.ipynb | clawpack/new_features_for_v5.7.0 | 70e2ef96be3f3cd492f85e935392b149606de3bf | [
"BSD-3-Clause"
] | null | null | null | examples/geoclaw_test1/MakeInputFiles_test1.ipynb | clawpack/new_features_for_v5.7.0 | 70e2ef96be3f3cd492f85e935392b149606de3bf | [
"BSD-3-Clause"
] | 1 | 2020-01-11T00:57:09.000Z | 2020-01-11T00:57:09.000Z | 30.949917 | 411 | 0.567237 | [
[
[
"# Make Input Files -- Test 1\n\nFor this example simple artificial topography is generated in order to illustrate various things.\n\nContents:\n\n - [Define ocean topography](#topo_ocean)\n - [Define topo for small coastal region](#topo_coast)\n - [Create dtopo for an earthquake source](#dtopo)\n - [Force Dry array](#force_dry)\n ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"from pylab import *\nfrom scipy.interpolate import interp1d",
"_____no_output_____"
],
[
"sys.path.insert(0,'../../new_python')\nimport topotools, marching_front, plottools, dtopotools",
"_____no_output_____"
]
],
[
[
"<div id=\"topo_ocean\"></div>\n\n## Define ocean topography\n\nThis simple topography is piecewise linear in $x$ (longitude) with a continental shelf and beach, and constant in the $y$ (latitude) direction. It is placed at the equator so distances are roughly equal in $x$ and $y$, and also placed at longitude 0. ",
"_____no_output_____"
]
],
[
[
"# Define piecewise linear function (unequally spaced):\nxocean = array([-2,-1,-0.5,-0.1,0.1])\nzocean = array([-3000,-3000,-100,-100,100])\n\n# Interpolate to equally spaced grid for topofile:\nxo = arange(-2,0.2,0.1)\nyo = array([-2,2])\nzfunc = interp1d(xocean,zocean,fill_value=\"extrapolate\")\nzo = zfunc(xo)\n\n# Convert to 2d arrays:\nXo,Yo = meshgrid(xo,yo)\nZo = vstack((zo,zo))",
"_____no_output_____"
],
[
"figure(figsize=(12,5))\nsubplot(121)\ncontourf(Xo,Yo,Zo)\ncolorbar()\ntitle('Ocean Topography')\nsubplot(122)\nplot(xo,zo,'k-')\nfill_between(xo,zo,maximum(zo,0),color=[.5,.5,1])\ntitle('Topography on transect')",
"_____no_output_____"
]
],
[
[
"### Save as a topofile:",
"_____no_output_____"
]
],
[
[
"topo = topotools.Topography()\ntopo.set_xyZ(xo,yo,Zo)\n\ntopo.write('input_files/topo_ocean.tt3', topo_type=3, Z_format=\"%11.3e\")",
"_____no_output_____"
]
],
[
[
"<div id=\"topo_coast\"></div>\n\n## Define topo for small coastal region\n\nWe define some more complicated topography on a finer grid over a small coastal region with 1/3 arcsecond resolution, chosen to be aligned with integer multiples of degrees (e.g. a grid point at longitude `x=0` and latitude `y=0`) as typical of real DEMs from NCEI. This is important when aligning computational grids and fgmax grids (if used) in `setrun.py`. \n\nWe will use a cutoff function so that this fine-scale topo matches the linear beach profile of the ocean topography along the edges of this rectangle. The cutoff is 1 in the center of the rectangle and decays to 0 at the edges:",
"_____no_output_____"
]
],
[
[
"# choose DEM grid points:\narcsec13 = 1./(3*3600.) # 1/3 arcsecond\nprint('arcsec13 = %.6f degrees = %.2f meters' % (arcsec13,arcsec13*111e3))\nx = arange(-100*arcsec13, 150*arcsec13, arcsec13)\ny = arange(-55*arcsec13, 55*arcsec13, arcsec13)\nX,Y = meshgrid(x,y)\nprint('X.shape = ', X.shape)\n\nx1,x2 = x.min(), x.max()\ny1,y2 = y.min(), y.max()\nprint('Extent of coastal topo: (%.6f, %.6f, %.6f, %.6f)' % (x1,x2,y1,y2))\n\n# define and plot the cutoff function:\n\nw = 0.001 # width of cutoff layer\ncutoff = 1. / (1. + exp(1e4*(X-(x2-w))) + exp(1e4*((x1+w)-X)) \\\n + exp(1e4*(Y-(y2-w))) + exp(1e4*((y1+w)-Y)))\n\nfigure(figsize=(10,6))\ncontourf(X,Y,cutoff)\ncolorbar(shrink=0.5)\ngca().set_aspect(1)\ntitle('Cutoff function');",
"_____no_output_____"
]
],
[
[
"The topography in this region is the linearly sloping beach augmented by a Gaussian dip. The beach slope is chosen to agree with the ocean topography offshore (1 km / degree, about 1/100), while onshore there is a smaller slope in this region for illustration.",
"_____no_output_____"
]
],
[
[
"Z0 = 1e3*X # sloping beach matching ocean topography\nZ1 = where(X<0, 1e3*X, 0.2e3*X) # smaller slope on shore\nR1 = (X-0.004)**2 + (Y-0.002)**2\nZ1 += -4*exp(-500000*R1) # Gaussian dip\nZ = (1-cutoff)*Z0 + cutoff*Z1",
"_____no_output_____"
]
],
[
[
"### Plot the coastal topography:",
"_____no_output_____"
]
],
[
[
"# colors:\nc = [[.2,.2,1],[.5,.5,1],[.8,.8,1],[.7,1,.7],[.2,.8,0],[.9,.8,.2]]\n\nfigure(figsize=(12,7))\nsubplot(211)\ncontourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g'])\ngca().set_aspect(1.)\nxticks(rotation=20)\nxlabel('Longitude')\nylabel('Latitude')\n\nsubplot(212)\ncontourf(X*111e3,Y*111e3,Z,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X*111e3,Y*111e3,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g'])\ngca().set_aspect(1.)\nxticks(rotation=20)\nxlabel('meters')\nylabel('meters')\n\ntight_layout();",
"_____no_output_____"
]
],
[
[
"The lower plot in the figure above shows the same topography as on the top, but with x,y units of meters to better show the scale. Recall that 1 degree is about 111 km and 1/3 arcsec is about 10 meters.\n\nIn the plots above, the red contour is at $Z = 0$, and hence is the \"shoreline\". However, the isolated \"lake\" with elevation $Z < 0$ could be dry land below sea level. Normally with GeoClaw this region would be filled with water initially up to $Z = 0$ everywhere. Below in [the Force_Dry section](#force_dry), we discuss how to force this region to be initialized as dry if it is in fact dry land.",
"_____no_output_____"
],
[
"### Save this as a topofile:",
"_____no_output_____"
]
],
[
[
"topo = topotools.Topography()\ntopo.set_xyZ(x,y,Z)\n\ntopo.write('input_files/topo_shore.tt3', topo_type=3, Z_format=\"%11.3e\")\n#topo.plot()",
"_____no_output_____"
]
],
[
[
"### Plot both topo sets together\n\nThe coastal region above is very small compared to the ocean region defined above. Here we plot both together:",
"_____no_output_____"
]
],
[
[
"def plot_topo(add_colorbar=False):\n contourf(Xo,Yo,Zo,[-2,-1,0,1,2],colors=c,extend='both')\n contourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both')\n if add_colorbar: \n cb = colorbar()\n cb.set_label('meters')\n #contour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g'])\n plot([x1,x1,x2,x2,x1],[y1,y2,y2,y1,y1],'k')\n gca().set_aspect(1.)\n xticks(rotation=20)\n xlabel('Longitude')\n ylabel('Latitude')\n\nfigure(figsize=(12,6))\nsubplot(121)\nplot_topo()\ntitle('Ocean Topography')\nsubplot(122)\nplot_topo(add_colorbar=True)\naxis([-0.005, 0.015, -0.01, 0.01])\ntitle('Zoom around shore')\ntight_layout()",
"_____no_output_____"
]
],
[
[
"In the plot on the left above, the black rectangle showing the extent of the coastal DEM is barely visible. Zooming in shows that the topography does match up near the edges of this rectangle. In GeoClaw the finest available topography is used when computing cell-averaged topo values, so the coastal DEM will be used for any cell that overlaps this region. ",
"_____no_output_____"
],
[
"<div id=\"dtopo\"></div>\n\n## Create dtopo for an earthquake source:\n\nWe define a simple earthquake in which there is uniform slip on a single subfault. The parameters are chosen to be somewhat reasonable for a subduction zone event offshore, but the shape is a bit odd (width 100 km and length 50 km) in order to give a smallish event with the desired onshore subsidence, for illustration purposes.",
"_____no_output_____"
]
],
[
[
"subfault = dtopotools.SubFault()\nsubfault.strike = 0.\nsubfault.length = 50.e3\nsubfault.width = 100.e3\nsubfault.depth = 10.e3\nsubfault.slip = 5.\nsubfault.rake = 90.\nsubfault.dip = 10.\nsubfault.longitude = -1.\nsubfault.latitude = 0.\nsubfault.coordinate_specification = \"top center\"\n\nfault = dtopotools.Fault()\nfault.subfaults = [subfault]\n\nprint(\"Earthquake magnitude: Mw = %.2f\" % fault.Mw())\ndtopo_fname = 'input_files/dtopo_test.tt3'\nprint(\"Using Okada model to create dtopo file\", dtopo_fname)\n\nx_deform = linspace(-2, 1, 100)\ny_deform = linspace(-1, 1, 100)\ntimes = [1.]\n\nfault.create_dtopography(x_deform,y_deform,times)\ndtopo = fault.dtopo\n\ndtopo.write(dtopo_fname, dtopo_type=3)\n\nfigure(figsize=(12,6))\nax = subplot(121)\ndtopo.plot_dZ_colors(2.,axes=ax,dZ_interval=0.5)\ncontour(Xo,Yo,Zo,[-110,-90,0],colors=['b','b','r'],linestyles='--')\nax.set_aspect(1.)\naxis([-2,0.5,-2,2])\nxlabel('Longitude')\nylabel('Latitude')\n\nax = subplot(122)\nylat = 0.\njlat = where(dtopo.y<=ylat)[0].max()\nplot(dtopo.x, dtopo.dZ[0,jlat,:],'g')\nplot(dtopo.x, 0*dtopo.x, 'k')\nxlabel('Longitude')\ntitle('Vertical displacement on transect at latitude %.2f' % ylat);",
"_____no_output_____"
]
],
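The magnitude printed by `fault.Mw()` can be sanity-checked by hand from the subfault parameters. The sketch below is not part of the original notebook; it assumes a rigidity (shear modulus) of about 4e10 Pa, which is the usual dtopotools default, and a Hanks–Kanamori style moment-to-magnitude relation, so a different rigidity or constant shifts the result slightly.

```python
# Hand-check of the moment magnitude (sketch; mu is an assumed default).
from math import log10

mu = 4e10                        # shear modulus [Pa], assumed default
slip = 5.0                       # [m]
area = 50.e3 * 100.e3            # length * width [m^2]
Mo = mu * slip * area            # seismic moment, about 1e21 N-m
Mw = 2./3. * (log10(Mo) - 9.05)  # Hanks-Kanamori style relation
print('Mw approximately %.2f' % Mw)   # roughly 7.97 under these assumptions
```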
[
[
"The left plot above shows the sea floor deformation as contours and colors, along with the extent of the continental shelf as blue dashed lines and the shoreline as a red dashed line. The plot on the right shows the vertical deformation along a transect at latitude 0 going through the coastal region of interest. \n\nWe can compute the subsidence at the location on the shoreline where our fine scale topography is defined as:",
"_____no_output_____"
]
],
[
[
"xlon = 0.\nilon = where(dtopo.x<=xlon)[0].max()\nylat = 0.\njlat = where(dtopo.y<=ylat)[0].max()\n#print(ilon,jlat)\ndz0 = dtopo.dZ[0,jlat,ilon]\nprint('Surface deformation at x=%.2f, y=%.2f is dz = %.2f meters' \\\n % (xlon,ylat,dz0))",
"_____no_output_____"
]
],
[
[
"This subsidence is enough to significantly change the shoreline location, as seen below:",
"_____no_output_____"
]
],
[
[
"figure(figsize=(12,6))\nsubplot(211)\ncontourf(X,Y,Z,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X,Y,Z,[-2,-1,0,1,2],colors=['b','b','r','g','g'])\ngca().set_aspect(1.)\nxticks(rotation=20)\n#xlim(-0.002,0.008)\nxlabel('Longitude')\nylabel('Latitude')\ntitle('Original topo')\n\nsubplot(212)\nZ_postquake = Z + dz0\ncontourf(X,Y,Z_postquake,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X,Y,Z_postquake,[-2,-1,0,1,2],colors=['b','b','r','g','g'])\ngca().set_aspect(1.)\nxticks(rotation=20)\n#xlim(-0.002,0.008)\nxlabel('Longitude')\nylabel('Latitude')\ntitle('Subsided topo, dz = %.2f m' % dz0);\n\ntight_layout()\nsavefig('topo_with_dz.png')",
"_____no_output_____"
]
],
[
[
"<div id=\"force_dry\"></div>\n\n# Force Dry array\n\nNow suppose that the onshore lake shown in the plots above is really a depression that should be dry land in spite of being below sea level. We can use the marching front algorithm described in the notebook [MarchingFront.ipynb](MarchingFront.ipynb) to identify points that are below sea level but disconnected from the coast. \n\nWe use the marching front algorithm starting by assuming any point with `Z < Z1 = -5` meters should be wet and marching to find all connected points with elevation up to `Z = Z2 = 0`:",
"_____no_output_____"
]
],
[
[
"wet_points = marching_front.select_by_flooding(topo.Z, Z1=-5., Z2=0., max_iters=None)",
"_____no_output_____"
]
],
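A quick way to see what the marching front selected is to count the flagged points; any below-sea-level points that were not flagged are the ones disconnected from the coast (the isolated depression). This check is a sketch and was not part of the original notebook:

```python
# Sketch: count wet-flagged points vs. all below-sea-level points in the coastal DEM.
n_wet = wet_points.sum()
n_below_sl = (topo.Z < 0.).sum()
print('%i points flagged wet, %i points below sea level' % (n_wet, n_below_sl))
print('=> %i below-sea-level points are disconnected from the coast' % (n_below_sl - n_wet))
```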
[
[
"See the notebook [ForceDry.ipynb](ForceDry.ipynb) for more discussion of the cells below...",
"_____no_output_____"
]
],
[
[
"Zdry = ma.masked_array(topo.Z, wet_points)\nZwet = ma.masked_array(topo.Z, logical_not(wet_points))\n\nfigure(figsize=(12,6))\nsubplot(211)\ncontourf(X,Y,Zdry,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X,Y,Z,[-2,-1,0,1,2],colors='k',linewidths=0.8)\ngca().set_aspect(1.)\nxticks(rotation=20)\n#xlim(-0.002,0.008)\nxlabel('Longitude')\nylabel('Latitude')\ntitle('Colored points are identified as initially dry');\n\nsubplot(212)\ncontourf(X,Y,Zwet,[-2,-1,0,1,2],colors=c,extend='both')\ncb = colorbar(shrink=0.9)\ncb.set_label('meters')\ncontour(X,Y,Z,[-2,-1,0,1,2],colors='k',linewidths=0.8)\ngca().set_aspect(1.)\nxticks(rotation=20)\n#xlim(-0.002,0.008)\nxlabel('Longitude')\nylabel('Latitude')\ntitle('Colored points are identified as initially wet');\ntight_layout()",
"_____no_output_____"
]
],
[
[
"## Create `force_dry_init` array for GeoClaw\n\nFirst we buffer the points identified above as discussed in the [ForceDry.ipynb](ForceDry.ipynb) notebook.",
"_____no_output_____"
]
],
[
[
"dry_points = 1 - wet_points\ndry_points_sum = dry_points[1:-1,1:-1] + dry_points[0:-2,1:-1] + dry_points[2:,1:-1] + \\\n dry_points[1:-1,0:-2] + dry_points[0:-2,0:-2] + dry_points[2:,0:-2] + \\\n dry_points[1:-1,2:] + dry_points[0:-2,2:] + dry_points[2:,2:]\n \n# initialize array to 0 everywhere:\nforce_dry_init = zeros(dry_points.shape)\n# reset in interior to 1 if all points in the 3x3 block around it are dry:\nforce_dry_init[1:-1,1:-1] = where(dry_points_sum == 9, 1, 0)",
"_____no_output_____"
]
],
[
[
"And finally create the input file needed for GeoClaw:",
"_____no_output_____"
]
],
[
[
"force_dry_init_topo = topotools.Topography()\nforce_dry_init_topo.set_xyZ(topo.x,topo.y,force_dry_init)\n\nfname_force_dry_init = 'input_files/force_dry_init.data'\nforce_dry_init_topo.write(fname_force_dry_init, topo_type=3, Z_format='%1i')\nprint('Created %s' % fname_force_dry_init)",
"_____no_output_____"
]
],
[
[
"See [RunGeoclaw.ipynb](RunGeoclaw.ipynb) for more discussion and sample GeoClaw results.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdba582681993b9f38f62720404b6731cb7c485 | 19,241 | ipynb | Jupyter Notebook | tutorials/0_getting_started.ipynb | apulverizer/movingpandas | 5e73a53f3e28c8a6af9dd338f827f9e01ef1ea6b | [
"BSD-3-Clause"
] | null | null | null | tutorials/0_getting_started.ipynb | apulverizer/movingpandas | 5e73a53f3e28c8a6af9dd338f827f9e01ef1ea6b | [
"BSD-3-Clause"
] | null | null | null | tutorials/0_getting_started.ipynb | apulverizer/movingpandas | 5e73a53f3e28c8a6af9dd338f827f9e01ef1ea6b | [
"BSD-3-Clause"
] | null | null | null | 26.723611 | 285 | 0.594044 | [
[
[
"# Tutorial 0: Getting started with MovingPandas\n\nMovingPandas provides a trajectory datatype based on GeoPandas.\nThe project home is at https://github.com/anitagraser/movingpandas\n\nThis tutorial presents some of the trajectory manipulation and visualization functions implemented in MovingPandas.\n\nAfter following this tutorial, you will have a basic understanding of what MovingPandas is and what it can be used for. You'll be ready to dive into application examples presented in the the follow-up tutorials:\n* [Tutorial 1: Ship data analysis](1_ship_data_analysis.ipynb)\n* [Tutorial 2: Bird migration analysis](2_bird_migration_analysis.ipynb)\n* [Tutorial 3: Horse collar data exploration](3_horse_collar.ipynb)\n* [Tutorial 4: Trajectory aggregation (flow maps)](4_generalization_and_aggregation.ipynb)",
"_____no_output_____"
],
[
"## Introduction\n\nMovingPandas follows the **trajectories = timeseries with geometries** approach of modeling movement data.\n\nA MovingPandas trajectory can be interpreted as either a time series of points or a time series of line segments.\nThe line-based approach has many advantages for trajectory analysis and visualization. (For more detail, see e.g. Westermeier (2018))\n\n\n\n\n\n\n### References\n\n* Graser, A. (2019). MovingPandas: Efficient Structures for Movement Data in Python. GI_Forum ‒ Journal of Geographic Information Science 2019, 1-2019, 54-68. doi:10.1553/giscience2019_01_s54. URL: https://www.austriaca.at/rootcollection?arp=0x003aba2b\n* Westermeier, E.M. (2018). Contextual Trajectory Modeling and Analysis. Master Thesis, Interfaculty Department of Geoinformatics, University of Salzburg.\n",
"_____no_output_____"
],
[
"## Jupyter notebook setup",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import urllib\nimport os\nimport pandas as pd\nimport geopandas as gpd\nfrom geopandas import GeoDataFrame, read_file\nfrom shapely.geometry import Point, LineString, Polygon\nfrom fiona.crs import from_epsg\nfrom datetime import datetime, timedelta\nfrom matplotlib import pyplot as plt\n\nimport sys\nsys.path.append(\"..\")\nimport movingpandas as mpd\n\nimport warnings\nwarnings.simplefilter(\"ignore\")",
"_____no_output_____"
],
[
"CRS_METRIC = from_epsg(31256)",
"_____no_output_____"
]
],
[
[
"## Creating a trajectory from scratch\n\nTrajectory objects consist of a trajectory ID and a GeoPandas GeoDataFrame with a DatetimeIndex. The data frame therefore represents the trajectory data as a Pandas time series with associated point locations (and optional further attributes).\n\nLet's create a small toy trajectory to see how this works:",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame([\n {'geometry':Point(0,0), 't':datetime(2018,1,1,12,0,0)},\n {'geometry':Point(6,0), 't':datetime(2018,1,1,12,6,0)},\n {'geometry':Point(6,6), 't':datetime(2018,1,1,12,10,0)},\n {'geometry':Point(9,9), 't':datetime(2018,1,1,12,15,0)}\n]).set_index('t')\ngeo_df = GeoDataFrame(df, crs=CRS_METRIC)\ntoy_traj = mpd.Trajectory(geo_df, 1)\ntoy_traj.df",
"_____no_output_____"
]
],
[
[
"We can access **key information** about our trajectory by looking at the print output:",
"_____no_output_____"
]
],
[
[
"print(toy_traj)",
"_____no_output_____"
]
],
[
[
"We can also access the trajectories GeoDataFrame:",
"_____no_output_____"
]
],
[
[
"toy_traj.df",
"_____no_output_____"
]
],
[
[
"## Visualizing trajectories\n\nTo **visualize the trajectory**, we can turn it into a linestring.\n\n(The notebook environment automatically plots Shapely geometry objects like the LineString returned by to_linestring().)",
"_____no_output_____"
]
],
[
[
"toy_traj.to_linestring()",
"_____no_output_____"
]
],
[
[
"We can **compute the speed** of movement along the trajectory (between consecutive points). The values are in meters per second:",
"_____no_output_____"
]
],
[
[
"toy_traj.add_speed(overwrite=True)\ntoy_traj.df",
"_____no_output_____"
]
],
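Since the toy trajectory uses a metric CRS, the speed values are easy to verify by hand. For instance, the first segment runs from (0,0) to (6,0), i.e. 6 meters covered in 6 minutes. A small check (a sketch, not part of the original tutorial):

```python
# Hand-check of the first segment's speed: 6 m covered in 6 minutes.
distance_m = 6.0
duration_s = 6 * 60
print(distance_m / duration_s)   # ~0.0167 m/s, which should match the speed of that segment above
```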
[
[
"We can also visualize the speed values:",
"_____no_output_____"
]
],
[
[
"toy_traj.plot(column=\"speed\", linewidth=5, capstyle='round', legend=True)",
"_____no_output_____"
]
],
[
[
"In contrast to the earlier example where we visualized the whole trajectory as one linestring, the trajectory plot() function draws each line segment individually and thus each can have a different color.",
"_____no_output_____"
],
[
"## Analyzing trajectories",
"_____no_output_____"
],
[
"MovingPandas provides many functions for trajectory analysis. \n\nTo see all available functions of the MovingPandas.Trajectory class use:",
"_____no_output_____"
]
],
[
[
"dir(mpd.Trajectory)",
"_____no_output_____"
]
],
[
[
"Functions that start with an underscore (e.g. ```__str__```) should not be called directly. All other functions are free to use.",
"_____no_output_____"
],
[
"### Extracting a moving object's position was at a certain time\n\nFor example, let's have a look at the get_position_at() function:",
"_____no_output_____"
]
],
[
[
"help(mpd.Trajectory.get_position_at)",
"_____no_output_____"
]
],
[
[
"When we call this method, the resulting point is directly rendered:",
"_____no_output_____"
]
],
[
[
"toy_traj.get_position_at(datetime(2018,1,1,12,6,0), method=\"nearest\") ",
"_____no_output_____"
]
],
[
[
"To see its coordinates, we can look at the print output:",
"_____no_output_____"
]
],
[
[
"print(toy_traj.get_position_at(datetime(2018,1,1,12,6,0), method=\"nearest\"))",
"_____no_output_____"
]
],
[
[
"The method parameter describes what the function should do if there is no entry in the trajectory GeoDataFrame for the specified timestamp. \n\nFor example, there is no entry at 2018-01-01 12:07:00",
"_____no_output_____"
]
],
[
[
"toy_traj.df",
"_____no_output_____"
],
[
"print(toy_traj.get_position_at(datetime(2018,1,1,12,7,0), method=\"nearest\"))\nprint(toy_traj.get_position_at(datetime(2018,1,1,12,7,0), method=\"interpolated\"))\nprint(toy_traj.get_position_at(datetime(2018,1,1,12,7,0), method=\"ffill\")) # from the previous row\nprint(toy_traj.get_position_at(datetime(2018,1,1,12,7,0), method=\"bfill\")) # from the following row",
"_____no_output_____"
]
],
[
[
"### Extracting trajectory segments based on time or geometry (i.e. clipping)\n\nFirst, let's extract the trajectory segment for a certain time period:",
"_____no_output_____"
]
],
[
[
"segment = toy_traj.get_segment_between(datetime(2018,1,1,12,6,0),datetime(2018,1,1,12,12,0))\nprint(segment)",
"_____no_output_____"
]
],
[
[
"Now, let's extract the trajectory segment that intersects with a given polygon:",
"_____no_output_____"
]
],
[
[
"xmin, xmax, ymin, ymax = 2, 8, -10, 5\npolygon = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)])\npolygon",
"_____no_output_____"
],
[
"intersections = toy_traj.clip(polygon)\nprint(intersections[0])",
"_____no_output_____"
],
[
"intersections[0].plot(linewidth=5, capstyle='round')",
"_____no_output_____"
]
],
[
[
"## Beyond toy trajectories: Loading trajectory data from GeoPackage\n\nThe MovingPandas repository contains a demo GeoPackage file that can be loaded as follows:",
"_____no_output_____"
]
],
[
[
"%%time\ndf = read_file('data/demodata_geolife.gpkg')\ndf['t'] = pd.to_datetime(df['t'])\ndf = df.set_index('t').tz_localize(None)\nprint(\"Finished reading {} rows\".format(len(df)))",
"_____no_output_____"
]
],
[
[
"After reading the trajectory point data from file, we want to construct the trajectories.\n\nThere are two options:\n\n1. Manually calling the Trajectory constructor\n2. Using TrajectoryCollection",
"_____no_output_____"
],
[
"### Option 1: Creating trajectories manually\n\nPandas makes it straightforward to group trajectory points by trajectory id. After the grouping step, we can call the Trajectory constructor: ",
"_____no_output_____"
]
],
[
[
"%%time\ntrajectories = []\nfor key, values in df.groupby(['trajectory_id']):\n trajectory = mpd.Trajectory(values, key)\n print(trajectory)\n trajectories.append(trajectory)\n\nprint(\"Finished creating {} trajectories\".format(len(trajectories)))",
"_____no_output_____"
]
],
[
[
"### Option 2: Creating trajectories with TrajectoryCollection\n\nTrajectoryCollection is a convenience class that takes care of creating trajectories from a GeoDataFrame:",
"_____no_output_____"
]
],
[
[
"traj_collection = mpd.TrajectoryCollection(df, 'trajectory_id')\nprint(traj_collection)",
"_____no_output_____"
],
[
"traj_collection.plot(column='trajectory_id', legend=True, figsize=(9,5))",
"_____no_output_____"
]
],
[
[
"#### Let's look at one of those trajectories:",
"_____no_output_____"
]
],
[
[
"traj_collection.trajectories[1].plot(column='speed', linewidth=5, capstyle='round', figsize=(9,3), legend=True, vmax=20)",
"_____no_output_____"
]
],
[
[
"To visualize trajectories in their geographical context, we can also create interactive plots with basemaps:",
"_____no_output_____"
]
],
[
[
"traj_collection.trajectories[1].hvplot(c='speed', width=700, height=400, line_width=7.0, tiles='StamenTonerBackground', cmap='Viridis', colorbar=True, clim=(0,20))",
"_____no_output_____"
]
],
[
[
"## Trajectory manipulation and handling\n\n### Finding intersections with a Shapely polygon\n\nThe clip function can be used to extract trajectory segments that are located within an area of interest polygon.\n\nThis is how to use clip on a list of Trajectory objects:",
"_____no_output_____"
]
],
[
[
"xmin, xmax, ymin, ymax = 116.3685035,116.3702945,39.904675,39.907728\npolygon = Polygon([(xmin,ymin), (xmin,ymax), (xmax,ymax), (xmax,ymin), (xmin,ymin)])\n\nintersections = []\nfor traj in trajectories:\n for intersection in traj.clip(polygon):\n intersections.append(intersection)\nprint(\"Found {} intersections\".format(len(intersections)))",
"_____no_output_____"
],
[
"intersections[2].plot(linewidth=5.0, capstyle='round')",
"_____no_output_____"
]
],
[
[
"Alternatively, using **TrajectoryCollection**:",
"_____no_output_____"
]
],
[
[
"clipped = traj_collection.clip(polygon)\nclipped.trajectories[2].plot(linewidth=5.0, capstyle='round')",
"_____no_output_____"
]
],
[
[
"### Splitting trajectories\n\nGaps are quite common in trajectories. For example, GPS tracks may contain gaps if moving objects enter tunnels where GPS reception is lost. In other use cases, moving objects may leave the observation area for longer time before returning and continuing their recorded track.\n\nDepending on the use case, we therefore might want to split trajectories at observation gaps that exceed a certain minimum duration:",
"_____no_output_____"
]
],
[
[
"my_traj = trajectories[1]\nprint(my_traj)\nmy_traj.plot(linewidth=5.0, capstyle='round')",
"_____no_output_____"
],
[
"split = my_traj.split_by_observation_gap(timedelta(minutes=5))\nfor traj in split:\n print(traj)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=len(split), figsize=(19,4))\nfor i, traj in enumerate(split):\n traj.plot(ax=axes[i], linewidth=5.0, capstyle='round')",
"_____no_output_____"
]
],
[
[
"### Generalizing trajectories\n\nTo reduce the size of trajectory objects, we can generalize them, for example, using the Douglas-Peucker algorithm:",
"_____no_output_____"
]
],
[
[
"original_traj = trajectories[1]\nprint(original_traj)",
"_____no_output_____"
],
[
"original_traj.plot(column='speed', linewidth=5, capstyle='round', figsize=(9,3), legend=True)",
"_____no_output_____"
]
],
[
[
"Try different tolerance settings and observe the results in line geometry and therefore also length:",
"_____no_output_____"
]
],
[
[
"generalized_traj = original_traj.generalize(mode='douglas-peucker', tolerance=0.001)\ngeneralized_traj.plot(column='speed', linewidth=5, capstyle='round', figsize=(9,3), legend=True)",
"_____no_output_____"
],
[
"print('Original length: %s'%(original_traj.get_length()))\nprint('Generalized length: %s'%(generalized_traj.get_length()))",
"_____no_output_____"
]
],
[
[
"An alternative generalization method is to down-sample the trajectory to ensure a certain time delta between records:",
"_____no_output_____"
]
],
[
[
"time_generalized = original_traj.generalize(mode='min-time-delta', tolerance=timedelta(minutes=1))\ntime_generalized.plot(column='speed', linewidth=5, capstyle='round', figsize=(9,3), legend=True)",
"_____no_output_____"
],
[
"time_generalized.df.head(10)",
"_____no_output_____"
],
[
"original_traj.df.head(10)",
"_____no_output_____"
]
],
[
[
"## Continue exploring MovingPandas\n\n* [Tutorial 1: Ship data analysis](1_ship_data_analysis.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
ecdbaac9c5ebbd3b93947213f44769c85981e5f9 | 1,787 | ipynb | Jupyter Notebook | Krishna/NBCH02/NBCH02.ipynb | jeremy-evert/PyInventGames | 90f772a8058d3e2a35ec2927bd381b8b20e1e2e3 | [
"MIT"
] | null | null | null | Krishna/NBCH02/NBCH02.ipynb | jeremy-evert/PyInventGames | 90f772a8058d3e2a35ec2927bd381b8b20e1e2e3 | [
"MIT"
] | null | null | null | Krishna/NBCH02/NBCH02.ipynb | jeremy-evert/PyInventGames | 90f772a8058d3e2a35ec2927bd381b8b20e1e2e3 | [
"MIT"
] | null | null | null | 17.182692 | 48 | 0.461108 | [
[
[
">>> spam = 'hello'\n>>> spam",
"_____no_output_____"
],
[
">>> 'Hello ' + 'World!'",
"_____no_output_____"
],
[
" print('Hello world!')\nprint('What is your name?') \nmyName = input()\nprint('It is good to meet you, ' + myName)",
"Hello world!\nWhat is your name?\nKrishna\nIt is good to meet you, Krishna\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
ecdbae56ad1d8fc9c5c4ed0dbada34b58ceecdf3 | 106,811 | ipynb | Jupyter Notebook | dynamic-programming/Dynamic_Programming.ipynb | tarunk04/RL-udacity | d0779c536b8bef13f1cfbafbf23f067cb4ea4792 | [
"MIT"
] | null | null | null | dynamic-programming/Dynamic_Programming.ipynb | tarunk04/RL-udacity | d0779c536b8bef13f1cfbafbf23f067cb4ea4792 | [
"MIT"
] | 4 | 2020-11-13T18:57:45.000Z | 2022-02-10T02:09:10.000Z | dynamic-programming/Dynamic_Programming.ipynb | tarunk04/RL-udacity | d0779c536b8bef13f1cfbafbf23f067cb4ea4792 | [
"MIT"
] | 1 | 2020-11-10T02:52:01.000Z | 2020-11-10T02:52:01.000Z | 112.551106 | 21,448 | 0.835691 | [
[
[
"# Dynamic Programming\n\nIn this notebook, you will write your own implementations of many classical dynamic programming algorithms. \n\nWhile we have provided some starter code, you are welcome to erase these hints and write your code from scratch.\n\n---\n\n### Part 0: Explore FrozenLakeEnv\n\nWe begin by importing the necessary packages.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport copy\n\nimport check_test\nfrom frozenlake import FrozenLakeEnv\nfrom plot_utils import plot_values",
"_____no_output_____"
]
],
[
[
"Use the code cell below to create an instance of the [FrozenLake](https://github.com/openai/gym/blob/master/gym/envs/toy_text/frozen_lake.py) environment.",
"_____no_output_____"
]
],
[
[
"env = FrozenLakeEnv(is_slippery=True)",
"_____no_output_____"
]
],
[
[
"The agent moves through a $4 \\times 4$ gridworld, with states numbered as follows:\n```\n[[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]\n [12 13 14 15]]\n```\nand the agent has 4 potential actions:\n```\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n```\n\nThus, $\\mathcal{S}^+ = \\{0, 1, \\ldots, 15\\}$, and $\\mathcal{A} = \\{0, 1, 2, 3\\}$. Verify this by running the code cell below.",
"_____no_output_____"
]
],
[
[
"# print the state space and action space\nprint(env.observation_space)\nprint(env.action_space)\n\n# print the total number of states and actions\nprint(env.nS)\nprint(env.nA)",
"Discrete(16)\nDiscrete(4)\n16\n4\n"
]
],
[
[
"Dynamic programming assumes that the agent has full knowledge of the MDP. We have already amended the `frozenlake.py` file to make the one-step dynamics accessible to the agent. \n\nExecute the code cell below to return the one-step dynamics corresponding to a particular state and action. In particular, `env.P[1][0]` returns the the probability of each possible reward and next state, if the agent is in state 1 of the gridworld and decides to go left.",
"_____no_output_____"
]
],
[
[
"env.P[14][1]",
"_____no_output_____"
]
],
[
[
"Each entry takes the form \n```\nprob, next_state, reward, done\n```\nwhere: \n- `prob` details the conditional probability of the corresponding (`next_state`, `reward`) pair, and\n- `done` is `True` if the `next_state` is a terminal state, and otherwise `False`.\n\nThus, we can interpret `env.P[1][0]` as follows:\n$$\n\\mathbb{P}(S_{t+1}=s',R_{t+1}=r|S_t=1,A_t=0) = \\begin{cases}\n \\frac{1}{3} \\text{ if } s'=1, r=0\\\\\n \\frac{1}{3} \\text{ if } s'=0, r=0\\\\\n \\frac{1}{3} \\text{ if } s'=5, r=0\\\\\n 0 \\text{ else}\n \\end{cases}\n$$\n\nTo understand the value of `env.P[1][0]`, note that when you create a FrozenLake environment, it takes as an (optional) argument `is_slippery`, which defaults to `True`. \n\nTo see this, change the first line in the notebook from `env = FrozenLakeEnv()` to `env = FrozenLakeEnv(is_slippery=False)`. Then, when you check `env.P[1][0]`, it should look like what you expect (i.e., `env.P[1][0] = [(1.0, 0, 0.0, False)]`).\n\nThe default value for the `is_slippery` argument is `True`, and so `env = FrozenLakeEnv()` is equivalent to `env = FrozenLakeEnv(is_slippery=True)`. In the event that `is_slippery=True`, you see that this can result in the agent moving in a direction that it did not intend (where the idea is that the ground is *slippery*, and so the agent can slide to a location other than the one it wanted).\n\nFeel free to change the code cell above to explore how the environment behaves in response to other (state, action) pairs. \n\nBefore proceeding to the next part, make sure that you set `is_slippery=True`, so that your implementations below will work with the slippery environment!",
"_____no_output_____"
],
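[
"As an illustrative sketch (not part of the original exercise; it only assumes the `env` created above), the entries of `env.P[s][a]` can be unpacked to compute, for example, the expected immediate reward and the next-state distribution for a state-action pair. This is the same pattern the dynamic programming functions below rely on.\n\n```python\nfrom collections import defaultdict\n\ns, a = 14, 1  # any state-action pair\nexpected_reward = 0.0\nnext_state_probs = defaultdict(float)\nfor prob, next_state, reward, done in env.P[s][a]:\n    expected_reward += prob * reward\n    next_state_probs[next_state] += prob\nprint(expected_reward)\nprint(dict(next_state_probs))\n```",
"_____no_output_____"
],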
[
"### Part 1: Iterative Policy Evaluation\n\nIn this section, you will write your own implementation of iterative policy evaluation.\n\nYour algorithm should accept four arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n- `theta`: This is a very small positive number that is used to decide if the estimate has sufficiently converged to the true value function (default value: `1e-8`).\n\nThe algorithm returns as **output**:\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s` under the input policy.\n\nPlease complete the function in the code cell below.",
"_____no_output_____"
]
],
[
[
"def policy_evaluation(env, policy, gamma=1, theta=1e-8):\n V = np.zeros(env.nS)\n \n ## TODO: complete the function\n while True:\n delta = 0\n for i, state in enumerate(env.P.values()):\n v = V[i]\n state_value = 0\n for a, action in enumerate(state.values()):\n for p, next_state, r, _ in action:\n state_value += policy[i][a] * p * (r + gamma * V[next_state])\n V[i] = state_value\n delta = max(delta, abs(v - V[i]))\n if delta <= theta:\n break\n return V",
"_____no_output_____"
]
],
[
[
"We will evaluate the equiprobable random policy $\\pi$, where $\\pi(a|s) = \\frac{1}{|\\mathcal{A}(s)|}$ for all $s\\in\\mathcal{S}$ and $a\\in\\mathcal{A}(s)$. \n\nUse the code cell below to specify this policy in the variable `random_policy`.",
"_____no_output_____"
]
],
[
[
"random_policy = np.ones([env.nS, env.nA]) / env.nA",
"_____no_output_____"
]
],
[
[
"Run the next code cell to evaluate the equiprobable random policy and visualize the output. The state-value function has been reshaped to match the shape of the gridworld.",
"_____no_output_____"
]
],
[
[
"# evaluate the policy \nV = policy_evaluation(env, random_policy)\n\nplot_values(V)",
"_____no_output_____"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that your `policy_evaluation` function satisfies the requirements outlined above (with four inputs, a single output, and with the default values of the input arguments unchanged).",
"_____no_output_____"
]
],
[
[
"check_test.run_check('policy_evaluation_check', policy_evaluation)",
"_____no_output_____"
]
],
[
[
"### Part 2: Obtain $q_\\pi$ from $v_\\pi$\n\nIn this section, you will write a function that takes the state-value function estimate as input, along with some state $s\\in\\mathcal{S}$. It returns the **row in the action-value function** corresponding to the input state $s\\in\\mathcal{S}$. That is, your function should accept as input both $v_\\pi$ and $s$, and return $q_\\pi(s,a)$ for all $a\\in\\mathcal{A}(s)$.\n\nYour algorithm should accept four arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n- `s`: This is an integer corresponding to a state in the environment. It should be a value between `0` and `(env.nS)-1`, inclusive.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as **output**:\n- `q`: This is a 1D numpy array with `q.shape[0]` equal to the number of actions (`env.nA`). `q[a]` contains the (estimated) value of state `s` and action `a`.\n\nPlease complete the function in the code cell below.",
"_____no_output_____"
]
],
[
[
"def q_from_v(env, V, s, gamma=1):\n q = np.zeros(env.nA)\n \n ## TODO: complete the function\n for i, action in enumerate(env.P[s].values()):\n action_value = 0\n for p, new_s, r, _ in action:\n action_value += p * (r + gamma * V[new_s])\n q[i] = action_value \n return q",
"_____no_output_____"
]
],
[
[
"Run the code cell below to print the action-value function corresponding to the above state-value function.",
"_____no_output_____"
]
],
[
[
"Q = np.zeros([env.nS, env.nA])\nfor s in range(env.nS):\n Q[s] = q_from_v(env, V, s)\nprint(\"Action-Value Function:\")\nprint(Q)",
"Action-Value Function:\n[[0.0147094 0.01393978 0.01393978 0.01317015]\n [0.00852356 0.01163091 0.0108613 0.01550788]\n [0.02444514 0.02095298 0.02406033 0.01435346]\n [0.01047649 0.01047649 0.00698432 0.01396865]\n [0.02166487 0.01701828 0.01624865 0.01006281]\n [0. 0. 0. 0. ]\n [0.05433538 0.04735105 0.05433538 0.00698432]\n [0. 0. 0. 0. ]\n [0.01701828 0.04099204 0.03480619 0.04640826]\n [0.07020885 0.11755991 0.10595784 0.05895312]\n [0.18940421 0.17582037 0.16001424 0.04297382]\n [0. 0. 0. 0. ]\n [0. 0. 0. 0. ]\n [0.08799677 0.20503718 0.23442716 0.17582037]\n [0.25238823 0.53837051 0.52711478 0.43929118]\n [0. 0. 0. 0. ]]\n"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that the `q_from_v` function satisfies the requirements outlined above (with four inputs, a single output, and with the default values of the input arguments unchanged).",
"_____no_output_____"
]
],
[
[
"check_test.run_check('q_from_v_check', q_from_v)",
"_____no_output_____"
]
],
[
[
"### Part 3: Policy Improvement\n\nIn this section, you will write your own implementation of policy improvement. \n\nYour algorithm should accept three arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as **output**:\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n\nPlease complete the function in the code cell below. You are encouraged to use the `q_from_v` function you implemented above.",
"_____no_output_____"
]
],
[
[
"def policy_improvement(env, V, gamma=1):\n policy = np.zeros([env.nS, env.nA]) / env.nA\n \n ## TODO: complete the function\n for s in range(env.nS):\n q = q_from_v(env, V, s, gamma)\n policy[s][np.argmax(q)] = 1\n \n return policy",
"_____no_output_____"
],
[
"np.argmax([9,10,3])",
"_____no_output_____"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that the `policy_improvement` function satisfies the requirements outlined above (with three inputs, a single output, and with the default values of the input arguments unchanged).\n\nBefore moving on to the next part of the notebook, you are strongly encouraged to check out the solution in **Dynamic_Programming_Solution.ipynb**. There are many correct ways to approach this function!",
"_____no_output_____"
]
],
[
[
"check_test.run_check('policy_improvement_check', policy_improvement)",
"_____no_output_____"
]
],
[
[
"### Part 4: Policy Iteration\n\nIn this section, you will write your own implementation of policy iteration. The algorithm returns the optimal policy, along with its corresponding state-value function.\n\nYour algorithm should accept three arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n- `theta`: This is a very small positive number that is used to decide if the policy evaluation step has sufficiently converged to the true value function (default value: `1e-8`).\n\nThe algorithm returns as **output**:\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n\nPlease complete the function in the code cell below. You are strongly encouraged to use the `policy_evaluation` and `policy_improvement` functions you implemented above.",
"_____no_output_____"
]
],
[
[
"def policy_iteration(env, gamma=1, theta=1e-8):\n policy = np.ones([env.nS, env.nA]) / env.nA\n \n ## TODO: complete the function\n while True:\n V = policy_evaluation(env, policy, gamma, theta)\n policy_updated = policy_improvement(env, V, gamma)\n if (policy == policy_updated).all():\n break\n policy = policy_updated\n \n return policy, V",
"_____no_output_____"
]
],
[
[
"Run the next code cell to solve the MDP and visualize the output. The optimal state-value function has been reshaped to match the shape of the gridworld.\n\n**Compare the optimal state-value function to the state-value function from Part 1 of this notebook**. _Is the optimal state-value function consistently greater than or equal to the state-value function for the equiprobable random policy?_",
"_____no_output_____"
]
],
[
[
"# obtain the optimal policy and optimal state-value function\npolicy_pi, V_pi = policy_iteration(env)\n\n# print the optimal policy\nprint(\"\\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\")\nprint(policy_pi,\"\\n\")\n\nplot_values(V_pi)",
"\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\n[[1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 1. 0.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]] \n\n"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that the `policy_iteration` function satisfies the requirements outlined above (with three inputs, two outputs, and with the default values of the input arguments unchanged).",
"_____no_output_____"
]
],
[
[
"check_test.run_check('policy_iteration_check', policy_iteration)",
"_____no_output_____"
]
],
[
[
"### Part 5: Truncated Policy Iteration\n\nIn this section, you will write your own implementation of truncated policy iteration. \n\nYou will begin by implementing truncated policy evaluation. Your algorithm should accept five arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n- `max_it`: This is a positive integer that corresponds to the number of sweeps through the state space (default value: `1`).\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n\nThe algorithm returns as **output**:\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n\nPlease complete the function in the code cell below.",
"_____no_output_____"
]
],
[
[
"def truncated_policy_evaluation(env, policy, V, max_it=1, gamma=1):\n \n ## TODO: complete the function\n counter = 0\n while counter < max_it:\n for i, s in enumerate(env.P.values()):\n state_value = 0\n for j, a in enumerate(s.values()):\n for p, new_s, r, _ in a:\n state_value += policy[i][j] * p * (r + gamma*V[new_s]) \n V[i] = state_value\n counter += 1 \n \n return V",
"_____no_output_____"
]
],
[
[
"Next, you will implement truncated policy iteration. Your algorithm should accept five arguments as **input**:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `max_it`: This is a positive integer that corresponds to the number of sweeps through the state space (default value: `1`).\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n- `theta`: This is a very small positive number that is used for the stopping criterion (default value: `1e-8`).\n\nThe algorithm returns as **output**:\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.\n\nPlease complete the function in the code cell below.",
"_____no_output_____"
]
],
[
[
"def truncated_policy_iteration(env, max_it=1, gamma=1, theta=1e-8):\n V = np.zeros(env.nS)\n policy = np.zeros([env.nS, env.nA]) / env.nA\n \n ## TODO: complete the function\n while True:\n policy = policy_improvement(env, V, gamma)\n V_old = copy.copy(V)\n V = truncated_policy_evaluation(env, policy, V, max_it, gamma)\n if abs(V_old - V).max() <= theta:\n break\n \n return policy, V",
"_____no_output_____"
]
],
[
[
"Run the next code cell to solve the MDP and visualize the output. The state-value function has been reshaped to match the shape of the gridworld.\n\nPlay with the value of the `max_it` argument. Do you always end with the optimal state-value function?",
"_____no_output_____"
]
],
[
[
"policy_tpi, V_tpi = truncated_policy_iteration(env, max_it=1)\n\n# print the optimal policy\nprint(\"\\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\")\nprint(policy_tpi,\"\\n\")\n\n# plot the optimal state-value function\nplot_values(V_tpi)",
"\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\n[[1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 1. 0.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]] \n\n"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that the `truncated_policy_iteration` function satisfies the requirements outlined above (with four inputs, two outputs, and with the default values of the input arguments unchanged).",
"_____no_output_____"
]
],
[
[
"check_test.run_check('truncated_policy_iteration_check', truncated_policy_iteration)",
"_____no_output_____"
]
],
[
[
"### Part 6: Value Iteration\n\nIn this section, you will write your own implementation of value iteration.\n\nYour algorithm should accept three arguments as input:\n- `env`: This is an instance of an OpenAI Gym environment, where `env.P` returns the one-step dynamics.\n- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).\n- `theta`: This is a very small positive number that is used for the stopping criterion (default value: `1e-8`).\n\nThe algorithm returns as **output**:\n- `policy`: This is a 2D numpy array with `policy.shape[0]` equal to the number of states (`env.nS`), and `policy.shape[1]` equal to the number of actions (`env.nA`). `policy[s][a]` returns the probability that the agent takes action `a` while in state `s` under the policy.\n- `V`: This is a 1D numpy array with `V.shape[0]` equal to the number of states (`env.nS`). `V[s]` contains the estimated value of state `s`.",
"_____no_output_____"
]
],
[
[
"def value_iteration(env, gamma=1, theta=1e-8):\n V = np.zeros(env.nS)\n \n ## TODO: complete the function\n while True:\n delta = 0\n for i, s in enumerate(env.P.values()):\n v = V[i]\n V[i] = max(q_from_v(env, V, i, gamma))\n delta = max(delta, abs(v - V[i]))\n if delta < theta:\n break\n \n policy = policy_improvement(env, V, gamma)\n \n return policy, V",
"_____no_output_____"
]
],
[
[
"Use the next code cell to solve the MDP and visualize the output. The state-value function has been reshaped to match the shape of the gridworld.",
"_____no_output_____"
]
],
[
[
"policy_vi, V_vi = value_iteration(env)\n\n# print the optimal policy\nprint(\"\\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\")\nprint(policy_vi,\"\\n\")\n\n# plot the optimal state-value function\nplot_values(V_vi)",
"\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):\n[[1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [0. 0. 0. 1.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 0. 1.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [1. 0. 0. 0.]\n [0. 0. 1. 0.]\n [0. 1. 0. 0.]\n [1. 0. 0. 0.]] \n\n"
]
],
[
[
"Run the code cell below to test your function. If the code cell returns **PASSED**, then you have implemented the function correctly! \n\n**Note:** In order to ensure accurate results, make sure that the `value_iteration` function satisfies the requirements outlined above (with three inputs, two outputs, and with the default values of the input arguments unchanged).",
"_____no_output_____"
]
],
[
[
"check_test.run_check('value_iteration_check', value_iteration)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdbb50a324ccd30bb13b5c4fdf5be30cf8da524 | 10,890 | ipynb | Jupyter Notebook | data_set/plotDiffPhones.ipynb | lunjohnzhang/RERAN | e77a900e35bdc8164954f810cb506cf451f57dd4 | [
"BSD-2-Clause"
] | null | null | null | data_set/plotDiffPhones.ipynb | lunjohnzhang/RERAN | e77a900e35bdc8164954f810cb506cf451f57dd4 | [
"BSD-2-Clause"
] | null | null | null | data_set/plotDiffPhones.ipynb | lunjohnzhang/RERAN | e77a900e35bdc8164954f810cb506cf451f57dd4 | [
"BSD-2-Clause"
] | null | null | null | 93.87931 | 6,976 | 0.844536 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import sys\ndef readIn(file, actual):\n try:\n replay = open(file)\n except:\n print(\"file not found\")\n sys.exit()\n\n lines = replay.readlines()\n times = []\n for i, line in enumerate(lines):\n if i >= 40:\n break;\n timeRaw = line.split(\"real\")[0].strip()\n times.append(float(timeRaw)/actual)\n return times\ntimes1 = readIn(\"exp2/replayTimeSet1.txt\", 12.512803)\nprint(times1)\ntimes2 = readIn(\"exp2/replayTimeSet2.txt\", 11.53)\nprint(times2)",
"[1.598362892790688, 1.6151457031649903, 1.6742851301982458, 1.593567804112316, 1.6439162352352228, 1.598362892790688, 1.6686908600734782, 1.5272357440615023, 1.6439162352352228, 1.5160472038119674, 1.5999612556834788, 1.9156379270096395, 1.4361290591724332, 1.5616005462565021, 1.3450223742833638, 1.34741991862255, 1.3354321969266199, 1.368198636228829, 1.3466207371761547, 1.3442231928369688, 1.3689978176752242, 1.3522150073009223, 1.3562109145328987, 1.3554117330865036, 1.365801091889643, 1.3282395639090618, 1.3226452937842945, 1.353813370193713, 1.317051023659527, 1.3594076403184803, 1.333034652587434, 1.342624829944178, 1.3434240113905733, 1.353813370193713, 1.3586084588720848, 1.3322354711410387, 1.3314362896946432, 1.3370305598194105, 1.3434240113905733, 1.34741991862255]\n[1.4804856895056375, 1.4778837814397225, 1.464006938421509, 1.463139635732871, 1.4978317432784043, 1.4787510841283609, 1.530789245446661, 1.4778837814397225, 1.4648742411101476, 1.4657415437987857, 1.4787510841283609, 1.496964440589766, 1.5047701647875111, 1.4822202948829142, 1.4648742411101476, 1.5394622723330442, 1.5065047701647878, 1.4692107545533393, 1.476149176062446, 1.4744145706851692, 1.4752818733738078, 1.4882914137033825, 1.4648742411101476, 1.4770164787510842, 1.5377276669557678, 1.4796183868169992, 1.4934952298352124, 1.4804856895056375, 1.476149176062446, 1.4744145706851692, 1.4787510841283609, 1.559410234171726, 1.4882914137033825, 1.5108412836079794, 1.4770164787510842, 1.4726799653078926, 1.476149176062446, 1.4752818733738078, 1.476149176062446, 1.476149176062446]\n"
],
[
"plt.axis([0, 40, 1, 2])\nplt.plot(times1, 'ro', times2, 'bs')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
ecdbc181761e8475339553b91f34823b8db02b4e | 457,541 | ipynb | Jupyter Notebook | examples/example-1a-Spin-bath-model-basic.ipynb | tehruhn/bofin_fast | ffdcae887f1e6a6fd644d7a0d5bbd29fc445d1e5 | [
"BSD-3-Clause"
] | 2 | 2020-10-30T09:39:47.000Z | 2021-11-09T04:27:26.000Z | examples/example-1a-Spin-bath-model-basic.ipynb | tehruhn/bofin_fast | ffdcae887f1e6a6fd644d7a0d5bbd29fc445d1e5 | [
"BSD-3-Clause"
] | 1 | 2020-10-21T04:45:09.000Z | 2020-10-21T04:45:09.000Z | examples/example-1a-Spin-bath-model-basic.ipynb | tehruhn/bofin_fast | ffdcae887f1e6a6fd644d7a0d5bbd29fc445d1e5 | [
"BSD-3-Clause"
] | 1 | 2020-10-30T09:39:52.000Z | 2020-10-30T09:39:52.000Z | 328.929547 | 135,344 | 0.921784 | [
[
[
"# Example 1a: Spin-Bath model (basic)\n\n### Introduction",
"_____no_output_____"
],
[
"The HEOM method solves the dynamics and steady state of a system and its environment, the latter of which is encoded in a set of auxiliary density matrices.\n\nIn this example we show the evolution of a single two-level system in contact with a single Bosonic environment. The properties of the system are encoded in Hamiltonian, and a coupling operator which describes how it is coupled to the environment.\n\nThe Bosonic environment is implicitly assumed to obey a particular Hamiltonian (see paper), the parameters of which are encoded in the spectral density, and subsequently the free-bath correlation functions.\n\nIn the example below we show how to model the overdamped Drude-Lorentz Spectral Density, commonly used with the HEOM. We show how to do this the Matsubara, Pade and fitting decompositions, and compare their convergence. \n\n### Drude-Lorentz (overdamped) spectral density\nThe Drude-Lorentz spectral density is:\n\n$$J_D(\\omega)= \\frac{2\\omega\\lambda\\gamma}{{\\gamma}^2 + \\omega^2}$$\n\nwhere $\\lambda$ scales the coupling strength, and $\\gamma$ is the cut-off frequency. We use the convention,\n\\begin{equation*}\nC(t) = \\int_0^{\\infty} d\\omega \\frac{J_D(\\omega)}{\\pi}[\\coth(\\beta\\omega) \\cos(\\omega \\tau) - i \\sin(\\omega \\tau)]\n\\end{equation*}\n\nWith the HEOM we must use an exponential decomposition:\n\n\\begin{equation*}\nC(t)=\\sum_{k=0}^{k=\\infty} c_k e^{-\\nu_k t}\n\\end{equation*}\n\nAs an example, the Matsubara decomposition of the Drude-Lorentz spectral density is given by:\n\n\\begin{equation*}\n \\nu_k = \\begin{cases}\n \\gamma & k = 0\\\\\n {2 \\pi k} / {\\beta } & k \\geq 1\\\\\n \\end{cases}\n\\end{equation*}\n\n\\begin{equation*}\n c_k = \\begin{cases}\n \\lambda \\gamma (\\cot(\\beta \\gamma / 2) - i) & k = 0\\\\\n 4 \\lambda \\gamma \\nu_k / \\{(nu_k^2 - \\gamma^2)\\beta \\} & k \\geq 1\\\\\n \\end{cases}\n\\end{equation*}\n\nNote that in the above, and the following, we set $\\hbar = k_\\mathrm{B} = 1$.\n\n\n\nNote that in the above, and the following, we set $\\hbar = k_\\mathrm{B} = 1$.\n\n",
"_____no_output_____"
]
],
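[
[
"As a quick numerical sketch of the decomposition above (independent of the HEOM solver; it only assumes NumPy and uses the parameter values chosen later in this notebook, and the helper name is ours, not part of the BoFiN API), the first few Matsubara coefficients and frequencies can be tabulated directly from the formulas:\n\n```python\nimport numpy as np\n\nlam, gamma, T = 0.1, 0.5, 0.5  # coupling, cut-off and temperature used below\nbeta = 1.0 / T\n\ndef matsubara_terms(n_terms):\n    # illustrative helper: k = 0 term, then nu_k = 2*pi*k/beta for k >= 1\n    cks = [lam * gamma * (1.0 / np.tan(gamma * beta / 2.0) - 1.0j)]\n    vks = [gamma]\n    for k in range(1, n_terms):\n        vk = 2.0 * np.pi * k / beta\n        cks.append(4.0 * lam * gamma * vk / ((vk**2 - gamma**2) * beta))\n        vks.append(vk)\n    return cks, vks\n\ncks, vks = matsubara_terms(4)\nfor k, (c, v) in enumerate(zip(cks, vks)):\n    print(k, c, v)\n```",
"_____no_output_____"
]
],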
[
[
"%pylab inline\nfrom qutip import *",
"The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed')).History will not be written to the database.\nPopulating the interactive namespace from numpy and matplotlib\n"
],
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"\nfrom bofinfast.heom import BosonicHEOMSolver",
"_____no_output_____"
],
[
"\ndef cot(x):\n return 1./np.tan(x)\n",
"_____no_output_____"
],
[
"# Defining the system Hamiltonian\neps = .5 # Energy of the 2-level system.\nDel = 1.0 # Tunnelling term\nHsys = 0.5 * eps * sigmaz() + 0.5 * Del* sigmax()",
"_____no_output_____"
],
[
"# Initial state of the system.\nrho0 = basis(2,0) * basis(2,0).dag() ",
"_____no_output_____"
],
[
"# System-bath coupling (Drude-Lorentz spectral density)\nQ = sigmaz() # coupling operator\n\ntlist = np.linspace(0, 50, 1000)\n\n#Bath properties:\ngamma = .5 # cut off frequency\nlam = .1 # coupling strength\nT = 0.5\nbeta = 1./T\n\n#HEOM parameters\nNC = 5 # cut off parameter for the bath\n\n\n",
"_____no_output_____"
],
[
"#Plot of spectral density\n\nwlist = np.linspace(0, 5, 1000)\npref = 1.\n\nJ = [w * 2 * lam * gamma / ((gamma**2 + w**2)) for w in wlist]\n\n# Plot the results\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8))\naxes.plot(wlist, J, 'r', linewidth=2)\naxes.set_xlabel(r'$\\omega$', fontsize=28)\naxes.set_ylabel(r'J', fontsize=28)\n\n",
"_____no_output_____"
],
[
"Nk = 2 # number of exponentials in approximation of the Matsubara approximation\n\n\ndef _calc_matsubara_params():\n \"\"\"\n Calculate the Matsubara coefficents and frequencies\n Returns\n -------\n c, nu: both list(float)\n \"\"\"\n c = []\n nu = []\n lam0 = lam\n gam = gamma\n hbar = 1\n beta = 1.0/T\n N_m = Nk\n\n g = 2*np.pi / (beta)\n for k in range(N_m):\n if k == 0:\n nu.append(gam)\n c.append(lam0*gam*\n (1.0/np.tan(gam*hbar*beta/2.0) - 1j) / hbar)\n else:\n g = 2*np.pi / (beta)\n nu.append(k*g)\n c.append(4*lam0*gam*nu[k] /\n ((nu[k]**2 - gam**2)*beta*hbar**2))\n\n \n return c, nu\n\nctest,nutest=_calc_matsubara_params()\n\n\n\nckAR = [ lam * gamma * (cot(gamma / (2 * T)))]\nckAR.extend([(4 * lam * gamma * T * 2 * np.pi * k * T / (( 2 * np.pi * k * T)**2 - gamma**2)) for k in range(1,Nk+1)])\n\nvkAR = [gamma]\nvkAR.extend([2 * np.pi * k * T for k in range(1,Nk+1)])\n\nckAI = [lam * gamma * (-1.0)]\n\nvkAI = [gamma]\n\n",
"_____no_output_____"
],
[
"\nNR = len(ckAR)\nNI = len(ckAI)\nQ2 = [Q for kk in range(NR+NI)]\n# print(Q2)\noptions = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14)\nimport time\nstart = time.time()\nHEOMMats = BosonicHEOMSolver(Hsys, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\nend = time.time()\nprint(\"Construction time\", end - start)\n\n\nstart = time.time()\nresultMats = HEOMMats.run(rho0, tlist) #normal 115\nend = time.time()\nprint(\"ODE solver time\", end - start)",
"Construction time 0.18285799026489258\nODE solver time 0.3123600482940674\n"
],
[
"# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11exp = expect(resultMats.states, P11p)\nP22exp = expect(resultMats.states, P22p)\nP12exp = expect(resultMats.states, P12p)",
"_____no_output_____"
]
],
[
[
"## Ishizaki-Tanimura Terminator\n\nThe value of $Re[C(t=0)]$ diverges. We can treat that component as a delta-function distribution, and include it as Lindblad correction. This is sometimes known as the Ishizaki-Tanimura Terminator.\n\nIn more detail, given\n\n\\begin{equation*}\nC(t)=\\sum_{k=0}^{\\infty} c_k e^{-\\nu_k t}\n\\end{equation*}\nsince $\\nu_k=\\frac{2 \\pi k}{\\beta }$, if $1/\\nu_k$ is much much smaller than other important time-scales, we can approximate, $ e^{-\\nu_k t} \\approx \\delta(t)/\\nu_k$, and $C(t)=\\sum_{k=N_k}^{\\infty} \\frac{c_k}{\\nu_k} \\delta(t)$\n\nIt is convenient to calculate the whole sum $C(t)=\\sum_{k=0}^{\\infty} \\frac{c_k}{\\nu_k} = 2 \\lambda / (\\beta \\gamma) - i\\lambda $, and subtract off the contribution from the finite number of Matsubara terms that are kept in the hierarchy, and treat the residual as a Lindblad.",
"_____no_output_____"
]
],
[
[
"#This is clearer if we plot the correlation function with a large number of matsubara terms: the real part is \n#slowly diverging at t=0\n\nlmaxmats = 2\nanamax = 15000\ntlist_corr=linspace(0,2,100)\n\ndef c(t,mats):\n\n c_temp = (pref * lam * gamma * (-1.0j + cot(gamma / (2 * T))) * np.exp(-gamma * t))\n for k in range(1, mats):\n vk = 2 * np.pi * k * T\n c_temp += ((pref * 4 * lam * gamma * T * vk / (vk**2 - gamma**2)) * np.exp(- vk * t) ) \n \n \n return c_temp\n\n# Reals parts\ncorrRana = [np.real(c(t,anamax)) for t in tlist_corr]\n# Imaginary parts\ncorrIana = [np.imag((pref * lam * gamma * (-1.0j + cot(gamma / (2 * T))) * np.exp(-gamma * t))) for t in tlist_corr]\n\n\n\ncppL = c( tlist_corr,lmaxmats)\n\nfig, ax1 = plt.subplots(figsize=(12, 7))\n#print(gam_list)\nax1.plot( tlist_corr,real(cppL), color=\"b\", linewidth=3, label= r\"Mats = 2 real\")\nax1.plot( tlist_corr,imag(cppL), color=\"r\", linewidth=3, label= r\"Mats = 2 imag\")\nax1.plot( tlist_corr,corrRana, \"b--\", linewidth=3, label= r\"Mats = 15000 real\")\nax1.plot( tlist_corr,corrIana, \"r--\", linewidth=3, label= r\"Mats = 15000 imag\")\n\n\nax1.set_xlabel(\"t\")\nax1.set_ylabel(r\"$C$\")\nax1.legend()",
"_____no_output_____"
],
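[
"A small numerical sanity check of the statement above (a sketch only; it re-uses the parameter values of this notebook and plain NumPy): the closed-form sum $2 \\lambda / (\\beta \\gamma) - i\\lambda$ minus the terms kept in the hierarchy gives the residual used for the terminator below.\n\n```python\nimport numpy as np\n\nlam, gamma, T, Nk = 0.1, 0.5, 0.5, 2\nbeta = 1.0 / T\n\nclosed_form = 2.0 * lam / (beta * gamma) - 1.0j * lam\n\n# sum of c_k / nu_k over the Nk + 1 exponents kept in the hierarchy\nkept = lam * gamma * (1.0 / np.tan(gamma * beta / 2.0) - 1.0j) / gamma\nfor k in range(1, Nk + 1):\n    vk = 2.0 * np.pi * k * T\n    kept += (4.0 * lam * gamma * T * vk / (vk**2 - gamma**2)) / vk\n\nresidual = closed_form - kept  # same quantity as approx_factr computed below\nprint(closed_form, kept, residual)\n```",
"_____no_output_____"
],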
[
"#do version with tanimura terminator\n\nop = -2*spre(Q)*spost(Q.dag()) + spre(Q.dag()*Q) + spost(Q.dag()*Q)\n\napprox_factr = ((2 * lam / (beta * gamma)) - 1j*lam) \n\napprox_factr -= lam * gamma * (-1.0j + cot(gamma / (2 * T)))/gamma\nfor k in range(1,Nk+1):\n vk = 2 * np.pi * k * T\n \n approx_factr -= ((pref * 4 * lam * gamma * T * vk / (vk**2 - gamma**2))/ vk)\n \nL_bnd = -approx_factr*op\n\nLtot = -1.0j*(spre(Hsys)-spost(Hsys)) + L_bnd\nLtot = liouvillian(Hsys) + L_bnd\n\nNR = len(ckAR)\nNI = len(ckAI)\nQ2 = [Q for kk in range(NR+NI)]\n\noptions = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14)\n\nHEOMMatsT = BosonicHEOMSolver(Ltot, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\n# Initial state of the system.\nrho0 = basis(2,0) * basis(2,0).dag() \n\n\nresultMatsT = HEOMMatsT.run(rho0, tlist)\n\n",
"_____no_output_____"
],
[
"# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11expT = expect(resultMatsT.states, P11p)\nP22expT = expect(resultMatsT.states, P22p)\nP12expT = expect(resultMatsT.states, P12p)",
"_____no_output_____"
],
[
"DL = \" 2*pi* 2.0 * {lam} / (pi * {gamma} * {beta}) if (w==0) else 2*pi*(2.0*{lam}*{gamma} *w /(pi*(w**2+{gamma}**2))) * ((1/(exp((w) * {beta})-1))+1)\".format(gamma=gamma, beta = beta, lam = lam)\n\noptionsODE = Options(nsteps=15000, store_states=True,rtol=1e-12,atol=1e-12)\noutputBR = brmesolve(Hsys, rho0, tlist, a_ops=[[sigmaz(),DL]], options = optionsODE)\n\n\n# Calculate expectation values in the bases\nP11BR = expect(outputBR.states, P11p)\nP22BR = expect(outputBR.states, P22p)\nP12BR = expect(outputBR.states, P12p)",
"_____no_output_____"
],
[
"\n# Plot the results\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8))\n#axes.plot(tlist, np.real(P11exp)+ np.real(P22exp), 'b', linewidth=2, label=\"P11\")\naxes.plot(tlist, np.real(P11exp), 'b', linewidth=2, label=\"P11 Mats\")\naxes.plot(tlist, np.real(P12exp), 'r', linewidth=2, label=\"P12 Mats\")\naxes.plot(tlist, np.real(P11expT), 'b--', linewidth=2, label=\"P11 Mats + Term\")\naxes.plot(tlist, np.real(P12expT), 'r--', linewidth=2, label=\"P12 Mats + Term\")\naxes.plot(tlist, np.real(P11BR), 'g--', linewidth=2, label=\"P11 Bloch Redfield\")\naxes.plot(tlist, np.real(P12BR), 'g--', linewidth=2, label=\"P11 Bloch Redfield\")\naxes.set_xlabel(r't', fontsize=28)\naxes.legend(loc=0, fontsize=12)",
"_____no_output_____"
],
[
"# Plot the results\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8))\n#axes.plot(tlist, np.real(P11exp)+ np.real(P22exp), 'b', linewidth=2, label=\"P11\")\naxes.plot(tlist, np.real(P11exp), 'b', linewidth=2, label=\"P11 Mats\")\naxes.plot(tlist, np.real(P12exp), 'r', linewidth=2, label=\"P12 Mats\")\naxes.set_xlabel(r't', fontsize=28)\naxes.legend(loc=0, fontsize=12)\nfig.savefig(\"figures/docsfig1.png\")",
"_____no_output_____"
],
[
"#We can compare the Matsubara result to the faster-converging Pade decomposition\n\n\nlmax = 2\n\n\n\n\ndef deltafun(j,k):\n if j==k: \n return 1.\n else:\n return 0.\n\n\n\n\nAlpha =np.zeros((2*lmax,2*lmax))\nfor j in range(2*lmax):\n for k in range(2*lmax):\n #Alpha[j][k] = (deltafun(j,k+1)+deltafun(j,k-1))/sqrt((2*(j+1)-1)*(2*(k+1)-1)) #fermi\n Alpha[j][k] = (deltafun(j,k+1)+deltafun(j,k-1))/sqrt((2*(j+1)+1)*(2*(k+1)+1)) #bose\n \neigvalsA=eigvalsh(Alpha) \n\neps = []\nfor val in eigvalsA[0:lmax]:\n #print(-2/val)\n eps.append(-2/val)\n \n\nAlphaP =np.zeros((2*lmax-1,2*lmax-1))\nfor j in range(2*lmax-1):\n for k in range(2*lmax-1):\n #AlphaP[j][k] = (deltafun(j,k+1)+deltafun(j,k-1))/sqrt((2*(j+1)+1)*(2*(k+1)+1)) #fermi\n \n AlphaP[j][k] = (deltafun(j,k+1)+deltafun(j,k-1))/sqrt((2*(j+1)+3)*(2*(k+1)+3)) #Bose: This is +3 because +1 (bose) + 2*(+1)(from bm+1)\n \neigvalsAP=eigvalsh(AlphaP) \n\n\nchi = []\nfor val in eigvalsAP[0:lmax-1]:\n \n chi.append(-2/val)\n\n \neta_list = []\nprefactor = 0.5*lmax*(2*(lmax + 1) + 1)\n\nfor j in range(lmax):\n term = prefactor\n for k1 in range(lmax - 1):\n term *= (chi[k1]**2 - eps[j]**2)/(eps[k1]**2 - eps[j]**2 + deltafun(j,k1)) \n \n for k2 in range(lmax-1,lmax):\n term /= (eps[k2]**2 - eps[j]**2 + deltafun(j,k2))\n \n \n eta_list.append(term)\n\n\nkappa = [0]+eta_list\nepsilon = [0]+eps\n\n\n\nbeta = 1/T\n\ndef f_approx(x):\n f = 0.5\n for l in range(1,lmax+1):\n f= f - 2*kappa[l]*x/(x**2+epsilon[l]**2)\n return f\n\ndef f(x):\n kB=1.\n return 1/(1-exp(-x)) #this is n(w)+1 (for bosons)\n\n\ndef C(tlist):\n eta_list = []\n gamma_list =[]\n \n eta_0 =lam*gamma*(1.0/np.tan(gamma*beta/2.0) - 1.0j)\n gamma_0 = gamma\n eta_list.append(eta_0)\n gamma_list.append(gamma_0)\n if lmax>0:\n for l in range(1,lmax+1):\n eta_list.append((kappa[l]/beta)*4*lam*gamma*(epsilon[l]/beta)/((epsilon[l]**2/beta**2)-gamma**2))\n gamma_list.append(epsilon[l]/beta)\n \n \n c_tot = []\n for t in tlist:\n c_tot.append(sum([eta_list[l]*exp(-gamma_list[l]*t) for l in range(lmax+1)]))\n return c_tot, eta_list, gamma_list\n\n\ncppLP,etapLP,gampLP = C( tlist_corr)\n\n\nfig, ax1 = plt.subplots(figsize=(12, 7))\n#print(gam_list)\nax1.plot( tlist_corr,real(cppLP), color=\"b\", linewidth=3, label= r\"real pade 2 terms\")\n#ax1.plot(tlist,imag(cppL), color=\"r\", linewidth=3, label= r\"imag alt\")\nax1.plot( tlist_corr,corrRana, \"r--\", linewidth=3, label= r\"real mats 15000 terms\")\nax1.plot( tlist_corr,real(cppL), \"g--\", linewidth=3, label= r\"real mats 2 terms\")\n#ax1.plot(tlist,corrIana, \"r--\", linewidth=3, label= r\"imag ana\")\n\n\n\nax1.set_xlabel(\"t\")\nax1.set_ylabel(r\"$C$\")\nax1.legend()\n\n\nfig, ax1 = plt.subplots(figsize=(12, 7))\n#print(gam_list)\n#ax1.plot(tlist,real(cppL), color=\"b\", linewidth=3, label= r\"real alt\")\n#ax1.plot(tlist,imag(cppL), color=\"r\", linewidth=3, label= r\"imag alt\")\n#ax1.plot(tlist,corrRana, \"b--\", linewidth=3, label= r\"real ana\")\n#ax1.plot(tlist,corrIana, \"r--\", linewidth=3, label= r\"imag ana\")\n\nax1.plot( tlist_corr,real(cppLP)-corrRana, color=\"b\", linewidth=3, label= r\"pade error\")\nax1.plot( tlist_corr,real(cppL)-corrRana,\"r--\", linewidth=3, label= r\"mats error\")\n#ax1.plot(tlist,real(cppL)-corrRana, color=\"b\", linewidth=3, label= r\"mats error\")\n\nax1.set_xlabel(\"t\")\nax1.set_ylabel(r\"Error\")\nax1.legend()\n",
"_____no_output_____"
],
[
"#put pade parameters in lists for heom solver\nckAR = [real(eta) +0j for eta in etapLP]\nckAI = [imag(etapLP[0]) + 0j]\nvkAR = [gam +0j for gam in gampLP]\nvkAI = [gampLP[0] + 0j]",
"_____no_output_____"
],
[
"\n\nNR = len(ckAR)\nNI = len(ckAI)\nQ2 = [Q for kk in range(NR+NI)]\nprint(Q2)\noptions = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14)\n\nHEOMPade = BosonicHEOMSolver(Hsys, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\n\n# Initial state of the system.\nrho0 = basis(2,0) * basis(2,0).dag() \n# Times to record state\n#tlist = np.linspace(0, 40, 600)\n\nresultPade = HEOMPade.run(rho0, tlist)",
"[Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]]]\n"
],
[
"\n# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11expP = expect(resultPade.states, P11p)\nP22expP = expect(resultPade.states, P22p)\nP12expP = expect(resultPade.states, P12p)\n\n# Plot the results\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8))\n#axes.plot(tlist, np.real(P11exp)+ np.real(P22exp), 'b', linewidth=2, label=\"P11\")\naxes.plot(tlist, np.real(P11exp), 'b', linewidth=2, label=\"P11 mats\")\naxes.plot(tlist, np.real(P11expT), 'y', linewidth=2, label=\"P11 mats T\")\naxes.plot(tlist, np.real(P11expP), 'b--', linewidth=2, label=\"P11 pade\")\naxes.plot(tlist, np.real(P12exp), 'r', linewidth=2, label=\"P12 mats\")\naxes.plot(tlist, np.real(P12expT), 'g', linewidth=2, label=\"P12 mats T\")\naxes.plot(tlist, np.real(P12expP), 'r--', linewidth=2, label=\"P12 pade\")\naxes.set_xlabel(r't', fontsize=28)\naxes.legend(loc=0, fontsize=12)",
"_____no_output_____"
]
],
[
[
"### Next we do fitting of correlation, and compare to Mats and Pade. We collect again a large sum of matsubara terms for many time steps\n",
"_____no_output_____"
]
],
[
[
"\ntlist2= linspace(0,2,10000)\n\n\nlmaxmats = 15000\n\ndef c(t,anamax):\n\n c_temp = (pref * lam * gamma * (-1.0j + cot(gamma / (2 * T))) * np.exp(-gamma * t))\n for k in range(1, anamax):\n vk = 2 * np.pi * k * T\n c_temp += ((pref * 4 * lam * gamma * T * vk / (vk**2 - gamma**2)) * np.exp(- vk * t) ) \n \n \n return c_temp\n\n# Reals parts\ncorrRana = [np.real(c(t,lmaxmats)) for t in tlist2]\n# Imaginary parts\ncorrIana = [np.imag((pref * lam * gamma * (-1.0j + cot(gamma / (2 * T))) * np.exp(-gamma * t))) for t in tlist2]\n\n\n",
"_____no_output_____"
],
[
"#We then fit this sum with standard least-squares approach.\n\nfrom scipy.optimize import curve_fit\ndef wrapper_fit_func(x, N, *args):\n a, b = list(args[0][:N]), list(args[0][N:2*N])\n # print(\"debug\")\n return fit_func(x, a, b, N)\n\n# actual fitting function\ndef fit_func(x, a, b, N):\n tot = 0\n for i in range(N):\n # print(i)\n tot += a[i]*np.exp(b[i]*x)\n return tot\n\n\ndef fitter(ans, tlist, k):\n # the actual computing of fit\n popt = []\n pcov = [] \n # tries to fit for k exponents\n for i in range(k):\n params_0 = [0]*(2*(i+1))\n upper_a = abs(max(ans, key = abs))*10\n #sets initial guess\n guess = []\n aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1)\n bguess = [0]*(i+1)\n guess.extend(aguess)\n guess.extend(bguess)\n # sets bounds\n b_lower = []\n alower = [-upper_a]*(i+1)\n blower = [-np.inf]*(i+1)\n b_lower.extend(alower)\n b_lower.extend(blower)\n # sets higher bound\n b_higher = []\n ahigher = [upper_a]*(i+1)\n bhigher = [0]*(i+1)\n b_higher.extend(ahigher)\n b_higher.extend(bhigher)\n param_bounds = (b_lower, b_higher)\n p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func(x, i+1, \\\n params_0), tlist, ans, p0=guess, sigma=[0.01 for t in tlist2], bounds = param_bounds,maxfev = 1e8)\n popt.append(p1)\n pcov.append(p2)\n print(i+1)\n return popt\n# print(popt)\n\n# function that evaluates values with fitted params at\n# given inputs\ndef checker(tlist, vals):\n y = []\n for i in tlist:\n # print(i)\n y.append(wrapper_fit_func(i, int(len(vals)/2), vals))\n return y\n\n#Number of exponents to use for real part\nk = 4\npopt1 = fitter(corrRana, tlist2, k)\n\n\ncorrRMats = [np.real(c(t,Nk)) for t in tlist2]\n\nfor i in range(k):\n y = checker(tlist2, popt1[i])\n plt.plot(tlist2, corrRana, tlist2, y, tlist2, corrRMats)\n \n plt.show()\n\n#number of exponents for imaginary part\nk1 = 1\npopt2 = fitter(corrIana, tlist2, k1)\nfor i in range(k1):\n y = checker(tlist2, popt2[i])\n plt.plot(tlist2, corrIana, tlist2, y)\n plt.show() \n",
"1\n2\n3\n4\n"
],
[
"ckAR1 = list(popt1[k-1])[:len(list(popt1[k-1]))//2]\nckAR = [x+0j for x in ckAR1]\nckAI1 = list(popt2[k1-1])[:len(list(popt2[k1-1]))//2]\n\nckAI = [x+0j for x in ckAI1]\n# vkAR, vkAI\nvkAR1 = list(popt1[k-1])[len(list(popt1[k-1]))//2:]\nvkAR = [-x+0j for x in vkAR1]\nvkAI1 = list(popt2[k1-1])[len(list(popt2[k1-1]))//2:]\nvkAI = [-x+0j for x in vkAI1]\n\n",
"_____no_output_____"
],
[
"#overwrite imaginary fit with analytical value (not much reason to use the fit for this)\n\nckAI = [pref * lam * gamma * (-1.0) + 0.j]\n\nvkAI = [gamma+0.j]\n\nprint(ckAI)\nprint(vkAI)",
"[(-0.05+0j)]\n[(0.5+0j)]\n"
],
[
"\nNC = 8\n\nNR = len(ckAR)\nNI = len(ckAI)\nQ2 = [Q for kk in range(NR+NI)]\nprint(Q2)\noptions = Options(nsteps=1500, store_states=True, rtol=1e-12, atol=1e-12, method=\"bdf\") \n#BDF because we have a slightly stiff problem\n\nHEOMFit = BosonicHEOMSolver(Hsys, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\n",
"[Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]], Quantum object: dims = [[2], [2]], shape = (2, 2), type = oper, isherm = True\nQobj data =\n[[ 1. 0.]\n [ 0. -1.]]]\n"
],
[
"\n\n\n\nstart = time.time()\nresultFit = HEOMFit.run(rho0, tlist)\n\nend = time.time()\nprint(\"ODE solver time\", end - start)",
"ODE solver time 44.665501832962036\n"
],
[
"# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11expF = expect(resultFit.states, P11p)\nP22expF = expect(resultFit.states, P22p)\nP12expF = expect(resultFit.states, P12p)\n",
"_____no_output_____"
],
[
"print(Hsys.eigenstates())\nenergies, states = Hsys.eigenstates()\nrhoss = (states[0]*states[0].dag()*exp(-beta*energies[0]) + states[1]*states[1].dag()*exp(-beta*energies[1]))\nrhoss = rhoss/rhoss.norm()\n\nP12 = expect(rhoss,P12p)\nP11 = expect(rhoss,P11p)",
"(array([-0.55901699, 0.55901699]), array([Quantum object: dims = [[2], [1]], shape = (2, 1), type = ket\nQobj data =\n[[ 0.52573111]\n [-0.85065081]],\n Quantum object: dims = [[2], [1]], shape = (2, 1), type = ket\nQobj data =\n[[-0.85065081]\n [-0.52573111]]], dtype=object))\n"
],
[
"dot_energy, dot_state = Hsys.eigenstates()\ndeltaE = dot_energy[1] - dot_energy[0]\n\ngamma2 = deltaE / (2 * np.pi * gamma)\nwa = 2 * np.pi * gamma2 * gamma # reaction coordinate frequency\ng = np.sqrt(np.pi * wa * lam / 2.0) # reaction coordinate coupling\ng = np.sqrt(np.pi * wa * lam / 4.0) # reaction coordinate coupling Factor over 2 because of diff in J(w) (I have 2 lam now)\n#nb = (1 / (np.exp(wa/w_th) - 1))\n\nNRC = 10\n\nHsys_exp = tensor(qeye(NRC), Hsys)\nQ_exp = tensor(qeye(NRC), Q)\na = tensor(destroy(NRC), qeye(2))\n\nH0 = wa * a.dag() * a + Hsys_exp\n# interaction\nH1 = (g * (a.dag() + a) * Q_exp)\n\nH = H0 + H1\n\n#print(H.eigenstates())\nenergies, states = H.eigenstates()\nrhoss = 0*states[0]*states[0].dag()\nfor kk, energ in enumerate(energies):\n rhoss += (states[kk]*states[kk].dag()*exp(-beta*energies[kk])) \n\n#rhoss = (states[0]*states[0].dag()*exp(-beta*energies[0]) + states[1]*states[1].dag()*exp(-beta*energies[1]))\n\nrhoss = rhoss/rhoss.norm()\n\nP12RC = tensor(qeye(NRC), basis(2,0) * basis(2,1).dag())\n\nP12RC = expect(rhoss,P12RC)\n\n\nP11RC = tensor(qeye(NRC), basis(2,0) * basis(2,0).dag())\n\nP11RC = expect(rhoss,P11RC)",
"_____no_output_____"
],
[
"matplotlib.rcParams['figure.figsize'] = (7, 5)\nmatplotlib.rcParams['axes.titlesize'] = 25\nmatplotlib.rcParams['axes.labelsize'] = 30\nmatplotlib.rcParams['xtick.labelsize'] = 28\nmatplotlib.rcParams['ytick.labelsize'] = 28\nmatplotlib.rcParams['legend.fontsize'] = 28\nmatplotlib.rcParams['axes.grid'] = False\nmatplotlib.rcParams['savefig.bbox'] = 'tight'\nmatplotlib.rcParams['lines.markersize'] = 5\nmatplotlib.rcParams['font.family'] = 'STIXgeneral' \nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams[\"font.serif\"] = \"STIX\"\nmatplotlib.rcParams['text.usetex']=False",
"_____no_output_____"
],
[
"#matplotlib.rcParams.update({'font.size': 18, 'text.usetex': True})\n#matplotlib.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix','text.usetex': False})\n\n# Plot the results\nfig, axes = plt.subplots(2, 1, sharex=False, figsize=(12,15))\nplt.sca(axes[0])\nplt.yticks([np.real(P11RC),0.6,1.0],[0.32,0.6,1])\n\naxes[0].plot(tlist, np.real(P11BR), 'y-.', linewidth=2, label=\"Bloch-Redfield\")\naxes[0].plot(tlist, np.real(P11exp), 'b', linewidth=2, label=\"Matsubara $N_k=2$\")\naxes[0].plot(tlist, np.real(P11expT), 'g--', linewidth=3, label=\"Matsubara $N_k=2$ & Terminator\")\naxes[0].plot(tlist, np.real(P11expF ), 'r', dashes=[3,2],linewidth=2, label=r\"Fit $N_f = 4$, $N_k=15\\times 10^3$\")\naxes[0].plot(tlist, [np.real(P11RC) for t in tlist], 'black', ls='--',linewidth=2, label=\"Thermal\")\n\n\n\naxes[0].locator_params(axis='y', nbins=4)\naxes[0].locator_params(axis='x', nbins=4)\n\n\n\naxes[0].set_ylabel(r\"$\\rho_{11}$\", fontsize=30)\naxes[0].legend(loc=0)\n\naxes[0].text(5,0.9,\"(a)\",fontsize=30)\naxes[0].set_xlim(0,50)\n\n\nplt.sca(axes[1])\nplt.yticks([np.real(P12RC),-0.2,0.0,0.2],[-0.33,-0.2,0,0.2])\naxes[1].plot(tlist, np.real(P12BR), 'y-.', linewidth=2, label=\"Bloch Redfield\")\naxes[1].plot(tlist, np.real(P12exp), 'b', linewidth=2, label=\"Matsubara $N_k=2$\")\naxes[1].plot(tlist, np.real(P12expT), 'g--', linewidth=3, label=\"Matsubara $N_k=2$ & Terminator\")\naxes[1].plot(tlist, np.real(P12expF ), 'r', dashes=[3,2], linewidth=2, label=r\"Fit $N_f = 4$, $N_k=15\\times 10^3$\")\naxes[1].plot(tlist, [np.real(P12RC) for t in tlist], 'black', ls='--', linewidth=2, label=\"Thermal\")\n\n\n\naxes[1].locator_params(axis='y', nbins=4)\naxes[1].locator_params(axis='x', nbins=4)\n\naxes[1].text(5,0.1,\"(b)\",fontsize=30)\n\n\naxes[1].set_xlabel(r'$t \\Delta$', fontsize=30)\naxes[1].set_ylabel(r'$\\rho_{01}$', fontsize=30)\n\naxes[1].set_xlim(0,50)\nfig.tight_layout()\nfig.savefig(\"figures/fig1.pdf\")",
"_____no_output_____"
],
[
"from qutip.ipynbtools import version_table\n\nversion_table()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdbe47399f53676fdd96619d952260f962242b8 | 3,105 | ipynb | Jupyter Notebook | 43_Count_the_Number_of_Each_Vowel.ipynb | A-Little-Hat/Python-Basics | f7e5177ecedbda990c157e4cb9977b254e551a21 | [
"MIT"
] | null | null | null | 43_Count_the_Number_of_Each_Vowel.ipynb | A-Little-Hat/Python-Basics | f7e5177ecedbda990c157e4cb9977b254e551a21 | [
"MIT"
] | 1 | 2021-04-24T07:46:27.000Z | 2021-04-24T07:46:27.000Z | 43_Count_the_Number_of_Each_Vowel.ipynb | A-Little-Hat/Python-Basics | f7e5177ecedbda990c157e4cb9977b254e551a21 | [
"MIT"
] | 1 | 2021-05-20T19:03:14.000Z | 2021-05-20T19:03:14.000Z | 20.294118 | 85 | 0.374879 | [
[
[
"x=input(\"enter a string : \")",
"enter a string : a quick brown fox jumped over the lazy dog.\n"
],
[
"a=0",
"_____no_output_____"
],
[
"e=0",
"_____no_output_____"
],
[
"i=0",
"_____no_output_____"
],
[
"o=0",
"_____no_output_____"
],
[
"u=0",
"_____no_output_____"
],
[
"for letter in x:\n if(letter=='a'):\n a=a+1\n if(letter=='e'):\n e=e+1\n if(letter=='i'):\n i=i+1\n if(letter=='o'):\n o=o+1\n if(letter=='u'):\n u=u+1",
"_____no_output_____"
],
[
"print(\"a = \", a ,\" e = \", e ,\" i = \", i , \" o = \", o ,\" u = \", u)",
"a = 2 e = 3 i = 1 o = 4 u = 2\n"
],
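[
"# An alternative sketch (not part of the original exercise): collections.Counter\n# tallies every character of x in one pass, so the five separate counters above\n# are not needed. Like the loop above, this only counts lowercase vowels.\nfrom collections import Counter\ncounts = Counter(x)\nprint({vowel: counts[vowel] for vowel in 'aeiou'})",
"_____no_output_____"
],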
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdbe9e2c0eaf9c3bdb1412ed24ef5de033af181 | 31,900 | ipynb | Jupyter Notebook | Numpy Crash Course.ipynb | Chirag-Juneja/numpy | b07051ec79ad74f924c948f197d99dd973bbeb06 | [
"MIT"
] | 1 | 2021-08-25T05:29:37.000Z | 2021-08-25T05:29:37.000Z | Numpy Crash Course.ipynb | Chirag-Juneja/numpy | b07051ec79ad74f924c948f197d99dd973bbeb06 | [
"MIT"
] | null | null | null | Numpy Crash Course.ipynb | Chirag-Juneja/numpy | b07051ec79ad74f924c948f197d99dd973bbeb06 | [
"MIT"
] | null | null | null | 17.732073 | 433 | 0.432853 | [
[
[
"# Numpy Crash Course",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## Numpy Arrays",
"_____no_output_____"
],
[
"### List to array",
"_____no_output_____"
]
],
[
[
"l = [1, 2, 3]",
"_____no_output_____"
],
[
"type(l)",
"_____no_output_____"
],
[
"np.array(l)",
"_____no_output_____"
],
[
"arr = np.array(l)",
"_____no_output_____"
],
[
"arr.shape",
"_____no_output_____"
]
],
[
[
"### Nested list to Matrix",
"_____no_output_____"
]
],
[
[
"nestedlist = [[1,2,3],[4,5,6],[7,8,9]]",
"_____no_output_____"
],
[
"matrix = np.array(nestedlist)",
"_____no_output_____"
],
[
"matrix",
"_____no_output_____"
],
[
"matrix.shape",
"_____no_output_____"
]
],
[
[
"## Numpy Range",
"_____no_output_____"
]
],
[
[
"np.arange(0,10)",
"_____no_output_____"
],
[
"np.arange(0,10,2)",
"_____no_output_____"
]
],
[
[
"## Zeros",
"_____no_output_____"
]
],
[
[
"np.zeros(5)",
"_____no_output_____"
],
[
"np.zeros((5,5))",
"_____no_output_____"
]
],
[
[
"## Ones",
"_____no_output_____"
]
],
[
[
"np.ones(5)",
"_____no_output_____"
],
[
"np.ones((3,3))",
"_____no_output_____"
]
],
[
[
"## Linspace",
"_____no_output_____"
]
],
[
[
"np.linspace(0,10,4)",
"_____no_output_____"
],
[
"np.linspace(0,10,20)",
"_____no_output_____"
]
],
[
[
"### Identity Matrix",
"_____no_output_____"
]
],
[
[
"np.eye(3)",
"_____no_output_____"
]
],
[
[
"## Random ",
"_____no_output_____"
],
[
"### Uniform Distribution",
"_____no_output_____"
]
],
[
[
"np.random.rand(1)",
"_____no_output_____"
],
[
"np.random.rand(5)",
"_____no_output_____"
],
[
"np.random.rand(3,3)",
"_____no_output_____"
]
],
[
[
"### Starndard Normal Distribution (mean = 0, sd = 1)",
"_____no_output_____"
]
],
[
[
"np.random.randn(5)",
"_____no_output_____"
],
[
"np.random.randn(3,3)",
"_____no_output_____"
]
],
[
[
"### Normal Distribution",
"_____no_output_____"
]
],
[
[
"np.random.normal(10,1) # mean =10, sd =1",
"_____no_output_____"
]
],
[
[
"### Random Integer",
"_____no_output_____"
]
],
[
[
"np.random.randint(0,100) # low inclusive, high exclusive",
"_____no_output_____"
],
[
"np.random.randint(0,100,10)",
"_____no_output_____"
]
],
[
[
"### Seed",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nnp.random.rand(4)",
"_____no_output_____"
]
],
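[
[
"# Illustrative extra (not in the original notebook): re-seeding with the same value\n# makes the generator reproduce exactly the same 'random' numbers as above.\nnp.random.seed(42)\nnp.random.rand(4)",
"_____no_output_____"
]
],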
[
[
"## Array Attributes and Methods",
"_____no_output_____"
]
],
[
[
"arr = np.arange(25)",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
],
[
"ranarr = np.random.randint(0,50,10)",
"_____no_output_____"
],
[
"ranarr",
"_____no_output_____"
]
],
[
[
"### Reshape",
"_____no_output_____"
]
],
[
[
"arr.reshape(5,5)",
"_____no_output_____"
],
[
"arr.reshape(5,3) # new shape should fit the data",
"_____no_output_____"
]
],
[
[
"### Maximum and Minimum",
"_____no_output_____"
]
],
[
[
"# maximum \nranarr.max()",
"_____no_output_____"
],
[
"# minimum\nranarr.min()",
"_____no_output_____"
],
[
"# index of max\nranarr.argmax()",
"_____no_output_____"
],
[
"# index of min\nranarr.argmin()",
"_____no_output_____"
]
],
[
[
"### Array datatype",
"_____no_output_____"
]
],
[
[
"ranarr.dtype",
"_____no_output_____"
]
],
[
[
" ## Index Selection",
"_____no_output_____"
]
],
[
[
"arr = np.arange(0,11)",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
],
[
"arr[3]",
"_____no_output_____"
],
[
"arr[2:4]",
"_____no_output_____"
],
[
"arr[:5]",
"_____no_output_____"
],
[
"arr[5:]",
"_____no_output_____"
]
],
[
[
"## BroadCasting",
"_____no_output_____"
]
],
[
[
"arr = np.arange(0,10)",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
],
[
"# broadcasting operation on all elements\narr ** 2",
"_____no_output_____"
],
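[
"# Illustrative extra (not in the original notebook): broadcasting also works between\n# arrays of different shapes: here a length-4 vector is added to every row of a\n# (3, 4) matrix of ones.\nm = np.ones((3, 4))\nv = np.arange(4)\nm + v",
"_____no_output_____"
],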
[
"arr",
"_____no_output_____"
],
[
"# slice_arr holds the orignal array (shallow copy)\narr_slice = arr[:5]",
"_____no_output_____"
],
[
"arr_slice",
"_____no_output_____"
],
[
"arr_slice[1] = 10",
"_____no_output_____"
],
[
"arr_slice",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
],
[
"# deep copy\narr_copy = arr.copy()",
"_____no_output_____"
],
[
"arr_copy",
"_____no_output_____"
],
[
"arr_copy[1] = 100",
"_____no_output_____"
],
[
"arr_copy",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
]
],
[
[
"## Indexing on 2D array",
"_____no_output_____"
]
],
[
[
"matrix = np.arange(1,10).reshape(3,3)*5",
"_____no_output_____"
],
[
"matrix",
"_____no_output_____"
],
[
"matrix[1]",
"_____no_output_____"
],
[
"matrix[1][1]",
"_____no_output_____"
],
[
"matrix[1,1]",
"_____no_output_____"
],
[
"matrix[:2,1:]",
"_____no_output_____"
]
],
[
[
"## Conditional Selection",
"_____no_output_____"
]
],
[
[
"arr = np.arange(1,11)\narr",
"_____no_output_____"
],
[
"arr > 4",
"_____no_output_____"
],
[
"arr[arr > 4]",
"_____no_output_____"
]
],
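[
[
"# Illustrative extra (not in the original notebook): boolean conditions can be\n# combined with & (and) and | (or); the parentheses around each condition are required.\narr[(arr > 2) & (arr < 8)]",
"_____no_output_____"
]
],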
[
[
"## Numpy Array Operations",
"_____no_output_____"
]
],
[
[
"arr = np.arange(0,11)",
"_____no_output_____"
],
[
"arr",
"_____no_output_____"
],
[
"arr+4",
"_____no_output_____"
],
[
"arr ** 2",
"_____no_output_____"
],
[
"arr/arr",
"<ipython-input-73-50b4ced5627e>:1: RuntimeWarning: invalid value encountered in true_divide\n arr/arr\n"
],
[
"np.sqrt(arr)",
"_____no_output_____"
],
[
"np.log(arr)",
"<ipython-input-75-a67b4ae04e95>:1: RuntimeWarning: divide by zero encountered in log\n np.log(arr)\n"
],
[
"arr.sum()",
"_____no_output_____"
],
[
"arr.mean()",
"_____no_output_____"
],
[
"matrix = np.arange(1,13).reshape(3,4)",
"_____no_output_____"
],
[
"matrix.sum()",
"_____no_output_____"
],
[
"matrix.sum(axis=0) # sum on vertical axis (accross the rows)",
"_____no_output_____"
],
[
"matrix.sum(axis=1) # sum on horizontal axis (acrsso the columns)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc0c6db8a8c0fc46b0d7094142931c097aae77 | 7,597 | ipynb | Jupyter Notebook | 14-The-period-of-the-tides/14.3-Tidal-period-and-sideral-day.ipynb | misterhay/RabbitMath | 8089e6cdf5ee0d70827b65c8be64619892b22b63 | [
"MIT"
] | null | null | null | 14-The-period-of-the-tides/14.3-Tidal-period-and-sideral-day.ipynb | misterhay/RabbitMath | 8089e6cdf5ee0d70827b65c8be64619892b22b63 | [
"MIT"
] | null | null | null | 14-The-period-of-the-tides/14.3-Tidal-period-and-sideral-day.ipynb | misterhay/RabbitMath | 8089e6cdf5ee0d70827b65c8be64619892b22b63 | [
"MIT"
] | null | null | null | 33.615044 | 398 | 0.602606 | [
[
[
"# 14.3 Tidal period and sideral day\n\nThe second major reason that high tides don't happen twice every 24 hours is that there are two kinds of day lengths. Our standard interpretation of the day is what is called the *solar day* because it’s determined with reference to the sun, this is 24 hours.\n\nThe other kind of day is called the *sidereal day*, it’s the time required for the earth to rotate through 360°. It can be defined for any rotating body whether it has a sun or not; in a sense its reference is the rest of the universe.\n\nThis requires a minor correction to our previous calculations.",
"_____no_output_____"
],
[
"Suppose we are above the plane of the earth’s orbit around the sun (again with the north pole pointing up). Draw a line between the centres of the sun and the earth and take t = 0 to be a moment at which that line intersects the surface of the earth at the zero meridian of longitude (coloured red). Now let the earth rotate exactly 360° and draw that line again (between the two centres).\n\nThe line doesn't pass through the zero meridian, the earth needs to rotate a little more (since it is rotating counter-clockwise and revolving counter-clockwise). That 360° plus a little more takes 24 hours.\n\nThis means that a sideral day is a little shorter (about 4 minutes less) than a solar day.",
"_____no_output_____"
],
[
"## Using geometry to calculate siderial day length\n\nThis new diagram of the earth's orbit includes markings for the two segments of the solar day. $E$ represents the time for the earth to rotate 360° and $\\epsilon$ for the time the second part takes.\n\nAs well, the angle $\\phi$ represents the angle through which the earth rotates during a siderial day. Since the earth takes 365 days, 6 hour, 9 minutes, and 9.76 seconds to orbit the sun, we can calculate how many degrees it rotates through per solar day.",
"_____no_output_____"
]
],
[
[
"daysPerYear = 365 + 6/24 + 9/(24*60) + 9.76/(24*60*60)\nprint(daysPerYear, 'days per year')\n\nphi = 1/daysPerYear\nprint(phi, 'degrees per day')",
"_____no_output_____"
]
],
[
[
"A solar day is 24 hours, so $E + \\epsilon = 24$.\n\nFrom the geometry of parallel lines we know that it takes a fraction $\\phi$ of the time to for a complete rotation $E$, so $\\epsilon = \\phi E$.\n\nCombine $E + \\epsilon = 24$ and $\\epsilon = \\phi E$ to calculate $E$, the length of the siderial day.",
"_____no_output_____"
]
],
[
[
"# fill in your equation below then run this cell\n# if you get really stuck, check out the cell at the bottom of this notebook\n\nE = \n\nprint(E, 'hours, which is about four minutes less than 24 hours')",
"_____no_output_____"
]
],
[
[
"So let's finally fit a sine curve to our tides data using the new calculated tidal period. If all goes well it should be a good fit. You may need to adjust the phase of the sine wave, $c$, and perhaps the amplitude, $a$, depending on when you run the code.",
"_____no_output_____"
]
],
[
[
"T_E = E # from your calculation above\nT_M = 29*24 + 12 + 44/60 + 2.8/(60*60)\nT = (T_E * T_M) / (T_M - T_E)\n\na = 5.5\nb = (2*360)/T\nc = 0\nd = 6.5\n\nimport plotly.graph_objects as go\nimport math\nimport requests\nimport pandas as pd\nimport plotly.express as px\n\n# Get tide data from Fisheries and Oceans Canada for Advocate Harbour (hourly for the next 7 days)\nurl = 'https://www.waterlevels.gc.ca/eng/station?sid=215' # Joggins Wharf\nresponse = requests.get(url)\n\ndfs = pd.read_html(response.text) # construct dataframe\ntideHeight = dfs[7]['Hour'].transpose() # select the 7th frame and transpose columns and rows\n\ntideList = [] # construct a list from the tideHeight data\nfor key in tideHeight.keys():\n tideList.extend(tideHeight[key].tolist())\n\ndf = pd.DataFrame(tideList) # turn the list into a dataframe\ndf.columns = ['Height (m)'] # label the height column\ndf['Time (h)'] = range(0,len(tideList)) # add a time (in hours) column\n\nxlist = df['Time (h)'].tolist()\nylist = []\nfor x in xlist:\n y = a * math.sin(math.radians(b * x + c)) + d\n ylist.append(y)\n\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=df['Time (h)'], y=df['Height (m)'], name='tide data'))\nfig.add_trace(go.Scatter(x=xlist, y=ylist, name='sine fit'))\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"So given some things we know about orbital mechanics and the mathematics of sine waves, we've been able to determine a pretty good curve fit for the frequency (and amplitude) of the tides at this location.\n\nA couple things to explore further would be:\n\n- Does the same curve fit the tides data from [different locations](https://www.waterlevels.gc.ca/eng), either in the Bay of Fundy or elsewhere?\n- Why does the amplitude of the tides seem to alternate between slightly larger and slightly smaller values?",
"_____no_output_____"
]
],
[
[
"# If you are stuck on finding E...\n\nE = 24*(daysPerYear/(daysPerYear+1))",
"_____no_output_____"
]
],
[
[
"You have now finished \"The Period of the Tides\" activity. Well done.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdc10e25ae0db8978e8e85963a21276a88269ff | 17,985 | ipynb | Jupyter Notebook | notebooks/json_file_processing-humidity.ipynb | rajgupt/task-risk | 30c2821476452af06ad871d3deee5004190da375 | [
"MIT"
] | 3 | 2020-04-04T23:47:29.000Z | 2020-06-18T17:53:59.000Z | notebooks/json_file_processing-humidity.ipynb | rajgupt/task-risk | 30c2821476452af06ad871d3deee5004190da375 | [
"MIT"
] | null | null | null | notebooks/json_file_processing-humidity.ipynb | rajgupt/task-risk | 30c2821476452af06ad871d3deee5004190da375 | [
"MIT"
] | 7 | 2020-04-01T19:46:49.000Z | 2020-04-29T18:16:37.000Z | 42.21831 | 202 | 0.54134 | [
[
[
"import os\nimport pandas as pd\nimport scholarly\nos.chdir('C:\\\\Users\\\\Cafral\\\\Desktop\\\\kaggle\\\\CORD-19-research-challenge\\\\data_v7')",
"_____no_output_____"
]
],
[
[
"# Load the data",
"_____no_output_____"
]
],
[
[
"df_method = pd.read_csv('method_df.csv')\ndf_result = pd.read_csv('result_df.csv')\n\nprint(\"No of unique papers in method section : \", df_method['paper_id'].nunique(), \" out of \", \n len(df_method), \" rows in dataframe\")\nprint(\"No of unique papers in result section : \", df_result['paper_id'].nunique(), \" out of \", \n len(df_result), \" rows in dataframe\")\n\ndf_method.info()",
"C:\\Users\\Cafral\\Anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3058: DtypeWarning: Columns (36) have mixed types. Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
]
],
[
[
"# Extracting sentences which contain topic ngrams",
"_____no_output_____"
]
],
[
[
"def find_ngrams(dataframe,columnToSearch,keywords):\n df_w_ngrams = dataframe[dataframe[columnToSearch].str.contains('|'.join(keywords), case=False) == True]\n return df_w_ngrams\n\nngrams = [' humidity',' rain is',' rain was',' rain has',' damp weather', ' damp climate',\n ' monsoon',' rainy',' water vapour',' rainfall']#'sweat','damp',\n\n#Extracting sentences which contain ngrams\n\ndf_method_p = find_ngrams(df_method,'sentence',ngrams)\n\ndf_result_p = find_ngrams(df_result,'sentence',ngrams)\n\nprint(\"There are {} sentences containing keywords/ngrams in Method section.\".format(len(df_method_p)))\nprint(\"There are {} sentences containing keywords/ngrams in Result section.\".format(len(df_result_p)))\n\n# Merging the method and result section sentences into single dataframe\ndf_real = pd.concat([df_method_p, df_result_p])\n\nprint(\"Total unique papers in Method section : {}\".format(df_method_p['paper_id'].nunique()))\nprint(\"Total unique papers in Result section : {}\".format(df_result_p['paper_id'].nunique()))\nprint(\"Total unique papers in combined section : {}\".format(df_real['paper_id'].nunique()))",
"There are 312 sentences containing keywords/ngrams in Method section.\nThere are 256 sentences containing keywords/ngrams in Result section.\nTotal unique papers in Method section : 222\nTotal unique papers in Result section : 100\nTotal unique papers in combined section : 296\n"
]
],
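[
[
"# Side note (not part of the original notebook): str.contains interprets the joined\n# keywords as a regular expression. The plain-word ngrams used here are safe, but\n# keywords containing regex metacharacters would need escaping first, for example:\nimport re\npattern = '|'.join(re.escape(k) for k in ngrams)\npattern",
"_____no_output_____"
]
],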
[
[
"# Keeping all the sentences from papers that had topic ngrams",
"_____no_output_____"
]
],
[
[
"df_method_all_sentence = pd.merge(df_method[['paper_id','sentence']],df_method_p['paper_id'],on='paper_id',how='right')\ndf_method_all_sentence.rename(columns={'sentence_x':'all_sentences','sentence_y':'ngram_sentence'},inplace=True)\n\ndf_result_all_sentence = pd.merge(df_result[['paper_id','sentence']],df_result_p['paper_id'],on='paper_id',how='right')\ndf_result_all_sentence.rename(columns={'sentence_x':'all_sentences','sentence_y':'ngram_sentence'},inplace=True)\n\ndf_all_sentences = pd.concat([df_method_all_sentence, df_result_all_sentence])\nprint(\"Total unique papers in combined section : {}\".format(df_all_sentences['paper_id'].nunique()))",
"Total unique papers in combined section : 296\n"
]
],
[
[
"# Extracting methodolody,sample size,causal nature,sentences refering to coronavirus, fatality",
"_____no_output_____"
]
],
[
[
"def extract_features(ngramDf,allSentdataFrame):\n # extracting methodology\n methods_list = ['regression','OLS','ordinary least squares','logistic regression' , 'neural network',\n 'random forest','logistic function','time series','model','modelling','simulation',\n 'forecast','forecasting']\n methodology = find_ngrams(allSentdataFrame,'sentence',methods_list)\n\n #extracting sample size\n sample_size_list = ['population size','sample size','number of samples','number of observations',\n 'number of subjects']\n sample_size = find_ngrams(allSentdataFrame,'sentence',sample_size_list)\n\n #extracting nature of correlation\n causal_list =['statistically significant','statistical significance',\n 'correlation','positively correlated','negatively correlated','correlated',\n 'p value','p-value','chi square','chi-square','t statistic','standard error'\n 'confidence interval','odds ratio','coefficient']\n\n causality_type = find_ngrams(allSentdataFrame,'sentence',causal_list)\n\n # extracting coronavirus related sentence #can someone check and update this list?\n coronavirus_list = ['severe acute respiratory syndrome','sars-cov','sars-like',\n 'middle east respiratory syndrome','mers-cov','mers-like',\n 'covid-19','sars-cov-2','2019-ncov','sars-2',\n 'sarscov-2','novel coronavirus','corona virus','coronaviruses',\n 'sars','mers','covid19','covid 19']\n\n coronavirus = find_ngrams(allSentdataFrame,'sentence',coronavirus_list)\n\n # extracting outcome\n disease_stage_list = ['lethal', 'morbid',\"death\", \"fatality\", \"mortality\",\"lethal\", \"lethality\", \"morbidity\"]\n\n fatality = find_ngrams(allSentdataFrame,'sentence',disease_stage_list)\n\n df_list = [methodology,sample_size,causality_type,coronavirus,fatality]\n df_list_name = ['methodology','sample_size','causality_type','coronavirus','fatality']\n i=0\n for one_df in df_list:\n one_df.rename(columns={'sentence':df_list_name[i]},inplace=True)\n grouped_one_df = one_df.groupby(['paper_id'], as_index=False)[df_list_name[i]].sum()\n ngramDf = pd.merge(ngramDf,grouped_one_df,on='paper_id',how='left')\n i=i+1\n return ngramDf",
"_____no_output_____"
],
[
"df_real = extract_features(df_real,df_all_sentences)",
"_____no_output_____"
]
],
[
[
"# Merge with Metadata",
"_____no_output_____"
]
],
[
[
"metadata = pd.read_csv('clean_metadata.csv')\nmetadata.rename(columns={'sha':'paper_id'}, inplace = True)\nmetadata['paper_id'] = metadata['paper_id'].astype(\"str\")\n\n#Merging the given papers with their metadata\ndf_real = df_real.merge(metadata[['paper_id', 'title', 'abstract', 'publish_time', 'authors',\n 'url']], on='paper_id', how='left') #'title_w_ngram','abstract_w_ngram'\n\n#Keeping only the fields which are relevant to us.\ndf_real = df_real[['paper_id','language', 'section', 'sentence', 'lemma', 'UMLS', 'sentence_id', \n 'publish_time', 'authors', 'url','methodology','sample_size','causality_type','coronavirus',\n 'fatality','title','abstract','publish_time','authors',\n 'url','TAXON']]#'title_w_ngram','abstract_w_ngram',\ndf_real.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 568 entries, 0 to 567\nData columns (total 21 columns):\npaper_id 568 non-null object\nlanguage 568 non-null object\nsection 568 non-null object\nsentence 568 non-null object\nlemma 568 non-null object\nUMLS 568 non-null object\nsentence_id 567 non-null object\npublish_time 524 non-null object\nauthors 521 non-null object\nurl 523 non-null object\nmethodology 363 non-null object\nsample_size 74 non-null object\ncausality_type 298 non-null object\ncoronavirus 144 non-null object\nfatality 108 non-null object\ntitle 524 non-null object\nabstract 515 non-null object\npublish_time 524 non-null object\nauthors 521 non-null object\nurl 523 non-null object\nTAXON 568 non-null object\ndtypes: object(21)\nmemory usage: 97.6+ KB\n"
],
[
"grouped = df_real.groupby('paper_id')\ndef keywordcounter(sentences, keywords_list):\n '''\n Input : List of sentences, List of keywords\n Returns : Keywords present in sentences, Total count of all keywords present in Input\n '''\n keyword = {}\n sent = \" \".join(sentences)\n for pol in keywords_list:\n counter = sent.lower().count(pol)\n if (counter > 0):\n keyword[pol] = counter\n return list(keyword.keys()), sum(keyword.values())\n\ndef aggregation(item,keyWordList,RiskFactor):\n '''\n Input : Dataframe of sentences of a paper\n Return : Datframe in Standard Output format\n '''\n dfo = {}\n \n dfo['Risk Factor'] = RiskFactor\n dfo['Title'] = item['title'].iloc[0]\n dfo['Keyword/Ngram'], dfo['No of keyword occurence in Paper'] = keywordcounter(item['sentence'].tolist(),\n keyWordList)\n dfo['paper_id'] = item['paper_id'].iloc[0]\n \n if (item['url'].iloc[0].isnull().any()==False):\n dfo['URL'] = item['url'].iloc[0].tolist()\n else:\n dfo['URL']=''\n #dfo['Sentences from Title']= item['title_w_ngram'].iloc[0] \n #dfo['Sentences from Abstract']= item['abstract_w_ngram'].iloc[0]\n dfo['Sentences from Method'] = item[item['section']=='methods']['sentence'].tolist()\n dfo['Sentences from Result'] = item[item['section']=='results']['sentence'].tolist()\n \n if (item['authors'].iloc[0].isnull().any()==False):#(item['authors'].iloc[0].isnull()==False):\n dfo['Authors'] = item['authors'].iloc[0].tolist()\n else:\n dfo['Authors'] = ''\n # For papers which do not have title (not in metadata) we have to resolve exceptions\n #try:\n # dfo['No of Citations'] = next(scholarly.search_pubs_query(item['title'].iloc[0])).citedby\n #except:\n dfo['No of Citations'] = 0\n \n dfo['Correlation'] = item['causality_type'].iloc[0]\n dfo['Design Methodology'] = item['methodology'].iloc[0]\n dfo['Sample Size'] = item['sample_size'].iloc[0]\n dfo['Coronavirus'] = item['coronavirus'].iloc[0]\n dfo['Fatality'] = item['fatality'].iloc[0]\n dfo['TAXON'] =item['TAXON'].iloc[0]\n \n return dfo\n\ndf_output = pd.DataFrame(columns=['Risk Factor', 'Title','Keyword/Ngram', 'No of keyword occurence in Paper',\n 'paper_id', 'URL',\n 'Sentences from Result', 'Sentences from Method',\n 'Authors','No of Citations', 'Correlation', \n 'Design Methodology', 'Sample Size',\n 'Coronavirus','Fatality','TAXON'])#Sentences from Title','Sentences from Abstract',\nfor key, item in grouped:\n df_output = pd.concat([df_output, pd.DataFrame([aggregation(item,ngrams,'Humidity')])])\n\ndf_output = df_output.reset_index()\ndf_output.to_excel('humidity_json.xlsx')",
"C:\\Users\\Cafral\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:63: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ecdc11eccb75beecf60c75c2b60651eb1ae1928c | 13,348 | ipynb | Jupyter Notebook | provisioning/files/Data_analysis/machine_learning/notebook-files/1 Gaussian Naive Bayes.ipynb | fpkmatthi/db3-server | b32699b6d63e91d4fc94f418a6667f44da701b6a | [
"MIT"
] | null | null | null | provisioning/files/Data_analysis/machine_learning/notebook-files/1 Gaussian Naive Bayes.ipynb | fpkmatthi/db3-server | b32699b6d63e91d4fc94f418a6667f44da701b6a | [
"MIT"
] | null | null | null | provisioning/files/Data_analysis/machine_learning/notebook-files/1 Gaussian Naive Bayes.ipynb | fpkmatthi/db3-server | b32699b6d63e91d4fc94f418a6667f44da701b6a | [
"MIT"
] | null | null | null | 28.4 | 161 | 0.413171 | [
[
[
"import pandas as pd\nimport numpy as np\n# numpy is a Python library that offers lots of data manipulation functions",
"_____no_output_____"
],
[
"# Load the dataset persons.txt and check if the dataset is loaded correctly\n# The seperator is ;\npersons = pd.read_csv('./persons.txt', sep = \";\")\npersons.head(10)",
"_____no_output_____"
],
[
"# Convert male to 0 and female to 1\n# Check the dataset afterwards\npersons['gender'] = np.where(persons['gender']=='male', 0, 1)\npersons.head(10)",
"_____no_output_____"
]
],
[
[
"Before we can compute for the probability distribution for features $x$, \nwe must first compute for the mean $μ$ and variance $\\sigma^{2}$ values of $x_{i}$ for each $k$ class.",
"_____no_output_____"
]
],
[
[
"mean_male = persons[persons['gender'] == 0][['height', 'weight', 'footsize']].mean()\nmean_female = persons[persons['gender'] == 1][['height', 'weight', 'footsize']].mean()\nvar_male = persons[persons['gender'] == 0][['height', 'weight', 'footsize']].var()\nvar_female = persons[persons['gender'] == 1][['height', 'weight', 'footsize']].var()",
"_____no_output_____"
],
[
"# We take a look at the computed values.\n\nprint('Mean values for male features: ')\nprint(mean_male)\nprint()\nprint('Mean values for female features: ')\nprint(mean_female)\nprint()\nprint('Variance values for male features: ')\nprint(var_male)\nprint()\nprint('Variance values for female features: ')\nprint(var_female)\nprint()",
"Mean values for male features: \nheight 178.250\nweight 79.875\nfootsize 45.250\ndtype: float64\n\nMean values for female features: \nheight 165.00\nweight 60.00\nfootsize 39.75\ndtype: float64\n\nVariance values for male features: \nheight 32.250000\nweight 24.062500\nfootsize 0.916667\ndtype: float64\n\nVariance values for female features: \nheight 87.500000\nweight 118.000000\nfootsize 2.916667\ndtype: float64\n\n"
]
],
[
[
"Now that we have the $μ$ and $\\sigma^{2}$ values for each features $x_{i}$ per $k$ -class, \nlet us now write a function for the likelihood computation, i.e. $p(x_{i}|C_{k})$ . \nWe are going to plugin the likelihood computation into the Gaussian probability density function:\n$$ p(x = x_{i} | C_{k}) = \\dfrac{1}{\\sqrt{2 \\pi \\sigma_{k}^{2}}} \\cdot exp\\bigg(\\dfrac{-(x_{i} - \\mu_{k})^2}{2 \\sigma_{k}^{2}}\\bigg) $$\n\nHence, we implement the likelihood function as follows:",
"_____no_output_____"
]
],
[
[
"def likelihood(feature, mean, variance):\n return (1 / np.sqrt(2 * np.pi * variance)) * np.exp((-(feature - mean) ** 2) / (2 * variance))",
"_____no_output_____"
]
],
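[
[
"# Quick sanity check (not in the original notebook): a probability density should\n# integrate to roughly 1 over a wide range around the mean. Here the male height\n# likelihood is integrated numerically with the trapezoidal rule.\nxs = np.linspace(140, 220, 2001)\nprint(np.trapz(likelihood(xs, mean_male['height'], var_male['height']), xs))",
"_____no_output_____"
]
],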
[
[
"Now we want to calculate the chance that a person with height = 175 and weight = 59 and footsize = 40 is male or female\nSo we have to calculate 2 things:\n- P(male | height = 175 , weight = 59, footsize = 40) \n = P(height = 175 | male) * P(weight = 59 | male) * P(footsize = 40 | male) * P(male) / (P(height = 175) * P(weight = 59) * P(footsize = 40))\n- P(female | height = 175 , weight = 59, footsize = 40)\n = P(height = 175 | female) * P(weight = 59 | female) * P(footsize = 40 | female) * P(female) / (P(height = 175) * P(weight = 59) * P(footsize = 40))\n\nBecause P(male) = P(female) = 0.5 and (P(height = 175) * P (weight = 59) * P(footsize = 40)) is for both formulas the same, these values don't matter\nSo we only need to calculate\n- P(height = 175 | male) * P(weight = 59 | male) * P(footsize = 40 | male)\n- P(height = 175 | female) * P(weight = 59 | female) * P(footsize = 40 | female)",
"_____no_output_____"
]
],
[
[
"p_height = likelihood(feature=175,\n mean=np.array([mean_male['height'], mean_female['height']]),\n variance=np.array([var_male['height'], var_female['height']]))\nprint(p_height)",
"[0.0596383 0.02408451]\n"
],
[
"p_weight = likelihood(feature=59,\n mean=np.array([mean_male['weight'], mean_female['weight']]),\n variance=np.array([var_male['weight'], var_female['weight']]))\nprint(p_weight)",
"[9.50078654e-06 3.65703260e-02]\n"
],
[
"p_footsize = likelihood(feature=40,\n mean=np.array([mean_male['footsize'], mean_female['footsize']]),\n variance=np.array([var_male['footsize'], var_female['footsize']]))\nprint(p_footsize)",
"[1.23191750e-07 2.31107219e-01]\n"
],
[
"result = p_height * p_weight * p_footsize\nprint(result)",
"[6.98017742e-14 2.03554218e-04]\n"
],
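[
"# Illustrative follow-up (not part of the original notebook): with equal priors the\n# shared evidence term cancels, so normalising the two scores gives the posterior\n# probabilities for [male, female]; this data point is almost certainly female.\nposterior = result / result.sum()\nprint(posterior)",
"_____no_output_____"
]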
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ecdc131610b46178281f095f00e46c42ed33a467 | 11,192 | ipynb | Jupyter Notebook | exercices/python_oop.ipynb | kaoutharBousbaa1/python-bootcamp | 5936a8bcf43fef6602f04f8db8fc9117db1db821 | [
"MIT"
] | null | null | null | exercices/python_oop.ipynb | kaoutharBousbaa1/python-bootcamp | 5936a8bcf43fef6602f04f8db8fc9117db1db821 | [
"MIT"
] | null | null | null | exercices/python_oop.ipynb | kaoutharBousbaa1/python-bootcamp | 5936a8bcf43fef6602f04f8db8fc9117db1db821 | [
"MIT"
] | null | null | null | 34.2263 | 360 | 0.54941 | [
[
[
"## Exercise 1:\n\n- consedering the following code\n\n```python\nimport datetime # we will use this for date objects\n\nclass Person:\n\n def __init__(self, name, surname, birthdate, address, telephone, email):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n\n self.address = address\n self.telephone = telephone\n self.email = email\n def age(self):\n today = datetime.date.today()\n age = today.year - self.birthdate.year\n\n if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):\n age -= 1\n\n return age\n\nperson = Person(\n \"Jane\",\n \"Doe\",\n datetime.date(1992, 3, 12), # year, month, day\n \"No. 12 Short Street, Greenville\",\n \"555 456 0987\",\n \"[email protected]\"\n)\n\nprint(person.name)\nprint(person.email)\nprint(person.age())\n```\nExplain what the following variables refer to, and their scope:\n1. Person\n2. person\n3. surname\n4. self\n5. age (the function name)\n6. age (the variable used inside the function)\n7. self.email\n8. person.email",
"_____no_output_____"
],
[
"### write your answer here\n1. Person(): It is a global class.\n2. person: Instance of the Person class.\n3. surname: A paramater passed to the __initi__ method, it is a local variable.\n4. self: A parameter that is has been passed into the methods of each class, it will be replaced by the instance object when the method is called with a point.\n5. age (the function name): age is a method of the class Person.\n6. age (the variable used inside the function): It is a local variable of the method age.\n7. self.email: The attribute (email) of an object.\n8. person.email: The person instance is referred to by the variable name \"person\". We can use person.email or person.name...to access to the attributes. ",
"_____no_output_____"
],
[
"## Exercise 2\n1. Rewrite the `Person` class so that a person’s age is calculated for the first time when a new person instance is created, and recalculated (when it is requested) if the day has changed since the last time that it was calculated.",
"_____no_output_____"
]
],
[
[
"import datetime\n\nclass Person:\n\n def __init__(self, name, surname, birthdate, address, telephone, email):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n\n self.address = address\n self.telephone = telephone\n self.email = email\n\n self.age2 = 0\n self.year_last_recalculated = 0\n\n self.recalculate_age()\n today = datetime.date.today()\n age = today.year - self.birthdate.year\n\n if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):\n age -= 1\n\n self.year_last_recalculated = today\n\n return age\n\n def recalculate_age(self):\n today = datetime.date.today()\n if (datetime.date.today() > self.year_last_recalculated):\n age = today.year - self.birthdate.year\n if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):\n age -= 1\n return age",
"_____no_output_____"
]
],
[
[
"## Exercise 3\n1. Explain the differences between the attributes `name`, `surname` and `profession`, and what values they can have in different instances of this class:\n```python\nclass Smith:\n surname = \"Smith\"\n profession = \"smith\"\n\n def __init__(self, name, profession=None):\n self.name = name\n if profession is not None:\n self.profession = profession\n```",
"_____no_output_____"
],
[
"\"surname\" is always a class attribute, every instance will have a surname value of Smith. \n\"Profession\" too is a class attribute, but it can optionally be overridden by an instance attribute in the constructor _initi_.\n\"name\" is always an instance attribute which is set in the constructor, and each class instance can have a different name value. \nEach instance will have a \"profession\" value of smith unless the optional surname parameter is passed into the constructor _initi_ with a different value.",
"_____no_output_____"
],
[
"## Exercise 4:\n1. Create a class called `Numbers`, which has a single class attribute called `MULTIPLIER`, and a constructor which takes the parameters `x` and `y` (these should all be numbers).\n 1. Write a method called `add` which returns the sum of the attributes `x` and `y`.\n 2. Write a class method called `multiply`, which takes a single number parameter `a` and returns the product of `a` and `MULTIPLIER`.\n 3. Write a static method called `subtract`, which takes two number parameters, `b` and `c`, and returns `b - c`.\n 4. Write a method called `value` which returns a tuple containing the values of `x` and `y`. Make this method into a property, and write a setter and a deleter for manipulating the values of `x` and `y`.",
"_____no_output_____"
]
],
[
[
"class Numbers:\n \n MULTIPLIER = 20\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def add(self):\n return self.x + self.y\n\n @classmethod\n def multiply(cls, a):\n return cls.MULTIPLIER * a\n\n @staticmethod\n def subtract(b, c):\n return b - c\n\n @property\n def value(self):\n return (self.x, self.y)\n\n @value.setter\n def value(self, x_y):\n self.x, self.y = x_y\n\n @value.deleter\n def value(self):\n del self.x\n del self.y",
"_____no_output_____"
]
],
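[
[
"# Quick usage sketch for the Numbers class above (not part of the original exercise):\nn = Numbers(3, 4)\nprint(n.add())                  # 7\nprint(Numbers.multiply(2))      # 2 * MULTIPLIER = 40\nprint(Numbers.subtract(10, 4))  # 6\nprint(n.value)                  # (3, 4)\nn.value = (5, 6)                # property setter\nprint(n.value)                  # (5, 6)\ndel n.value                     # property deleter",
"_____no_output_____"
]
],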
[
[
"## Exercise 5:\n1. Create an instance of the `Person` class from example 2. Use the `dir` function on the instance. Then use the `dir` function on the class.\n 1. What happens if you call the `__str__` method on the instance? Verify that you get the same result if you call the `str` function with the instance as a parameter.\n 2. What is the type of the instance?\n 3. What is the type of the class?\n 4. Write a function which prints out the names and values of all the custom attributes of any object that is passed in as a parameter.",
"_____no_output_____"
]
],
[
[
"import datetime\nclass Person:\n \n def __init__(self, name, surname, birthdate, address, telephone, email):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n\n self.address = address\n self.telephone = telephone\n self.email = email\n def age(self):\n today = datetime.date.today()\n age = today.year - self.birthdate.year\n\n if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):\n age -= 1\n return age\n\nperson = Person(\n \"Kaouthar\",\n \"Bousbaa\",\n datetime.date(2002, 1, 18), # year, month, day\n \"No. 12 Short Street, Greenville\",\n \"555 456 0987\",\n \"[email protected]\"\n)",
"_____no_output_____"
]
],
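[
[
"# Illustrative sketch (not part of the original answer): the checks Exercise 5 asks for.\nprint(dir(person))                      # attributes and methods of the instance\nprint(dir(Person))                      # attributes and methods of the class\nprint(person.__str__() == str(person))  # True: str() simply calls __str__\nprint(type(person))                     # <class '__main__.Person'>\nprint(type(Person))                     # <class 'type'>",
"_____no_output_____"
]
],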
[
[
"1)- When we call the __str__ method on the instance, we will see something like '<__main__.Person object at 0x9fcb2456901d0>'.\n\n2)- <class '__main__.Person'> – __main__ is Python’s name for the program we are executing.\n\n3)- <class 'type'> – The class has the type type.",
"_____no_output_____"
]
],
[
[
"def print_attributes(object):\n for i, j in object.__dict__.items():\n print(\"%s: %s\" % (i, j))",
"_____no_output_____"
]
],
[
[
"## Exercise 6:\nWrite a class for creating completely generic objects: its `__init__` function should accept any number of keyword parameters, and set them on the object as attributes with the keys as names. Write a `__str__` method for the class – the string it returns should include the name of the class and the values of all the object’s custom instance attributes.",
"_____no_output_____"
]
],
[
[
"class class_:\n def __init__(self, **kwargs):\n for i, j in kwargs.items():\n setattr(self, i, j)\n\n def __str__(self):\n attributes = [\"%s=%s\" % (i, j) for (i, j) in self.__dict__.items()]\n class_name = self.__class__.__name__\n return \"%s: %s\" % (class_name, \" \".join(attributes))",
"_____no_output_____"
],
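[
"# Illustrative usage (not part of the original answer): any keyword arguments become\n# attributes, and __str__ reports the class name plus the custom attributes.\nobj = class_(name='Jane', age=31)\nprint(obj)  # class_: name=Jane age=31",
"_____no_output_____"
]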
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdc1e0c5631a4b7e3aabc18b4638a32b94769aa | 142,318 | ipynb | Jupyter Notebook | 4-seaborn/.ipynb_checkpoints/Exo2-checkpoint.ipynb | JbDumaine/PythonEDA | da82fde6c4beb4d3e9e671bdcfd5cc74ccd3f50f | [
"MIT"
] | null | null | null | 4-seaborn/.ipynb_checkpoints/Exo2-checkpoint.ipynb | JbDumaine/PythonEDA | da82fde6c4beb4d3e9e671bdcfd5cc74ccd3f50f | [
"MIT"
] | null | null | null | 4-seaborn/.ipynb_checkpoints/Exo2-checkpoint.ipynb | JbDumaine/PythonEDA | da82fde6c4beb4d3e9e671bdcfd5cc74ccd3f50f | [
"MIT"
] | null | null | null | 502.890459 | 56,440 | 0.940703 | [
[
[
"Exo 2 :",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"dataSalesPredictions=pd.read_csv('sales_predictions.csv')\ndataSalesPredictions.head()",
"_____no_output_____"
],
[
"sns.relplot(x=dataSalesPredictions[\"item_price\"],y=dataSalesPredictions[\"date\"],data=dataSalesPredictions)",
"_____no_output_____"
],
[
"newDSP = dataSalesPredictions.sample(50)",
"_____no_output_____"
],
[
"sns.relplot(x=newDSP[\"item_price\"],y=newDSP[\"date\"],data=newDSP)",
"_____no_output_____"
],
[
"a4_dims = (11.7, 8.27)\nfig, ax = plt.subplots(figsize=a4_dims)\nsns.scatterplot(x=newDSP[\"item_price\"],y=newDSP[\"date\"],ax=ax,data=newDSP)",
"_____no_output_____"
],
[
"newDSP[\"date\"] = pd.to_datetime(newDSP[\"date\"], format=\"%m/%d/%Y, %H:%M:%S\")\nsns.scatterplot(x=newDSP[\"item_price\"],y=newDSP[\"date\"],ax=ax,data=newDSP)",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc252b830627d92ef21ba28907457c1802b578 | 38,031 | ipynb | Jupyter Notebook | hokkaido.ipynb | geneasyura/cov19-hm | 1dba1b0198ab90c263fd75cd1d423131388bb836 | [
"Apache-2.0"
] | 1 | 2020-12-13T14:17:48.000Z | 2020-12-13T14:17:48.000Z | hokkaido.ipynb | geneasyura/cov19-hm | 1dba1b0198ab90c263fd75cd1d423131388bb836 | [
"Apache-2.0"
] | null | null | null | hokkaido.ipynb | geneasyura/cov19-hm | 1dba1b0198ab90c263fd75cd1d423131388bb836 | [
"Apache-2.0"
] | null | null | null | 31.327018 | 112 | 0.518551 | [
[
[
"#!/usr/bin/python3\n# coding: utf-8\n# Hokkaido",
"_____no_output_____"
],
[
"import codecs\nfrom datetime import datetime as dt\nfrom datetime import timedelta as td\nfrom jma_csvdl import save_jma_data, parse_jma_csv\nimport json\nimport matplotlib\nimport sys\nif \"ipy\" not in sys.argv[0]:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom sklearn.neighbors import LocalOutlierFactor\nimport numpy as np\nimport os\nimport pandas as pd\nimport plotly\nimport plotly.express as px\nimport plotly.tools as tls\nimport plotly.graph_objects as go\nimport plotly.io as pio\nimport plotly.offline as offline\nfrom plotly.subplots import make_subplots\nimport sys\nif \"ipy\" in sys.argv[0]:\n offline.init_notebook_mode()\nfrom PIL import Image\nfrom cov19utils import create_basic_plot_figure, \\\n show_and_clear, moving_average, \\\n blank2zero, csv2array, \\\n get_twitter, tweet_with_image, \\\n get_gpr_predict, FONT_NAME, DT_OFFSET, \\\n download_if_needed, json2nparr, code2int, age2int, \\\n get_populations, get_os_idx_of_arr, dump_val_in_arr, \\\n calc_last1w2w_dif, create_basic_scatter_figure, \\\n show_and_save_plotly, make_exp_fit_graph, save_plotly_in_en\nfrom hokkaidomap import sub_prefs, get_sub_code, get_hokkaido, \\\n make_hokkaido_choropleth, make_hokkaido_plotly\nfrom urllib.request import urlretrieve",
"_____no_output_____"
],
[
"if dt.now().weekday() != 5:\n print(\"Today is not Saturday.\")\n if not \"ipy\" in sys.argv[0]:\n sys.exit()",
"_____no_output_____"
],
[
"if dt.now().hour < 18:\n print(\"before 6 pm.\")\n if not \"ipy\" in sys.argv[0]:\n sys.exit()",
"_____no_output_____"
],
[
"pref_day_file = \"hokkaido_01shinkoukyoku_day.csv\"\nu='https://www.harp.lg.jp/opendata/api/package_show?id=752c577e-0cbe-46e0-bebd-eb47b71b38bf'",
"_____no_output_____"
],
[
"urlretrieve(u, 'hokkaido-api.json')",
"_____no_output_____"
],
[
"f=codecs.open(\"hokkaido-api.json\", encoding='utf-8')\ntry:\n x=json.load(f)\nexcept:\n print(\"decode error: file is invalid.\")\n if \"ipy\" in sys.argv[0]:\n pass#exit()\n else:\n sys.exit()\nf.close()",
"_____no_output_____"
],
[
"#x",
"_____no_output_____"
],
[
"with open(\"hokkaido.prev.tmp\", \"rt\") as f:\n prev = f.read().rstrip()\n\nfor i in x['result']['resources']:\n if i['filename'] == pref_day_file:\n updated = i['updated'][:16]\n break\n\nprint(updated, prev)\nif updated == prev:\n print(\"maybe the same data, nothing to do.\")\n if \"ipy\" in sys.argv[0]:\n pass#exit()\n else:\n sys.exit()",
"_____no_output_____"
],
[
"today_str = dt.now().isoformat()[:16].replace('T', ' ')\n# 北海道 OpenData を参照する\n# https://www.harp.lg.jp/opendata/dataset/1369.html\n# 北海道は面積が広いため、振興局別に集計を行う\nbase_uri = \"https://www.harp.lg.jp/opendata/dataset/1369/resource/3883/\"\ndownload_if_needed(base_uri, pref_day_file)\nbase_uri = \"https://www.harp.lg.jp/opendata/dataset/1369/resource/2853/\"\ndata_file = \"covid19_data.csv\"\ndownload_if_needed(base_uri, data_file)",
"_____no_output_____"
],
[
"df = pd.read_csv(pref_day_file, encoding='shift-jis')\nprint(\"Total: {}\".format(len(df)))",
"_____no_output_____"
],
[
"# Pandas DataFrame を作成する\ndf['道外+非公表'] = df['道外他'] + df['非公表']\ndf['年月日'] = pd.to_datetime(df['年月日'], format=\"%Y-%m-%d\")\n#df",
"_____no_output_____"
],
[
"# Daily の新規感染者を集計する\ndaily_new = df\n# duration\nfrm_date = df.at[0, '年月日']\nend_date = df.at[len(df)-1, '年月日']\nprint(\"From: {} To: {}\".format(frm_date, end_date))",
"_____no_output_____"
],
[
"# moving average by week\nave_mov_days = 7\n# 移動平均を算出する\nmov_mean = moving_average(df['日計'])",
"_____no_output_____"
],
[
"# 4 weeks later\nxbins = df['年月日'].to_list()\ndays2pred = 2 * ave_mov_days # 2 weeks\n# 2週間先の日付列を取得する\ntwo_weeks_later = pd.date_range(end_date + td(days=1), end_date + td(days=days2pred)).to_pydatetime()\nxbins_pred = xbins.copy()\nxbins_pred.extend(two_weeks_later)\n#print(xbins_pred)\nX = np.arange(0, len(df['年月日']))[:, np.newaxis]\nX_pred = np.arange(0, len(xbins_pred))[:, np.newaxis]",
"_____no_output_____"
],
[
"y_gpr = get_gpr_predict(X, df['日計'].to_list(), X_pred, 10, 10, 10)",
"_____no_output_____"
],
[
"#X",
"_____no_output_____"
],
[
"save_jma_data(\"sapporo-jma.csv\", city_code=\"s47412\")\nweather_sapporo = parse_jma_csv(\"sapporo-jma.csv\")\nprint(\"Loaded {} data.\".format(len(weather_sapporo)))\ndf_weather = pd.DataFrame(weather_sapporo, columns=['Date', 'Temp', 'RH', 'VP', 'AP', 'AH', 'Fd'])\nfig = px.scatter(df_weather, x='Date', y=['AH'])\nfig.update_layout(template='plotly_dark')\nif \"ipy\" in sys.argv[0]:\n fig.show()",
"_____no_output_____"
],
[
"np_weather = np.array(weather_sapporo)",
"_____no_output_____"
],
[
"#np_weather[:, 0]",
"_____no_output_____"
],
[
"fig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(go.Scatter(\n x=xbins, y=df['日計'], mode='markers', name='新規',\n marker=dict(size=4)), secondary_y=False)\nfig.add_trace(go.Bar(\n x=xbins, y=mov_mean, name='7日移動平均', opacity=0.6),\n secondary_y=False)\nfig.add_trace(go.Scatter(\n x=xbins_pred, y=y_gpr, mode='lines', name='予測',\n line=dict(width=1)), secondary_y=False)\nfig.add_trace(go.Scatter(\n x=np_weather[:, 0], # 日付\n y=moving_average(np_weather[:, 1]), # 気温\n name=\"札幌平均気温\",\n line=dict(width=1)), secondary_y=True)\nfig.update_layout(\n xaxis=dict(title='日付', type='date',\n dtick=1209600000.0, tickformat=\"%_m/%-d\",\n range=(xbins[4], xbins_pred[-1])),\n yaxis=dict(title='新規感染者数', type=\"log\"),\n yaxis2=dict(title='札幌平均気温の移動平均'),\n title='北海道 新型コロナ 新規感染者数/札幌平均気温({})'.format(today_str),\n)\nshow_and_save_plotly(fig, \"hokkaido.jpg\", js=False, show=False)",
"_____no_output_____"
],
[
"today_str = dt.now().isoformat()[:19].replace('T', ' ')\ntw_body = \"北海道 新型コロナ 新規感染者数/気温(\" + today_str + \" 時点)\"\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\ntw_body += \"(気象業務法第13~24条に接触するため、予報を含まない前日までの気温を表示) \"\ntw = get_twitter()\ntweet_with_image(tw, \"docs/images/hokkaido.jpg\", tw_body)",
"_____no_output_____"
],
[
"tw_body = 'Hokkaido Daily new confirmed COVID-19 cases; last updated on ' + today_str\nfig['layout']['title']['text'] = tw_body\nfig['layout']['xaxis']['title']['text'] = 'date'\nfig['layout']['yaxis']['title']['text'] = 'new cases [log]'\nfig['layout']['yaxis2']['title']['text'] = \"air temperature [℃]\"\nfig['data'][0]['name'] = 'cases'\nfig['data'][1]['name'] = 'rolling'\nfig['data'][2]['name'] = 'filtered'\nfig['data'][3]['name'] = 'temperature'\nfig.update_layout(width=1000, height=700)\nsave_plotly_in_en(fig, \"hokkaido.jpg\")",
"_____no_output_____"
],
[
"tw_body += \" (air temperature values are shown in the rolling 7-day average) \"\ntweet_with_image(tw, \"docs/images/en/hokkaido.jpg\", tw_body)",
"_____no_output_____"
],
[
"fig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(go.Scatter(\n x=xbins, y=daily_new, mode='markers', name='新規',\n marker=dict(size=4)), secondary_y=False)\nfig.add_trace(go.Bar(\n x=xbins, y=mov_mean, name='7日移動平均', opacity=0.6),\n secondary_y=False)\nfig.add_trace(go.Scatter(\n x=xbins_pred, y=y_gpr, mode='lines', name='予測',\n line=dict(width=1)), secondary_y=False)\nfig.add_trace(go.Scatter(\n x=np_weather[:, 0], # 日付\n y=moving_average(np_weather[:, 5]), # 絶対湿度\n name=\"絶対湿度\",\n line=dict(width=1)), secondary_y=True)\nfig.update_layout(\n xaxis=dict(title='日付', type='date',\n dtick=1209600000.0, tickformat=\"%_m/%-d\",\n range=(xbins[4], xbins_pred[-1])),\n yaxis=dict(title='新規感染者数', type=\"log\"),\n yaxis2=dict(title='札幌平均容積絶対湿度 [g/㎥] 移動平均'),\n title='北海道 新型コロナ 新規感染者数/札幌絶対湿度({})'.format(today_str),\n)\nshow_and_save_plotly(fig, \"hokkaido-ah.jpg\", js=False, show=False)",
"_____no_output_____"
],
[
"today_str = dt.now().isoformat()[:19].replace('T', ' ')\ntw_body = \"北海道 新型コロナ 新規感染者数/絶対湿度(\" + today_str + \" 時点)\"\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\ntw_body += \"(気象業務法第13~24条に接触するため、予報を含まない前日までの絶対湿度を表示) \"\n#tw = get_twitter()\ntweet_with_image(tw, \"docs/images/hokkaido-ah.jpg\", tw_body)",
"_____no_output_____"
],
[
"tw_body = 'Hokkaido Daily new confirmed COVID-19 cases; last updated on ' + today_str\nfig['layout']['title']['text'] = tw_body\nfig['layout']['xaxis']['title']['text'] = 'date'\nfig['layout']['yaxis']['title']['text'] = 'new cases [log]'\nfig['layout']['yaxis2']['title']['text'] = \"volumetric humidity [g/㎥]\"\nfig['data'][0]['name'] = 'cases'\nfig['data'][1]['name'] = 'rolling'\nfig['data'][2]['name'] = 'filtered'\nfig['data'][3]['name'] = 'VH [g/㎥]'\nfig.update_layout(width=1000, height=700)\nsave_plotly_in_en(fig, \"hokkaido-ah.jpg\")",
"_____no_output_____"
],
[
"tw_body += \" (volumetric humidity values are shown in the rolling 7-day average) \"\ntweet_with_image(tw, \"docs/images/en/hokkaido-ah.jpg\", tw_body)",
"_____no_output_____"
],
[
"fig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(go.Scatter(\n x=xbins, y=daily_new, mode='markers', name='新規',\n marker=dict(size=4)), secondary_y=False)\nfig.add_trace(go.Bar(\n x=xbins, y=mov_mean, name='7日移動平均', opacity=0.6),\n secondary_y=False)\nfig.add_trace(go.Scatter(\n x=xbins_pred, y=y_gpr, mode='lines', name='予測',\n line=dict(width=1)), secondary_y=False)\nfig.add_trace(go.Scatter(\n x=np_weather[:, 0], # 日付\n y=np_weather[:, 6], # 絶対湿度\n name=\"空気抵抗力\",\n line=dict(width=1)), secondary_y=True)\nfig.update_layout(\n xaxis=dict(title='日付', type='date',\n dtick=1209600000.0, tickformat=\"%_m/%-d\",\n range=(xbins[4], xbins_pred[-1])),\n yaxis=dict(title='人数', type=\"log\"),\n yaxis2=dict(title='ウィルスに働く空気抵抗力'),\n title='北海道 新型コロナ 新規感染者数/空気抵抗力({})'.format(today_str),\n)\nshow_and_save_plotly(fig, \"hokkaido-fd.jpg\", js=False, show=False)",
"_____no_output_____"
],
[
"if False:\n tw_body = \"北海道 新型コロナ 新規感染者数/空気抵抗力(\" + today_str + \" 時点)\"\n tw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\n tw_body += \"(ウィルス微粒子に働く空気抵抗力をプロット、低いほど活発になるはず)\"\n #tw = get_twitter()\n tweet_with_image(tw, \"docs/images/hokkaido-fd.jpg\", tw_body)\n #tw_body",
"_____no_output_____"
],
[
"cov19data = []\n# CSVデータを整形する\nwith codecs.open(data_file, encoding=\"shift-jis\") as f:\n l = f.readline() # 先頭行をスキップ\n while l:\n l = f.readline().replace(\"\\r\\n\", \"\")\n arr = l.split(',')\n if len(arr) == 25:\n tracked = blank2zero(arr[20])\n untracked = blank2zero(arr[21])\n unknown_rate = (untracked / max(1.0, (tracked + untracked))) * 100.0\n #print(tracked, untracked, unknown_rate)\n cov19data.append(\n [\n dt(int(arr[1]), int(arr[2]), int(arr[3])), # date\n blank2zero(arr[4]), # tests\n blank2zero(arr[6]), # positive\n blank2zero(arr[19]), # postive rate [%]\n tracked, # tracked 濃厚接触\n untracked, # untracked 濃厚接触以外\n unknown_rate # 経路不明率\n ]\n )\n\nprint(\"Total: {}\".format(len(cov19data)))",
"_____no_output_____"
],
[
"pos_rate_np = np.array(cov19data)\n#print(\"{}\".format(pos_rate_np))",
"_____no_output_____"
],
[
"xbins = pos_rate_np[:, 0]\ntwo_weeks_later = pd.date_range(xbins[-1] + td(days=1), xbins[-1] + td(days=days2pred)).to_pydatetime()\nxbins_pred = xbins.tolist()\nxbins_pred.extend(two_weeks_later)\nX = np.arange(0, len(pos_rate_np[:, 0]))[:, np.newaxis]\nX_pred = np.arange(0, len(xbins_pred))[:, np.newaxis]\n\ny_test = get_gpr_predict(X, pos_rate_np[:, 1], X_pred, 10, 10, 10)\ny_rate = get_gpr_predict(X, pos_rate_np[:, 3], X_pred, 10, 10, 10)\ny_unkn = get_gpr_predict(X, pos_rate_np[:, 6], X_pred, 10, 10, 10)",
"_____no_output_____"
],
[
"fig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(go.Scatter(x=xbins, y=pos_rate_np[:, 1], mode='markers', name='検査人数',\n marker=dict(size=4)), secondary_y=False)\nfig.add_trace(go.Bar(x=xbins, y=moving_average(pos_rate_np[:, 1]), name='移動平均', opacity=0.5), \n secondary_y=False)\nfig.add_trace(go.Scatter(x=xbins_pred, y=y_test, mode='lines', name='予測値',\n line=dict(width=1)), secondary_y=False)\nfig.add_trace(go.Bar(x=xbins, y=pos_rate_np[:, 3], name=\"陽性率[%]\", opacity=0.5),\n secondary_y=True)\nfig.add_trace(go.Scatter(x=xbins_pred, y=y_rate, name=\"予測値\",\n line=dict(width=1)), secondary_y=True)\nfig.update_layout(\n xaxis=dict(title='日付', type='date',\n dtick=1209600000.0, tickformat=\"%_m/%-d\",\n range=[xbins_pred[30], xbins_pred[-1]]),\n #yaxis=dict(title='人数', range=[0, np.max(pos_rate_np[:, 1])]),\n yaxis=dict(title='人数', type=\"log\"),\n yaxis2=dict(title='陽性率[%]', range=[0, np.max(pos_rate_np[:, 3])]),\n title='北海道 新型コロナ 検査人数/陽性率({})'.format(today_str),\n)\nshow_and_save_plotly(fig, \"hokkaido-rate.jpg\", js=False, show=False)",
"_____no_output_____"
],
[
"tw_body = \"北海道 新型コロナ 検査人数/陽性率(\" + today_str + \" 時点)\"\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\ntweet_with_image(tw, \"docs/images/hokkaido-rate.jpg\", tw_body)",
"_____no_output_____"
],
[
"tw_body = 'Hokkaido Daily new COVID-19 tests and positive rates;'\ntw_body += \"<br> last updated on \" + today_str\nfig['layout']['title']['text'] = tw_body\nfig['layout']['xaxis']['title']['text'] = 'date'\nfig['layout']['yaxis']['title']['text'] = 'tests [log]'\nfig['layout']['yaxis2']['title']['text'] = 'positive rate [%]'\n\nfig['data'][0]['name'] = 'tests'\nfig['data'][1]['name'] = 'rolling'\nfig['data'][2]['name'] = 'filtered'\nfig['data'][3]['name'] = 'positive rates'\nfig['data'][4]['name'] = 'filtered'\nfig.update_layout(width=1000, height=700)\nsave_plotly_in_en(fig, \"hokkaido-rate.jpg\")",
"_____no_output_____"
],
[
"tweet_with_image(tw, \"docs/images/en/hokkaido-rate.jpg\", tw_body.replace('<br>', ''))",
"_____no_output_____"
],
[
"fig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(go.Scatter(x=xbins, y=pos_rate_np[:, 1], mode='markers', name='検査人数',\n marker=dict(size=4)), secondary_y=False)\nfig.add_trace(go.Bar(x=xbins, y=moving_average(pos_rate_np[:, 1]), name='移動平均', opacity=0.5), \n secondary_y=False)\nfig.add_trace(go.Scatter(x=xbins_pred, y=y_test, mode='lines', name='予測値',\n line=dict(width=1)), secondary_y=False)\nfig.add_trace(go.Bar(x=xbins, y=pos_rate_np[:, 6], name=\"経路不明率[%]\", opacity=0.5),\n secondary_y=True)\nfig.add_trace(go.Scatter(x=xbins_pred, y=y_unkn, name=\"予測値\",\n line=dict(width=1)), secondary_y=True)\nfig.update_layout(\n xaxis=dict(title='日付', type='date',\n dtick=1209600000.0, tickformat=\"%_m/%-d\",\n range=[xbins_pred[30], xbins_pred[-1]]),\n yaxis=dict(title='人数', range=[0, np.max(pos_rate_np[:, 1])]),\n yaxis2=dict(title='経路不明率[%]', range=[0, np.max(pos_rate_np[:, 6])]),\n title='北海道 新型コロナ 検査人数/経路不明率({})'.format(today_str),\n)\nshow_and_save_plotly(fig, \"hokkaido-unknown.jpg\", js=False, show=False)",
"_____no_output_____"
],
[
"today_str = dt.now().isoformat()[:19].replace('T', ' ')\ntw_body = \"北海道 新型コロナ 検査人数/経路不明率(\" + today_str + \" 時点)\"\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\ntweet_with_image(tw, \"docs/images/hokkaido-unknown.jpg\", tw_body)",
"_____no_output_____"
],
[
"# 振興局別感染者数\nsub_poss = daily_new.sum()\n#print(\"Sub-pref Pos: \\n{}\".format(sub_poss))\npnames = [\"道外+非公表\", \"石狩\", \"渡島\", \"檜山\", \"後志\", \"空知\", \"上川\",\n\"留萌\", \"宗谷\", \"オホーツク\", \"胆振\", \"日高\", \"十勝\", \"釧路\", \"根室\"]\nvals_cases = [sub_poss[p] for p in pnames]\n#vals_cases",
"_____no_output_____"
],
[
"tw_body = \"北海道 新型コロナ 振興局別 罹患率[全期間] (\" + today_str + \")\"\nimgname = \"hokkaido-all.jpg\"\nvals = np.zeros(len(sub_prefs.keys()), dtype=float)\nfor i in range(len(vals_cases)):\n vals[i] = (vals_cases[i] / sub_prefs[i]['total']) * 100.0\nmake_hokkaido_choropleth(imgname, tw_body, vals, show=False)",
"_____no_output_____"
],
[
"tw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido-hm.html \"\ntweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)",
"_____no_output_____"
],
[
"tw_body = \"北海道 新型コロナ 振興局別 陽性者数[全期間] (\" + today_str + \")\"\nimgname = \"hokkaido-all-n.jpg\"\nmake_hokkaido_choropleth(imgname, tw_body, vals_cases, show=False)",
"_____no_output_____"
],
[
"#tw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido.html \"\n#tweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)",
"_____no_output_____"
],
[
"dt_last1w = end_date - td(7)\ndt_last2w = end_date - td(14)\nprint(end_date, dt_last1w, dt_last2w)",
"_____no_output_____"
],
[
"df = daily_new",
"_____no_output_____"
],
[
"# 直近1週間\ndf_last1w = df[df['年月日'] > dt_last1w]\nsub_last1w = df_last1w.sum()\n#sub_last1w",
"_____no_output_____"
],
[
"# 直近2週間\ndf_last2w = df[df['年月日'] > dt_last2w]\nsub_last2w = df_last2w.sum()\n#sub_last2w",
"_____no_output_____"
],
[
"# 直近1週間陽性者数\nsub_pos_last1w = np.zeros(len(pnames))\nfor i in range(len(pnames)):\n sub_pos_last1w[i] = sub_last1w[pnames[i]]\nprint(sub_pos_last1w.astype(int))\n# 直近2週間陽性者数\nsub_pos_last2w = np.zeros(len(pnames))\nfor i in range(len(pnames)):\n sub_pos_last2w[i] = sub_last2w[pnames[i]]\nprint(sub_pos_last2w.astype(int))",
"_____no_output_____"
],
[
"# 計算式は「(直近7日間の新規陽性者数/その前7日間の新規陽性者数)^(平均世代時間/報告間隔)」\nagt = 5 # 平均世代時間\nri = 7 # 報告間隔\nmin_smpl = 5 # 最低サンプリング数\nRt = []\nfor i in np.arange(len(sub_pos_last1w)):\n div = (sub_pos_last2w[i] - sub_pos_last1w[i])\n if div == 0 and sub_pos_last1w[i] > 0:\n Rt.append(1.0) # 0から増加した場合は他地域からの流入\n elif div == 0:\n Rt.append(0.0) # 0を維持\n elif sub_pos_last2w[i] < min_smpl and sub_pos_last1w[i] < min_smpl:\n Rt.append(1) # サンプリングが少ない場合、1 と仮定\n else:\n r = (sub_pos_last1w[i] / div) ** (agt / ri)\n Rt.append(r)\nprint(Rt)",
"_____no_output_____"
],
[
"tw_body = \"北海道 新型コロナ 振興局別 実効再生産数[簡易計算] (\" + today_str + \")\"\nimgname = \"hokkaido-Rt.jpg\"\nmake_hokkaido_choropleth(imgname, tw_body, Rt, show=False)\n#make_hokkaido_plotly(imgname, tw_body, Rt)\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido-hm.html \"\ntweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)",
"_____no_output_____"
],
[
"tw_body = \"北海道 新型コロナ 振興局別 直近2週間罹患率 (\" + today_str + \")\"\nimgname = \"hokkaido-2w.jpg\"\nvals = []\nfor k, v in sub_prefs.items():\n vals.append((sub_pos_last2w[k] / v['total']) * 100.0)\n#print(vals)\nmake_hokkaido_choropleth(imgname, tw_body, vals, show=False)\n#make_hokkaido_plotly(imgname, tw_body, vals)\ntw_body += \" https://geneasyura.github.io/cov19-hm/hokkaido-hm.html \"\ntweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)",
"_____no_output_____"
],
[
"if False:\n title = '北海道 新型コロナ 新規感染者数/指数近似 (' + today_str + ')'\n xos = 300\n make_exp_fit_graph(tw,\n pos_rate_np[xos:,0], pos_rate_np[xos:, 2],\n title, \"hokkaido-fit.jpg\",\n \"hokkaido-doubling-time.html\", \"hokkaido-fit.html\")",
"_____no_output_____"
],
[
"if False:\n title = '北海道 新型コロナ 新規移動平均/指数近似 (' + today_str + ')'\n xos = 265\n xbins = np.array([i.to_pydatetime() for i in daily_new.index.tolist()])\n ybins = np.array(daily_new.rolling(ave_mov_days).mean().to_list())\n make_exp_fit_graph(tw,\n xbins[xos:], ybins[xos:],\n title, \"hokkaido-fit-ave.jpg\",\n \"hokkaido-doubling-time-ave.html\", \"hokkaido-fit.html\")",
"_____no_output_____"
],
[
"# 新規感染者数 移動平均\n#mv_mean = daily_new.rolling(ave_mov_days).mean()\nmv_mean = mov_mean\n# 札幌平均気温 移動平均\nx_mv = moving_average(np_weather[:, 1])\n# 札幌絶対湿度 移動平均\ny_mv = moving_average(np_weather[:, 5])\n# 札幌 移動平均\nz_mv = moving_average(np_weather[:, 2])\n# 気象情報 日付\nweather_dates = np_weather[:, 0]\n#print(weather_dates[0], weather_dates[-1])",
"_____no_output_____"
],
[
"#xbins",
"_____no_output_____"
],
[
"# 詰め替え\nsizes = []\nxbins = []\nybins = []\nzbins = []\ncolors = []\ntexts = []\ndofs = 7 # = 報告日 - 感染日\nall_days = df['年月日'].to_list()\nmv_max = np.max(mv_mean)\nfor i in range(len(all_days)):\n v = mv_mean[i]\n ts = all_days[i]\n if ts < dt(2021, 1, 1, 0, 0, 0, 0): continue\n if ts in weather_dates:\n i = int(np.where(weather_dates == ts)[0]) - dofs\n ts_str = \"{}\".format(ts - td(days=dofs))\n msg = \"%s =%d v:%.2f t:%.2f vh:%.2f f:%.2f\" % \\\n (ts_str[:10], i, v, x_mv[i], y_mv[i], z_mv[i])\n xbins.append(x_mv[i]);\n ybins.append(y_mv[i]);\n zbins.append(z_mv[i]);\n colors.append(v); sizes.append(max(5,v*10/mv_max))\n texts.append(\"%s: %.1f人\" % (ts_str[:10], v)); \n #print(msg)",
"_____no_output_____"
],
[
"def gen_hokkaido_rel_graph(x, y, xtype, ytype, xlabel, ylabel, imgname):\n fig = go.Figure()\n fig.add_trace(go.Scatter(\n mode='markers', x=x, y=y, text=texts,\n marker=dict(\n opacity=0.85, size=sizes, color=colors,\n colorscale=[[0, 'rgb(59, 70, 222)'],\n [.3, 'rgb(255, 255, 255)'],\n [1, 'rgb(178, 10, 28)']],\n colorbar_title = '人数移動平均')))\n fig.update_layout(template='plotly_dark')\n tw_body = '北海道(札幌) {}/{}と感染日 ('.format(xtype, ytype) + \\\n today_str[:13] + '時)'\n fig.update_layout(\n height=500, width=500, margin=dict(l=12, r=5, b=12, t=42),\n xaxis =dict(domain=[0, 1], showgrid=True, title=xlabel),\n yaxis =dict(domain=[0, 1], title=ylabel),\n title=tw_body,\n showlegend=False)\n show_and_save_plotly(fig, imgname, js=False)\n if False:\n tw_body += \" (2020/5/25以降) \"\n tw_body += \" https://geneasyura.github.io/cov19-hm/{} \".format(\"hokkaido-trh-tvh.html\")\n tweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)",
"_____no_output_____"
],
[
"gen_hokkaido_rel_graph(\n xbins, ybins, \"気温\", \"絶対湿度\", \n '札幌 平均気温 移動平均 [℃]',\n '札幌 容積絶対湿度 移動平均 [g/㎥]',\n \"hokkaido-tvh.jpg\")",
"_____no_output_____"
],
[
"gen_hokkaido_rel_graph(\n xbins, zbins, \"気温\", \"相対湿度\", \n '札幌 平均気温 移動平均 [℃]',\n '札幌 相対湿度 移動平均 [%RH]',\n \"hokkaido-trh.jpg\")",
"_____no_output_____"
],
[
"def gen_hokkaido_rel_contour(x, y, xtype, ytype, xlabel, ylabel, imgname):\n fig = go.Figure()\n fig.add_trace(go.Contour(\n x=x, y=y, z=colors, text=texts,\n colorbar_title='感染人数',\n contours=dict(\n coloring='heatmap',\n showlabels=True,\n labelfont=dict(size=12,color='white')\n )))\n fig.update_layout(template='plotly_dark')\n\n fig.add_trace(go.Scatter(\n mode='markers', x=x, y=y, text=texts,\n marker_line_width=1, marker_line_color=\"black\",\n marker=dict(\n opacity=0.8, size=sizes, color=colors,\n colorscale=[[0, 'rgb(0, 255, 0)'],\n [.4, 'rgb(128, 128, 0)'],\n [1, 'rgb(255, 0, 0)']])))\n #colorbar_title = '人数移動平均')))\n\n tw_body = '北海道(札幌) 感染日の{}/{} ('.format(xtype, ytype) + \\\n today_str[:13] + '時)'\n fig.update_layout(\n height=500, width=500, margin=dict(l=12, r=5, b=12, t=42),\n xaxis=dict(domain=[0, 1], showgrid=False, title=xlabel,\n range=[min(x), max(x)]),\n yaxis=dict(domain=[0, 1], showgrid=False, title=ylabel,\n range=[min(y), max(y)]),\n title=tw_body,\n showlegend=False)\n show_and_save_plotly(fig, imgname, js=False)\n if False:\n tw_body += \" (2020/5/25以降) \"\n tw_body += \" https://geneasyura.github.io/cov19-hm/{} \".format(\"hokkaido-trh-tvh.html\")\n tweet_with_image(tw, \"docs/images/{}\".format(imgname), tw_body)\n return fig",
"_____no_output_____"
],
[
"fig = gen_hokkaido_rel_contour(\n xbins, ybins, \"気温\", \"絶対湿度\", \n '札幌 平均気温 移動平均 [℃]',\n '札幌 容積絶対湿度 移動平均 [g/㎥]',\n \"hokkaido-tvh-contour.jpg\")",
"_____no_output_____"
],
[
"fig['layout']['width'] = 800\nfig['layout']['height'] = 800\ntw_body = 'Hokkaido (Sapporo) Daily new confirmed COVID-19 cases '\nfig['layout']['title']['text'] = tw_body\nfig['layout']['xaxis']['title']['text'] = 'air temperature in Sapporo [℃]'\nfig['layout']['yaxis']['title']['text'] = 'volumetric humidity in Sapporo [g/㎥]'\nfig['data'][0]['colorbar']['title']['text'] = 'cases'\nsave_plotly_in_en(fig, \"hokkaido-tvh-contour.jpg\")",
"_____no_output_____"
],
[
"if False:\n tw_body += \" (plotted values are shown in the rolling 7-day average) \"\n tweet_with_image(tw, \"docs/images/en/hokkaido-tvh-contour.jpg\", tw_body)",
"_____no_output_____"
],
[
"fig = gen_hokkaido_rel_contour(\n xbins, zbins, \"気温\", \"相対湿度\", \n '札幌 平均気温 移動平均 [℃]',\n '札幌 相対湿度 移動平均 [%RH]',\n \"hokkaido-trh-contour.jpg\")",
"_____no_output_____"
],
[
"fig['layout']['width'] = 800\nfig['layout']['height'] = 800\ntw_body = 'Hokkaido (Sapporo) Daily new confirmed COVID-19 cases '\nfig['layout']['title']['text'] = tw_body\nfig['layout']['xaxis']['title']['text'] = 'air temperature in Sapporo [℃]'\nfig['layout']['yaxis']['title']['text'] = 'relative humidity in Sapporo [%RH]'\nfig['data'][0]['colorbar']['title']['text'] = 'cases'\nsave_plotly_in_en(fig, \"hokkaido-trh-contour.jpg\")",
"_____no_output_____"
],
[
"if False:\n tw_body += \" (plotted values are shown in the rolling 7-day average) \"\n tweet_with_image(tw, \"docs/images/en/hokkaido-trh-contour.jpg\", tw_body)",
"_____no_output_____"
],
[
"with open(\"hokkaido.prev.tmp\", \"wt\") as f:\n f.write(updated)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc2bb22ecebcd34eaf157b69a15eb01d75335b | 264,710 | ipynb | Jupyter Notebook | Visualizzazione-FCM.ipynb | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | Visualizzazione-FCM.ipynb | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | Visualizzazione-FCM.ipynb | ritafolisi/Tirocinio | c9a14ac33ab20c3c6524d32de4634f93ece001fb | [
"CC-BY-4.0"
] | null | null | null | 422.859425 | 52,304 | 0.940097 | [
[
[
"import pandas as pd\nimport numpy as np\nimport sklearn as sk\nimport os\nimport sys\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"sys.path.append('/home/alessia/Tirocinio/Fuzzy-C')",
"_____no_output_____"
],
[
"from FCM import *",
"_____no_output_____"
]
],
[
[
"# Setosa",
"_____no_output_____"
]
],
[
[
"dataset=pd.read_csv(\"Dataset/iris-setosa.csv\")\nX = dataset[[\"sepal_length\", \"sepal_width\"]].values\ny = dataset[\"species\"].values\n#print(\"features:\\n\", X, \"\\nLabels:\\n \", y)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\niris_values_std = StandardScaler().fit_transform(X)\n\nfrom sklearn.decomposition import PCA\npca_2d = PCA(n_components=2)\niris_values_2d = pca_2d.fit_transform(iris_values_std)\n#print(iris_values_2d)",
"_____no_output_____"
],
[
"#stampa\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef gr_dataset():\n plt.figure(figsize=(7,7))\n plt.xticks(np.arange(-4, 4, 1))\n for lab, col in zip((1, 0),\n ('blue', 'red')):\n plt.scatter(iris_values_2d[y==lab, 0],\n iris_values_2d[y==lab, 1],\n label=lab,\n c=col)\n\ngr_dataset()",
"_____no_output_____"
],
[
"def gr_membership_contour(estimated_membership):\n x = np.arange(-4, 4, .1)\n y = np.arange(-4, 4, .1)\n X, Y = np.meshgrid(x, y)\n #print(Y)\n zs = np.array([estimated_membership(np.array(list(zip(np.ravel(x), np.ravel(y)))), 2, centers, 2)\n for x,y in zip(np.ravel(X), np.ravel(Y))])\n zs1 = []\n zs2 = []\n for i in range(0, len(zs)):\n zs1.append(zs[i][0][0])\n zs2.append(zs[i][0][1])\n Z = np.array(zs1).reshape(X.shape)\n \n membership_contour = plt.contour(X, Y, Z,\n levels=(.1, .3, .5, .55, .6, .78, .95), colors='k')\n #print(membership_contour)\n plt.clabel(membership_contour, inline=1)",
"_____no_output_____"
],
[
"def generator(m):\n return (-4 + np.random.random(2*m) * 8).reshape((m, 2))",
"_____no_output_____"
],
[
"xTrain, xTest, yTrain, yTest = train_test_split(iris_values_2d,y, test_size=80)\nmodel = FCM()\n\ntrain_membership, centers = model.fuzzy_train(xTrain , 2 , 2)",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\ngr_dataset()\ngr_membership_contour(model.fuzzy_predict)\nplt.savefig(\"setosacurve.png\")\n#plt.show()",
"_____no_output_____"
],
[
"x = np.arange(-4, 4, .1)\ny = np.arange(-4, 4, .1)\nX, Y = np.meshgrid(x, y)\n#print(Y)\nzs = np.array([model.fuzzy_predict(np.array(list(zip(np.ravel(x), np.ravel(y)))), 2, centers, 2)\n for x,y in zip(np.ravel(X), np.ravel(Y))])\nzs1 = []\nzs2 = []\nfor i in range(0, len(zs)):\n zs1.append(zs[i][0][0])\n zs2.append(zs[i][0][1])\nZ = np.array(zs1).reshape(X.shape)\n#plt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.clf()\nplt.imshow(Z, extent=[-4, 4, -4, 4], origin='lower')\nplt.xticks(np.arange(-4,4,1))\nplt.yticks(np.arange(-4,4,1))\nplt.colorbar()\n#plt.show()\nplt.savefig(\"setosahm.png\")",
"_____no_output_____"
]
],
[
[
"# Versicolor",
"_____no_output_____"
]
],
[
[
"dataset=pd.read_csv(\"Dataset/iris-versicolor.csv\")\nX = dataset[[\"sepal_length\", \"sepal_width\"]].values\ny = dataset[\"species\"].values\n#print(\"features:\\n\", X, \"\\nLabels:\\n \", y)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\niris_values_std = StandardScaler().fit_transform(X)\n\nfrom sklearn.decomposition import PCA\npca_2d = PCA(n_components=2)\niris_values_2d = pca_2d.fit_transform(iris_values_std)\n#print(iris_values_2d)",
"_____no_output_____"
],
[
"#stampa\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef gr_dataset(): \n plt.figure(figsize=(7,7))\n for lab, col in zip((1, 0),\n ('blue', 'red')):\n plt.scatter(iris_values_2d[y==lab, 0],\n iris_values_2d[y==lab, 1],\n label=lab,\n c=col)\n\ngr_dataset()",
"_____no_output_____"
],
[
"def gr_membership_contour(estimated_membership):\n x = np.arange(-4, 4, .1)\n y = np.arange(-4, 4, .1)\n X, Y = np.meshgrid(x, y)\n #print(Y)\n zs = np.array([estimated_membership(np.array(list(zip(np.ravel(x), np.ravel(y)))), 2, centers, 2)\n for x,y in zip(np.ravel(X), np.ravel(Y))])\n zs1 = []\n zs2 = []\n for i in range(0, len(zs)):\n zs1.append(zs[i][0][0])\n zs2.append(zs[i][0][1])\n Z = np.array(zs2).reshape(X.shape)\n \n membership_contour = plt.contour(X, Y, Z,\n levels=(.1, .3, .5, .55, .6, .78, .95), colors='k')\n #print(membership_contour)\n plt.clabel(membership_contour, inline=1)\n\ndef generator(m):\n return (-4 + np.random.random(2*m) * 8).reshape((m, 2)) ",
"_____no_output_____"
],
[
"xTrain, xTest, yTrain, yTest = train_test_split(iris_values_2d,y, test_size=80)\nmodel = FCM()\n\ntrain_membership, centers = model.fuzzy_train(xTrain , 2 , 2)\nplt.figure(figsize=(7,7))\ngr_dataset()\ngr_membership_contour(model.fuzzy_predict)\n#plt.show()\nplt.savefig(\"versicolorcurve.png\")",
"_____no_output_____"
],
[
"#gr_dataset()\nx = np.arange(-4, 4, .1)\ny = np.arange(-4, 4, .1)\nX, Y = np.meshgrid(x, y)\n#print(Y)\nzs = np.array([model.fuzzy_predict(np.array(list(zip(np.ravel(x), np.ravel(y)))), 2, centers, 2)\n for x,y in zip(np.ravel(X), np.ravel(Y))])\nzs1 = []\nzs2 = []\nfor i in range(0, len(zs)):\n zs1.append(zs[i][0][0])\n zs2.append(zs[i][0][1])\nZ = np.array(zs2).reshape(X.shape)\n\nnormalized = (Z-Z.min())/(Z.max()-Z.min())\n#gr_membership_contour(clf.predict)\n\n#plt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.clf()\nplt.imshow(Z, extent=[-4, 4, -4, 4], origin='lower')\nplt.xticks(np.arange(-4,4,1))\nplt.yticks(np.arange(-4,4,1))\nplt.colorbar()\n#plt.show()\nplt.savefig(\"versicolorhm.png\")",
"_____no_output_____"
]
],
[
[
"# Virginica",
"_____no_output_____"
]
],
[
[
"dataset=pd.read_csv(\"Dataset/iris-virginica.csv\")\nX = dataset[[\"petal_length\", \"petal_width\"]].values\ny = dataset[\"species\"].values\n#print(\"features:\\n\", X, \"\\nLabels:\\n \", y)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\niris_values_std = StandardScaler().fit_transform(X)\n\nfrom sklearn.decomposition import PCA\npca_2d = PCA(n_components=2)\niris_values_2d = pca_2d.fit_transform(iris_values_std)\n#print(iris_values_2d)",
"_____no_output_____"
],
[
"#stampa\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef gr_dataset(): \n plt.figure(figsize=(7,7))\n for lab, col in zip((1, 0),\n ('blue', 'red')):\n plt.scatter(iris_values_2d[y==lab, 0],\n iris_values_2d[y==lab, 1],\n label=lab,\n c=col)\n\ngr_dataset()",
"_____no_output_____"
],
[
"xTrain, xTest, yTrain, yTest = train_test_split(iris_values_2d,y, test_size=80)\nmodel = FCM()\n\ntrain_membership, centers = model.fuzzy_train(xTrain , 2 , 2)\nplt.figure(figsize=(7,7))\ngr_dataset()\ngr_membership_contour(model.fuzzy_predict)\n#plt.show()\nplt.savefig(\"virginicacurve.png\")",
"_____no_output_____"
],
[
"\n#gr_dataset()\nx = np.arange(-4, 4, .1)\ny = np.arange(-4, 4, .1)\nX, Y = np.meshgrid(x, y)\n#print(Y)\nzs = np.array([model.fuzzy_predict(np.array(list(zip(np.ravel(x), np.ravel(y)))), 2, centers, 2)\n for x,y in zip(np.ravel(X), np.ravel(Y))])\nzs1 = []\nzs2 = []\nfor i in range(0, len(zs)):\n zs1.append(zs[i][0][0])\n zs2.append(zs[i][0][1])\nZ = np.array(zs2).reshape(X.shape)\n\nnormalized = (Z-Z.min())/(Z.max()-Z.min())\n#gr_membership_contour(clf.predict)\n\n#plt.show()",
"_____no_output_____"
],
[
"plt.figure(figsize=(7,7))\nplt.clf()\nplt.imshow(Z, extent=[-4, 4, -4, 4], origin='lower')\nplt.xticks(np.arange(-4,4,1))\nplt.yticks(np.arange(-4,4,1))\nplt.colorbar()\n#plt.show()\nplt.savefig(\"virginicahm.png\")",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc322a9f12b8375f4b14ae1a1f139068a7a005 | 48,751 | ipynb | Jupyter Notebook | dataxHWSp2021/HW3-4_ConvNets/student/ConvNets.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 28 | 2020-06-15T23:53:36.000Z | 2022-03-19T09:27:02.000Z | dataxHWSp2021/HW3-4_ConvNets/student/ConvNets.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 4 | 2020-06-24T22:20:31.000Z | 2022-02-28T01:37:36.000Z | dataxHWSp2021/HW3-4_ConvNets/student/ConvNets.ipynb | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 78 | 2020-06-19T09:41:01.000Z | 2022-02-05T00:13:29.000Z | 32.917623 | 532 | 0.608172 | [
[
[
"# Initialize Otter Grader\nimport otter\ngrader = otter.Notebook()",
"_____no_output_____"
]
],
[
[
"\n\n___\n\n#### NAME:\n\n#### STUDENT ID:\n___\n\n",
"_____no_output_____"
],
[
"# **HW3-4: Convolutional Neural Networks**\n**(Total 120 points)**\n\n",
"_____no_output_____"
],
[
"In this homework, you will compare the performance achieved by convolutional neural networks (CNNs) with the fully connected networks and also some shallow learning methods such as SVM in classifying the images from the CIFAR-10 dataset.",
"_____no_output_____"
],
[
"## 1. Loading and Exploring the CIFAR-10 Dataset",
"_____no_output_____"
],
[
"Run the following cell to load the required modules.",
"_____no_output_____"
]
],
[
[
"## Load the required modules\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom timeit import default_timer as timer",
"_____no_output_____"
]
],
[
[
"CIFAR is an acronym that stands for the Canadian Institute For Advanced Research. The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. This dataset was collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. Read more about this dataset [here](https://www.cs.toronto.edu/~kriz/cifar.html).\n\nThe class labels and their standard associated integer values are listed below:\n\n* 0: airplane\n* 1: car\n* 2: bird\n* 3: cat\n* 4: deer\n* 5: dog\n* 6: frog\n* 7: horse\n* 8: ship\n* 9: truck\n",
"_____no_output_____"
],
[
"Run the following cell without any modifications to load the CIFAR-10 dataset.",
"_____no_output_____"
]
],
[
[
"## Load the CIFAR-10 dataset\n(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()",
"_____no_output_____"
]
],
[
[
"Let's make sure that the number and shape of the training and test images are as described above.",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nprint('Training: x_train=%s, y_train=%s' % (x_train.shape, y_train.shape))\nprint('Test: x_test=%s, y_test=%s' % (x_test.shape, y_test.shape))",
"_____no_output_____"
]
],
[
[
"The next cell plots the first 16 images from this dataset. It is clear that the images are indeed very small compared to modern photographs; it can be challenging to see what exactly is represented in some of the images given the extremely low resolution. Check the 11th image for example.\n",
"_____no_output_____"
]
],
[
[
"# Run this cell, no need to modify\nint2label = {0: 'airplane', 1: 'car', 2: 'bird', 3: 'cat', \n 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', \n 9: 'truck'}\nfig, axs = plt.subplots(4, 4, figsize=(10, 10))\naxs = axs.flatten()\nfor i, ax in enumerate(axs):\n ax.imshow(x_train[i])\n ax.set_title('This is a %s' % int2label[y_train[i].item()])\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"You might think that this low resolution is likely to limit the performance achieved by machine learning algorithms, but you should not underestimate the power of deep learning. Checkout this [leaderbord](https://paperswithcode.com/sota/image-classification-on-cifar-10) to see the performance that top-of-the-line deep learning algorithms are able to achieve on this dataset.",
"_____no_output_____"
],
[
"## 2. Data Preprocessing ",
"_____no_output_____"
],
[
"For the purpose of this homework, we pick the first 49000 training images for as training set and the last 1000 training images as the validation set. We do not touch the test set until the last part of this homework. \n\nRun the following cell to get the training, validation, and test sets and their corresponding labels.",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\n\n# Valdiation set\nx_val = x_train[49000:]\ny_val = np.squeeze(y_train[49000:])\n\n# Training set\nx_train = x_train[:49000]\ny_train = np.squeeze(y_train[:49000])\n\n# Test set\nx_test = x_test\ny_test = np.squeeze(y_test)\n\nprint('Training: x_train=%s, y_train=%s' % (x_train.shape, y_train.shape))\nprint('Validation: x_val=%s, y_val=%s' % (x_val.shape, y_val.shape))\nprint('Test: x_test=%s, y_test=%s' % (x_test.shape, y_test.shape))",
"_____no_output_____"
]
],
[
[
"The pixel values for each image in the dataset are unsigned integers in the range between no color and full color, or 0 and 255. Thus, we need to convert the data type from unsigned integers to floats.\n\nFurthermore, neural networks process inputs using small weight values, and inputs with large integer values can disrupt or slow down the learning process. As such, it is a good practice to normalize the pixel values so that each pixel value has a value between 0 and 1. Dividing the pixel values by the maximum value does the job.\n\nRunning the following cell changes the data type of each pixel and normalize their value. ",
"_____no_output_____"
]
],
[
[
"# Run this cell, no need to modify\nx_train = x_train.astype('float32') / 255.0\nx_val = x_val.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0",
"_____no_output_____"
]
],
[
[
"## 3. Shallow Learning on CIFAR-10\n\n**(Total 40 points)**",
"_____no_output_____"
],
[
"Our ultimate goal is to have a model achieving a high accuracy on the **validation set** (why not the test set?). First, let's see how some of the models you learnt in the previous homework (core concepts) perform on this dataset. In case they achieve a high validation accuracy, then there is no need to bother ourselves with the neural nets.\n\nRun the following cell to load the required modules.\n\n",
"_____no_output_____"
]
],
[
[
"## Load the required modules\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC",
"_____no_output_____"
]
],
[
[
"Since the models you havve seen in the HW2 do not process images and rather use a vector of features, we first need to flatten the images in our training and valdiation sets. \n",
"_____no_output_____"
],
[
"**3.1) (5 points)** Flatten the images in the training and validation sets and store the results in `x_train_flat` and `x_val_flat`. \n\n> **Note:** The shape of `x_train_flat` and `x_val_flat` should be (49000, 3072) and (1000, 3072), respectively.\n\n<!--\nBEGIN QUESTION\nname: q31\nmanual: false\npoints: 5\n-->",
"_____no_output_____"
]
],
[
[
"## Your code here\nx_train_flat = ...\nx_val_flat = ...",
"_____no_output_____"
],
[
"grader.check(\"q31\")",
"_____no_output_____"
]
],
[
[
"\nCurrently, we have 49000 training data points each with 3072 features. In order to be able to run some of the algorithms in HW2 on this dataset in a reasonable amount of time, we need to reduce the dimensionality of the features. \n\nThere are plenty of ways to reduce the dimensionality of the problem, but here we use PCA. Do not worry if you are not familiar with this method as we have implemented it for you. If you are curious to know how PCA works, check [this](https://en.wikipedia.org/wiki/Principal_component_analysis#:~:text=Principal%20component%20analysis%20(PCA)%20is,components%20and%20ignoring%20the%20rest.) out. \n",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\ncombined = np.vstack((x_train_flat, x_val_flat))\npca = PCA().fit(combined)\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');",
"_____no_output_____"
]
],
[
[
"The plot that you observe above depicts how much variance in the data is retained for different choices of the number of principal components. For example, by reducing the number of components to 50 (from 3072), we retain 84.3% of the variance in the data. Run the following cell for different values of `n` to further explore the plot above. ",
"_____no_output_____"
]
],
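[
[
"## Illustrative aside (not part of the original homework): assuming the `pca` object fitted on `combined` in the previous cell,\n## this is one way to find the smallest number of components that retains at least a target fraction of the variance.\ntarget = 0.90\nn_needed = int(np.argmax(np.cumsum(pca.explained_variance_ratio_) >= target)) + 1\nprint('Smallest number of components retaining at least {:.0%} of the variance: {}'.format(target, n_needed))",
"_____no_output_____"
]
],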
[
[
"## Run this cell for different values of n\nn = 50\nprint('By reducing the number of components to %d, \\\nwe retain %s percent of the variance in the data.' \\\n% (n, np.round(100 * pca.explained_variance_ratio_.cumsum()[n-1], 1)))",
"_____no_output_____"
]
],
[
[
"Choose how many principal components you want to keep and store it in the variable `your_n`. Then, run the following cell to get the low dimensional training and validation sets (`x_train_reduced` and `x_val_reduced`).",
"_____no_output_____"
]
],
[
[
"## Your code here\nyour_n = ...\n\n## Do not modify the following lines\npca = PCA(n_components=your_n) \npca.fit(combined)\ntransformed = pca.transform(combined)\nx_train_reduced = transformed[:49000]\nx_val_reduced = transformed[49000:]",
"_____no_output_____"
]
],
[
[
"**3.2) (35 points)** Train a kernel SVM using the low dimensional training set that achieves a reasonable training and validation accuracy. You will receive credit according to the following scheme:\n\n > Full credit if **99% $<$ training accuracy** and **50% $<$ validation accuracy**.\n\n > 15 points if **99% $<$ training accuracy** and **45% $<$ validation accuracy $\\leq$ 50%**.\n\n > 0 points otherwise. \n\n**Make sure you follow these instructions:**\n\n* You should already be familiar with sklearn function [SVC](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC) from part 5 of HW2. You need to set the following options/hyperparameters in SVC: `C`, `kernel`, `degree`, `gamma`, and `max_iter`.\n\n* The variables `x_train_3` and `y_train_3` contain the training set and its labels, respectively. And `x_val_3`, and `y_val_3` contain the validation set and its labels, respectively. \n\n * Although the number of features is significantly reduced (depending on your choice of the number of principal components), it could still take quite a bit of time to train the SVM model on the whole training set. You may want to choose a subset of your training data and their corresponding labels and assign them to `x_train_3` and `y_train_3`, respectively. Set `n_train` to choose the first `n_train` training data points.\n > For example, you may choose to set `n_train = 1000` in which case you are using the first 1000 training data points and their corresponding labels to train your model.\n\n * You must not modify neither of `x_val_3` nor `y_val_3`. \n\n* Note that the number of components you chose to keep in the previous cell, `your_n`, is important for both the training time and the validation accuracy of your model.",
"_____no_output_____"
],
[
"The test \"q32a\" checks if you acheive >99% training accuracy and >45% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q32a\nmanual: false\npoints: 15\n-->",
"_____no_output_____"
]
],
[
[
"## Your code here\nC = ...\nkernel = ...\ndegree = ...\ngamma = ...\nmax_iter = ...\nn_train = ...\n\n## Do not modify the following lines\nx_train_3 = x_train_reduced[:n_train] \ny_train_3 = y_train[:n_train] \nx_val_3 = x_val_reduced\ny_val_3 = y_val\n\ntime_start = timer()\nsvm_model = SVC(C=C, kernel=kernel, degree=degree, gamma=gamma, max_iter=max_iter)\nsvm_model.fit(x_train_3, y_train_3)\ntime_end = timer()\nprint (\"Wall time for training the model: {0} second\".format(time_end-time_start))\n\ntime_start = timer()\ntrain_acc = np.mean(svm_model.predict(x_train_3) == y_train_3)\nprint('Training Accuracy = {0:f}'.format(train_acc))\nval_acc = np.mean(svm_model.predict(x_val_3) == y_val_3)\nprint('Validation Accuracy = {0:f}'.format(val_acc))\ntime_end = timer()\nprint (\"Wall time for computing the training and validation accuracies: {0} second\".format(time_end-time_start))",
"_____no_output_____"
],
[
"grader.check(\"q32a\")",
"_____no_output_____"
]
],
[
[
"The test \"q32b\" checks if you acheive >99% training accuracy and >50% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q32b\nmanual: false\npoints: 20\n-->",
"_____no_output_____"
]
],
[
[
"## Dummy Cell, DO NOT MODIFY",
"_____no_output_____"
],
[
"grader.check(\"q32b\")",
"_____no_output_____"
]
],
[
[
"Note that a random classifier would achieve an accuracy of about 10% on the CIFAR-10 dataset. The 50% accuracy (although it's the validation accuracy, not the test accuracy) we achieved using SVM is a big step up from a random classifier, but still it is far from being ideal. \n\nYou may also pick another model from HW2, train it the way we trained the SVM model above, and see if you could achieve a higher accuracy.",
"_____no_output_____"
],
[
"## 4. Fully Connected Neural Networks on CIFAR-10\n**(Total 40 points)**",
"_____no_output_____"
],
[
"In this part, you will train a fully connected neural network on CIFAR-10 dataset with the hope of getting a higher validation accuracy than the one you obtained using an SVM model.\n\nRun the cell below to load the required modules.",
"_____no_output_____"
]
],
[
[
"## Load the required modules\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Dropout\nfrom keras import initializers",
"_____no_output_____"
]
],
[
[
"\n\nFirst of all, make sure you study the neural networks [module](https://datax.berkeley.edu/wp-content/uploads/2020/09/NN-.pdf) in the course website and go through the corresponding jupyter notebook to understand how neural nets, specifically fully connected neural nets, work. To exercise your knowledge, you may either work on the neural nets homework on the course webpage, or check the [NeuralNet](https://github.com/scetx/datax/tree/master/dataxHWSp2021/HW3-4_NeuralNet/student) homework that we have recently released.\n\nBefore you start building and training your own model, as a demo, let's train a simple fully connected neural net with only one hidden layer on the CIFAR-10 dataset. \n\nLike the SVM model, we need to flatten the images before feeding them into the fully connected neural network. Note that this throws away the information about the 2D structure of the image. Also, since we will be using the [categorical_crossentropy](https://keras.io/api/losses/probabilistic_losses/#categoricalcrossentropy-class) as the model's loss function, we need to encode different image classes using [one-hot](https://en.wikipedia.org/wiki/One-hot) encoding. \n\n\n\n",
"_____no_output_____"
],
[
"**4.1) (5 points)** Store the training and validation sets and their labels in the variables `x_train_4`, `y_train_4`, `x_val_4`, and `y_val_4`. Note that we have already flattened the images in the previous part. Use keras function [to_categorical](https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical) to encode your training and validation labels. \n\n<!--\nBEGIN QUESTION\nname: q41\nmanual: false\npoints: 5\n-->",
"_____no_output_____"
]
],
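[
[
"## Illustrative aside (not part of the graded exercise): a quick look at what one-hot encoding with\n## keras' to_categorical does, using a tiny made-up label array instead of the CIFAR-10 labels.\ntoy_labels = np.array([0, 2, 9])\nprint(to_categorical(toy_labels, num_classes=10))",
"_____no_output_____"
]
],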
[
[
"## Your code here\n\n# Training set\nx_train_4 = ...\ny_train_4 = ...\n\n# Validation set\nx_val_4 = ...\ny_val_4 = ...",
"_____no_output_____"
],
[
"grader.check(\"q41\")",
"_____no_output_____"
]
],
[
[
"We start by creating a sequential model using keras [Sequential](https://keras.io/guides/sequential_model/) class. This allows us to build neural nets like legos, by adding one layer on top of the other, and swapping layers. Note that a sequential model is appropriate for a plain stack of layers where each layer has exactly one input tensor and one output tensor. Such model is not appropriate when:\n\n* Your model has multiple inputs or multiple outputs.\n* Any of your layers has multiple inputs or multiple outputs.\n* Your model requires layer sharing.\n* You want to model non-linear topology (e.g. a residual connection, a multi-branch model).",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nfc_demo = Sequential()",
"_____no_output_____"
]
],
[
[
"Now, we can import layer classes and stack layers by using `fc_demo.add()`. Model needs to know what input shape it should expect. For this reason, the first layer in a sequential model needs to receive information about its input shape. \n\nWe let the hidden layer have 1000 neurons and activate them by relu function. To know more about the syntax, consult with the [Dense layer](https://keras.io/api/layers/core_layers/dense/) API.",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nfc_demo.add(Dense(units=1000, activation='relu', \\\n input_shape=(3072, ), name='hidden'))\nfc_demo.add(Dense(units=10, activation='softmax', name='output'))",
"_____no_output_____"
]
],
[
[
"Let's review the summary of our model.",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nfc_demo.summary() ",
"_____no_output_____"
]
],
[
[
"As we mentioned at the beginning of this homework, the images in CIFAR-10 dataset are indeed very small compared to modern photographs, yet as you can see, in a simple fully connected neural network with only one hidden layer, we have about 3 million trainable paramteres. You can imagine how this number would grow if we wanted to work with high rsolution images and have a network consisting of several hidden layers. \n\nBefore training a model, you need to configure the learning process, which is done via the [compile](https://keras.io/api/models/model_training_apis/) method `.compile()`. `.compile` receives at least the following three arguments:\n\n* 1) A [loss](https://keras.io/api/losses/) function - This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as `categorical_crossentropy` or `mse`), or it can be an objective function.\n* 2) An [optimizer](https://keras.io/api/optimizers/) - This could be the string identifier of an existing optimizer (such as `rmsprop`, `gradientdescent`, or `adam`), or an instance of the Optimizer class.\n* 3) A list of [metrics](https://keras.io/api/metrics/) (optional) - For any classification problem you will want to set this to `metrics=['accuracy']`. A metric could be the string identifier of an existing metric or a custom metric function.\n\n\n\n",
"_____no_output_____"
],
[
"We use the `categorical_crossentropy` as our loss function, and [SGD](https://keras.io/api/optimizers/sgd/) as our optimizer. ",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nfc_demo.compile(loss='categorical_crossentropy',\n optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True),\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"Now that we have created our model and configured its learning process, we need to use the [fit](https://keras.io/api/models/model_training_apis/) method `.fit()` to train the model. ",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nfc_demo.fit(x=x_train_4, \n y=y_train_4, \n epochs=2, \n validation_data=(x_val_4, y_val_4),\n batch_size=32)\n\n_, train_acc = fc_demo.evaluate(x_train_4, y_train_4, verbose=0)\nprint('Training Accuracy = {0:f}'.format(train_acc))\n_, val_acc = fc_demo.evaluate(x_val_4, y_val_4, verbose=0)\nprint('Validation Accuracy = {0:f}'.format(val_acc))",
"_____no_output_____"
]
],
[
[
"The training and validation accuracies are relatively low. Some potential reasons could be our training process (the optimizer is not tuned, or the number of epochs is small) or the simple structure of our network. ",
"_____no_output_____"
],
[
"**4.2) (35 points)** Train your own fully connected neural network that achieves a reasonable training and validation accuracy. You will receive credit according to the following scheme:\n\n > Full credit if **50% $<$ training accuracy** and **50% $<$ validation accuracy**.\n\n > 15 points if **50% $<$ training accuracy** and **45% $<$ validation accuracy $\\leq$ 50%**.\n\n > 0 points otherwise. \n\n**Make sure you follow these instructions:**\n\n* You must use `x_train_4`, `y_train_4`, `x_val_4`, and `y_val_4` to train and validate your model. You must not modify any of them. So, you will be training your model on the whole training set as we did with our demo model. \n\n* A sequential model named `fc_model` is created below. As shown above, you need to add your desired layers to this model using the add method `.add()`. \n\n* You can only use dense layers, batchnormalization layers, and dropout layers to build your model. You may want to leverage [Batchnormalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization?version=nightly) to accelerate the training process and [Dropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout) to get a higher validation accuracy.\n\n > If you are not familiar with these techniques, check the [NeuralNet](https://github.com/scetx/datax/tree/master/dataxHWSp2021/HW3-4_NeuralNet/student) homework that we have recently released.\n\n* The way you [initialize](https://keras.io/api/layers/initializers/) the layer weights is also important in the learning process.\n\n* You need to choose your own optimizer `fc_optimizer` as well the number of training epochs `n_epochs` and the batch size `batch_size`.",
"_____no_output_____"
],
[
"The test \"q42a\" checks if you achieve >50% training accuracy and >45% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q42a\nmanual: false\npoints: 15\n-->",
"_____no_output_____"
]
],
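[
[
"## Illustrative syntax sketch, NOT a solution to 4.2: the layer sizes, dropout rate, and initializer below are\n## arbitrary placeholders; the cell only shows how Dense, BatchNormalization, and Dropout layers can be stacked.\nsketch = Sequential()\nsketch.add(Dense(units=512, activation='relu', input_shape=(3072, ), kernel_initializer=initializers.he_normal()))\nsketch.add(BatchNormalization())\nsketch.add(Dropout(0.3))\nsketch.add(Dense(units=10, activation='softmax'))\nsketch.summary()",
"_____no_output_____"
]
],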
[
[
"fc_model = Sequential()\n\n## Your code here\nfc_optimizer = ...\nn_epochs = ...\nbatch_size = ...\n\n## Do not modify the following cells\nfc_model.compile(loss='categorical_crossentropy',\n optimizer=fc_optimizer,\n metrics=['accuracy'])\n\nhistory = fc_model.fit(x=x_train_4, \n y=y_train_4, \n epochs=n_epochs, \n validation_data=(x_val_4, y_val_4),\n batch_size=batch_size)\n\nfig, [ax1, ax2] = plt.subplots(2, 1, figsize=(5, 5))\n# plot loss\nax1.set_title('Cross Entropy Loss')\nax1.plot(np.arange(n_epochs) + 1, history.history['loss'], color='blue', label='training')\nax1.plot(np.arange(n_epochs) + 1, history.history['val_loss'], color='orange', label='validation')\nax1.legend()\n# plot accuracy\nax2.set_title('Classification Accuracy')\nax2.set_xlabel('Epoch')\nax2.plot(np.arange(n_epochs) + 1, history.history['accuracy'], color='blue', label='training')\nax2.plot(np.arange(n_epochs) + 1, history.history['val_accuracy'], color='orange', label='validation')\nax2.legend()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"grader.check(\"q42a\")",
"_____no_output_____"
]
],
[
[
"The test \"q42b\" checks if you acheive >50% training accuracy and >50% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q42b\nmanual: false\npoints: 20\n-->\n",
"_____no_output_____"
]
],
[
[
"## Dummy Cell, DO NOT MODIFY",
"_____no_output_____"
],
[
"grader.check(\"q42b\")",
"_____no_output_____"
]
],
[
[
"Compare the performance of `fc_model` and `svm_model`. Which one of these models would achieve a higher test accuracy? (You don't need to write any answers.) We will compare their performance on the test set in the last part of this homework. ",
"_____no_output_____"
],
[
"## 5. Convolutional Neural Networks on CIFAR-10\n\n**(total 40 points)**",
"_____no_output_____"
],
[
"With the limitations that shallow learning methods have and the fact that fully connected neural networks are not appropriate to process images (huge number of parameters, lack of the ability to exploit the spatial information in the images, etc.), convolutional neural networks are the to-go model for most of the tasks in computer vision. In this part, you will train a convolutional neural network that achieves a relatively high validation accuracy on the CIFAR-10 dataset.\n\nRun the following cell to load the required modules.",
"_____no_output_____"
]
],
[
[
"## Load the required modules \nfrom keras.layers import Flatten\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import AveragePooling2D",
"_____no_output_____"
]
],
[
[
"Check the convolutional neural networks [module](https://datax.berkeley.edu/wp-content/uploads/2020/09/slides-m430-convolutional-neural-networks.pdf) to understand how conv nets work. To get more familiar with this architecture and pick up some coding skills you may take a look at the two corresponding jupyter notebooks as well. \n\nSince conv nets use the 2D/3D structure of the image, we do not need to flatten the images before feeding them into these networks. Moreover, due to the relatively small number of trainable parameters in each filter, conv nets allow us to go much deeper (in terms of the number of layers) than the fully connected networks.\n\nThe training and validation sets and their corresponding labels are given to you below. Note that we again need to encode different image classes using [one-hot](https://en.wikipedia.org/wiki/One-hot) encoding. \n> Make sure you pass the test \"q41\" before running the cell below. ",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify \n\n# Training set\nx_train_5 = x_train\ny_train_5 = y_train_4\n\n# Validation set\nx_val_5 = x_val \ny_val_5 = y_val_4",
"_____no_output_____"
]
],
[
[
"As a demo, we build and train a simple conv net on our dataset. Same as the previous part, we start by creating a sequential model and then use the add method to import different layers into it and stack them on top of each other. \n\n",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\ncnn_demo = Sequential()",
"_____no_output_____"
]
],
[
[
"Convolutional and pooling layers are the main building blocks of a conv net. We implement convolutional layers using [Conv2D](https://keras.io/api/layers/convolution_layers/convolution2d/). \n> Make sure you understand the concepts of stride and padding. \n\nPooling layers are used to subsample the input image to reduce computational load, memory usage, and number of prameters. Pooling layers also require size, stride and padding type, but unlike convolutional layers, neurons in pooling layers do not have weights. We use [MaxPooling2D](https://keras.io/api/layers/pooling_layers/max_pooling2d/) and [AveragePooling2d](https://keras.io/api/layers/pooling_layers/average_pooling2d/) to implement these layers. \n\nAfter using several blocks of convolutional and pooling layers, we get something small enough that we can flatten and feed into a standard fully connected layer. \n\nThe architecture that we have chosen for `cnn_demo` is quite similar to LeNet-5, the architecture that Yann LeCun, Leon Bottou, Yosuha Bengio and Patrick Haffner proposed for handwritten character recognition in 1990’s.",
"_____no_output_____"
]
],
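[
[
"## Illustrative aside (not part of the original demo): how padding and pooling change the spatial size of the\n## feature maps. With padding='same' a 32x32 input keeps its size, the default 2x2 average pooling halves it,\n## and a 5x5 kernel with the default padding='valid' shrinks each side by 4 pixels.\nshape_demo = Sequential()\nshape_demo.add(Conv2D(filters=6, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(32,32,3)))\nshape_demo.add(AveragePooling2D())\nshape_demo.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu'))\nshape_demo.summary()",
"_____no_output_____"
]
],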
[
[
"# Run this cell, no need to modify\ncnn_demo.add(Conv2D(filters=6, kernel_size=(3, 3), padding='same', \\\n activation='relu', input_shape=(32,32,3))) # First layer\ncnn_demo.add(AveragePooling2D()) # Second layer\ncnn_demo.add(Conv2D(filters=16, kernel_size=(5, 5), \\\n activation='relu')) # Third layer\ncnn_demo.add(AveragePooling2D()) # Fourth layer\ncnn_demo.add(Flatten()) \ncnn_demo.add(Dense(units=100, activation='relu')) # Fifth layer\ncnn_demo.add(Dense(units=10, activation = 'softmax')) # Output layer",
"_____no_output_____"
]
],
[
[
"Let's review the summary of our model.",
"_____no_output_____"
]
],
[
[
"cnn_demo.summary()",
"_____no_output_____"
]
],
[
[
"Note the difference between the number of trainable parameters in the network above and the simple fully connected network (`fc_demo`) we built in the previous part. Even though `fc_demo` has only one hidden layer, it has considerably more parameters than the 6-layer conv net above. \n\nNow, we need to configure the learning process and then train our model using the compile and fit methods, respectively. ",
"_____no_output_____"
]
],
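[
[
"## Illustrative aside (not part of the original notebook): comparing the total number of parameters\n## of the two demo models built so far.\nprint('fc_demo parameters: ', fc_demo.count_params())\nprint('cnn_demo parameters:', cnn_demo.count_params())",
"_____no_output_____"
]
],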
[
[
"# Run this cell, no need to modify\ncnn_demo.compile(loss = 'categorical_crossentropy',\n optimizer = keras.optimizers.SGD(lr=0.001, momentum = 0.9, nesterov=True),\n metrics = ['accuracy'])\n\ncnn_demo.fit(x=x_train_5, \n y=y_train_5, \n epochs=2, \n validation_data=(x_val_5, y_val_5),\n batch_size=32)\n\n_, train_acc = cnn_demo.evaluate(x_train_5, y_train_5, verbose=0)\nprint('Training Accuracy = {0:f}'.format(train_acc))\n_, val_acc = cnn_demo.evaluate(x_val_5, y_val_5, verbose=0)\nprint('Validation Accuracy = {0:f}'.format(val_acc))",
"_____no_output_____"
]
],
[
[
"Like the previous demo, the training and validation accuracies are relatively low. You need to try different architectures and fine-tune some of the hyperparameters to get outstanding results.",
"_____no_output_____"
],
[
"**5.1) (40 points)** Train your own convolutional neural network that achieves a high training and validation accuracy. You will receive credit according to the following scheme:\n\n > Full credit if **70% $<$ training accuracy** and **70% $<$ validation accuracy**.\n\n > 15 points if **70% $<$ training accuracy** and **65% $<$ validation accuracy $\\leq$ 70%**.\n\n > 0 points otherwise. \n\n**Make sure you follow these instructions:**\n\n* You must use `x_train_5`, `y_train_5`, `x_val_5`, and `y_val_5` to train and validate your model. You must not modify any of them. So, you will be training your model on the whole training set as we did with our demo model. \n\n* A sequential model named `cnn_model` is created below. You need to add your desired layers to this model using the add method `.add()`. \n\n* You can only use the following layers: Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, BatchNormalization, and Dropout. \n\n* The way you [initialize](https://keras.io/api/layers/initializers/) the layer weights is also important in the learning process.\n\n* You need to choose your own optimizer `cnn_optimizer` as well the number of training epochs `n_epochs` and the batch size `batch_size`.",
"_____no_output_____"
],
[
"The test \"q51a\" checks if you acheive >70% training accuracy and >65% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q51a\nmanual: false\npoints: 15\n-->",
"_____no_output_____"
]
],
[
[
"cnn_model = Sequential()\n\n## Your code here\ncnn_optimizer = ...\nn_epochs = ...\nbatch_size = ...\n\n## Do not modify the following cells\ncnn_model.compile(loss='categorical_crossentropy',\n optimizer=cnn_optimizer,\n metrics=['accuracy'])\n\nhistory = cnn_model.fit(x=x_train_5, \n y=y_train_5, \n epochs=n_epochs, \n validation_data=(x_val_5, y_val_5),\n batch_size=batch_size)\n\nfig, [ax1, ax2] = plt.subplots(2, 1, figsize=(5, 5))\n# plot loss\nax1.set_title('Cross Entropy Loss')\nax1.plot(np.arange(n_epochs) + 1, history.history['loss'], color='blue', label='training')\nax1.plot(np.arange(n_epochs) + 1, history.history['val_loss'], color='orange', label='validation')\nax1.legend()\n# plot accuracy\nax2.set_title('Classification Accuracy')\nax2.set_xlabel('Epoch')\nax2.plot(np.arange(n_epochs) + 1, history.history['accuracy'], color='blue', label='training')\nax2.plot(np.arange(n_epochs) + 1, history.history['val_accuracy'], color='orange', label='validation')\nax2.legend()\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"grader.check(\"q51a\")",
"_____no_output_____"
]
],
[
[
"The test \"q51b\" checks if you acheive >70% training accuracy and >70% validation accuracy.\n\n<!--\nBEGIN QUESTION\nname: q51b\nmanual: false\npoints: 25\n-->\n",
"_____no_output_____"
]
],
[
[
"## Dummy Cell, DO NOT MODIFY",
"_____no_output_____"
],
[
"grader.check(\"q51b\")",
"_____no_output_____"
]
],
[
[
"## 6. Test Accuracy",
"_____no_output_____"
],
[
"At the beginning of this homework, we put a test set aside as we would like to test our models' performance on a totally unseen dataset. As we fine-tune the hyper paramteres in our models and change their structure to achieve a higher validation accuracy, the models gain access to some information from the validation set, and therefore it should not be interpreted as an unseen dataset at all.\n\nRun the cell below to find out the accuracy of the different models you trained above on the test set.",
"_____no_output_____"
]
],
[
[
"## Run this cell, no need to modify\nsvm_test_acc = np.mean(svm_model.predict(pca.transform(x_test.reshape((x_test.shape[0], -1)))) == y_test)\nprint('Test accuracy of the SVM model= {0:f}'.format(svm_test_acc))\n\nfc_test_acc = fc_model.evaluate(x_test.reshape((x_test.shape[0], -1)), to_categorical(y_test), verbose=0)[1]\nprint('Test accuracy of the fully connected neural network= {0:f}'.format(fc_test_acc))\n\ncnn_test_acc = cnn_model.evaluate(x_test, to_categorical(y_test), verbose=0)[1]\nprint('Test accuracy of the convolutional neural network= {0:f}'.format(cnn_test_acc))\n",
"_____no_output_____"
]
],
[
[
"**Keep this in mind:**\nThe way you initialize and train these models is perhaps not deterministic, unless you set all the random states (for the layers and also the optimizers). Consequently, it could be possible that you pass the tests in one run, but fail some of them in another run. \n\n",
"_____no_output_____"
],
[
"# Submit\nMake sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output.\n**Please save before submitting!**",
"_____no_output_____"
]
],
[
[
"# Save your notebook first, then run this cell to create a pdf for your reference.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
ecdc35147982e3e743452726a64341890ee33f90 | 5,888 | ipynb | Jupyter Notebook | (8) Profiling_Data.ipynb | HarryHoangNguyen/Functional_Python_for_ETL | f1671c3da20aa9bdad5fb3951837b9cab634421e | [
"MIT"
] | null | null | null | (8) Profiling_Data.ipynb | HarryHoangNguyen/Functional_Python_for_ETL | f1671c3da20aa9bdad5fb3951837b9cab634421e | [
"MIT"
] | null | null | null | (8) Profiling_Data.ipynb | HarryHoangNguyen/Functional_Python_for_ETL | f1671c3da20aa9bdad5fb3951837b9cab634421e | [
"MIT"
] | null | null | null | 28.862745 | 1,288 | 0.587976 | [
[
[
"### Import libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd \nimport numpy as np \nimport sqlalchemy as sa\nimport configparser \n\nfrom Functions.sql_processes import *",
"_____no_output_____"
]
],
[
[
"### Load the configuration file",
"_____no_output_____"
]
],
[
[
"# Load Config\nconfig = configparser.ConfigParser()\nconfig.read('Config/config.ini')",
"_____no_output_____"
]
],
[
[
"### Set the connection string",
"_____no_output_____"
]
],
[
[
"# Initialize Variables\neng_conn = config['Dev']['conn_string']",
"_____no_output_____"
]
],
[
[
"### Set the query string used for lookup",
"_____no_output_____"
]
],
[
[
"query = '''\n Select * FROM AdventureWorks2017.Sales.SalesOrderHeader\n'''",
"_____no_output_____"
]
],
[
[
"### Connect to sql and execute the query, returning results to dataframe",
"_____no_output_____"
]
],
[
[
"df = pd.read_sql_query(query, eng_conn)",
"_____no_output_____"
]
],
[
[
"### Get row counts",
"_____no_output_____"
]
],
[
[
"df.count()",
"_____no_output_____"
]
],
[
[
"### Get the number of blank/null values",
"_____no_output_____"
]
],
[
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"### Create a column for PaymentMethod. Setting Cash if CreditCardID is null and Card if value exists",
"_____no_output_____"
]
],
[
[
"df['PaymentMethod'] = np.where(df['CreditCardID'].isnull(), 'Cash', 'Card')",
"_____no_output_____"
]
],
[
[
"### Output the top 10 results",
"_____no_output_____"
]
],
[
[
"print(df.head(10))",
"_____no_output_____"
]
],
[
[
"### Start here for Breweries data Profile (Live coding, what could go wrong!)",
"_____no_output_____"
]
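A possible starting point for that live-coding exercise, reusing the same profiling steps applied above. The table name `Breweries` is an assumption (a placeholder for whatever the real source is), and the sketch assumes `pd` and `eng_conn` from the earlier cells are still available.

```python
# Hypothetical query: replace 'Breweries' with the actual table or view name.
breweries_query = '''
    SELECT * FROM Breweries
'''

breweries_df = pd.read_sql_query(breweries_query, eng_conn)

# Basic profile: row counts, null counts, and summary statistics.
print(breweries_df.count())
print(breweries_df.isnull().sum())
print(breweries_df.describe(include='all'))
```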
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdc3c159281d653dba89c7b6cfc07ee58d49fe2 | 16,662 | ipynb | Jupyter Notebook | BDMI/WEEK5/sqlite-Tkinter-stan.ipynb | jessie-233/python-exercise | 097f60978d316a7380e85686e0403cdf1ac482e9 | [
"MIT"
] | null | null | null | BDMI/WEEK5/sqlite-Tkinter-stan.ipynb | jessie-233/python-exercise | 097f60978d316a7380e85686e0403cdf1ac482e9 | [
"MIT"
] | 1 | 2020-10-01T13:52:54.000Z | 2020-10-01T13:54:37.000Z | BDMI/WEEK5/sqlite-Tkinter-stan.ipynb | jessie-233/python-exercise | 097f60978d316a7380e85686e0403cdf1ac482e9 | [
"MIT"
] | null | null | null | 38.041096 | 3,390 | 0.485716 | [
[
[
"import sqlite3\nimport random\nimport string",
"_____no_output_____"
]
],
[
[
"### 练习1\n创建 university 数据库 \\\n创建students表 \\\n 至少包括 ID,姓名,性别,年龄等必要字段,并自己为这些字段选择合适的字段类型",
"_____no_output_____"
]
],
[
[
"# 数据库文件是university.db,不存在,则自动创建\nwith sqlite3.connect('university.db') as db:\n# 创建一个cursor:\n cursor = db.cursor()",
"_____no_output_____"
],
[
"# 执行一条SQL语句:创建students表\ncursor.execute('create table if not exists students( \\\n id integer primary key, \\\n name text not null, \\\n gender text not null, \\\n age intger not null, \\\n major text not null)')\ndb.commit()",
"_____no_output_____"
]
],
[
[
"### 练习2\n增加和更新students 表的数据 \\\n至少有3名同学(周边)\n\n在university 数据库中建立class表 \\\n增加和更新表的数据\n\n至少有三门以上课程 \\\n提示,至少包括课号,课程名,教师。",
"_____no_output_____"
]
],
[
[
"# 插入数据:\ngenders = ['male', 'female']\nmajors = ['Math', 'CS', 'Finance', 'Economics']\n\nfor i in range(20):\n name = ''.join(random.sample(string.ascii_lowercase, 5))\n gender = genders[random.randint(0, 1)]\n age = random.randint(12, 24)\n major = majors[random.randint(0, 3)]\n cursor.execute('insert into students (id, name,gender,age,major) \\\n values ({},\"{}\",\"{}\",{},\"{}\")'.format(i+1, name, gender, age, major))\ndb.commit()",
"_____no_output_____"
],
[
"cursor.execute('select * from students')\nprint('---------------students--------------------')\nfor x in cursor.fetchall():\n print(x)",
"---------------students--------------------\n(1, 'tlkbp', 'male', 19, 'Math')\n(2, 'ubahs', 'female', 12, 'Math')\n(3, 'bxwld', 'male', 12, 'Finance')\n(4, 'aerod', 'female', 21, 'CS')\n(5, 'twknc', 'male', 14, 'Finance')\n(6, 'maqtz', 'female', 19, 'CS')\n(7, 'ugato', 'female', 22, 'Finance')\n(8, 'mtvpd', 'female', 19, 'CS')\n(9, 'gopzy', 'female', 14, 'Math')\n(10, 'rejts', 'male', 14, 'CS')\n(11, 'mexqs', 'female', 20, 'Economics')\n(12, 'umwlb', 'male', 24, 'Economics')\n(13, 'vebqo', 'male', 14, 'Math')\n(14, 'xulny', 'female', 14, 'Finance')\n(15, 'aqokr', 'female', 17, 'Finance')\n(16, 'rfwmg', 'female', 20, 'CS')\n(17, 'iosuj', 'female', 18, 'CS')\n(18, 'lsier', 'male', 21, 'Math')\n(19, 'prbex', 'female', 22, 'Economics')\n(20, 'wlbrv', 'male', 15, 'Finance')\n"
],
[
"# 执行一条SQL语句:创建class表\ncursor.execute('create table if not exists class( \\\n class_id integer primary key, \\\n class_name text, \\\n lecture text, \\\n credit integer )')\n\nclasses = ['Python', 'Java', 'C++', 'C', 'R', 'Go']\nlectures = ['Adam', 'Bob', 'Cyrus', 'Dan', 'Eric', 'Frank']\nCredits = [3, 2, 3, 2, 1, 1]\nfor i in range(6):\n cursor.execute('insert into class (class_id, class_name,lecture,credit) \\\n values ({},\"{}\",\"{}\",{})'.format(i+1, classes[i], lectures[i], Credits[i]))\ndb.commit()",
"_____no_output_____"
],
[
"cursor.execute('select * from class')\nprint('---------------class--------------------')\nfor x in cursor.fetchall():\n print(x)",
"---------------class--------------------\n(1, 'Python', 'Adam', 3)\n(2, 'Java', 'Bob', 2)\n(3, 'C++', 'Cyrus', 3)\n(4, 'C', 'Dan', 2)\n(5, 'R', 'Eric', 1)\n(6, 'Go', 'Frank', 1)\n"
]
],
[
[
"### 练习3\n在university 数据库中创建选课表enrolled表;\\\n至少包括:学生ID,选课的课号,考试分数 \\\n增加和更新表的数据\n",
"_____no_output_____"
]
],
[
[
"# 执行一条SQL语句:创建enrolled表\ncursor.execute('create table if not exists enrolled( \\\n student_id integer, \\\n class_id integer, \\\n credit integer, \\\n score integer, \\\n primary key(student_id,class_id) )')\n\nfor i in range(20):\n student_id = i+1\n for j in range(random.randint(1, 6)):\n class_id = j+1\n credit = Credits[j]\n score = random.randint(0, 100)\n cursor.execute('insert into enrolled (student_id,class_id, credit,score) \\\n values ({},{},{},{})'.format(student_id, class_id, credit, score))\ndb.commit()",
"_____no_output_____"
],
[
"cursor.execute('select * from enrolled')\nprint('---------------students--------------------')\nfor x in cursor.fetchall():\n print(x)",
"---------------students--------------------\n(1, 1, 3, 99)\n(1, 2, 2, 78)\n(1, 3, 3, 58)\n(1, 4, 2, 28)\n(1, 5, 1, 49)\n(1, 6, 1, 93)\n(2, 1, 3, 71)\n(2, 2, 2, 32)\n(2, 3, 3, 26)\n(2, 4, 2, 33)\n(3, 1, 3, 1)\n(3, 2, 2, 53)\n(3, 3, 3, 64)\n(3, 4, 2, 81)\n(3, 5, 1, 19)\n(3, 6, 1, 90)\n(4, 1, 3, 15)\n(4, 2, 2, 95)\n(4, 3, 3, 17)\n(4, 4, 2, 80)\n(5, 1, 3, 84)\n(6, 1, 3, 15)\n(7, 1, 3, 53)\n(7, 2, 2, 27)\n(7, 3, 3, 30)\n(8, 1, 3, 46)\n(8, 2, 2, 0)\n(9, 1, 3, 100)\n(9, 2, 2, 68)\n(9, 3, 3, 19)\n(10, 1, 3, 48)\n(10, 2, 2, 49)\n(11, 1, 3, 4)\n(11, 2, 2, 0)\n(11, 3, 3, 31)\n(11, 4, 2, 3)\n(12, 1, 3, 6)\n(12, 2, 2, 64)\n(12, 3, 3, 99)\n(13, 1, 3, 27)\n(13, 2, 2, 87)\n(14, 1, 3, 100)\n(14, 2, 2, 83)\n(15, 1, 3, 20)\n(15, 2, 2, 79)\n(15, 3, 3, 2)\n(15, 4, 2, 32)\n(15, 5, 1, 9)\n(15, 6, 1, 5)\n(16, 1, 3, 44)\n(16, 2, 2, 34)\n(16, 3, 3, 47)\n(16, 4, 2, 20)\n(16, 5, 1, 44)\n(16, 6, 1, 57)\n(17, 1, 3, 62)\n(18, 1, 3, 94)\n(18, 2, 2, 38)\n(18, 3, 3, 73)\n(18, 4, 2, 51)\n(18, 5, 1, 24)\n(18, 6, 1, 30)\n(19, 1, 3, 46)\n(20, 1, 3, 52)\n(20, 2, 2, 77)\n"
]
],
[
[
"### 练习4\n\n使用 INNER JOIN 查询学生的个人信息和每个课程的分数。",
"_____no_output_____"
]
],
[
[
"# 查询学生的个人信息及分数\ncursor.execute('''select id,name,gender,age,major,class_name,credit,score\n from students s\n inner join\n (select student_id,class_name,c.credit,score\n from class c\n inner join enrolled e\n on c.class_id = e.class_id) tc\n on s.id =tc.student_id''')\nprint('---------------inner join--------------------')\nfor x in cursor.fetchall():\n print(x)",
"---------------inner join--------------------\n(1, 'tlkbp', 'male', 19, 'Math', 'Python', 3, 99)\n(1, 'tlkbp', 'male', 19, 'Math', 'Java', 2, 78)\n(1, 'tlkbp', 'male', 19, 'Math', 'C++', 3, 58)\n(1, 'tlkbp', 'male', 19, 'Math', 'C', 2, 28)\n(1, 'tlkbp', 'male', 19, 'Math', 'R', 1, 49)\n(1, 'tlkbp', 'male', 19, 'Math', 'Go', 1, 93)\n(2, 'ubahs', 'female', 12, 'Math', 'Python', 3, 71)\n(2, 'ubahs', 'female', 12, 'Math', 'Java', 2, 32)\n(2, 'ubahs', 'female', 12, 'Math', 'C++', 3, 26)\n(2, 'ubahs', 'female', 12, 'Math', 'C', 2, 33)\n(3, 'bxwld', 'male', 12, 'Finance', 'Python', 3, 1)\n(3, 'bxwld', 'male', 12, 'Finance', 'Java', 2, 53)\n(3, 'bxwld', 'male', 12, 'Finance', 'C++', 3, 64)\n(3, 'bxwld', 'male', 12, 'Finance', 'C', 2, 81)\n(3, 'bxwld', 'male', 12, 'Finance', 'R', 1, 19)\n(3, 'bxwld', 'male', 12, 'Finance', 'Go', 1, 90)\n(4, 'aerod', 'female', 21, 'CS', 'Python', 3, 15)\n(4, 'aerod', 'female', 21, 'CS', 'Java', 2, 95)\n(4, 'aerod', 'female', 21, 'CS', 'C++', 3, 17)\n(4, 'aerod', 'female', 21, 'CS', 'C', 2, 80)\n(5, 'twknc', 'male', 14, 'Finance', 'Python', 3, 84)\n(6, 'maqtz', 'female', 19, 'CS', 'Python', 3, 15)\n(7, 'ugato', 'female', 22, 'Finance', 'Python', 3, 53)\n(7, 'ugato', 'female', 22, 'Finance', 'Java', 2, 27)\n(7, 'ugato', 'female', 22, 'Finance', 'C++', 3, 30)\n(8, 'mtvpd', 'female', 19, 'CS', 'Python', 3, 46)\n(8, 'mtvpd', 'female', 19, 'CS', 'Java', 2, 0)\n(9, 'gopzy', 'female', 14, 'Math', 'Python', 3, 100)\n(9, 'gopzy', 'female', 14, 'Math', 'Java', 2, 68)\n(9, 'gopzy', 'female', 14, 'Math', 'C++', 3, 19)\n(10, 'rejts', 'male', 14, 'CS', 'Python', 3, 48)\n(10, 'rejts', 'male', 14, 'CS', 'Java', 2, 49)\n(11, 'mexqs', 'female', 20, 'Economics', 'Python', 3, 4)\n(11, 'mexqs', 'female', 20, 'Economics', 'Java', 2, 0)\n(11, 'mexqs', 'female', 20, 'Economics', 'C++', 3, 31)\n(11, 'mexqs', 'female', 20, 'Economics', 'C', 2, 3)\n(12, 'umwlb', 'male', 24, 'Economics', 'Python', 3, 6)\n(12, 'umwlb', 'male', 24, 'Economics', 'Java', 2, 64)\n(12, 'umwlb', 'male', 24, 'Economics', 'C++', 3, 99)\n(13, 'vebqo', 'male', 14, 'Math', 'Python', 3, 27)\n(13, 'vebqo', 'male', 14, 'Math', 'Java', 2, 87)\n(14, 'xulny', 'female', 14, 'Finance', 'Python', 3, 100)\n(14, 'xulny', 'female', 14, 'Finance', 'Java', 2, 83)\n(15, 'aqokr', 'female', 17, 'Finance', 'Python', 3, 20)\n(15, 'aqokr', 'female', 17, 'Finance', 'Java', 2, 79)\n(15, 'aqokr', 'female', 17, 'Finance', 'C++', 3, 2)\n(15, 'aqokr', 'female', 17, 'Finance', 'C', 2, 32)\n(15, 'aqokr', 'female', 17, 'Finance', 'R', 1, 9)\n(15, 'aqokr', 'female', 17, 'Finance', 'Go', 1, 5)\n(16, 'rfwmg', 'female', 20, 'CS', 'Python', 3, 44)\n(16, 'rfwmg', 'female', 20, 'CS', 'Java', 2, 34)\n(16, 'rfwmg', 'female', 20, 'CS', 'C++', 3, 47)\n(16, 'rfwmg', 'female', 20, 'CS', 'C', 2, 20)\n(16, 'rfwmg', 'female', 20, 'CS', 'R', 1, 44)\n(16, 'rfwmg', 'female', 20, 'CS', 'Go', 1, 57)\n(17, 'iosuj', 'female', 18, 'CS', 'Python', 3, 62)\n(18, 'lsier', 'male', 21, 'Math', 'Python', 3, 94)\n(18, 'lsier', 'male', 21, 'Math', 'Java', 2, 38)\n(18, 'lsier', 'male', 21, 'Math', 'C++', 3, 73)\n(18, 'lsier', 'male', 21, 'Math', 'C', 2, 51)\n(18, 'lsier', 'male', 21, 'Math', 'R', 1, 24)\n(18, 'lsier', 'male', 21, 'Math', 'Go', 1, 30)\n(19, 'prbex', 'female', 22, 'Economics', 'Python', 3, 46)\n(20, 'wlbrv', 'male', 15, 'Finance', 'Python', 3, 52)\n(20, 'wlbrv', 'male', 15, 'Finance', 'Java', 2, 77)\n"
]
],
[
[
"创建视图备练习5使用",
"_____no_output_____"
]
],
[
[
"#创建视图以便查询\ncursor.execute('create view score as \\\n select id,name,class_name,credit,score \\\n from students s inner join\\\n (select student_id,class_name,c.credit,score from class c inner join enrolled e on c.class_id = e.class_id)tc \\\n on s.id =tc.student_id')\ndb.commit()",
"_____no_output_____"
],
[
"# 关闭Cursor:\ncursor.close()\n# 关闭connection:\ndb.close()",
"_____no_output_____"
]
],
[
[
"### 练习5\n结合Tkinter,做一个成绩查询的SQLite系统 \\\n提示: \\\n创建一个查询按钮,查询所有students表\n",
"_____no_output_____"
],
[
"## test_tkinter.py\n 简单的成绩查询界面",
"_____no_output_____"
]
],
[
[
"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport sqlite3\n\ndef query():\n id=in_id.get()\n name=in_name.get()\n course=in_co.get()\n #清空treeview\n items=tree.get_children()\n for item in items:\n tree.delete(item)\n #查询条件\n q_id = ''if id=='' else 'id='+id\n q_name = '' if name=='' else 'name=\"{}\"'.format(name)\n q1 = ' and ' if (q_id!='' and q_name!='') else ''\n q_course = '' if course=='' else 'class_name=\"{}\"'.format(course)\n q2 = ' and ' if ((q_id+q_name)!='' and q_course!='') else ''\n q = q_id+q1+q_name+q2+q_course\n q = q if q=='' else ' where '+q\n print(q)\n #执行查询,插入结果到表格\n cursor.execute('select * from score'+q)\n #cursor.execute('select * from students'+q)\n count=0 \n for item in cursor.fetchall():\n tree.insert('','end',values=item)\n count+=1\n if count==0:\n messagebox.showinfo('提醒','未查询到相关结果')\n\n# 数据库文件是university.db,不存在,则自动创建\nwith sqlite3.connect('university.db') as db:\n# 创建一个cursor:\n cursor = db.cursor()\n\n# 创建主窗口对象\nroot = Tk()\n# 设置初始化界面大小\nroot.geometry('600x400')\nroot.resizable(False, False)\n# 窗口标题\nroot.title(\"成绩查询系统\")\n\nframe1 = Frame(root)\nframe2 = Frame(root)\n \nframe1.pack(side='top',ipady=10)\nframe2.pack()\n\nlb_id=Label(frame1,text='学号').pack(side='left')\nin_id=Entry(frame1,width=10)\nin_id.pack(side='left',padx=10)\nlb_name=Label(frame1,text='姓名').pack(side='left')\nin_name=Entry(frame1,width=10)\nin_name.pack(side='left',padx=10)\nlb_co=Label(frame1,text='课程').pack(side='left')\nin_co=Entry(frame1,width=10)\nin_co.pack(side='left',padx=10)\n\nbt_query = Button(frame1,text = '查询',command = query)\nbt_query.pack(side='right',padx=10)\n\ntree=ttk.Treeview(frame2,show=\"headings\") #表格\ntree['columns']=('学号','姓名','课程','学分','成绩')\ntree.column('学号',width=100,anchor='center') #表示列,不显示\ntree.column('姓名',width=80,anchor='center')\ntree.column('课程',width=100,anchor='center')\ntree.column('学分',width=50,anchor='center') \ntree.column('成绩',width=80,anchor='center')\n\ntree.heading('学号',text='学号') #显示表头\ntree.heading('姓名',text='姓名')\ntree.heading('课程',text='课程')\ntree.heading('学分',text='学分')\ntree.heading('成绩',text='成绩')\n\n#滚动条\nvbar = ttk.Scrollbar(frame2,orient=VERTICAL,command=tree.yview)\ntree.configure(yscrollcommand=vbar.set)\nvbar.pack(side='right',fill='y')\n\nhbar = ttk.Scrollbar(frame2,orient=HORIZONTAL,command=tree.xview)\ntree.configure(xscrollcommand=hbar.set)\nhbar.pack(side='bottom',fill='x')\ntree.pack()\n\n#关闭窗口时断开数据库\ndef on_closing():\n # 关闭Cursor:\n cursor.close()\n # 关闭connection:\n db.close() \n root.destroy()\n\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n# 加入消息循环\nroot.mainloop()",
" where id=2\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
ecdc4007c7ac17388346d6358f4762e330103049 | 19,584 | ipynb | Jupyter Notebook | datasets/switchboard-corpus/Pull_example_DA.ipynb | Gnome42/Cornell-Conversational-Analysis-Toolkit | 6b0381ee66960b596df4dda8bf52bc322ec5ad48 | [
"MIT"
] | null | null | null | datasets/switchboard-corpus/Pull_example_DA.ipynb | Gnome42/Cornell-Conversational-Analysis-Toolkit | 6b0381ee66960b596df4dda8bf52bc322ec5ad48 | [
"MIT"
] | null | null | null | datasets/switchboard-corpus/Pull_example_DA.ipynb | Gnome42/Cornell-Conversational-Analysis-Toolkit | 6b0381ee66960b596df4dda8bf52bc322ec5ad48 | [
"MIT"
] | null | null | null | 36.537313 | 133 | 0.401705 | [
[
[
"# This code pulls a sample of dialogue acts and exports an excel spreadsheet\n\nimport os\nos.chdir(\"../../\") # import convokit\nimport convokit\nimport numpy as np\nimport pandas as pd\nimport re\nfrom collections import defaultdict\n",
"_____no_output_____"
],
[
"from convokit import meta_index\nos.chdir(\"datasets/switchboard-corpus\") # then come back for swda\n\n# Download corpus\ncorpus = convokit.Corpus(filename = \"./corpus\")\n\n",
"_____no_output_____"
],
[
"# Create dictionary with metadata collapsed to utterance level\nutterance_ids = corpus.get_utterance_ids()\nconvo_ids = corpus.get_conversation_ids()\n\nrows = []\nfor uid in utterance_ids:\n # Extract conv id from uid. There's probably a simpler approach.\n conv_id = re.search(r\"\\d*(?=-)\", uid).group()\n \n # Combine relevant information from dictionaries into a row\n rows.append({**corpus.utterances[uid].meta,\n **{'text': corpus.utterances[uid].text},\n **corpus.utterances[uid].user.meta,\n **corpus.meta['metadata'][conv_id]})",
"_____no_output_____"
],
[
"# Create dataframe out of list of rows\nmetadata = pd.DataFrame(rows, index=utterance_ids)\nmetadata.head(10)",
"_____no_output_____"
],
[
"# Add a column that gives the names for each tag\n# https://web.stanford.edu/~jurafsky/ws97/manual.august1.html\n\n# Some tags are omitted from the tag_dict. See the above link for these. \ntag_dict = {'sd':'Statement-non-opinion',\n'b':'Acknowledge (Backchannel)',\n'sv':'Statement-opinion',\n'aa':'Agree/Accept',\n'%':'Abandoned or Turn-Exit',\n'ba':'Appreciation',\n'qy':'Yes-No-Question',\n'x':'Non-verbal',\n'ny':'Yes answers',\n'fc':'Conventional-closing',\n'%':'Uninterpretable',\n'qw':'Wh-Question',\n'nn':'No answers',\n'bk':'Response Acknowledgement',\n'h':'Hedge',\n'qy^d':'Declarative Yes-No-Question',\n'fo_o_fw_by_bc':'Other',\n'bh':'Backchannel in question form',\n'^q':'Quotation',\n'bf':'Summarize/reformulate',\n'na':'Affirmative non-yes answers',\n'ad':'Action-directive',\n'^2':'Collaborative Completion',\n'b^m':'Repeat-phrase',\n'qo':'Open-Question',\n'qh':'Rhetorical-Questions',\n'^h':'Hold before answer/agreement',\n'ar':'Reject',\n'ng':'Negative non-no answers',\n'br':'Signal-non-understanding',\n'no':'Other answers',\n'fp':'Conventional-opening',\n'qrr':'Or-Clause',\n'arp_nd':'Dispreferred answers',\n't3':'3rd-party-talk',\n'oo_co_cc':'Offers, Options, Commits',\n't1':'Self-talk',\n'bd':'Downplayer',\n'aap_am':'Maybe/Accept-part',\n'^g':'Tag-Question',\n'qw^d':'Declarative Wh-Question',\n'fa':'Apology',\n'ft':'Thanking',\n'+': 'Segment (multi-utterance)'}\n",
"_____no_output_____"
],
[
"# Create a new column mapping on the tag names\nmetadata['tag_name'] = metadata['tag'].map(tag_dict)",
"_____no_output_____"
],
[
"metadata = metadata[['tag_name', 'tag', 'text']]",
"_____no_output_____"
],
[
"# Number of examples of each dialogue act to pull\nsample_number = 10\n\n# First drop all tag groups that have less than 10 occurences, then sample without replacement within each remaining group. \nmetadata_filtered = metadata.groupby('tag').filter(lambda x: len(x) >= sample_number)\n\nmetadata_output = metadata_filtered.sample(frac=1).groupby('tag').head(sample_number)",
"_____no_output_____"
],
[
"metadata_output.to_excel(\"Sample_of_dialogue_acts.xlsx\")",
"_____no_output_____"
],
[
"len(metadata_output)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc63a5e07a16bbfe0e81d08605fabc17cb1277 | 617,222 | ipynb | Jupyter Notebook | ssd7_training.ipynb | huang1ei/tc-detection-keras | d8f5d7e365d42b81039cfba538acb45fc497b860 | [
"Apache-2.0"
] | 1 | 2019-04-12T05:00:36.000Z | 2019-04-12T05:00:36.000Z | ssd7_training.ipynb | huang1ei/tc-detection-keras | d8f5d7e365d42b81039cfba538acb45fc497b860 | [
"Apache-2.0"
] | null | null | null | ssd7_training.ipynb | huang1ei/tc-detection-keras | d8f5d7e365d42b81039cfba538acb45fc497b860 | [
"Apache-2.0"
] | null | null | null | 842.049113 | 517,840 | 0.929492 | [
[
[
"# SSD7 Training Tutorial\n\nThis tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.\n\nDisclaimer about SSD7:\nAs you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.",
"_____no_output_____"
]
],
[
[
"from keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd7 import build_model\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\nfrom data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\nfrom data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n\n%matplotlib inline",
"Using TensorFlow backend.\n"
]
],
[
[
"## 1. Set the model configuration parameters\n\nThe cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to to match ground truth and anchor boxes during the training.\n\nHere are just some comments on a few of the parameters, read the documentation for more details:\n\n* Set the height, width, and number of color channels to whatever you want the model to accept as image input. If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. to convert your images to the model input size during training. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.\n* The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.\n* The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. The difference between latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.\n* The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in tSSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.\n* `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer. This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.\n* If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.\n* If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries. 
It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.\n* In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.\n* `normalize_coords` converts all coordinates from absolute coordinate to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.",
"_____no_output_____"
]
],
[
[
"img_height = 300 # Height of the input images\nimg_width = 480 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nintensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nn_classes = 5 # Number of positive classes\nscales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.\naspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes\ntwo_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1\nsteps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\noffsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled\nnormalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size",
"_____no_output_____"
]
],
[
[
"## 2. Build or load the model\n\nYou will want to execute either of the two code cells in the subsequent two sub-sections, not both.",
"_____no_output_____"
],
[
"### 2.1 Create a new model\n\nIf you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.\n\nThe code cell below does the following things:\n1. It calls the function `build_model()` to build the model.\n2. It optionally loads some weights into the model.\n3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n\n`SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper.",
"_____no_output_____"
]
],
[
[
"# 1: Build the Keras model\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = build_model(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n aspect_ratios_per_layer=None,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=intensity_mean,\n divide_by_stddev=intensity_range)\n\n# 2: Optional: Load some weights\n\n#model.load_weights('./ssd7_weights.h5', by_name=True)\n\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model\n\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)",
"_____no_output_____"
]
],
[
[
"### 2.2 Load a saved model\n\nIf you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.\n\nThe SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.\n\nThis next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.",
"_____no_output_____"
]
],
[
[
"# TODO: Set the path to the `.h5` file of the model to be loaded.\nmodel_path = 'ssd7.h5'\n\n# We need to create an SSDLoss object in order to pass that to the model loader.\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'compute_loss': ssd_loss.compute_loss})",
"_____no_output_____"
]
],
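For the 'inference'-mode case mentioned above, the loading call would additionally need the decoding layer in `custom_objects`. A sketch, assuming the custom layer classes imported at the top of the notebook and a placeholder file name (use `DecodeDetectionsFast` instead for a model built in 'inference_fast' mode):

```python
# Hypothetical path to a model that was built in 'inference' mode.
inference_model_path = 'ssd7_inference.h5'

ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

inference_model = load_model(inference_model_path,
                             custom_objects={'AnchorBoxes': AnchorBoxes,
                                             'DecodeDetections': DecodeDetections,
                                             'compute_loss': ssd_loss.compute_loss})
```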
[
[
"## 3. Set up the data generators for the training\n\nThe code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.\n\nNote that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.\n\nSet the batch size to to your preference and to what your GPU memory allows, it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.\n\nThe `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n\nThe image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for unform-size images could look like. Feel free to put together other image processing chains, you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiting an `SSDDataAugmentation` object and passing that to the generator instead. 
This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.\n\nAn `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.",
"_____no_output_____"
],
[
"### Note:\n\nThe example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).",
"_____no_output_____"
]
],
[
[
"# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets.\n\n# TODO: Set the paths to your dataset here.\n\n# Images\nimages_dir = '../../datasets/udacity_driving_datasets/'\n\n# Ground truth\ntrain_labels_filename = '../../datasets/udacity_driving_datasets/labels_train.csv'\nval_labels_filename = '../../datasets/udacity_driving_datasets/labels_val.csv'\n\ntrain_dataset.parse_csv(images_dir=images_dir,\n labels_filename=train_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.\n include_classes='all')\n\nval_dataset.parse_csv(images_dir=images_dir,\n labels_filename=val_labels_filename,\n input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],\n include_classes='all')\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\ntrain_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)\n\nval_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))",
"Number of images in the training dataset:\t 18000\nNumber of images in the validation dataset:\t 4241\n"
],
[
"# 3: Set the batch size.\n\nbatch_size = 16\n\n# 4: Define the image processing chain.\n\ndata_augmentation_chain = DataAugmentationConstantInputSize(random_brightness=(-48, 48, 0.5),\n random_contrast=(0.5, 1.8, 0.5),\n random_saturation=(0.5, 1.8, 0.5),\n random_hue=(18, 0.5),\n random_flip=0.5,\n random_translate=((0.03,0.5), (0.03,0.5), 0.5),\n random_scale=(0.5, 2.0, 0.5),\n n_trials_max=3,\n clip_boxes=True,\n overlap_criterion='area',\n bounds_box_filter=(0.3, 1.0),\n bounds_validator=(0.5, 1.0),\n n_boxes_min=1,\n background=(0,0,0))\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n model.get_layer('classes5').output_shape[1:3],\n model.get_layer('classes6').output_shape[1:3],\n model.get_layer('classes7').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.3,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[data_augmentation_chain],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)",
"_____no_output_____"
]
],
[
[
"## 4. Set the remaining training parameters and train the model\n\nWe've already chosen an optimizer and a learning rate and set the batch size above, now let's set the remaining training parameters.\n\nI'll set a few Keras callbacks below, one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Feel free to add more callbacks if you want TensorBoard summaries or whatever.",
"_____no_output_____"
]
],
[
[
"# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the weights.\nmodel_checkpoint = ModelCheckpoint(filepath='ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n\ncsv_logger = CSVLogger(filename='ssd7_training_log.csv',\n separator=',',\n append=True)\n\nearly_stopping = EarlyStopping(monitor='val_loss',\n min_delta=0.0,\n patience=10,\n verbose=1)\n\nreduce_learning_rate = ReduceLROnPlateau(monitor='val_loss',\n factor=0.2,\n patience=8,\n verbose=1,\n epsilon=0.001,\n cooldown=0,\n min_lr=0.00001)\n\ncallbacks = [model_checkpoint,\n csv_logger,\n early_stopping,\n reduce_learning_rate]",
"_____no_output_____"
]
],
[
[
"I'll set one epoch to consist of 1,000 training steps I'll arbitrarily set the number of epochs to 20 here. This does not imply that 20,000 training steps is the right number. Depending on the model, the dataset, the learning rate, etc. you might have to train much longer to achieve convergence, or maybe less.\n\nInstead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.\n\nIn order to only run a partial training and resume smoothly later on, there are a few things you should note:\n1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n3. Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want ot save their state somehow if you want to pick up a training exactly where you left off.",
"_____no_output_____"
]
],
[
[
"# TODO: Set the epochs to train for.\n# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 20\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)",
"_____no_output_____"
]
],
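Once a run like the one above has finished, resuming could look roughly like the sketch below. The checkpoint file name is a placeholder for whichever saved epoch had the lowest validation loss; the generators, callbacks, and loss object are the ones defined earlier in this notebook.

```python
# Load the full saved model so the optimizer state is restored as well.
resumed_model = load_model('ssd7_epoch-20_loss-2.50_val_loss-2.60.h5',  # placeholder name
                           custom_objects={'AnchorBoxes': AnchorBoxes,
                                           'compute_loss': ssd_loss.compute_loss})

# Continue from epoch 20 (zero-based) up to epoch 40.
history = resumed_model.fit_generator(generator=train_generator,
                                      steps_per_epoch=1000,
                                      epochs=40,
                                      initial_epoch=20,
                                      callbacks=callbacks,
                                      validation_data=val_generator,
                                      validation_steps=ceil(val_dataset_size/batch_size))
```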
[
[
"Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(20,12))\nplt.plot(history.history['loss'], label='loss')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.legend(loc='upper right', prop={'size': 24});",
"_____no_output_____"
]
],
[
[
"The validation loss has been decreasing at a similar pace as the training loss, indicating that our model has been learning effectively over the last 30 epochs. We could try to train longer and see if the validation loss can be decreased further. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we will want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss.",
"_____no_output_____"
],
[
"### 5. Make predictions\n\nNow let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator which we've already set up above. Feel free to change the batch size.\n\nYou can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.",
"_____no_output_____"
]
],
[
[
"# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images',\n 'processed_labels',\n 'filenames'},\n keep_images_without_gt=False)",
"_____no_output_____"
],
[
"# 2: Generate samples\n\nbatch_images, batch_labels, batch_filenames = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(batch_labels[i])",
"Image: ../../datasets/Udacity_Driving/driving_dataset_consolidated_small/1479503098787107173.jpg\n\nGround truth boxes:\n\n[[ 1 12 141 60 177]\n [ 1 50 142 123 184]\n [ 1 112 143 134 161]\n [ 1 126 144 141 160]\n [ 1 196 141 208 150]\n [ 1 213 139 223 149]\n [ 1 219 140 244 158]\n [ 1 369 110 479 217]]\n"
],
[
"# 3: Make a prediction\n\ny_pred = model.predict(batch_images)",
"_____no_output_____"
]
],
[
[
"Now let's decode the raw predictions in `y_pred`.\n\nHad we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).\n\n`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.",
"_____no_output_____"
]
],
[
[
"# 4: Decode the raw prediction `y_pred`\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded[i])",
"Predicted boxes:\n\n class conf xmin ymin xmax ymax\n[[ 1. 0.95 363.69 123.34 494.48 223.61]\n [ 1. 0.91 217.38 140.01 240.73 160.38]\n [ 1. 0.91 53.77 145.21 118.32 187.84]\n [ 1. 0.62 13.87 145.2 56.61 176.59]\n [ 1. 0.62 110.87 143.69 134.8 163.92]\n [ 1. 0.52 216.01 130.4 248.78 156.57]]\n"
]
],
[
[
"Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison.",
"_____no_output_____"
]
],
[
[
"# 5: Draw the predicted boxes onto the image\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_images[i])\n\ncurrent_axis = plt.gca()\n\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes\nclasses = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs\n\n# Draw the ground truth boxes in green (omit the label for more clarity)\nfor box in batch_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\n# Draw the predicted boxes in blue\nfor box in y_pred_decoded[i]:\n xmin = box[-4]\n ymin = box[-3]\n xmax = box[-2]\n ymax = box[-1]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdc6f9a912b50e5d2e097c9da2cfa13fda72ed7 | 15,482 | ipynb | Jupyter Notebook | main.ipynb | robertosousa1/probability-functions | 979bd89cad64045494ab93464d837c4d201e0dae | [
"MIT"
] | null | null | null | main.ipynb | robertosousa1/probability-functions | 979bd89cad64045494ab93464d837c4d201e0dae | [
"MIT"
] | null | null | null | main.ipynb | robertosousa1/probability-functions | 979bd89cad64045494ab93464d837c4d201e0dae | [
"MIT"
] | null | null | null | 28.355311 | 347 | 0.46596 | [
[
[
"# Probability distributions.\n \n1. First part, answering questions about an artificial * data set * with data from a normal and a binomial sample.\n2. The second part will be about analyzing the distribution of a variable from the _data set_ [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2).",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as sct\nimport seaborn as sns\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom scipy.stats import norm",
"_____no_output_____"
],
[
"%matplotlib inline\n\nfrom IPython.core.pylabtools import figsize\n\n\nfigsize(12, 8)\n\nsns.set()",
"_____no_output_____"
]
],
[
[
"### First part",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\n \ndataframe = pd.DataFrame({\"normal\": sct.norm.rvs(20, 4, size=10000),\n \"binomial\": sct.binom.rvs(100, 0.2, size=10000)})",
"_____no_output_____"
]
],
[
[
"#### Analysis start",
"_____no_output_____"
]
],
[
[
"dataframe.head()",
"_____no_output_____"
]
],
[
[
"#### Question 1\n\nWhat is the difference between the quartiles (Q1, Q2 and Q3) of the `normal` and` binomial` variables of `dataframe`? Respond as a tuple of three elements rounded to three decimal places.\n\nIn other words, let `q1_norm`,` q2_norm` and `q3_norm` be the quantiles of the variable` normal` and `q1_binom`,` q2_binom` and `q3_binom` the quantiles of the variable` binom`, what is the difference `(q1_norm - q1 binom, q2_norm - q2_binom, q3_norm - q3_binom)`?",
"_____no_output_____"
]
],
[
[
"def q1():\n normal = np.percentile(dataframe.normal, [25, 50, 75])\n binomial = np.percentile(dataframe.binomial, [25, 50, 75]) \n return (round(normal[0]-binomial[0], 3),round(normal[1]-binomial[1], 3),round(normal[2]-binomial[2], 3))\n\nq1()",
"_____no_output_____"
]
],
[
[
"#### Question 2\n\nConsider the interval $ [\\ bar {x} - s, \\ bar {x} + s] $, where $ \\ bar {x} $ is the sample mean and $ s $ is the standard deviation. What is the probability in this interval, calculated by the empirical cumulative distribution function (empirical CDF) of the `normal` variable? Respond as a single scalar rounded to three decimal places.",
"_____no_output_____"
]
],
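Before computing the answer, a throwaway illustration (not part of the required solution) of what the empirical CDF returns on a tiny sample may help:

```python
# ECDF maps a value x to the fraction of observations that are <= x.
toy_ecdf = ECDF([1, 2, 3, 4, 5])
print(toy_ecdf(3))                # 0.6: three of the five observations are <= 3
print(toy_ecdf(4) - toy_ecdf(2))  # 0.4: empirical probability mass in (2, 4]
```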
[
[
"def q2():\n ecdf = ECDF(dataframe.normal)\n mean = dataframe.normal.mean()\n deviation = dataframe.normal.std()\n \n result = (round(ecdf(mean + deviation) - ecdf(mean - deviation), 3))\n result = result.item()\n return result\n\nq2()",
"_____no_output_____"
]
],
[
[
"#### Question 3\n\nWhat is the difference between the means and the variances of the `binomial` and` normal` variables? Respond as a tuple of two elements rounded to three decimal places.\n\nIn other words, let `m_binom` and` v_binom` be the mean and variance of the `binomial` variable, and` m_norm` and `v_norm` be the mean and variance of the` normal` variable. What are the differences `(m_binom - m_norm, v_binom - v_norm)`?",
"_____no_output_____"
]
],
[
[
"def q3():\n m_norm = dataframe.normal.mean()\n v_norm = dataframe.normal.var()\n m_binom = dataframe.binomial.mean()\n v_binom = dataframe.binomial.var()\n return (round(m_binom - m_norm, 3), round(v_binom - v_norm, 3))\n\nq3()",
"_____no_output_____"
]
],
[
[
"### Part 2",
"_____no_output_____"
]
],
[
[
"stars = pd.read_csv(\"pulsar_stars.csv\")\n\nstars.rename({old_name: new_name\n for (old_name, new_name)\n in zip(stars.columns,\n [\"mean_profile\", \"sd_profile\", \"kurt_profile\", \"skew_profile\", \"mean_curve\", \"sd_curve\", \"kurt_curve\", \"skew_curve\", \"target\"])\n },\n axis=1, inplace=True)\n\nstars.loc[:, \"target\"] = stars.target.astype(bool)",
"_____no_output_____"
]
],
[
[
"### Analysis start",
"_____no_output_____"
]
],
[
[
"stars.head()",
"_____no_output_____"
]
],
[
[
"#### Question 4\n\nConsidering the `mean_profile` variable of` stars`:\n\n1. Filter only the values of `mean_profile` where` target == 0` (ie, where the star is not a pulsar).\n2. Standardize the `mean_profile` variable previously filtered to have mean 0 and variance 1.\n\nWe will call the resulting variable `false_pulsar_mean_profile_standardized`.\n\nFind the theoretical quantiles for a normal distribution of mean 0 and variance 1 for 0.80, 0.90 and 0.95 using the `norm.ppf ()` function available in `scipy.stats`.\n\nWhat are the probabilities associated with these quantiles using the empirical CDF of the variable `false_pulsar_mean_profile_standardized`? Respond as a tuple of three elements rounded to three decimal places.",
"_____no_output_____"
]
],
[
[
"aux = stars[stars['target'] == False]\naux = aux['mean_profile']\nstandardized = (aux - aux.mean()) / aux.std()",
"_____no_output_____"
],
[
"def q4():\n ecdf = ECDF(standardized)\n media = standardized.mean()\n desvio = standardized.var()\n q1 = norm.ppf(0.80, loc=0, scale=1)\n q2 = norm.ppf(0.90, loc=0, scale=1)\n q3 = norm.ppf(0.95, loc=0, scale=1)\n\n return (round(ecdf(q1), 3),\n round(ecdf(q2), 3),\n round(ecdf(q3), 3))\n\nq4()",
"_____no_output_____"
]
],
[
[
"#### Question 5\n\nWhat is the difference between the Q1, Q2 and Q3 quantiles of `false_pulsar_mean_profile_standardized` and the same theoretical quantiles of a normal distribution of mean 0 and variance 1? Respond as a tuple of three elements rounded to three decimal places.",
"_____no_output_____"
]
],
[
[
"def q5():\n quartil = np.percentile(standardized, [25, 50, 75])\n q1 = norm.ppf(0.25, loc=0, scale=1)\n q2 = norm.ppf(0.50, loc=0, scale=1)\n q3 = norm.ppf(0.75, loc=0, scale=1)\n return (round(quartil[0]-q1, 3),round(quartil[1]-q2, 3),round(quartil[2]-q3, 3))\n\nq5()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdc7495c8008eb367fc41d072a94822c2792d9e | 24,052 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 6 - Saving and Loading Models.ipynb | armhzjz/deep-learning-v2-pytorch | cedd30851aba8241a76d5278ce69286058d99fb1 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 6 - Saving and Loading Models.ipynb | armhzjz/deep-learning-v2-pytorch | cedd30851aba8241a76d5278ce69286058d99fb1 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 6 - Saving and Loading Models.ipynb | armhzjz/deep-learning-v2-pytorch | cedd30851aba8241a76d5278ce69286058d99fb1 | [
"MIT"
] | null | null | null | 58.096618 | 4,736 | 0.683394 | [
[
[
"# Saving and Loading Models\n\nIn this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\nimport helper\nimport fc_model",
"_____no_output_____"
],
[
"# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)",
"Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz\nDownloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz\nDownloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz\nDownloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz\nProcessing...\nDone!\n"
]
],
[
[
"Here we can see one of the images.",
"_____no_output_____"
]
],
[
[
"image, label = next(iter(trainloader))\nhelper.imshow(image[0,:]);",
"_____no_output_____"
]
],
[
[
"# Train a network\n\nTo make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.",
"_____no_output_____"
]
],
[
[
"# Create the network, define the criterion and optimizer\n\nmodel = fc_model.Network(784, 10, [512, 256, 128])\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.001)",
"_____no_output_____"
],
[
"fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)",
"Epoch: 1/2.. Training Loss: 1.670.. Test Loss: 0.996.. Test Accuracy: 0.643\nEpoch: 1/2.. Training Loss: 1.030.. Test Loss: 0.757.. Test Accuracy: 0.700\nEpoch: 1/2.. Training Loss: 0.868.. Test Loss: 0.692.. Test Accuracy: 0.726\nEpoch: 1/2.. Training Loss: 0.815.. Test Loss: 0.653.. Test Accuracy: 0.754\nEpoch: 1/2.. Training Loss: 0.729.. Test Loss: 0.634.. Test Accuracy: 0.752\nEpoch: 1/2.. Training Loss: 0.778.. Test Loss: 0.617.. Test Accuracy: 0.769\nEpoch: 1/2.. Training Loss: 0.703.. Test Loss: 0.580.. Test Accuracy: 0.786\nEpoch: 1/2.. Training Loss: 0.671.. Test Loss: 0.560.. Test Accuracy: 0.792\nEpoch: 1/2.. Training Loss: 0.675.. Test Loss: 0.553.. Test Accuracy: 0.795\nEpoch: 1/2.. Training Loss: 0.687.. Test Loss: 0.552.. Test Accuracy: 0.792\nEpoch: 1/2.. Training Loss: 0.664.. Test Loss: 0.537.. Test Accuracy: 0.797\nEpoch: 1/2.. Training Loss: 0.578.. Test Loss: 0.547.. Test Accuracy: 0.804\nEpoch: 1/2.. Training Loss: 0.666.. Test Loss: 0.532.. Test Accuracy: 0.801\nEpoch: 1/2.. Training Loss: 0.638.. Test Loss: 0.509.. Test Accuracy: 0.811\nEpoch: 1/2.. Training Loss: 0.645.. Test Loss: 0.520.. Test Accuracy: 0.812\nEpoch: 1/2.. Training Loss: 0.587.. Test Loss: 0.494.. Test Accuracy: 0.816\nEpoch: 1/2.. Training Loss: 0.592.. Test Loss: 0.510.. Test Accuracy: 0.809\nEpoch: 1/2.. Training Loss: 0.582.. Test Loss: 0.517.. Test Accuracy: 0.809\nEpoch: 1/2.. Training Loss: 0.581.. Test Loss: 0.517.. Test Accuracy: 0.810\nEpoch: 1/2.. Training Loss: 0.596.. Test Loss: 0.509.. Test Accuracy: 0.813\nEpoch: 1/2.. Training Loss: 0.575.. Test Loss: 0.485.. Test Accuracy: 0.823\nEpoch: 1/2.. Training Loss: 0.538.. Test Loss: 0.469.. Test Accuracy: 0.828\nEpoch: 1/2.. Training Loss: 0.543.. Test Loss: 0.520.. Test Accuracy: 0.809\nEpoch: 2/2.. Training Loss: 0.559.. Test Loss: 0.473.. Test Accuracy: 0.830\nEpoch: 2/2.. Training Loss: 0.579.. Test Loss: 0.477.. Test Accuracy: 0.829\nEpoch: 2/2.. Training Loss: 0.563.. Test Loss: 0.468.. Test Accuracy: 0.831\nEpoch: 2/2.. Training Loss: 0.546.. Test Loss: 0.477.. Test Accuracy: 0.826\nEpoch: 2/2.. Training Loss: 0.577.. Test Loss: 0.474.. Test Accuracy: 0.826\nEpoch: 2/2.. Training Loss: 0.512.. Test Loss: 0.479.. Test Accuracy: 0.826\nEpoch: 2/2.. Training Loss: 0.558.. Test Loss: 0.493.. Test Accuracy: 0.824\nEpoch: 2/2.. Training Loss: 0.553.. Test Loss: 0.485.. Test Accuracy: 0.826\nEpoch: 2/2.. Training Loss: 0.549.. Test Loss: 0.462.. Test Accuracy: 0.835\nEpoch: 2/2.. Training Loss: 0.557.. Test Loss: 0.471.. Test Accuracy: 0.830\nEpoch: 2/2.. Training Loss: 0.536.. Test Loss: 0.469.. Test Accuracy: 0.827\nEpoch: 2/2.. Training Loss: 0.521.. Test Loss: 0.466.. Test Accuracy: 0.836\nEpoch: 2/2.. Training Loss: 0.549.. Test Loss: 0.473.. Test Accuracy: 0.826\nEpoch: 2/2.. Training Loss: 0.529.. Test Loss: 0.450.. Test Accuracy: 0.835\nEpoch: 2/2.. Training Loss: 0.519.. Test Loss: 0.461.. Test Accuracy: 0.834\nEpoch: 2/2.. Training Loss: 0.502.. Test Loss: 0.443.. Test Accuracy: 0.835\nEpoch: 2/2.. Training Loss: 0.528.. Test Loss: 0.453.. Test Accuracy: 0.837\nEpoch: 2/2.. Training Loss: 0.524.. Test Loss: 0.447.. Test Accuracy: 0.839\nEpoch: 2/2.. Training Loss: 0.499.. Test Loss: 0.439.. Test Accuracy: 0.842\nEpoch: 2/2.. Training Loss: 0.500.. Test Loss: 0.434.. Test Accuracy: 0.839\nEpoch: 2/2.. Training Loss: 0.510.. Test Loss: 0.433.. Test Accuracy: 0.842\nEpoch: 2/2.. Training Loss: 0.521.. Test Loss: 0.445.. Test Accuracy: 0.837\nEpoch: 2/2.. Training Loss: 0.529.. Test Loss: 0.475.. Test Accuracy: 0.826\n"
]
],
[
[
"## Saving and loading networks\n\nAs you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.\n\nThe parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.",
"_____no_output_____"
]
],
[
[
"print(\"Our model: \\n\\n\", model, '\\n')\nprint(\"The state dict keys: \\n\\n\", model.state_dict().keys())",
"Our model: \n\n Network(\n (hidden_layers): ModuleList(\n (0): Linear(in_features=784, out_features=512, bias=True)\n (1): Linear(in_features=512, out_features=256, bias=True)\n (2): Linear(in_features=256, out_features=128, bias=True)\n )\n (output): Linear(in_features=128, out_features=10, bias=True)\n (dropout): Dropout(p=0.5)\n) \n\nThe state dict keys: \n\n odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])\n"
]
],
[
[
"The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.",
"_____no_output_____"
]
],
[
[
"torch.save(model.state_dict(), 'checkpoint.pth')",
"_____no_output_____"
]
],
[
[
"Then we can load the state dict with `torch.load`.",
"_____no_output_____"
]
],
[
[
"state_dict = torch.load('checkpoint.pth')\nprint(state_dict.keys())",
"odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])\n"
]
],
[
[
"And to load the state dict in to the network, you do `model.load_state_dict(state_dict)`.",
"_____no_output_____"
]
],
[
[
"model.load_state_dict(state_dict)",
"_____no_output_____"
]
],
[
[
"Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.",
"_____no_output_____"
]
],
[
[
"# Try this\nmodel = fc_model.Network(784, 10, [400, 200, 100])\n# This will throw an error because the tensor sizes are wrong!\nmodel.load_state_dict(state_dict)",
"_____no_output_____"
]
],
[
[
"This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to compeletely rebuild the model.",
"_____no_output_____"
]
],
[
[
"checkpoint = {'input_size': 784,\n 'output_size': 10,\n 'hidden_layers': [each.out_features for each in model.hidden_layers],\n 'state_dict': model.state_dict()}\n\ntorch.save(checkpoint, 'checkpoint.pth')",
"_____no_output_____"
]
],
[
[
"Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints. ",
"_____no_output_____"
]
],
[
[
"def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n model = fc_model.Network(checkpoint['input_size'],\n checkpoint['output_size'],\n checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n \n return model",
"_____no_output_____"
],
[
"model = load_checkpoint('checkpoint.pth')\nprint(model)",
"Network(\n (hidden_layers): ModuleList(\n (0): Linear(in_features=784, out_features=400, bias=True)\n (1): Linear(in_features=400, out_features=200, bias=True)\n (2): Linear(in_features=200, out_features=100, bias=True)\n )\n (output): Linear(in_features=100, out_features=10, bias=True)\n (dropout): Dropout(p=0.5)\n)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ecdc74d70e1d9fcbfd573bbaa18e1d4fae03eb5b | 975 | ipynb | Jupyter Notebook | 5_Hash_tables.ipynb | ogulkokan/Algorithms | f4054ecd6bdee25edf308017599544cfff298387 | [
"MIT"
] | null | null | null | 5_Hash_tables.ipynb | ogulkokan/Algorithms | f4054ecd6bdee25edf308017599544cfff298387 | [
"MIT"
] | null | null | null | 5_Hash_tables.ipynb | ogulkokan/Algorithms | f4054ecd6bdee25edf308017599544cfff298387 | [
"MIT"
] | null | null | null | 16.810345 | 42 | 0.500513 | [
[
[
"# Hash Tables",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
ecdc74e9c5189da110826b897b6e80078687cbc8 | 16,883 | ipynb | Jupyter Notebook | docs/test/testinnsending/.ipynb_checkpoints/person-enk-med-vedlegg-2021-checkpoint.ipynb | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 14 | 2020-10-31T21:41:47.000Z | 2022-01-31T07:36:56.000Z | docs/test/testinnsending/.ipynb_checkpoints/person-enk-med-vedlegg-2021-checkpoint.ipynb | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 6 | 2020-09-03T05:47:07.000Z | 2021-11-16T13:44:37.000Z | docs/test/testinnsending/.ipynb_checkpoints/person-enk-med-vedlegg-2021-checkpoint.ipynb | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 9 | 2020-09-03T06:07:52.000Z | 2021-11-08T10:14:57.000Z | 33.698603 | 244 | 0.59557 | [
[
[
"# Testinnsening av person skattemelding med næringspesifikasjon",
"_____no_output_____"
],
[
"Denne demoen er ment for å vise hvordan flyten for et sluttbrukersystem kan hente et utkast, gjøre endringer, validere/kontrollere det mot Skatteetatens apier, for å sende det inn via Altinn3",
"_____no_output_____"
]
],
[
[
"try: \n from altinn3 import *\n from skatteetaten_api import main_relay, base64_decode_response, decode_dokument\n import requests\n import base64\n import xmltodict\n import xml.dom.minidom\n from pathlib import Path\nexcept ImportError as e:\n print(\"Mangler en avhengighet, installer dem via pip\")\n !pip install python-jose\n !pip install xmltodict\n !pip install pathlib\n import xmltodict\n from skatteetaten_api import main_relay, base64_decode_response, decode_dokument\n\n \n#hjelpe metode om du vil se en request printet som curl \ndef print_request_as_curl(r):\n command = \"curl -X {method} -H {headers} -d '{data}' '{uri}'\"\n method = r.request.method\n uri = r.request.url\n data = r.request.body\n headers = ['\"{0}: {1}\"'.format(k, v) for k, v in r.request.headers.items()]\n headers = \" -H \".join(headers)\n print(command.format(method=method, headers=headers, data=data, uri=uri))",
"_____no_output_____"
]
],
[
[
"## Generer ID-porten token\nTokenet er gyldig i 300 sekunder, rekjørt denne biten om du ikke har kommet frem til Altinn3 biten før 300 sekunder ",
"_____no_output_____"
]
],
[
[
"idporten_header = main_relay()",
"_____no_output_____"
]
],
[
[
"# Hent utkast og gjeldende\nHer legger vi inn fødselsnummeret vi logget oss inn med, Dersom du velger et annet fødselsnummer så må den du logget på med ha tilgang til skattemeldingen du ønsker å hente\n\n#### Parten nedenfor er brukt for internt test, pass på bruk deres egne testparter når dere tester\n\n01014700230 har fått en myndighetsfastsetting\n\nLegg merke til `/api/skattemelding/v2/` biten av url'n er ny for 2021",
"_____no_output_____"
]
],
[
[
"s = requests.Session()\ns.headers = dict(idporten_header)\nfnr=\"01014700230\" #oppdater med test fødselsnummerene du har fått tildelt",
"_____no_output_____"
]
],
[
[
"### Utkast",
"_____no_output_____"
]
],
[
[
"url_utkast = f'https://mp-test.sits.no/api/skattemelding/v2/utkast/2021/{fnr}'\nr = s.get(url_utkast)\nr",
"_____no_output_____"
]
],
[
[
"### Gjeldende",
"_____no_output_____"
]
],
[
[
"url_gjeldende = f'https://mp-test.sits.no/api/skattemelding/v2/2021/{fnr}'\nr_gjeldende = s.get(url_gjeldende)\nr_gjeldende",
"_____no_output_____"
]
],
[
[
"#### Fastsatt\nHer får en _http 404_ om vedkommende ikke har noen fastsetting, rekjørt denne etter du har sendt inn og fått tilbakemdling i Altinn at den har blitt behandlet, du skal nå ha en fastsatt skattemelding om den har blitt sent inn som Komplett",
"_____no_output_____"
]
],
[
[
"url_fastsatt = f'https://mp-test.sits.no/api/skattemelding/v2/fastsatt/2021/{fnr}'\nr_fastsatt = s.get(url_fastsatt)\nr_fastsatt",
"_____no_output_____"
]
],
[
[
"## Svar fra hent gjeldende \n\n### Gjeldende dokument referanse: \nI responsen på alle api kallene, være seg utkast/fastsatt eller gjeldene, så følger det med en dokumentreferanse. \nFor å kalle valider tjenesten, er en avhengig av å bruke korrekt referanse til gjeldende skattemelding. \n\nCellen nedenfor henter ut gjeldende dokumentrefranse printer ut responsen fra hent gjeldende kallet ",
"_____no_output_____"
]
],
[
[
"sjekk_svar = r_gjeldende\n\nsme_og_naering_respons = xmltodict.parse(sjekk_svar.text)\nskattemelding_base64 = sme_og_naering_respons[\"skattemeldingOgNaeringsspesifikasjonforespoerselResponse\"][\"dokumenter\"][\"skattemeldingdokument\"]\nsme_base64 = skattemelding_base64[\"content\"]\ndokref = sme_og_naering_respons[\"skattemeldingOgNaeringsspesifikasjonforespoerselResponse\"][\"dokumenter\"]['skattemeldingdokument']['id']\ndecoded_sme_xml = decode_dokument(skattemelding_base64)\nsme_utkast = xml.dom.minidom.parseString(decoded_sme_xml[\"content\"]).toprettyxml()\n\nprint(f\"Responsen fra hent gjeldende ser slik ut, gjeldende dokumentrerefanse er {dokref}\\n\")\nprint(xml.dom.minidom.parseString(sjekk_svar.text).toprettyxml())\n",
"_____no_output_____"
],
[
"with open(\"../../../src/resources/eksempler/v2/Naeringspesifikasjon-enk-v2.xml\", 'r') as f:\n naering_enk_xml = f.read()\n \ninnsendingstype = \"ikkeKomplett\"\nnaeringsspesifikasjoner_enk_b64 = base64.b64encode(naering_enk_xml.encode(\"utf-8\"))\nnaeringsspesifikasjoner_enk_b64 = str(naeringsspesifikasjoner_enk_b64.decode(\"utf-8\"))\nskattemeldingPersonligSkattepliktig_base64=sme_base64 #bruker utkastet uten noen endringer\nnaeringsspesifikasjoner_base64=naeringsspesifikasjoner_enk_b64\ndok_ref=dokref\n\nvalider_konvlutt_v2 = \"\"\"\n<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<skattemeldingOgNaeringsspesifikasjonRequest xmlns=\"no:skatteetaten:fastsetting:formueinntekt:skattemeldingognaeringsspesifikasjon:request:v2\">\n <dokumenter>\n <dokument>\n <type>skattemeldingPersonlig</type>\n <encoding>utf-8</encoding>\n <content>{sme_base64}</content>\n </dokument>\n <dokument>\n <type>naeringsspesifikasjon</type>\n <encoding>utf-8</encoding>\n <content>{naeringsspeifikasjon_base64}</content>\n </dokument>\n </dokumenter>\n <dokumentreferanseTilGjeldendeDokument>\n <dokumenttype>skattemeldingPersonlig</dokumenttype>\n <dokumentidentifikator>{dok_ref}</dokumentidentifikator>\n </dokumentreferanseTilGjeldendeDokument>\n <inntektsaar>2021</inntektsaar>\n <innsendingsinformasjon>\n <innsendingstype>{innsendingstype}</innsendingstype>\n <opprettetAv>TurboSkatt</opprettetAv>\n </innsendingsinformasjon>\n</skattemeldingOgNaeringsspesifikasjonRequest>\n\"\"\".replace(\"\\n\",\"\")\n\n\nnaering_enk = valider_konvlutt_v2.format(sme_base64=skattemeldingPersonligSkattepliktig_base64,\n naeringsspeifikasjon_base64=naeringsspesifikasjoner_base64,\n dok_ref=dok_ref,\n innsendingstype=innsendingstype)",
"_____no_output_____"
]
],
[
[
"# Valider utkast sme med næringsopplysninger",
"_____no_output_____"
]
],
[
[
"def valider_sme(payload):\n url_valider = f'https://mp-test.sits.no/api/skattemelding/v2/valider/2021/{fnr}'\n header = dict(idporten_header)\n header[\"Content-Type\"] = \"application/xml\"\n return s.post(url_valider, headers=header, data=payload)\n\n\nvalider_respons = valider_sme(naering_enk)\nresultatAvValidering = xmltodict.parse(valider_respons.text)[\"skattemeldingerOgNaeringsspesifikasjonResponse\"][\"resultatAvValidering\"]\n\nif valider_respons:\n print(resultatAvValidering)\n print()\n print(xml.dom.minidom.parseString(valider_respons.text).toprettyxml())\nelse:\n print(valider_respons.status_code, valider_respons.headers, valider_respons.text)",
"_____no_output_____"
]
],
[
[
"# Altinn 3",
"_____no_output_____"
],
[
"1. Hent Altinn Token\n2. Oppretter en ny instans av skjemaet\n3. lasteropp metadata til skjemaet\n4. last opp vedlegg til skattemeldingen\n5. oppdater skattemelding xml med referanse til vedlegg_id fra altinn3. \n6. laster opp skattemeldingen og næringsopplysninger som et vedlegg",
"_____no_output_____"
]
],
[
[
"#1\naltinn3_applikasjon = \"skd/formueinntekt-skattemelding-v2\"\naltinn_header = hent_altinn_token(idporten_header)\n#2\ninstans_data = opprett_ny_instans(altinn_header, fnr, appnavn=altinn3_applikasjon)",
"_____no_output_____"
]
],
[
[
"### 3 Last opp metadata (skattemelding_V1)\nlegg merke til `<innsendingstype>` er satt til `ikkeKomplett` i payloaden, og ikke i Altinn3 metadata\n",
"_____no_output_____"
]
],
[
[
"print(f\"innsendingstypen er satt til: {innsendingstype}\")\nreq_metadata = last_opp_metadata_json(instans_data, altinn_header, inntektsaar=2021, appnavn=altinn3_applikasjon)\nreq_metadata",
"_____no_output_____"
]
],
[
[
"## Last opp skattemelding\n### Last først opp vedlegg som hører til skattemeldingen\nEksemplet nedenfor gjelder kun generelle vedlegg for skattemeldingen, \n\n ```xml\n <vedlegg>\n <id>En unik id levert av altinn når du laster opp vedleggsfilen</id>\n <vedleggsfil>\n <opprinneligFilnavn><tekst>vedlegg_eksempel_sirius_stjerne.jpg</tekst></opprinneligFilnavn>\n <opprinneligFiltype><tekst>jpg</tekst></opprinneligFiltype>\n </vedleggsfil>\n <vedleggstype>dokumentertMarkedsverdi</vedleggstype>\n </vedlegg>\n```\n\nmen samme prinsippet gjelder for andre kort som kan ha vedlegg. Husk at rekkefølgen på xml elementene har noe å si for å få validert xml'n",
"_____no_output_____"
]
],
[
[
"vedleggfil = \"vedlegg_eksempel_sirius_stjerne.jpg\"\nopplasting_respons = last_opp_vedlegg(instans_data, \n altinn_header, \n vedleggfil, \n content_type=\"image/jpeg\", \n data_type=\"skattemelding-vedlegg\",\n appnavn=altinn3_applikasjon)\nvedlegg_id = opplasting_respons.json()[\"id\"]\n\n\n# Så må vi modifisere skattemeldingen slik at vi får med vedlegg idn inn skattemelding xml'n\nwith open(\"../../../src/resources/eksempler/v2/skattemelding_v9_eksempel_vedlegg.xml\") as f:\n filnavn = Path(vedleggfil).name\n filtype = \"jpg\"\n partsnummer = xmltodict.parse(decoded_sme_xml[\"content\"])[\"skattemelding\"][\"partsreferanse\"]\n \n sme_xml = f.read().format(partsnummer=partsnummer, vedlegg_id=vedlegg_id, filnavn=filnavn, filtype=filtype)\n sme_xml_b64 = base64.b64encode(sme_xml.encode(\"utf-8\"))\n sme_xml_b64 = str(sme_xml_b64.decode(\"utf-8\"))\n \n#La oss validere at skattemeldingen fortsatt validerer mot valideringstjenesten\nnaering_enk_med_vedlegg = valider_konvlutt_v2.format(sme_base64=sme_xml_b64,\n naeringsspeifikasjon_base64=naeringsspesifikasjoner_base64,\n dok_ref=dok_ref,\n innsendingstype=innsendingstype)\n\nvalider_respons = valider_sme(naering_enk)\nresultat_av_validering_med_vedlegg = xmltodict.parse(valider_respons.text)[\"skattemeldingerOgNaeringsspesifikasjonResponse\"][\"resultatAvValidering\"]\nresultat_av_validering_med_vedlegg",
"_____no_output_____"
],
[
"#Last opp skattemeldingen\nreq_send_inn = last_opp_skattedata(instans_data, altinn_header, \n xml=naering_enk_med_vedlegg, \n data_type=\"skattemeldingOgNaeringspesifikasjon\",\n appnavn=altinn3_applikasjon)\nreq_send_inn",
"_____no_output_____"
]
],
[
[
"### Sett statusen klar til henting av skatteetaten. ",
"_____no_output_____"
]
],
[
[
"req_bekreftelse = endre_prosess_status(instans_data, altinn_header, \"next\", appnavn=altinn3_applikasjon)\nreq_bekreftelse = endre_prosess_status(instans_data, altinn_header, \"next\", appnavn=altinn3_applikasjon)\nreq_bekreftelse",
"_____no_output_____"
]
],
[
[
"### Framtidig: Sjekk status på altinn3 instansen om skatteetaten har hentet instansen. ",
"_____no_output_____"
],
[
"### Se innsending i Altinn\n\nTa en slurk av kaffen og klapp deg selv på ryggen, du har nå sendt inn, la byråkratiet gjøre sin ting... og det tar litt tid. Pt så sjekker skatteeaten hos Altinn3 hvert 5 min om det har kommet noen nye innsendinger. ",
"_____no_output_____"
],
[
"# Ikke komplett skattemelding\n1. Når du har fått svar i altinn innboksen, så kan du gå til \n https://skatt-ref.sits.no/web/skattemeldingen/2021\n2. Her vil du se næringsinntekter overført fra skattemeldingen\n3. Når du har sendt inn i SME så vil du kunne se i altinn instansen at den har blitt avsluttet\n4. Kjør cellen nedenfor for å se at du har fått en ny fastsatt skattemelding og næringsopplysninger\n",
"_____no_output_____"
]
],
[
[
"print(\"Resultat av hent fastsatt før fastsetting\")\nprint(r_fastsatt.text)\nprint(\"Resultat av hent fastsatt etter fastsetting\")\n\nr_fastsatt2 = s.get(url_fastsatt)\nr_fastsatt2.text\n#r_fastsatt.elapsed.total_seconds()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
ecdc83c232861c6aafcaff6b1594e420ed51223d | 87,389 | ipynb | Jupyter Notebook | Data Pre And Analysis/Twitter Data Prep v2.ipynb | D19124612/dissertation | a97600de9bf978c6787e3ef3659785b8cfdcbafb | [
"Unlicense"
] | null | null | null | Data Pre And Analysis/Twitter Data Prep v2.ipynb | D19124612/dissertation | a97600de9bf978c6787e3ef3659785b8cfdcbafb | [
"Unlicense"
] | null | null | null | Data Pre And Analysis/Twitter Data Prep v2.ipynb | D19124612/dissertation | a97600de9bf978c6787e3ef3659785b8cfdcbafb | [
"Unlicense"
] | null | null | null | 87,389 | 87,389 | 0.639337 | [
[
[
"!pip install -qq emoji\n!pip install -qq ekphrasis\n!pip install -qq datasets",
"\u001b[?25l\r\u001b[K |█▉ | 10 kB 19.9 MB/s eta 0:00:01\r\u001b[K |███▊ | 20 kB 10.4 MB/s eta 0:00:01\r\u001b[K |█████▋ | 30 kB 8.7 MB/s eta 0:00:01\r\u001b[K |███████▌ | 40 kB 7.8 MB/s eta 0:00:01\r\u001b[K |█████████▍ | 51 kB 5.3 MB/s eta 0:00:01\r\u001b[K |███████████▎ | 61 kB 5.4 MB/s eta 0:00:01\r\u001b[K |█████████████▏ | 71 kB 5.3 MB/s eta 0:00:01\r\u001b[K |███████████████ | 81 kB 5.9 MB/s eta 0:00:01\r\u001b[K |█████████████████ | 92 kB 5.9 MB/s eta 0:00:01\r\u001b[K |██████████████████▉ | 102 kB 4.9 MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 112 kB 4.9 MB/s eta 0:00:01\r\u001b[K |██████████████████████▋ | 122 kB 4.9 MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 133 kB 4.9 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▍ | 143 kB 4.9 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▏ | 153 kB 4.9 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 163 kB 4.9 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 174 kB 4.9 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 174 kB 4.9 MB/s \n\u001b[?25h Building wheel for emoji (setup.py) ... \u001b[?25l\u001b[?25hdone\n\u001b[K |████████████████████████████████| 80 kB 3.5 MB/s \n\u001b[K |████████████████████████████████| 43 kB 973 kB/s \n\u001b[K |████████████████████████████████| 53 kB 1.4 MB/s \n\u001b[?25h Building wheel for ekphrasis (setup.py) ... \u001b[?25l\u001b[?25hdone\n\u001b[K |████████████████████████████████| 311 kB 5.1 MB/s \n\u001b[K |████████████████████████████████| 67 kB 3.8 MB/s \n\u001b[K |████████████████████████████████| 133 kB 17.1 MB/s \n\u001b[K |████████████████████████████████| 1.1 MB 52.5 MB/s \n\u001b[K |████████████████████████████████| 243 kB 55.1 MB/s \n\u001b[K |████████████████████████████████| 271 kB 49.5 MB/s \n\u001b[K |████████████████████████████████| 144 kB 51.8 MB/s \n\u001b[K |████████████████████████████████| 94 kB 3.3 MB/s \n\u001b[?25h"
],
[
"\n \nfrom ekphrasis.classes.preprocessor import TextPreProcessor\nfrom ekphrasis.classes.tokenizer import SocialTokenizer\nfrom ekphrasis.dicts.emoticons import emoticons\n#from transformers import BertTokenizer\nimport string \nimport re\nimport spacy\nnlp2 = spacy.load('en_core_web_sm')\nfrom spacy.symbols import ORTH,NORM,LEMMA\nimport string \nfrom spacy.lang.char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY\nfrom spacy.lang.char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS\nfrom spacy.lang.char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT\nfrom spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex\nimport numpy as np\nimport pandas as pd\nimport re\nimport tensorflow as tf\nimport emoji as emoji\nimport string\nimport matplotlib.pyplot as plt\nimport imblearn\nfrom imblearn.over_sampling import RandomOverSampler\nimport datasets\nfrom datasets import Dataset",
"_____no_output_____"
],
[
"twitter_df= pd.read_csv('/content/drive/MyDrive/Dissertation/labeled_data.csv')\n",
"_____no_output_____"
],
[
"twitter_df",
"_____no_output_____"
],
[
"print(twitter_df.shape)",
"(24783, 7)\n"
],
[
"\nunanimous_hate_votes = len(twitter_df[twitter_df[\"class\"]==0] [twitter_df[\"count\"] == twitter_df[\"hate_speech\"] ])\nunanimous_offensive_votes = len(twitter_df[twitter_df[\"class\"]==1] [twitter_df[\"count\"] == twitter_df[\"offensive_language\"] ])\nunanimous_neither_votes = len(twitter_df[twitter_df[\"class\"]==2] [twitter_df[\"count\"] == twitter_df[\"neither\"] ])\n\n\nprint(\"Unanimous Hate Votes: \", unanimous_hate_votes)\nprint(\"Unanimous Offensive Votes: \", unanimous_offensive_votes)\nprint(\"Unanimous Neither Votes: \", unanimous_neither_votes)",
"Unanimous Hate Votes: 263\nUnanimous Offensive Votes: 14347\nUnanimous Neither Votes: 2872\n"
],
[
"twitter_df = twitter_df[['class','tweet']]\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### text preprocessor for ekphrasis\ntext_processor = TextPreProcessor(\n # terms that will be normalized\n normalize=['url', 'email', 'percent', 'money', 'phone', 'user',\n 'time', 'date', 'number'],\n # terms that will be annotated\n fix_html=True, # fix HTML tokens\n annotate={\"hashtag\", \"allcaps\", \"elongated\", \"repeated\",\n 'emphasis', 'censored'},\n # corpus from which the word statistics are going to be used \n # for word segmentation \n segmenter=\"twitter\", \n \n # corpus from which the word statistics are going to be used \n # for spell correction\n corrector=\"twitter\", \n \n unpack_hashtags=True, # perform word segmentation on hashtags\n unpack_contractions=True, # Unpack contractions (can't -> can not)\n spell_correct_elong=False, # spell correction for elongated words\n \n # select a tokenizer. You can use SocialTokenizer, or pass your own\n # the tokenizer, should take as input a string and return a list of tokens\n tokenizer=SocialTokenizer(lowercase=True).tokenize,\n \n # list of dictionaries, for replacing tokens extracted from the text,\n # with other expressions. You can pass more than one dictionaries.\n dicts=[emoticons]\n)",
"/usr/local/lib/python3.7/dist-packages/ekphrasis/classes/tokenizer.py:225: FutureWarning: Possible nested set at position 2190\n self.tok = re.compile(r\"({})\".format(\"|\".join(pipeline)))\n"
],
[
"#input: text\n#process: ekphrasis preprocesser + some extra processing \n#output: list of tokens \ndef ek_extra_preprocess(text):\n remove_words=['<allcaps>','</allcaps>','<hashtag>','</hashtag>','<elongated>','<emphasis>','<repeated>','\\'','s']\n word_list=text_processor.pre_process_doc(text)\n word_list=list(filter(lambda a: a not in remove_words, word_list)) \n word_list=[token for token in word_list if token not in string.punctuation]\n return \" \".join(word_list)\n\n",
"_____no_output_____"
],
[
"twitter_df['tweet_clean'] = twitter_df['tweet'].apply(ek_extra_preprocess)\n",
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"twitter_df[twitter_df[\"class\"]==0].head(25)",
"_____no_output_____"
],
[
"twitter_df['words_clean'] = twitter_df['tweet_clean'].apply(lambda x: len(x.split()))\ntwitter_df['words'] = twitter_df['tweet'].apply(lambda x: len(x.split())) \n\nprint(\"Min\")\nprint(twitter_df[['words']].min())\nprint(\"Max\")\nprint(twitter_df[['words']].max())",
"Min\nwords 1\ndtype: int64\nMax\nwords 52\ndtype: int64\n"
],
[
"twitter_df.sort_values(by='words',ascending=False).head(20)",
"_____no_output_____"
],
[
"twitter_df['words'].hist(bins=20)",
"_____no_output_____"
],
[
"tokenizer = tf.keras.preprocessing.text.Tokenizer(filters=\"\")\ntokenizer.fit_on_texts(twitter_df['tweet_clean'])\nprint(\"Unique tokens:\", len(tokenizer.word_index))",
"Unique tokens: 18889\n"
],
[
"word_index = tokenizer.word_index\nwords_array = np.empty(len(word_index)).astype(str)\nfor key,value in word_index.items():\n words_array[value-1] = key",
"_____no_output_____"
],
[
"def count (corpus):\n \n word_frequency = np.zeros(len(word_index))\n doc_frequency = np.zeros(len(word_index))\n for text in corpus:\n wordlist = text.split()\n wordfreq = [wordlist.count(p) for p in wordlist]\n for key,value in dict(list(zip(wordlist,wordfreq))).items():\n index = word_index.get(key) \n if not (index is None):\n word_frequency[index-1] = word_frequency[index-1]+value\n doc_frequency[index-1] = doc_frequency[index-1]+1\n else:\n print(key)\n \n return word_frequency,doc_frequency\n\n",
"_____no_output_____"
],
[
"word_freq, doc_freq = count(twitter_df['tweet_clean'])\n",
"_____no_output_____"
],
[
"words = pd.DataFrame(columns=['Word','WordFreq','DocFreq'])\nwords['Word'] = words_array\nwords['WordFreq'] = word_freq\nwords['DocFreq'] = doc_freq\nwords.head()",
"_____no_output_____"
],
[
"\ntwitter_df = twitter_df.drop(['tweet','words','words_clean'],axis=1)\ntwitter_df = twitter_df.rename(columns={\"tweet_clean\":\"sentence\", \"class\":\"label\"})\n\n\n",
"_____no_output_____"
],
[
"\ntwitter_hf_df = Dataset.from_pandas(twitter_df)\ntwitter_dataset_dfs = twitter_hf_df.train_test_split(test_size=0.2)\ntwitter_dataset_dfs.save_to_disk('/content/drive/MyDrive/Dissertation/twitter_dfs.csv')\n",
"_____no_output_____"
],
[
"twitter_df",
"_____no_output_____"
],
[
"ros = RandomOverSampler(sampling_strategy={0: 4000, 1:19190,2:4163})\n\n\ntwitter_ros, y_ros = ros.fit_resample(twitter_df, twitter_df['label'])\n\ntwitter_ros_dfs = Dataset.from_pandas(twitter_ros)\ntwitter_ros_dfs.save_to_disk('/content/drive/MyDrive/Dissertation/twitter_ros_dfs.csv')\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdc8bed822512118445cb6edce67f3cecf6a5c6 | 197,432 | ipynb | Jupyter Notebook | readmefigs/README.ipynb | SjoerdCor/micplot | ff0ec25c8fc8840dfcecef919f7639ef0600d4e3 | [
"MIT"
] | 1 | 2022-01-04T15:42:44.000Z | 2022-01-04T15:42:44.000Z | readmefigs/README.ipynb | SjoerdCor/micplot | ff0ec25c8fc8840dfcecef919f7639ef0600d4e3 | [
"MIT"
] | null | null | null | readmefigs/README.ipynb | SjoerdCor/micplot | ff0ec25c8fc8840dfcecef919f7639ef0600d4e3 | [
"MIT"
] | null | null | null | 317.925926 | 34,780 | 0.922708 | [
[
[
"# Installation\n`micplot` can be installed by forking this repository and running `pip install micplot` in the appropriate folder. The only requirements are `pandas` and `matplotlib`.\n\n# More effective visualization in one line of code\nPandas is an extremely popular python package for data manipulation, and for good reason: it has a host of possibilities. However, it's out-of-the-box plotting options usually result in hard to interpret plots. This is unfortunate, because good visualization leads to better discussion with and more insights from subject matter experts, which is sorely needed for useful data analytics\n",
"_____no_output_____"
]
],
[
[
"import os\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport micplot",
"_____no_output_____"
],
[
"df = pd.read_csv(os.path.join('..', 'data', 'titanic.csv'))",
"_____no_output_____"
],
[
"data = df.groupby('Embarked')['Fare'].mean()\ndata.plot() \nplt.show()",
"_____no_output_____"
]
],
[
[
"Indeed, the plot is difficult to interpret. A line plot is a poor choice for this type of data, there are meaningless ticks, and no axis label.\n\nTherefore, the `micplot` package was developed, with three advantages:\n 1. It automatically makes choices that make the plot much easier to interpret\n 1. It makes the up the plot area nicely, by removing fluff, so it is easer to read.\n 1. It is fully customizable if something is not to your wishes\n \n## Creating focus\nIn plotting, it is important to make clear what the point of the plot is. `micplot` does this in two ways:\n1. From the data, it infers a focus point, by sorting and highlighting data, and \n1. It makes the plot clearer, by annotating when necessary and removing fluff, such as unnecessary ticks and the frame.\n\n",
"_____no_output_____"
]
],
[
[
"vis = micplot.visualize(data)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## The plot is still fully customizable\nIf the plot is not fully to your liking, the `Visualization` object that is returned contains all choices as attributes, including the axis, which can still be altered. In the example below, we alter the plottype and the bars which are highlighted. ",
"_____no_output_____"
]
],
[
[
"vis = micplot.visualize(data, plottype='vertical_bar', highlight=[0, 1])\nvis.ax.set_ylabel('Mean ticket price')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Other options that can be altered are in the documentation:",
"_____no_output_____"
]
],
[
[
"?micplot.Visualization",
"_____no_output_____"
]
],
[
[
"# `micplot` contains some more useful plottypes\nAbove, we already saw the bar chart that is often very useful to make a point. Below, we show other plottypes and when `micplot` uses them.\n\n## Waterfall charts for compositions\nWaterfall charts are a good choice to show how the total group composition is. Note how `micplot` automatically infers this from the fact that the data contains percentages that add up to 100%.",
"_____no_output_____"
]
],
[
[
"data = df['Embarked'].value_counts(normalize=True)\nmicplot.visualize(data)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Vertical bars for short timeseries data\nBar charts are the plot of choice for time series data with not too many points. `micplot` infers this from the Index of the data. \nNote how in the plot below the legend is placed outside the plot to prevent the legend from overlapping with the data. The highlight specifies that data point to highlight if there is only one Series, but the column to highlight if multiple Series are compared.",
"_____no_output_____"
]
],
[
[
"size = 6\ncolumnnames = ['Cars', 'Bikes', 'Buses', 'Planes']\ntest_data = pd.DataFrame(10*np.random.rand(size, 4), index=pd.date_range('20190101', periods=size), columns=columnnames)",
"_____no_output_____"
],
[
"micplot.visualize(test_data['Cars'], highlight=-1)\nplt.show()\nmicplot.visualize(test_data, highlight=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Line chart for longer timeseries data\nThe bar chart would become unreadable if the time series data were longer, so `micplot` changes the plottype to a line plot.",
"_____no_output_____"
]
],
[
[
"size = 12\ntest_data = pd.DataFrame(10*np.random.rand(size, 4), index=pd.date_range('20190101', periods=size), columns=columnnames)\n\nmicplot.visualize(test_data['Cars'], highlight=-1)\nplt.show()\nmicplot.visualize(test_data, highlight=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Scatter plots to investigate the relationship between two series",
"_____no_output_____"
]
],
[
[
"micplot.visualize(df[['Age', 'Fare']])\nplt.show() ",
"_____no_output_____"
]
],
[
[
"If there are only a few datapoints in the plot, the points are also labeled with their index.",
"_____no_output_____"
]
],
[
[
"micplot.visualize(df[['Age', 'Fare']].sample(15))\nplt.show() ",
"_____no_output_____"
]
],
[
[
"If there is a third column, this is turned into a bubble chart, where the third column determines the marker size. Here we see that `micplot` gives the legend a title when appropriate.",
"_____no_output_____"
]
],
[
[
"micplot.visualize(df[['Age', 'Fare', 'Parch']])\nplt.show() ",
"_____no_output_____"
]
],
[
[
"## Composition comparison can show how subpopulations differ\nIf we quickly want to infer whether subgroups have the same distribution, we can use a stacked bar chart. `micplot` automatically chooses this if the data to visualize is a DataFrame where each column is a percentage Series.\n",
"_____no_output_____"
]
],
[
[
"data = (df.groupby('Pclass')['Survived'].value_counts(normalize=True)\n .unstack(level='Pclass')\n )\ndisplay(data)",
"_____no_output_____"
],
[
"vis = micplot.visualize(data)",
"_____no_output_____"
]
],
[
[
"## Pie chart works as expected",
"_____no_output_____"
]
],
[
[
"micplot.visualize(df['Embarked'].value_counts(), plottype='pie')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdca4edc3c305b0230204be8abe148f5ee2cb67 | 1,996 | ipynb | Jupyter Notebook | notebooks/ConwaysGOL_CNN.ipynb | Yonipineda/Conways-GameOfLife | caa2835025953ec7bd6e6e48c9cb17ce104ea601 | [
"MIT"
] | null | null | null | notebooks/ConwaysGOL_CNN.ipynb | Yonipineda/Conways-GameOfLife | caa2835025953ec7bd6e6e48c9cb17ce104ea601 | [
"MIT"
] | null | null | null | notebooks/ConwaysGOL_CNN.ipynb | Yonipineda/Conways-GameOfLife | caa2835025953ec7bd6e6e48c9cb17ce104ea601 | [
"MIT"
] | null | null | null | 31.68254 | 252 | 0.609719 | [
[
[
"# Conways Game of Life CNN: From Scratch\n\nFor our Computer Science Unit 1 build week at [Lambda](https://lambdaschool.com/), all of us had the option to create our own version of [Conways Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life).\nI decided to do that and created my own using [pygame](https://www.pygame.org/news). \n\n You can find relevant links to check it out for yourself here:\n\n- [Conways Game of Life: On repl](https://repl.it/@Yonipineda/Game-of-Life#readme.md)\n- [Conways Game of life: On PyPi](https://pypi.org/project/Conways-Game-of-Life/)\n\nBut, as a Data Science student, we also had to the option to create, from scratch, any of the acceptable Data Science specific algorithms, which can be found in this [repo readme](https://github.com/LambdaSchool/CS-Data-Science-Build-Week-1). \n\nAs I worked on the game itself, I thought of an idea, why not create a Convolutional Neural Network from scratch, train the game on it and compare it with the keras cnn model. Thats what this notebook is for. \n\n\n### Links\n- https://en.wikipedia.org/wiki/Convolution",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
ecdcb83cf519522e93fea3d1549f17f28e366363 | 35,564 | ipynb | Jupyter Notebook | Pytorch/Self Organizing Maps/som.ipynb | SomaKorada07/Deep-Learning-Projects | 58022f7839b9510e962e622be786c65ef01064c3 | [
"Apache-2.0"
] | null | null | null | Pytorch/Self Organizing Maps/som.ipynb | SomaKorada07/Deep-Learning-Projects | 58022f7839b9510e962e622be786c65ef01064c3 | [
"Apache-2.0"
] | null | null | null | Pytorch/Self Organizing Maps/som.ipynb | SomaKorada07/Deep-Learning-Projects | 58022f7839b9510e962e622be786c65ef01064c3 | [
"Apache-2.0"
] | 1 | 2019-12-17T16:48:47.000Z | 2019-12-17T16:48:47.000Z | 101.902579 | 24,296 | 0.801653 | [
[
[
"# How to train the SOM\n* Start with dataset composed of n_features independent variables\n* Create a grid composed of nodes, each one having a weight vector of n_features elements\n* Randomly initialize the values of weights vectors to small numbers close to 0\n* Select one random observation point from the dataset\n* Compute the euclidean distances from ths point to the different neurons in the network\n* Select the neuron that has the minimum distance to the point. This neuron is called the winning node\n* Update the weights of the winning node to move it closer to the point\n* Using the gaussian neighbouring function of mean the winning node also update the weights of the winning node neighbors to move them closer to the point. The neighborhood radius is sigma in the gaussian in the gaussian function.\n* Repeat top 5 steps and update the weights after each observation (Reinforcement Learning) or after a batch of observations (Batch Learning) until the network converges to a point where the neighborhood stops decreasing.",
"_____no_output_____"
]
],
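[
[
"# A minimal sketch of the update rule described above, just to make the steps concrete.\n# It is illustrative only (array shapes and decay schedule are assumptions); the notebook\n# itself trains the map with MiniSom below.\nimport numpy as np\n\ndef som_step(weights, x, t, n_iter, sigma0=1.0, lr0=0.5):\n    # weights: (rows, cols, n_features) grid of weight vectors, x: one observation\n    rows, cols, _ = weights.shape\n    # decay the learning rate and neighbourhood radius over time\n    lr = lr0 * np.exp(-t / n_iter)\n    sigma = sigma0 * np.exp(-t / n_iter)\n    # winning node = neuron with the smallest Euclidean distance to x\n    dists = np.linalg.norm(weights - x, axis=2)\n    win = np.unravel_index(np.argmin(dists), dists.shape)\n    # Gaussian neighbourhood centred on the winner (distance measured on the grid)\n    ii, jj = np.meshgrid(np.arange(rows), np.arange(cols), indexing=\"ij\")\n    grid_dist2 = (ii - win[0]) ** 2 + (jj - win[1]) ** 2\n    h = np.exp(-grid_dist2 / (2 * sigma ** 2))\n    # move the winner and its neighbours closer to the observation\n    weights += lr * h[:, :, None] * (x - weights)\n    return weights\n\nrng = np.random.RandomState(0)\ndemo_weights = rng.rand(10, 10, 15) * 0.1  # small random initial weights, as in the steps above\ndemo_x = rng.rand(15)\ndemo_weights = som_step(demo_weights, demo_x, t=0, n_iter=100)",
"_____no_output_____"
]
],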
[
[
"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
],
[
"# Importing the dataset\ndataset = pd.read_csv('Credit_Card_Applications.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\ndataset.head()",
"_____no_output_____"
],
[
"# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\nX = sc.fit_transform(X)",
"_____no_output_____"
],
[
"# Training the SOM\nfrom minisom import MiniSom\nsom = MiniSom(x = 10, y = 10, input_len = 15, sigma = 1.0, learning_rate = 0.5)\nsom.random_weights_init(X)\nsom.train_random(data = X, num_iteration = 100)",
"_____no_output_____"
],
[
"# Visualizing the results\nfrom pylab import bone, pcolor, colorbar, plot, show\nbone()\npcolor(som.distance_map().T)\ncolorbar()\nmarkers = ['o', 's']\ncolors = ['r', 'g']\nfor i, x in enumerate(X):\n w = som.winner(x)\n plot(w[0] + 0.5,\n w[1] + 0.5,\n markers[y[i]],\n markeredgecolor = colors[y[i]],\n markerfacecolor = 'None',\n markersize = 10,\n markeredgewidth = 2)\nshow()",
"_____no_output_____"
],
[
"# Finding the frauds\nmappings = som.win_map(X)\nfrauds = np.concatenate((mappings[(4,8)], mappings[(2,7)]), axis = 0)\nfrauds = sc.inverse_transform(frauds)",
"_____no_output_____"
],
[
"print(frauds)",
"[[1.5813718e+07 1.0000000e+00 4.2830000e+01 1.2500000e+00 2.0000000e+00\n 7.0000000e+00 4.0000000e+00 1.3875000e+01 0.0000000e+00 1.0000000e+00\n 1.0000000e+00 1.0000000e+00 2.0000000e+00 3.5200000e+02 1.1300000e+02]\n [1.5815271e+07 1.0000000e+00 2.7580000e+01 3.2500000e+00 1.0000000e+00\n 1.1000000e+01 8.0000000e+00 5.0850000e+00 0.0000000e+00 1.0000000e+00\n 2.0000000e+00 1.0000000e+00 2.0000000e+00 3.6900000e+02 2.0000000e+00]\n [1.5805627e+07 1.0000000e+00 2.7830000e+01 1.5000000e+00 2.0000000e+00\n 9.0000000e+00 4.0000000e+00 2.2500000e+00 0.0000000e+00 1.0000000e+00\n 1.0000000e+00 1.0000000e+00 2.0000000e+00 1.0000000e+02 4.0000000e+00]\n [1.5810716e+07 1.0000000e+00 2.3500000e+01 3.1650000e+00 1.0000000e+00\n 4.0000000e+00 4.0000000e+00 4.1500000e-01 0.0000000e+00 1.0000000e+00\n 1.0000000e+00 1.0000000e+00 2.0000000e+00 2.8000000e+02 8.1000000e+01]\n [1.5767264e+07 1.0000000e+00 7.4830000e+01 1.9000000e+01 1.0000000e+00\n 1.0000000e+00 1.0000000e+00 4.0000000e-02 0.0000000e+00 1.0000000e+00\n 2.0000000e+00 0.0000000e+00 2.0000000e+00 0.0000000e+00 3.5200000e+02]\n [1.5801072e+07 1.0000000e+00 3.9580000e+01 5.0000000e+00 2.0000000e+00\n 1.0000000e+00 1.0000000e+00 0.0000000e+00 0.0000000e+00 1.0000000e+00\n 2.0000000e+00 0.0000000e+00 2.0000000e+00 1.7000000e+01 2.0000000e+00]\n [1.5781574e+07 1.0000000e+00 4.9580000e+01 1.9000000e+01 2.0000000e+00\n 1.0000000e+00 1.0000000e+00 0.0000000e+00 1.0000000e+00 1.0000000e+00\n 1.0000000e+00 0.0000000e+00 2.0000000e+00 9.4000000e+01 1.0000000e+00]\n [1.5647295e+07 1.0000000e+00 6.9170000e+01 9.0000000e+00 2.0000000e+00\n 1.0000000e+00 1.0000000e+00 4.0000000e+00 0.0000000e+00 1.0000000e+00\n 1.0000000e+00 0.0000000e+00 2.0000000e+00 7.0000000e+01 7.0000000e+00]]\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdcb8dc67b834502ddd5a110c4af4ba17428b8f | 41,189 | ipynb | Jupyter Notebook | 1_mosaic_data_attention_experiments/8_testing_attention_with_cheating_network/models/Focus_cheat_data_cnn_2layer_5_6.ipynb | lnpandey/DL_explore_synth_data | 0a5d8b417091897f4c7f358377d5198a155f3f24 | [
"MIT"
] | 2 | 2019-08-24T07:20:35.000Z | 2020-03-27T08:16:59.000Z | 1_mosaic_data_attention_experiments/8_testing_attention_with_cheating_network/models/Focus_cheat_data_cnn_2layer_5_6.ipynb | lnpandey/DL_explore_synth_data | 0a5d8b417091897f4c7f358377d5198a155f3f24 | [
"MIT"
] | null | null | null | 1_mosaic_data_attention_experiments/8_testing_attention_with_cheating_network/models/Focus_cheat_data_cnn_2layer_5_6.ipynb | lnpandey/DL_explore_synth_data | 0a5d8b417091897f4c7f358377d5198a155f3f24 | [
"MIT"
] | 3 | 2019-06-21T09:34:32.000Z | 2019-09-19T10:43:07.000Z | 31.855375 | 483 | 0.431377 | [
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n4/4AHgD_Fq6TMGseMpTy0yG2R_dfGTU_nYzhcJ7bH1IbheTJxUnDJs2bs\nMounted at /content/drive\n"
],
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\nfrom matplotlib import pyplot as plt\n\nimport copy\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n",
"_____no_output_____"
],
[
"transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n",
"Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz\n"
],
[
"type(trainset.targets)\ntype(trainset.data)",
"_____no_output_____"
],
[
"# trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)\n# testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)\n\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nforeground_classes = {'plane', 'car', 'bird'}\n\nbackground_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}\n\nfg1,fg2,fg3 = 0,1,2",
"_____no_output_____"
],
[
"is_fg = [np.where(np.array(trainset.targets)==fg1)[0] , np.where(np.array(trainset.targets)==fg2)[0], np.where(np.array(trainset.targets)==fg3)[0] ]\n# print(is_fg)\nis_fg = np.concatenate(is_fg,axis=0)\nprint(is_fg, (is_fg).shape)",
"[ 29 30 35 ... 49987 49991 49995] (15000,)\n"
],
[
"all_index = np.arange(0,50000)\nall_index",
"_____no_output_____"
],
[
"train_labels = np.array(trainset.targets)\ntrain_labels.shape",
"_____no_output_____"
],
[
"train_labels[is_fg]=1\ntrain_labels[list(set(all_index)-set(is_fg))]=0\ntrain_labels",
"_____no_output_____"
],
[
"np.count_nonzero(train_labels)",
"_____no_output_____"
],
[
"trainset.targets = train_labels",
"_____no_output_____"
],
[
"is_fg = [np.where(np.array(testset.targets)==fg1)[0] , np.where(np.array(testset.targets)==fg2)[0], np.where(np.array(testset.targets)==fg3)[0] ]\n# print(is_fg)\nis_fg = np.concatenate(is_fg,axis=0)\nprint(is_fg, (is_fg).shape)",
"[ 3 10 21 ... 9970 9982 9989] (3000,)\n"
],
[
"all_index = np.arange(0,10000)\nall_index",
"_____no_output_____"
],
[
"test_labels = np.array(testset.targets)\ntest_labels.shape",
"_____no_output_____"
],
[
"test_labels[is_fg]=1\ntest_labels[list(set(all_index)-set(is_fg))]=0\ntest_labels",
"_____no_output_____"
],
[
"np.count_nonzero(test_labels)",
"_____no_output_____"
],
[
"testset.targets = test_labels",
"_____no_output_____"
],
[
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,shuffle=True)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=256,shuffle=False)",
"_____no_output_____"
],
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=4, padding=0)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(in_channels=5, out_channels=6, kernel_size=4, padding=0)\n # self.conv3 = nn.Conv2d(in_channels=12, out_channels=20, kernel_size=3, padding=0)\n self.fc1 = nn.Linear(726, 256)\n self.fc2 = nn.Linear(256, 64)\n self.fc3 = nn.Linear(64, 10)\n self.fc4 = nn.Linear(10,2)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n # x = self.pool(F.relu(self.conv2(x)))\n # print(x.shape)\n x = (F.relu(self.conv2(x)))\n x = x.view(x.size(0), -1)\n # print(x.shape)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n return x",
"_____no_output_____"
],
[
"where_net = CNN()#.double()\nwhere_net = where_net.to(\"cuda\")",
"_____no_output_____"
],
[
"where_net",
"_____no_output_____"
],
[
"import torch.optim as optim\ncriterion_where = nn.CrossEntropyLoss()\noptimizer_where = optim.SGD(where_net.parameters(), lr=0.01, momentum=0.9)",
"_____no_output_____"
],
[
"acti = []\nloss_curi = []\nepochs = 1000\nfor epoch in range(epochs): # loop over the dataset multiple times\n ep_lossi = []\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n # zero the parameter gradients\n optimizer_where.zero_grad()\n\n # forward + backward + optimize\n outputs = where_net(inputs)\n loss = criterion_where(outputs, labels)\n loss.backward()\n optimizer_where.step()\n\n # print statistics\n running_loss += loss.item()\n mini_batch = 50\n if i % mini_batch == mini_batch-1: # print every 50 mini-batches\n print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_loss / mini_batch))\n ep_lossi.append(running_loss/mini_batch) # loss per minibatch\n running_loss = 0.0\n \n if(np.mean(ep_lossi) <= 0.005):\n break;\n loss_curi.append(np.mean(ep_lossi)) #loss per epoch \n\nprint('Finished Training')",
"[1, 50] loss: 0.621\n[1, 100] loss: 0.607\n[1, 150] loss: 0.614\n[2, 50] loss: 0.615\n[2, 100] loss: 0.607\n[2, 150] loss: 0.610\n[3, 50] loss: 0.609\n[3, 100] loss: 0.611\n[3, 150] loss: 0.610\n[4, 50] loss: 0.606\n[4, 100] loss: 0.617\n[4, 150] loss: 0.612\n[5, 50] loss: 0.604\n[5, 100] loss: 0.607\n[5, 150] loss: 0.605\n[6, 50] loss: 0.584\n[6, 100] loss: 0.581\n[6, 150] loss: 0.565\n[7, 50] loss: 0.541\n[7, 100] loss: 0.535\n[7, 150] loss: 0.528\n[8, 50] loss: 0.507\n[8, 100] loss: 0.501\n[8, 150] loss: 0.496\n[9, 50] loss: 0.495\n[9, 100] loss: 0.474\n[9, 150] loss: 0.477\n[10, 50] loss: 0.480\n[10, 100] loss: 0.460\n[10, 150] loss: 0.457\n[11, 50] loss: 0.449\n[11, 100] loss: 0.449\n[11, 150] loss: 0.442\n[12, 50] loss: 0.437\n[12, 100] loss: 0.435\n[12, 150] loss: 0.438\n[13, 50] loss: 0.425\n[13, 100] loss: 0.417\n[13, 150] loss: 0.437\n[14, 50] loss: 0.417\n[14, 100] loss: 0.414\n[14, 150] loss: 0.418\n[15, 50] loss: 0.402\n[15, 100] loss: 0.395\n[15, 150] loss: 0.414\n[16, 50] loss: 0.386\n[16, 100] loss: 0.405\n[16, 150] loss: 0.387\n[17, 50] loss: 0.376\n[17, 100] loss: 0.377\n[17, 150] loss: 0.372\n[18, 50] loss: 0.371\n[18, 100] loss: 0.367\n[18, 150] loss: 0.357\n[19, 50] loss: 0.366\n[19, 100] loss: 0.357\n[19, 150] loss: 0.360\n[20, 50] loss: 0.340\n[20, 100] loss: 0.344\n[20, 150] loss: 0.350\n[21, 50] loss: 0.341\n[21, 100] loss: 0.348\n[21, 150] loss: 0.344\n[22, 50] loss: 0.318\n[22, 100] loss: 0.316\n[22, 150] loss: 0.320\n[23, 50] loss: 0.309\n[23, 100] loss: 0.310\n[23, 150] loss: 0.301\n[24, 50] loss: 0.291\n[24, 100] loss: 0.287\n[24, 150] loss: 0.310\n[25, 50] loss: 0.282\n[25, 100] loss: 0.283\n[25, 150] loss: 0.279\n[26, 50] loss: 0.265\n[26, 100] loss: 0.262\n[26, 150] loss: 0.267\n[27, 50] loss: 0.234\n[27, 100] loss: 0.254\n[27, 150] loss: 0.257\n[28, 50] loss: 0.213\n[28, 100] loss: 0.231\n[28, 150] loss: 0.253\n[29, 50] loss: 0.200\n[29, 100] loss: 0.202\n[29, 150] loss: 0.225\n[30, 50] loss: 0.182\n[30, 100] loss: 0.190\n[30, 150] loss: 0.203\n[31, 50] loss: 0.173\n[31, 100] loss: 0.157\n[31, 150] loss: 0.182\n[32, 50] loss: 0.144\n[32, 100] loss: 0.151\n[32, 150] loss: 0.171\n[33, 50] loss: 0.133\n[33, 100] loss: 0.155\n[33, 150] loss: 0.143\n[34, 50] loss: 0.128\n[34, 100] loss: 0.120\n[34, 150] loss: 0.138\n[35, 50] loss: 0.101\n[35, 100] loss: 0.111\n[35, 150] loss: 0.113\n[36, 50] loss: 0.119\n[36, 100] loss: 0.105\n[36, 150] loss: 0.118\n[37, 50] loss: 0.100\n[37, 100] loss: 0.092\n[37, 150] loss: 0.098\n[38, 50] loss: 0.106\n[38, 100] loss: 0.085\n[38, 150] loss: 0.085\n[39, 50] loss: 0.055\n[39, 100] loss: 0.074\n[39, 150] loss: 0.087\n[40, 50] loss: 0.058\n[40, 100] loss: 0.067\n[40, 150] loss: 0.084\n[41, 50] loss: 0.056\n[41, 100] loss: 0.058\n[41, 150] loss: 0.067\n[42, 50] loss: 0.057\n[42, 100] loss: 0.056\n[42, 150] loss: 0.062\n[43, 50] loss: 0.060\n[43, 100] loss: 0.061\n[43, 150] loss: 0.057\n[44, 50] loss: 0.039\n[44, 100] loss: 0.036\n[44, 150] loss: 0.045\n[45, 50] loss: 0.044\n[45, 100] loss: 0.030\n[45, 150] loss: 0.040\n[46, 50] loss: 0.033\n[46, 100] loss: 0.040\n[46, 150] loss: 0.036\n[47, 50] loss: 0.046\n[47, 100] loss: 0.030\n[47, 150] loss: 0.032\n[48, 50] loss: 0.036\n[48, 100] loss: 0.028\n[48, 150] loss: 0.031\n[49, 50] loss: 0.058\n[49, 100] loss: 0.042\n[49, 150] loss: 0.043\n[50, 50] loss: 0.026\n[50, 100] loss: 0.034\n[50, 150] loss: 0.037\n[51, 50] loss: 0.046\n[51, 100] loss: 0.033\n[51, 150] loss: 0.028\n[52, 50] loss: 0.024\n[52, 100] loss: 0.015\n[52, 150] loss: 0.019\n[53, 50] loss: 0.037\n[53, 100] loss: 
0.032\n[53, 150] loss: 0.032\n[54, 50] loss: 0.029\n[54, 100] loss: 0.022\n[54, 150] loss: 0.032\n[55, 50] loss: 0.028\n[55, 100] loss: 0.025\n[55, 150] loss: 0.030\n[56, 50] loss: 0.015\n[56, 100] loss: 0.014\n[56, 150] loss: 0.020\n[57, 50] loss: 0.022\n[57, 100] loss: 0.025\n[57, 150] loss: 0.031\n[58, 50] loss: 0.016\n[58, 100] loss: 0.016\n[58, 150] loss: 0.013\n[59, 50] loss: 0.020\n[59, 100] loss: 0.020\n[59, 150] loss: 0.016\n[60, 50] loss: 0.023\n[60, 100] loss: 0.026\n[60, 150] loss: 0.017\n[61, 50] loss: 0.015\n[61, 100] loss: 0.015\n[61, 150] loss: 0.018\n[62, 50] loss: 0.017\n[62, 100] loss: 0.010\n[62, 150] loss: 0.016\n[63, 50] loss: 0.010\n[63, 100] loss: 0.010\n[63, 150] loss: 0.013\n[64, 50] loss: 0.007\n[64, 100] loss: 0.006\n[64, 150] loss: 0.009\n[65, 50] loss: 0.005\n[65, 100] loss: 0.024\n[65, 150] loss: 0.030\n[66, 50] loss: 0.020\n[66, 100] loss: 0.021\n[66, 150] loss: 0.027\n[67, 50] loss: 0.025\n[67, 100] loss: 0.018\n[67, 150] loss: 0.020\n[68, 50] loss: 0.009\n[68, 100] loss: 0.006\n[68, 150] loss: 0.006\n[69, 50] loss: 0.013\n[69, 100] loss: 0.011\n[69, 150] loss: 0.022\n[70, 50] loss: 0.013\n[70, 100] loss: 0.017\n[70, 150] loss: 0.023\n[71, 50] loss: 0.011\n[71, 100] loss: 0.009\n[71, 150] loss: 0.005\n[72, 50] loss: 0.002\n[72, 100] loss: 0.002\n[72, 150] loss: 0.003\nFinished Training\n"
],
[
"torch.save(where_net.state_dict(),\"/content/drive/My Drive/Research/Cheating_data/Focus_net_weights/focus_net_2layer_cnn_5_6.pt\")",
"_____no_output_____"
],
[
"correct = 0\ntotal = 0\nwith torch.no_grad():\n for data in trainloader:\n images, labels = data\n images, labels = images.to(\"cuda\"), labels.to(\"cuda\")\n outputs = where_net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))\nprint(total,correct)",
"Accuracy of the network on the 50000 train images: 99 %\n50000 49942\n"
],
[
"correct = 0\ntotal = 0\nout = []\npred = []\nwith torch.no_grad():\n for data in testloader:\n images, labels = data\n images, labels = images.to(\"cuda\"),labels.to(\"cuda\")\n out.append(labels.cpu().numpy())\n outputs= where_net(images)\n _, predicted = torch.max(outputs.data, 1)\n pred.append(predicted.cpu().numpy())\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))\nprint(total,correct)",
"Accuracy of the network on the 10000 test images: 80 %\n10000 8090\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdcbcfc026f7edb4d7940cece6dc0c62c5a910d | 36,830 | ipynb | Jupyter Notebook | Natural Language Processing/NLP/TextClassification_BernoulliNB_TfidfVectorizer.ipynb | shreepad-nade/ds-seed | 93ddd3b73541f436b6832b94ca09f50872dfaf10 | [
"Apache-2.0"
] | 53 | 2021-08-28T07:41:49.000Z | 2022-03-09T02:20:17.000Z | Natural Language Processing/NLP/TextClassification_BernoulliNB_TfidfVectorizer.ipynb | shreepad-nade/ds-seed | 93ddd3b73541f436b6832b94ca09f50872dfaf10 | [
"Apache-2.0"
] | 142 | 2021-07-27T07:23:10.000Z | 2021-08-25T14:57:24.000Z | Natural Language Processing/NLP/TextClassification_BernoulliNB_TfidfVectorizer.ipynb | shreepad-nade/ds-seed | 93ddd3b73541f436b6832b94ca09f50872dfaf10 | [
"Apache-2.0"
] | 38 | 2021-07-27T04:54:08.000Z | 2021-08-23T02:27:20.000Z | 54.806548 | 9,446 | 0.696307 | [
[
[
"# Text Classification using BernoulliNB and TfidfVectorizer",
"_____no_output_____"
],
[
"This Code Template is for Text Classification using BernoulliNB algorithm along with Text Feature technique TfidfVectorizer from Scikit-learn in python.",
"_____no_output_____"
],
[
"## Required Packages",
"_____no_output_____"
]
],
[
[
"!pip install nltk\n!pip install imblearn",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nimport re\nimport nltk\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.corpus import stopwords\nfrom imblearn.over_sampling import RandomOverSampler\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import plot_confusion_matrix,classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import BernoulliNB\nnltk.download('stopwords')\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## Initialization\n\nFilepath of CSV file",
"_____no_output_____"
]
],
[
[
"filepath = \"\"",
"_____no_output_____"
]
],
[
[
"Target variable for prediction",
"_____no_output_____"
]
],
[
[
"target = ''",
"_____no_output_____"
]
],
[
[
"Text column containing all text data",
"_____no_output_____"
]
],
[
[
"text = ''",
"_____no_output_____"
]
],
[
[
"## Data Fetching\n\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(filepath)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"## Data cleaning and preprocessing",
"_____no_output_____"
],
[
"Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes and we also apply some preprocessing techniques which are specific to text data. ",
"_____no_output_____"
]
],
[
[
"def data_preprocess(df, target):\n df = df.dropna(axis=0, how = 'any')\n df[target] = LabelEncoder().fit_transform(df[target]) \n corpus = []\n ps = PorterStemmer()\n for i in range(len(df)) :\n words = re.sub('[^a-zA-Z]',' ',df[text][i])\n words = words.lower()\n words = words.split()\n words = [ps.stem(word) for word in words if word not in stopwords.words('english')]\n words = \" \".join(words)\n corpus.append(words)\n return df, corpus",
"_____no_output_____"
],
[
"df,corpus = data_preprocess(df, target)",
"_____no_output_____"
]
],
[
[
"## Feature Transformation\n\nTF-IDF are word frequency scores that try to highlight words that are more interesting, e.g. frequent in a document but not across documents. The TfidfVectorizer will tokenize documents, learn the vocabulary and inverse document frequency weightings, and allow you to encode new documents.\n\nTF-IDF (term frequency-inverse document frequency) is a statistical measure that evaluates how relevant a word is to a document in a collection of documents.\n\nFor More Info: [API](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html)",
"_____no_output_____"
]
],
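[
[
"# Illustrative sketch (added for clarity; not part of the original template): how TfidfVectorizer weights terms.\n# The tiny corpus below is made up, and only scikit-learn defaults (smooth idf, l2 normalisation) are assumed.\nfrom sklearn.feature_extraction.text import TfidfVectorizer\ntoy_corpus = ['the cat sat', 'the dog sat', 'the cat ran']\ntoy_vec = TfidfVectorizer()\ntoy_matrix = toy_vec.fit_transform(toy_corpus)\n# 'the' occurs in every document, so its idf (and hence its tf-idf weight) is the lowest;\n# rarer terms such as 'dog' and 'ran' are up-weighted.\nprint(sorted(toy_vec.vocabulary_))\nprint(toy_matrix.toarray().round(2))",
"_____no_output_____"
]
],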
[
[
"tfidf = TfidfVectorizer(max_features=2500)",
"_____no_output_____"
]
],
[
[
"## Feature Selection\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.",
"_____no_output_____"
]
],
[
[
"X = tfidf.fit_transform(corpus).toarray()\ny = df[target]",
"_____no_output_____"
]
],
[
[
"## Distribution of target variable",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (10,6))\nsns.countplot(y)",
"_____no_output_____"
]
],
[
[
"## Data Splitting\n\nSince we are using a univariate dataset, we can directly split our data into training and testing subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)",
"_____no_output_____"
]
],
[
[
"## Handling Target Imbalance\n\nThe challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.\n\nOne approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library.",
"_____no_output_____"
]
],
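[
[
"# Illustrative sketch (added for clarity; not from the original template): what RandomOverSampler does.\n# It duplicates minority-class rows at random until the class counts are balanced.\nimport numpy as np\nfrom imblearn.over_sampling import RandomOverSampler\nX_demo = np.arange(10).reshape(-1, 1)\ny_demo = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])  # 8 majority vs 2 minority samples\nX_res, y_res = RandomOverSampler(random_state=0).fit_resample(X_demo, y_demo)\nprint(np.bincount(y_demo), '->', np.bincount(y_res))  # [8 2] -> [8 8]",
"_____no_output_____"
]
],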
[
[
"X_train,y_train = RandomOverSampler(random_state=123).fit_resample(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"## Model\n\n<code>Bernoulli Naive Bayes Classifier</code> is used for discrete data and it works on Bernoulli distribution. The main feature of Bernoulli Naive Bayes is that it accepts features only as binary values like true or false, yes or no, success or failure, 0 or 1 and so on. So when the feature values are **<code>binary</code>** we know that we have to use Bernoulli Naive Bayes classifier.\n\n#### Model Tuning Parameters\n\n 1. alpha : float, default=1.0\n> Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).\n\n 2. binarize : float or None, default=0.0\n> Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors.\n\n 3. fit_prior : bool, default=True\n> Whether to learn class prior probabilities or not. If false, a uniform prior will be used.\n\n 4. class_prior : array-like of shape (n_classes,), default=None\n> Prior probabilities of the classes. If specified the priors are not adjusted according to the data.\n\nFor More Info : [API](https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html)",
"_____no_output_____"
]
],
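[
[
"# Illustrative sketch (added for clarity; not from the original template): the binarize step inside BernoulliNB.\n# With the default binarize=0.0, any strictly positive tf-idf value is treated as 1 (term present)\n# and zeros stay 0 (term absent), which is how the continuous tf-idf features become Bernoulli features.\nimport numpy as np\ntfidf_row = np.array([0.0, 0.37, 0.0, 0.81, 0.12])\nbinarized = (tfidf_row > 0.0).astype(int)\nprint(binarized)  # [0 1 0 1 1]",
"_____no_output_____"
]
],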
[
[
"model = BernoulliNB()\nmodel.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"## Model Accuracy\n\nscore() method return the mean accuracy on the given test data and labels.\n\nIn multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.",
"_____no_output_____"
]
],
[
[
"print(\"Accuracy score {:.2f} %\\n\".format(model.score(X_test,y_test)*100))",
"Accuracy score 98.74 %\n\n"
]
],
[
[
"## Confusion Matrix\n\nA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.\n\nwhere:\n\n- Precision:- Accuracy of positive predictions.\n- Recall:- Fraction of positives that were correctly identified.\n- f1-score:- percent of positive predictions were correct\n- support:- Support is the number of actual occurrences of the class in the specified dataset.",
"_____no_output_____"
]
],
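[
[
"# Worked example (added for clarity; the counts are made up): how precision, recall and f1\n# fall out of a 2x2 confusion matrix with rows = actual class and columns = predicted class.\nimport numpy as np\ncm = np.array([[50, 10],   # actual 0: 50 correct, 10 wrongly predicted as 1\n               [5, 35]])   # actual 1: 5 wrongly predicted as 0, 35 correct\ntp, fp, fn = cm[1, 1], cm[0, 1], cm[1, 0]\nprecision = tp / (tp + fp)  # 35 / 45 = 0.778\nrecall = tp / (tp + fn)     # 35 / 40 = 0.875\nf1 = 2 * precision * recall / (precision + recall)  # about 0.82\nprint(round(precision, 3), round(recall, 3), round(f1, 3))",
"_____no_output_____"
]
],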
[
[
"plot_confusion_matrix(model,X_test,y_test)",
"_____no_output_____"
]
],
[
[
"## Classification Report \n\nA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.\n\nwhere:\n\n- Precision:- Accuracy of positive predictions.\n- Recall:- Fraction of positives that were correctly identified.\n- f1-score:- percent of positive predictions were correct\n- support:- Support is the number of actual occurrences of the class in the specified dataset.",
"_____no_output_____"
]
],
[
[
"print(classification_report(y_test,model.predict(X_test)))",
" precision recall f1-score support\n\n 0 0.99 1.00 0.99 949\n 1 0.99 0.93 0.96 166\n\n accuracy 0.99 1115\n macro avg 0.99 0.96 0.97 1115\nweighted avg 0.99 0.99 0.99 1115\n\n"
]
],
[
[
"#### Creator: Vamsi Mukkamala , Github: [Profile](https://github.com/vmc99)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdcd949ad787d68c51673901509fa24fa7a7c64 | 5,052 | ipynb | Jupyter Notebook | notebooks/AngluinExample.ipynb | icezyclon/AALpy | 3c2f05fdbbcdc99b47ba6b918540239568fca17f | [
"MIT"
] | 61 | 2021-04-01T10:38:52.000Z | 2022-03-28T13:44:23.000Z | notebooks/AngluinExample.ipynb | icezyclon/AALpy | 3c2f05fdbbcdc99b47ba6b918540239568fca17f | [
"MIT"
] | 16 | 2021-04-03T20:14:08.000Z | 2022-02-16T10:21:48.000Z | notebooks/AngluinExample.ipynb | icezyclon/AALpy | 3c2f05fdbbcdc99b47ba6b918540239568fca17f | [
"MIT"
] | 9 | 2021-04-05T13:43:17.000Z | 2022-03-09T14:06:17.000Z | 52.082474 | 2,008 | 0.43171 | [
[
[
"from aalpy.utils import get_Angluin_dfa\n\n# Import the DFA presented in Angluin's seminal paper\ndfa = get_Angluin_dfa()",
"_____no_output_____"
],
[
"# Get its input alphabet\nalphabet = dfa.get_input_alphabet()",
"_____no_output_____"
],
[
"# Create a SUL instance weapping the Anguin's automaton\nfrom aalpy.SULs import DfaSUL\nsul = DfaSUL(dfa)",
"_____no_output_____"
],
[
"# create a random walk equivelance oracle that will perform up to 500 steps every learning round\nfrom aalpy.oracles import RandomWalkEqOracle\n\neq_oracle = RandomWalkEqOracle(alphabet, sul, 500, reset_after_cex=True)\n\n",
"_____no_output_____"
],
[
"from aalpy.learning_algs import run_Lstar\n\n# start the L* and print the whole process in detail\nlearned_dfa = run_Lstar(alphabet, sul, eq_oracle, automaton_type='dfa',\n cache_and_non_det_check=True, cex_processing=None, print_level=3)",
"Hypothesis 1: 2 states.\n------------------------\nPrefixes / E set |() \n------------------------\n() |True \n------------------------\n('a',) |False \n========================\n------------------------\n('b',) |False \n------------------------\n('a', 'a') |True \n------------------------\n('a', 'b') |False \n------------------------\nCounterexample ['a', 'a', 'b', 'b']\nHypothesis 2: 4 states.\n--------------------------------------------\nPrefixes / E set |() |('a',) |('b',) \n--------------------------------------------\n() |True |False |False \n--------------------------------------------\n('a',) |False |True |False \n--------------------------------------------\n('a', 'a', 'b') |False |False |True \n--------------------------------------------\n('a', 'b') |False |False |False \n============================================\n--------------------------------------------\n('b',) |False |False |True \n--------------------------------------------\n('a', 'a') |True |False |False \n--------------------------------------------\n('a', 'a', 'b', 'a') |False |False |False \n--------------------------------------------\n('a', 'a', 'b', 'b') |True |False |False \n--------------------------------------------\n('a', 'b', 'a') |False |False |True \n--------------------------------------------\n('a', 'b', 'b') |False |True |False \n--------------------------------------------\n-----------------------------------\nLearning Finished.\nLearning Rounds: 2\nNumber of states: 4\nTime (in seconds)\n Total : 0.01\n Learning algorithm : 0.01\n Conformance checking : 0.0\nLearning Algorithm\n # Membership Queries : 24\n # MQ Saved by Caching : 14\n # Steps : 91\nEquivalence Query\n # Membership Queries : 57\n # Steps : 504\n-----------------------------------\n"
],
[
"# print the DOT representation of the final automaton\nprint(learned_dfa)",
"digraph learnedModel {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns0 -> s1 [label=a];\ns0 -> s2 [label=b];\ns1 -> s0 [label=a];\ns1 -> s3 [label=b];\ns2 -> s3 [label=a];\ns2 -> s0 [label=b];\ns3 -> s2 [label=a];\ns3 -> s1 [label=b];\n__start0 [label=\"\", shape=none];\n__start0 -> s0 [label=\"\"];\n}\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdcdb5ce721d3a786f2b25939bf5cc3d6257edc | 199,368 | ipynb | Jupyter Notebook | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MACHINE_LEARNING/LOGISTIC_REGRESSION/LOGISTIC_REGRESSION.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MACHINE_LEARNING/LOGISTIC_REGRESSION/LOGISTIC_REGRESSION.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MACHINE_LEARNING/LOGISTIC_REGRESSION/LOGISTIC_REGRESSION.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | 122.839187 | 45,224 | 0.851395 | [
[
[
"## LOGISTIC REGRESSION",
"_____no_output_____"
],
[
"## Titanic: Machine Learning from Disaster",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"df = pd.read_csv(\"titanic_train.csv\")\ndf.head()",
"_____no_output_____"
],
[
"print(df.columns.values)",
"['PassengerId' 'Survived' 'Pclass' 'Name' 'Sex' 'Age' 'SibSp' 'Parch'\n 'Ticket' 'Fare' 'Cabin' 'Embarked']\n"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n"
]
],
[
[
"## EDA",
"_____no_output_____"
]
],
[
[
"pd.isnull(df).sum()",
"_____no_output_____"
],
[
"sns.heatmap(df.isnull(), yticklabels = False, cbar = False, cmap = \"viridis\")",
"_____no_output_____"
],
[
"sns.set_style('whitegrid')\nsns.countplot(x=\"Survived\", data=df, palette = \"deep\")",
"_____no_output_____"
],
[
"sns.countplot(x=\"Survived\", hue = \"Sex\", data = df)",
"_____no_output_____"
],
[
"sns.countplot(x=\"Survived\", hue=\"Pclass\", data=df)",
"_____no_output_____"
],
[
"sns.distplot(df[\"Age\"].dropna(),kde=False, color=\"darkred\", bins=30)",
"_____no_output_____"
],
[
"sns.countplot(x=\"SibSp\", data=df)",
"_____no_output_____"
],
[
"df[\"Fare\"].hist(color=\"green\",bins=40, figsize=(10,5))",
"_____no_output_____"
],
[
"df.corr()",
"_____no_output_____"
],
[
"plt.figure(figsize=(12, 7))\nsns.heatmap(df.corr(),annot=True)\n",
"_____no_output_____"
]
],
[
[
"## DATA CLEANING",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(12, 7))\nsns.boxplot(x=\"Pclass\", y=\"Age\", data=df, palette = \"winter\")",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 PassengerId 891 non-null int64 \n 1 Survived 891 non-null int64 \n 2 Pclass 891 non-null int64 \n 3 Name 891 non-null object \n 4 Sex 891 non-null object \n 5 Age 714 non-null float64\n 6 SibSp 891 non-null int64 \n 7 Parch 891 non-null int64 \n 8 Ticket 891 non-null object \n 9 Fare 891 non-null float64\n 10 Cabin 204 non-null object \n 11 Embarked 889 non-null object \ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.7+ KB\n"
],
[
"\ndef impute_age(cols):\n Age = cols[0]\n Pclass = cols[1]\n \n if pd.isnull(Age):\n\n if Pclass == 1:\n return 37\n\n elif Pclass == 2:\n return 29\n\n else:\n return 24\n\n else:\n return Age",
"_____no_output_____"
],
[
"df[\"Age\"] = df[[\"Age\", \"Pclass\"]].apply(impute_age, axis = 1)",
"_____no_output_____"
],
[
"sns.heatmap(df.isnull(), yticklabels = False, cbar=False, cmap=\"viridis\")",
"_____no_output_____"
],
[
"df.drop(\"Cabin\",axis=1, inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"sex = pd.get_dummies(df[\"Sex\"], drop_first = True)\nembark = pd.get_dummies(df[\"Embarked\"], drop_first=True)",
"_____no_output_____"
],
[
"df.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)",
"_____no_output_____"
],
[
"df = pd.concat([df,sex,embark],axis=1)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"## MODEL",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X = df.drop(\"Survived\", axis=1)\ny = df[\"Survived\"]",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\n random_state=42)",
"_____no_output_____"
]
],
[
[
"## TRAINING",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression",
"_____no_output_____"
],
[
"model = LogisticRegression()\nmodel.fit(X_train,y_train)",
"_____no_output_____"
],
[
"model.score(X_train,y_train)",
"_____no_output_____"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score\n\nscores = cross_val_score(model, X_test, y_test, cv=10)\nprint('Cross-Validation Accuracy Scores', scores.mean())\nprint(scores)",
"Cross-Validation Accuracy Scores 0.7618577075098815\n[0.82608696 0.69565217 0.86956522 0.77272727 0.72727273 0.81818182\n 0.68181818 0.72727273 0.77272727 0.72727273]\n"
]
],
[
[
"## EVALUATION",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix",
"_____no_output_____"
],
[
"print(classification_report(y_test,y_pred))",
" precision recall f1-score support\n\n 0 0.81 0.81 0.81 134\n 1 0.72 0.71 0.71 89\n\n accuracy 0.77 223\n macro avg 0.76 0.76 0.76 223\nweighted avg 0.77 0.77 0.77 223\n\n"
],
[
"from sklearn.model_selection import TimeSeriesSplit\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom yellowbrick.classifier import ClassificationReport\nfrom yellowbrick.datasets import load_occupancy\n\nvisualizer = ClassificationReport(model, support=True)\n\nvisualizer.fit(X_train, y_train) # Fit the visualizer and the model\nvisualizer.score(X_test, y_test) # Evaluate the model on the test data\nvisualizer.show()",
"_____no_output_____"
],
[
"cnf_matrix = confusion_matrix(y_pred,y_test)",
"_____no_output_____"
],
[
"cnf_matrix",
"_____no_output_____"
],
[
"sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"YlGnBu\", fmt='g')\nplt.ylabel('Predicted Label')\nplt.xlabel('Actual Label')",
"_____no_output_____"
],
[
"\n ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdcfa8369a31c8f5f2026f5472083a719faec45 | 97,761 | ipynb | Jupyter Notebook | Project_2_VQE_Molecules/S1_Classical_Methods.ipynb | CDL-Week2/CohortProject_2020 | ce33794267424760926afca0512942ab7e7d28eb | [
"MIT"
] | null | null | null | Project_2_VQE_Molecules/S1_Classical_Methods.ipynb | CDL-Week2/CohortProject_2020 | ce33794267424760926afca0512942ab7e7d28eb | [
"MIT"
] | null | null | null | Project_2_VQE_Molecules/S1_Classical_Methods.ipynb | CDL-Week2/CohortProject_2020 | ce33794267424760926afca0512942ab7e7d28eb | [
"MIT"
] | null | null | null | 126.143226 | 21,664 | 0.868168 | [
[
[
"# Review of classical electronic structure methods \n\nIn order to assess the performance of quantum computing algorithms in addressing the electronic structure problem, we will briefly review a few commonly employed classical electronic structure methods of varying level of approximation. \n\n## Hartree-Fock\n\nThe Hartree-Fock (HF) method employs a **mean-field approximation**, where Coulomb correlation of electrons is neglected. The HF wavefunction is hence restricted to the form of a single Slater determinant, for which the optimal form may be acquired by an optimization of the underlying single particle basis (orbitals). Once the optimal HF orbitals have been found, the HF state may be written as \n\n$$ | \\text{HF} \\rangle = \\prod_{p \\in \\text{occ}} \\hat a^\\dagger_p | \\text{vac} \\rangle $$\n\nwhere $\\hat a^\\dagger_p$ creates an electron in the $p^{\\rm{th}}$ optimized HF spin orbital, and $| \\text{vac} \\rangle$ is the vacuum state (all spin-orbitals unoccupied). Due to the restriction to a single Slater determinant, the HF energy may be obtained very efficiently and can be applied to very large molecules, however it becomes qualitatively incorrect when **electronic correlations** become sufficiently strong. The HF wavefunction is often used as a starting point in more accurate treatments of electronic correlation. \n\n## Coupled cluster\n\nThe Coupled cluster (CC) method introduces electronic correlation to the wavefunction ansatz by operating on the HF reference state with the exponential of excitation operators\n\n$$|\\text{CC} \\rangle = e^{\\hat T} | \\text{HF}\\rangle, \\quad \\hat T = \\hat T_1 + \\hat T_2 + ...$$\n\nwhere $\\hat T_1 = \\sum_{ia} t^{a}_i \\hat a^\\dagger_a \\hat a_i$ are referred to as 'singles', $\\hat T_2 = \\sum_{ijab} t^{ab}_{ij} \\hat a^\\dagger_a \\hat a^\\dagger_b \\hat a_i \\hat a_j$ as 'doubles', etc. When the excitation rank is truncated to only singles (S) and doubles (D), $\\hat T = \\hat T_1 + \\hat T_2$, the corresponding ansatz is referred to as CCSD. Since the number of possible single and double excitations for a system of $N$ electrons in $M$ orbitals is polynomial in $M$, one can efficiently solve a system of equations to obtain the optimized CCSD amplitudes. CCSD often gives accurate energies and can be applied to modestly sized chemical systems. However, due to its implementation, it can **violate the variational principle** and give energies lower than the ground state.\n\n## Full Configuration Interaction\nThe full configuration interaction (FCI) method yields the **exact ground state energy** within a given basis set. The FCI wavefunction is written as a linear combination of all possible $N$-particle Slater determinants over the orbital basis\n$$|\\text{FCI} \\rangle = \\left( \\sum_{ia} C^{a}_i \\hat a^\\dagger_a \\hat a_i + \\sum_{ijab} C^{ab}_{ij} \\hat a^\\dagger_a \\hat a^\\dagger_b \\hat a_i \\hat a_j + ... \\right) | \\text{HF} \\rangle $$\nwhere the sum includes up to $N$-electron excitations, and hence there are an exponential number of coefficients to optimize in $| \\text{FCI} \\rangle$. Due to its **exponential complexity**, FCI is often reserved for only the smallest chemical systems. However, it serves as a useful result to compare to when assessing tractable electronic structure methods.",
"_____no_output_____"
],
[
"# Benchmarking for electronic PESs in minimal basis (STO-3G)\n\nWe will apply HF, CCSD, and FCI to obtaining the PESs for a few molecular dissociation processes in minimal (STO-3G) basis.",
"_____no_output_____"
]
],
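[
[
"# Illustrative sketch (added alongside the review above; not part of the original notebook):\n# counting Slater determinants shows why FCI scales so badly. Without imposing spin or spatial\n# symmetry, N electrons in 2M spin-orbitals give binomial(2M, N) determinants.\nfrom math import comb\nfor label, n_electrons, n_spin_orbitals in [('H2 / STO-3G', 2, 4), ('H2O / STO-3G', 10, 14), ('N2 / STO-3G', 14, 20), ('a larger basis', 20, 40)]:\n    n_dets = comb(n_spin_orbitals, n_electrons)\n    print(label, ':', n_dets, 'determinants')",
"_____no_output_____"
]
],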
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom utility import get_molecular_data, obtain_PES\n\nbasis = 'sto-3g'",
"_____no_output_____"
]
],
[
[
"## H<sub>2</sub>",
"_____no_output_____"
]
],
[
[
"bond_lengths = np.linspace(0.2,2.6,15)",
"_____no_output_____"
],
[
"#Run FCI\nFCI_PES = obtain_PES('h2', bond_lengths, basis, method='fci')",
"E = 0.15748213365503272 Eh\nE = -0.8488991062024303 Eh\nE = -1.0882281844069168 Eh\nE = -1.136928959400571 Eh\nE = -1.1229570527982327 Eh\nE = -1.0887645364963472 Eh\nE = -1.050442037218747 Eh\nE = -1.0154682491653277 Eh\nE = -0.987392996293216 Eh\nE = -0.9670259116093083 Eh\nE = -0.9534242125062942 Eh\nE = -0.9449089870950516 Eh\nE = -0.9398197912188275 Eh\nE = -0.9368706035283361 Eh\nE = -0.9351960308386078 Eh\n"
],
[
"#Run HF\nHF_PES = obtain_PES('h2', bond_lengths, basis, method='hf')",
"E = 0.16412595411335862 Eh\nE = -0.8397057843499789 Eh\nE = -1.074887931719117 Eh\nE = -1.1175258568986912 Eh\nE = -1.0951595815644795 Eh\nE = -1.0495617842752072 Eh\nE = -0.9960202460350677 Eh\nE = -0.9415475409906814 Eh\nE = -0.8899750208893146 Eh\nE = -0.8432718025417755 Eh\nE = -0.8022428650141384 Eh\nE = -0.7669825247359998 Eh\nE = -0.7371545448988288 Eh\nE = -0.7121961606082431 Eh\nE = -0.6914700512624794 Eh\n"
],
[
"#Run CCSD\nCCSD_PES = obtain_PES('h2', bond_lengths, basis, method='ccsd')",
"E = 0.15748412056436217 Eh\nE = -0.8488985094851161 Eh\nE = -1.0882310174307857 Eh\nE = -1.1369333351418656 Eh\nE = -1.1229593878059763 Eh\nE = -1.0887606905920533 Eh\nE = -1.050426089327126 Eh\nE = -1.015441636439638 Eh\nE = -0.9873692257052735 Eh\nE = -0.9670168314563374 Eh\nE = -0.9534301294889956 Eh\nE = -0.9449225613845066 Eh\nE = -0.9398332746700124 Eh\nE = -0.9368793989542732 Eh\nE = -0.9351989265367034 Eh\n"
],
[
"#Plot H2 PESs\n\nplt.title('H2 dissociation, STO-3G')\nplt.xlabel('R, Angstrom')\nplt.ylabel('E, Hartree')\n\nplt.plot(bond_lengths, FCI_PES, label='FCI')\nplt.scatter(bond_lengths, HF_PES, label='HF', color='orange')\nplt.scatter(bond_lengths, CCSD_PES, label='CCSD', color='purple')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"<img src=\"figs/testimage.png\">",
"_____no_output_____"
],
[
"## H<sub>2</sub>O symmetric O-H dissociation",
"_____no_output_____"
]
],
[
[
"bond_lengths = np.linspace(0.5,2.7,15)",
"_____no_output_____"
],
[
"#Run FCI\nFCI_PES = obtain_PES('h2o', bond_lengths, basis, method='fci')",
"E = -73.14278405998414 Eh\nE = -74.47523205268014 Eh\nE = -74.90413378058017 Eh\nE = -75.01383133956654 Eh\nE = -75.00356512166928 Eh\nE = -74.95170869126783 Eh\nE = -74.89218648216018 Eh\nE = -74.83923063661699 Eh\nE = -74.79866365936574 Eh\nE = -74.77161515186775 Eh\nE = -74.75568947214684 Eh\nE = -74.74704715251242 Eh\nE = -74.74248898749448 Eh\nE = -74.74007426712959 Eh\nCould not converge\n"
],
[
"#Run HF\nHF_PES = obtain_PES('h2o', bond_lengths, basis, method='hf')",
"E = -73.12948161321476 Eh\nE = -74.45343797011796 Eh\nE = -74.87055361047916 Eh\nE = -74.96308510923626 Eh\nE = -74.92817508740148 Eh\nE = -74.84281942863211 Eh\nE = -74.74051950148022 Eh\nE = -74.63577758413845 Eh\nE = -74.53576638656821 Eh\nE = -74.44494384742121 Eh\nE = -74.36975504260424 Eh\nE = -74.29778481417813 Eh\nE = -74.2416722005104 Eh\nE = -74.27129957698466 Eh\nCould not converge\n"
],
[
"#Run CCSD\nCCSD_PES = obtain_PES('h2o', bond_lengths, basis, method='ccsd')",
"E = -73.14276363918694 Eh\nE = -74.47519485717145 Eh\nE = -74.90406353010552 Eh\nE = -75.01370018934803 Eh\nE = -75.00330155818898 Eh\nE = -74.95116757042634 Eh\nE = -74.89128367467829 Eh\nE = -74.83853401139109 Eh\nE = -74.80070162376026 Eh\nE = -74.7810573871734 Eh\nE = -74.74394274758566 Eh\nE = -74.77972801511767 Eh\nE = -74.7833896345312 Eh\nE = -74.64224434262873 Eh\nCould not converge\n"
],
[
"#Plot H2O PESs\n\nplt.title('H2O symmetric dissociation, STO-3G')\nplt.xlabel('R, Angstrom')\nplt.ylabel('E, Hartree')\n\nplt.plot(bond_lengths, FCI_PES, label='FCI')\nplt.scatter(bond_lengths, HF_PES, label='HF', color='orange')\nplt.scatter(bond_lengths, CCSD_PES, label='CCSD', color='purple')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## LiH",
"_____no_output_____"
]
],
[
[
"bond_lengths = np.linspace(0.5,3.5,31)\nprint('Running FCI')\nFCI_PES = obtain_PES('lih', bond_lengths, basis, method='fci')\nprint('Running HF')\nHF_PES = obtain_PES('lih', bond_lengths, basis, method='hf')\nprint('Running CCSD')\nCCSD_PES = obtain_PES('lih', bond_lengths, basis, method='ccsd')",
"Running FCI\nE = -7.050225036066642 Eh\nE = -7.319318938170493 Eh\nE = -7.505051767361807 Eh\nE = -7.634167329728353 Eh\nE = -7.723423721347238 Eh\nE = -7.784460280267016 Eh\nE = -7.825536957974943 Eh\nE = -7.852430853316401 Eh\nE = -7.869139976429565 Eh\nE = -7.878453652319019 Eh\nE = -7.882362286810953 Eh\nE = -7.882324378871222 Eh\nE = -7.8794335165319636 Eh\nE = -7.874524024942922 Eh\nE = -7.868240793792822 Eh\nE = -7.861087772411142 Eh\nE = -7.853462904979664 Eh\nE = -7.8456836231164235 Eh\nE = -7.8380050025471135 Eh\nE = -7.830631624342838 Eh\nE = -7.823723883389364 Eh\nE = -7.8173999273756625 Eh\nE = -7.8117353390570266 Eh\nE = -7.806763402503807 Eh\nE = -7.802478452788826 Eh\nE = -7.798843159455239 Eh\nE = -7.795798533270906 Eh\nE = -7.793274300625994 Eh\nE = -7.791197565942319 Eh\nE = -7.789498785374102 Eh\nE = -7.788115123260219 Eh\nRunning HF\nE = -7.0284717535917025 Eh\nE = -7.299610442693042 Eh\nE = -7.4860204040661715 Eh\nE = -7.615847732045305 Eh\nE = -7.705830270834717 Eh\nE = -7.767438446509383 Eh\nE = -7.808820265798591 Eh\nE = -7.835694662390294 Eh\nE = -7.85203467637742 Eh\nE = -7.86062131505413 Eh\nE = -7.863441868065388 Eh\nE = -7.861950402903201 Eh\nE = -7.857231938312342 Eh\nE = -7.850107259191326 Eh\nE = -7.841202658643016 Eh\nE = -7.830998803612782 Eh\nE = -7.81986656068295 Eh\nE = -7.808094112982564 Eh\nE = -7.795907892710426 Eh\nE = -7.783488814132521 Eh\nE = -7.770984662573619 Eh\nE = -7.758519166159427 Eh\nE = -7.746198175823739 Eh\nE = -7.734113419610024 Eh\nE = -7.722344387131093 Eh\nE = -7.710958961728265 Eh\nE = -7.700013407146114 Eh\nE = -7.689552215347576 Eh\nE = -7.6796081692907645 Eh\nE = -7.670202787852991 Eh\nE = -7.661347172773848 Eh\nRunning CCSD\nE = -7.050156294294409 Eh\nE = -7.31929636945824 Eh\nE = -7.505041804061137 Eh\nE = -7.634160527656569 Eh\nE = -7.723417419306967 Eh\nE = -7.784453781342631 Eh\nE = -7.825530117618331 Eh\nE = -7.8524234479693265 Eh\nE = -7.869131686034385 Eh\nE = -7.878444199417256 Eh\nE = -7.882351449527868 Eh\nE = -7.882312097529623 Eh\nE = -7.879419815515022 Eh\nE = -7.874508895995587 Eh\nE = -7.868224101324972 Eh\nE = -7.861069226073766 Eh\nE = -7.853442051456514 Eh\nE = -7.84565983219227 Eh\nE = -7.837977472710677 Eh\nE = -7.830599382289906 Eh\nE = -7.823685850858562 Eh\nE = -7.817355013474601 Eh\nE = -7.811682577240462 Eh\nE = -7.806702090929714 Eh\nE = -7.802408241683294 Eh\nE = -7.798764080782396 Eh\nE = -7.795710917913035 Eh\nE = -7.793178734937743 Eh\nE = -7.791094696651086 Eh\nE = -7.789389227862769 Eh\nE = -7.787999544493268 Eh\n"
],
[
"plt.title('LiH dissociation, STO-3G')\nplt.xlabel('R, Angstrom')\nplt.ylabel('E, Hartree')\n\nplt.plot(bond_lengths, FCI_PES, label='FCI')\nplt.scatter(bond_lengths, HF_PES, label='HF', color='orange')\nplt.scatter(bond_lengths, CCSD_PES, label='CCSD', color='purple')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## H<sub>4</sub> angle",
"_____no_output_____"
]
],
[
[
"angles = np.linspace(85,95,21)\nprint('Running FCI')\nFCI_PES = obtain_PES('h4', angles, basis, method='fci')\nprint('Running HF')\nHF_PES = obtain_PES('h4', angles, basis, method='hf')\nprint('Running CCSD')\nCCSD_PES = obtain_PES('h4', angles, basis, method='ccsd')",
"Running FCI\nE = -1.986726115111665 Eh\nE = -1.983753074791256 Eh\nE = -1.9809876600812353 Eh\nE = -1.9784558966241912 Eh\nE = -1.9761849765159 Eh\nE = -1.9742024922300878 Eh\nE = -1.9725353916283375 Eh\nE = -1.9712086838516676 Eh\nE = -1.9702439840851076 Eh\nE = -1.969658052182965 Eh\nE = -1.9694615253790553 Eh\nE = -1.9696580521829743 Eh\nE = -1.9702439840851063 Eh\nE = -1.971208683851668 Eh\nE = -1.972535391628334 Eh\nE = -1.9742024922300914 Eh\nE = -1.976184976515897 Eh\nE = -1.978455896624208 Eh\nE = -1.9809876600812437 Eh\nE = -1.9837530747912542 Eh\nE = -1.9867261151116788 Eh\nRunning HF\nE = -1.8473793344555092 Eh\nE = -1.8407925357367825 Eh\nE = -1.834161635500117 Eh\nE = -1.8274861159746565 Eh\nE = -1.8207654212301247 Eh\nE = -1.8139989576966715 Eh\nE = -1.8071860930043528 Eh\nE = -1.800326156991018 Eh\nE = -1.7934184402660902 Eh\nE = -1.7864621949207806 Eh\nE = -1.7794566334077442 Eh\nE = -1.7864621949207602 Eh\nE = -1.7934184402660387 Eh\nE = -1.8003261569909765 Eh\nE = -1.8071860930043526 Eh\nE = -1.81399895769665 Eh\nE = -1.8207654212300826 Eh\nE = -1.8274861159746807 Eh\nE = -1.8341616354999826 Eh\nE = -1.8407925357367794 Eh\nE = -1.847379334455477 Eh\nRunning CCSD\nE = -1.988100146542443 Eh\nE = -1.985395986765857 Eh\nE = -1.9829485147096888 Eh\nE = -1.9807905424277594 Eh\nE = -1.9789558063938852 Eh\nE = -1.9774775610628073 Eh\nE = -1.9763868042291506 Eh\nE = -1.9757101943075248 Eh\nE = -1.9754679258214223 Eh\nE = -1.9756718867341703 Eh\nE = -1.9763244000372147 Eh\nE = -1.9756718867341667 Eh\nE = -1.9754679258214083 Eh\nE = -1.9757101943075221 Eh\nE = -1.9763868042291548 Eh\nE = -1.9774775610628046 Eh\nE = -1.9789558063938804 Eh\nE = -1.980790542427761 Eh\nE = -1.9829485147096741 Eh\nE = -1.9853959867658584 Eh\nE = -1.9881001465424395 Eh\n"
],
[
"fig = plt.figure(figsize=(5,5))\nax = fig.add_subplot(1,1,1)\nax.set_title('H4 angle, STO-3G')\nax.set_xlabel('Angle, Degrees')\nax.set_ylabel('E, Hartree')\nax.plot(angles, FCI_PES, label='FCI')\nax.scatter(angles, HF_PES, label='HF', color='orange')\nax.scatter(angles, CCSD_PES, label='CCSD', color='purple')\nax.legend()",
"_____no_output_____"
]
],
[
[
"## N<sub>2</sub>",
"_____no_output_____"
]
],
[
[
"bond_lengths = np.linspace(0.6,1.8,26)\nprint('Running FCI')\nFCI_PES = obtain_PES('n2', bond_lengths, basis, method='fci')\nprint('Running HF')\nHF_PES = obtain_PES('n2', bond_lengths, basis, method='hf')\nprint('Running CCSD')\nCCSD_PES = obtain_PES('n2', bond_lengths, basis, method='ccsd')",
"_____no_output_____"
],
[
"plt.title('N2 dissociation, STO-3G')\nplt.xlabel('R, Angstrom')\nplt.ylabel('E, Hartree')\n\nplt.plot(bond_lengths, FCI_PES, label='FCI')\nplt.scatter(bond_lengths, HF_PES, label='HF', color='orange')\nplt.scatter(bond_lengths, CCSD_PES, label='CCSD', color='purple')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## NH<sub>3</sub>",
"_____no_output_____"
]
],
[
[
"bond_lengths = np.linspace(0.5,2.0,16)\nprint('Running FCI')\nFCI_PES = obtain_PES('nh3', bond_lengths, basis, method='fci')\nprint('Running HF')\nHF_PES = obtain_PES('nh3', bond_lengths, basis, method='hf')\nprint('Running CCSD')\nCCSD_PES = obtain_PES('nh3', bond_lengths, basis, method='ccsd')",
"_____no_output_____"
],
[
"plt.title('NH3 dissociation, STO-3G')\nplt.xlabel('R, Angstrom')\nplt.ylabel('E, Hartree')\n\nplt.plot(bond_lengths, FCI_PES, label='FCI')\nplt.scatter(bond_lengths, HF_PES, label='HF', color='orange')\nplt.scatter(bond_lengths, CCSD_PES, label='CCSD', color='purple')\nplt.legend()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ecdcfe1865c4af217b4c16763690ff6b6130d7bb | 3,372 | ipynb | Jupyter Notebook | 7. find_ID.ipynb | Daya-Jin/- | 28787974c40f5c3c151b1c4a567a7eb6c51604ba | [
"Apache-2.0"
] | 82 | 2018-11-28T04:38:01.000Z | 2022-03-08T03:19:05.000Z | 7. find_ID.ipynb | Daya-Jin/- | 28787974c40f5c3c151b1c4a567a7eb6c51604ba | [
"Apache-2.0"
] | null | null | null | 7. find_ID.ipynb | Daya-Jin/- | 28787974c40f5c3c151b1c4a567a7eb6c51604ba | [
"Apache-2.0"
] | 37 | 2018-11-28T04:35:09.000Z | 2022-03-25T03:21:23.000Z | 35.125 | 301 | 0.643238 | [
[
[
"其实在做特征跟调参的期间,我就一直想看看数据中是否有重复数据,因为在做rank_encoding的时候就发现数据中有很大的漏洞,只不过在做特征的前向选择与后向选择实在太花时间了(主要原因还是代码能力弱,没法自动化),一直到做完特征选择之后才去认真看了下原数据。\n\n首先是一部分特征存在等级划分,如'Region'>'BusLoc'>'Neighborhood',这是地理上的等级;然后是'TolHeight'>'Height'>'RoomDir',这是每套房屋的等级;最后是房屋内部的等级,'Bedroom'>'Livingroom'>'Bathroom'>'RoomArea'。当然这个等级的次序不同的人有不同的理解,以上次序只是我个人的理解。划分出这些等级的目的其实就是想精准定位出'房屋ID'这个属性,然后就可以找出测试集跟训练集是否有重复数据,对于同一个出租屋,直接用它的历史租金来填充它4月份的租金即可,那么这部分数据就不需要使用模型来预测了。",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntrain_data=pd.read_csv('data/train.csv').fillna(-999)\ntest_data=pd.read_csv('data/test.csv').fillna(-999)\n\n# 为了便与合并,将除目标值的所有列字符串化\ndef objectal(df):\n for col in df.columns:\n if col!='Rental':\n df[col] = df[col].astype(str)\n return df\ntrain_data=objectal(train_data)\ntest_data=objectal(test_data)\n\nmon1_train_df=train_data[train_data.loc[:,'Time']=='1'].drop(['Time','RentRoom'],axis=1).drop_duplicates()\nmon2_train_df=train_data[train_data.loc[:,'Time']=='2'].drop(['Time','RentRoom'],axis=1).drop_duplicates()\nmon3_train_df=train_data[train_data.loc[:,'Time']=='3'].drop(['Time','RentRoom'],axis=1).drop_duplicates()\n\ncommon_cols=list(mon1_train_df.columns)\ncommon_cols.remove('Rental')\n\n# 按月计算出房屋的均租金\nmon1_train_df=mon1_train_df.groupby(common_cols,as_index=False).mean()\nmon2_train_df=mon2_train_df.groupby(common_cols,as_index=False).mean()\nmon3_train_df=mon3_train_df.groupby(common_cols,as_index=False).mean()\n\n# 二月并一月,缺失值由一月数据来填充\nrecent_mean_rental=mon2_train_df.merge(mon1_train_df,how='outer',on=common_cols).fillna(method='bfill',axis=1)\nrecent_mean_rental=recent_mean_rental.drop(['Rental_y'],axis=1).rename(columns={'Rental_x':'Rental'})\n# 三月并二月,缺失值由二月(一月)来填充\nrecent_mean_rental=mon3_train_df.merge(recent_mean_rental,how='outer',on=common_cols).fillna(method='bfill',axis=1)\nrecent_mean_rental=recent_mean_rental.drop(['Rental_y'],axis=1).rename(columns={'Rental_x':'Rental'})\n\nstatistic_pred=test_data.merge(recent_mean_rental,how='left',on=common_cols)\nstatistic_pred.loc[:,['id','Rental']].to_csv('./result/statistic_pred.csv',index=False)",
"_____no_output_____"
]
],
[
[
"将这个csv文件覆盖掉队友的LGB预测文件之后,线上分数从1.86升至1.82;\n\n先将XGB与LGB均化融合,在用这个csv覆盖掉融合结果,线上分数从1.82升至1.80。\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecdd020679cdad4213577fb53b941dd88e03dd8e | 12,591 | ipynb | Jupyter Notebook | testing/archives/matplotlib_tips.ipynb | aravindhnivas/FELion-Spectrum-Analyser | 430f16884482089b2f717ea7dd50625078971e48 | [
"MIT"
] | null | null | null | testing/archives/matplotlib_tips.ipynb | aravindhnivas/FELion-Spectrum-Analyser | 430f16884482089b2f717ea7dd50625078971e48 | [
"MIT"
] | null | null | null | testing/archives/matplotlib_tips.ipynb | aravindhnivas/FELion-Spectrum-Analyser | 430f16884482089b2f717ea7dd50625078971e48 | [
"MIT"
] | 1 | 2019-01-25T20:37:57.000Z | 2019-01-25T20:37:57.000Z | 28.042316 | 91 | 0.496227 | [
[
[
"## Cursor",
"_____no_output_____"
]
],
[
[
"%matplotlib tk",
"_____no_output_____"
],
[
"from matplotlib.widgets import Cursor\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nfig = plt.figure(figsize=(8, 6))\nax = fig.add_subplot(111, facecolor='#FFFFCC')\n\nx, y = 4*(np.random.rand(2, 100) - .5)\nax.plot(x, y, 'o')\nax.set_xlim(-2, 2)\nax.set_ylim(-2, 2)\n\n# set useblit = True on gtkagg for enhanced performance\ncursor = Cursor(ax, useblit=True, color='red', linewidth=1)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Buttons",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\n\nfreqs = np.arange(2, 20, 3)\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(bottom=0.2)\nt = np.arange(0.0, 1.0, 0.001)\ns = np.sin(2*np.pi*freqs[0]*t)\nl, = plt.plot(t, s, lw=2)\n\n\nclass Index(object):\n ind = 0\n\n def next(self, event):\n self.ind += 1\n i = self.ind % len(freqs)\n ydata = np.sin(2*np.pi*freqs[i]*t)\n l.set_ydata(ydata)\n plt.draw()\n\n def prev(self, event):\n self.ind -= 1\n i = self.ind % len(freqs)\n ydata = np.sin(2*np.pi*freqs[i]*t)\n l.set_ydata(ydata)\n plt.draw()\n\ncallback = Index()\naxprev = plt.axes([0.7, 0.05, 0.1, 0.075])\naxnext = plt.axes([0.81, 0.05, 0.1, 0.075])\nbnext = Button(axnext, 'Next')\nbnext.on_clicked(callback.next)\nbprev = Button(axprev, 'Previous')\nbprev.on_clicked(callback.prev)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Check Button",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import CheckButtons\n\nt = np.arange(0.0, 2.0, 0.01)\ns0 = np.sin(2*np.pi*t)\ns1 = np.sin(4*np.pi*t)\ns2 = np.sin(6*np.pi*t)\n\nfig, ax = plt.subplots()\nl0, = ax.plot(t, s0, visible=False, lw=2)\nl1, = ax.plot(t, s1, lw=2)\nl2, = ax.plot(t, s2, lw=2)\nplt.subplots_adjust(left=0.2)\n\nrax = plt.axes([0.05, 0.4, 0.1, 0.15])\ncheck = CheckButtons(rax, ('2 Hz', '4 Hz', '6 Hz'), (False, True, True))\n\n\ndef func(label):\n if label == '2 Hz':\n l0.set_visible(not l0.get_visible())\n elif label == '4 Hz':\n l1.set_visible(not l1.get_visible())\n elif label == '6 Hz':\n l2.set_visible(not l2.get_visible())\n plt.draw()\ncheck.on_clicked(func)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Radiobutton",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import RadioButtons\n\nt = np.arange(0.0, 2.0, 0.01)\ns0 = np.sin(2*np.pi*t)\ns1 = np.sin(4*np.pi*t)\ns2 = np.sin(8*np.pi*t)\n\nfig, ax = plt.subplots()\nl, = ax.plot(t, s0, lw=2, color='red')\nplt.subplots_adjust(left=0.3)\n\naxcolor = 'lightgoldenrodyellow'\nrax = plt.axes([0.05, 0.7, 0.15, 0.15], facecolor=axcolor)\nradio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz'))\n\n\ndef hzfunc(label):\n hzdict = {'2 Hz': s0, '4 Hz': s1, '8 Hz': s2}\n ydata = hzdict[label]\n l.set_ydata(ydata)\n plt.draw()\nradio.on_clicked(hzfunc)\n\nrax = plt.axes([0.05, 0.4, 0.15, 0.15], facecolor=axcolor)\nradio2 = RadioButtons(rax, ('red', 'blue', 'green'))\n\n\ndef colorfunc(label):\n l.set_color(label)\n plt.draw()\nradio2.on_clicked(colorfunc)\n\nrax = plt.axes([0.05, 0.1, 0.15, 0.15], facecolor=axcolor)\nradio3 = RadioButtons(rax, ('-', '--', '-.', 'steps', ':'))\n\n\ndef stylefunc(label):\n l.set_linestyle(label)\n plt.draw()\nradio3.on_clicked(stylefunc)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Rectange Sector",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\n\"\"\"\nDo a mouseclick somewhere, move the mouse to some destination, release\nthe button. This class gives click- and release-events and also draws\na line or a box from the click-point to the actual mouseposition\n(within the same axes) until the button is released. Within the\nmethod 'self.ignore()' it is checked whether the button from eventpress\nand eventrelease are the same.\n\n\"\"\"\nfrom matplotlib.widgets import RectangleSelector\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef line_select_callback(eclick, erelease):\n 'eclick and erelease are the press and release events'\n x1, y1 = eclick.xdata, eclick.ydata\n x2, y2 = erelease.xdata, erelease.ydata\n print(\"(%3.2f, %3.2f) --> (%3.2f, %3.2f)\" % (x1, y1, x2, y2))\n print(\" The button you used were: %s %s\" % (eclick.button, erelease.button))\n\n\ndef toggle_selector(event):\n print(' Key pressed.')\n if event.key in ['Q', 'q'] and toggle_selector.RS.active:\n print(' RectangleSelector deactivated.')\n toggle_selector.RS.set_active(False)\n if event.key in ['A', 'a'] and not toggle_selector.RS.active:\n print(' RectangleSelector activated.')\n toggle_selector.RS.set_active(True)\n\n\nfig, current_ax = plt.subplots() # make a new plotting range\nN = 100000 # If N is large one can see\nx = np.linspace(0.0, 10.0, N) # improvement by use blitting!\n\nplt.plot(x, +np.sin(.2*np.pi*x), lw=3.5, c='b', alpha=.7) # plot something\nplt.plot(x, +np.cos(.2*np.pi*x), lw=3.5, c='r', alpha=.5)\nplt.plot(x, -np.sin(.2*np.pi*x), lw=3.5, c='g', alpha=.3)\n\nprint(\"\\n click --> release\")\n\n# drawtype is 'box' or 'line' or 'none'\ntoggle_selector.RS = RectangleSelector(current_ax, line_select_callback,\n drawtype='box', useblit=True,\n button=[1, 3], # don't use middle button\n minspanx=5, minspany=5,\n spancoords='pixels',\n interactive=True)\nplt.connect('key_press_event', toggle_selector)\nplt.show()",
"\n click --> release\n(1.12, -0.73) --> (6.84, 0.21)\n The button you used were: 1 1\n"
]
],
[
[
"## Multicursor",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import MultiCursor\n\nt = np.arange(0.0, 2.0, 0.01)\ns1 = np.sin(2*np.pi*t)\ns2 = np.sin(4*np.pi*t)\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax1.plot(t, s1)\n\n\nax2 = fig.add_subplot(212, sharex=ax1)\nax2.plot(t, s2)\n\nmulti = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Slider DEMO",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\n\nt = np.arange(0.0, 1.0, 0.001)\na0 = 5\nf0 = 3\ns = a0*np.sin(2*np.pi*f0*t)\n\nl, = ax.plot(t, s, lw=2, color='red')\n\naxcolor = 'lightgoldenrodyellow'\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\naxamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n\nsfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)\nsamp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)\n\ndef update(val):\n amp = samp.val\n freq = sfreq.val\n l.set_ydata(amp*np.sin(2*np.pi*freq*t))\n fig.canvas.draw_idle()\nsfreq.on_changed(update)\nsamp.on_changed(update)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Span Selector",
"_____no_output_____"
]
],
[
[
"\"\"\"\nThe SpanSelector is a mouse widget to select a xmin/xmax range and plot the\ndetail view of the selected region in the lower axes\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import SpanSelector\n\nfig = plt.figure(figsize=(8, 6))\nax = fig.add_subplot(211, facecolor='#FFFFCC')\n\nx = np.arange(0.0, 5.0, 0.01)\ny = np.sin(2*np.pi*x) + 0.5*np.random.randn(len(x))\n\nax.plot(x, y, '-')\nax.set_ylim(-2, 2)\nax.set_title('Press left mouse button and drag to test')\n\nax2 = fig.add_subplot(212, facecolor='#FFFFCC')\nline2, = ax2.plot(x, y, '-')\n\n\ndef onselect(xmin, xmax):\n indmin, indmax = np.searchsorted(x, (xmin, xmax))\n indmax = min(len(x) - 1, indmax)\n\n thisx = x[indmin:indmax]\n thisy = y[indmin:indmax]\n line2.set_data(thisx, thisy)\n ax2.set_xlim(thisx[0], thisx[-1])\n ax2.set_ylim(thisy.min(), thisy.max())\n fig.canvas.draw()\n\n# set useblit True on gtkagg for enhanced performance\nspan = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red'))\n\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdd3b9a4dfaf860dbecb11eff6a3046af207b0d | 7,247 | ipynb | Jupyter Notebook | proj/proj1.ipynb | xjiang323/CS4710 | e57ced495e27130598faf131dc67d0e5cac8b17f | [
"MIT"
] | null | null | null | proj/proj1.ipynb | xjiang323/CS4710 | e57ced495e27130598faf131dc67d0e5cac8b17f | [
"MIT"
] | null | null | null | proj/proj1.ipynb | xjiang323/CS4710 | e57ced495e27130598faf131dc67d0e5cac8b17f | [
"MIT"
] | null | null | null | 22.367284 | 77 | 0.425417 | [
[
[
"input_string = input('Enter a string: ')\n\nhhh=[]\nfor i in input_string:\n if i in hhh:\n print(i)\n break\n else:\n hhh.append(i)",
"Enter a string: jfkdjkfsjlk\nj\n"
],
[
"fhand = input('Enter a string: ')\ncount = 0\n\nvowels = set('aeiou')\nfor i in fhand:\n if i in vowels:\n count = count + 1\n print(i)\n else:\n pass\nprint(count)",
"Enter a string: aeioudklsls\na\ne\ni\no\nu\n5\n"
],
[
"string1 = input('Enter a string: ')\nstring2 = input('Enter a another string: ')\n\nif len(string1) < 2 or len(string2) < 2:\n raise Exception('The length of string should be longer than 2.')\n\ns1 = string1[2:]\ns2 = string2[2:]\n\nprint(s1+s2)",
"Enter a string: flksjlkfjslfjsl\nEnter a another string: rieorioeiroei\nksjlkfjslfjsleorioeiroei\n"
],
[
"row = input('Enter a string: ')\nstring = str(row)\n\ndict = {}\nfor index,value in enumerate(string):\n dict[value]=index+1\n\nitems = dict.items()\nitems = sorted(items)\nfor key,value in items:\n print(\"pos(\" + str(key) + \") =\" + str(value))",
"Enter a string: aieusl\npos(a) =1\npos(e) =3\npos(i) =2\npos(l) =6\npos(s) =5\npos(u) =4\n"
],
[
"string = input('Enter a list: ')\nlst = list(string)\n\ndict = {}\nfor index,value in enumerate(lst):\n dict[value]=index+1\n\nitems = dict.items()\nitems = sorted(items)\nfor key,value in items:\n print(\"pos(\" + str(key) + \") =\" + str(value))",
"Enter a list: aiueopq\npos(a) =1\npos(e) =4\npos(i) =2\npos(o) =5\npos(p) =6\npos(q) =7\npos(u) =3\n"
],
[
"string1 = input('Enter a string: ')\nstring2 = input('Enter another string: ')\n\n# the time complexity of my algorithm is O(m+n)\n\ndef isCommenNum(s1,s2):\n set2 = set(s2)\n for i in s1:\n if i in set2:\n print('True')\n return\n else:\n pass\n\nisCommenNum(string1,string2)",
"Enter a string: isoakajf\nEnter another string: jkiaoa\nTrue\n"
],
[
"lst = []\nwhile True:\n string = input('Enter a number.Input empty to quit.')\n if len(string) != 0:\n lst.append(string)\n else:\n break\n\nif len(lst) % 2 == 1:\n tmp = lst[:-1:2]\n lst[:-1:2] = lst[1::2]\n lst[1::2] = tmp\nelse:\n tmp = lst[::2]\n lst[::2] = lst[1::2]\n lst[1::2] = tmp\nprint(lst)",
"Enter a number.Input empty to quit.12\nEnter a number.Input empty to quit.23\nEnter a number.Input empty to quit.4\nEnter a number.Input empty to quit.45\nEnter a number.Input empty to quit.56\nEnter a number.Input empty to quit.\n['23', '12', '45', '4', '56']\n"
],
[
"string = ''\nfor i in range(10):\n string = ''\n for j in range(10):\n value = i * j\n string = string + str(value) + ' '\n print(string)",
"0 0 0 0 0 0 0 0 0 0 \n0 1 2 3 4 5 6 7 8 9 \n0 2 4 6 8 10 12 14 16 18 \n0 3 6 9 12 15 18 21 24 27 \n0 4 8 12 16 20 24 28 32 36 \n0 5 10 15 20 25 30 35 40 45 \n0 6 12 18 24 30 36 42 48 54 \n0 7 14 21 28 35 42 49 56 63 \n0 8 16 24 32 40 48 56 64 72 \n0 9 18 27 36 45 54 63 72 81 \n"
],
[
"from numpy import *\nimport numpy as np\n\nM = np.random.randint(0,20,size=(10,10))\nN = np.random.randint(0,20,size=(10,10))\n\nC = M - N\nprint(C)",
"[[-14 10 10 6 3 11 17 8 5 -3]\n [ -5 0 -14 7 -9 -1 2 2 -8 -5]\n [ -3 -7 15 0 0 12 -9 -1 -4 -7]\n [ -6 -15 -11 11 9 1 -1 -14 7 -7]\n [ 3 -6 -5 -4 8 10 -1 -3 2 -15]\n [ 5 17 10 -6 -10 14 -19 -12 1 -5]\n [ 11 -6 2 16 11 -3 1 12 5 -14]\n [ 9 -10 -1 18 -4 -17 -5 -13 11 -9]\n [ -8 4 -1 -16 -10 1 -11 4 15 6]\n [ 7 11 -8 8 0 4 1 -7 -4 6]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdd6b70917cde7654ee7145dcef9bc57e01b4eb | 1,838 | ipynb | Jupyter Notebook | tests.ipynb | cmcmaster1/simclr_hep2 | 633023e6525761286b764442e41827e7f0740e26 | [
"MIT"
] | null | null | null | tests.ipynb | cmcmaster1/simclr_hep2 | 633023e6525761286b764442e41827e7f0740e26 | [
"MIT"
] | null | null | null | tests.ipynb | cmcmaster1/simclr_hep2 | 633023e6525761286b764442e41827e7f0740e26 | [
"MIT"
] | null | null | null | 20.197802 | 88 | 0.501632 | [
[
[
"import loss\nimport torch",
"_____no_output_____"
],
[
"I = torch.tensor([[1.0, 2.0], [3.0, -2.0], [1.0, 5.0]], requires_grad=True)\nJ = torch.tensor([[1.0, 0.75], [2.8, -1.75], [1.0, 4.7]], requires_grad=True)\n\ncontrastive_loss = loss.ContrastiveLoss(3, 1.0)\nnt_xent = loss.NT_Xent(3, 1.0, 'cuda', 1)",
"_____no_output_____"
],
[
"%%timeit\ncontrastive_loss(I, J)",
"286 µs ± 7.95 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
],
[
"%%timeit\nnt_xent(I, J)",
"261 µs ± 4.73 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
ecdd7f0b451067c22bee40d7e7c011c9ce2b2e98 | 13,052 | ipynb | Jupyter Notebook | python-for-data/Ex03 - Booleans and Conditionals.ipynb | interphuoc/atom-assignments | ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5 | [
"MIT"
] | null | null | null | python-for-data/Ex03 - Booleans and Conditionals.ipynb | interphuoc/atom-assignments | ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5 | [
"MIT"
] | null | null | null | python-for-data/Ex03 - Booleans and Conditionals.ipynb | interphuoc/atom-assignments | ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5 | [
"MIT"
] | null | null | null | 27.248434 | 270 | 0.572096 | [
[
[
"# Exercise 03 - Booleans and Conditionals",
"_____no_output_____"
],
[
"## 1. Simple Function with Conditionals\n\nMany programming languages have [sign](https://en.wikipedia.org/wiki/Sign_function) available as a built-in function. Python does not, but we can define our own!\n\nIn the cell below, define a function called `sign` which takes a numerical argument and returns -1 if it's negative, 1 if it's positive, and 0 if it's 0.",
"_____no_output_____"
]
],
[
[
"def sign(x):\n if x > 0:\n return 1\n elif x < 0:\n return -1\n else:\n return 0\nsign(2) ",
"_____no_output_____"
]
],
[
[
"## 2. Singular vs Plural Nouns\n\nWe've decided to add \"print\" to our `to_smash` function from Exercise 02",
"_____no_output_____"
]
],
[
[
"def to_smash(total_candies):\n \"\"\"Return the number of leftover candies that must be smashed after distributing\n the given number of candies evenly between 3 friends.\n \n >>> to_smash(91)\n 1\n \"\"\"\n print(\"Splitting\", total_candies, \"candies\")\n return total_candies % 3\n\nto_smash(91)",
"Splitting 91 candies\n"
]
],
[
[
"What happens if we call it with `total_candies = 1`?",
"_____no_output_____"
]
],
[
[
"to_smash(1)",
"Splitting 1 candies\n"
]
],
[
[
"**Wrong grammar there!**\n\nModify the definition in the cell below to correct the grammar of our print statement.\n\n**Your Task:**\n> If there's only one candy, we should use the singular \"candy\" instead of the plural \"candies\"",
"_____no_output_____"
]
],
[
[
"def to_smash(total_candies):\n \"\"\"Return the number of leftover candies that must be smashed after distributing\n the given number of candies evenly between 3 friends.\n \n >>> to_smash(91)\n 1\n \"\"\"\n if total_candies == 1:\n print(\"Splitting\", total_candies, \"candy\")\n else: \n print(\"Splitting\", total_candies, \"candies\")\n return total_candies % 3\n\nto_smash(91)\nto_smash(1)",
"Splitting 91 candies\nSplitting 1 candy\n"
]
],
[
[
"## 3. Checking weather again\n\nIn the main lesson we talked about deciding whether we're prepared for the weather. I said that I'm safe from today's weather if...\n- I have an umbrella...\n- or if the rain isn't too heavy and I have a hood...\n- otherwise, I'm still fine unless it's raining *and* it's a workday\n\nThe function below uses our first attempt at turning this logic into a Python expression. I claimed that there was a bug in that code. Can you find it?\n\nTo prove that `prepared_for_weather` is buggy, come up with a set of inputs where it returns the wrong answer.",
"_____no_output_____"
]
],
[
[
"def prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday):\n # Don't change this code. Our goal is just to find the bug, not fix it!\n return have_umbrella or (rain_level < 5 and have_hood) or not (rain_level > 0 and is_workday) # Prioritize 'and' before 'or' so take parentheses\n\n# Change the values of these inputs so they represent a case where prepared_for_weather\n# returns the wrong answer.\nhave_umbrella = False\nrain_level = 10.0\nhave_hood = True\nis_workday = True\n\n# Check what the function returns given the current values of the variables above\nactual = prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday)\nprint(actual)",
"False\n"
]
],
[
[
"## 4. Start being lazy...\n\nThe function `is_negative` below is implemented correctly \n- It returns True if the given number is negative and False otherwise.\n\nHowever, it's more verbose than it needs to be. We can actually reduce the number of lines of code in this function by *75%* while keeping the same behaviour. \n\n**Your task:**\n> See if you can come up with an equivalent body that uses just **one line** of code, and put it in the function `concise_is_negative`. (HINT: you don't even need Python's ternary syntax)",
"_____no_output_____"
]
],
[
[
"def is_negative(number):\n if number < 0:\n return True\n else:\n return False\n\ndef concise_is_negative(number):\n return number < 0\n\nconcise_is_negative(-9)",
"_____no_output_____"
]
],
[
[
"## 5. Adding Toppings\n\nThe boolean variables `ketchup`, `mustard` and `onion` represent whether a customer wants a particular topping on their hot dog. We want to implement a number of boolean functions that correspond to some yes-or-no questions about the customer's order. For example:",
"_____no_output_____"
]
],
[
[
"def onionless(ketchup, mustard, onion):\n \"\"\"Return whether the customer doesn't want onions.\n \"\"\"\n return not onion",
"_____no_output_____"
]
],
[
[
"**Your task:**\n> For each of the remaining functions, fill in the body to match the English description in the docstring. ",
"_____no_output_____"
]
],
[
[
"def wants_all_toppings(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants \"the works\" (all 3 toppings)\n \"\"\"\n return ketchup and mustard and onion\nprint(wants_all_toppings(True, False, True))\nprint(wants_all_toppings(True, True, True))",
"False\nTrue\n"
],
[
"def wants_plain_hotdog(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants a plain hot dog with no toppings.\n \"\"\"\n return not (ketchup or mustard or onion) # mean (not ketchup) and (not mustard) and (not onion)\nprint(wants_plain_hotdog(True, False, False))\nprint(wants_plain_hotdog(False, False, False))",
"False\nTrue\n"
],
[
"def exactly_one_sauce(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants either ketchup or mustard, but not both.\n (You may be familiar with this operation under the name \"exclusive or\")\n \"\"\"\n return (ketchup and not mustard) or (not ketchup and mustard)\nprint(exactly_one_sauce(True, False, True))\nprint(exactly_one_sauce(True, True, True))",
"True\nFalse\n"
]
],
[
[
"## 6. <span title=\"A bit spicy\" style=\"color: darkgreen \">🌶️</span>\n\nWe’ve seen that calling `bool()` on an integer returns `False` if it’s equal to 0 and `True` otherwise. What happens if we call `int()` on a bool? Try it out in the notebook cell below.\n\nCan you take advantage of this to write a succinct function that corresponds to the English sentence \"*Does the customer want exactly one topping?*\"?\n\n> *HINT*: You may have already found that `int(True)` is `1`, and `int(False)` is `0`. Think about what kinds of basic arithmetic operations you might want to perform on ketchup, mustard, and onion after converting them to integers.",
"_____no_output_____"
]
],
[
[
"def exactly_one_topping(ketchup, mustard, onion):\n \"\"\"Return whether the customer wants exactly one of the three available toppings\n on their hot dog.\n \"\"\"\n return int(ketchup) + int(mustard) + int(onion) == 1\nprint(exactly_one_topping(True, False, True))\nprint(exactly_one_topping(True, False, False))",
"False\nTrue\n"
]
],
[
[
"# Keep Going 💪",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecddb5994e371a4e4c5cd3c7299c13cdf1d3142a | 508,433 | ipynb | Jupyter Notebook | psytrack/examples/ExampleNotebook.ipynb | dlamay/psytrack | 58e1460eac3520a111dcddf72aeead26af1de728 | [
"MIT"
] | null | null | null | psytrack/examples/ExampleNotebook.ipynb | dlamay/psytrack | 58e1460eac3520a111dcddf72aeead26af1de728 | [
"MIT"
] | null | null | null | psytrack/examples/ExampleNotebook.ipynb | dlamay/psytrack | 58e1460eac3520a111dcddf72aeead26af1de728 | [
"MIT"
] | null | null | null | 477.401878 | 108,580 | 0.937671 | [
[
[
"# Psytrack Tutorial",
"_____no_output_____"
],
[
"Link to paper: http://pillowlab.princeton.edu/pubs/Roy18_NeurIPS_dynamicPsychophys.pdf\n\nLast updated: February 16, 2020\n\nPsytrack version: 1.2.0\n\n---",
"_____no_output_____"
],
[
"### Initialization",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.rcParams['figure.dpi'] = 140\n\nimport psytrack as psy",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
]
],
[
[
"# Quick Demonstration\n\nThis is a quick, 2 minute _demonstration_ of Psytrack with a simulated dataset.\n\n\\*\\* **Please find a much more in-depth tutorial using real data below** \\*\\*\n\n---\n\n### Generate simulated data\n\nThis includes generating psychometric weights ${\\bf w}$, as well as the inputs ${\\bf x}$ (sampled from a standard normal) and choices ${\\bf y}$ (sampled according to the model using the weights and inputs).",
"_____no_output_____"
]
],
[
[
"seed = 31\nnum_weights = 4\nnum_trials = 5000\nhyper = {'sigma' : 2**np.array([-4.0,-5.0,-6.0,-7.0]),\n 'sigInit' : 2**np.array([ 0.0, 0.0, 0.0, 0.0])}\n\n# Simulate\nsimData = psy.generateSim(K=num_weights, N=num_trials, hyper=hyper,\n boundary=6.0, iterations=1, seed=seed, savePath=None)\n\n# Plot\npsy.plot_weights(simData['W'].T);\nplt.ylim(-3.6,3.6);",
"_____no_output_____"
]
],
[
[
"### Recover the weights from the simulated behavior\n\nThat is, given inputs ${\\bf x}$ and choices ${\\bf y}$, recover the psychometric weights ${\\bf w}$.\n\n_Note: This takes approximately 60 seconds_",
"_____no_output_____"
]
],
[
[
"rec = psy.recoverSim(simData)",
"_____no_output_____"
]
],
[
[
"### Plot the true weights from above (now in black) with the recovered weights (in color)\n\nRecovered weights also have shading to indicate a 95\\% credible interval.",
"_____no_output_____"
]
],
[
[
"psy.plot_weights(rec['wMode'], errorbar=rec[\"hess_info\"][\"W_std\"])\nplt.plot(simData['W'], c=\"k\", ls=\"-\", alpha=0.5, lw=0.75, zorder=0)\nplt.ylim(-3.6,3.6);",
"_____no_output_____"
]
],
[
[
"### Plot recovered smoothness hyperparameters $\\sigma_k$ over the true hyperparameters (black lines)\n\nRecovered hyperparameters plotted with $\\pm$2SE bars.",
"_____no_output_____"
]
],
[
[
"true_sigma = np.log2(rec['input']['sigma'])\navg_sigma = np.log2(rec['hyp']['sigma'])\nerr_sigma = rec['hess_info']['hyp_std']\n\nplt.figure(figsize=(2,2))\ncolors = np.unique(list(psy.COLORS.values()))\nfor i in range(num_weights):\n plt.plot(i, true_sigma[i], color=\"black\", marker=\"_\", markersize=12, zorder=0)\n plt.errorbar([i], avg_sigma[i], yerr=2*err_sigma[i], color=colors[i], lw=1, marker='o', markersize=5)\n\nplt.xticks([0,1,2,3]); plt.yticks(np.arange(-8,-2))\nplt.gca().set_xticklabels([r\"$\\sigma_1$\", r\"$\\sigma_2$\", r\"$\\sigma_3$\", r\"$\\sigma_4$\"])\nplt.xlim(-0.5,3.5); plt.ylim(-7.5,-3.5)\nplt.gca().spines['right'].set_visible(False)\nplt.gca().spines['top'].set_visible(False)\nplt.ylabel(r\"$\\log_2(\\sigma)$\");",
"_____no_output_____"
]
],
[
[
"---\n\n---\n\n# Full Tutorial with Real Data\n\nDatasets handled by Psytrack are specific to an individual animal and are stored as a Python dictionary",
"_____no_output_____"
]
],
[
[
"from numpy import load\n\ndata = load('sampleRatData.npz',allow_pickle=True)\nlst = data.files\nfor item in lst:\n print(item)\n print(data[item])",
"D\n{'name': 'W053', 'y': array([2., 2., 2., ..., 1., 1., 2.]), 'answer': array([2, 2, 2, ..., 2, 1, 1]), 'correct': array([1., 1., 1., ..., 0., 1., 0.]), 'dayLength': array([199, 228, 377, 344, 400, 370, 219, 363, 368, 341, 186, 331, 284,\n 393, 319, 349, 172, 254, 173, 283, 269, 89, 158, 378, 248, 251,\n 200, 178, 341, 323, 369, 383, 237, 246, 312, 214, 327, 325, 370,\n 318, 280, 303, 233, 344, 322, 91, 258, 335, 292, 350, 398, 201,\n 146, 151, 138, 173, 172, 110, 89, 54, 251, 303, 280, 317, 247,\n 136, 164, 118, 145, 237, 271, 259, 158, 183, 158, 164, 207, 152,\n 145, 176]), 'inputs': {'h': array([[-1., 0.],\n [ 1., -1.],\n [ 1., 1.],\n ...,\n [-1., -1.],\n [ 1., -1.],\n [-1., 1.]]), 'r': array([[-1., 0.],\n [ 1., -1.],\n [ 1., 1.],\n ...,\n [ 1., 1.],\n [-1., 1.],\n [ 1., -1.]]), 'c': array([[ 1., 0.],\n [ 1., 1.],\n [ 1., 1.],\n ...,\n [-1., -1.],\n [-1., -1.],\n [-1., -1.]]), 's1': array([[-0.04184135, -0.04184135],\n [ 1.51889035, -0.04184135],\n [ 0.73846325, 1.51889035],\n ...,\n [ 1.59603762, -1.39664247],\n [ 0.84765711, 1.59603762],\n [ 0.10037948, 0.84765711]]), 's2': array([[-0.82166801, 0.73846325],\n [ 0.73846325, -0.82166801],\n [-0.04184135, 0.73846325],\n ...,\n [ 0.84765711, -0.64849422],\n [ 1.59603762, 0.84765711],\n [ 0.84765711, 1.59603762]]), 's_avg': array([[ 0.34831095, 0. ],\n [-0.43175468, 0.34831095],\n [ 1.1286768 , -0.43175468],\n ...,\n [-1.02256834, 0.4740183 ],\n [ 1.22184736, -1.02256834],\n [ 1.22184736, 1.22184736]])}}\n"
],
[
"# Extract premade dataset from npz\nD = np.load('sampleRatData.npz', allow_pickle=True)['D'].item()\n\nprint(\"The keys of the dict for this example animal:\\n \", list(D.keys()))",
"The keys of the dict for this example animal:\n ['name', 'y', 'answer', 'correct', 'dayLength', 'inputs']\n"
]
],
[
[
"Of these keys, only `y` and `inputs` are **required** for analysis of the dataset, all others are *optional*.\n\n---\n\n`y` should be a 1D array of the animal's choice on each trial. Currently, the analysis only works for two-alternative forced choice tasks, and so there should only be two options on each trial (error or omission trials are typically discarded from the analysis).\n\nThe two options (A or B, Left or Right, etc.) must be mapped to {1, 2} _or_ {0, 1} in `y` (not -1 and +1). In this example, Left=1 and Right=2.",
"_____no_output_____"
]
],
[
[
"print(\"The shape of y: \", D['y'].shape)\nprint(\"The number of trials: N =\", D['y'].shape[0])\nprint(\"The unique entries of y: \", np.unique(D['y']))",
"The shape of y: (20000,)\nThe number of trials: N = 20000\nThe unique entries of y: [1. 2.]\n"
]
],
[
[
"---\n\n`inputs` is itself another dictionary, containing arbitrary keys. Each of these keys represents a _potential_ input into the model and must be a 2D array of shape $(N, M)$ where $N$ is the number of trials. The number of columns $M$ is arbitrary, and the $i^{th}$ column is typically used to encode information from $i$ time steps previous.\n\nFor example, in our example data set the key `s1` encodes the (normalized) stimulus values heard on each trial. `s1[7,0]` would encode the stimulus heard on the 7th trial where as both `s1[6,0]` and `s1[7,1]` would encode the stimulus heard on the 6th trial. The information is redundant, but allows for all feasible regressors to predicting behavior of trial $i$ to be accessible by referencing the $i^{th}$ row of the respective input array.",
"_____no_output_____"
]
],
[
[
"D['inputs']['h']",
"_____no_output_____"
],
[
"print(\"The keys of inputs:\\n \", list(D['inputs'].keys()))\n\nprint(\"\\nThe shape of s1:\", D['inputs']['s1'].shape)\nprint(\"s1[7] : \", D['inputs']['s1'][7])\nprint(\"s1[6,0] : \", D['inputs']['s1'][6,0])\nprint(\"s1[7,1] : \", D['inputs']['s1'][7,1])",
"The keys of inputs:\n ['h', 'r', 'c', 's1', 's2', 's_avg']\n\nThe shape of s1: (20000, 2)\ns1[7] : [-0.04184135 0.73846325]\ns1[6,0] : 0.738463248564285\ns1[7,1] : 0.738463248564285\n"
]
],
[
[
"---\n\nOther keys are for convenience and are _optional_ : `name` stores the name of the animal, `answer` is an easy reference as to what the correct choice was on a given trial, and `correct` is an easy reference as to whether the animal made the correct choice on a given trial. The model only needs to know what the animal _actually_ did, not what the animal _ought_ to have done!\n\n`dayLength` is an array storing the number of trials that occurred in each session of training. Taking a cumulative sum will give you the indices at which each new session begins. This is **not** optional for the analysis if one wishes to use the `sigmaDay` functionality (see Section 3.3 in paper).",
"_____no_output_____"
],
[
"---\n\n## Fitting the data\n\nOnce you have your data arranged in the proper format, you can now run the analysis! \n\nThe fitting function is called `hyperOpt()` and before using it, you must decide on 3 inputs:\n\n 1) `weights` : which of your inputs should you fit.\n\n 2) `hyper` : what hyperparameters should your model have and how should they be initialized.\n\n 3) `optList` : what subset of the hyperparameters should be optimized.",
"_____no_output_____"
],
[
"`weights` is a dictionary where the keys correspond to the keys in your dataset's `inputs` dictionary; the key values are a non-negative integer indicating how many of the columns of that value in `inputs` should be used for fitting the model, where each included column corresponds to a new weight. You can also include in `weights` the special key `bias` which need not be included in `inputs` --- this will simply create an input of all 1s.",
"_____no_output_____"
]
],
[
[
"weights = {'bias': 1, # a special key\n 's1': 1, # use only the first column of s1 from inputs\n 's2': 1} # use only the first column of s2 from inputs\n\n# It is often useful to have the total number of weights K in your model\nK = np.sum([weights[i] for i in weights.keys()])",
"_____no_output_____"
]
],
[
[
"`hyper` is a dictionary that indicates what hyperparameters your model will have. There are 3 types:\n\n1) `sigma` : required, controls trial-to-trial variability.\n\n2) `sigInit` : optional, controls the variability on the very first trial (e.g. how close weights must initialize to 0). It is often best to include this hyperparameter and set it to a high value, as you often prefer your data to determine where the weights ought to initialize. Otherwise, `sigInit` will be set equal to `sigma`.\n\n3) `sigDay` : optional, controls variability between sessions (e.g. between the last trial of one session and the first trial of the next session). If this key is included, then your dataset must also have the key `dayLength` (as described above), to indicate the trials where the `sigDay` variability should supercede the standard `sigma` variability.\n\nFor each hyperparameter key included in the `hyper` dictionary, the corresponding value is the initial value of the hyperparameter. If you are optimizing over a particular hyperparameter (see the `optList` setting below), than the initial value is not so important as the fitting procedure will eventually converge to the optimal setting. However, if you are *not* optimizing, then the initial value set will be the *fixed* value of the hyperparameter.\n\nFinally, for each hyperparameter key in `hyper`, you must specify your initializations as a 1D array with length $K$. If you instead provide only a single value, then the optimizer will assume that you want the same hyperparameter to apply to every weight (as opposed to each weight having it's own).",
"_____no_output_____"
]
],
[
[
"hyper= {'sigInit': 2**4., # Set to a single, large value for all weights. Will not be optimized further.\n 'sigma': [2**-4.]*K, # Each weight will have it's own sigma optimized, but all are initialized the same\n 'sigDay': None} # Indicates that session boundaries will be ignored in the optimization",
"_____no_output_____"
]
],
[
[
"`optList` is a list of the subset of hyperparameters in `hyper` that you wish to optimize over in your model. It is typically unnecessary to optimize over `sigInit` -- a single, large, fixed value is usually best. Thus, there are 4 reasonable ways to specify `optList`:\n\n1) `optList = []` : this means that none of the hyperparameters in `hyper` will be optimized. The optimizer will find the best set of weight trajectories given the fixed hyperparameter values specified.\n\n2) `optList = ['sigma']` : only the `sigma` hyperparameter will be optimized (as we will do below). If `sigDay` is included in `hyper`, then this means that the model _will_ include `sigDay`, but that the initial value specified will not be optimized.\n\n3) `optList = ['sigDay']` : only the `sigDay` hyperparameter will be optimized, while the `sigma` hyperparameter remains fixed.\n\n4) `optList = ['sigma', 'sigDay']` : both the `sigma` and `sigDay` hyperparameters will be optimized.",
"_____no_output_____"
]
],
[
[
"optList = ['sigma']",
"_____no_output_____"
]
],
[
[
"Now that we have specified `weights`, `hyper`, and `optList`, we can fit our model with `hyperOpt()`! The function takes your dataset `D` plus the three additional inputs, and returns 4 things:\n\n1) `hyp` : a dictionary of the optimized hyperparameters\n\n2) `evd` : the approximate log-evidence of the optimized model\n\n3) `wMode` : the weight trajectories of the optimized model\n\n4) `hess_info` : a dictionary of sparse terms that relate to the Hessian of the optimal model. By default, this also includes the posterior credible intervals on the weights, under the key `W_std`. This behavior can be altered by changing the optional argument `hess_calc` in `hyperOpt()` (see function documentation for more details).\n\nRun times will depend on the number of trials $N$ and weights $K$, as well as the number of hyperparameters being fit. To speed things up a bit, we will use the `trim` function to shrink our dataset of 20K trials to just the first 10K.\n\n_Note: this should take < 60 seconds._",
"_____no_output_____"
]
],
[
[
"new_D = psy.trim(D, END=10000) # trim dataset to first 10,000 trials",
"_____no_output_____"
],
[
"new_D",
"_____no_output_____"
],
[
"hyp, evd, wMode, hess_info = psy.hyperOpt(new_D, hyper, weights, optList)",
"_____no_output_____"
],
[
"hyp",
"_____no_output_____"
],
[
"evd",
"_____no_output_____"
],
[
"wMode",
"_____no_output_____"
],
[
"hess_info",
"_____no_output_____"
]
],
[
[
"---\n### Visualizing the results\n\nPsytrack includes a few plotting functions for visualizing the results of the fit. To see the weight trajectories, use `plot_weights`.",
"_____no_output_____"
]
],
[
[
"fig = psy.plot_weights(wMode, weights)",
"_____no_output_____"
]
],
[
[
"Adding credible intervals on the weights can be done with the `errorbar` keyword argument, and vertical lines indicating session boundaries can be added with `days`. Adjustments to the resulting plot can be made by editing the figure returned by the function directly.",
"_____no_output_____"
]
],
[
[
"fig = psy.plot_weights(wMode, weights, days=new_D[\"dayLength\"], errorbar=hess_info[\"W_std\"])",
"_____no_output_____"
]
],
[
[
"We can also generate two additional plots with useful information: \n\n1) A performance plot, tracking the animal's task accuracy, smoothed over trials\n\n2) A bias plot, tracking the animal's choice bias, smoothed over trials\n\nBoth of these plots will calculate their respective values directly from the data, with 2SD error bars.",
"_____no_output_____"
]
],
[
[
"fig_perf = psy.plot_performance(new_D)\nfig_bias = psy.plot_bias(new_D)",
"_____no_output_____"
]
],
[
[
"---\n### Cross-validation\n\nThe performance and bias plots above use the data directly to plot the *empirical* acuracy and bias of the animal. We can use our model to make predictions about the animal's accuracy and bias using the weight trajectories found by the model, to see if there is agreement. However, to make true predictions, we need to make predictions on trials that were held-out from the fitting procedure.\n\nWe can do this using the built-in cross-validation functions `crossValidate`. This is also useful if you'd like to compare different models via cross-validated log-likelihood, rather than approximate model evidence.\n\nSimilar to `hyperOpt`, `crossValidate` receives the same inputs as well an additional input `F`, which controls the number of folds in the cross-validation procedure (note that `F` must cleanly divide the number of trials in the dataset `N` -- the `trim` function can be used to make this work). `crossValidate` will then divide the dataset into `F` training and testing datasets, fitting each of the `F` training data sets. It will return `xval_logli`, the total cross-validated log-likelihood of the `F` test sets, as well `xval_pL`, the cross-validated $P(y=0)$ for each trial.\n\n_Note: Since we're fitting `F` models, this can be fairly time consuming. This particular example should take about 10 minutes._",
"_____no_output_____"
]
],
[
[
"xval_logli, xval_pL = psy.crossValidate(new_D, hyper, weights, optList, F=10, seed=41)",
"Running xval fold 10 of 10"
],
[
"xval_logli",
"_____no_output_____"
],
[
"xval_pL",
"_____no_output_____"
]
],
[
[
"Using the cross-validated prediction that the animal will go Left on each trial, `xval_pL`, we can overlay the plots of the empirical performance and bias above with a line showing the model's prediction. Fortunately, we see a close agreement!",
"_____no_output_____"
]
],
[
[
"fig_perf_xval = psy.plot_performance(new_D, xval_pL=xval_pL)\nfig_bias_xval = psy.plot_bias(new_D, xval_pL=xval_pL)",
"_____no_output_____"
]
],
[
[
"---\n## End of Tutorial\n\nPlease post any questions to the github, and thanks for using Psytrack!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecddc3ff5db13d94036b171ad7f3da761e39c655 | 6,622 | ipynb | Jupyter Notebook | files/math-573/math-573-plots.ipynb | israeldi/quantlab | 6d5105d65c7d19190b8cda9a1ec7c9cb77e1d3d7 | [
"MIT"
] | null | null | null | files/math-573/math-573-plots.ipynb | israeldi/quantlab | 6d5105d65c7d19190b8cda9a1ec7c9cb77e1d3d7 | [
"MIT"
] | null | null | null | files/math-573/math-573-plots.ipynb | israeldi/quantlab | 6d5105d65c7d19190b8cda9a1ec7c9cb77e1d3d7 | [
"MIT"
] | null | null | null | 84.897436 | 5,092 | 0.853821 | [
[
[
"%pylab inline\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfig = pylab.figure()\nax = fig.add_subplot(1,1,1)\nplt.plot([1,2,3], [4,3, 1], 'ro')\nplt.axis([0, 4, 0, 5])\nplt.xlabel('K')\nplt.ylabel('C(K)')\nax.set_xticks(range(0, 5))\npp = PdfPages('call-arb.pdf')\npp.savefig()\npp.close()\nplt.show()\n",
"Populating the interactive namespace from numpy and matplotlib\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
ecddcf436918191eda979ece1cb486c25874029b | 16,260 | ipynb | Jupyter Notebook | jupyterhub/notebooks/zz_under_construction/zz_old/TensorFlow/Tutorials/udacity/4_convolutions.ipynb | just4jc/pipeline | 3c7a4fa59c6363833766d2b55fa55ace6b6af351 | [
"Apache-2.0"
] | 1,040 | 2016-08-07T02:27:28.000Z | 2022-03-30T02:31:40.000Z | jupyterhub/notebooks/zz_under_construction/zz_old/TensorFlow/Tutorials/udacity/4_convolutions.ipynb | just4jc/pipeline | 3c7a4fa59c6363833766d2b55fa55ace6b6af351 | [
"Apache-2.0"
] | 38 | 2016-11-17T08:43:51.000Z | 2019-11-12T12:27:04.000Z | jupyterhub/notebooks/zz_under_construction/zz_old/TensorFlow/Tutorials/udacity/4_convolutions.ipynb | just4jc/pipeline | 3c7a4fa59c6363833766d2b55fa55ace6b6af351 | [
"Apache-2.0"
] | 1,102 | 2016-08-07T02:27:24.000Z | 2022-03-31T16:18:48.000Z | 34.892704 | 228 | 0.49182 | [
[
[
"Deep Learning\n=============\n\nAssignment 4\n------------\n\nPreviously in `2_fullyconnected.ipynb` and `3_regularization.ipynb`, we trained fully connected networks to classify [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) characters.\n\nThe goal of this assignment is make the neural network convolutional.",
"_____no_output_____"
]
],
[
[
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range",
"_____no_output_____"
],
[
"pickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)",
"Training set (200000, 28, 28) (200000,)\nValidation set (10000, 28, 28) (10000,)\nTest set (18724, 28, 28) (18724,)\n"
]
],
[
[
"Reformat into a TensorFlow-friendly shape:\n- convolutions need the image data formatted as a cube (width by height by #channels)\n- labels as float 1-hot encodings.",
"_____no_output_____"
]
],
[
[
"image_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\n\nimport numpy as np\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)",
"Training set (200000, 28, 28, 1) (200000, 10)\nValidation set (10000, 28, 28, 1) (10000, 10)\nTest set (18724, 28, 28, 1) (18724, 10)\n"
],
[
"def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])",
"_____no_output_____"
]
],
[
[
"Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.",
"_____no_output_____"
]
],
[
[
"batch_size = 16\npatch_size = 5\ndepth = 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n logits = model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))",
"_____no_output_____"
],
[
"num_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print('Initialized')\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))",
"Initialized\nMinibatch loss at step 0 : 3.51275\nMinibatch accuracy: 6.2%\nValidation accuracy: 12.8%\nMinibatch loss at step 50 : 1.48703\nMinibatch accuracy: 43.8%\nValidation accuracy: 50.4%\nMinibatch loss at step 100 : 1.04377\nMinibatch accuracy: 68.8%\nValidation accuracy: 67.4%\nMinibatch loss at step 150 : 0.601682\nMinibatch accuracy: 68.8%\nValidation accuracy: 73.0%\nMinibatch loss at step 200 : 0.898649\nMinibatch accuracy: 75.0%\nValidation accuracy: 77.8%\nMinibatch loss at step 250 : 1.3637\nMinibatch accuracy: 56.2%\nValidation accuracy: 75.4%\nMinibatch loss at step 300 : 1.41968\nMinibatch accuracy: 62.5%\nValidation accuracy: 76.0%\nMinibatch loss at step 350 : 0.300648\nMinibatch accuracy: 81.2%\nValidation accuracy: 80.2%\nMinibatch loss at step 400 : 1.32092\nMinibatch accuracy: 56.2%\nValidation accuracy: 80.4%\nMinibatch loss at step 450 : 0.556701\nMinibatch accuracy: 81.2%\nValidation accuracy: 79.4%\nMinibatch loss at step 500 : 1.65595\nMinibatch accuracy: 43.8%\nValidation accuracy: 79.6%\nMinibatch loss at step 550 : 1.06995\nMinibatch accuracy: 75.0%\nValidation accuracy: 81.2%\nMinibatch loss at step 600 : 0.223684\nMinibatch accuracy: 100.0%\nValidation accuracy: 82.3%\nMinibatch loss at step 650 : 0.619602\nMinibatch accuracy: 87.5%\nValidation accuracy: 81.8%\nMinibatch loss at step 700 : 0.812091\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.4%\nMinibatch loss at step 750 : 0.276302\nMinibatch accuracy: 87.5%\nValidation accuracy: 82.3%\nMinibatch loss at step 800 : 0.450241\nMinibatch accuracy: 81.2%\nValidation accuracy: 82.3%\nMinibatch loss at step 850 : 0.137139\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.3%\nMinibatch loss at step 900 : 0.52664\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.2%\nMinibatch loss at step 950 : 0.623835\nMinibatch accuracy: 87.5%\nValidation accuracy: 82.1%\nMinibatch loss at step 1000 : 0.243114\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.9%\nTest accuracy: 90.0%\n"
]
],
[
[
"---\nProblem 1\n---------\n\nThe convolutional model above uses convolutions with stride 2 to reduce the dimensionality. Replace the strides by a max pooling operation (`nn.max_pool()`) of stride 2 and kernel size 2.\n\n---",
"_____no_output_____"
],
[
"---\nProblem 2\n---------\n\nTry to get the best performance you can using a convolutional net. Look for example at the classic [LeNet5](http://yann.lecun.com/exdb/lenet/) architecture, adding Dropout, and/or adding learning rate decay.\n\n---",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
ecddd42780c273f5dd27bbe23098c10b0296f80a | 9,936 | ipynb | Jupyter Notebook | Copy_of_Python_Workshop_Moringa.ipynb | elly4success/AllCourseExercises | f0535973f37eecc85952ab1eb8fe2595a1eea39f | [
"MIT"
] | null | null | null | Copy_of_Python_Workshop_Moringa.ipynb | elly4success/AllCourseExercises | f0535973f37eecc85952ab1eb8fe2595a1eea39f | [
"MIT"
] | null | null | null | Copy_of_Python_Workshop_Moringa.ipynb | elly4success/AllCourseExercises | f0535973f37eecc85952ab1eb8fe2595a1eea39f | [
"MIT"
] | null | null | null | 27.6 | 1,174 | 0.448973 | [
[
[
"<a href=\"https://colab.research.google.com/github/elly4success/AllCourseExercises/blob/master/Copy_of_Python_Workshop_Moringa.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"List\nworkshop = [\"Moringa\", \"School\", \"Ngong-Lane\", \"Learning\"]\nprint(workshop)\nworkshop[0]\nworkshop[0:2]\nworkshop.append('People')\nprint(workshop)\nworkshop.remove('Moringa')\nprint(workshop)\n\n\nage = (100, 200, 300)\ntype(age)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"Dict: Has keys and values",
"_____no_output_____"
]
],
[
[
"contact = {\"name\":\"Elly\",\n \"phone\":\"0700501255\",\n \"email\":\"[email protected]\"}\n\nprint(contact)\n",
"{'name': 'Elly', 'phone': '0700501255', 'email': '[email protected]'}\n"
]
],
[
[
"Using the \"get\" function",
"_____no_output_____"
]
],
[
[
"contact.get('email')",
"_____no_output_____"
]
],
[
[
"Using the del function in dict(cant be used in List)",
"_____no_output_____"
],
[
"syntax is del followed by var name and then the key or value",
"_____no_output_____"
]
],
[
[
"\nprint(contact)",
"{'name': 'Elly', 'phone': '0700501255', 'email': '[email protected]'}\n"
]
],
[
[
"Set: eliminating duplicate",
"_____no_output_____"
]
],
[
[
"num = [12,14, 14, 15, 16, 17, 22]\nnew_num = set(num)\nprint(new_num)",
"{12, 14, 15, 16, 17, 22}\n"
],
[
"",
"_____no_output_____"
]
],
[
[
"Create a dict with keys: 1 -5 and values:^2 of the keys",
"_____no_output_____"
]
],
[
[
"power_dict ={\"1\":1,\"2\": 4, \n \"3\": 9,\n \"4\": 16,\n \"5\": 25}\nprint(power_dict)\npower_dict[\"5\"] = 5**5\nprint(power_dict)",
"{'1': 1, '2': 4, '3': 9, '4': 16, '5': 25}\n{'1': 1, '2': 4, '3': 9, '4': 16, '5': 3125}\n"
]
],
[
[
"Modules: Pre-written code that are imported to perfom certain functions\nTo acces any function in a module, yiu call the module plus the function to perform",
"_____no_output_____"
]
],
[
[
"import math\nnumber = 100\nsqr = math.sqrt(100)\n#print(sqr)\nsquares = {1:(int(math.pow(1,2))),2:(int(math.pow(2,2))),3:(int(math.pow(3,2))),4:(int(math.pow(4,2))),5:(int(math.pow(5,2)))}\nprint(squares)",
"{1: 1, 2: 4, 3: 9, 4: 16, 5: 25}\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdddbeced382c3c3a75b9af428c5207ca505f0f | 164,882 | ipynb | Jupyter Notebook | Day 12/rows.ipynb | VasTsak/julia_100 | 0ec969824f0d80e153581c506f197c76e0e8c629 | [
"Apache-2.0"
] | 1 | 2022-02-12T21:11:40.000Z | 2022-02-12T21:11:40.000Z | Day 12/rows.ipynb | VasTsak/julia_100 | 0ec969824f0d80e153581c506f197c76e0e8c629 | [
"Apache-2.0"
] | null | null | null | Day 12/rows.ipynb | VasTsak/julia_100 | 0ec969824f0d80e153581c506f197c76e0e8c629 | [
"Apache-2.0"
] | null | null | null | 43.538949 | 2,269 | 0.474327 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecddecefae205f88bae8a32dd53a00c1b1441a65 | 19,459 | ipynb | Jupyter Notebook | examples/Train_ppo_cnn+eval_contact-II.ipynb | pleslabay/CarRacing-mod | e416d7f5d6dc49731e64d85094256c30c5f7d4b3 | [
"MIT"
] | null | null | null | examples/Train_ppo_cnn+eval_contact-II.ipynb | pleslabay/CarRacing-mod | e416d7f5d6dc49731e64d85094256c30c5f7d4b3 | [
"MIT"
] | null | null | null | examples/Train_ppo_cnn+eval_contact-II.ipynb | pleslabay/CarRacing-mod | e416d7f5d6dc49731e64d85094256c30c5f7d4b3 | [
"MIT"
] | 1 | 2020-12-29T23:03:44.000Z | 2020-12-29T23:03:44.000Z | 32.59464 | 178 | 0.545866 | [
[
[
"# Filter tensorflow version warnings\nimport os\n# https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\nimport warnings\n# https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=Warning)\nimport tensorflow as tf\ntf.get_logger().setLevel('INFO')\ntf.autograph.set_verbosity(0)\nimport logging\ntf.get_logger().setLevel(logging.ERROR)",
"_____no_output_____"
],
[
"import gym\nfrom stable_baselines.common.policies import CnnPolicy #, MlpPolicy, CnnLstmPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv \nfrom stable_baselines import PPO2\n\nfrom stable_baselines.common.evaluation import evaluate_policy as test\nfrom stable_baselines.common.callbacks import EvalCallback, StopTrainingOnRewardThreshold\n",
"_____no_output_____"
]
],
[
[
"## if you wish to set which cores to use\naffinity_mask = {4, 5, 7} \n#affinity_mask = {6, 7, 9} \n#affinity_mask = {0, 1, 3} \n#affinity_mask = {2, 3, 5} \naffinity_mask = {0, 2, 4, 6} \n\npid = 0\nos.sched_setaffinity(pid, affinity_mask) \nprint(\"CPU affinity mask is modified to %s for process id 0\" % affinity_mask) \n",
"_____no_output_____"
],
[
"## DEFAULT 'CarRacing-v3' environment values\n\n# continuos action = (steering_angle, throttle, brake)\nACT = [[0, 0, 0], [-0.4, 0, 0], [0.4, 0, 0], [0, 0.6, 0], [0, 0, 0.8]]\n# discrete actions: center_steering and no gas/brake, steer left, steer right, accel, brake \n# --> actually a good choice, because car_dynamics softens the action's diff for gas and steering\n\n##REWARDS \n# reward given each step: step taken, distance to centerline, normalized speed [0-1], normalized steer angle [0-1]\n# reward given on new tile touched: %proportional of advance, %advance/steps_taken\n# reward given at episode end: all tiles touched (track finished), patience or off-raod exceeded, out of bounds, max_steps exceeded\n# reward for obstacles: obstacle hit (each step), obstacle collided (episode end)\nGYM_REWARD = [ -0.1, 0.0, 0.0, 0.0, 10.0, 0.0, 0, -0, -100, -0, -0, -0 ]\nSTD_REWARD = [ -0.1, 0.0, 0.0, 0.0, 1.0, 0.0, 100, -20, -100, -50, -0, -0 ]\nCONT_REWARD =[-0.11, 0.1, 0.0, 0.0, 1.0, 0.0, 100, -20, -100, -50, -5, -100 ]\n# see docu for RETURN computation details\n\n## DEFAULT Environment Parameters (not related to RL Algorithm!)\ngame_color = 1 # State (frame) color option: 0 = RGB, 1 = Grayscale, 2 = Green only\nindicators = True # show or not bottom Info Panel\nframes_per_state = 4 # stacked (rolling history) Frames on each state [1-inf], latest observation always on first Frame\nskip_frames = 3 # number of consecutive Frames to skip between history saves [0-4]\ndiscre = ACT # Action discretization function, format [[steer0, throtle0, brake0], [steer1, ...], ...]. None for continuous\n\nuse_track = 1 # number of times to use the same Track, [1-100]. More than 20 high risk of overfitting!!\nepisodes_per_track = 1 # number of evenly distributed starting points on each track [1-20]. Every time you call reset(), the env automatically starts at the next point\ntr_complexity = 12 # generated Track geometric Complexity, [6-20]\ntr_width = 45 # relative Track Width, [30-50]\npatience = 2.0 # max time in secs without Progress, [0.5-20]\noff_track = 1.0 # max time in secs Driving on Grass, [0.0-5]\nf_reward = CONT_REWARD # Reward Funtion coefficients, refer to Docu for details\n\nnum_obstacles = 5 # Obstacle objects placed on track [0-10]\nend_on_contact = False # Stop Episode on contact with obstacle, not recommended for starting-phase of training\nobst_location = 0 # array pre-setting obstacle Location, in %track. Negative value means tracks's left-hand side. 0 for random location\noily_patch = False # use all obstacles as Low-friction road (oily patch)\nverbose = 2 \n",
"_____no_output_____"
]
],
[
[
"## Choose one agent, see Docu for description\n#agent='CarRacing-v0'\n#agent='CarRacing-v1'\nagent='CarRacing-v3'\n\n# Stop training when the model reaches the reward threshold\ncallback_on_best = StopTrainingOnRewardThreshold(reward_threshold = 170, verbose=1)\n\nseed = 2000",
"_____no_output_____"
],
[
"## SIMULATION param \n## Changing these makes world models incompatible!!\ngame_color = 2\nindicators = True\nfpst = 4\nskip = 3\nactions = [[0, 0, 0], [-0.4, 0, 0], [0.4, 0, 0], [0, 0.6, 0], [0, 0, 0.8]] #this is ACT\n\nobst_loc = [6, -12, 25, -50, 75, -37, 62, -87, 95, -29] #track percentage, negative for obstacle to the left-hand side\n",
"_____no_output_____"
],
[
"## This model param\nuse = 10 # number of times to use same track [1,100]\nept = 12 # different starting points on same track [1,20]\ntrack_complexity = 12\npatience = 2.0\nseed = 1000\n\n#using follow_centerline for this first leg of training\nREWARD = [-0.11, 0.1, 0.0, 0.0, 1.0, 0.0, 100, -20, -100, -50, 0, 0] #adding incentive to move forward fast\n\nif agent=='CarRacing-v3': \n env1 = gym.make(agent, seed=seed, \n game_color=game_color,\n indicators=indicators,\n frames_per_state=fpst,\n skip_frames=skip, \n# discre=actions, #passing custom actions\n use_track = use, \n episodes_per_track = ept, \n tr_complexity = track_complexity, \n tr_width = 45,\n patience = patience,\n off_track = patience,\n end_on_contact = False,\n oily_patch = False,\n num_obstacles = 0, #just learning to drive on track, no obstacles used\n obst_location = obst_loc, #passing a fixed obstacle location\n f_reward = REWARD ) #passing a custom reward function\nelse: \n env1 = gym.make(agent)\n\nenv1 = DummyVecEnv([lambda: env1])\nenv1.metadata",
"_____no_output_____"
],
[
"## This training param\nbatch_size = 256\nupdates = 750\nepochs = 4\n\nmodel = PPO2(CnnPolicy, env1, verbose=1, n_steps=batch_size, #seed=314, n_cpu_tf_sess=1,\n gamma=0.99, learning_rate=0.00025, nminibatches=epochs, ent_coef=0.01, vf_coef=0.5) \n",
"_____no_output_____"
],
[
"## Training #1 - simply drive the track\n\nmodel.learn(total_timesteps = updates*batch_size, log_interval=1) #, callback=eval_callback)",
"_____no_output_____"
],
[
"#Save last updated model\nimport pickle\nroot = 'ppo_cnn_gym-mod_contact_'\n\nmodel.save(root + 'drive', cloudpickle=True)\nparam_list=model.get_parameter_list()\n",
"_____no_output_____"
],
[
"env1.close()",
"_____no_output_____"
],
[
"## Separate evaluation env\ntest_freq = 100 #policy updates until evaluation\ntest_episodes_per_track = 5 #number of starting points on test_track\neval_log = './evals/'\n\nenv_test = gym.make(agent, seed=int(3.14*seed), \n game_color=game_color,\n indicators=indicators,\n frames_per_state=fpst,\n skip_frames=skip, \n# discre=actions, #passing custom actions\n use_track = 1, #change test track after 1 ept round\n episodes_per_track = test_episodes_per_track, \n tr_complexity = 12, #test on a medium complexity track\n tr_width = 45,\n patience = 2.0,\n off_track = 2.0,\n end_on_contact = False,\n oily_patch = False,\n num_obstacles = 5,\n obst_location = obst_loc) #passing fixed obstacle location\n\nenv_test = DummyVecEnv([lambda: env_test])\n\neval_callback = EvalCallback(env_test, callback_on_new_best=callback_on_best, #None,\n n_eval_episodes=test_episodes_per_track*3, eval_freq=test_freq*batch_size,\n best_model_save_path=eval_log, log_path=eval_log, deterministic=True, \n render = True) #turn False if you dont want to watch the agent's test\n",
"_____no_output_____"
],
[
"## This model param #2\nuse = 6 # number of times to use same track [1,100]\nept = 10 # different starting points on same track [1,20]\npatience = 1.0\ntrack_complexity = 12\nseed = 5000\n\nif agent=='CarRacing-v3': \n env2 = gym.make(agent, seed=seed, \n game_color=game_color,\n indicators=indicators,\n frames_per_state=fpst,\n skip_frames=skip, \n# discre=actions, #passing custom actions\n use_track = use, \n episodes_per_track = ept, \n tr_complexity = track_complexity, \n tr_width = 45,\n patience = patience,\n off_track = patience,\n end_on_contact = True, #learning to avoid obstacles the-hard-way\n oily_patch = False,\n num_obstacles = 5, #some obstacles\n obst_location = obst_loc, #passing fixed obstacle location\n# f_reward = REWARD2, #passing a custom reward function\n verbose = 2 ) \nelse: \n env2 = gym.make(agent)\n\nenv2 = DummyVecEnv([lambda: env2])",
"_____no_output_____"
],
[
"## Training on obstacles\nmodel.set_env(env2)\nupdates = 700",
"_____no_output_____"
],
[
"## Training #2 - stop on obstacle collision\n\nmodel.learn(total_timesteps = updates*batch_size, log_interval=1) #, callback=eval_callback)",
"_____no_output_____"
],
[
"#Save last updated model\n\nfile = root+'c{:d}_f{:d}_s{:d}_{}_a{:d}__u{:d}_e{:d}_p{}_bs{:d}'.format(\n game_color,fpst,skip,indicators,len(actions),use,ept,patience,batch_size)\n\nmodel.save(file, cloudpickle=True)\nparam_list=model.get_parameter_list()",
"_____no_output_____"
],
[
"env2.close()",
"_____no_output_____"
],
[
"## This model param #3\nuse = 6 # number of times to use same track [1,100]\nept = 10 # different starting points on same track [1,20]\npatience = 1.0\ntrack_complexity = 12\n#REWARD2 = [-0.05, 0.1, 0.0, 0.0, 2.0, 0.0, 100, -20, -100, -50, -5, -100]\nseed = 25000\n\nif agent=='CarRacing-v3': \n env2b = gym.make(agent, seed=seed, \n game_color=game_color,\n indicators=indicators,\n frames_per_state=fpst,\n skip_frames=skip, \n# discre=actions, #passing custom actions\n use_track = use, \n episodes_per_track = ept, \n tr_complexity = track_complexity, \n tr_width = 45,\n patience = patience,\n off_track = patience,\n end_on_contact = False, # CHANGED \n oily_patch = False,\n num_obstacles = 5, #some obstacles\n obst_location = 0, #using random obstacle location\n# f_reward = REWARD2, #passing a custom reward function\n verbose = 3 ) \nelse: \n env2b = gym.make(agent)\n\nenv2b = DummyVecEnv([lambda: env2b])",
"_____no_output_____"
],
[
"## Training #3 \nnew_mod = False #to change batch_size you need a new model !!\nupdates = 1500\n\nif new_mod:\n batch_size2 = 512\n model2 = PPO2(CnnPolicy, env2b, verbose=1, n_steps=batch_size2, # seed=314, n_cpu_tf_sess=1,\n gamma=0.99, learning_rate=0.00025, nminibatches=epochs, ent_coef=0.01, vf_coef=0.5) \n model2.load_parameters(param_list, exact_match=True)\n model2.learn(total_timesteps = updates*batch_size2, log_interval=1, callback=eval_callback)\nelse:\n model.set_env(env2b)\n model.learn(total_timesteps = updates*batch_size, log_interval=1, callback=eval_callback)\n",
"_____no_output_____"
],
[
"#import pickle\nif new_mod:\n file = root+'c{:d}_f{:d}_s{:d}_{}_a{:d}__u{:d}_e{:d}_p{}_bs{:d}'.format(\n game_color,fpst,skip,indicators,len(actions),use,ept,patience,batch_size2)\n model2.save(file, cloudpickle=True)\n param_list=model2.get_parameter_list()\nelse:\n model.save(file+'II', cloudpickle=True)\n param_list=model.get_parameter_list()\n",
"_____no_output_____"
],
[
"env2b.close()\nenv_test.close()",
"_____no_output_____"
],
[
"## Enjoy last trained policy\n\nif agent=='CarRacing-v3': #create an independent test environment, almost everything in std/random definition\n env3 = gym.make(agent, seed=None, \n game_color=game_color,\n indicators = True,\n frames_per_state=fpst,\n skip_frames=skip, \n# discre=actions,\n use_track = 2, \n episodes_per_track = 1, \n patience = 5.0,\n off_track = 3.0 )\nelse:\n env3 = gym.make(agent)\n\nenv3 = DummyVecEnv([lambda: env3])\nobs = env3.reset()\nprint(obs.shape) \n\ndone = False\npasos = 0\n_states=None\n\nwhile not done: # and pasos<1500:\n action, _states = model.predict(obs, deterministic=True)\n obs, reward, done, info = env3.step(action)\n env3.render()\n pasos+=1\n \nenv3.close()\nprint()\nprint(reward, done, pasos) #, info)",
"_____no_output_____"
],
[
"## Enjoy best eval_policy\n\nobs = env3.reset()\nprint(obs.shape) \n\n## Load bestmodel from eval\n#if not isinstance(model_test, PPO2):\nmodel_test = PPO2.load(eval_log+'best_model', env3)\n\ndone = False\npasos = 0\n_states=None\n\nwhile not done: # and pasos<1500:\n action, _states = model_test.predict(obs, deterministic=True)\n obs, reward, done, info = env3.step(action)\n env3.render()\n pasos+=1\n \nenv3.close()\nprint()\nprint(reward, done, pasos)\nprint(action, _states)",
"_____no_output_____"
],
[
"model_test.save(file+'_evalbest', cloudpickle=True)",
"_____no_output_____"
],
[
"env2b.close()",
"_____no_output_____"
],
[
"env3.close()",
"_____no_output_____"
],
[
"env_test.close()",
"_____no_output_____"
],
[
"print(action, _states)",
"_____no_output_____"
],
[
"obs.shape",
"_____no_output_____"
]
]
] | [
"code",
"raw",
"code"
] | [
[
"code",
"code"
],
[
"raw",
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecddf2c4b51ff3ba32977d5271d0d17821dd0d93 | 8,984 | ipynb | Jupyter Notebook | String Formatting.ipynb | ldo/python_topics_notebooks | 69d714f0c9bb637a8996aaad7e2d2cf5356b89c0 | [
"CC0-1.0"
] | 2 | 2019-05-01T03:21:00.000Z | 2019-05-04T12:37:23.000Z | String Formatting.ipynb | ldo/python_topics_notebooks | 69d714f0c9bb637a8996aaad7e2d2cf5356b89c0 | [
"CC0-1.0"
] | null | null | null | String Formatting.ipynb | ldo/python_topics_notebooks | 69d714f0c9bb637a8996aaad7e2d2cf5356b89c0 | [
"CC0-1.0"
] | null | null | null | 35.231373 | 585 | 0.628117 | [
[
[
"# String Formatting #\n\nIt is easy to convert just about any Python object to a string representation, using one of the [`str()`](https://docs.python.org/3/library/functions.html#func-str), [`repr()`](https://docs.python.org/3/library/functions.html#repr) or [`ascii()`](https://docs.python.org/3/library/functions.html#ascii) functions.\n\nBut sometimes you need more control, particularly over field widths, numbers of digits/characters, sign representations and the like. Also, it can be convenient to lay out the format of an entire message, except for some variable parts, and be able to construct the formatted message in a more convenient form than having to concatenate a bunch of strings.\n\nThis is where *formatted* string output comes in. As of version 3.6, Python offers no less than *three* different ways, built into the language, of doing formatted string output:\n* the traditional [C-style](http://man7.org/linux/man-pages/man3/printf.3.html) [`printf`](https://docs.python.org/3/library/stdtypes.html#old-string-formatting) form, predating Python 3.0\n* the [`str.format()`](https://docs.python.org/3/library/string.html#formatstrings) method\n* [formatted string literals](https://docs.python.org/3/whatsnew/3.6.html#pep-498-formatted-string-literals), reminiscent of those in Perl, introduced in Python 3.6.\n\nThe rest of this notebook is basically a series of random observations on various salient points that have struck me about these various formatting mechanisms.",
"_____no_output_____"
],
[
"## `printf`-Style Formatting ##\n\n`printf`-style formatting originated with C, but is widely supported in some form across a number of languages, not just Python.\n\nPython doesn’t actually provide a direct `printf` equivalent: instead, it provides the equivalent of `sprintf`, which returns the formatted string as the result, in the form of the “`%`” operator with a string as its left operand, and the sequence of items to be substituted as its right operand. It is then up to you to do what you want with this string: you can print it out, or assign it to something for later use, or manipulate it further in an expression.\n\nAlso, Python does not allow the use of the “`$`” character for indicating the indexes of items to be substituted. But it has its own keyword-based alternative (see below).\n\n### Field Width Versus Number Of Digits ###\n\nSupposing you want to output an integer value",
"_____no_output_____"
]
],
[
[
"val1 = 3",
"_____no_output_____"
]
],
[
[
"as 3 digits with leading zeroes as necessary. It is quite common to write",
"_____no_output_____"
]
],
[
[
"\"%03d\" % val1",
"_____no_output_____"
]
],
[
[
"and while this works in this simple situation, note that the “3” in the format string specifies the *total field width*, not the *number of digits*. This becomes apparent when you add other format embellishments, such as an explicit sign:",
"_____no_output_____"
]
],
[
[
"\"%+03d\" % val1",
"_____no_output_____"
]
],
[
[
"As you can see, these take up space allocated to the total field width. In order to get the correct number of digits in all situations, you have to specify it *after a decimal point*:",
"_____no_output_____"
]
],
[
[
"\"%0+.3d\" % val1",
"_____no_output_____"
]
],
[
[
"### Variable Field Widths And Precisions ###\n\nIt is possible for the field width and/or precision values to be specified as “`*`”, which means they are taken from items in the format list, immediately preceding the item being formatted:",
"_____no_output_____"
]
],
[
[
"nr_digits = 5\n\"%0+.*d\" % (nr_digits, val1)",
"_____no_output_____"
]
],
[
[
"### Items By Keyword ###\n\nOne useful enhancement that Python offers is the ability to specify substitution items by keyword. In this case, the right operand is not a sequence, but a dictionary mapping those keywords to the corresponding items. This is convenient for allowing the items to be specified in a different order, and also for items to be given once but appear at multiple places in the output string.",
"_____no_output_____"
]
],
[
[
"\"%(val)0+.3d\" % {\"val\" : val1}",
"_____no_output_____"
]
],
[
[
"### Multilevel Formatting ###\n\nHowever, the keyword option is not available for variable field widths or precisions. But there is a way around that, by applying more than one level of string formatting:",
"_____no_output_____"
]
],
[
[
"val2 = 99\n\"%%0+.%(nr_digits)dd %%0+.%(nr_digits)dd\" \\\n % {\"nr_digits\" : nr_digits} \\\n % (val1, val2)",
"_____no_output_____"
]
],
[
[
"## `str.format()` ##\n\nOne obvious advantage of this newer-style string formatting mechanism is that you don’t need to specify the item type (e.g. “`d`”, “`f`” etc) at all: you can still do so, but otherwise reasonable defaults will be used.\n\nThis solves some of the limitations of `printf`-style formatting, such as allowing referencing items by index, and being able to specify variable widths and precisions by index or keyword. In addition, it is possible to access attributes of the items, or index them in simple ways.",
"_____no_output_____"
]
],
[
[
"from math import \\\n pi as π\n\n\"{val:.{nr_digits}f}\".format(nr_digits = nr_digits, val = π)",
"_____no_output_____"
]
],
[
[
"This mechanism also allows types to define their own `__format__()` methods, which can implement an entire “mini-language” to be used in the format string to control the representation of values of that type. [For example](https://docs.python.org/3/library/string.html#format-examples), the `datetime.datetime` class [uses this](https://docs.python.org/3/library/datetime.html#datetime.date.__format__) to bring the entire range of options available in in the [`strftime()`](https://docs.python.org/3/library/datetime.html#datetime.date.strftime) function into format strings.\n\nHowever, one peculiar omission (at least as of Python 3.6) is the inexplicable lack of support of precision specifiers for integer types. Adapting our previous example of the distinction between field widths and number of decimal digits:",
"_____no_output_____"
]
],
[
[
"\"{:+03d}\".format(val1)",
"_____no_output_____"
]
],
[
[
"is valid, but",
"_____no_output_____"
]
],
[
[
"\"{:+0.3d}\".format(val1)",
"_____no_output_____"
]
],
[
[
"is not.",
"_____no_output_____"
],
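[
"# A minimal sketch of the datetime mini-language mentioned earlier:\n# strftime-style codes can be written directly inside the format spec.\nimport datetime\n\"{:%Y-%m-%d %H:%M}\".format(datetime.datetime(2021, 3, 14, 15, 9))",
"_____no_output_____"
],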
[
"## Formatted String Literals ##\n\n[These](https://docs.python.org/3/reference/lexical_analysis.html#f-strings) use a similar format mini-language to `str.format()` formatting, except that they take the form of a string literal with a special prefix, which can contain direct embedded references to variables accessible in the current environment.",
"_____no_output_____"
]
],
[
[
"f\"val1 = {val1}, val2 = {val2}\"",
"_____no_output_____"
]
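,
[
"# The same format mini-language applies inside f-strings: a minimal sketch\n# reusing π and nr_digits from earlier cells, with a nested format field.\nf\"π to {nr_digits} places: {π:.{nr_digits}f}\"",
"_____no_output_____"
]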
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
ecddf59ff310656b2af32dffad6b03f5931d8d0d | 83,912 | ipynb | Jupyter Notebook | Jupyter-notebook/Models_evaluations.ipynb | lzomedia/URL-categorization-using-machine-learning | db204571a2e86643581d46c2cc7bfc9d78827e53 | [
"MIT"
] | 1 | 2021-09-06T08:15:21.000Z | 2021-09-06T08:15:21.000Z | Jupyter-notebook/Models_evaluations.ipynb | lzomedia/URL-categorization-using-machine-learning | db204571a2e86643581d46c2cc7bfc9d78827e53 | [
"MIT"
] | null | null | null | Jupyter-notebook/Models_evaluations.ipynb | lzomedia/URL-categorization-using-machine-learning | db204571a2e86643581d46c2cc7bfc9d78827e53 | [
"MIT"
] | 1 | 2021-08-02T07:37:04.000Z | 2021-08-02T07:37:04.000Z | 203.176755 | 69,108 | 0.888717 | [
[
[
"import pandas as pd\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import RegexpTokenizer\nimport ast\nimport numpy as np\nimport os\nimport ast\nimport urllib.request\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport os.path\nfrom datetime import datetime\nfrom collections import Counter\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\nnltk.download('stopwords')\nnltk.download('words')\nnltk.download('punkt')\nnltk.download('wordnet')",
"[nltk_data] Downloading package stopwords to\n[nltk_data] /home/domantas/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n[nltk_data] Downloading package words to /home/domantas/nltk_data...\n[nltk_data] Package words is already up-to-date!\n[nltk_data] Downloading package punkt to /home/domantas/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package wordnet to /home/domantas/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n"
],
[
"df = pd.read_csv('../Datasets/Translated_tokens_2019-05-05.csv')",
"_____no_output_____"
],
[
"import pickle\npickle_in = open(\"../Frequency_models/word_frequency_2019-05-05.picle\",\"rb\")\nwords_frequency = pickle.load(pickle_in)",
"_____no_output_____"
],
[
"top = 2500\nfrom collections import Counter\n\nfeatures = np.zeros(df.shape[0] * top).reshape(df.shape[0], top)\nlabels = np.zeros(df.shape[0])\ncounter = 0\nfor i, row in df.iterrows():\n c = [word for word, word_count in Counter(ast.literal_eval(row['tokens_en'])).most_common(top)]\n labels[counter] = list(set(df['main_category'].values)).index(row['main_category'])\n for word in c:\n if word in words_frequency[row['main_category']]:\n features[counter][words_frequency[row['main_category']].index(word)] = 1\n counter += 1",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\nfrom scipy.sparse import coo_matrix\nX_sparse = coo_matrix(features)\n\nfrom sklearn.utils import shuffle\nX, X_sparse, y = shuffle(features, X_sparse, labels, random_state=0)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\nfrom sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()\nlr.fit(X_train, y_train)\nlr_predictions = lr.predict(X_test)\nscore = lr.score(X_test, y_test)\nprint('LogisticRegression')\nprint('Score: ', score)\nprint('Top: ', top)\nprint('Dataset length: ', df.shape[0])\nprint()\n\nfrom sklearn.svm import LinearSVC\nclf = LinearSVC()\nclf.fit(X_train, y_train)\nclf_predictions = clf.predict(X_test)\nscore = clf.score(X_test, y_test)\nprint('SVM')\nprint('Score: ', score)\nprint('Top: ', top)\nprint('Dataset length: ', df.shape[0])",
"/home/domantas/.local/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/home/domantas/.local/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n"
],
[
"cm = confusion_matrix(y_test, lr_predictions)\nrecall = np.diag(cm) / np.sum(cm, axis = 1)\nrecall_mean = np.mean(recall)\nprecision = np.diag(cm) / np.sum(cm, axis = 0)\nprecision_mean = np.mean(precision)\nfrom sklearn.metrics import classification_report\ntarget_names = list(words_frequency.keys())\nresults = classification_report(y_test, lr_predictions, target_names=target_names)\nprint(results)",
" precision recall f1-score support\n\n Gambling 0.75 0.09 0.16 34\n Autos_and_Vehicles 0.89 0.70 0.78 79\n Adult 0.75 0.73 0.74 211\n Health 0.76 0.74 0.75 164\n Business_and_Industry 0.58 0.19 0.29 36\n Games 0.81 0.77 0.79 177\n Reference 0.77 0.70 0.73 212\n Sports 0.75 0.55 0.63 120\n Science 0.76 0.77 0.76 225\n People_and_Society 0.78 0.77 0.77 319\nComputer_and_Electronics 0.89 0.80 0.84 256\n Food_and_Drink 0.75 0.67 0.71 147\n Internet_and_Telecom 0.80 0.78 0.79 251\n Finance 0.79 0.85 0.82 427\n Beauty_and_Fitness 0.70 0.77 0.73 391\n Pets_and_Animals 0.73 0.79 0.76 451\n News_and_Media 0.76 0.46 0.57 104\n Home_and_Garden 0.77 0.82 0.79 415\n Arts_and_Entertainment 0.79 0.73 0.76 153\n Recreation_and_Hobbies 0.77 0.91 0.83 362\n Shopping 0.76 0.65 0.70 155\n Career_and_Education 0.67 0.81 0.73 339\n Books_and_Literature 0.85 0.67 0.75 123\n Travel 0.93 0.70 0.80 80\n Law_and_Government 0.74 0.82 0.78 291\n\n micro avg 0.76 0.76 0.76 5522\n macro avg 0.77 0.69 0.71 5522\n weighted avg 0.76 0.76 0.76 5522\n\n"
],
[
"from sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nlabels = list(words_frequency.keys())\ncm = confusion_matrix(y_test, lr_predictions)\nfig, ax = plt.subplots()\ncax = ax.matshow(cm)\nfig.colorbar(cax)\nfig.set_size_inches(15, 10)\nax.set_xticklabels(labels, rotation='vertical')\nax.set_yticklabels(labels)\nax.set_xticks(np.arange(len(labels)))\nax.set_yticks(np.arange(len(labels)))\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.show()\n# plt.savefig(\"../Pictures/lr_confusion_matrix\")",
"_____no_output_____"
],
[
"cm = confusion_matrix(y_test, clf_predictions)\nrecall = np.diag(cm) / np.sum(cm, axis = 1)\nrecall_mean = np.mean(recall)\nprecision = np.diag(cm) / np.sum(cm, axis = 0)\nprecision_mean = np.mean(precision)\nfrom sklearn.metrics import classification_report\ntarget_names = list(words_frequency.keys())\nresults = classification_report(y_test, clf_predictions, target_names=target_names)\nprint(results)",
" precision recall f1-score support\n\n People_and_Society 0.86 0.86 0.86 59\n Reference 0.62 0.52 0.56 56\n Home_and_Garden 0.36 0.21 0.27 19\n Science 0.76 0.72 0.74 143\n Adult 0.88 0.88 0.88 165\n Internet_and_Telecom 0.86 0.78 0.82 69\n Travel 0.77 0.75 0.76 134\n Shopping 0.79 0.85 0.82 310\n Beauty_and_Fitness 0.86 0.88 0.87 291\n Autos_and_Vehicles 0.86 0.84 0.85 116\n Games 0.83 0.72 0.77 93\n Career_and_Education 0.81 0.87 0.84 209\n Recreation_and_Hobbies 0.08 0.11 0.09 9\n Law_and_Government 0.75 0.65 0.70 72\n Gambling 0.76 0.78 0.77 169\n Finance 0.79 0.84 0.81 88\n Sports 0.71 0.73 0.72 96\n Pets_and_Animals 0.81 0.78 0.79 243\n Health 0.78 0.80 0.79 49\n Business_and_Industry 0.81 0.77 0.79 168\n Books_and_Literature 0.73 0.75 0.74 162\n Food_and_Drink 0.76 0.83 0.79 259\n News_and_Media 0.72 0.74 0.73 195\n Arts_and_Entertainment 0.79 0.54 0.64 82\nComputer_and_Electronics 0.82 0.82 0.82 194\n\n micro avg 0.79 0.79 0.79 3450\n macro avg 0.74 0.72 0.73 3450\n weighted avg 0.79 0.79 0.79 3450\n\n"
],
[
"from sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nlabels = list(words_frequency.keys())\ncm = confusion_matrix(y_test, clf_predictions)\nfig, ax = plt.subplots()\ncax = ax.matshow(cm)\nfig.colorbar(cax)\nfig.set_size_inches(15, 10)\nax.set_xticklabels(labels, rotation='vertical')\nax.set_yticklabels(labels)\nax.set_xticks(np.arange(len(labels)))\nax.set_yticks(np.arange(len(labels)))\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.show()\n# plt.savefig(\"../Pictures/lsvm_confusion_matrix\")",
"_____no_output_____"
],
[
"# # Save models\n# from sklearn.externals import joblib\n# filename = \"../Models/{}/LR_model_{}_latex.joblib\".format(month.title(), month)\n# if not os.path.isfile(filename):\n# joblib.dump(lr, filename)\n \n# filename = \"../Models/{}/SVM_model_{}_latex.joblib\".format(month.title(), month)\n# if not os.path.isfile(filename):\n# joblib.dump(clf, filename)\n\n# import pickle\n# words_filename = \"../Models/{}/word_frequency_{}_latex.picle\".format(month.title(), month)\n# if not os.path.isfile(words_filename):\n# pickle_out = open(words_filename,\"wb\")\n# pickle.dump(words_frequency, pickle_out)\n# pickle_out.close()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecde0c4d613ebb99f350c8f613e5806f85678973 | 13,318 | ipynb | Jupyter Notebook | images/gallery.ipynb | rlugojr/lightning-example-notebooks | 727a427ce2ec1720e2016b8ad272a1d194f464cf | [
"MIT"
] | 62 | 2015-01-21T03:05:14.000Z | 2021-05-02T14:17:57.000Z | images/gallery.ipynb | rlugojr/lightning-example-notebooks | 727a427ce2ec1720e2016b8ad272a1d194f464cf | [
"MIT"
] | 4 | 2015-02-06T22:14:17.000Z | 2020-08-07T20:23:24.000Z | images/gallery.ipynb | rlugojr/lightning-example-notebooks | 727a427ce2ec1720e2016b8ad272a1d194f464cf | [
"MIT"
] | 40 | 2015-03-19T03:20:12.000Z | 2022-01-11T22:16:50.000Z | 70.840426 | 4,099 | 0.779622 | [
[
[
"# <img style='float: left' src=\"http://lightning-viz.github.io/images/logo.png\"> <br> <br> Gallery plots in <a href='http://lightning-viz.github.io/'><font color='#9175f0'>Lightning</font></a>",
"_____no_output_____"
],
[
"## <hr> Setup",
"_____no_output_____"
]
],
[
[
"from lightning import Lightning\n\nfrom sklearn import datasets",
"_____no_output_____"
]
],
[
[
"## Connect to server",
"_____no_output_____"
]
],
[
[
"lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')",
"_____no_output_____"
]
],
[
[
"## <hr> Gallery",
"_____no_output_____"
],
[
"The gallery viewer is an easy way to look at and browse multiple images simultaneously.",
"_____no_output_____"
]
],
[
[
"imgs = datasets.load_sample_images().images\nlgn.gallery([imgs[0], imgs[1]])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
ecde2613322e7015dfabca6814674fafd0b2d0b9 | 6,294 | ipynb | Jupyter Notebook | experiments/tokenizer tryouts.ipynb | otzhora/duplication | e06222533c3d135aaa39ccbfb6589a08a9616130 | [
"MIT"
] | 2 | 2021-12-30T17:47:29.000Z | 2022-03-29T10:13:44.000Z | experiments/tokenizer tryouts.ipynb | otzhora/potator | e06222533c3d135aaa39ccbfb6589a08a9616130 | [
"MIT"
] | null | null | null | experiments/tokenizer tryouts.ipynb | otzhora/potator | e06222533c3d135aaa39ccbfb6589a08a9616130 | [
"MIT"
] | null | null | null | 21.408163 | 173 | 0.447251 | [
[
[
"import sys\nprint('Python %s on %s' % (sys.version, sys.platform))\nsys.path.extend(['/mnt/c/Users/Yuriy Rogachev/PycharmProjects/code duplication detection', '/mnt/c/Users/Yuriy Rogachev/PycharmProjects/code duplication detection'])\n",
"Python 3.8.5 (default, Jan 27 2021, 15:41:15) \n[GCC 9.3.0] on linux\n"
],
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"import os\nfrom pathlib import Path\n\ncwd = os.getcwd()\ntest_dir = Path(cwd).parent/\"duplication\"/\"test_data\"\ntest_dir",
"_____no_output_____"
],
[
"from collections import Counter",
"_____no_output_____"
]
],
[
[
"# Tokenizer",
"_____no_output_____"
]
],
[
[
"from duplication.run import *",
"_____no_output_____"
],
[
"recognize_languages_dir(test_dir)",
"_____no_output_____"
],
[
"data = get_identifiers_sequence_from_file(test_dir/\"test_file.kt\", \"Kotlin\", False, False)",
"_____no_output_____"
],
[
"Counter(data)",
"_____no_output_____"
],
[
"before = {\"args\": 11, \"main\": 5, \"array\": 5, \"slice\": 4, \"until\": 4, \"size\": 4,\n \"project\": 2, \"extractor\": 2, \"cli\": 1, \"empty\": 1,\n \"isempti\": 1, \"println\": 1, \"trim\": 1, \"indent\": 1, \"preprocessor\": 1,\n \"parser\": 1, \"path\": 1, \"context\": 1, \"code\": 1, \"vec\": 1, \"except\": 1}\nafter = dict(Counter(data))",
"_____no_output_____"
],
[
"sorted(before.keys()) == sorted(after.keys())",
"_____no_output_____"
],
[
"sorted(before.keys()) ",
"_____no_output_____"
],
[
"sorted(after.keys())",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecde26f10f2cfc241bd187652e3df7f5d458fbec | 569,183 | ipynb | Jupyter Notebook | notebooks/RF_age_analysis_P21_P28_P32_P35.ipynb | nlsschim/diff_predictor | e8b2239a4f221dfaa7e26335c4b0db9ecd88beea | [
"MIT"
] | null | null | null | notebooks/RF_age_analysis_P21_P28_P32_P35.ipynb | nlsschim/diff_predictor | e8b2239a4f221dfaa7e26335c4b0db9ecd88beea | [
"MIT"
] | null | null | null | notebooks/RF_age_analysis_P21_P28_P32_P35.ipynb | nlsschim/diff_predictor | e8b2239a4f221dfaa7e26335c4b0db9ecd88beea | [
"MIT"
] | null | null | null | 141.623041 | 64,788 | 0.804471 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecde2896d5e8fe86048dd099859704099484425b | 59,531 | ipynb | Jupyter Notebook | site/en-snapshot/tfx/tutorials/tfx/components_keras.ipynb | rlatjcj/docs-l10n | 850e980034a3b9cab7f73b7feeeae7f497258f1e | [
"Apache-2.0"
] | 2 | 2020-09-29T07:31:21.000Z | 2020-10-13T08:16:18.000Z | site/en-snapshot/tfx/tutorials/tfx/components_keras.ipynb | rlatjcj/docs-l10n | 850e980034a3b9cab7f73b7feeeae7f497258f1e | [
"Apache-2.0"
] | null | null | null | site/en-snapshot/tfx/tutorials/tfx/components_keras.ipynb | rlatjcj/docs-l10n | 850e980034a3b9cab7f73b7feeeae7f497258f1e | [
"Apache-2.0"
] | null | null | null | 37.558991 | 538 | 0.560666 | [
[
[
"##### Copyright © 2020 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# TFX Keras Component Tutorial\n\n***A Component-by-Component Introduction to TensorFlow Extended (TFX)***",
"_____no_output_____"
],
[
"Note: We recommend running this tutorial in a Colab notebook, with no setup required! Just click \"Run in Google Colab\".\n\n<div class=\"devsite-table-wrapper\"><table class=\"tfo-notebook-buttons\" align=\"left\">\n<td><a target=\"_blank\" href=\"https://www.tensorflow.org/tfx/tutorials/tfx/components_keras\">\n<img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a></td>\n<td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/tfx/components_keras.ipynb\">\n<img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\">Run in Google Colab</a></td>\n<td><a target=\"_blank\" href=\"https://github.com/tensorflow/tfx/tree/master/docs/tutorials/tfx/components_keras.ipynb\">\n<img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\">View source on GitHub</a></td>\n</table></div>",
"_____no_output_____"
],
[
"This Colab-based tutorial will interactively walk through each built-in component of TensorFlow Extended (TFX).\n\nIt covers every step in an end-to-end machine learning pipeline, from data ingestion to pushing a model to serving.\n\nWhen you're done, the contents of this notebook can be automatically exported as TFX pipeline source code, which you can orchestrate with Apache Airflow and Apache Beam.\n\nNote: This notebook demonstrates the use of native Keras models in TFX pipelines. **TFX only supports the TensorFlow 2 version of Keras**.\n\nNote: This notebook and its associated APIs are **experimental** and are\nin active development. Major changes in functionality, behavior, and\npresentation are expected.",
"_____no_output_____"
],
[
"## Background\nThis notebook demonstrates how to use TFX in a Jupyter/Colab environment. Here, we walk through the Chicago Taxi example in an interactive notebook.\n\nWorking in an interactive notebook is a useful way to become familiar with the structure of a TFX pipeline. It's also useful when doing development of your own pipelines as a lightweight development environment, but you should be aware that there are differences in the way interactive notebooks are orchestrated, and how they access metadata artifacts.\n\n### Orchestration\n\nIn a production deployment of TFX, you will use an orchestrator such as Apache Airflow, Kubeflow Pipelines, or Apache Beam to orchestrate a pre-defined pipeline graph of TFX components. In an interactive notebook, the notebook itself is the orchestrator, running each TFX component as you execute the notebook cells.\n\n### Metadata\n\nIn a production deployment of TFX, you will access metadata through the ML Metadata (MLMD) API. MLMD stores metadata properties in a database such as MySQL or SQLite, and stores the metadata payloads in a persistent store such as on your filesystem. In an interactive notebook, both properties and payloads are stored in an ephemeral SQLite database in the `/tmp` directory on the Jupyter notebook or Colab server.",
"_____no_output_____"
],
[
"## Setup\nFirst, we install and import the necessary packages, set up paths, and download data.",
"_____no_output_____"
],
[
"### Upgrade Pip\n\nTo avoid upgrading Pip in a system when running locally, check to make sure that we're running in Colab. Local systems can of course be upgraded separately.",
"_____no_output_____"
]
],
[
[
"try:\n import colab\n !pip install --upgrade pip\nexcept:\n pass",
"_____no_output_____"
]
],
[
[
"### Install TFX\n\n**Note: In Google Colab, because of package updates, the first time you run this cell you must restart the runtime (Runtime > Restart runtime ...).**",
"_____no_output_____"
]
],
[
[
"!pip install tensorflow==2.2.0 tfx==0.22.0",
"_____no_output_____"
]
],
[
[
"## Did you restart the runtime?\n\nIf you are using Google Colab, the first time that you run the cell above, you must restart the runtime (Runtime > Restart runtime ...). This is because of the way that Colab loads packages.",
"_____no_output_____"
],
[
"### Import packages\nWe import necessary packages, including standard TFX component classes.",
"_____no_output_____"
]
],
[
[
"import os\nimport pprint\nimport tempfile\nimport urllib\n\nimport absl\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\ntf.get_logger().propagate = False\npp = pprint.PrettyPrinter()\n\nimport tfx\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\nfrom tfx.proto import pusher_pb2\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n\n%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip",
"_____no_output_____"
]
],
[
[
"Let's check the library versions.",
"_____no_output_____"
]
],
[
[
"print('TensorFlow version: {}'.format(tf.__version__))\nprint('TFX version: {}'.format(tfx.__version__))",
"_____no_output_____"
]
],
[
[
"### Set up pipeline paths",
"_____no_output_____"
]
],
[
[
"# This is the root directory for your TFX pip package installation.\n_tfx_root = tfx.__path__[0]\n\n# This is the directory containing the TFX Chicago Taxi Pipeline example.\n_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')\n\n# This is the path where your model will be pushed for serving.\n_serving_model_dir = os.path.join(\n tempfile.mkdtemp(), 'serving_model/taxi_simple')\n\n# Set up logging.\nabsl.logging.set_verbosity(absl.logging.INFO)",
"_____no_output_____"
]
],
[
[
"### Download example data\nWe download the example dataset for use in our TFX pipeline.\n\nThe dataset we're using is the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. The columns in this dataset are:\n\n<table>\n<tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>\n<tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>\n<tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>\n<tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>\n<tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>\n<tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>\n</table>\n\nWith this dataset, we will build a model that predicts the `tips` of a trip.",
"_____no_output_____"
]
],
[
[
"_data_root = tempfile.mkdtemp(prefix='tfx-data')\nDATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'\n_data_filepath = os.path.join(_data_root, \"data.csv\")\nurllib.request.urlretrieve(DATA_PATH, _data_filepath)",
"_____no_output_____"
]
],
[
[
"Take a quick look at the CSV file.",
"_____no_output_____"
]
],
[
[
"!head {_data_filepath}",
"_____no_output_____"
]
],
[
[
"*Disclaimer: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.*",
"_____no_output_____"
],
[
"### Create the InteractiveContext\nLast, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook.",
"_____no_output_____"
]
],
[
[
"# Here, we create an InteractiveContext using default parameters. This will\n# use a temporary directory with an ephemeral ML Metadata database instance.\n# To use your own pipeline root or database, the optional properties\n# `pipeline_root` and `metadata_connection_config` may be passed to\n# InteractiveContext. Calls to InteractiveContext are no-ops outside of the\n# notebook.\ncontext = InteractiveContext()",
"_____no_output_____"
]
],
[
[
"## Run TFX components interactively\nIn the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts.",
"_____no_output_____"
],
[
"### ExampleGen\n\nThe `ExampleGen` component is usually at the start of a TFX pipeline. It will:\n\n1. Split data into training and evaluation sets (by default, 2/3 training + 1/3 eval)\n2. Convert data into the `tf.Example` format\n3. Copy data into the `_tfx_root` directory for other components to access\n\n`ExampleGen` takes as input the path to your data source. In our case, this is the `_data_root` path that contains the downloaded CSV.\n\nNote: In this notebook, we can instantiate components one-by-one and run them with `InteractiveContext.run()`. By contrast, in a production setting, we would specify all the components upfront in a `Pipeline` to pass to the orchestrator (see the \"Export to Pipeline\" section).",
"_____no_output_____"
]
],
[
[
"example_gen = CsvExampleGen(input=external_input(_data_root))\ncontext.run(example_gen)",
"_____no_output_____"
]
],
[
[
"Let's examine the output artifacts of `ExampleGen`. This component produces two artifacts, training examples and evaluation examples:",
"_____no_output_____"
]
],
[
[
"artifact = example_gen.outputs['examples'].get()[0]\nprint(artifact.split_names, artifact.uri)",
"_____no_output_____"
]
],
[
[
"We can also take a look at the first three training examples:",
"_____no_output_____"
]
],
[
[
"# Get the URI of the output artifact representing the training examples, which is a directory\ntrain_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n serialized_example = tfrecord.numpy()\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n pp.pprint(example)",
"_____no_output_____"
]
],
[
[
"Now that `ExampleGen` has finished ingesting the data, the next step is data analysis.",
"_____no_output_____"
],
[
"### StatisticsGen\nThe `StatisticsGen` component computes statistics over your dataset for data analysis, as well as for use in downstream components. It uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.\n\n`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen`.",
"_____no_output_____"
]
],
[
[
"statistics_gen = StatisticsGen(\n examples=example_gen.outputs['examples'])\ncontext.run(statistics_gen)",
"_____no_output_____"
]
],
[
[
"After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!",
"_____no_output_____"
]
],
[
[
"context.show(statistics_gen.outputs['statistics'])",
"_____no_output_____"
]
],
[
[
"### SchemaGen\n\nThe `SchemaGen` component generates a schema based on your data statistics. (A schema defines the expected bounds, types, and properties of the features in your dataset.) It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.\n\nNote: The generated schema is best-effort and only tries to infer basic properties of the data. It is expected that you review and modify it as needed.\n\n`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default.",
"_____no_output_____"
]
],
[
[
"schema_gen = SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=False)\ncontext.run(schema_gen)",
"_____no_output_____"
]
],
[
[
"After `SchemaGen` finishes running, we can visualize the generated schema as a table.",
"_____no_output_____"
]
],
[
[
"context.show(schema_gen.outputs['schema'])",
"_____no_output_____"
]
],
[
[
"Each feature in your dataset shows up as a row in the schema table, alongside its properties. The schema also captures all the values that a categorical feature takes on, denoted as its domain.\n\nTo learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen).",
"_____no_output_____"
],
[
"### ExampleValidator\nThe `ExampleValidator` component detects anomalies in your data, based on the expectations defined by the schema. It also uses the [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/get_started) library.\n\n`ExampleValidator` will take as input the statistics from `StatisticsGen`, and the schema from `SchemaGen`.\n\nBy default, it compares the statistics from the evaluation split to the schema from the training split.",
"_____no_output_____"
]
],
[
[
"example_validator = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'])\ncontext.run(example_validator)",
"_____no_output_____"
]
],
[
[
"After `ExampleValidator` finishes running, we can visualize the anomalies as a table.",
"_____no_output_____"
]
],
[
[
"context.show(example_validator.outputs['anomalies'])",
"_____no_output_____"
]
],
[
[
"In the anomalies table, we can see that the `company` feature takes on new values that were not in the training split. This information can be used to debug model performance, understand how your data evolves over time, and identify data errors.\n\nIn our case, this `company` anomaly is innocuous, but the `payment_type` could be fixed. For now we move on to the next step of transforming the data.",
"_____no_output_____"
],
[
"### Transform\nThe `Transform` component performs feature engineering for both training and serving. It uses the [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/get_started) library.\n\n`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.\n\nLet's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)). First, we define a few constants for feature engineering:\n\nNote: The `%%writefile` cell magic will save the contents of the cell as a `.py` file on disk. This allows the `Transform` component to load your code as a module.\n",
"_____no_output_____"
]
],
[
[
"_taxi_constants_module_file = 'taxi_constants.py'",
"_____no_output_____"
],
[
"%%writefile {_taxi_constants_module_file}\n\n# Categorical features are assumed to each have a maximum value in the dataset.\nMAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]\n\nCATEGORICAL_FEATURE_KEYS = [\n 'trip_start_hour', 'trip_start_day', 'trip_start_month',\n 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',\n 'dropoff_community_area'\n]\n\nDENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']\n\n# Number of buckets used by tf.transform for encoding each feature.\nFEATURE_BUCKET_COUNT = 10\n\nBUCKET_FEATURE_KEYS = [\n 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',\n 'dropoff_longitude'\n]\n\n# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform\nVOCAB_SIZE = 1000\n\n# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.\nOOV_SIZE = 10\n\nVOCAB_FEATURE_KEYS = [\n 'payment_type',\n 'company',\n]\n\n# Keys\nLABEL_KEY = 'tips'\nFARE_KEY = 'fare'\n\ndef transformed_name(key):\n return key + '_xf'",
"_____no_output_____"
]
],
[
[
"Next, we write a `preprocessing_fn` that takes in raw data as input, and returns transformed features that our model can train on:",
"_____no_output_____"
]
],
[
[
"_taxi_transform_module_file = 'taxi_transform.py'",
"_____no_output_____"
],
[
"%%writefile {_taxi_transform_module_file}\n\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\nimport taxi_constants\n\n_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS\n_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS\n_VOCAB_SIZE = taxi_constants.VOCAB_SIZE\n_OOV_SIZE = taxi_constants.OOV_SIZE\n_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT\n_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS\n_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS\n_FARE_KEY = taxi_constants.FARE_KEY\n_LABEL_KEY = taxi_constants.LABEL_KEY\n_transformed_name = taxi_constants.transformed_name\n\n\ndef preprocessing_fn(inputs):\n \"\"\"tf.transform's callback function for preprocessing inputs.\n Args:\n inputs: map from feature keys to raw not-yet-transformed features.\n Returns:\n Map from string feature key to transformed feature operations.\n \"\"\"\n outputs = {}\n for key in _DENSE_FLOAT_FEATURE_KEYS:\n # Preserve this feature as a dense float, setting nan's to the mean.\n outputs[_transformed_name(key)] = tft.scale_to_z_score(\n _fill_in_missing(inputs[key]))\n\n for key in _VOCAB_FEATURE_KEYS:\n # Build a vocabulary for this feature.\n outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(\n _fill_in_missing(inputs[key]),\n top_k=_VOCAB_SIZE,\n num_oov_buckets=_OOV_SIZE)\n\n for key in _BUCKET_FEATURE_KEYS:\n outputs[_transformed_name(key)] = tft.bucketize(\n _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)\n\n for key in _CATEGORICAL_FEATURE_KEYS:\n outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])\n\n # Was this passenger a big tipper?\n taxi_fare = _fill_in_missing(inputs[_FARE_KEY])\n tips = _fill_in_missing(inputs[_LABEL_KEY])\n outputs[_transformed_name(_LABEL_KEY)] = tf.where(\n tf.math.is_nan(taxi_fare),\n tf.cast(tf.zeros_like(taxi_fare), tf.int64),\n # Test if the tip was > 20% of the fare.\n tf.cast(\n tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n\n return outputs\n\n\ndef _fill_in_missing(x):\n \"\"\"Replace missing values in a SparseTensor.\n Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n Args:\n x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1\n in the second dimension.\n Returns:\n A rank 1 tensor where missing values of `x` have been filled in.\n \"\"\"\n default_value = '' if x.dtype == tf.string else 0\n return tf.squeeze(\n tf.sparse.to_dense(\n tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),\n default_value),\n axis=1)",
"_____no_output_____"
]
],
[
[
"Now, we pass in this feature engineering code to the `Transform` component and run it to transform your data.",
"_____no_output_____"
]
],
[
[
"transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=schema_gen.outputs['schema'],\n module_file=os.path.abspath(_taxi_transform_module_file))\ncontext.run(transform)",
"_____no_output_____"
]
],
[
[
"Let's examine the output artifacts of `Transform`. This component produces two types of outputs:\n\n* `transform_graph` is the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models).\n* `transformed_examples` represents the preprocessed training and evaluation data.",
"_____no_output_____"
]
],
[
[
"transform.outputs",
"_____no_output_____"
]
],
[
[
"Take a peek at the `transform_graph` artifact. It points to a directory containing three subdirectories.",
"_____no_output_____"
]
],
[
[
"train_uri = transform.outputs['transform_graph'].get()[0].uri\nos.listdir(train_uri)",
"_____no_output_____"
]
],
[
[
"The `transformed_metadata` subdirectory contains the schema of the preprocessed data. The `transform_fn` subdirectory contains the actual preprocessing graph. The `metadata` subdirectory contains the schema of the original data.\n\nWe can also take a look at the first three transformed examples:",
"_____no_output_____"
]
],
[
[
"# Get the URI of the output artifact representing the transformed examples, which is a directory\ntrain_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'train')\n\n# Get the list of files in this directory (all compressed TFRecord files)\ntfrecord_filenames = [os.path.join(train_uri, name)\n for name in os.listdir(train_uri)]\n\n# Create a `TFRecordDataset` to read these files\ndataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n\n# Iterate over the first 3 records and decode them.\nfor tfrecord in dataset.take(3):\n serialized_example = tfrecord.numpy()\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n pp.pprint(example)",
"_____no_output_____"
]
],
[
[
"After the `Transform` component has transformed your data into features, and the next step is to train a model.",
"_____no_output_____"
],
[
"### Trainer\nThe `Trainer` component will train a model that you define in TensorFlow. Default Trainer support Estimator API, to use Keras API, you need to specify [Generic Trainer](https://github.com/tensorflow/community/blob/master/rfcs/20200117-tfx-generic-trainer.md) by setup `custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor)` in Trainer's contructor.\n\n`Trainer` takes as input the schema from `SchemaGen`, the transformed data and graph from `Transform`, training parameters, as well as a module that contains user-defined model code.\n\nLet's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, [see the tutorial](https://www.tensorflow.org/guide/keras)):",
"_____no_output_____"
]
],
[
[
"_taxi_trainer_module_file = 'taxi_trainer.py'",
"_____no_output_____"
],
[
"%%writefile {_taxi_trainer_module_file}\n\nfrom typing import List, Text\n\nimport os\nimport absl\nimport datetime\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\nfrom tfx.components.trainer.executor import TrainerFnArgs\n\nimport taxi_constants\n\n_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS\n_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS\n_VOCAB_SIZE = taxi_constants.VOCAB_SIZE\n_OOV_SIZE = taxi_constants.OOV_SIZE\n_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT\n_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS\n_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS\n_MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES\n_LABEL_KEY = taxi_constants.LABEL_KEY\n_transformed_name = taxi_constants.transformed_name\n\n\ndef _transformed_names(keys):\n return [_transformed_name(key) for key in keys]\n\n\ndef _gzip_reader_fn(filenames):\n \"\"\"Small utility returning a record reader that can read gzip'ed files.\"\"\"\n return tf.data.TFRecordDataset(\n filenames,\n compression_type='GZIP')\n\n\ndef _get_serve_tf_examples_fn(model, tf_transform_output):\n \"\"\"Returns a function that parses a serialized tf.Example and applies TFT.\"\"\"\n\n model.tft_layer = tf_transform_output.transform_features_layer()\n\n @tf.function\n def serve_tf_examples_fn(serialized_tf_examples):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop(_LABEL_KEY)\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n transformed_features = model.tft_layer(parsed_features)\n return model(transformed_features)\n\n return serve_tf_examples_fn\n\n\ndef _input_fn(file_pattern: List[Text],\n tf_transform_output: tft.TFTransformOutput,\n batch_size: int = 200) -> tf.data.Dataset:\n \"\"\"Generates features and label for tuning/training.\n\n Args:\n file_pattern: List of paths or patterns of input tfrecord files.\n tf_transform_output: A TFTransformOutput.\n batch_size: representing the number of consecutive elements of returned\n dataset to combine in a single batch\n\n Returns:\n A dataset that contains (features, indices) tuple where features is a\n dictionary of Tensors, and indices is a single Tensor of label indices.\n \"\"\"\n transformed_feature_spec = (\n tf_transform_output.transformed_feature_spec().copy())\n\n dataset = tf.data.experimental.make_batched_features_dataset(\n file_pattern=file_pattern,\n batch_size=batch_size,\n features=transformed_feature_spec,\n reader=_gzip_reader_fn,\n label_key=_transformed_name(_LABEL_KEY))\n\n return dataset\n\n\ndef _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:\n \"\"\"Creates a DNN Keras model for classifying taxi data.\n\n Args:\n hidden_units: [int], the layer sizes of the DNN (input layer first).\n\n Returns:\n A keras Model.\n \"\"\"\n real_valued_columns = [\n tf.feature_column.numeric_column(key, shape=())\n for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n ]\n categorical_columns = [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n for key in _transformed_names(_VOCAB_FEATURE_KEYS)\n ]\n categorical_columns += [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n for key in _transformed_names(_BUCKET_FEATURE_KEYS)\n ]\n categorical_columns += [\n 
tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n key,\n num_buckets=num_buckets,\n default_value=0) for key, num_buckets in zip(\n _transformed_names(_CATEGORICAL_FEATURE_KEYS),\n _MAX_CATEGORICAL_FEATURE_VALUES)\n ]\n indicator_column = [\n tf.feature_column.indicator_column(categorical_column)\n for categorical_column in categorical_columns\n ]\n\n model = _wide_and_deep_classifier(\n # TODO(b/139668410) replace with premade wide_and_deep keras model\n wide_columns=indicator_column,\n deep_columns=real_valued_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25])\n return model\n\n\ndef _wide_and_deep_classifier(wide_columns, deep_columns, dnn_hidden_units):\n \"\"\"Build a simple keras wide and deep model.\n\n Args:\n wide_columns: Feature columns wrapped in indicator_column for wide (linear)\n part of the model.\n deep_columns: Feature columns for deep part of the model.\n dnn_hidden_units: [int], the layer sizes of the hidden DNN.\n\n Returns:\n A Wide and Deep Keras model\n \"\"\"\n # Following values are hard coded for simplicity in this example,\n # However prefarably they should be passsed in as hparams.\n\n # Keras needs the feature definitions at compile time.\n # TODO(b/139081439): Automate generation of input layers from FeatureColumn.\n input_layers = {\n colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)\n for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n }\n input_layers.update({\n colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n for colname in _transformed_names(_VOCAB_FEATURE_KEYS)\n })\n input_layers.update({\n colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n for colname in _transformed_names(_BUCKET_FEATURE_KEYS)\n })\n input_layers.update({\n colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)\n })\n\n # TODO(b/161952382): Replace with Keras preprocessing layers.\n deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)\n for numnodes in dnn_hidden_units:\n deep = tf.keras.layers.Dense(numnodes)(deep)\n wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)\n\n output = tf.keras.layers.Dense(\n 1, activation='sigmoid')(\n tf.keras.layers.concatenate([deep, wide]))\n\n model = tf.keras.Model(input_layers, output)\n model.compile(\n loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(lr=0.001),\n metrics=[tf.keras.metrics.BinaryAccuracy()])\n model.summary(print_fn=absl.logging.info)\n return model\n\n\n# TFX Trainer will call this function.\ndef run_fn(fn_args: TrainerFnArgs):\n \"\"\"Train the model based on given args.\n\n Args:\n fn_args: Holds args used to train the model as name/value pairs.\n \"\"\"\n # Number of nodes in the first layer of the DNN\n first_dnn_layer_size = 100\n num_dnn_layers = 4\n dnn_decay_factor = 0.7\n\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)\n eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)\n\n model = _build_keras_model(\n # Construct layers sizes with exponetial decay\n hidden_units=[\n max(2, int(first_dnn_layer_size * dnn_decay_factor**i))\n for i in range(num_dnn_layers)\n ])\n\n # TODO(b/158106209): use ModelRun instead of Model artifact for logging.\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = 
tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n model.fit(\n train_dataset,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)",
"_____no_output_____"
]
],
[
[
"Now, we pass in this model code to the `Trainer` component and run it to train the model.",
"_____no_output_____"
]
],
[
[
"trainer = Trainer(\n module_file=os.path.abspath(_taxi_trainer_module_file),\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=schema_gen.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=10000),\n eval_args=trainer_pb2.EvalArgs(num_steps=5000))\ncontext.run(trainer)",
"_____no_output_____"
]
],
[
[
"#### Analyze Training with TensorBoard\nTake a peek at the trainer artifact. It points to a directory containing the model subdirectories.",
"_____no_output_____"
]
],
[
[
"model_artifact_dir = trainer.outputs['model'].get()[0].uri\npp.pprint(os.listdir(model_artifact_dir))\nmodel_dir = os.path.join(model_artifact_dir, 'serving_model_dir')\npp.pprint(os.listdir(model_dir))",
"_____no_output_____"
]
],
[
[
"Optionally, we can connect TensorBoard to the Trainer to analyze our model's training curves.",
"_____no_output_____"
]
],
[
[
"log_dir = os.path.join(model_artifact_dir, 'logs')\n\n%load_ext tensorboard\n%tensorboard --logdir {log_dir}",
"_____no_output_____"
]
],
[
[
"### Evaluator\nThe `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library. The `Evaluator` can also optionally validate that a newly trained model is better than the previous model. This is useful in a production pipeline setting where you may automatically train and validate a model every day. In this notebook, we only train one model, so the `Evaluator` automatically will label the model as \"good\". \n\n`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values (e.g. how does your model perform on taxi trips that start at 8am versus 8pm?). See an example of this configuration below:",
"_____no_output_____"
]
],
[
[
"eval_config = tfma.EvalConfig(\n model_specs=[\n # This assumes a serving model with signature 'serving_default'. If\n # using estimator based EvalSavedModel, add signature_name: 'eval' and \n # remove the label_key.\n tfma.ModelSpec(label_key='tips')\n ],\n metrics_specs=[\n tfma.MetricsSpec(\n # The metrics added here are in addition to those saved with the\n # model (assuming either a keras model or EvalSavedModel is used).\n # Any metrics added into the saved model (for example using\n # model.compile(..., metrics=[...]), etc) will be computed\n # automatically.\n # To add validation thresholds for metrics saved with the model,\n # add them keyed by metric name to the thresholds map.\n metrics=[\n tfma.MetricConfig(class_name='ExampleCount'),\n tfma.MetricConfig(class_name='BinaryAccuracy',\n threshold=tfma.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.5}),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10})))\n ]\n )\n ],\n slicing_specs=[\n # An empty slice spec means the overall slice, i.e. the whole dataset.\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. In this case, data is\n # sliced along feature column trip_start_hour.\n tfma.SlicingSpec(feature_keys=['trip_start_hour'])\n ])",
"_____no_output_____"
]
],
[
[
"Next, we give this configuration to `Evaluator` and run it.",
"_____no_output_____"
]
],
[
[
"# Use TFMA to compute a evaluation statistics over features of a model and\n# validate them against a baseline.\n\n# The model resolver is only required if performing model validation in addition\n# to evaluation. In this case we validate against the latest blessed model. If\n# no model has been blessed before (as in this case) the evaluator will make our\n# candidate the first blessed model.\nmodel_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing))\ncontext.run(model_resolver)\n\nevaluator = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config)\ncontext.run(evaluator)",
"_____no_output_____"
]
],
[
[
"Now let's examine the output artifacts of `Evaluator`. ",
"_____no_output_____"
]
],
[
[
"evaluator.outputs",
"_____no_output_____"
]
],
[
[
"Using the `evaluation` output we can show the default visualization of global metrics on the entire evaluation set.",
"_____no_output_____"
]
],
[
[
"context.show(evaluator.outputs['evaluation'])",
"_____no_output_____"
]
],
[
[
"To see the visualization for sliced evaluation metrics, we can directly call the TensorFlow Model Analysis library.",
"_____no_output_____"
]
],
[
[
"import tensorflow_model_analysis as tfma\n\n# Get the TFMA output result path and load the result.\nPATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\ntfma_result = tfma.load_eval_result(PATH_TO_RESULT)\n\n# Show data sliced along feature column trip_start_hour.\ntfma.view.render_slicing_metrics(\n tfma_result, slicing_column='trip_start_hour')",
"_____no_output_____"
]
],
[
[
"This visualization shows the same metrics, but computed at every feature value of `trip_start_hour` instead of on the entire evaluation set.\n\nTensorFlow Model Analysis supports many other visualizations, such as Fairness Indicators and plotting a time series of model performance. To learn more, see [the tutorial](https://www.tensorflow.org/tfx/tutorials/model_analysis/tfma_basic).",
"_____no_output_____"
],
[
"Since we added thresholds to our config, validation output is also available. The precence of a `blessing` artifact indicates that our model passed validation. Since this is the first validation being performed the candidate is automatically blessed.",
"_____no_output_____"
]
],
[
[
"blessing_uri = evaluator.outputs.blessing.get()[0].uri\n!ls -l {blessing_uri}",
"_____no_output_____"
]
],
[
[
"Now can also verify the success by loading the validation result record:",
"_____no_output_____"
]
],
[
[
"PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri\nprint(tfma.load_validation_result(PATH_TO_RESULT))",
"_____no_output_____"
]
],
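[
[
"Before moving on to the `Pusher`, here is a minimal sketch of one of the additional TFMA visualizations mentioned above: rendering metrics from multiple evaluation runs as a time series. It reuses the single evaluation output we already have, and the `tfma.load_eval_results` / `tfma.view.render_time_series` calls are an assumption about the TFMA version pinned by this pipeline rather than part of the original tutorial.",
"_____no_output_____"
]
],
[
[
"# Minimal sketch (assumption: these TFMA APIs are available in the pinned version).\n# With several pipeline runs you would pass a list of past Evaluator output URIs here.\neval_output_paths = [evaluator.outputs['evaluation'].get()[0].uri]\neval_results = tfma.load_eval_results(\n    eval_output_paths, mode=tfma.constants.MODEL_CENTRIC_MODE)\ntfma.view.render_time_series(eval_results, tfma.SlicingSpec())",
"_____no_output_____"
]
],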
[
[
"### Pusher\nThe `Pusher` component is usually at the end of a TFX pipeline. It checks whether a model has passed validation, and if so, exports the model to `_serving_model_dir`.",
"_____no_output_____"
]
],
[
[
"pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=evaluator.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=_serving_model_dir)))\ncontext.run(pusher)",
"_____no_output_____"
]
],
[
[
"Let's examine the output artifacts of `Pusher`. ",
"_____no_output_____"
]
],
[
[
"pusher.outputs",
"_____no_output_____"
]
],
[
[
"In particular, the Pusher will export your model in the SavedModel format, which looks like this:",
"_____no_output_____"
]
],
[
[
"push_uri = pusher.outputs.model_push.get()[0].uri\nmodel = tf.saved_model.load(push_uri)\n\nfor item in model.signatures.items():\n pp.pprint(item)",
"_____no_output_____"
]
],
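[
[
"As a final, optional check we can call the pushed model directly. The cell below is a minimal sketch only: it assumes the exported serving signature accepts a batch of serialized `tf.Example` protos under an `examples` argument (check the signature printout above for the actual input name), and a real request would need to carry every raw feature the Transform graph expects, not just the single illustrative one used here.",
"_____no_output_____"
]
],
[
[
"# Minimal sketch (assumptions noted in the cell above): invoke the serving signature\n# of the loaded SavedModel with one serialized tf.Example.\nserving_fn = model.signatures['serving_default']\nraw_example = tf.train.Example(features=tf.train.Features(feature={\n    'trip_start_hour': tf.train.Feature(int64_list=tf.train.Int64List(value=[8])),\n}))\nprint(serving_fn(examples=tf.constant([raw_example.SerializeToString()])))",
"_____no_output_____"
]
],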
[
[
"We're finished our tour of built-in TFX components!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
ecde41a73e564cbd782ed88b281c893b7300de58 | 56,267 | ipynb | Jupyter Notebook | Competition 8 - Marketing Analytics/submission.ipynb | lpsy/CPT12_ShopeeCodeLeague | d8fb099668fd086f458c16a73b23fe7b079179b0 | [
"MIT"
] | null | null | null | Competition 8 - Marketing Analytics/submission.ipynb | lpsy/CPT12_ShopeeCodeLeague | d8fb099668fd086f458c16a73b23fe7b079179b0 | [
"MIT"
] | null | null | null | Competition 8 - Marketing Analytics/submission.ipynb | lpsy/CPT12_ShopeeCodeLeague | d8fb099668fd086f458c16a73b23fe7b079179b0 | [
"MIT"
] | null | null | null | 34.540823 | 137 | 0.321592 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"df_train = pd.read_csv('train.csv')\ndf_train.set_index('row_id', inplace=True)\n\ndf_test = pd.read_csv('test.csv')\n\ndf_train",
"_____no_output_____"
],
[
"df_users = pd.read_csv('users.csv')\ndf_users.set_index('user_id', inplace=True)\ndf_users.attr_2 = df_users.attr_2.astype(object)\ndf_users.attr_3 = df_users.attr_3.astype(object)\ndf_users.age = df_users.age.clip(0, 100)\ndf_users.age = df_users.age.apply(lambda x: 31 if np.isnan(x) else x)\ndf_users.domain = df_users.domain.apply(lambda x: 1 if x=='@gmail.com' else 0).astype(object)\n\ndf_users",
"_____no_output_____"
],
[
"def prepDF(df, df_users):\n ceiling = {\n 'last_open_day': 1000,\n 'last_checkout_day':1500,\n 'last_login_day':20000\n }\n df['last_open_day'] = df['last_open_day'].apply(\n lambda x: ceiling['last_open_day'] if x=='Never open' else int(x))\n df['last_checkout_day'] = df['last_checkout_day'].apply(\n lambda x: ceiling['last_checkout_day'] if x=='Never checkout' else int(x))\n df['last_login_day'] = df['last_login_day'].apply(\n lambda x: ceiling['last_login_day'] if x=='Never login' else int(x))\n df['country_code'] = df['country_code'].astype(object)\n \n df_merged = df.merge(df_users, left_on='user_id', right_index=True)\n\n features = [\n 'country_code', 'subject_line_length',\n 'last_open_day', 'last_login_day', 'last_checkout_day',\n 'open_count_last_10_days', 'open_count_last_30_days',\n 'open_count_last_60_days', 'login_count_last_10_days',\n 'login_count_last_30_days', 'login_count_last_60_days',\n 'checkout_count_last_10_days', 'checkout_count_last_30_days',\n 'checkout_count_last_60_days', 'attr_2', 'attr_3', 'age'\n ]\n X = pd.get_dummies(df_merged[features], drop_first=True,\n columns=['country_code', 'attr_2', 'attr_3'])\n if 'open_flag' in df_merged.columns:\n y = df_merged['open_flag']\n else:\n y = None\n \n return X, y",
"_____no_output_____"
],
[
"X, y = prepDF(df_train, df_users)",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n# from sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n# from sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix, classification_report",
"_____no_output_____"
],
[
"from imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.pipeline import Pipeline",
"Using TensorFlow backend.\n"
],
[
"# set seed for reproducibility\nseed = 0\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=seed, stratify=y)",
"_____no_output_____"
],
[
"class_weights = {0:1, y.value_counts()[0]/y.value_counts()[1]}",
"_____no_output_____"
],
[
"cl1 = LogisticRegression(max_iter=1000, class_weight=class_weights,\n solver='liblinear', random_state=seed)\ncl2 = LinearSVC(dual=False, max_iter=10000, random_state=seed, class_weight=class_weights)\ncl3 = DecisionTreeClassifier(max_features='log2', random_state=seed, class_weight=class_weights)\ncl4 = RandomForestClassifier(max_features='log2', random_state=seed, class_weight=class_weights)\ncl5 = GradientBoostingClassifier(max_features='log2', random_state=seed)\n\ncl_list = [cl1, cl2, cl3, cl4, cl5]",
"_____no_output_____"
],
[
"penalty = ['l1', 'l2']\nC = np.logspace(-5, 5, num=15)\nmax_depth = [3, 4, 5, 6, 7]\nn_estimators = [20, 40, 64, 128, 256]\nlearning_rate = [0.005, 0.01, 0.05, 0.1]\nsubsample_list = [0.3, 0.5, 0.7, 1]\nk = [5, 10, 15, 20]\n\n\nparams = {'decisiontreeclassifier__max_depth': n_estimators,\n 'randomforestclassifier__n_estimators': n_estimators,\n 'randomforestclassifier__max_depth': max_depth,\n 'gradientboostingclassifier__max_depth': max_depth,\n 'gradientboostingclassifier__n_estimators': n_estimators,\n 'gradientboostingclassifier__learning_rate': learning_rate,\n 'gradientbosstingclassifier__subsample_list': subsample_list,\n 'logisticregression__C': C,\n 'logisticregression__penalty': penalty,\n 'linearsvc__penalty': penalty,\n 'linearsvc__C': C,}\n\nparam_grid = []\nfor cl in cl_list:\n cl_params = {}#'sampler__k_neighbors':k}\n for p, val in params.items():\n name = p.split('__')[0]\n parameter = p.split('__')[1]\n\n if name == cl.__class__.__name__.lower():\n cl_params[f'model__{parameter}'] = val\n\n param_grid.append(cl_params)",
"_____no_output_____"
],
[
"def ml_tuner(X, y, estimators, params, scaler, sampler, scorer):\n models = {}\n for cl, p in zip(estimators, params):\n print(cl.__class__.__name__)\n est = Pipeline(\n [('sampler', sampler), ('scaler', scaler), ('model', cl)]\n )\n gs_cv = GridSearchCV(est, param_grid=p,\n n_jobs=-1, scoring=scorer,\n cv=10, verbose=1)\n gs_cv.fit(X, y) \n models[cl.__class__.__name__] = gs_cv\n return models",
"_____no_output_____"
],
[
"scorer = metrics.make_scorer(metrics.matthews_corrcoef)",
"_____no_output_____"
],
[
"tuned_models = ml_tuner(\n X_train, y_train, cl_list, param_grid,\n MinMaxScaler(), SMOTE(random_state=seed), scorer\n)",
"LogisticRegression\nFitting 10 folds for each of 30 candidates, totalling 300 fits\n"
],
[
"tuned_models.keys()",
"_____no_output_____"
],
[
"gs_scores = {}\n\ngs_scores['names'] = [name for name in tuned_models.keys()]\ngs_scores['CV_Score'] = [m.best_score_ for m in tuned_models.values()]\ngs_scores['TestScore'] = [m.score(X_test, y_test) for m in tuned_models.values()]\ngs_scores['MCC'] = [scorer(m, X_test, y_test) for m in tuned_models.values()]\n\ndf_scores = pd.DataFrame(gs_scores)\ndf_scores",
"_____no_output_____"
],
[
"X_holdout, _ = prepDF(df_test, df_users)",
"_____no_output_____"
],
[
"y_pred = tuned_models['GradientBoostingClassifier'].predict(X_holdout)",
"_____no_output_____"
],
[
"df_submission = pd.DataFrame({\n 'row_id':df_test['row_id'].values,\n 'open_flag':y_pred\n})\ndf_submission.to_csv('submission.csv', index=False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecde46b19d5448eff9daba9ad983559e229bcda1 | 4,863 | ipynb | Jupyter Notebook | notebook/Analytic despace aberration.ipynb | jmeyers314/jtrace | 9149a5af766fb9a9cd7ebfe6f3f18de0eb8b2e89 | [
"BSD-2-Clause"
] | 13 | 2018-12-24T03:55:04.000Z | 2021-11-09T11:40:40.000Z | notebook/Analytic despace aberration.ipynb | bregeon/batoid | 7b03d9b59ff43db6746eadab7dd58a463a0415c3 | [
"BSD-2-Clause"
] | 65 | 2017-08-15T07:19:05.000Z | 2021-09-08T17:44:57.000Z | notebook/Analytic despace aberration.ipynb | bregeon/batoid | 7b03d9b59ff43db6746eadab7dd58a463a0415c3 | [
"BSD-2-Clause"
] | 10 | 2019-02-19T07:02:31.000Z | 2021-12-10T22:19:40.000Z | 29.834356 | 98 | 0.467407 | [
[
[
"import batoid\nimport galsim\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom ipywidgets import interact\nimport ipywidgets as widgets\n%matplotlib inline",
"_____no_output_____"
],
[
"def makeTelescope(L, F, dz):\n \"\"\"\n Parameters\n ----------\n L : float\n focal length in meters\n F : float\n F-number\n dz : float\n Focal plane offset \n \"\"\"\n R = 2*L # radius of curvature \n telescope = batoid.CompoundOptic(\n items = [\n batoid.Mirror(\n batoid.Paraboloid(R),\n name=\"Mirror\"\n ),\n batoid.Detector(\n batoid.Plane(),\n name=\"Detector\",\n coordSys=batoid.CoordSys(origin=[0,0,L+dz])\n )\n ]\n )\n telescope.backDist = 1.1*L\n telescope.pupilSize = L/F\n telescope.sphereRadius = L\n telescope.stopSurface = batoid.Interface(batoid.Plane())\n return telescope",
"_____no_output_____"
],
[
"def a4coef(L, F, dz, wavelength):\n R = L\n alpha = dz/R\n term1 = alpha*R\n term1 /= 4*np.sqrt(3)*(1-alpha)\n term1 /= (2*F)**2\n term2 = alpha*(1+alpha+alpha**2)*R\n term2 /= 16*np.sqrt(3)*(1-alpha)**3\n term2 /= (2*F)**4\n return (term1+term2)/wavelength\n\ndef a11coef(L, F, dz, wavelength):\n R = L\n alpha = dz/R\n result = alpha*(1+alpha*alpha**2)*R\n result /= 48*np.sqrt(5)*(1-alpha)**3\n result /= (2*F)**4\n return -result/wavelength",
"_____no_output_____"
],
[
"@interact(\n L = widgets.FloatSlider(min=1.0, max=20.0, step=0.1, value=10.0,\n description=\"L (m)\"),\n F = widgets.FloatSlider(min=1.0, max=10.0, step=0.05, value=3.0,\n description=\"F/#\"),\n dz = widgets.FloatSlider(min=-1000, max=1000, step=10, value=800.0,\n description=\"dz ($\\\\mu m$)\"),\n theta_x = widgets.FloatSlider(min=-1.75, max=1.75, step=0.05, value=0.0,\n description=\"$\\\\theta_x (deg)$\"),\n theta_y = widgets.FloatSlider(min=-1.75, max=1.75, step=0.05, value=0.0,\n description=\"$\\\\theta_y (deg)$\")\n)\ndef zernike(L, F, dz, theta_x, theta_y):\n telescope = makeTelescope(L, F, dz*1e-6)\n wavelength = 750e-9\n z = batoid.zernikeGQ(\n telescope, np.deg2rad(theta_x), np.deg2rad(theta_y), \n wavelength, jmax=22, rings=10\n )\n for i in range(1, len(z)//2+1):\n print(\"{:6d} {:6.3f} {:6d} {:6.3f}\".format(i, z[i], i+11, z[i+11])) \n\n print(\"a4 analytic: {:6.3f}\".format(a4coef(L, F, dz*1e-6, wavelength)))\n print(\"a11 analytic: {:6.3f}\".format(a11coef(L, F, dz*1e-6, wavelength))) ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
ecde4bc98a75a72f0d3ec39c7202aa99b539b189 | 72,931 | ipynb | Jupyter Notebook | src/Notebook/Uniform conductance starting away from the soma - loop.ipynb | gusris/pnas-simulations | 285d2a6cc1843ca79cdf1b76352ad49776a6480a | [
"MIT"
] | null | null | null | src/Notebook/Uniform conductance starting away from the soma - loop.ipynb | gusris/pnas-simulations | 285d2a6cc1843ca79cdf1b76352ad49776a6480a | [
"MIT"
] | null | null | null | src/Notebook/Uniform conductance starting away from the soma - loop.ipynb | gusris/pnas-simulations | 285d2a6cc1843ca79cdf1b76352ad49776a6480a | [
"MIT"
] | 1 | 2021-06-18T20:34:52.000Z | 2021-06-18T20:34:52.000Z | 168.821759 | 24,614 | 0.881738 | [
[
[
"\"\"\"\nCylindrical axon model voltage clamped at on\n\n\"\"\"\n\n\n\n%matplotlib inline\n\nfrom brian2 import *\nimport pandas as pd\nimport gc\n\ndefaultclock.dt = 0.015*ms\n\n# Standard parameters\n# Passive parameters\nEL = -80*mV\nCm = 0.9*uF/cm**2\ngL = 1.*(siemens/meter**2)\nRi = 150*ohm*cm\n\n# Na channels\nENa = 60.*mV\nka = 6.*mV\nva = -30.*mV\n#gNa = 3000.*(siemens/meter**2) \ntaum = 0.05*ms\n\n# Morphology\nlength = 300.*um\ndiam = 1.5*um\n\n# Na channels\nAIS_length = 45.*um\nNa_start = 5.*um\n\n# Parameter ranges for loops\n#diams = [0.5,1.,1.5,2.,2.5,3.,3.5]*um\n#AIS_lengths = [30.,35.,40.,45.,50.,55.,60.]*um\n#Na_starts = [0.,5.,10.,15.,20.,25.,30.,35.]*um\ngNas = [1000.,2000.,3000.,4000.,5000.,6000.]*(siemens/meter**2) \n#gLs = [0.7,0.8,0.9,1.,1.1,1.2,1.3]*(siemens/meter**2)\n\nthresholds = []\nI_thres_preds = []\nI_thres_sims = []\nI_above_preds = []\nI_above_preds2 = []\nI_above_sims = []\n\nfor gNa in gNas:\n print Na_start\n Na_end = Na_start+AIS_length\n axon = Cylinder(diameter=diam, length=length, n=300)\n \n duration = 300*ms\n \n # Channels\n eqs='''\n Im = gL*(EL - v) + gclamp*(vc - v) + gNa*m*(ENa - v) : amp/meter**2\n dm/dt = (minf - m) / taum: 1 # simplified Na channel\n minf = 1 / (1 + exp((va - v) / ka)) : 1\n gclamp : siemens/meter**2\n gNa : siemens/meter**2\n vc = EL + 50*mV * t / duration : volt (shared) # Voltage clamp with a ramping voltage command\n '''\n\n neuron = SpatialNeuron(morphology=axon, model=eqs, Cm=Cm, Ri=Ri,\n method=\"exponential_euler\")\n \n # AIS definition\n initial_segment = axon[Na_start:Na_end]\n neuron.gNa[initial_segment] = gNa\n \n # Initialisation\n neuron.v = EL\n neuron.gclamp[0] = gL*5000000\n\n # Monitors\n mon = StateMonitor(neuron, ('v','m'), record=True)\n \n run(duration, report='text')\n\n # Theory\n gdensity = (4.*Ri/diam)*gNa # meter**-2\n ra = (4.*Ri)/(pi*diam**2) # ohm/cm\n supp = 1./(sqrt(gdensity)*tanh(sqrt(gdensity)*AIS_length)) # The non-physical distance term in V'(0)\n\n def dv_na_start(y):\n return (ENa-y)*(1./(Na_start+supp))\n def I_peak(y):\n return dv_na_start(y)/ra\n \n # Other formulation of the theory\n alpha = 1./(tanh(sqrt((4.*Ri*gNa)/diam)*AIS_length))\n cste1 = 0.5*sqrt(diam/(gNa*Ri))*alpha\n print alpha\n\n def I_peak_k(y):\n return ((ENa-y)*((pi*diam**2)/(4*Ri)))/(Na_start+cste1)\n \n dvdx = diff(mon.v,axis=0)/(1*um)\n vs_range1 = linspace(-75, -55, 21)*mV\n x = neuron.distance\n dt_per_volt = len(mon.t) / (50*mV)\n \n for vs in vs_range1:\n # Rough threshold\n thres_approx = [vs for vs in vs_range1 if mon.m[Na_end/um-1, int(dt_per_volt * (vs - EL))] >= 0.5][0]\n \n # Precise threshold\n dv_per_dt = (50.*mV)/(duration/defaultclock.dt)\n dv_in_1mV = (1.*mV)/dv_per_dt\n vs_range2 = linspace(thres_approx/mV-2., thres_approx/mV+2., 4*dv_in_1mV)*mV\n thres = [vs for vs in vs_range2 if mon.m[Na_end/um-1, int(dt_per_volt * (vs - EL))] >= 0.5][0]\n \n if Na_start==0*um:\n I_thres_sim = (dvdx[Na_start/um,int(dt_per_volt * (thres - EL))]/ra)/namp\n I_above_sim = (dvdx[Na_start/um,int(dt_per_volt * (thres+0.2*mV - EL))]/ra)/namp\n else:\n I_thres_sim = (dvdx[Na_start/um-1,int(dt_per_volt * (thres - EL))]/ra)/namp\n I_above_sim = (dvdx[Na_start/um-1,int(dt_per_volt * (thres+0.2*mV - EL))]/ra)/namp\n \n I_thres_pred = I_peak(thres)/namp\n I_above_pred = I_peak(thres+0.2*mV)/namp\n I_above_pred2 = I_peak_k(thres+0.2*mV)/namp\n \n figure(1)\n plot((dvdx[:,int(dt_per_volt * (thres+0.2*mV - EL))]/ra)/namp, label='%s' %AIS_length)\n xlabel('Location (um)')\n ylabel('Axonal current (nA)')\n legend(loc='best')\n 
xlim(0,80)\n \n thresholds.append(thres)\n I_thres_preds.append(I_thres_pred)\n I_thres_sims.append(I_thres_sim)\n I_above_preds.append(I_above_pred)\n I_above_preds2.append(I_above_pred2)\n I_above_sims.append(I_above_sim)\n \nshow()",
"5. um\n"
],
[
"I_above_sims",
"_____no_output_____"
],
[
"I_above_preds2",
"_____no_output_____"
]
],
[
[
"# Creating a DataFrame\n\nat_threshold = {'Parameter': Na_starts/um,\n 'Current (simulation) (nA)':I_above_sims, \n 'Current (prediction) (nA)':I_above_preds}\n\ndf_at_thres = pd.DataFrame(at_threshold)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nwriter = pd.ExcelWriter(\"/Users/sarahgoethals/Dropbox/Project-2_RBrette/Data Fig4/panelD2_AISonset.xlsx\", engine='xlsxwriter')\ndf_at_thres.to_excel(writer, sheet_name='AIS onset')\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()",
"_____no_output_____"
]
],
[
[
"from openpyxl import load_workbook\n\ndv_start = {'Parameter': gNas/(siemens/meter**2) ,\n 'Current (simulation) (nA)':I_above_sims, \n 'Current (prediction) (nA)':I_above_preds}\n\ndf_dv_start = pd.DataFrame(dv_start)\n\nbook = load_workbook(\"/Users/sarahgoethals/Dropbox/Project-2_RBrette/Data Fig4/panelC2_gNa.xlsx\")\nwriter = pd.ExcelWriter(\"/Users/sarahgoethals/Dropbox/Project-2_RBrette/Data Fig4/panelC2_gNa.xlsx\", engine='openpyxl') \nwriter.book = book\nwriter.sheets = dict((ws.title, ws) for ws in book.worksheets)\n\ndf_dv_start.to_excel(writer, \"Na conductance\")\n\nwriter.save()",
"_____no_output_____"
]
],
[
[
"Axonal peak current ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nmatplotlib.rcParams['figure.figsize'] = (10, 4)\n\nplot(AIS_lengths/um , I_above_preds, 'bo-', label='prediction')\nplot(AIS_lengths/um, I_above_sims, 'go-', label='simulation')\nxlabel('AIS onset (um)', fontsize=14)\nylabel('Axonal current (nA)', fontsize=14)\nlegend(loc='best', fontsize=14)\nylim(0,15)\nshow()",
"_____no_output_____"
]
],
[
[
"Somatic voltage threshold for spike initiation",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nmatplotlib.rcParams['figure.figsize'] = (10, 4)\n\nplot(gNas/(siemens/meter**2) ,thresholds/mV, 'bo-')\nxlabel('AIS onset (um)', fontsize=14)\nylabel('Voltage threshold (mV)', fontsize=14)\n#ylim(-75,-60)\nshow()",
"_____no_output_____"
]
]
] | [
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecde56548168ff4d1e3f07274ba05d540450fb52 | 18,514 | ipynb | Jupyter Notebook | src/jupyter/python/q/pandas-to-q.ipynb | saarahrasheed/tsa | e4460f707eeecb737663c48d8fc3245f0acb124c | [
"Apache-2.0"
] | 117 | 2017-06-30T14:29:32.000Z | 2022-02-10T00:54:35.000Z | src/jupyter/python/q/pandas-to-q.ipynb | saarahrasheed/tsa | e4460f707eeecb737663c48d8fc3245f0acb124c | [
"Apache-2.0"
] | 2 | 2017-09-01T11:42:14.000Z | 2017-11-29T20:00:19.000Z | src/jupyter/python/q/pandas-to-q.ipynb | saarahrasheed/tsa | e4460f707eeecb737663c48d8fc3245f0acb124c | [
"Apache-2.0"
] | 37 | 2017-07-05T19:51:10.000Z | 2021-04-27T00:11:18.000Z | 27.674141 | 253 | 0.396079 | [
[
[
"import os, sys\nsys.path.append(os.path.abspath('../../../main/python'))\nimport thalesians.tsa.q.qutils as qutils",
"_____no_output_____"
],
[
"import datetime as dt\nimport pandas as pd",
"_____no_output_____"
],
[
"from qpython import qconnection",
"_____no_output_____"
],
[
"df = pd.DataFrame({\n 'sym': ['foo', None, 'baz', 'qux', 'quux', 'quuz', 'corge'],\n 'tally': [3, 7, 5, 3, 8, 10, 12],\n 'price': [3.57, 10.87, 10.28, 3.22, 18.15, 29.29, 10.09],\n 'date': [\n dt.date(2019, 5, 17), dt.date(2019, 5, 17), dt.date(2019, 5, 17),\n dt.date(2019, 5, 17), dt.date(2019, 5, 17), dt.date(2019, 5, 17),\n dt.date(2019, 5, 17)],\n 'time': [\n dt.time(21, 43, 54, 357000), dt.time(21, 43, 54, 357000), dt.time(21, 43, 54, 357000),\n dt.time(21, 43, 54, 357000), dt.time(21, 43, 54, 357000), dt.time(21, 43, 54, 357000),\n dt.time(21, 43, 54, 357000)],\n 'datetime': [\n dt.datetime(2019, 5, 17, 21, 43, 54, 357000), dt.datetime(2019, 5, 17, 21, 43, 54, 357000),\n dt.datetime(2019, 5, 17, 21, 43, 54, 357000), dt.datetime(2019, 5, 17, 21, 43, 54, 357000),\n dt.datetime(2019, 5, 17, 21, 43, 54, 357000), dt.datetime(2019, 5, 17, 21, 43, 54, 357000),\n dt.datetime(2019, 5, 17, 21, 43, 54, 357000)]\n })",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"q_code = []",
"_____no_output_____"
],
[
"q_code.append(qutils.df_to_q_table_schema(df, 'sample'))",
"_____no_output_____"
],
[
"q_code.extend(qutils.df_to_upsert_statements(df, 'sample'))",
"_____no_output_____"
],
[
"q = qconnection.QConnection(host='localhost', port=2507, pandas=True)\nq.open()",
"_____no_output_____"
],
[
"for q_line in q_code:\n q(q_line)",
"_____no_output_____"
],
[
"q('select from sample')",
"_____no_output_____"
],
[
"batches = qutils.df_to_batch_append_statements(df, 'sample', rows_per_batch=3)",
"_____no_output_____"
],
[
"len(batches)",
"_____no_output_____"
],
[
"batches[0]",
"_____no_output_____"
],
[
"batches[1]",
"_____no_output_____"
],
[
"batches[2]",
"_____no_output_____"
],
[
"q(qutils.df_to_q_table_schema(df, 'sample'))",
"_____no_output_____"
],
[
"for batch in batches:\n q(batch)",
"_____no_output_____"
],
[
"q('select from sample')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecde5cef3eb04ab91db3409440f8bad1d19a5cc2 | 60,484 | ipynb | Jupyter Notebook | data-handling/Graphs.ipynb | JVBravoo/Learning-Machine-Learning | 1ee58c1c74661450132f2af6a26066980b5f9cf2 | [
"MIT"
] | 1 | 2020-08-05T20:51:02.000Z | 2020-08-05T20:51:02.000Z | data-handling/Graphs.ipynb | JVBravoo/Learning-Machine-Learning | 1ee58c1c74661450132f2af6a26066980b5f9cf2 | [
"MIT"
] | 1 | 2020-08-04T19:27:22.000Z | 2020-08-04T19:27:22.000Z | data-handling/Graphs.ipynb | JVBravoo/Learning-Machine-Learning | 1ee58c1c74661450132f2af6a26066980b5f9cf2 | [
"MIT"
] | null | null | null | 122.189899 | 13,992 | 0.831426 | [
[
[
"pip install pandas",
"Requirement already satisfied: pandas in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (1.0.1)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from pandas) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from pandas) (2019.3)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from pandas) (1.18.1)\nRequirement already satisfied: six>=1.5 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from python-dateutil>=2.6.1->pandas) (1.14.0)\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"pip install seaborn",
"Requirement already satisfied: seaborn in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (0.10.0)\nRequirement already satisfied: pandas>=0.22.0 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from seaborn) (1.0.1)\nRequirement already satisfied: scipy>=1.0.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from seaborn) (1.4.1)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from seaborn) (1.18.1)\nRequirement already satisfied: matplotlib>=2.1.2 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from seaborn) (3.2.0)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from pandas>=0.22.0->seaborn) (2019.3)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from pandas>=0.22.0->seaborn) (2.8.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib>=2.1.2->seaborn) (2.4.6)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib>=2.1.2->seaborn) (1.1.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib>=2.1.2->seaborn) (0.10.0)\nRequirement already satisfied: six>=1.5 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from python-dateutil>=2.6.1->pandas>=0.22.0->seaborn) (1.14.0)\nRequirement already satisfied: setuptools in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from kiwisolver>=1.0.1->matplotlib>=2.1.2->seaborn) (46.0.0)\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"pip install matplotlib",
"Requirement already satisfied: matplotlib in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (3.2.0)\nRequirement already satisfied: numpy>=1.11 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib) (1.18.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib) (1.1.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib) (2.4.6)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from matplotlib) (2.8.1)\nRequirement already satisfied: setuptools in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from kiwisolver>=1.0.1->matplotlib) (46.0.0)\nRequirement already satisfied: six in /usr/local/Cellar/jupyterlab/2.0.1/libexec/lib/python3.7/site-packages (from cycler>=0.10->matplotlib) (1.14.0)\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"import pandas as pd\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_csv('nova_base.csv',delimiter='\\t',engine='python')\ndf",
"_____no_output_____"
],
[
"# Q = Queenstown = 0\n# C = Cherbourg = 0.5\n# S = Southampton = 1\nsns.set_style('darkgrid')\nsns.countplot(x = 'Survived', hue = 'Embarked', data = df, palette = 'RdBu_r')",
"_____no_output_____"
],
[
"diagrama_idade = df[df['Estimated_Age'].notnull()]\nn, bins, patches = plt.hist(x = diagrama_idade['Estimated_Age'], bins = 'auto', color = '#0504aa', alpha = 0.7, rwidth = 0.85)\n\nplt.grid(axis = 'y', alpha = 0.75)\nfreq = n.max()",
"_____no_output_____"
],
[
"# Sobreviventes homens\n\nsobreviventes_H = df[df['Sex'] == 0.0]['Survived']\n# sobreviventes_M = df[df['Sex'] == 'female']['Survived']\n\nlabel = ['Não', 'Sim']\nlocations = [0, 1]\nheights = sobreviventes_H.value_counts()\nqntd = sobreviventes_H.value_counts()\nplt.bar(locations, heights, tick_label = label)",
"_____no_output_____"
],
[
"# Sobreviventes Mulheres\n\nsobreviventes_M = df[df['Sex'] == 1.0]['Survived']\n\nlabel = ['Não', 'Sim']\nlocations = [1, 2]\nheights = sobreviventes_M.value_counts()\nqntd = sobreviventes_M.value_counts()\nplt.bar(locations, heights, tick_label = label)",
"_____no_output_____"
],
[
"# Sobreviventes por classe\n\ndf_class = pd.DataFrame({'Total': df['Pclass'], 'Não sobreviveram' : df[df['Survived'] == 0 ]['Pclass'], \n 'Sobreviveram' : df[df['Survived'] == 1]['Pclass']},\n columns = ['Total', 'Não sobreviveram', 'Sobreviveram'])\nplt.figure()\ndf_class.plot.hist(bins = 10, alpha = 0.5, figsize = (10, 5))\nplt.show()",
"_____no_output_____"
]
],
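[
[
"As a quick numeric check of the same comparison (an added sketch using the `df` loaded above), we can tabulate survival counts and mean survival rates per passenger class:",
"_____no_output_____"
]
],
[
[
"# Added sketch: survival counts and mean survival rate per passenger class\ncounts_by_class = df.groupby(['Pclass', 'Survived']).size().unstack(fill_value=0)\nrate_by_class = df.groupby('Pclass')['Survived'].mean()\nprint(counts_by_class)\nprint(rate_by_class)",
"_____no_output_____"
]
],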
[
[
"Baseado nisso, da pra ver que a 3º classe teve muito mais pessoas que não sobreviveram, em comparação a 1ª e 2ª classe.",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
ecde64dd29c2c9d11b9a6f5750e284ddb74f00a4 | 95,741 | ipynb | Jupyter Notebook | examples/Recreating the 1ZoneUncontrolled example file using EPEpJSON/Recreating the 1ZoneUncontrolled example file using EPEpJSON.ipynb | stevenkfirth/eprun | 2a580f8ac0b5976cb1bc84328ffb821bd31731e6 | [
"MIT"
] | 5 | 2021-05-22T19:13:13.000Z | 2022-03-07T04:54:08.000Z | examples/Recreating the 1ZoneUncontrolled example file using EPEpJSON/Recreating the 1ZoneUncontrolled example file using EPEpJSON.ipynb | stevenkfirth/eprun | 2a580f8ac0b5976cb1bc84328ffb821bd31731e6 | [
"MIT"
] | null | null | null | examples/Recreating the 1ZoneUncontrolled example file using EPEpJSON/Recreating the 1ZoneUncontrolled example file using EPEpJSON.ipynb | stevenkfirth/eprun | 2a580f8ac0b5976cb1bc84328ffb821bd31731e6 | [
"MIT"
] | null | null | null | 42.70339 | 425 | 0.361078 | [
[
[
"# Recreating the 1ZoneUncontrolled example file using EPEpJSON\n\nThis example uses the `EPEpJSON` class from the `eprun` package to recreate the '1ZoneUncontrolled.epJSON' example file.\n",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"### Import packages",
"_____no_output_____"
]
],
[
[
"from eprun import EPSchema, EPEpJSON",
"_____no_output_____"
]
],
[
[
"### Loads the epJSON schema",
"_____no_output_____"
]
],
[
[
"s=EPSchema(r'input_files/Energy+.schema.epJSON')\ns",
"_____no_output_____"
]
],
[
[
"### Creates a new blank EPEpJSON instance",
"_____no_output_____"
]
],
[
[
"j=EPEpJSON(schema=s)\nj",
"_____no_output_____"
]
],
[
[
"## Adding the EnergyPlus input objects to the EPEpJSON instance",
"_____no_output_____"
],
[
"### Simulation parameters",
"_____no_output_____"
],
[
"#### Version",
"_____no_output_____"
]
],
[
[
"j['Version'].add_object('Version 1',\n version_identifier = '9.4',\n )",
"_____no_output_____"
]
],
[
[
"#### Timestep",
"_____no_output_____"
]
],
[
[
"j['Timestep'].add_object('Timestep 1',\n number_of_timesteps_per_hour = 4,\n )",
"_____no_output_____"
]
],
[
[
"#### Building",
"_____no_output_____"
]
],
[
[
"j['Building'].add_object('Simple One Zone (Wireframe DXF)',\n loads_convergence_tolerance_value = 0.04,\n maximum_number_of_warmup_days = 30,\n minimum_number_of_warmup_days = 6,\n north_axis = 0,\n solar_distribution = 'MinimalShadowing',\n temperature_convergence_tolerance_value = 0.004,\n terrain = 'Suburbs',\n )",
"_____no_output_____"
]
],
[
[
"#### SurfaceConvectionAlgorithm:Inside",
"_____no_output_____"
]
],
[
[
"j['SurfaceConvectionAlgorithm:Inside'].add_object('SurfaceConvectionAlgorithm:Inside 1',\n algorithm = 'TARP',\n )",
"_____no_output_____"
]
],
[
[
"#### SurfaceConvectionAlgorithm:Outside",
"_____no_output_____"
]
],
[
[
"j['SurfaceConvectionAlgorithm:Outside'].add_object('SurfaceConvectionAlgorithm:Outside 1',\n algorithm = 'DOE-2',\n )",
"_____no_output_____"
]
],
[
[
"#### HeatBalanceAlgorithm",
"_____no_output_____"
]
],
[
[
"j['HeatBalanceAlgorithm'].add_object('HeatBalanceAlgorithm 1',\n algorithm = 'ConductionTransferFunction',\n )",
"_____no_output_____"
]
],
[
[
"#### SimulationControl",
"_____no_output_____"
]
],
[
[
"j['SimulationControl'].add_object('SimulationControl 1',\n do_hvac_sizing_simulation_for_sizing_periods = 'No',\n do_plant_sizing_calculation = 'No',\n do_system_sizing_calculation = 'No',\n do_zone_sizing_calculation = 'No',\n maximum_number_of_hvac_sizing_simulation_passes = 1,\n run_simulation_for_sizing_periods = 'Yes',\n run_simulation_for_weather_file_run_periods = 'Yes',\n )",
"_____no_output_____"
]
],
[
[
"### Location – Climate – Weather File Access",
"_____no_output_____"
],
[
"#### Site:Location",
"_____no_output_____"
]
],
[
[
"j['Site:Location'].add_object('Denver Centennial Golden N_CO_USA Design_Conditions',\n elevation = 1829.0,\n latitude = 39.74,\n longitude = -105.18,\n time_zone = -7.0,\n )",
"_____no_output_____"
]
],
[
[
"#### SizingPeriod:DesignDay",
"_____no_output_____"
]
],
[
[
"j['SizingPeriod:DesignDay'].add_object('Denver Centennial Golden N Ann Clg 1% Condns DB=>MWB',\n barometric_pressure = 81198.0,\n daily_dry_bulb_temperature_range = 15.2,\n day_of_month = 21,\n day_type = 'SummerDesignDay',\n daylight_saving_time_indicator = 'No',\n humidity_condition_type = 'WetBulb',\n maximum_dry_bulb_temperature = 32,\n month = 7,\n rain_indicator = 'No',\n sky_clearness = 1.0,\n snow_indicator = 'No',\n solar_model_indicator = 'ASHRAEClearSky',\n wetbulb_or_dewpoint_at_maximum_dry_bulb = 15.5,\n wind_direction = 0,\n wind_speed = 4.9,\n )\nj['SizingPeriod:DesignDay'].add_object('Denver Centennial Golden N Ann Htg 99% Condns DB',\n barometric_pressure = 81198.0,\n daily_dry_bulb_temperature_range = 0.0,\n day_of_month = 21,\n day_type = 'WinterDesignDay',\n daylight_saving_time_indicator = 'No',\n humidity_condition_type = 'WetBulb',\n maximum_dry_bulb_temperature = -15.5,\n month = 12,\n rain_indicator = 'No',\n sky_clearness = 0.0,\n snow_indicator = 'No',\n solar_model_indicator = 'ASHRAEClearSky',\n wetbulb_or_dewpoint_at_maximum_dry_bulb = -15.5,\n wind_direction = 340,\n wind_speed = 3,\n )",
"_____no_output_____"
]
],
[
[
"#### RunPeriod",
"_____no_output_____"
]
],
[
[
"j['RunPeriod'].add_object('Run Period 1',\n apply_weekend_holiday_rule = 'No',\n begin_day_of_month = 1,\n begin_month = 1,\n day_of_week_for_start_day = 'Tuesday',\n end_day_of_month = 31,\n end_month = 12,\n use_weather_file_daylight_saving_period = 'Yes',\n use_weather_file_holidays_and_special_days = 'Yes',\n use_weather_file_rain_indicators = 'Yes',\n use_weather_file_snow_indicators = 'Yes',\n )",
"_____no_output_____"
]
],
[
[
"### Schedules",
"_____no_output_____"
],
[
"#### ScheduleTypeLimits",
"_____no_output_____"
]
],
[
[
"j['ScheduleTypeLimits'].add_object('Fraction',\n lower_limit_value = 0.0,\n numeric_type = 'Continuous',\n upper_limit_value = 1.0,\n )\nj['ScheduleTypeLimits'].add_object('On/Off',\n lower_limit_value = 0,\n numeric_type = 'Discrete',\n upper_limit_value = 1,\n )\n",
"_____no_output_____"
]
],
[
[
"#### Schedule:Constant",
"_____no_output_____"
]
],
[
[
"j['Schedule:Constant'].add_object('AlwaysOn',\n hourly_value = 1.0,\n schedule_type_limits_name = 'On/Off',\n )",
"_____no_output_____"
]
],
[
[
"### Surface Construction Elements",
"_____no_output_____"
],
[
"#### Material",
"_____no_output_____"
]
],
[
[
"j['Material'].add_object('C5 - 4 IN HW CONCRETE',\n conductivity = 1.729577,\n density = 2242.585,\n roughness = 'MediumRough',\n solar_absorptance = 0.65,\n specific_heat = 836.8,\n thermal_absorptance = 0.9,\n thickness = 0.1014984,\n visible_absorptance = 0.65,\n )\n",
"_____no_output_____"
]
],
[
[
"#### Material:NoMass",
"_____no_output_____"
]
],
[
[
"j['Material:NoMass'].add_object('R13LAYER',\n roughness = 'Rough',\n solar_absorptance = 0.75,\n thermal_absorptance = 0.9,\n thermal_resistance = 2.290965,\n visible_absorptance = 0.75,\n )\nj['Material:NoMass'].add_object('R31LAYER',\n roughness = 'Rough',\n solar_absorptance = 0.75,\n thermal_absorptance = 0.9,\n thermal_resistance = 5.456,\n visible_absorptance = 0.75,\n )",
"_____no_output_____"
]
],
[
[
"#### Construction",
"_____no_output_____"
]
],
[
[
"j['Construction'].add_object('FLOOR',\n outside_layer = 'C5 - 4 IN HW CONCRETE',\n )\nj['Construction'].add_object('R13WALL',\n outside_layer = 'R13LAYER',\n )\nj['Construction'].add_object('ROOF31',\n outside_layer = 'R31LAYER',\n )",
"_____no_output_____"
]
],
[
[
"### Thermal Zone Description/Geometry",
"_____no_output_____"
],
[
"#### Zone",
"_____no_output_____"
]
],
[
[
"j['Zone'].add_object('ZONE ONE',\n ceiling_height = 'Autocalculate',\n direction_of_relative_north = 0,\n multiplier = 1,\n type = 1,\n volume = 'Autocalculate',\n x_origin = 0,\n y_origin = 0,\n z_origin = 0,\n )",
"_____no_output_____"
]
],
[
[
"#### GlobalGeometryRules",
"_____no_output_____"
]
],
[
[
"j['GlobalGeometryRules'].add_object('GlobalGeometryRules 1',\n coordinate_system = 'World',\n starting_vertex_position = 'UpperLeftCorner',\n vertex_entry_direction = 'Counterclockwise',\n )",
"_____no_output_____"
]
],
[
[
"#### BuildingSurface:Detailed",
"_____no_output_____"
]
],
[
[
"j['BuildingSurface:Detailed'].add_object('Zn001:Flr001',\n construction_name = 'FLOOR',\n number_of_vertices = 4,\n outside_boundary_condition = 'Adiabatic',\n sun_exposure = 'NoSun',\n surface_type = 'Floor',\n vertices = [{'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0.0, \n 'vertex_z_coordinate': 0.0}, \n {'vertex_x_coordinate': 0.0, \n 'vertex_y_coordinate': 0.0, \n 'vertex_z_coordinate': 0.0}, \n {'vertex_x_coordinate': 0.0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0.0}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0.0}],\n view_factor_to_ground = 1.0,\n wind_exposure = 'NoWind',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Roof001',\n construction_name = 'ROOF31',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Roof',\n vertices = [{'vertex_x_coordinate': 0.0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 0.0, \n 'vertex_y_coordinate': 0.0, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0.0, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall001',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall002',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall003',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 15.24, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n 
)\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall004',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 4.572}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 15.24, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 0}, \n {'vertex_x_coordinate': 0, \n 'vertex_y_coordinate': 0, \n 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )",
"_____no_output_____"
]
],
[
[
"### Internal Gains",
"_____no_output_____"
],
[
"#### OtherEquipment",
"_____no_output_____"
]
],
[
[
"j['OtherEquipment'].add_object('Test 352 minus',\n design_level = -352,\n design_level_calculation_method = 'EquipmentLevel',\n fraction_latent = 0,\n fraction_lost = 0,\n fraction_radiant = 0,\n fuel_type = 'None',\n schedule_name = 'AlwaysOn',\n zone_or_zonelist_name = 'ZONE ONE',\n )\nj['OtherEquipment'].add_object('Test 352a',\n design_level = 352,\n design_level_calculation_method = 'EquipmentLevel',\n fraction_latent = 0,\n fraction_lost = 0,\n fraction_radiant = 0,\n fuel_type = 'None',\n schedule_name = 'AlwaysOn',\n zone_or_zonelist_name = 'ZONE ONE',\n )",
"_____no_output_____"
]
],
[
[
"### Exterior Energy Use Equipment",
"_____no_output_____"
],
[
"#### Exterior:Lights",
"_____no_output_____"
]
],
[
[
"j['Exterior:Lights'].add_object('ExtLights',\n control_option = 'AstronomicalClock',\n design_level = 5250,\n end_use_subcategory = 'Grounds Lights',\n schedule_name = 'AlwaysOn',\n )",
"_____no_output_____"
]
],
[
[
"### Input for Output - Reports",
"_____no_output_____"
],
[
"#### Output:VariableDictionary",
"_____no_output_____"
]
],
[
[
"j['Output:VariableDictionary'].add_object('Output:VariableDictionary 1',\n key_field = 'IDF',\n )",
"_____no_output_____"
]
],
[
[
"#### Output:Constructions",
"_____no_output_____"
]
],
[
[
"j['Output:Constructions'].add_object('Output:Constructions 1',\n details_type_1 = 'Constructions',\n )",
"_____no_output_____"
]
],
[
[
"#### Output:Surfaces:Drawing",
"_____no_output_____"
]
],
[
[
"j['Output:Surfaces:Drawing'].add_object('Output:Surfaces:Drawing 1',\n report_type = 'DXF:WireFrame',\n )",
"_____no_output_____"
]
],
[
[
"#### Output:Variable",
"_____no_output_____"
]
],
[
[
"j['Output:Variable'].add_object('Output:Variable 1',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Site Outdoor Air Drybulb Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 2',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Site Daylight Saving Time Status',\n )\nj['Output:Variable'].add_object('Output:Variable 3',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Site Day Type Index',\n )\nj['Output:Variable'].add_object('Output:Variable 4',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Mean Air Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 5',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Total Internal Latent Gain Energy',\n )\nj['Output:Variable'].add_object('Output:Variable 6',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Mean Radiant Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 7',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Air Heat Balance Surface Convection Rate',\n )\nj['Output:Variable'].add_object('Output:Variable 8',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Air Heat Balance Air Energy Storage Rate',\n )\nj['Output:Variable'].add_object('Output:Variable 9',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Inside Face Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 10',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Outside Face Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 11',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Inside Face Convection Heat Transfer Coefficient',\n )\nj['Output:Variable'].add_object('Output:Variable 12',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Outside Face Convection Heat Transfer Coefficient',\n )\nj['Output:Variable'].add_object('Output:Variable 13',\n key_value = '*',\n reporting_frequency = 'Monthly',\n variable_name = 'Other Equipment Total Heating Energy',\n )\nj['Output:Variable'].add_object('Output:Variable 14',\n key_value = '*',\n reporting_frequency = 'Monthly',\n variable_name = 'Zone Other Equipment Total Heating Energy',\n )",
"_____no_output_____"
]
],
[
[
"#### Output:Meter:MeterFileOnly",
"_____no_output_____"
]
],
[
[
"j['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 1',\n key_name = 'ExteriorLights:Electricity',\n reporting_frequency = 'Hourly',\n )\nj['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 2',\n key_name = 'EnergyTransfer:Building',\n reporting_frequency = 'Hourly',\n )\nj['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 3',\n key_name = 'EnergyTransfer:Facility',\n reporting_frequency = 'Hourly',\n )",
"_____no_output_____"
]
],
[
[
"#### OutputControl:Table:Style",
"_____no_output_____"
]
],
[
[
"j['OutputControl:Table:Style'].add_object('OutputControl:Table:Style 1',\n column_separator = 'All',\n )",
"_____no_output_____"
]
],
[
[
"### Standard Output Reports",
"_____no_output_____"
],
[
"#### Output:Table:SummaryReports",
"_____no_output_____"
]
],
[
[
"j['Output:Table:SummaryReports'].add_object('Output:Table:SummaryReports 1',\n reports = [{'report_name': 'AllSummary'}],\n )",
"_____no_output_____"
]
],
[
[
"## Save the final EpEpJSON instance as a .epJSON file",
"_____no_output_____"
]
],
[
[
"j.write(r'1ZoneUncontrolled_recreated.epJSON')",
"_____no_output_____"
]
],
[
[
"## Running a simulation and viewing the results",
"_____no_output_____"
],
[
"### Run EnergyPlus simulation",
"_____no_output_____"
]
],
[
[
"from eprun import eprun\nepresult=eprun(ep_dir='C:\\EnergyPlusV9-4-0',\n input_filepath=r'1ZoneUncontrolled_recreated.epJSON',\n epw_filepath=r'input_files\\USA_CO_Golden-NREL.724666_TMY3.epw',\n sim_dir='simulation_files')\nprint(type(epresult))",
"<class 'eprun.epresult.EPResult'>\n"
],
[
"epresult.returncode",
"_____no_output_____"
],
[
"epresult.get_end().line",
"_____no_output_____"
]
],
[
[
"### View .eso results",
"_____no_output_____"
]
],
[
[
"eso=epresult.get_eso()\nenvs=eso.get_environments()\nenvs",
"_____no_output_____"
],
[
"envs[0].get_interval_dataframe()",
"_____no_output_____"
]
],
[
[
"## APPENDIX\n\nTo create this notebook, the original '1ZoneUncontrolled.epJSON' file was read using the script below.This prints the code which can be used by the EPEpJSON class to recreate the file. From the printout below, the code was then copied into the cells in this notebook.",
"_____no_output_____"
]
],
[
[
"# loads a .epJSON file and print the equivalent code\nj=EPEpJSON(r'input_files\\1ZoneUncontrolled.epJSON')\nfor k,v in j._dict.items():\n for k1,v1 in v.items():\n print(\"j['%s'].add_object('%s',\" % (k,k1))\n for k2,v2 in v1.items():\n print(\"%s%s = %s%s%s,\" % (\" \" * (len(\"j['%s'].add_object(\" % k) + 1),\n k2,\n \"'\" if isinstance(v2,str) else \"\",\n v2,\n \"'\" if isinstance(v2,str) else \"\"))\n print(\"%s)\" % (\" \" * (len(\"j['%s'].add_object(\" % k) + 1)))",
"j['Building'].add_object('Simple One Zone (Wireframe DXF)',\n loads_convergence_tolerance_value = 0.04,\n maximum_number_of_warmup_days = 30,\n minimum_number_of_warmup_days = 6,\n north_axis = 0,\n solar_distribution = 'MinimalShadowing',\n temperature_convergence_tolerance_value = 0.004,\n terrain = 'Suburbs',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Flr001',\n construction_name = 'FLOOR',\n number_of_vertices = 4,\n outside_boundary_condition = 'Adiabatic',\n sun_exposure = 'NoSun',\n surface_type = 'Floor',\n vertices = [{'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0.0, 'vertex_z_coordinate': 0.0}, {'vertex_x_coordinate': 0.0, 'vertex_y_coordinate': 0.0, 'vertex_z_coordinate': 0.0}, {'vertex_x_coordinate': 0.0, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0.0}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0.0}],\n view_factor_to_ground = 1.0,\n wind_exposure = 'NoWind',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Roof001',\n construction_name = 'ROOF31',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Roof',\n vertices = [{'vertex_x_coordinate': 0.0, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 0.0, 'vertex_y_coordinate': 0.0, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0.0, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall001',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 0, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall002',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall003',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 15.24, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 
15.24, 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['BuildingSurface:Detailed'].add_object('Zn001:Wall004',\n construction_name = 'R13WALL',\n number_of_vertices = 4,\n outside_boundary_condition = 'Outdoors',\n sun_exposure = 'SunExposed',\n surface_type = 'Wall',\n vertices = [{'vertex_x_coordinate': 0, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 4.572}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 15.24, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 0}, {'vertex_x_coordinate': 0, 'vertex_y_coordinate': 0, 'vertex_z_coordinate': 4.572}],\n view_factor_to_ground = 0.5,\n wind_exposure = 'WindExposed',\n zone_name = 'ZONE ONE',\n )\nj['Construction'].add_object('FLOOR',\n outside_layer = 'C5 - 4 IN HW CONCRETE',\n )\nj['Construction'].add_object('R13WALL',\n outside_layer = 'R13LAYER',\n )\nj['Construction'].add_object('ROOF31',\n outside_layer = 'R31LAYER',\n )\nj['Exterior:Lights'].add_object('ExtLights',\n control_option = 'AstronomicalClock',\n design_level = 5250,\n end_use_subcategory = 'Grounds Lights',\n schedule_name = 'AlwaysOn',\n )\nj['GlobalGeometryRules'].add_object('GlobalGeometryRules 1',\n coordinate_system = 'World',\n starting_vertex_position = 'UpperLeftCorner',\n vertex_entry_direction = 'Counterclockwise',\n )\nj['HeatBalanceAlgorithm'].add_object('HeatBalanceAlgorithm 1',\n algorithm = 'ConductionTransferFunction',\n )\nj['Material'].add_object('C5 - 4 IN HW CONCRETE',\n conductivity = 1.729577,\n density = 2242.585,\n roughness = 'MediumRough',\n solar_absorptance = 0.65,\n specific_heat = 836.8,\n thermal_absorptance = 0.9,\n thickness = 0.1014984,\n visible_absorptance = 0.65,\n )\nj['Material:NoMass'].add_object('R13LAYER',\n roughness = 'Rough',\n solar_absorptance = 0.75,\n thermal_absorptance = 0.9,\n thermal_resistance = 2.290965,\n visible_absorptance = 0.75,\n )\nj['Material:NoMass'].add_object('R31LAYER',\n roughness = 'Rough',\n solar_absorptance = 0.75,\n thermal_absorptance = 0.9,\n thermal_resistance = 5.456,\n visible_absorptance = 0.75,\n )\nj['OtherEquipment'].add_object('Test 352 minus',\n design_level = -352,\n design_level_calculation_method = 'EquipmentLevel',\n fraction_latent = 0,\n fraction_lost = 0,\n fraction_radiant = 0,\n fuel_type = 'None',\n schedule_name = 'AlwaysOn',\n zone_or_zonelist_name = 'ZONE ONE',\n )\nj['OtherEquipment'].add_object('Test 352a',\n design_level = 352,\n design_level_calculation_method = 'EquipmentLevel',\n fraction_latent = 0,\n fraction_lost = 0,\n fraction_radiant = 0,\n fuel_type = 'None',\n schedule_name = 'AlwaysOn',\n zone_or_zonelist_name = 'ZONE ONE',\n )\nj['Output:Constructions'].add_object('Output:Constructions 1',\n details_type_1 = 'Constructions',\n )\nj['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 1',\n key_name = 'ExteriorLights:Electricity',\n reporting_frequency = 'Hourly',\n )\nj['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 2',\n key_name = 'EnergyTransfer:Building',\n reporting_frequency = 'Hourly',\n )\nj['Output:Meter:MeterFileOnly'].add_object('Output:Meter:MeterFileOnly 3',\n key_name = 'EnergyTransfer:Facility',\n reporting_frequency = 'Hourly',\n )\nj['Output:Surfaces:Drawing'].add_object('Output:Surfaces:Drawing 1',\n report_type = 'DXF:WireFrame',\n )\nj['Output:Table:SummaryReports'].add_object('Output:Table:SummaryReports 1',\n reports = [{'report_name': 'AllSummary'}],\n 
)\nj['Output:Variable'].add_object('Output:Variable 1',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Site Outdoor Air Drybulb Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 2',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Site Daylight Saving Time Status',\n )\nj['Output:Variable'].add_object('Output:Variable 3',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Site Day Type Index',\n )\nj['Output:Variable'].add_object('Output:Variable 4',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Mean Air Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 5',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Total Internal Latent Gain Energy',\n )\nj['Output:Variable'].add_object('Output:Variable 6',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Mean Radiant Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 7',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Air Heat Balance Surface Convection Rate',\n )\nj['Output:Variable'].add_object('Output:Variable 8',\n key_value = '*',\n reporting_frequency = 'Hourly',\n variable_name = 'Zone Air Heat Balance Air Energy Storage Rate',\n )\nj['Output:Variable'].add_object('Output:Variable 9',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Inside Face Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 10',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Outside Face Temperature',\n )\nj['Output:Variable'].add_object('Output:Variable 11',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Inside Face Convection Heat Transfer Coefficient',\n )\nj['Output:Variable'].add_object('Output:Variable 12',\n key_value = '*',\n reporting_frequency = 'Daily',\n variable_name = 'Surface Outside Face Convection Heat Transfer Coefficient',\n )\nj['Output:Variable'].add_object('Output:Variable 13',\n key_value = '*',\n reporting_frequency = 'Monthly',\n variable_name = 'Other Equipment Total Heating Energy',\n )\nj['Output:Variable'].add_object('Output:Variable 14',\n key_value = '*',\n reporting_frequency = 'Monthly',\n variable_name = 'Zone Other Equipment Total Heating Energy',\n )\nj['Output:VariableDictionary'].add_object('Output:VariableDictionary 1',\n key_field = 'IDF',\n )\nj['OutputControl:Table:Style'].add_object('OutputControl:Table:Style 1',\n column_separator = 'All',\n )\nj['RunPeriod'].add_object('Run Period 1',\n apply_weekend_holiday_rule = 'No',\n begin_day_of_month = 1,\n begin_month = 1,\n day_of_week_for_start_day = 'Tuesday',\n end_day_of_month = 31,\n end_month = 12,\n use_weather_file_daylight_saving_period = 'Yes',\n use_weather_file_holidays_and_special_days = 'Yes',\n use_weather_file_rain_indicators = 'Yes',\n use_weather_file_snow_indicators = 'Yes',\n )\nj['Schedule:Constant'].add_object('AlwaysOn',\n hourly_value = 1.0,\n schedule_type_limits_name = 'On/Off',\n )\nj['ScheduleTypeLimits'].add_object('Fraction',\n lower_limit_value = 0.0,\n numeric_type = 'Continuous',\n upper_limit_value = 1.0,\n )\nj['ScheduleTypeLimits'].add_object('On/Off',\n lower_limit_value = 0,\n numeric_type = 'Discrete',\n upper_limit_value = 1,\n )\nj['SimulationControl'].add_object('SimulationControl 1',\n do_hvac_sizing_simulation_for_sizing_periods = 'No',\n do_plant_sizing_calculation = 'No',\n 
do_system_sizing_calculation = 'No',\n do_zone_sizing_calculation = 'No',\n maximum_number_of_hvac_sizing_simulation_passes = 1,\n run_simulation_for_sizing_periods = 'Yes',\n run_simulation_for_weather_file_run_periods = 'Yes',\n )\nj['Site:Location'].add_object('Denver Centennial Golden N_CO_USA Design_Conditions',\n elevation = 1829.0,\n latitude = 39.74,\n longitude = -105.18,\n time_zone = -7.0,\n )\nj['SizingPeriod:DesignDay'].add_object('Denver Centennial Golden N Ann Clg 1% Condns DB=>MWB',\n barometric_pressure = 81198.0,\n daily_dry_bulb_temperature_range = 15.2,\n day_of_month = 21,\n day_type = 'SummerDesignDay',\n daylight_saving_time_indicator = 'No',\n humidity_condition_type = 'WetBulb',\n maximum_dry_bulb_temperature = 32,\n month = 7,\n rain_indicator = 'No',\n sky_clearness = 1.0,\n snow_indicator = 'No',\n solar_model_indicator = 'ASHRAEClearSky',\n wetbulb_or_dewpoint_at_maximum_dry_bulb = 15.5,\n wind_direction = 0,\n wind_speed = 4.9,\n )\nj['SizingPeriod:DesignDay'].add_object('Denver Centennial Golden N Ann Htg 99% Condns DB',\n barometric_pressure = 81198.0,\n daily_dry_bulb_temperature_range = 0.0,\n day_of_month = 21,\n day_type = 'WinterDesignDay',\n daylight_saving_time_indicator = 'No',\n humidity_condition_type = 'WetBulb',\n maximum_dry_bulb_temperature = -15.5,\n month = 12,\n rain_indicator = 'No',\n sky_clearness = 0.0,\n snow_indicator = 'No',\n solar_model_indicator = 'ASHRAEClearSky',\n wetbulb_or_dewpoint_at_maximum_dry_bulb = -15.5,\n wind_direction = 340,\n wind_speed = 3,\n )\nj['SurfaceConvectionAlgorithm:Inside'].add_object('SurfaceConvectionAlgorithm:Inside 1',\n algorithm = 'TARP',\n )\nj['SurfaceConvectionAlgorithm:Outside'].add_object('SurfaceConvectionAlgorithm:Outside 1',\n algorithm = 'DOE-2',\n )\nj['Timestep'].add_object('Timestep 1',\n number_of_timesteps_per_hour = 4,\n )\nj['Version'].add_object('Version 1',\n version_identifier = '9.4',\n )\nj['Zone'].add_object('ZONE ONE',\n ceiling_height = 'Autocalculate',\n direction_of_relative_north = 0,\n multiplier = 1,\n type = 1,\n volume = 'Autocalculate',\n x_origin = 0,\n y_origin = 0,\n z_origin = 0,\n )\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecde65655fce59575935b4d37064c760b376ac6d | 2,629 | ipynb | Jupyter Notebook | ml/knn.ipynb | lizhaoliu/RandomCollection | eca070c2723f9cf4e41a36930bfb805df623f853 | [
"MIT"
] | 1 | 2021-09-10T03:10:57.000Z | 2021-09-10T03:10:57.000Z | ml/knn.ipynb | lizhaoliu/RandomCollection | eca070c2723f9cf4e41a36930bfb805df623f853 | [
"MIT"
] | null | null | null | ml/knn.ipynb | lizhaoliu/RandomCollection | eca070c2723f9cf4e41a36930bfb805df623f853 | [
"MIT"
] | null | null | null | 23.265487 | 91 | 0.444275 | [
[
[
"from typing import Any\n\nimport numpy as np\nfrom scipy import stats\n\ndef knn(train: np.ndarray, x: np.ndarray, k: int) -> Any:\n \"\"\"K nearest neighbors using Euclidean distance metrics.\"\"\"\n \n train_x, train_y = train[:, :-1], train[:, -1] \n x = x.reshape([1, -1])\n # Calculates the element-wise diff of input x and ground truth x, for example:\n # [['x' 'y'] [['train_x1' 'train_y1']\n # ['x' 'y'] - ['train_x2' 'train_y2']\n # ['x' 'y']] ['train_x3' 'train_y3']]\n diff = x - train_x\n # Calculates the Euclidean distance between each ground truth point and x.\n dists = np.sqrt(np.sum(diff ** 2, axis=1))\n # Fetches the top K closest train points.\n indices = dists.argpartition(k)[:k]\n return stats.mode(train_y[indices]).mode[0]",
"_____no_output_____"
],
[
"train = np.array([[0.2, 0.2, 1],\n [0.1, 0.1, 1],\n \n [1.2, 1.2, 2],\n [1.1, 1.1, 2],\n \n [2.1, 2.2, 3],\n [1.9, 1.9, 3]])\nprint(knn(train, np.array([.9, .9]), 2))",
"2.0\n"
],
[
"\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
ecdea4861e16374558020625ee2d3395c1154414 | 126,766 | ipynb | Jupyter Notebook | Python Statistics/Exercise Files/chapter5/05_03/05_03_fitgoodness_begin.ipynb | atalebizadeh/DS-Career-Track | 8bf78ef11041aef94810a392022cd51b94462d9c | [
"MIT"
] | null | null | null | Python Statistics/Exercise Files/chapter5/05_03/05_03_fitgoodness_begin.ipynb | atalebizadeh/DS-Career-Track | 8bf78ef11041aef94810a392022cd51b94462d9c | [
"MIT"
] | null | null | null | Python Statistics/Exercise Files/chapter5/05_03/05_03_fitgoodness_begin.ipynb | atalebizadeh/DS-Career-Track | 8bf78ef11041aef94810a392022cd51b94462d9c | [
"MIT"
] | null | null | null | 173.414501 | 39,820 | 0.871685 | [
[
[
"## Python statistics essential training - 05_03_fitgoodness",
"_____no_output_____"
],
[
"Standard imports",
"_____no_output_____"
]
],
[
[
"import math",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"import matplotlib\nimport matplotlib.pyplot as pp",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import statsmodels\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf",
"_____no_output_____"
]
],
[
[
"Loading gapminder data for year 1985 (Live Aid!) and setting up plot as in chapter 3",
"_____no_output_____"
]
],
[
[
"gapminder = pd.read_csv('gapminder.csv')",
"_____no_output_____"
],
[
"gdata = gapminder.query('year == 1985')",
"_____no_output_____"
],
[
"size = 1e-6 * gdata.population\n\ncolors = gdata.region.map({'Africa': 'skyblue', 'Europe': 'gold', 'America': 'palegreen', 'Asia': 'coral'})\n\ndef plotdata():\n gdata.plot.scatter('age5_surviving','babies_per_woman',\n c=colors,s=size,linewidths=0.5,edgecolor='k',alpha=0.5)",
"_____no_output_____"
]
],
[
[
"Setting up model plot",
"_____no_output_____"
]
],
[
[
"def plotfit(fit):\n plotdata()\n pp.scatter(gdata.age5_surviving,fit.predict(gdata),\n c=colors,s=30,linewidths=0.5,edgecolor='k',marker='D')",
"_____no_output_____"
]
],
[
[
"Three models from last video",
"_____no_output_____"
]
],
[
[
"groupmeans = smf.ols(formula='babies_per_woman ~ -1 + region', data=gdata).fit()",
"_____no_output_____"
],
[
"surviving = smf.ols(formula='babies_per_woman ~ -1 + region + age5_surviving', data=gdata).fit()",
"_____no_output_____"
],
[
"surviving_byregion_population = smf.ols(\n formula='babies_per_woman ~ -1 + region + age5_surviving'\n '+ age5_surviving:region - age5_surviving + population',\n data=gdata).fit()",
"_____no_output_____"
],
[
"plotfit(surviving)",
"_____no_output_____"
],
[
"plotfit(surviving_byregion_population)",
"_____no_output_____"
],
[
"pp.scatter(gdata.age5_surviving, groupmeans.resid)\npp.scatter(gdata.age5_surviving, surviving.resid)\npp.scatter(gdata.age5_surviving, surviving_byregion_population.resid)",
"_____no_output_____"
],
[
"# We loop over the three models and just print mse_resid for mean squared error\nfor model in [groupmeans, surviving, surviving_byregion_population]:\n print(model.mse_resid) ",
"1.4979520263709154\n0.9423025333008324\n0.8480227300570317\n"
],
[
"# Maximum possible value for R2 (squared) is 1 and the minimum is 0.\nfor model in [groupmeans, surviving, surviving_byregion_population]:\n print(model.rsquared) ",
"0.636467696559659\n0.7726009080146058\n0.7999775707885218\n"
]
],
[
[
"### In both cases above, we see a progress going towards more complicated models",
"_____no_output_____"
],
[
"## F statistic takes into account not only the explained and unexplained variance, but the number of model parameters. Too many parameters will overfit the data, conforming to the vagaries of the observations, but losing predictive power. So the F statistic measures how much, on average each parameter contributes to the growth of the R2(squared) compared to a random model vector\n",
"_____no_output_____"
]
],
[
[
"for model in [groupmeans, surviving, surviving_byregion_population]:\n print(model.fvalue)",
"103.88003937611687\n150.34180603430977\n86.48787556725195\n"
]
],
[
[
"#### The values above tell us that a simple surviving model seems to hit a sweet spot. ",
"_____no_output_____"
]
],
[
[
"surviving.summary()",
"_____no_output_____"
],
[
"sm.stats.anova_lm(groupmeans)",
"_____no_output_____"
]
],
[
[
"### First column above, df shows the degree of freedom, sum_sq and mean_sq show the total and average explained in residual variance. The column F is the F statistic, and the last column is the P value for a null model with the same number of parameters as ours but where all the terms are random. ",
"_____no_output_____"
]
],
[
[
"sm.stats.anova_lm(surviving)",
"_____no_output_____"
],
[
"sm.stats.anova_lm(surviving_byregion_population)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
ecdea500be938ca07436de1cdc4e8f8893ffde4f | 3,399 | ipynb | Jupyter Notebook | 620_Catching_Exceptions.ipynb | PacktPublishing/Python-for-Everybody-The-Ultimate-Python-3-Bootcamp | 34d11a55506b1e0831a10166a5377f6592afeee8 | [
"MIT"
] | 9 | 2020-08-06T08:53:36.000Z | 2021-12-30T13:25:01.000Z | 620_Catching_Exceptions.ipynb | PacktPublishing/Python-for-Everybody-The-Ultimate-Python-3-Bootcamp | 34d11a55506b1e0831a10166a5377f6592afeee8 | [
"MIT"
] | 1 | 2021-12-09T07:43:02.000Z | 2021-12-09T07:43:02.000Z | 620_Catching_Exceptions.ipynb | PacktPublishing/Python-for-Everybody-The-Ultimate-Python-3-Bootcamp | 34d11a55506b1e0831a10166a5377f6592afeee8 | [
"MIT"
] | 17 | 2020-07-28T10:04:41.000Z | 2021-11-06T17:54:02.000Z | 23.93662 | 244 | 0.494557 | [
[
[
"# Catching Exceptions\n\nTo \"catch\" an error is to understand that an error was raised and we can handle that error appropriately. The idea is to be smarter than the data and figure out what could go wrong... and solve it before we see a problem in the program.",
"_____no_output_____"
]
],
[
[
"person = {\n 'course': 'Python for Everybody',\n}\n\ntry:\n# 1/0\n print(person['fav_food'])\nexcept ZeroDivisionError:\n print(\"Cannot divide by zero\")\nexcept KeyError as e:\n print(\"Missing key:\", e)\nexcept Exception as e:\n print(type(e))\n print(e)",
"Missing key: 'fav_food'\n"
],
[
"num1 = input(\"Enter the first number: \")\nnum2 = input(\"Enter the second number: \") \n \ntry:\n num1 = float(num1)\n num2 = float(num2) \n div = num1 / num2 \n print(div)\nexcept ValueError:\n print(\"There was a value error WOOPS\")\nexcept ZeroDivisionError:\n print(\"Cannot divide by zero\")\nexcept NameError as e:\n print(\"There was an undefined variable somewhere.\", e)\nexcept Exception as e:\n print(type(e))\n print(e)\nelse:\n print(f\"Huzzah success! We divided {num1} and {num2}\")\nfinally:\n print(\"This will always run no matter what\")",
"Enter the first number: 75\nEnter the second number: 25\n3.0\nHuzzah success! We divided 75.0 and 25.0\nThis will always run no matter what\n"
],
[
"while True:\n print(\"Make a successful division\")\n num1 = input()\n num2 = input()\n try:\n num3 = float(num1)/float(num2)\n break\n except Exception as e:\n print(type(e))\n print(e)\n print(\"Nope. Try again.\")\n ",
"Make a successful division\n10\n0\n<class 'ZeroDivisionError'>\nfloat division by zero\nNope. Try again.\nMake a successful division\n10\n1\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ecdea9810cfd4ab519946e9900c1580e2b061d73 | 1,926 | ipynb | Jupyter Notebook | Assignment-2-Day-6.ipynb | Amitkumarpanda192/LetsUpgrade-Python | fa8afd1f24a438de0231c1ca8bad90a4755545c1 | [
"Apache-2.0"
] | null | null | null | Assignment-2-Day-6.ipynb | Amitkumarpanda192/LetsUpgrade-Python | fa8afd1f24a438de0231c1ca8bad90a4755545c1 | [
"Apache-2.0"
] | null | null | null | Assignment-2-Day-6.ipynb | Amitkumarpanda192/LetsUpgrade-Python | fa8afd1f24a438de0231c1ca8bad90a4755545c1 | [
"Apache-2.0"
] | null | null | null | 20.489362 | 98 | 0.502596 | [
[
[
"import math\nclass cone():\n def __init__(self,radius,height):\n self.radius = radius\n self.height = height\n def volume(self):\n print(\"volume - \",math.pi*self.radius*self.radius*(self.height/3))\n def surface_area(self):\n print(\"base - \",math.pi*self.radius*self.radius)\n print(\"side - \",math.pi*self.radius*math.sqrt(self.radius**2 + self.height**2))\n \n",
"_____no_output_____"
],
[
"result = cone(12,12)",
"_____no_output_____"
],
[
"result.volume()",
"volume - 1809.5573684677208\n"
],
[
"result.surface_area()",
"base - 452.3893421169302\nside - 639.7751430948047\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
ecdeba5d4937a80eac9caf24dfc54c69a94ef4a2 | 263,605 | ipynb | Jupyter Notebook | sandbox/data_import_tool_hosp_capacity_data_exploration.ipynb | austin-carnahan/spreadsheet-cleaner | 7102592f471aaca94fdcd7a3f4e8c5470c37791e | [
"Apache-2.0"
] | null | null | null | sandbox/data_import_tool_hosp_capacity_data_exploration.ipynb | austin-carnahan/spreadsheet-cleaner | 7102592f471aaca94fdcd7a3f4e8c5470c37791e | [
"Apache-2.0"
] | null | null | null | sandbox/data_import_tool_hosp_capacity_data_exploration.ipynb | austin-carnahan/spreadsheet-cleaner | 7102592f471aaca94fdcd7a3f4e8c5470c37791e | [
"Apache-2.0"
] | null | null | null | 54.757998 | 26,972 | 0.456171 | [
[
[
"# Imports\nimport pandas as pd\nimport folium\nimport matplotlib.pyplot as plt\n",
"_____no_output_____"
]
],
[
[
"## Load data and run some basic checks",
"_____no_output_____"
]
],
[
[
"# Load data file\ninput_file = \"/Users/callie/Desktop/1.USDR/cleaned_data/Compiled_Hospital_Data.csv\"\n\ndf = pd.read_csv(input_file)\ndf.head(2)",
"_____no_output_____"
]
],
[
[
"## Review column names",
"_____no_output_____"
]
],
[
[
"# Number of rows for each date import\ndf['date'].value_counts()",
"_____no_output_____"
],
[
"for columns in df.columns:\n print(columns)",
"Hospital Name\nICU: Staffed Beds\nICU: Available Now\nICU: Available 24H\nICU: Available 72H\nMed/Surg: Staffed Beds\nMed/Surg: Available Now\nMed/Surg: Available 24H\nMed/Surg: Available 72H\nBurn: Staffed Beds\nBurn: Available Now\nBurn: Available 24H\nBurn: Available 72H\nPeds ICU: Staffed Beds\nPeds ICU: Available Now\nPeds ICU: Available 24H\nPeds ICU: Available 72H\nPeds: Staffed Beds\nPeds: Available Now\nPeds: Available 24H\nPeds: Available 72H\nNeonatal: Staffed Beds\nNeonatal: Available Now\nNeonatal: Available 24H\nNeonatal: Available 72H\nInpatient Rehab: Staffed Beds\nInpatient Rehab: Available Now\nInpatient Rehab: Available 24H\nInpatient Rehab: Available 72H\nPysch: Staffed Beds\nPysch: Available Now\nPysch: Available 24H\nPysch: Available 72H\nPysch-Adult: Staffed Beds\nPysch-Adult: Available Now\nPysch-Adult: Available 24H\nPysch-Adult: Available 72H\nPysch-Adol: Staffed Beds\nPysch-Adol: Available Now\nPysch-Adol: Available 24H\nPysch-Adol: Available 72H\nPysch-Geri: Staffed Beds\nPysch-Geri: Available Now\nPysch-Geri: Available 24H\nPysch-Geri: Available 72H\nPysch-Detox: Staffed Beds\nPysch-Detox: Available Now\nPysch-Detox: Available 24H\nPysch-Detox: Available 72H\nPysch-SustanceDual: Staffed Beds\nPysch-SustanceDual: Available Now\nPysch-SustanceDual: Available 24H\nPysch-SustanceDual: Available 72H\nLabor/Deliv: Staffed Beds\nLabor/Deliv: Available Now\nLabor/Deliv: Available 24H\nLabor/Deliv: Available 72H\nMaternity: Staffed Beds\nMaternity: Available Now\nMaternity: Available 24H\nMaternity: Available 72H\nAirborne Iso: Staffed Beds\nAirborne Iso: Available Now\nAirborne Iso: Available 24H\nAirborne Iso: Available 72H\nED: Immediate\nED: Delayed\nED: Minor\nED: Deceased\nNum patient waiting non-COVID admit\nNum patient non-vent COVID admit\nNum patient vent COVID admit\nNum patient waiting ICU bed\nNum patient waiting discharge\nyesterday COVID admit\nyesterday PUI admit\nrespiratory protection plan indicator\nN95 plan, fit tested\nN95 brand and model\nPARPs plan, trained\nPPE train status\nNeed hand sanitizer\nNeed hand soap\nNeed disinfection solution\nNeed disinfection wipes\nNeed gloves\nNeed other\nExpect shortage N95\nExpect shortage PARPs\nExpect shortage PARP Hoods\nExpect shortage PARP Filters\nExpect shortage Facial Masks\nExpect shortage Gown/Apron\nExpect shortage Eye Protection\nExpect shortage Disinfection Supplies\nExpect shortage Other\nCOVID res Expect shortage N95\nCOVID res Expect shortage PARPs\nCOVID res Expect shortage PARP Hoods\nCOVID res Expect shortage PARP Filters\nCOVID res Expect shortage Facial Masks\nCOVID res Expect shortage Gown\nCOVID res Expect shortage Eye Protection\nCOVID res Expect shortage Hand Soap\nCOVID res Expect shortage Hand Sanitizer\nCOVID res Expect shortage Disinfection Supplies\nCOVID res Expect shortage other\nN95 Burn Rate\nPARP's Burn Rate\nPARP's Hoods Burn Rate\nPARP's Filters Burn Rate\nFacial Masks Burn Rate\nGown Burn Rate\nEye Protection Burn Rate\nAnticipated test or collection shortage\nShortage Note\nIndicate commercial or inhouse COVID testing\nTesting go live date\nCOVID tests run inhouse today\nCOVID positive tests today\nTotal inpatient COVID diagnosed\nTotal inpatient PUI\nTotal ICU beds occupied COVID diagnosed\nYesterday inpatients admit 14+ convert COVID\nTotal inpatients admit 14+ convert COVID\nTotal COVID diagnosed on vent\nTotal COVID diagnosed on ECMO\nNum airborne infection iso ED\nNum airborne infection iso ICU\nNum airborne infection iso non-ICU\n24H COVID patient deaths\nYesterdays 
24H COVID patient deaths\nIndicate extended respirator use\nIndicate reusable respirator use\nIndicate reuse N95\nIndicate extended staff hours\nIndicate num cohorting with no dedicated staff\nIndicate num cohoriting with dedicated staff\nIndicate extended respirator use.1\nIndicate N95 last 1 to 3 days\nIndicate N95 last 4 to 14 days\nIndicate N95 last over 14 days\nIndicate PPE last 1 to 3 days\nIndicate PPE last 4 to 14 days\nIndicate PPE last over 14 days\nIndicate N95 last 1 to 3 days.1\nIndicate N95 last 4 to 14 days.1\nTotal employee absent\nEmployee absent COVID\nPhysician call outs\nNurse call outs\nExposure call outs\nChild care call outs\nCritical staff shortage: Env Science\nCritical staff shortage: RN and LPNs\nCritical staff shortage: Resp Therapists\nCritical staff shortage: Pharma\nCritical staff shortage: Physicians\nCritical staff shortage: Other licensed IP\nCritical staff shortage: Temporary Physician and LP\nCritical staff shortage: Other HCP\nCritical staff shortage: Not listed\nIn a week critical staff shortage: Env Science\nIn a week Critical staff shortage: RN and LPNs\nIn a week Critical staff shortage: Resp Therapists\nIn a week Critical staff shortage: Pharma\nIn a week Critical staff shortage: Physicians\nIn a week Critical staff shortage: Other licensed IP\nIn a week Critical staff shortage: Temporary Physician and LP\nIn a week Critical staff shortage: Other HCP\nIn a week Critical staff shortage: Not listed\nNum ventilators\nNum ventilators in use\nNum anesthesia machines\nNum anesthesia machines converted to vent\nNum vents used for confirmed COVID patients\nNum ECMO units\nNum ECMO in use\nNum ECMO in use for COVID patients\nTotal ED Airborne Isolation Rooms\nAvailable ED Airborne Isolation Rooms\nED Airborne Isolation occupied req isolation\nED Airborne Isolation occupied by COVID\nTotal Non-ICU Airborne Isolation Rooms\nAvailable Non-ICU Airborne Isolation Rooms\nNon-ICU Airborne Isolation occupied req isolation\nNon-ICU Airborne Isolation occupied by COVID\nTotal ICU Airborne Isolation Rooms\nAvailable ICU Airborne Isolation Rooms\nICU Airborne Isolation occupied req isolation\nICU Airborne Isolation occupied by COVID\nED Divert Status\nMass Decon Status\nVent Full Feature\nVent Pediatric Capable\nVent Rescue Therapies\nFacility Stress 1\nFacility Stress 2\nFacility Stress 3\nFacility Stress 4\nFacility Stress 5\nEmergency Generator available\nHeating and AC under generator available\nPhone available under generator\nHeating System- Electric\nHeating System- Natural Gas\nHeating System- Oil\nHeating System- Propane\nTotal Available Non-Skilled Ambulatory Beds\nTotal Available Non-Skilled Ambulatory\nTotal Available Non-Skilled NonAmbulatory Beds\nTotal Available Non-Skilled NonAmbulatory\nTotal Available Skilled Beds\nTotal Available Skilled\nSkilled Feeding Tubes Available\nSkilled IV Fluids Available\nSkilled Isolation Available\nSkilled Security Available\nSkilled Venilator Available\nTotal FT Certified Nursing Assistant 1\nTotal PT Certified Nursing Assistant 1\nTotal FT Certified Nursing Assistant 2\nTotal PT Certified Nursing Assistant 2\nTotal FT LPN\nTotal PT LPN\nTotal FT Medical Technician\nTotal PT Medical Technician\nTotal FT Pharmacist\nTotal PT Pharmacist\nTotal FT RN\nTotal PT RN\nTotal FT Social Service\nTotal PT Social Service\nTotal Onsite Feeding Tube Pumps\nTotal Onsite Hospital Beds\nTotal Onsite IV Pumps\nTotal Onsite Stationary Beds\nTotal Onsite Ventilators\nsource_file\ndate\n"
]
],
[
[
"## When did each hospital last share data?\n55 of the 68 hospitals reporting have the most recent data.\nThe missing hospitals are consistently not reporting ICU beds (this may be expected behavior)",
"_____no_output_____"
]
],
[
[
"only_beds_reports = df.dropna(subset = [\"ICU: Staffed Beds\"], inplace=False)\nonly_beds_reports.sort_values('date', ascending=False).groupby('Hospital Name').head(1)",
"_____no_output_____"
],
[
"# Get the most recent note on shortages:\nshortage_note = df.dropna(subset = [\"Shortage Note\"], inplace=False)\nlist = shortage_note.sort_values('date', ascending=False).groupby('Hospital Name').head(1)\nlist[['Hospital Name', 'Shortage Note', 'date']]",
"_____no_output_____"
]
],
[
[
"# Some quick graphs",
"_____no_output_____"
],
[
"## Trending ICA availability",
"_____no_output_____"
]
],
[
[
"# Aggregate across hospitals\nICU_now = pd.DataFrame(df['ICU: Available Now'].groupby(df['date']).agg('sum'))\nICU_24H = pd.DataFrame(df['ICU: Available 24H'].groupby(df['date']).agg('sum'))\nICU_72H = pd.DataFrame(df['ICU: Available 72H'].groupby(df['date']).agg('sum'))\nICU_Staffed = pd.DataFrame(df['ICU: Staffed Beds'].groupby(df['date']).agg('sum'))\ndate = [\"7/18\", \"7/19\", \"7/20\", \"7/21\", '7/22']\n",
"_____no_output_____"
],
[
"plt.plot(date, ICU_now['ICU: Available Now'], label = \"ICU beds currently available\")\nplt.plot(date, ICU_24H['ICU: Available 24H'], label = \"ICU beds available in 24 hours\")\nplt.plot(date, ICU_72H['ICU: Available 72H'], label = \"ICU beds available in 72 hours\")\nplt.plot(date, ICU_Staffed['ICU: Staffed Beds'], label = \"Staffed ICU Beds\")\n\nplt.xlabel('Date of Data Report')\nplt.ylabel('Available Beds')\nplt.title(\"Trending ICU Bed Availability \")\nplt.legend()",
"_____no_output_____"
],
[
"plt.plot(date, ICU_now['ICU: Available Now'], label = \"ICU beds currently available\")\nplt.plot(date, ICU_24H['ICU: Available 24H'], label = \"ICU beds available in 24 hours\")\nplt.plot(date, ICU_72H['ICU: Available 72H'], label = \"ICU beds available in 72 hours\")\nplt.xlabel('Date of Data Report')\nplt.ylabel('Available Beds (Actual and Estimates)')\nplt.title(\"Trending ICU Bed Availability \")\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## Flag Hospitals with less than 10% ICU capacity (new field example) ",
"_____no_output_____"
]
],
[
[
"df[\"ICU_current_capacity\"] = round(df['ICU: Available Now']/df['ICU: Staffed Beds'], 2)\ndf[\"ICU_current_capacity\"].hist()",
"_____no_output_____"
],
[
"low_capacity = df[df[\"ICU_current_capacity\"] <= .15]\nlow_capacity.sort_values(\"ICU_current_capacity\")",
"_____no_output_____"
],
[
"# Aggregate low capacity\nlow_today = low_capacity[low_capacity['date'] == \" July 22\"]\nlow_today[[\"Hospital Name\", \"ICU: Staffed Beds\", \"ICU: Available Now\", \"ICU: Available 24H\", \"ICU_current_capacity\" ]]",
"_____no_output_____"
]
],
[
[
"## Map examples (likely not impactful)",
"_____no_output_____"
]
],
[
[
"# Center Map on Abinton Hospital\ncenter_point = [40.119262, -75.119874]\ncounty_map = folium.Map(location=center_point, zoom_start=11, tiles=\"CartoDB dark_matter\")\nmarker_list = [[40.119262, -75.119874],[40.072446, -75.104917]]\n\nfor mark in marker_list:\n hospital = [mark[0], mark[1]]\n marker = folium.CircleMarker(location=hospital)\n marker.add_to(county_map)\n",
"_____no_output_____"
],
[
"# Display Map\ndisplay(county_map)",
"_____no_output_____"
],
[
"county_map.save(\"my_map.html\")",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ecded736de71b27ece347c2c9321fad0b48519e0 | 7,446 | ipynb | Jupyter Notebook | Data Science and Machine Learning/Thorough Python Data Science Topics/Machine Translation in Python 3 with NLTK.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 39 | 2016-11-12T09:20:41.000Z | 2020-04-03T15:11:36.000Z | Data Science and Machine Learning/Thorough Python Data Science Topics/Machine Translation in Python 3 with NLTK.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Data Science and Machine Learning/Thorough Python Data Science Topics/Machine Translation in Python 3 with NLTK.ipynb | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 37 | 2017-02-25T21:24:07.000Z | 2020-04-03T15:11:42.000Z | 21.77193 | 240 | 0.53747 | [
[
[
"# Machine Translation in Python 3 with NLTK",
"_____no_output_____"
],
[
"**(C) 2017 by [Damir Cavar](http://damir.cavar.me/)**",
"_____no_output_____"
],
[
"**Version:** 1.0, November 2017",
"_____no_output_____"
],
[
"**License:** [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))",
"_____no_output_____"
],
[
"This is a brief introduction to the Machine Translation components in NLTK.",
"_____no_output_____"
],
[
"## Loading an Aligned Corpus",
"_____no_output_____"
],
[
"Import the *comtrans* module from *nltk.corpus*.",
"_____no_output_____"
]
],
[
[
"from nltk.corpus import comtrans",
"_____no_output_____"
]
],
[
[
"We can load a word-level alignment corpus for English and French from the NLTK dataset:",
"_____no_output_____"
]
],
[
[
"words = comtrans.words(\"alignment-en-fr.txt\")",
"_____no_output_____"
]
],
[
[
"Print out the words in the corpus as a list:",
"_____no_output_____"
]
],
[
[
"for word in words[:20]:\n print(word)\nprint(\"...\")",
"Resumption\nof\nthe\nsession\nI\ndeclare\nresumed\nthe\nsession\nof\nthe\nEuropean\nParliament\nadjourned\non\nFriday\n17\nDecember\n1999\n,\n...\n"
]
],
[
[
"Access a word by index in the list:",
"_____no_output_____"
]
],
[
[
"print(words[0])",
"Resumption\n"
]
],
[
[
"We can load the aligned sentences. Here we will load just one sentence, the firs one in the corpus:",
"_____no_output_____"
]
],
[
[
"als = comtrans.aligned_sents(\"alignment-en-fr.txt\")[0]\nals\n\nprint(\" \".join(als.words))\nprint(\" \".join(als.mots))",
"_____no_output_____"
]
],
[
[
"The alignments can be accessed via the *alignment* property:",
"_____no_output_____"
]
],
[
[
"als.alignment",
"_____no_output_____"
]
],
[
[
"We can display the alignment using the *invert* function:",
"_____no_output_____"
]
],
[
[
"als.invert()",
"_____no_output_____"
]
],
[
[
"We can also create alignments directly using the NLTK translate module. We import the translation modules from NLTK:",
"_____no_output_____"
]
],
[
[
"from nltk.translate import Alignment, AlignedSent",
"_____no_output_____"
]
],
[
[
"We can create an alignment example:",
"_____no_output_____"
]
],
[
[
"als = AlignedSent( [\"Reprise\", \"de\", \"la\", \"session\" ], \\\n [\"Resumption\", \"of\", \"the\", \"session\" ] , \\\n Alignment( [ (0 , 0), (1 , 1), (2 , 2), (3 , 3) ] ) )",
"_____no_output_____"
]
],
[
[
"## Translating with IBM Model 1 in NLTK",
"_____no_output_____"
],
[
"We already imported comtrans from NLTK in the code above. We have to import IBMModel1 from *nltk.translate*:",
"_____no_output_____"
]
],
[
[
"from nltk.translate import IBMModel1",
"_____no_output_____"
]
],
[
[
"We can create an IBMModel1 using 20 iterations to run the learning algorithm using the first 10 sentences from the aligned corpus; see the EM explanation on the slides and the following publications:\n\n- Philipp Koehn. 2010. *Statistical Machine Translation*. Cambridge University Press, New York.\n\n- Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and Robert L. Mercer. 1993. The Mathematics of Statistical Machine Translation: Parameter Estimation. *Computational Linguistics*, 19 (2), 263-311.\n",
"_____no_output_____"
]
],
[
[
"com_ibm1 = IBMModel1(comtrans.aligned_sents()[:10], 100)",
"_____no_output_____"
],
[
"print(round(com_ibm1.translation_table[\"bitte\"][\"Please\"], 3) )",
"_____no_output_____"
],
[
"print(round(com_ibm1.translation_table[\"Sitzungsperiode\"][\"session\"] , 3) )",
"_____no_output_____"
]
],
[
[
"(C) 2017 by [Damir Cavar](http://damir.cavar.me/) - [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/) ([CA BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
ecdedf2fb1d624ffd45cb3e648a6f0c9e1d05572 | 1,958 | ipynb | Jupyter Notebook | ch11 - Graph Analysis with GraphFrames/Incoming Degree.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | 1 | 2021-10-01T22:12:15.000Z | 2021-10-01T22:12:15.000Z | ch11 - Graph Analysis with GraphFrames/Incoming Degree.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | null | null | null | ch11 - Graph Analysis with GraphFrames/Incoming Degree.ipynb | PacktPublishing/Azure-Databricks-Cookbook. | 71abfb8928b8c39e6202bdc753e6c4d2d05e0c15 | [
"MIT"
] | null | null | null | 979 | 1,957 | 0.628192 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
ecdee22266228c3e1bfc60034fb2300c983d2af0 | 63,108 | ipynb | Jupyter Notebook | ML Pipeline Preparation-zh.ipynb | VickieL/DSND_Disaster_Response_Pipelines | 01857c4f1e5a1c25a9ce81738c3ca5a360a433b0 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | ML Pipeline Preparation-zh.ipynb | VickieL/DSND_Disaster_Response_Pipelines | 01857c4f1e5a1c25a9ce81738c3ca5a360a433b0 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | ML Pipeline Preparation-zh.ipynb | VickieL/DSND_Disaster_Response_Pipelines | 01857c4f1e5a1c25a9ce81738c3ca5a360a433b0 | [
"FTL",
"CNRI-Python",
"blessing"
] | null | null | null | 33.390476 | 391 | 0.397715 | [
[
[
"# ML Pipeline \n按照如下的指导要求,搭建你的机器学习管道。\n### 1. 导入与加载\n- 导入 Python 库\n- 使用 [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html) 从数据库中加载数据集\n- 定义特征变量X 和目标变量 Y",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport re\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.grid_search import RandomizedSearchCV\n\nfrom sklearn.externals import joblib\nimport pickle",
"/Users/vickieliu/anaconda/lib/python3.5/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n/Users/vickieliu/anaconda/lib/python3.5/site-packages/sklearn/grid_search.py:42: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.\n DeprecationWarning)\n"
],
[
"categories = ['related', 'request', 'offer', 'aid_related', 'medical_help', 'medical_products', 'search_and_rescue',\n 'security', 'military', 'child_alone', 'water', 'food', 'shelter', 'clothing', 'money',\n 'missing_people', 'refugees', 'death', 'other_aid', 'infrastructure_related', 'transport',\n 'buildings', 'electricity', 'tools', 'hospitals', 'shops', 'aid_centers', 'other_infrastructure',\n 'weather_related', 'floods', 'storm', 'fire', 'earthquake', 'cold', 'other_weather', 'direct_report']",
"_____no_output_____"
],
[
"# load data from database\nengine = create_engine('sqlite:///DisasterResponse.db')\ndf = pd.read_sql_table('messages_categories', engine)\n#X = df[['message']]\nX = df.message.values\ny = df[categories].values\n",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
],
[
"# test\ndf1 = df[(df['related'] == 2)]\ndf1.head()",
"_____no_output_____"
],
[
"df2 = df[(df['weather_related'] == 1)]\ndf2.head()",
"_____no_output_____"
],
[
"related_counts = df.groupby('related').count()['message']\nrelated_names = list(related_counts.index)\nprint(related_counts)\nprint(related_names)",
"related\n0 6116\n1 19876\n2 188\nName: message, dtype: int64\n[0, 1, 2]\n"
],
[
"request_counts = df.groupby('request').count()['message']\nrequest_names = list(request_counts.index)\nprint(request_counts)\nprint(request_names)",
"request\n0 21716\n1 4464\nName: message, dtype: int64\n[0, 1]\n"
],
[
"weather_related_counts = df.groupby('weather_related').count()['message']\nweather_related_names = list(weather_related_counts.index)\nprint(weather_related_counts)\nprint(weather_related_names)",
"weather_related\n0 18894\n1 7286\nName: message, dtype: int64\n[0, 1]\n"
],
[
"aid_centers_counts = df.groupby('aid_centers').count()['message']\naid_centers_names = list(aid_centers_counts.index)\nprint(aid_centers_counts)\nprint(aid_centers_names)",
"aid_centers\n0 25871\n1 309\nName: message, dtype: int64\n[0, 1]\n"
]
],
[
[
"### 2. 编写分词函数,开始处理文本",
"_____no_output_____"
]
],
[
[
"def tokenize(text):\n # Normalize text\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n \n # # Tokenize text\n words = word_tokenize(text)\n \n # Remove stop words\n words = [w for w in words if w not in stopwords.words(\"english\")]\n \n # reduce words to their stems 之前用stem,后来改用lemmatizer了\n # stemmed = [PorterStemmer().stem(w).lower().strip() for w in words]\n \n lemmatizer = WordNetLemmatizer()\n \n clean_tokens = []\n for tok in words:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens\n ",
"_____no_output_____"
]
],
[
[
"### 3. 创建机器学习管道 \n这个机器学习管道应该接收 `message` 列作输入,输出分类结果,分类结果属于该数据集中的 36 个类。你会发现 [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) 在预测多目标变量时很有用。",
"_____no_output_____"
]
],
[
[
"pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])",
"_____no_output_____"
]
],
[
[
"### 4. 训练管道\n- 将数据分割成训练和测试集\n- 训练管道",
"_____no_output_____"
]
],
[
[
"X_train.shape",
"_____no_output_____"
],
[
"y_train.shape",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y)\n",
"_____no_output_____"
],
[
"%%time\n# train classifier\npipeline.fit(X_train, y_train)",
"CPU times: user 1min 24s, sys: 10.2 s, total: 1min 34s\nWall time: 1min 36s\n"
],
[
"%%time\n# predict on test data\ny_pred = pipeline.predict(X_test)",
"_____no_output_____"
]
],
[
[
"### 5. 测试模型\n报告数据集中每个输出类别的 f1 得分、准确度和召回率。你可以对列进行遍历,并对每个元素调用 sklearn 的 `classification_report`。",
"_____no_output_____"
]
],
[
[
"y_test[:,0]",
"_____no_output_____"
],
[
"for i in range(0,35):\n print(\"Categories:\", categories[i])\n print(classification_report(y_test[:,i], y_pred[:,i]))",
"Categories: related\n precision recall f1-score support\n\n 0 0.67 0.11 0.19 1594\n 1 0.82 0.07 0.13 4899\n 2 0.01 0.96 0.02 52\n\navg / total 0.78 0.09 0.15 6545\n\nCategories: request\n precision recall f1-score support\n\n 0 0.84 0.99 0.91 5446\n 1 0.73 0.09 0.15 1099\n\navg / total 0.82 0.84 0.79 6545\n\nCategories: offer\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 6516\n 1 0.00 0.00 0.00 29\n\navg / total 0.99 1.00 0.99 6545\n\nCategories: aid_related\n precision recall f1-score support\n\n 0 0.60 0.99 0.75 3895\n 1 0.72 0.04 0.08 2650\n\navg / total 0.65 0.61 0.48 6545\n\nCategories: medical_help\n precision recall f1-score support\n\n 0 0.92 1.00 0.96 6033\n 1 0.00 0.00 0.00 512\n\navg / total 0.85 0.92 0.88 6545\n\nCategories: medical_products\n precision recall f1-score support\n\n 0 0.95 1.00 0.98 6234\n 1 0.56 0.02 0.03 311\n\navg / total 0.93 0.95 0.93 6545\n\nCategories: search_and_rescue\n precision recall f1-score support\n\n 0 0.97 1.00 0.99 6355\n 1 0.00 0.00 0.00 190\n\navg / total 0.94 0.97 0.96 6545\n\nCategories: security\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6412\n 1 0.00 0.00 0.00 133\n\navg / total 0.96 0.98 0.97 6545\n\nCategories: military\n precision recall f1-score support\n\n 0 0.97 1.00 0.98 6331\n 1 0.00 0.00 0.00 214\n\navg / total 0.94 0.97 0.95 6545\n\nCategories: child_alone\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 6545\n\navg / total 1.00 1.00 1.00 6545\n\nCategories: water\n precision recall f1-score support\n\n 0 0.94 1.00 0.97 6144\n 1 0.84 0.04 0.08 401\n\navg / total 0.93 0.94 0.91 6545\n\nCategories: food\n precision recall f1-score support\n\n 0 0.90 1.00 0.95 5859\n 1 0.74 0.07 0.13 686\n\navg / total 0.88 0.90 0.86 6545\n\nCategories: shelter\n precision recall f1-score support\n\n 0 0.91 1.00 0.95 5944\n 1 0.96 0.04 0.08 601\n\navg / total 0.92 0.91 0.87 6545\n\nCategories: clothing\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6449\n 1 1.00 0.02 0.04 96\n\navg / total 0.99 0.99 0.98 6545\n\nCategories: money\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6405\n 1 0.00 0.00 0.00 140\n\navg / total 0.96 0.98 0.97 6545\n\nCategories: missing_people\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6470\n 1 0.00 0.00 0.00 75\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: refugees\n precision recall f1-score support\n\n 0 0.97 1.00 0.98 6334\n 1 0.00 0.00 0.00 211\n\navg / total 0.94 0.97 0.95 6545\n\nCategories: death\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6255\n 1 1.00 0.03 0.05 290\n\navg / total 0.96 0.96 0.94 6545\n\nCategories: other_aid\n precision recall f1-score support\n\n 0 0.87 1.00 0.93 5706\n 1 0.39 0.02 0.03 839\n\navg / total 0.81 0.87 0.82 6545\n\nCategories: infrastructure_related\n precision recall f1-score support\n\n 0 0.93 1.00 0.97 6112\n 1 0.50 0.00 0.00 433\n\navg / total 0.91 0.93 0.90 6545\n\nCategories: transport\n precision recall f1-score support\n\n 0 0.95 1.00 0.98 6245\n 1 0.00 0.00 0.00 300\n\navg / total 0.91 0.95 0.93 6545\n\nCategories: buildings\n precision recall f1-score support\n\n 0 0.95 1.00 0.97 6211\n 1 0.67 0.01 0.02 334\n\navg / total 0.94 0.95 0.93 6545\n\nCategories: electricity\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6412\n 1 0.00 0.00 0.00 133\n\navg / total 0.96 0.98 0.97 6545\n\nCategories: tools\n precision recall f1-score support\n\n 0 0.99 1.00 1.00 6504\n 1 0.00 0.00 0.00 41\n\navg / total 0.99 0.99 0.99 6545\n\nCategories: hospitals\n precision recall f1-score support\n\n 0 0.99 
1.00 0.99 6477\n 1 0.00 0.00 0.00 68\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: shops\n precision recall f1-score support\n\n 0 0.99 1.00 1.00 6509\n 1 0.00 0.00 0.00 36\n\navg / total 0.99 0.99 0.99 6545\n\nCategories: aid_centers\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6464\n 1 0.00 0.00 0.00 81\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: other_infrastructure\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6251\n 1 0.50 0.00 0.01 294\n\navg / total 0.93 0.96 0.93 6545\n\nCategories: weather_related\n precision recall f1-score support\n\n 0 0.74 0.99 0.85 4752\n 1 0.77 0.06 0.10 1793\n\navg / total 0.75 0.74 0.64 6545\n\nCategories: floods\n precision recall f1-score support\n\n 0 0.92 1.00 0.96 6032\n 1 1.00 0.01 0.01 513\n\navg / total 0.93 0.92 0.89 6545\n\nCategories: storm\n precision recall f1-score support\n\n 0 0.91 1.00 0.95 5943\n 1 0.79 0.02 0.04 602\n\navg / total 0.90 0.91 0.87 6545\n\nCategories: fire\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6468\n 1 0.00 0.00 0.00 77\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: earthquake\n precision recall f1-score support\n\n 0 0.92 1.00 0.96 5944\n 1 0.75 0.13 0.22 601\n\navg / total 0.90 0.92 0.89 6545\n\nCategories: cold\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6426\n 1 0.00 0.00 0.00 119\n\navg / total 0.96 0.98 0.97 6545\n\nCategories: other_weather\n precision recall f1-score support\n\n 0 0.95 1.00 0.97 6193\n 1 0.67 0.01 0.01 352\n\navg / total 0.93 0.95 0.92 6545\n\n"
]
],
[
[
"### 6. 优化模型\n使用网格搜索来找到最优的参数组合。 ",
"_____no_output_____"
]
],
[
[
"pipeline.get_params()",
"_____no_output_____"
],
[
"def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'vect__max_df': (0.5, 1.0),\n 'vect__max_features': (None, 5000),\n 'tfidf__use_idf': (True, False),\n 'clf__estimator__n_estimators': [10, 50],\n 'clf__estimator__min_samples_split': [2, 4]\n }\n \n #cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs=-1)\n \n # 如果参数多,可以尝试用RandomizedSearchCV替代GridSearchCV\n n_iter_search = 5\n cv = RandomizedSearchCV(pipeline, param_distributions = parameters,n_iter = n_iter_search)\n \n return cv\n\n",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y)",
"_____no_output_____"
],
[
"%%time\n\nmodel = build_model()\nmodel.fit(X_train, y_train)",
"CPU times: user 26min 25s, sys: 2min 30s, total: 28min 55s\nWall time: 29min 16s\n"
]
],
[
[
"Total train time: 53m 26s\n\nmodel best score is ...\n0.2750190985485103\n\nmodel best params are ...\n{'clf__estimator__min_samples_split': 2, 'clf__estimator__n_estimators': 50, 'vect__max_df': 0.5, 'vect__max_features': 5000, 'vect__ngram_range': (1, 2), 'tfidf__use_idf': True}",
"_____no_output_____"
]
],
[
[
"model.best_estimator_",
"_____no_output_____"
],
[
"model.best_score_",
"_____no_output_____"
],
[
"model.best_params_",
"_____no_output_____"
],
[
"%%time\ny_pred = model.predict(X_test)",
"CPU times: user 22.3 s, sys: 3.16 s, total: 25.5 s\nWall time: 25.9 s\n"
]
],
[
[
"### 7. 测试模型\n打印微调后的模型的精确度、准确率和召回率。 \n\n因为本项目主要关注代码质量、开发流程和管道技术,所有没有模型性能指标的最低要求。但是,微调模型提高精确度、准确率和召回率可以让你的项目脱颖而出——特别是让你的简历更出彩。",
"_____no_output_____"
]
],
[
[
"for i in range(len(categories)):\n print(\"Categories:\", categories[i])\n print(classification_report(y_test[:,i], y_pred[:,i]))",
"Categories: related\n precision recall f1-score support\n\n 0 0.70 0.40 0.51 1490\n 1 0.84 0.95 0.89 5006\n 2 0.90 0.18 0.31 49\n\navg / total 0.81 0.82 0.80 6545\n\nCategories: request\n precision recall f1-score support\n\n 0 0.90 0.98 0.94 5428\n 1 0.83 0.50 0.62 1117\n\navg / total 0.89 0.90 0.89 6545\n\nCategories: offer\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 6520\n 1 0.00 0.00 0.00 25\n\navg / total 0.99 1.00 0.99 6545\n\nCategories: aid_related\n precision recall f1-score support\n\n 0 0.80 0.83 0.81 3870\n 1 0.74 0.69 0.72 2675\n\navg / total 0.77 0.78 0.77 6545\n\nCategories: medical_help\n precision recall f1-score support\n\n 0 0.93 0.99 0.96 6045\n 1 0.47 0.06 0.11 500\n\navg / total 0.89 0.92 0.89 6545\n\nCategories: medical_products\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6223\n 1 0.74 0.10 0.17 322\n\navg / total 0.94 0.95 0.94 6545\n\nCategories: search_and_rescue\n precision recall f1-score support\n\n 0 0.97 1.00 0.99 6359\n 1 0.80 0.06 0.12 186\n\navg / total 0.97 0.97 0.96 6545\n\nCategories: security\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6428\n 1 0.00 0.00 0.00 117\n\navg / total 0.96 0.98 0.97 6545\n\nCategories: military\n precision recall f1-score support\n\n 0 0.97 1.00 0.98 6336\n 1 0.63 0.06 0.11 209\n\navg / total 0.96 0.97 0.96 6545\n\nCategories: child_alone\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 6545\n\navg / total 1.00 1.00 1.00 6545\n\nCategories: water\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6138\n 1 0.85 0.37 0.51 407\n\navg / total 0.95 0.96 0.95 6545\n\nCategories: food\n precision recall f1-score support\n\n 0 0.95 0.99 0.97 5815\n 1 0.86 0.57 0.69 730\n\navg / total 0.94 0.94 0.94 6545\n\nCategories: shelter\n precision recall f1-score support\n\n 0 0.94 0.99 0.97 5965\n 1 0.83 0.33 0.47 580\n\navg / total 0.93 0.93 0.92 6545\n\nCategories: clothing\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6447\n 1 0.80 0.04 0.08 98\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: money\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6413\n 1 0.80 0.03 0.06 132\n\navg / total 0.98 0.98 0.97 6545\n\nCategories: missing_people\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6475\n 1 0.67 0.03 0.05 70\n\navg / total 0.99 0.99 0.98 6545\n\nCategories: refugees\n precision recall f1-score support\n\n 0 0.97 1.00 0.98 6334\n 1 0.44 0.03 0.06 211\n\navg / total 0.95 0.97 0.95 6545\n\nCategories: death\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6262\n 1 0.84 0.10 0.17 283\n\navg / total 0.96 0.96 0.94 6545\n\nCategories: other_aid\n precision recall f1-score support\n\n 0 0.87 0.99 0.93 5690\n 1 0.50 0.04 0.07 855\n\navg / total 0.82 0.87 0.82 6545\n\nCategories: infrastructure_related\n precision recall f1-score support\n\n 0 0.93 1.00 0.97 6115\n 1 0.00 0.00 0.00 430\n\navg / total 0.87 0.93 0.90 6545\n\nCategories: transport\n precision recall f1-score support\n\n 0 0.96 1.00 0.98 6269\n 1 0.62 0.07 0.12 276\n\navg / total 0.95 0.96 0.94 6545\n\nCategories: buildings\n precision recall f1-score support\n\n 0 0.95 1.00 0.98 6206\n 1 0.75 0.12 0.20 339\n\navg / total 0.94 0.95 0.94 6545\n\nCategories: electricity\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6400\n 1 0.82 0.06 0.12 145\n\navg / total 0.98 0.98 0.97 6545\n\nCategories: tools\n precision recall f1-score support\n\n 0 0.99 1.00 1.00 6500\n 1 0.00 0.00 0.00 45\n\navg / total 0.99 0.99 0.99 6545\n\nCategories: hospitals\n precision recall f1-score support\n\n 0 0.99 
1.00 1.00 6486\n 1 0.00 0.00 0.00 59\n\navg / total 0.98 0.99 0.99 6545\n\nCategories: shops\n precision recall f1-score support\n\n 0 1.00 1.00 1.00 6518\n 1 0.00 0.00 0.00 27\n\navg / total 0.99 1.00 0.99 6545\n\nCategories: aid_centers\n precision recall f1-score support\n\n 0 0.99 1.00 0.99 6476\n 1 0.00 0.00 0.00 69\n\navg / total 0.98 0.99 0.98 6545\n\nCategories: other_infrastructure\n precision recall f1-score support\n\n 0 0.95 1.00 0.98 6236\n 1 0.00 0.00 0.00 309\n\navg / total 0.91 0.95 0.93 6545\n\nCategories: weather_related\n precision recall f1-score support\n\n 0 0.89 0.95 0.92 4683\n 1 0.86 0.70 0.77 1862\n\navg / total 0.88 0.88 0.88 6545\n\nCategories: floods\n precision recall f1-score support\n\n 0 0.95 1.00 0.97 6019\n 1 0.90 0.43 0.58 526\n\navg / total 0.95 0.95 0.94 6545\n\nCategories: storm\n precision recall f1-score support\n\n 0 0.95 0.99 0.97 5965\n 1 0.77 0.49 0.60 580\n\navg / total 0.94 0.94 0.94 6545\n\nCategories: fire\n precision recall f1-score support\n\n 0 0.99 1.00 1.00 6488\n 1 0.50 0.02 0.03 57\n\navg / total 0.99 0.99 0.99 6545\n\nCategories: earthquake\n precision recall f1-score support\n\n 0 0.98 0.99 0.99 5889\n 1 0.92 0.81 0.86 656\n\navg / total 0.97 0.97 0.97 6545\n\nCategories: cold\n precision recall f1-score support\n\n 0 0.98 1.00 0.99 6405\n 1 0.81 0.09 0.17 140\n\navg / total 0.98 0.98 0.97 6545\n\nCategories: other_weather\n precision recall f1-score support\n\n 0 0.95 1.00 0.97 6203\n 1 0.41 0.02 0.04 342\n\navg / total 0.92 0.95 0.92 6545\n\nCategories: direct_report\n precision recall f1-score support\n\n 0 0.86 0.98 0.91 5265\n 1 0.77 0.34 0.48 1280\n\navg / total 0.84 0.85 0.83 6545\n\n"
]
],
[
[
"### 8. 继续优化模型,比如:\n* 尝试其他的机器学习算法\n* 尝试除 TF-IDF 外其他的特征",
"_____no_output_____"
],
[
"### 9. 导出模型为 pickle file",
"_____no_output_____"
]
],
[
[
"# 保存至本地磁盘\nwith open('model.pkl', 'wb') as file:\n pickle.dump(model, file)\n",
"_____no_output_____"
],
[
"# 从本地磁盘加载模型\nwith open('model.pkl', 'rb') as file:\n model_joblib = pickle.load(file)\n# 加载出来的模型可以进行predict等功能\n#print(model_joblib.predict([[4, 6, 10]]))",
"_____no_output_____"
]
],
[
[
"### 10. Use this notebook to complete `train.py`\n使用资源 (Resources)文件里附带的模板文件编写脚本,运行上述步骤,创建一个数据库,并基于用户指定的新数据集输出一个模型。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
ecdee80de0d90b4bde7871ce058d281cfa4db05f | 93,193 | ipynb | Jupyter Notebook | CP_05_clusters_03_analysis.ipynb | fealt/ironhack-bootcamp-capstone-project | 3d6a776cae490db3c4862f8657f0eb7f62c39eeb | [
"MIT"
] | null | null | null | CP_05_clusters_03_analysis.ipynb | fealt/ironhack-bootcamp-capstone-project | 3d6a776cae490db3c4862f8657f0eb7f62c39eeb | [
"MIT"
] | null | null | null | CP_05_clusters_03_analysis.ipynb | fealt/ironhack-bootcamp-capstone-project | 3d6a776cae490db3c4862f8657f0eb7f62c39eeb | [
"MIT"
] | null | null | null | 45.086115 | 23,316 | 0.517346 | [
[
[
"# loading libraries\n\nimport io\n\nimport folium\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom folium.plugins import FastMarkerCluster\nfrom PIL import Image",
"_____no_output_____"
],
[
"# Display all df columns\npd.set_option(\"display.max_columns\", None)\n\n# Set up with a higher resolution screen\n%config InlineBackend.figure_format = 'retina'",
"_____no_output_____"
],
[
"pd.set_option(\"display.max_columns\", None)",
"_____no_output_____"
],
[
"df_2002_2021s = pd.read_csv(\"./assets/csv/df_clusters.csv\", sep=\",\")",
"_____no_output_____"
],
[
"count_perc = (\n df_2002_2021s.groupby(\"cluster\")[[\"count\", \"percentage\"]].sum().reset_index()\n)",
"_____no_output_____"
],
[
"count_perc",
"_____no_output_____"
],
[
"# Set up with a higher resolution screen (useful on Mac)\n%config InlineBackend.figure_format = 'retina'\n\nplt.figure(figsize=(8, 8))\nplt.title(\"Clusters and %\")\n# Plot DataFrame\nsns.barplot(x=\"cluster\", y=\"percentage\", data=count_perc, color=\"c\")\nplt.show()",
"_____no_output_____"
],
[
"df_all_years = pd.read_csv(\n \"./assets/csv/df_all_years/df_all_years.csv\", sep=\",\"\n).drop(columns=\"Unnamed: 0.1\")",
"_____no_output_____"
],
[
"df_all_years.head(2)",
"_____no_output_____"
],
[
"df_all_years['total_pax'] = df_all_years['seats_sold'] + df_all_years['seats_free']",
"_____no_output_____"
],
[
"print(sorted(df_all_years.seats_available.unique()))",
"[0, 1, 2, 8, 9, 10, 19, 20, 22, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 212, 213, 214, 216, 217, 218, 219, 220, 221, 223, 224, 225, 226, 228, 229, 233, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 258, 264, 265, 266, 267, 268, 271, 272, 276, 279, 280, 281, 282, 284, 285, 287, 288, 289, 298, 323, 339, 358, 365, 369, 372, 379, 410, 435]\n"
],
[
"len(df_all_years)",
"_____no_output_____"
],
[
"df_clusters = df_all_years.copy()",
"_____no_output_____"
],
[
"drop_zero = [0, 1, 2]",
"_____no_output_____"
],
[
"df_clusters = df_clusters[df_clusters.seats_available.isin(drop_zero) == False]",
"_____no_output_____"
],
[
"len(df_clusters)",
"_____no_output_____"
],
[
"df_clusters = df_clusters.groupby([\"cluster_origin\"], as_index=False).agg(\n origins=(\"icao_origin\", lambda x: x.nunique()),\n destinations=(\"icao_dest\", lambda x: x.nunique()),\n carriers=(\"icao_carrier\", lambda x: x.nunique()),\n routes=(\"route_icao\", lambda x: x.nunique()),\n total_flights=(\"icao_carrier\", 'count'),\n aircraft_types=(\"icao_aircraft_type\", lambda x: x.nunique()),\n mean_aircraft_capacity=(\"seats_available\", 'mean'),\n total_pax=(\"seats_available\", 'sum')\n).round(2)",
"_____no_output_____"
],
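The named-aggregation call above packs several different statistics into a single `groupby`. As a quick illustration of the pattern (a toy example, not part of the analysis): each keyword argument becomes an output column built from an `(input_column, aggregation)` pair.

```python
import pandas as pd

toy = pd.DataFrame({
    'cluster_origin':  [1, 1, 2],
    'icao_origin':     ['SBGR', 'SBSP', 'SBGL'],
    'seats_available': [100, 150, 200],
})

# named aggregation: one output column per keyword
toy.groupby('cluster_origin', as_index=False).agg(
    origins=('icao_origin', lambda x: x.nunique()),
    total_pax=('seats_available', 'sum'),
)
```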
[
"df_clusters",
"_____no_output_____"
],
[
"df_clusters.origins = [2, 5, 6, 14, 3, 14, 14, 168]",
"_____no_output_____"
],
[
"df_clusters['flights_per_month'] = round(df_clusters['total_flights'] / 228, 2)",
"_____no_output_____"
],
[
"df_clusters['flights_month_origin'] = round(df_clusters['flights_per_month'] / df_clusters['origins'], 2)",
"_____no_output_____"
],
[
"df_clusters['pax_per_month'] = round(df_clusters['total_pax'] / 228, 2)",
"_____no_output_____"
],
[
"df_clusters['pax_month_origin'] = round(df_clusters['pax_per_month'] / df_clusters['origins'], 2)",
"_____no_output_____"
],
[
"df_clusters.drop(columns=['total_flights', 'total_pax', 'flights_per_month', 'pax_per_month'], inplace=True)",
"_____no_output_____"
],
[
"df_clusters",
"_____no_output_____"
]
],
[
[
"df_clusters.to_csv('./assets/csv/tableau/df_clusters_calc.csv', sep=',')",
"_____no_output_____"
]
],
[
[
"# players",
"_____no_output_____"
],
[
"players = df_all_years.copy()",
"_____no_output_____"
],
[
"players.head(2)",
"_____no_output_____"
],
[
"players_cluster_1 = players[players.cluster_origin == 1]",
"_____no_output_____"
],
[
"teste = players.groupby(['cluster_origin', \"icao_carrier\", 'sched_year'], as_index=False).agg(\n icao_carrier_count=('icao_carrier', 'count'),\n total_pax=(\"seats_available\", 'sum')\n)",
"_____no_output_____"
],
[
"airline = {\n 'AZU':'Azul',\n 'BRB':'Bra',\n 'GLO':'Gol',\n 'IPM':'Itapemirim',\n 'NES':'Nordeste',\n 'ONE':'Avianca Brasil',\n 'OWT':'Two',\n 'PTB':'Passaredo',\n 'PTN':'Pantanal',\n 'RSL':'Rio Sul',\n 'SLX':'Sete',\n 'TAM':'Latam',\n 'TIB':'Trip',\n 'VRG':'VARIG',\n 'VRN':'Vrg',\n 'VSP':'VASP',\n 'WEB':'Webjet',\n 'FYW':'FlyWays',\n 'NHG':'Brava (former NHT)',\n 'NRA':'Noar',\n 'PAM':'MAP',\n 'TSD':'Taf',\n 'TIM':'Team',\n 'SBA':'Sol',\n 'SUL':'Asta',\n 'TTL':'Total'\n}",
"_____no_output_____"
],
[
"teste[\"airline\"] = teste[\"icao_carrier\"].apply(lambda x: airline[x])",
"_____no_output_____"
],
[
"teste",
"_____no_output_____"
],
[
"teste.to_csv('./assets/csv/tableau/players_all_years.csv', sep=',')",
"_____no_output_____"
],
[
"years = [2002, 2003, 2004, 2005, 2006, 2007, 2008]",
"_____no_output_____"
],
[
"mkt_share_flights = players[players.sched_year.isin(years) == False]",
"_____no_output_____"
],
[
"mkt_share_flights.head(2)",
"_____no_output_____"
]
]
] | [
"code",
"raw",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
ecdee8c9673d9c32cb889b7e357c8611cf47063a | 61,531 | ipynb | Jupyter Notebook | notebooks/2.1-week2_analysis_hypotheses.ipynb | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | notebooks/2.1-week2_analysis_hypotheses.ipynb | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | notebooks/2.1-week2_analysis_hypotheses.ipynb | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | 60.206458 | 20,184 | 0.721815 | [
[
[
"<center>\n<img src=\"https://habrastorage.org/web/677/8e1/337/6778e1337c3d4b159d7e99df94227cb2.jpg\"/>\n## Специализация \"Машинное обучение и анализ данных\"\n</center>\n<center>Автор материала: программист-исследователь Mail.ru Group, старший преподаватель Факультета Компьютерных Наук ВШЭ Юрий Кашницкий",
"_____no_output_____"
],
[
"# <center> Capstone проект №1. Идентификация пользователей по посещенным веб-страницам\n<img src='http://i.istockimg.com/file_thumbview_approve/21546327/5/stock-illustration-21546327-identification-de-l-utilisateur.jpg'>\n\n# <center>Неделя 2. Подготовка и первичный анализ данных\n\nНа второй неделе мы продолжим подготавливать данные для дальнейшего анализа и построения прогнозных моделей. Конкретно, раньше мы определили что сессия – это последовательность из 10 посещенных пользователем сайтов, теперь сделаем длину сессии параметром, и потом при обучении прогнозных моделей выберем лучшую длину сессии.\nТакже мы познакомимся с предобработанными данными и статистически проверим первые гипотезы, связанные с нашими наблюдениями. \n\n**План 2 недели:**\n - Часть 1. Подготовка нескольких обучающих выборок для сравнения\n - Часть 2. Первичный анализ данных, проверка гипотез\n\n**В этой части проекта Вам могут быть полезны следующие видеозаписи лекций курса \"Построение выводов по данным\":**\n\n - [Доверительные интервалы для доли](https://www.coursera.org/learn/stats-for-data-analysis/lecture/3oi53/dovieritiel-nyie-intiervaly-dlia-doli)\n - [Биномиальный критерий для доли](https://www.coursera.org/learn/stats-for-data-analysis/lecture/JwmBw/binomial-nyi-kritierii-dlia-doli)\n - [Доверительные интервалы на основе бутстрепа](https://www.coursera.org/learn/stats-for-data-analysis/lecture/GZjW7/dovieritiel-nyie-intiervaly-na-osnovie-butstriepa)\n \n**Кроме того, в задании будут использоваться библиотеки Python [glob](https://docs.python.org/3/library/glob.html), [pickle](https://docs.python.org/2/library/pickle.html), [itertools](https://docs.python.org/3/library/itertools.html) и класс [csr_matrix](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.csr_matrix.html) из scipy.sparse.**",
"_____no_output_____"
],
[
"## Часть 1. Подготовка нескольких обучающих выборок для сравнения\n\nПока мы брали последовательности из 10 сайтов, и это было наобум. Давайте сделаем число сайтов в сессии параметром, чтоб в дальнейшем сравнить модели классификации, обученные на разных выборках – с 5, 7, 10 и 15 сайтами в сессии. Более того, пока мы брали по 10 сайтов подряд, без пересечения. Теперь давайте применим идею скользящего окна – сессии будут перекрываться. \n\n**Пример**: для длины сессии 10 и ширины окна 7 файл из 30 записей породит не 3 сессии, как раньше (1-10, 11-20, 21-30), а 5 (1-10, 8-17, 15-24, 22-30, 29-30). При этом в предпоследней сессии будет один ноль, а в последней – 8 нолей.\n\nСоздадим несколько выборок для разных сочетаний параметров длины сессии и ширины окна. Все они представлены в табличке ниже:\n\n<style type=\"text/css\">\n.tg {border-collapse:collapse;border-spacing:0;}\n.tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}\n.tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}\n</style>\n<table class=\"tg\">\n <tr>\n <th class=\"tg-031e\">session_length -><br>window_size <br></th>\n <th class=\"tg-031e\">5</th>\n <th class=\"tg-031e\">7</th>\n <th class=\"tg-031e\">10</th>\n <th class=\"tg-031e\">15</th>\n </tr>\n <tr>\n <td class=\"tg-031e\">5</td>\n <td class=\"tg-031e\">v</td>\n <td class=\"tg-031e\">v</td>\n <td class=\"tg-031e\">v</td>\n <td class=\"tg-031e\">v</td>\n </tr>\n <tr>\n <td class=\"tg-031e\">7</td>\n <td class=\"tg-031e\"></td>\n <td class=\"tg-031e\">v</td>\n <td class=\"tg-031e\">v</td>\n <td class=\"tg-031e\">v</td>\n </tr>\n <tr>\n <td class=\"tg-031e\">10</td>\n <td class=\"tg-031e\"></td>\n <td class=\"tg-031e\"></td>\n <td class=\"tg-031e\"><font color='green'>v</font></td>\n <td class=\"tg-031e\">v</td>\n </tr>\n</table>\n\nИтого должно получиться 18 разреженных матриц – указанные в таблице 9 сочетаний параметров формирования сессий для выборок из 10 и 150 пользователей. При этом 2 выборки мы уже сделали в прошлой части, они соответствуют сочетанию параметров: session_length=10, window_size=10, которые помечены в таблице выше галочкой зеленого цвета (done).",
"_____no_output_____"
],
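The sliding-window example above is easy to check directly. The snippet below is a small standalone illustration (not part of the assignment code): it splits 30 records into overlapping windows of length 10 with step 7 and zero-pads the tail, reproducing the 5 sessions described in the text.

```python
session_length, window_size = 10, 7
records = list(range(1, 31))              # records 1..30
padded = records + [0] * session_length   # zero-padding for the tail

sessions = [padded[start:start + session_length]
            for start in range(0, len(records), window_size)]
for s in sessions:
    print(s)
# -> [1..10], [8..17], [15..24], [22..30, 0], [29, 30, 0, 0, 0, 0, 0, 0, 0, 0]
```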
[
"Реализуйте функцию *prepare_sparse_train_set_window*.\n\nАргументы:\n- *path_to_csv_files* – путь к каталогу с csv-файлами\n- *site_freq_path* – путь к pickle-файлу с частотным словарем, полученным в 1 части проекта\n- *session_length* – длина сессии (параметр)\n- *window_size* – ширина окна (параметр) \n\nФункция должна возвращать 2 объекта:\n- разреженную матрицу *X_sparse* (двухмерная Scipy.sparse.csr_matrix), в которой строки соответствуют сессиям из *session_length* сайтов, а *max(site_id)* столбцов – количеству посещений *site_id* в сессии. \n- вектор *y* (Numpy array) \"ответов\" в виде ID пользователей, которым принадлежат сессии из *X_sparse*\n\nДетали:\n- Модифицируйте созданную в 1 части функцию *prepare_train_set*\n- Некоторые сессии могут повторяться – оставьте как есть, не удаляйте дубликаты\n- Замеряйте время выполнения итераций цикла с помощью *time* из *time*, *tqdm* из *tqdm* или с помощью виджета [log_progress](https://github.com/alexanderkuk/log-progress) ([статья](https://habrahabr.ru/post/276725/) о нем на Хабрахабре)\n- 150 файлов из *capstone_websites_data/150users/* должны обрабатываться за несколько секунд (в зависимости от входных параметров). Если дольше – не страшно, но знайте, что функцию можно ускорить. ",
"_____no_output_____"
]
],
[
[
"from __future__ import division, print_function\n# отключим всякие предупреждения Anaconda\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom glob import glob\nimport os\nimport re\nimport pickle\nfrom collections import Counter\nfrom tqdm import tqdm_notebook as tqdm\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom scipy import stats\nfrom statsmodels.stats.proportion import proportion_confint\n%matplotlib inline\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"# Поменяйте на свой путь к данным\nPATH_TO_RAW_DATA = '../data/raw'\nPATH_TO_INTERIM_DATA = '../data/interim'",
"_____no_output_____"
],
[
"def partition(lst, n, window=None, padding=None):\n if window == None:\n window = n\n if padding is not None:\n padded_lst = lst + [padding] * n\n else:\n padded_lst = lst\n return [padded_lst[idx : idx+n] for idx in range(0, len(lst), window)]\n\n\ndef make_site_ID_freqs(path_to_csv_files, verbose=False):\n #read through data and count site frequencies\n site_freqs = Counter()\n file_pattern = os.path.join(path_to_csv_files, 'user[0-9]*.csv')\n for fpath in tqdm(sorted(glob(file_pattern)), \n desc='Calculating frequencies', disable=not verbose):\n user_sites = pd.read_csv(fpath)['site'].values\n site_freqs.update(user_sites)\n #set site IDs from most to least frequent\n sorted_sites = [el[0] for el in sorted(site_freqs.items(), key=lambda x: -x[1])]\n site_ID_freqs = {site: (site_ID, site_freqs[site]) for site, site_ID in zip(sorted_sites, range(1, len(sorted_sites)+1))}\n return site_ID_freqs\n\ndef make_user_site_IDs(path_to_csv_files, site_ID_freqs, verbose=False):\n #read through data and encode sites with IDs\n user_site_IDs = {}\n user_ID_regex = re.compile('.*user([0-9]*).csv')\n file_pattern = os.path.join(path_to_csv_files, 'user[0-9]*.csv')\n for fpath in tqdm(sorted(glob(file_pattern)), \n desc='Encoding sites', disable=not verbose):\n user_ID = int(re.match(user_ID_regex, fpath).group(1))\n user_sites = pd.read_csv(fpath)['site'].values\n user_site_IDs[user_ID] = [site_ID_freqs[site][0] for site in user_sites]\n return user_site_IDs\n\ndef make_sessions_sparse(user_site_IDs, num_sites, session_length, window_size, verbose=False):\n data = []\n row_ind = []\n col_ind = []\n y = []\n row_counter = 0\n #split user site IDs into sessions and construct a sparse matrix of ID counts in a session\n for user_ID in tqdm(user_site_IDs, \n desc='Constructing sessions', disable=not verbose):\n for session in partition(user_site_IDs[user_ID], session_length, window_size, padding=0):\n for site_ID in session:\n row_ind.append(row_counter)\n col_ind.append(site_ID) #site IDs can also serve as column indices, because they range from 0 to something\n data.append(1)\n y.append(user_ID)\n row_counter += 1 #row is a session\n X_sparse = csr_matrix((data, (row_ind, col_ind)), shape=(row_counter, num_sites+1), dtype=np.int64)\n y = np.array(y)\n return X_sparse[:, 1:], y #exclude site ID 0\n\ndef prepare_sparse_train_set_window(path_to_csv_files, site_freq_path=None, session_length=10, window_size=10, verbose=False):\n if site_freq_path is None:\n site_ID_freqs = make_site_ID_freqs(path_to_csv_files, verbose=verbose)\n else:\n with open(site_freq_path, 'rb') as fin:\n site_ID_freqs = pickle.load(fin)\n user_site_IDs = make_user_site_IDs(path_to_csv_files, site_ID_freqs, verbose=verbose)\n X, y = make_sessions_sparse(user_site_IDs, len(site_ID_freqs), session_length, window_size, verbose=verbose)\n return X, y",
"_____no_output_____"
]
],
[
[
"**Примените полученную функцию с параметрами *session_length=5* и *window_size=3* к игрушечному примеру. Убедитесь, что все работает как надо.**",
"_____no_output_____"
]
],
[
[
"X_toy_s5_w3, y_s5_w3 = prepare_sparse_train_set_window(os.path.join(PATH_TO_RAW_DATA,'3users'), session_length=5, window_size=3)",
"_____no_output_____"
],
[
"X_toy_s5_w3.todense()",
"_____no_output_____"
],
[
"y_s5_w3",
"_____no_output_____"
]
],
[
[
"**Запустите созданную функцию 16 раз с помощью циклов по числу пользователей num_users (10 или 150), значениям параметра *session_length* (15, 10, 7 или 5) и значениям параметра *window_size* (10, 7 или 5). Сериализуйте все 16 разреженных матриц (обучающие выборки) и векторов (метки целевого класса – ID пользователя) в файлы `X_sparse_{num_users}users_s{session_length}_w{window_size}.pkl` и `y_{num_users}users_s{session_length}_w{window_size}.pkl`.**\n\n**Чтоб убедиться, что мы все далее будем работать с идентичными объектами, запишите в список *data_lengths* число строк во всех полученных рареженных матрицах (16 значений). Если какие-то будут совпадать, это нормально (можно сообразить, почему).**\n\n**На моем ноутбуке этот участок кода отработал за 26 секунд, хотя понятно, что все зависит от эффективности реализации функции *prepare_sparse_train_set_window* и мощности используемого железа. И честно говоря, моя первая реализация была намного менее эффективной (34 минуты), так что тут у Вас есть возможность оптимизировать свой код.**",
"_____no_output_____"
]
],
[
[
"%%time\nimport itertools\n\ndata_lengths = []\n\nfor num_users in [10, 150]:\n for window_size, session_length in tqdm(itertools.product([10, 7, 5], [15, 10, 7, 5]), \n desc='{} users'.format(num_users)):\n if window_size <= session_length and (window_size, session_length) != (10, 10):\n data_path = os.path.join(PATH_TO_RAW_DATA,'{}users'.format(num_users))\n site_freq_path = os.path.join(PATH_TO_INTERIM_DATA,'site_freq_{}users.pkl'.format(num_users))\n X_sparse, y = prepare_sparse_train_set_window(data_path, site_freq_path, session_length, window_size)\n #put down data lengths\n data_lengths.append(X_sparse.shape[0])\n #pickle X_sparse and y\n X_pkl_path = os.path.join(PATH_TO_INTERIM_DATA, 'X_sparse_{}users_s{}_w{}.pkl'.format(num_users, session_length, window_size))\n with open(X_pkl_path, 'wb') as X_pkl:\n pickle.dump(X_sparse, X_pkl, protocol=2)\n y_pkl_path = os.path.join(PATH_TO_INTERIM_DATA, 'y_{}users_s{}_w{}.pkl'.format(num_users, session_length, window_size))\n with open(y_pkl_path, 'wb') as y_pkl:\n pickle.dump(y, y_pkl, protocol=2)",
"_____no_output_____"
]
],
[
[
"**Запишите в файл *answer2_1.txt* все числа из списка *data_lengths* через пробел. Полученный файл будет ответом на 1 вопрос теста.**",
"_____no_output_____"
]
],
[
[
"def write_answer_to_file(answer, file_address):\n with open(file_address, 'w') as out_f:\n out_f.write(str(answer))",
"_____no_output_____"
],
[
"write_answer_to_file(' '.join(map(str, data_lengths)), \n 'answer2_1.txt')",
"_____no_output_____"
]
],
[
[
"## Часть 2. Первичный анализ данных, проверка гипотез",
"_____no_output_____"
],
[
"**Считаем в DataFrame подготовленный на 1 неделе файл `train_data_10users.csv`. Далее будем работать с ним.**",
"_____no_output_____"
]
],
[
[
"train_df = pd.read_csv(os.path.join(PATH_TO_INTERIM_DATA, 'train_data_10users.csv'), \n index_col='session_id')",
"_____no_output_____"
],
[
"train_df.head()",
"_____no_output_____"
],
[
"train_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 14061 entries, 0 to 14060\nData columns (total 11 columns):\nsite1 14061 non-null int64\nsite2 14061 non-null int64\nsite3 14061 non-null int64\nsite4 14061 non-null int64\nsite5 14061 non-null int64\nsite6 14061 non-null int64\nsite7 14061 non-null int64\nsite8 14061 non-null int64\nsite9 14061 non-null int64\nsite10 14061 non-null int64\nuser_id 14061 non-null int64\ndtypes: int64(11)\nmemory usage: 1.3 MB\n"
]
],
[
[
"**Распределение целевого класса:**",
"_____no_output_____"
]
],
[
[
"train_df['user_id'].value_counts()",
"_____no_output_____"
]
],
[
[
"**Посчитаем распределение числа уникальных сайтов в каждой сессии из 10 посещенных подряд сайтов.**",
"_____no_output_____"
]
],
[
[
"num_unique_sites = [np.unique(train_df.values[i, :-1]).shape[0] \n for i in range(train_df.shape[0])]",
"_____no_output_____"
],
[
"pd.Series(num_unique_sites).value_counts()",
"_____no_output_____"
],
[
"pd.Series(num_unique_sites).hist(figsize=(8, 6));",
"_____no_output_____"
]
],
[
[
"**Проверьте с помощью QQ-плота и критерия Шапиро-Уилка, что эта величина распределена нормально. Сделайте вывод. Ответом на второй вопрос в тесте будет файл со словом \"YES\" или \"NO\" в зависимости от того, распределено ли нормально число уникальных сайтов в сессии.**",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(8, 6))\nstats.probplot(pd.Series(num_unique_sites), dist='norm', plot=plt)\nplt.show()",
"_____no_output_____"
],
[
"print(\"Shapiro-Wilk normality test, W-statistic: %f, p-value: %f\" % stats.shapiro(pd.Series(num_unique_sites)))",
"Shapiro-Wilk normality test, W-statistic: 0.954693, p-value: 0.000000\n"
],
[
"write_answer_to_file('NO', \n 'answer2_2.txt')",
"_____no_output_____"
]
],
[
[
"**Проверьте гипотезу о том, что пользователь хотя бы раз зайдет на сайт, который он уже ранее посетил в сессии из 10 сайтов. Давайте проверим с помощью биномиального критерия для доли, что доля случаев, когда пользователь повторно посетил какой-то сайт (то есть число уникальных сайтов в сессии < 10) велика: больше 95% (обратите внимание, что альтернатива тому, что доля равна 95% – одностороняя). Ответом на 3 вопрос в тесте будет полученное p-value.**",
"_____no_output_____"
]
],
[
[
"has_two_similar = (np.array(num_unique_sites) < 10).astype('int')",
"_____no_output_____"
],
[
"pi_val = stats.binom_test(sum(has_two_similar), len(has_two_similar), 0.95, alternative='greater')\npi_val",
"_____no_output_____"
],
[
"write_answer_to_file(pi_val, \n 'answer2_3.txt')",
"_____no_output_____"
]
],
[
[
"**Постройте для этой доли 95% доверительный интервал Уилсона. Округлите границы интервала до 3 знаков после запятой и запишите через пробел в файл *answer2_4.txt*. Это будет ответом на 4 вопрос теста.**",
"_____no_output_____"
]
],
[
[
"wilson_interval = proportion_confint(sum(has_two_similar), len(has_two_similar), method = 'wilson')\nwilson_interval",
"_____no_output_____"
],
[
"write_answer_to_file('{} {}'.format(round(wilson_interval[0], 3),\n round(wilson_interval[1], 3)), \n 'answer2_4.txt')",
"_____no_output_____"
]
],
[
[
"**Постройте распределение частоты посещения сайтов (сколько раз тот или иной сайт попадается в выборке) для сайтов, которые были посещены как минимум 1000 раз.**",
"_____no_output_____"
]
],
[
[
"with open(os.path.join(PATH_TO_INTERIM_DATA, 'site_freq_10users.pkl'), 'rb') as fin:\n site_freqs_named = pickle.load(fin)\nsite_freqs = pd.Series(dict(site_freqs_named.values()))",
"_____no_output_____"
],
[
"site_freqs[site_freqs >= 1000]",
"_____no_output_____"
]
],
[
[
"**Постройте 95% доверительный интервал для средней частоты появления сайта в выборке (во всей, уже не только для тех сайтов, что были посещены как минимум 1000 раз) на основе bootstrap. Используйте столько же bootstrap-подвыборок, сколько сайтов оказалось в исходной выборке по 10 пользователям. Берите подвыборки из посчитанного списка частот посещений сайтов – не надо заново считать эти частоты. Учтите, что частоту появления нуля (сайт с индексом 0 появлялся там, где сессии были короче 10 сайтов) включать не надо. Округлите границы интервала до 3 знаков после запятой и запишите через пробел в файл *answer2_5.txt*. Это будет ответом на 5 вопрос теста.**",
"_____no_output_____"
]
],
[
[
"def get_bootstrap_samples(data, n_samples, random_seed=17):\n np.random.seed(random_seed)\n indices = np.random.randint(0, len(data), (n_samples, len(data)))\n samples = data[indices]\n return samples",
"_____no_output_____"
],
[
"def stat_intervals(stat, alpha):\n boundaries = np.percentile(stat, \n [100 * alpha / 2., 100 * (1 - alpha / 2.)])\n return boundaries",
"_____no_output_____"
],
[
"avg_scores = map(np.average, get_bootstrap_samples(site_freqs.values, len(site_freqs)))\nconf_interval = stat_intervals(avg_scores, 0.05)\nprint(\"95% confidence interval:\", conf_interval)",
"95% confidence interval: [22.51524527 35.76303684]\n"
],
[
"write_answer_to_file('{} {}'.format(round(conf_interval[0], 3),\n round(conf_interval[1], 3)),\n 'answer2_5.txt')",
"_____no_output_____"
]
],
[
[
"## Пути улучшения\nВ этом проекте свобода творчества на каждом шаге, а 7 неделя проекта посвящена общему описанию (`html`, `ipynb` или `pdf`) и взаимному оцениванию проектов. Что еще можно добавить по второй части проекта:\n- можно дополнительно рассматривать сессии с параметром – длиной сессии по времени. И составить выборки, скажем, для 5-, 10-, 15- и 20-минутных сессий (это как раз пригодится в [соревновании](https://inclass.kaggle.com/c/identify-me-if-you-can4) Kaggle Inclass)\n- можно провести больше первичного анализа и проверять прочие интересные гипотезы (а больше их появится после создания признаков на следующей неделе)\n\nНа 3 неделе мы займемся визуальным анализом данных и построением признаков.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
ecdeec2ba5b2ada60a2dd1d1efcaf22d306d6ebb | 29,710 | ipynb | Jupyter Notebook | Colabdebianrdp.ipynb | The-Burning/colabdebianrdp | bd4752b860b70be351615f01c640f885b9dd185e | [
"MIT"
] | 1 | 2022-01-23T15:13:13.000Z | 2022-01-23T15:13:13.000Z | Colabdebianrdp.ipynb | The-Burning/colabdebianrdp | bd4752b860b70be351615f01c640f885b9dd185e | [
"MIT"
] | null | null | null | Colabdebianrdp.ipynb | The-Burning/colabdebianrdp | bd4752b860b70be351615f01c640f885b9dd185e | [
"MIT"
] | null | null | null | 39.455511 | 282 | 0.470616 | [
[
[
"<a href=\"https://colab.research.google.com/github/The-Burning/colabdebianrdp/blob/main/Colabdebianrdp.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"Mon Jan 10 10:01:09 2022 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 495.44 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |\n| N/A 44C P8 28W / 149W | 0MiB / 11441MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"!pip install git+https://github.com/The-Burning/rdp1.git\nimport remocolab\nremocolab.setupVNC()",
"Collecting git+https://github.com/The-Burning/rdp1.git\n Cloning https://github.com/The-Burning/rdp1.git to /tmp/pip-req-build-01qeo449\n Running command git clone -q https://github.com/The-Burning/rdp1.git /tmp/pip-req-build-01qeo449\nRequirement already satisfied: pyngrok in /usr/local/lib/python3.7/dist-packages (from remocolab.py==0.1) (5.1.0)\nRequirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from pyngrok->remocolab.py==0.1) (3.13)\nWarning! GPU of your assigned virtual machine is Tesla K80.\nYou might get better GPU by reseting the runtime.\nDo you want to continue? [y/n] y\n---\nCopy&paste your tunnel authtoken from https://dashboard.ngrok.com/auth\n(You need to sign up for ngrok and login,)\n··········\nSelect your ngrok region:\nus - United States (Ohio)\neu - Europe (Frankfurt)\nap - Asia/Pacific (Singapore)\nau - Australia (Sydney)\nsa - South America (Sao Paulo)\njp - Japan (Tokyo)\nin - India (Mumbai)\nau\n"
],
[
"import os\n\nusername = \"Ankit\" #@param {type:\"string\"}\npassword = \"2005\" #@param {type:\"string\"}\n\nprint(\"Creating User and Setting it up\")\n\n# Creation of user\nos.system(f\"useradd -m {username}\")\n\n# Add user to sudo group\nos.system(f\"adduser {username} sudo\")\n \n# Set password of user to 'root'\nos.system(f\"echo '{username}:{password}' | sudo chpasswd\")\n\n# Change default shell from sh to bash\nos.system(\"sed -i 's/\\/bin\\/sh/\\/bin\\/bash/g' /etc/passwd\")\n\nprint(f\"User created and configured having username `{username}` and password `{password}`\")\n\n\n",
"Creating User and Setting it up\nUser created and configured having username `Ankit` and password `2005`\n"
],
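A side note on the user-creation cell above (an illustrative alternative, not a required change): building shell commands with f-strings and `os.system` passes the username and password through a shell, so `subprocess.run` with an argument list is a slightly safer pattern. A minimal sketch, assuming the same `username` and `password` variables:

```python
import subprocess

# Same steps as above, but without interpolating user input into a shell string.
subprocess.run(["useradd", "-m", username], check=True)
subprocess.run(["adduser", username, "sudo"], check=True)
# chpasswd reads "user:password" pairs from stdin
subprocess.run(["chpasswd"], input=f"{username}:{password}", text=True, check=True)
```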
[
"#@title **RDP**\n#@markdown It takes 4-5 minutes for installation\n\nimport os\nimport subprocess\n\n#@markdown Visit http://remotedesktop.google.com/headless and copy the command after Authentication\n\nCRP = \"DISPLAY= /opt/google/chrome-remote-desktop/start-host --code=\\\"4/0AX4XfWhaYGyZLMoeqwT4Llw-ZLlxBBo0MVYxpW4dZ7D6sKbYTAiedIx4bnvTjqfCtSBC3w\\\" --redirect-url=\\\"https://remotedesktop.google.com/_/oauthredirect\\\" --name=$(hostname)\" #@param {type:\"string\"}\n\n#@markdown Enter a Pin (more or equal to 6 digits)\nPin = 123456 #@param {type: \"integer\"}\n\n#@markdown Autostart Notebook in RDP\nAutostart = True #@param {type: \"boolean\"}\nclass CRD:\n def __init__(self, user):\n os.system(\"apt update\")\n self.installCRD() \n self.installGoogleChorme()\n self.finish(user)\n print(\"\\nRDP created succesfully move to https://remotedesktop.google.com/access\")\n @staticmethod\n def installCRD():\n print(\"Installing Chrome Remote Desktop\")\n subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)\n subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)\n subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)\n\n \n @staticmethod\n def installGoogleChorme():\n print(\"Installing Google Chrome\")\n subprocess.run([\"wget\", \"https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb\"], stdout=subprocess.PIPE)\n subprocess.run([\"dpkg\", \"--install\", \"google-chrome-stable_current_amd64.deb\"], stdout=subprocess.PIPE)\n subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)\n @staticmethod\n def finish(user):\n print(\"Finalizing\")\n if Autostart:\n os.makedirs(f\"/home/{user}/.config/autostart\", exist_ok=True)\n link = \"https://colab.research.google.com/drive/1_wsC8_YAQk9yYyJAJIZI0ezpBcEzqI7J#scrollTo=yGKMStJ-OaJ1\"\n colab_autostart = \"\"\"[Desktop Entry]\nType=Application\nName=Colab\nExec=sh -c \"sensible-browser {}\"\nIcon=\nComment=Open a predefined notebook at session signin.\nX-GNOME-Autostart-enabled=true\"\"\".format(link)\n with open(f\"/home/{user}/.config/autostart/colab.desktop\", \"w\") as f:\n f.write(colab_autostart)\n os.system(f\"chmod +x /home/{user}/.config/autostart/colab.desktop\")\n os.system(f\"chown {user}:{user} /home/{user}/.config\")\n\n os.system(f\"adduser {user} chrome-remote-desktop\")\n command = f\"{CRP} --pin={Pin}\"\n os.system(f\"su - {user} -c '{command}'\")\n os.system(\"service chrome-remote-desktop start\")\n \n\n print(\"Finished Succesfully\")\n\n\ntry:\n if CRP == \"\":\n print(\"Please enter authcode from the given link\")\n elif len(str(Pin)) < 6:\n print(\"Enter a pin more or equal to 6 digits\")\n else:\n CRD(username)\nexcept NameError as e:\n print(\"'username' variable not found, Create a user first\")",
"'username' variable not found, Create a user first\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ecdef070deae52a8b7fe220fe2dc215252b476a7 | 42,626 | ipynb | Jupyter Notebook | intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb | Kshitij09/deep-learning-v2-pytorch | b214e63b7b560122bc5fd5b26bff6946b5078ba6 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb | Kshitij09/deep-learning-v2-pytorch | b214e63b7b560122bc5fd5b26bff6946b5078ba6 | [
"MIT"
] | null | null | null | intro-to-pytorch/Part 4 - Fashion-MNIST (Exercises).ipynb | Kshitij09/deep-learning-v2-pytorch | b214e63b7b560122bc5fd5b26bff6946b5078ba6 | [
"MIT"
] | null | null | null | 136.621795 | 26,708 | 0.855487 | [
[
[
"# Classifying Fashion-MNIST\n\nNow it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.\n\n<img src='assets/fashion-mnist-sprite.png' width=500px>\n\nIn this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.\n\nFirst off, let's load the dataset through torchvision.",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import datasets, transforms\nimport helper\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntorch.backends.cudnn.benchmark = True\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n# Download and load the training data\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, pin_memory=True,num_workers=4)\n\n# Download and load the test data\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, pin_memory=True,num_workers=4)",
"_____no_output_____"
]
],
[
[
"Here we can see one of the images.",
"_____no_output_____"
]
],
[
[
"image, label = next(iter(testloader))\nhelper.imshow(image[0,:]);",
"_____no_output_____"
]
],
[
[
"## Building the network\n\nHere you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.",
"_____no_output_____"
]
],
[
[
"from torch import nn\nfrom collections import OrderedDict\nfrom torch import optim",
"_____no_output_____"
],
[
"# TODO: Define your network architecture here\n\nmodel = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(784,128,bias=False)),\n ('relu1',nn.ReLU(inplace=True)),\n ('bn1',nn.BatchNorm1d(128)),\n ('drop1',nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(128,128,bias=False)),\n ('relu2',nn.ReLU(inplace=True)),\n ('bn2',nn.BatchNorm1d(128)),\n ('drop2',nn.Dropout(p=0.5)),\n ('fc3', nn.Linear(128,10))]))\nmodel.to(device)",
"_____no_output_____"
]
],
[
[
"# Train the network\n\nNow you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).\n\nThen write the training code. Remember the training pass is a fairly straightforward process:\n\n* Make a forward pass through the network to get the logits \n* Use the logits to calculate the loss\n* Perform a backward pass through the network with `loss.backward()` to calculate the gradients\n* Take a step with the optimizer to update the weights\n\nBy adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.",
"_____no_output_____"
]
],
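The four steps listed above map one-to-one onto a few lines of PyTorch. As a minimal sketch of a single training step (assuming `model`, `criterion`, `optimizer` and one batch `images, labels` are defined as in this notebook; the full loop appears in the cells below):

```python
images = images.view(images.shape[0], -1)   # flatten 28x28 images to 784-vectors

optimizer.zero_grad()               # clear gradients from the previous step
logits = model(images)              # 1. forward pass -> logits
loss = criterion(logits, labels)    # 2. compute the loss
loss.backward()                     # 3. backward pass: compute gradients
optimizer.step()                    # 4. update the weights
```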
[
[
"# TODO: Create the network, define the criterion and optimizer\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters())",
"_____no_output_____"
],
[
"# TODO: Train the network here\n\ndef error_rate(output,targets):\n return (output.argmax(-1) != labels).float().mean()\n\nfor ep in range(20):\n train_loss_avg = 0.\n train_err_rates = 0.\n model.train()\n for images,labels in trainloader:\n images = images.view(images.shape[0],-1)\n images,labels = images.to(device), labels.to(device)\n \n optimizer.zero_grad()\n output = model(images)\n\n loss = criterion(output,labels)\n\n loss.backward()\n optimizer.step()\n train_loss_avg += loss.item()\n train_err_rates += error_rate(output,labels).item()\n else:\n model.eval()\n val_loss_avg = 0.\n val_err_rates = 0.\n with torch.no_grad():\n for images,labels in testloader:\n images = images.view(images.shape[0],-1)\n images,labels = images.to(device), labels.to(device)\n output = model(images)\n loss = criterion(output,labels)\n val_loss_avg += loss.item()\n val_err_rates += error_rate(output,labels).item()\n else:\n train_loss = train_loss_avg / len(trainloader)\n train_error = train_err_rates / len(trainloader)\n valid_loss = val_loss_avg / len(testloader)\n valid_error = val_err_rates / len(testloader)\n \n print(f\"Epoch: {ep:<2}, train_loss:{train_loss:<4}, train_error:{train_error:<4}, valid_loss: {valid_loss:<4}, valid_error: {valid_error:<4}\")",
"Epoch: 0 , train_loss:0.6468340638858169, train_error:0.22634594882729211, valid_loss: 1.2102915293471828, valid_error: 0.2200437898089172\nEpoch: 1 , train_loss:0.5186431863859519, train_error:0.1839852078891258, valid_loss: 0.4590816305131669, valid_error: 0.1595342356687898\nEpoch: 2 , train_loss:0.4915071910441811, train_error:0.17369069829424308, valid_loss: 0.4868885570081176, valid_error: 0.16839171974522293\nEpoch: 3 , train_loss:0.47233755217749934, train_error:0.16726079424307036, valid_loss: 0.4384898957173536, valid_error: 0.14858678343949044\nEpoch: 4 , train_loss:0.4628956960653191, train_error:0.16431236673773988, valid_loss: 0.43589773840585333, valid_error: 0.14759156050955413\nEpoch: 5 , train_loss:0.45738732624155626, train_error:0.16086420575692964, valid_loss: 0.4450436461313515, valid_error: 0.14271496815286625\nEpoch: 6 , train_loss:0.44974569000923303, train_error:0.15959821428571427, valid_loss: 0.4565962995313535, valid_error: 0.14530254777070065\nEpoch: 7 , train_loss:0.44078441188215955, train_error:0.15909848081023453, valid_loss: 0.43881117566755623, valid_error: 0.14281449044585987\nEpoch: 8 , train_loss:0.44026216672364077, train_error:0.158215618336887, valid_loss: 0.4076123469194789, valid_error: 0.14281449044585987\nEpoch: 9 , train_loss:0.4386451492177398, train_error:0.15591684434968017, valid_loss: 0.4255441823013269, valid_error: 0.14271496815286625\nEpoch: 10, train_loss:0.4337014503983546, train_error:0.1545009328358209, valid_loss: 0.4448201625020641, valid_error: 0.14540207006369427\nEpoch: 11, train_loss:0.41979910239481977, train_error:0.15053638059701493, valid_loss: 0.41921816282211594, valid_error: 0.13953025477707007\nEpoch: 12, train_loss:0.4182848586265975, train_error:0.14852078891257997, valid_loss: 0.4055744365901704, valid_error: 0.13714171974522293\nEpoch: 13, train_loss:0.4190080232584654, train_error:0.14952025586353945, valid_loss: 0.42105620102897573, valid_error: 0.1347531847133758\nEpoch: 14, train_loss:0.41528181873087183, train_error:0.14738805970149255, valid_loss: 0.39887777170178235, valid_error: 0.1331608280254777\nEpoch: 15, train_loss:0.41036900806465126, train_error:0.14738805970149255, valid_loss: 0.40162009987861486, valid_error: 0.13823646496815287\nEpoch: 16, train_loss:0.40961807033718267, train_error:0.14703824626865672, valid_loss: 0.40976943645128017, valid_error: 0.1318670382165605\nEpoch: 17, train_loss:0.4039171287563564, train_error:0.1433568763326226, valid_loss: 0.4064226312811967, valid_error: 0.13206608280254778\nEpoch: 18, train_loss:0.4052637484568014, train_error:0.14538912579957355, valid_loss: 0.40715598367202055, valid_error: 0.13226512738853502\nEpoch: 19, train_loss:0.40186926784482335, train_error:0.14249067164179105, valid_loss: 0.39135485308565154, valid_error: 0.13126990445859874\n"
],
[
"import torch.nn.functional as F",
"_____no_output_____"
],
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\n\n# Test out your network!\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\nimg = images[30]\n# Convert 2D image to 1D vector\nimg = img.resize_(1, 784)\n\n# TODO: Calculate the class probabilities (softmax) for img\nmodel.to(torch.device('cpu'))\nps = F.softmax(model(img),dim=-1)\n\n# Plot the image and probabilities\nhelper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
ecdefb0dc4d161ef1a2e6934c7bfb877c59365c3 | 109,240 | ipynb | Jupyter Notebook | 04PCA/04PCA-in-scikit-learn.ipynb | violet-Bin/MachineLearning | 886af9fb22442cdc0d684e7a19132410ccb92572 | [
"Apache-2.0"
] | 1 | 2019-04-10T12:46:05.000Z | 2019-04-10T12:46:05.000Z | 04PCA/04PCA-in-scikit-learn.ipynb | violet-Bin/MachineLearning | 886af9fb22442cdc0d684e7a19132410ccb92572 | [
"Apache-2.0"
] | null | null | null | 04PCA/04PCA-in-scikit-learn.ipynb | violet-Bin/MachineLearning | 886af9fb22442cdc0d684e7a19132410ccb92572 | [
"Apache-2.0"
] | null | null | null | 238.515284 | 89,756 | 0.927188 | [
[
[
"### scikit-learn中的PCA",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets",
"_____no_output_____"
],
[
"digits = datasets.load_digits()\nX = digits.data\ny = digits.target",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"%%time\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train, y_train)",
"Wall time: 115 ms\n"
],
[
"knn_clf.score(X_test, y_test)",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=2)\npca.fit(X_train)\nX_train_reduction = pca.transform(X_train)\nX_test_reduction = pca.transform(X_test)",
"_____no_output_____"
],
[
"%%time \nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train_reduction, y_train)",
"Wall time: 2 ms\n"
],
[
"knn_clf.score(X_test_reduction, y_test)",
"_____no_output_____"
]
],
[
[
"### 主成分所解释的方差",
"_____no_output_____"
]
],
[
[
"pca.explained_variance_ratio_",
"_____no_output_____"
],
[
"pca.explained_variance_",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=X_train.shape[1])\npca.fit(X_train)\npca.explained_variance_ratio_",
"_____no_output_____"
],
[
"plt.plot([i for i in range(X_train.shape[1])], [np.sum(pca.explained_variance_ratio_[:i+1]) for i in range(X_train.shape[1])])",
"_____no_output_____"
],
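The curve above can also be read off numerically. A small sketch (not in the original notebook, reusing the full `pca` fitted in the previous cells) that finds how many components are needed to keep 95% of the variance — which is exactly what `PCA(0.95)` below determines internally:

```python
import numpy as np

cum_ratio = np.cumsum(pca.explained_variance_ratio_)
n_components_95 = np.argmax(cum_ratio >= 0.95) + 1   # first index where 95% is reached
print(n_components_95)  # should match pca.n_components_ after fitting PCA(0.95) below
```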
[
"pca = PCA(0.95)\npca.fit(X_train)",
"_____no_output_____"
],
[
"pca.n_components_",
"_____no_output_____"
],
[
"X_train_reduction = pca.transform(X_train)\nX_test_reduction = pca.transform(X_test)",
"_____no_output_____"
],
[
"%%time \nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train_reduction, y_train)",
"Wall time: 4 ms\n"
],
[
"knn_clf.score(X_test_reduction, y_test)",
"_____no_output_____"
]
],
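A useful sanity check on how much information the 0.95-variance projection keeps is to map the reduced data back to the original 64-dimensional space. A brief sketch (assuming the `pca` fitted with `PCA(0.95)` above and `X_train_reduction` from the earlier cell):

```python
# Reconstruct approximate digits from the reduced representation
X_train_restored = pca.inverse_transform(X_train_reduction)
print(X_train_restored.shape)  # back to (n_samples, 64), with some detail lost
```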
[
[
"### 使用PCA对数据进行降维可视化",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=2)\npca.fit(X)\nX_reduction = pca.transform(X)",
"_____no_output_____"
],
[
"X_reduction.shape",
"_____no_output_____"
],
[
"for i in range(10):\n plt.scatter(X_reduction[y==i,0], X_reduction[y==i,1], alpha=0.8)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
ecdefe0b935fa6e6c69b3eb9568b8df16fd91913 | 62,302 | ipynb | Jupyter Notebook | PythonDataScienceHandbook/notebooks/02.07-Fancy-Indexing.ipynb | Mosuswalks/AI-with-Python | af1b45368b792bdefb9ac96cfd964791ea155baf | [
"Apache-2.0"
] | null | null | null | PythonDataScienceHandbook/notebooks/02.07-Fancy-Indexing.ipynb | Mosuswalks/AI-with-Python | af1b45368b792bdefb9ac96cfd964791ea155baf | [
"Apache-2.0"
] | null | null | null | PythonDataScienceHandbook/notebooks/02.07-Fancy-Indexing.ipynb | Mosuswalks/AI-with-Python | af1b45368b792bdefb9ac96cfd964791ea155baf | [
"Apache-2.0"
] | null | null | null | 70.717367 | 21,022 | 0.803249 | [
[
[
"<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"figures/PDSH-cover-small.png\">\n\n*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*\n\n*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb) | [Contents](Index.ipynb) | [Sorting Arrays](02.08-Sorting.ipynb) >\n\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.07-Fancy-Indexing.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n",
"_____no_output_____"
],
[
"# Fancy Indexing",
"_____no_output_____"
],
[
"In the previous sections, we saw how to access and modify portions of arrays using simple indices (e.g., ``arr[0]``), slices (e.g., ``arr[:5]``), and Boolean masks (e.g., ``arr[arr > 0]``).\nIn this section, we'll look at another style of array indexing, known as *fancy indexing*.\nFancy indexing is like the simple indexing we've already seen, but we pass arrays of indices in place of single scalars.\nThis allows us to very quickly access and modify complicated subsets of an array's values.",
"_____no_output_____"
],
[
"## Exploring Fancy Indexing\n\nFancy indexing is conceptually simple: it means passing an array of indices to access multiple array elements at once.\nFor example, consider the following array:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nrand = np.random.RandomState(42)\n\nx = rand.randint(100, size=10)\nprint(x)",
"[51 92 14 71 60 20 82 86 74 74]\n"
]
],
[
[
"Suppose we want to access three different elements. We could do it like this:",
"_____no_output_____"
]
],
[
[
"[x[3], x[7], x[2]]",
"_____no_output_____"
]
],
[
[
"Alternatively, we can pass a single list or array of indices to obtain the same result:",
"_____no_output_____"
]
],
[
[
"ind = [3, 7, 4]\nx[ind]",
"_____no_output_____"
]
],
[
[
"When using fancy indexing, the shape of the result reflects the shape of the *index arrays* rather than the shape of the *array being indexed*:",
"_____no_output_____"
]
],
[
[
"ind = np.array([[3, 7],\n [4, 5]])\nx[ind]",
"_____no_output_____"
]
],
[
[
"Fancy indexing also works in multiple dimensions. Consider the following array:",
"_____no_output_____"
]
],
[
[
"X = np.arange(12).reshape((3, 4))\nX",
"_____no_output_____"
]
],
[
[
"Like with standard indexing, the first index refers to the row, and the second to the column:",
"_____no_output_____"
]
],
[
[
"row = np.array([0, 1, 2])\ncol = np.array([2, 1, 3])\nX[row, col]",
"_____no_output_____"
]
],
[
[
"Notice that the first value in the result is ``X[0, 2]``, the second is ``X[1, 1]``, and the third is ``X[2, 3]``.\nThe pairing of indices in fancy indexing follows all the broadcasting rules that were mentioned in [Computation on Arrays: Broadcasting](02.05-Computation-on-arrays-broadcasting.ipynb).\nSo, for example, if we combine a column vector and a row vector within the indices, we get a two-dimensional result:",
"_____no_output_____"
]
],
[
[
"X[row[:, np.newaxis], col]",
"_____no_output_____"
]
],
[
[
"Here, each row value is matched with each column vector, exactly as we saw in broadcasting of arithmetic operations.\nFor example:",
"_____no_output_____"
]
],
[
[
"row[:, np.newaxis] * col",
"_____no_output_____"
]
],
[
[
"It is always important to remember with fancy indexing that the return value reflects the *broadcasted shape of the indices*, rather than the shape of the array being indexed.",
"_____no_output_____"
],
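One quick way to confirm this (a small aside, not in the original text) is to check the shapes directly: broadcasting a (3, 1) index array against a (3,) index array gives a (3, 3) result, and that is exactly the shape returned above.

```python
print(row[:, np.newaxis].shape, col.shape)   # (3, 1) (3,)
print(X[row[:, np.newaxis], col].shape)      # (3, 3) – the broadcasted index shape
```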
[
"## Combined Indexing\n\nFor even more powerful operations, fancy indexing can be combined with the other indexing schemes we've seen:",
"_____no_output_____"
]
],
[
[
"print(X)",
"[[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]]\n"
]
],
[
[
"We can combine fancy and simple indices:",
"_____no_output_____"
]
],
[
[
"X[2, [2, 0, 1]]",
"_____no_output_____"
]
],
[
[
"We can also combine fancy indexing with slicing:",
"_____no_output_____"
]
],
[
[
"X[1:, [2, 0, 1]]",
"_____no_output_____"
]
],
[
[
"And we can combine fancy indexing with masking:",
"_____no_output_____"
]
],
[
[
"mask = np.array([1, 0, 1, 0], dtype=bool)\nX[row[:, np.newaxis], mask]",
"_____no_output_____"
]
],
[
[
"All of these indexing options combined lead to a very flexible set of operations for accessing and modifying array values.",
"_____no_output_____"
],
[
"## Example: Selecting Random Points\n\nOne common use of fancy indexing is the selection of subsets of rows from a matrix.\nFor example, we might have an $N$ by $D$ matrix representing $N$ points in $D$ dimensions, such as the following points drawn from a two-dimensional normal distribution:",
"_____no_output_____"
]
],
[
[
"mean = [0, 0]\ncov = [[1, 2],\n [2, 5]]\nX = rand.multivariate_normal(mean, cov, 100)\nX.shape",
"_____no_output_____"
]
],
[
[
"Using the plotting tools we will discuss in [Introduction to Matplotlib](04.00-Introduction-To-Matplotlib.ipynb), we can visualize these points as a scatter-plot:",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set() # for plot styling\n\nplt.scatter(X[:, 0], X[:, 1]);",
"_____no_output_____"
]
],
[
[
"Let's use fancy indexing to select 20 random points. We'll do this by first choosing 20 random indices with no repeats, and use these indices to select a portion of the original array:",
"_____no_output_____"
]
],
[
[
"indices = np.random.choice(X.shape[0], 20, replace=False)\nindices",
"_____no_output_____"
],
[
"selection = X[indices] # fancy indexing here\nselection.shape",
"_____no_output_____"
]
],
[
[
"Now to see which points were selected, let's over-plot large circles at the locations of the selected points:",
"_____no_output_____"
]
],
[
[
"plt.scatter(X[:, 0], X[:, 1], alpha=0.3)\nplt.scatter(selection[:, 0], selection[:, 1],\n facecolor='none', s=200);",
"_____no_output_____"
]
],
[
[
"This sort of strategy is often used to quickly partition datasets, as is often needed in train/test splitting for validation of statistical models (see [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb)), and in sampling approaches to answering statistical questions.",
"_____no_output_____"
],
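As a concrete illustration of that use (a small sketch, not part of the book text), the same idea with a random permutation gives a quick train/test split of the 100-point `X` array above via fancy indexing:

```python
shuffled = np.random.permutation(X.shape[0])
test_idx, train_idx = shuffled[:20], shuffled[20:]
X_test, X_train = X[test_idx], X[train_idx]   # fancy indexing does the split
print(X_train.shape, X_test.shape)            # (80, 2) (20, 2)
```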
[
"## Modifying Values with Fancy Indexing\n\nJust as fancy indexing can be used to access parts of an array, it can also be used to modify parts of an array.\nFor example, imagine we have an array of indices and we'd like to set the corresponding items in an array to some value:",
"_____no_output_____"
]
],
[
[
"x = np.arange(10)\ni = np.array([2, 1, 8, 4])\nx[i] = 99\nprint(x)",
"[ 0 99 99 3 99 5 6 7 99 9]\n"
]
],
[
[
"We can use any assignment-type operator for this. For example:",
"_____no_output_____"
]
],
[
[
"x[i] -= 10\nprint(x)",
"[ 0 89 89 3 89 5 6 7 89 9]\n"
]
],
[
[
"Notice, though, that repeated indices with these operations can cause some potentially unexpected results. Consider the following:",
"_____no_output_____"
]
],
[
[
"x = np.zeros(10)\nx[[0, 0]] = [4, 6]\nprint(x)",
"[ 6. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n"
]
],
[
[
"Where did the 4 go? The result of this operation is to first assign ``x[0] = 4``, followed by ``x[0] = 6``.\nThe result, of course, is that ``x[0]`` contains the value 6.\n\nFair enough, but consider this operation:",
"_____no_output_____"
]
],
[
[
"i = [2, 3, 3, 4, 4, 4]\nx[i] += 1\nx",
"_____no_output_____"
]
],
[
[
"You might expect that ``x[3]`` would contain the value 2, and ``x[4]`` would contain the value 3, as this is how many times each index is repeated. Why is this not the case?\nConceptually, this is because ``x[i] += 1`` is meant as a shorthand of ``x[i] = x[i] + 1``. ``x[i] + 1`` is evaluated, and then the result is assigned to the indices in x.\nWith this in mind, it is not the augmentation that happens multiple times, but the assignment, which leads to the rather nonintuitive results.\n\nSo what if you want the other behavior where the operation is repeated? For this, you can use the ``at()`` method of ufuncs (available since NumPy 1.8), and do the following:",
"_____no_output_____"
]
],
[
[
"x = np.zeros(10)\nnp.add.at(x, i, 1)\nprint(x)",
"[ 0. 0. 1. 2. 3. 0. 0. 0. 0. 0.]\n"
]
],
[
[
"The ``at()`` method does an in-place application of the given operator at the specified indices (here, ``i``) with the specified value (here, 1).\nAnother method that is similar in spirit is the ``reduceat()`` method of ufuncs, which you can read about in the NumPy documentation.",
"_____no_output_____"
],
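For completeness, a tiny example of `reduceat` (not in the original text): it applies the reduction between successive index boundaries, so it can also be used for bin-like aggregation.

```python
a = np.arange(8)
np.add.reduceat(a, [0, 4, 6])   # sums of a[0:4], a[4:6], a[6:] -> array([ 6,  9, 13])
```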
[
"## Example: Binning Data\n\nYou can use these ideas to efficiently bin data to create a histogram by hand.\nFor example, imagine we have 1,000 values and would like to quickly find where they fall within an array of bins.\nWe could compute it using ``ufunc.at`` like this:",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nx = np.random.randn(100)\n\n# compute a histogram by hand\nbins = np.linspace(-5, 5, 20)\ncounts = np.zeros_like(bins)\n\n# find the appropriate bin for each x\ni = np.searchsorted(bins, x)\n\n# add 1 to each of these bins\nnp.add.at(counts, i, 1)",
"_____no_output_____"
]
],
[
[
"The counts now reflect the number of points within each bin–in other words, a histogram:",
"_____no_output_____"
]
],
[
[
"# plot the results\nplt.plot(bins, counts, linestyle='steps');",
"_____no_output_____"
]
],
[
[
"Of course, it would be silly to have to do this each time you want to plot a histogram.\nThis is why Matplotlib provides the ``plt.hist()`` routine, which does the same in a single line:\n\n```python\nplt.hist(x, bins, histtype='step');\n```\n\nThis function will create a nearly identical plot to the one seen here.\nTo compute the binning, ``matplotlib`` uses the ``np.histogram`` function, which does a very similar computation to what we did before. Let's compare the two here:",
"_____no_output_____"
]
],
[
[
"print(\"NumPy routine:\")\n%timeit counts, edges = np.histogram(x, bins)\n\nprint(\"Custom routine:\")\n%timeit np.add.at(counts, np.searchsorted(bins, x), 1)",
"NumPy routine:\n10000 loops, best of 3: 97.6 µs per loop\nCustom routine:\n10000 loops, best of 3: 19.5 µs per loop\n"
]
],
[
[
"Our own one-line algorithm is several times faster than the optimized algorithm in NumPy! How can this be?\nIf you dig into the ``np.histogram`` source code (you can do this in IPython by typing ``np.histogram??``), you'll see that it's quite a bit more involved than the simple search-and-count that we've done; this is because NumPy's algorithm is more flexible, and particularly is designed for better performance when the number of data points becomes large:",
"_____no_output_____"
]
],
[
[
"x = np.random.randn(1000000)\nprint(\"NumPy routine:\")\n%timeit counts, edges = np.histogram(x, bins)\n\nprint(\"Custom routine:\")\n%timeit np.add.at(counts, np.searchsorted(bins, x), 1)",
"NumPy routine:\n10 loops, best of 3: 68.7 ms per loop\nCustom routine:\n10 loops, best of 3: 135 ms per loop\n"
]
],
[
[
"What this comparison shows is that algorithmic efficiency is almost never a simple question. An algorithm efficient for large datasets will not always be the best choice for small datasets, and vice versa (see [Big-O Notation](02.08-Sorting.ipynb#Aside:-Big-O-Notation)).\nBut the advantage of coding this algorithm yourself is that with an understanding of these basic methods, you could use these building blocks to extend this to do some very interesting custom behaviors.\nThe key to efficiently using Python in data-intensive applications is knowing about general convenience routines like ``np.histogram`` and when they're appropriate, but also knowing how to make use of lower-level functionality when you need more pointed behavior.",
"_____no_output_____"
],
[
"<!--NAVIGATION-->\n< [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb) | [Contents](Index.ipynb) | [Sorting Arrays](02.08-Sorting.ipynb) >\n\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.07-Fancy-Indexing.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
ecdf2dc69e1f6871ade06d829b55db44fc38d3c5 | 351,246 | ipynb | Jupyter Notebook | notebooks/QuickStart.ipynb | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 23 | 2016-01-26T23:06:53.000Z | 2019-06-11T08:31:32.000Z | notebooks/QuickStart.ipynb | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 73 | 2016-03-16T03:02:35.000Z | 2019-07-18T07:29:52.000Z | notebooks/QuickStart.ipynb | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 6 | 2016-03-25T23:22:49.000Z | 2018-01-16T14:38:09.000Z | 119.920109 | 50,939 | 0.670778 | [
[
[
"\n## LavaVu: python API quickstart guide\nThe LavaVu python interface provides a python style, object oriented way of working with 4D visualisations. All the objects you work with have properties that control how the visualisation is rendered which can be added, read or modified like a python dictionary. \n\n### The Viewer Object\n\nThe first object you need to create is the Viewer() which defines a viewing frame and an OpenGL rendering context. Using this we can then create drawing objects that can then be used to load rendering data.\n\nProperties set on the Viewer control the global rendering state as well as the highest level scope for setting object properties. They are listed in the docs here by category:\nhttps://lavavu.github.io/Documentation/Property-Reference\n\nOther actions that change the visualisation are possible via functions on the viewer object or the drawing objects.\nThese include the set of scripting commands understood by LavaVu which are listed here\nhttps://lavavu.github.io/Documentation/Scripting-Commands-Reference\n\nThese can be called as methods on the Viewer object, eg: lv.rotate('x', 90)",
"_____no_output_____"
],
[
"#### Create a viewer\n\nAdditionally, let's set the background and resolution by passing them to the constructor",
"_____no_output_____"
]
],
[
[
"import lavavu\nlv = lavavu.Viewer(background=\"darkgrey\", resolution=(400,300))",
"_____no_output_____"
]
],
[
[
"Images and video can be created from the viewer as well as opening interactive views.\nIf running in an IPython notebook, images and video can be displayed inline.",
"_____no_output_____"
]
],
[
[
"#Save an image inline\nlv.display()\n\n#Save an image to disk, optionally at a different resolution\nlv.image(\"output.png\", resolution=(500,500))",
"_____no_output_____"
]
],
[
[
"Global properties can be set on the viewer as a dictionary, eg: lets update the background and display the image again:",
"_____no_output_____"
]
],
[
[
"lv[\"background\"] = \"white\"\nlv.display()",
"_____no_output_____"
]
],
[
[
"#### Creating objects\n\nObjects associate a set of rendering properties with a collection of rendering data (vertices etc)\n\nAvailable object creation functions are:\n **points, lines, triangles, quads, shapes, vectors, tracers, volume, labels**\n",
"_____no_output_____"
]
],
[
[
"pts = lv.points(\"myparticles\") #, pointsize=10)",
"_____no_output_____"
],
[
"surf = lv.triangles(\"mymesh\")\n\nsurf[\"opacity\"] = 1.0",
"_____no_output_____"
]
],
[
[
"#### Load some data\nThis can be in a python list or for numerical data - a numpy array or anything that can be trivially converted to a numpy array",
"_____no_output_____"
]
],
[
[
"pts.vertices([[0,0,1], [1,1,1], [0,1,1], [1,0,1]])\nsurf.vertices([[0,0.25,0], [1,0,1], [1,0.75,0], [0,0.25,1], [0.5,0.5,0.5]])\nsurf.indices([[0,2,4], [4,2,1], [0,4,3], [3,4,1]])",
"_____no_output_____"
],
[
"lv.reload() #Because we have new data since last display\nlv.display()",
"_____no_output_____"
]
],
[
[
"Functions for data loading expect either scalar (1d) or vector (3d) data.\n\nMost functions expect floating point values, all of which will be converted to 32 bit single precision.\n\n3d vector data is interpreted as a list of coord triples [[x0,y0,z0], [x1, y1, z1]...] \nIf passed split coord arrays arrange in a list/array [[x0,x1...], [y0, y1...], [z0, z1...]] this will be detected automatically if the first dimension of the passed array is 3 and the last dimension is != 3, the array will be reshaped before loading.\n\nAvailable data loading functions are:\n\n- **vertices**: 3d vertex data, float. The vertices to plot.\n- **normals**: 3d normal data, float. normals for meshes, if not provided will be calculated automatically\n- **vectors**: 3d vector data, float. Vector field data.\n- **indices**: integer index data referencing the indices of vertices to use for plotting, when provided will lookup vertices to plot from this array instead of just plotting them in their memory order\n- **colours**: either 32 bit integer data defining an array of ARGB colours or a list of strings that will be parsed as colours. Colour strings can be either names from https://en.wikipedia.org/wiki/X11_color_names (with spaces removed) or HTML style colour values in either hex \"#RRGGBB\" or decimal \"rgb(R,G,B)\" or \"rgba(R,G,B,A)\"\n- **labels**: string data to label vertices\n- **values**: scalar data, this function has an additional label argument to name the data as multiple scalar fields can be loaded under different labels.\n- **texcoords**: 2d texture coord data, float. Coordinates to map texture to a mesh.\n- **rgb**: colour data as 8 bit integer array [r0,g0,b0, r1,g1.b1,....]\n- **luminance**: luminance values as 8 bit integer array \n\n#### Loading additional data arrays\nHere we'll colour our two objects in two different ways\n",
"_____no_output_____"
]
],
[
[
"pts.colours([\"red\", \"blue\"])\n\nsurf.values([0.25, 0, 0.75, 0.25, 0.5], \"height\")\ncm = surf.colourmap(\"diverge\")",
"_____no_output_____"
]
],
[
[
"In the first instance we load a literal colour value for each vertex. As there are not enough values for each vertex, they are spread over the range evenly, so each colour is applied to two vertices.\n\nIn the second, we provide an additional data field containing the height of each vertex (y coord) and then apply a colour map.\n\nThe label *height* we apply to the field can be used to switch between which field to use to colour the data, this is defined by the \"colourby\" property and defaults to the first scalar value data set loaded.\nFor example, to load and switch to colouring by a new field named *temperature*\n\n surf.values(\"temperatue\", [20, 16, 27, 18])\n surf[\"colourby\"] = \"temperature\"\n \nThe colourmap() function will apply a colourmap to an object to map the values of the field to colours, the data passed can be:\n\nA name of a predefined colourmap, see: https://lavavu.github.io/Documentation/Tutorials/ColourMaps.ipynb\n\n surf.colourmap(\"diverge\")\n \nA list of colour strings, these will be evenly spaced over the map:\n\n surf.colourmap(\"red green blue\")\n \nA list of colour positions and value strings:\n\n surf.colourmap([(0, \"black\"), (0.25, \"yellow\"), (1, \"red\")])\n \nColours can also include an alpha value for transparency, either by using the HTML rgba(R,G,B,A\\[0,1\\]) format or appending a colon and the alpha value to a hex or X11 colour name value.\n\n surf.colourmap(\"rgba(255,0,0,0.5) #0000ff:0.5\")\n\nThe return value of colourmap() is a ColourMap object which can be used to later modify the colourmap:",
"_____no_output_____"
]
],
[
[
"#Set the opacity of the last colour to 0.0 (transparent)\ncm.colours[-1][3] = 0.0",
"_____no_output_____"
]
],
[
[
"A colour bar can be plotted with the colourbar() function:",
"_____no_output_____"
]
],
[
[
"cb = surf.colourbar()",
"_____no_output_____"
],
[
"lv.reload() #Because we have new data since last display\nlv.display()",
"_____no_output_____"
]
],
[
[
"### More about properties\nAll the available properties are listed here:\nhttps://lavavu.github.io/Documentation/Property-Reference\n\nProperties can be passed as arguments to the function used to create the object or set using the python dictionary syntax afterwards.\n\nProperty values \"cascade\", meaning a property that is only defined on a specific type of object, can still be set at a broader scope (such as globally or on a timestep), where it will define a default value for all objects within that scope. If not set at any level the properties fall back to their default values.\nAvailable scopes are: global, timestep, view, object, colourmap",
"_____no_output_____"
],
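 [
  "# A minimal sketch of the property cascade described above (sizes are arbitrary):\n# a value set globally acts as a default, while a value set on an object overrides it\nlv[\"pointsize\"] = 5    # global default for all point objects\npts[\"pointsize\"] = 15  # override for the 'myparticles' object only",
  "_____no_output_____"
 ],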
[
"### More about geometry types\n - **points** : each vertex is a point\n - **lines**: each pair of vertices is a line, or if \"link\" property is true, each element block of vertices is a single joined line. Element blocks can be defined automatically by setting the \"dims\" property to a value [N] which will cause a new block to start once N vertices are loaded into the active one. Blocks of different sizes can be defined by calling the append() function on an object to explicitly start a new block. \n - **triangles**: each vertex triple is a triangle, or if indices provided each index triple referencing the vertex array is a triangle. If \"dims\"=[width, height] is specified, instead plotted as a regular grid mesh where vertices are the grid points.\n - **quads**: Plots a regular grid as for triangles but using quads, requires \"dims\"=[width, height] to be set\n - **vectors**: Plots vector arrows at each vertex, requires a vector field to be loaded.\n - **tracers**: Plots tracer lines or arrows over time-varying point vertices.\n - **shapes**: Plots ellipsoids or cuboids at each vertex. Data fields or properties can be used to define their dimensions.\n - **volume**: Volume render a cube of data or a set of slices.\n - **labels**: as points, but the points are hidden to allow label data to be displayed without other geometry being plotted.\n\n> NOTE: The function used to create the object only specifies how the\n> subsequent data loaded will be rendered, the objects themselves are\n> all the same. This can be changed after object creation with the\n> \"geometry\" property, so to load a triangle mesh into our 'pts' object,\n> we can set pts[\"geometry\"] = \"triangles\" before loading the mesh data. Both the points and the mesh will be plotted with the properties of the pts object.\n",
"_____no_output_____"
],
[
"### Time Varying data\nWhen loading data, there is initially no time dimension defined. All geometry loaded at this point is considered fixed in time and will always be rendered.\n\nTo define a time step, call the addstep() function, properties can also be passed to this function which will apply to this timestep only:\n\n lv.addstep(title=\"1/1/2018\")\n\nAll data subsequently loaded will be considered temporal and displayed only when its timestep is set to the active one:",
"_____no_output_____"
]
],
[
[
"falling = lv.points(\"falling\", pointsize=20, pointtype=\"sphere\", colour=\"green\")\n\nfor time in range(0,20):\n lv.addstep(time, title='step %d' % time)\n height = 1.0 - 0.05*time\n falling.vertices([0.5, height, 0.5])",
"_____no_output_____"
]
],
[
[
"Timestep information can also be retrieved and set as follows:",
"_____no_output_____"
]
],
[
[
"print(lv.steps)\nlv.step = 5 #Attempt to set timestep 5 (if not found, nearest match will be selected)\nprint(\"Current timestep: \", lv[\"timestep\"])\nlv.display()",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\nCurrent timestep: 5\n"
]
],
[
[
"#### Interaction\nAn inline interactive mode is provided for IPython notebooks where the interactive window is displayed inline within a cell. This operates in the background and python commands can continue to be issued.\n\nControl widgets can also be created to interactively manipulate rendering properties from IPython, such as active timestep\n\nThe interactive window allows mouse control to adjust the viewpoint",
"_____no_output_____"
]
],
[
[
"lv.control.TimeStepper()\nlv.window()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
ecdf4d8e7245fdfedaa1537f00f85809b89872fa | 14,706 | ipynb | Jupyter Notebook | py/solution/lab_01_numpy.ipynb | tek5030/lab_01 | bc330de5f319fa9795eec04ada02e9885c79624f | [
"BSD-3-Clause"
] | null | null | null | py/solution/lab_01_numpy.ipynb | tek5030/lab_01 | bc330de5f319fa9795eec04ada02e9885c79624f | [
"BSD-3-Clause"
] | null | null | null | py/solution/lab_01_numpy.ipynb | tek5030/lab_01 | bc330de5f319fa9795eec04ada02e9885c79624f | [
"BSD-3-Clause"
] | 1 | 2020-12-19T20:19:54.000Z | 2020-12-19T20:19:54.000Z | 27.799622 | 247 | 0.494356 | [
[
[
"# Lab 1: Transformations with NumPy\nYou are now in a Jupyter notebook! It consists of written text, mixed with executable Python code.\n\nYou are supposed to read from top to bottom, and fill in code in the Python-blocks as you go.\n\n## 1. Getting started\nFamiliarize yourself with the [NumPy quickstart](https://numpy.org/doc/stable/user/quickstart.html). You will need it to solve the following tasks.\nUse the text, examples and links in the quickstart guide.\n\n## 2. Get familiar with NumPy\nFirst, import NumPy:\n\nPress `Shift + Enter` to execute and advance to the next block. Repeat for each block. You can click on a block to edit and run the code again.\nPress `Ctrl + Enter` to execute the current block without advancing.",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## a) Create vectors and matrices\n#### Create the vector `t`\n\n$\\mathbf{t} =\n\\begin{bmatrix}\n1 \\\\\n0 \\\\\n3\n\\end{bmatrix}\n$\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#array-creation](https://numpy.org/doc/stable/user/quickstart.html#array-creation)",
"_____no_output_____"
]
],
[
[
"# TODO: Create the vector `t`\nt = np.array([1, 0, 3])\nprint(f\"t = {t}\\nshape: {t.shape}\\ntype: {type(t)}\")",
"_____no_output_____"
]
],
[
[
"It's important to note that the sequence of numbers in a one-dimensional NumPy \"vector\", like `np.array([1, 2, 3])`, is just numbers in a vector space and therefore neither a row nor a column vector. We can see that its shape is `(3,)`.\n\nIn order to use it with linear algebra, we have to explicitly go for a 2D representation.\n\n#### Try to create `t` as a `N x 1` _column_ vector.\n",
"_____no_output_____"
]
],
[
[
"# TODO: Create 't' as a 'N x 1' column vector\n# Be explicit when you create\nrow = np.array([[1, 2, 3]]) # 1 row with 3 elements. Note the double brackets.\ncol = np.array([[1],[2],[3]]) # 3 rows with 1 element each\ncol2 = np.array([[1, 2, 3]]).T # Transpose a row vector\nprint(\"Explicit\")\nprint(f\"row = {row}, shape: {row.shape}\")\nprint(f\"col = \\n{col}, shape: {col.shape}\")\nprint(f\"\\nTranspose:\\ncol2 = \\n{col}, shape: {col2.shape}\")\n\n# Use shortcuts when you create\nrow = np.r_['r', [1,2,3]] # shape: (1, 3)\ncol = np.r_['c', [1,2,3]] # shape: (3, 1)\nprint(\"\\nShortcuts\")\nprint(f\"row = {row}, shape: {row.shape}\")\nprint(f\"col = \\n{col}, shape: {col.shape}\")\n\n# Reshape existing data\ncol = np.array([1,2,3]).reshape(-1, 1) # Reshape to change the 'view' of the data\nrow = np.array([1,2,3]).reshape(1, -1) # Reshape to change the 'view' of the data\nprint(\"\\nReshape\")\nprint(f\"row = {row}, shape: {row.shape}\")\nprint(f\"col = \\n{col}, shape: {col.shape}\")\n\n# Increase the dimension of existing data\narr = np.array([1,2,3])\nrow = arr[np.newaxis, :]\ncol = arr[:,np.newaxis]\nprint(\"\\nIncrease dimension\")\nprint(f\"row = {row}, shape: {row.shape}\")\nprint(f\"col = \\n{col}, shape: {col.shape}\")\n\n# We'll go for the one with the least typing\nt = np.array([[1, 0, 3]]).T\nprint(f\"\\nt = \\n{t}\\nshape: {t.shape}\")",
"_____no_output_____"
]
],
[
[
"#### Create the matrix `A`\n\n$\n\\mathbf{A} =\n\\begin{bmatrix}\n1 & 0 & 3\\\\\n4 & 5 & 6 \\\\\n7 & 8 & 9\n\\end{bmatrix}\n$",
"_____no_output_____"
]
],
[
[
"# TODO: Create the matrix A\nA = np.array([[1, 0, 3],\n [4, 5, 6],\n [7, 8, 9]])\nprint(f\"A:\\n{A}\")",
"_____no_output_____"
]
],
[
[
"#### Create the identity matrix I.\n\n$\n\\mathbf{I}=\n\\begin{bmatrix}\n1 & 0 & 0\\\\\n0 & 1 & 0 \\\\\n0 & 0 & 1\n\\end{bmatrix}\n$\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#functions-and-methods-overview](https://numpy.org/doc/stable/user/quickstart.html#functions-and-methods-overview)",
"_____no_output_____"
]
],
[
[
"# TODO: Create the matrix I\nI = np.eye(3)\nprint(f\"I:\\n{I}\")",
"_____no_output_____"
]
],
[
[
"#### Create the matrix T.\n\n$\n\\mathbf{T} =\n\\begin{bmatrix}\n\\mathbf{A} & \\mathbf{t} \\\\\n\\mathbf{0} & 1\n\\end{bmatrix}\n$\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#stacking-together-different-arrays](https://numpy.org/doc/stable/user/quickstart.html#stacking-together-different-arrays)\nHint: [https://numpy.org/doc/stable/reference/generated/numpy.block.html](https://numpy.org/doc/stable/reference/generated/numpy.block.html)",
"_____no_output_____"
]
],
[
[
"# TODO: Create the matrix T\nT = np.block([ # Using 'block'\n [A, t],\n [0, 0, 0, 1]\n])\nT2 = np.row_stack((np.c_[A, t], [0, 0, 0, 1])) # Nested stacking\n\nprint(f\"T = \\n{T}\\nnp.array_equal(T, T2): {np.array_equal(T, T2)}\")\n\n# Stacking: np.c_[A, t] does NOT require 't' to be a column (have two dimensions)\n# Block: np.block([A, t]) does.",
"_____no_output_____"
]
],
[
[
"#### Create the matrix B.\n\n$\n\\mathbf{B} = \\mathbf{A}^T\n$\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#changing-the-shape-of-an-array](https://numpy.org/doc/stable/user/quickstart.html#changing-the-shape-of-an-array)",
"_____no_output_____"
]
],
[
[
"# TODO: Create the matrix B\nB = A.T\nprint(f\"A = \\n{A}\\nB = \\n{B}\")",
"_____no_output_____"
]
],
[
[
"## b) Coefficients\nSet $t_2 = 2$ and $A_{12} = 2$, so that\n\n $\\mathbf{t} =\n \\begin{bmatrix}\n 1 \\\\\n 2 \\\\\n 3\n \\end{bmatrix},\n \\:\n \\mathbf{A} =\n \\begin{bmatrix}\n 1 & 2 & 3\\\\\n 4 & 5 & 6 \\\\\n 7 & 8 & 9\n \\end{bmatrix}$\n\nPerform the corresponding corrections to **T**, so that we still have\n\n $\\mathbf{T} =\n \\begin{bmatrix}\n \\mathbf{A} & \\mathbf{t} \\\\\n \\mathbf{0} & 1\n \\end{bmatrix}$\n\nBut hey, what happened to **B**?",
"_____no_output_____"
]
],
[
[
"# TODO: Solve b)\n# Note python is zero indexed, so t_2 is t[1]\nt[1] = 2\n\n# In the matrix the indexes are A[row, col]\nA[0, 1] = 2\n\n# Update corresponding block in T.\nT[:3, :3] = A\n\nprint(f\"t:\\n{t}\\nA:\\n{A},\\nT:\\n{T},\\nB:\\n{B}\")",
"_____no_output_____"
]
],
[
[
"## c) Block operations\nExtract the row vector\n\n$\\mathbf{r}_2 = \\begin{bmatrix}A_{21} & A_{22} & A_{23}\\end{bmatrix}$\n\nand the column vector\n\n$\\mathbf{c}_2 = \\begin{bmatrix}A_{12} \\\\ A_{22} \\\\ A_{32} \\end{bmatrix}$\n\nfrom **A**.\n\nExtract the submatrix\n\n$\\mathbf{T}_{3 \\times 4} = \\begin{bmatrix}\\mathbf{A} & \\mathbf{t}\\end{bmatrix}$\n\nfrom **T**.",
"_____no_output_____"
]
],
[
[
"# TODO: r_2\nr_2 = A[np.newaxis, 1, :]\nprint(f\"r_2 = \\n{r_2}\\n\")\n\n# TODO: c_2\nc_2 = A[:, 1, np.newaxis]\nprint(f\"c_2 = \\n{c_2}\\n\")\n\n# TODO: T_3x4\nT_3x4 = T[0:3]\nprint(f\"T_3x4 = \\n{T_3x4}\")",
"_____no_output_____"
]
],
[
[
"Set the corresponding blocks in **A** and **T** to all `0` (so that the second row and column in **A** are all 0, and the upper 3x4 matrix in **T** is all 0).\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#copies-and-views](https://numpy.org/doc/stable/user/quickstart.html#copies-and-views)",
"_____no_output_____"
]
],
[
[
"# TODO: set 0\nr_2[:] = 0\nc_2[:] = 0\nT_3x4[:] = 0\nprint(f\"A = \\n{A}\")\nprint(f\"T = \\n{T}\")",
"_____no_output_____"
]
],
[
[
"## d) Matrix and vector arithmetic\n- Add two vectors\n- Add two matrices\n- Multiply two matrices\n- Take the dot product between two vectors\n- Take the coefficient-wise multiplication between two matrices\n\nHint: [https://numpy.org/doc/stable/user/quickstart.html#basic-operations](https://numpy.org/doc/stable/user/quickstart.html#basic-operations)",
"_____no_output_____"
]
],
[
[
"# TODO: Solve d)\nv1 = np.array([1, 2, 3])\nv2 = np.array([3, 2, 1])\nM = np.arange(10, 19).reshape(3, 3)\nM[2, 2] = 5\nN = np.eye(3)\n\nprint(f\"v1: {v1}\\nv2: {v2}\\n\")\nprint(f\"M:\\n{M}\\nN:\\n{N}\\n\")\n\n# TODO: Compute the sum of two vectors.\nprint(f\"v1 + v2 = {v1 + v2}\\n\")\n\n# TODO: Compute the sum of two matrices.\nprint(f\"M + N = \\n{M + N}\\n\")\n\n# TODO: Compute the matrix multiplication of two matrices.\nprint(f\"N matrix multiply M =\\n{N @ M}\\n\")\n\n# TODO: Compute the dot product between two vectors.\nprint(f\"v1 dot v2 = {v1.dot(v2)}\\n\")\n\n# TODO: Compute the element-wise multiplication between two matrices.\nprint(f\"M Element-wise product N =\\n{M * N}\")\n",
"_____no_output_____"
]
],
[
[
"## e) Reductions\n- Take the sum of all elements in a matrix\n- Compute the minimum value in a matrix\n - Also, find its position in the matrix\n- Create a vector that is the maximum of each column in a matrix\n- Find the L2-norm of a vector\n- Find the number of elements in a vector that is greater than a given value.",
"_____no_output_____"
]
],
[
[
"# Replace 'None' with valid expressions\n\n# TODO: Take the sum of all elements in a matrix\n# https://numpy.org/doc/stable/user/quickstart.html#basic-operations\nprint(f\"sum of A: {A.sum()}\")\n\n# TODO: Compute the minimum value in a matrix\n# TODO: Also, find its position in the matrix.\nprint(f\"minimum of A: {A.min()} \"\n f\"at index {A.argmin()}, \"\n f\"(or {np.unravel_index(A.argmin(), A.shape)} in 2D)\")\n\n# TODO: Create a vector that is the maximum of each column in a matrix.\nprint(f\"maximum of each column in A: {A.max(0)}\")\n\n# TODO: Find the L2-norm of a vector.\nv1 = np.array([1, 1])\nprint(f\"L2 norm of {v1} is {np.linalg.norm(v1)}\")\n\n# TODO: Find the number of elements in a vector that is greater than a given value.\n# https://numpy.org/doc/stable/user/quickstart.html#changing-the-shape-of-an-array\nv2 = np.array([0, 10, 2, 13, 4, 15, 6, 17, 8, 19])\nprint(f\"v2: {v2},\\n\"\n f\" {(v2 > 10).sum()} elements are greater than 10\")",
"_____no_output_____"
]
],
[
[
"Now that you have gotten to know NumPy, let's use it to transform images in the [next part of the lab](lab_01_transformations.ipynb)!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |