column                                      dtype          min    max
hexsha                                      stringlengths  40     40
size                                        int64          6      14.9M
ext                                         stringclasses  1 value
lang                                        stringclasses  1 value
max_stars_repo_path                         stringlengths  6      260
max_stars_repo_name                         stringlengths  6      119
max_stars_repo_head_hexsha                  stringlengths  40     41
max_stars_repo_licenses                     sequence
max_stars_count                             int64          1      191k
max_stars_repo_stars_event_min_datetime     stringlengths  24     24
max_stars_repo_stars_event_max_datetime     stringlengths  24     24
max_issues_repo_path                        stringlengths  6      260
max_issues_repo_name                        stringlengths  6      119
max_issues_repo_head_hexsha                 stringlengths  40     41
max_issues_repo_licenses                    sequence
max_issues_count                            int64          1      67k
max_issues_repo_issues_event_min_datetime   stringlengths  24     24
max_issues_repo_issues_event_max_datetime   stringlengths  24     24
max_forks_repo_path                         stringlengths  6      260
max_forks_repo_name                         stringlengths  6      119
max_forks_repo_head_hexsha                  stringlengths  40     41
max_forks_repo_licenses                     sequence
max_forks_count                             int64          1      105k
max_forks_repo_forks_event_min_datetime     stringlengths  24     24
max_forks_repo_forks_event_max_datetime     stringlengths  24     24
avg_line_length                             float64        2      1.04M
max_line_length                             int64          2      11.2M
alphanum_fraction                           float64        0      1
cells                                       sequence
cell_types                                  sequence
cell_type_groups                            sequence
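The table above is the per-row schema: each row is a single Jupyter notebook together with its GitHub star/issue/fork metadata, its flattened cell contents (`cells`), and per-cell type labels (`cell_types`, `cell_type_groups`). A minimal pandas sketch of how rows with these columns could be inspected, assuming they have been saved locally as a parquet file; the file name `notebooks.parquet` is a placeholder, not part of the dataset:

```python
# Sketch only: the parquet file name and local storage format are assumptions.
import pandas as pd

df = pd.read_parquet("notebooks.parquet")  # hypothetical local copy of the rows

# Quick look at a few of the columns listed in the schema above.
print(df[["max_stars_repo_name", "size", "alphanum_fraction"]].head())

# Example filter: mostly-alphanumeric notebooks under ~1 MB with at least one star.
subset = df[
    (df["alphanum_fraction"] > 0.9)
    & (df["size"] < 1_000_000)
    & (df["max_stars_count"].fillna(0) >= 1)
]
print(len(subset), "notebooks match")

# Per-notebook counts of markdown vs. code cells, derived from `cell_types`.
cell_counts = df["cell_types"].apply(lambda types: pd.Series(types).value_counts())
print(cell_counts.head())
```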
hexsha: d069345fbfffe905f32fd79061e8ba4e37b65c3f
size: 391,733
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Project Portfolio/cnn-cifar10-tf2-v12_Notebook_CarlosCabano.ipynb
max_stars_repo_name: CarlosCabano/carloscabano.github.io
max_stars_repo_head_hexsha: 32c0903f9d0ef99a7905ce51c31cf4bf8b7857b2
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-01-20T08:57:09.000Z
max_stars_repo_stars_event_max_datetime: 2021-01-20T08:57:09.000Z
max_issues_repo_path: Project Portfolio/cnn-cifar10-tf2-v12_Notebook_CarlosCabano.ipynb
max_issues_repo_name: CarlosCabano/carloscabano.github.io
max_issues_repo_head_hexsha: 32c0903f9d0ef99a7905ce51c31cf4bf8b7857b2
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Project Portfolio/cnn-cifar10-tf2-v12_Notebook_CarlosCabano.ipynb
max_forks_repo_name: CarlosCabano/carloscabano.github.io
max_forks_repo_head_hexsha: 32c0903f9d0ef99a7905ce51c31cf4bf8b7857b2
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 391,733
max_line_length: 391,733
alphanum_fraction: 0.927098
cells:
[ [ [ "# PROYECTO CIFAR-10", "_____no_output_____" ], [ "## CARLOS CABAÑÓ", "_____no_output_____" ], [ "## 1. Librerias", "_____no_output_____" ], [ "Descargamos la librería para los arrays en preprocesamiento de Keras\r\n", "_____no_output_____" ] ], [ [ "from tensorflow import keras as ks\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport time\nimport datetime\nimport random\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ] ], [ [ "## 2. Arquitectura de red del modelo\n", "_____no_output_____" ], [ "Adoptamos la arquitectura del modelo 11 con los ajustes en Batch Normalization, Kernel Regularizer y Kernel Initializer. Añadimos Batch normalization a las capas de convolución.", "_____no_output_____" ] ], [ [ "model = ks.Sequential()\n\nmodel.add(ks.layers.Conv2D(64, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same', input_shape=(32,32,3)))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(64, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.MaxPooling2D((2, 2)))\nmodel.add(ks.layers.Dropout(0.2))\n\nmodel.add(ks.layers.Conv2D(128, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(128, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.MaxPooling2D(pool_size=(2, 2)))\nmodel.add(ks.layers.Dropout(0.2))\n\nmodel.add(ks.layers.Conv2D(256, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(256, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(256, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.MaxPooling2D(pool_size=(2, 2)))\nmodel.add(ks.layers.Dropout(0.2))\n\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.MaxPooling2D(pool_size=(2, 2)))\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', 
kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Conv2D(512, (3, 3), strides=1, activation='relu', kernel_regularizer=l2(0.0005), kernel_initializer=\"he_uniform\", padding='same'))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Dropout(0.3))\n\nmodel.add(ks.layers.Flatten())\nmodel.add(ks.layers.Dense(512, activation='relu', kernel_regularizer=l2(0.001), kernel_initializer=\"he_uniform\"))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Dropout(0.4))\nmodel.add(ks.layers.Dense(512, activation='relu', kernel_regularizer=l2(0.001), kernel_initializer=\"he_uniform\"))\nmodel.add(ks.layers.BatchNormalization())\nmodel.add(ks.layers.Dropout(0.5))\nmodel.add(ks.layers.Dense(10, activation='softmax'))\n\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 32, 32, 64) 1792 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 32, 32, 64) 256 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 32, 32, 64) 36928 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 32, 32, 64) 256 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 16, 16, 64) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 16, 16, 64) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 16, 16, 128) 73856 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 16, 16, 128) 512 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 16, 16, 128) 147584 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 16, 16, 128) 512 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 8, 8, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 8, 8, 128) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 8, 8, 256) 295168 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 8, 8, 256) 1024 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 8, 8, 256) 590080 \n_________________________________________________________________\nbatch_normalization_5 (Batch (None, 8, 8, 256) 1024 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 8, 8, 256) 590080 \n_________________________________________________________________\nbatch_normalization_6 (Batch (None, 8, 8, 256) 1024 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 4, 4, 256) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 4, 4, 256) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 4, 4, 512) 1180160 \n_________________________________________________________________\nbatch_normalization_7 (Batch (None, 4, 4, 512) 2048 
\n_________________________________________________________________\nconv2d_8 (Conv2D) (None, 4, 4, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_8 (Batch (None, 4, 4, 512) 2048 \n_________________________________________________________________\nconv2d_9 (Conv2D) (None, 4, 4, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_9 (Batch (None, 4, 4, 512) 2048 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 4, 4, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_10 (Batc (None, 4, 4, 512) 2048 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 2, 2, 512) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 2, 2, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_11 (Batc (None, 2, 2, 512) 2048 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 2, 2, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_12 (Batc (None, 2, 2, 512) 2048 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 2, 2, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 2048) 0 \n_________________________________________________________________\ndense (Dense) (None, 512) 1049088 \n_________________________________________________________________\nbatch_normalization_13 (Batc (None, 512) 2048 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 262656 \n_________________________________________________________________\nbatch_normalization_14 (Batc (None, 512) 2048 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 512) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 5130 \n=================================================================\nTotal params: 16,052,554\nTrainable params: 16,042,058\nNon-trainable params: 10,496\n_________________________________________________________________\n" ] ], [ [ "## 3. Optimizador, función error\n", "_____no_output_____" ], [ "Añadimos el learning rate al optimizador", "_____no_output_____" ] ], [ [ "from keras.optimizers import SGD\n\nmodel.compile(optimizer=SGD(lr=0.001, momentum=0.9),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "## 4. 
Preparamos los datos", "_____no_output_____" ] ], [ [ "cifar10 = ks.datasets.cifar10\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\nx_train, x_test = x_train / 255.0, x_test / 255.0", "Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n170500096/170498071 [==============================] - 4s 0us/step\n" ], [ "cifar10_labels = [\n'airplane', # id 0\n'automobile',\n'bird',\n'cat',\n'deer',\n'dog',\n'frog',\n'horse',\n'ship',\n'truck',\n]\n\nprint('Number of labels: %s' % len(cifar10_labels))", "Number of labels: 10\n" ] ], [ [ "Pintemos una muestra de las imagenes del dataset CIFAR10:", "_____no_output_____" ] ], [ [ "# Pintemos una muestra de las las imagenes del dataset MNIST\n\nprint('Train: X=%s, y=%s' % (x_train.shape, y_train.shape))\nprint('Test: X=%s, y=%s' % (x_test.shape, y_test.shape))\n\nfor i in range(9):\n\n plt.subplot(330 + 1 + i)\n plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))\n plt.title(cifar10_labels[y_train[i,0]])\n\nplt.subplots_adjust(hspace = 1)\nplt.show()", "Train: X=(50000, 32, 32, 3), y=(50000, 1)\nTest: X=(10000, 32, 32, 3), y=(10000, 1)\n" ] ], [ [ "Hacemos la validación al mismo tiempo que el entrenamiento:", "_____no_output_____" ] ], [ [ "x_val = x_train[-10000:]\ny_val = y_train[-10000:]\n\nx_train = x_train[:-10000]\ny_train = y_train[:-10000]\n", "_____no_output_____" ] ], [ [ "Hacemos el OHE para la clasificación", "_____no_output_____" ] ], [ [ "le = LabelEncoder()\r\nle.fit(y_train.ravel())\r\ny_train_encoded = le.transform(y_train.ravel())\r\ny_val_encoded = le.transform(y_val.ravel())\r\ny_test_encoded = le.transform(y_test.ravel())", "_____no_output_____" ] ], [ [ "## 5. Ajustes: Early Stopping ", "_____no_output_____" ], [ "Definimos un early stopping con base en el loss de validación y con el parámetro de \"patience\" a 10, para tener algo de margen. Con el Early Stopping lograremos parar el entrenamiento en el momento óptimo para evitar que siga entrenando a partir del overfitting.", "_____no_output_____" ] ], [ [ "callback_val_loss = EarlyStopping(monitor=\"val_loss\", patience=5)\r\ncallback_val_accuracy = EarlyStopping(monitor=\"val_accuracy\", patience=10)", "_____no_output_____" ] ], [ [ "## 6. 
Transformador de imágenes", "_____no_output_____" ], [ "### 6.1 Imágenes de entrenamiento", "_____no_output_____" ] ], [ [ "train_datagen = ImageDataGenerator(\r\n horizontal_flip=True,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n )\r\n\r\ntrain_generator = train_datagen.flow(\r\n x_train, \r\n y_train_encoded, \r\n batch_size=64\r\n)", "_____no_output_____" ] ], [ [ "### 6.2 Imágenes de validación y testeo", "_____no_output_____" ] ], [ [ "validation_datagen = ImageDataGenerator(\r\n horizontal_flip=True,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n )\r\n\r\nvalidation_generator = validation_datagen.flow(\r\n x_val, \r\n y_val_encoded, \r\n batch_size=64\r\n)\r\n\r\ntest_datagen = ImageDataGenerator(\r\n horizontal_flip=True,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n )\r\n\r\ntest_generator = test_datagen.flow(\r\n x_test, \r\n y_test_encoded,\r\n batch_size=64\r\n)", "_____no_output_____" ] ], [ [ "### 6.3 Generador de datos", "_____no_output_____" ] ], [ [ "sample = random.choice(range(0,1457))\r\nimage = x_train[sample]\r\nplt.imshow(image, cmap=plt.cm.binary)", "_____no_output_____" ], [ "sample = random.choice(range(0,1457))\r\n\r\nexample_generator = train_datagen.flow(\r\n x_train[sample:sample+1],\r\n y_train_encoded[sample:sample+1],\r\n batch_size=64\r\n )", "_____no_output_____" ], [ "plt.figure(figsize=(12, 12))\r\nfor i in range(0, 15):\r\n plt.subplot(5, 3, i+1)\r\n for X, Y in example_generator:\r\n image = X[0]\r\n plt.imshow(image)\r\n break\r\nplt.tight_layout()\r\nplt.show()", "_____no_output_____" ] ], [ [ "## 7. Entrenamiento ", "_____no_output_____" ] ], [ [ "t = time.perf_counter()", "_____no_output_____" ], [ "steps=int(x_train.shape[0]/64)\r\nhistory = model.fit(train_generator, epochs=100, use_multiprocessing=False, batch_size= 64, validation_data=validation_generator, steps_per_epoch=steps, callbacks=[callback_val_loss, callback_val_accuracy])", "Epoch 1/100\n625/625 [==============================] - 41s 50ms/step - loss: 9.1824 - accuracy: 0.1818 - val_loss: 8.0948 - val_accuracy: 0.2919\nEpoch 2/100\n625/625 [==============================] - 31s 50ms/step - loss: 8.1597 - accuracy: 0.2860 - val_loss: 7.7903 - val_accuracy: 0.3788\nEpoch 3/100\n625/625 [==============================] - 31s 50ms/step - loss: 7.8230 - accuracy: 0.3567 - val_loss: 7.5712 - val_accuracy: 0.4234\nEpoch 4/100\n625/625 [==============================] - 31s 50ms/step - loss: 7.5809 - accuracy: 0.4103 - val_loss: 7.4885 - val_accuracy: 0.4396\nEpoch 5/100\n625/625 [==============================] - 31s 50ms/step - loss: 7.3967 - accuracy: 0.4581 - val_loss: 7.2569 - val_accuracy: 0.4816\nEpoch 6/100\n625/625 [==============================] - 31s 50ms/step - loss: 7.2303 - accuracy: 0.4798 - val_loss: 7.0436 - val_accuracy: 0.5258\nEpoch 7/100\n625/625 [==============================] - 31s 50ms/step - loss: 7.0667 - accuracy: 0.5102 - val_loss: 7.0003 - val_accuracy: 0.5126\nEpoch 8/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.9073 - accuracy: 0.5389 - val_loss: 6.9591 - val_accuracy: 0.5171\nEpoch 9/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.7693 - accuracy: 0.5600 - val_loss: 6.6794 - val_accuracy: 0.5677\nEpoch 10/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.6491 - accuracy: 0.5743 - val_loss: 6.5222 - val_accuracy: 0.5954\nEpoch 11/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.5051 - accuracy: 0.5975 - val_loss: 6.4285 - 
val_accuracy: 0.6045\nEpoch 12/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.3745 - accuracy: 0.6138 - val_loss: 6.2443 - val_accuracy: 0.6330\nEpoch 13/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.2591 - accuracy: 0.6249 - val_loss: 6.0713 - val_accuracy: 0.6666\nEpoch 14/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.1268 - accuracy: 0.6476 - val_loss: 6.0985 - val_accuracy: 0.6373\nEpoch 15/100\n625/625 [==============================] - 31s 50ms/step - loss: 6.0068 - accuracy: 0.6620 - val_loss: 5.9344 - val_accuracy: 0.6662\nEpoch 16/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.8980 - accuracy: 0.6707 - val_loss: 5.8722 - val_accuracy: 0.6615\nEpoch 17/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.7878 - accuracy: 0.6856 - val_loss: 5.7329 - val_accuracy: 0.6820\nEpoch 18/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.6720 - accuracy: 0.6952 - val_loss: 5.5759 - val_accuracy: 0.7166\nEpoch 19/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.5839 - accuracy: 0.7024 - val_loss: 5.5217 - val_accuracy: 0.7072\nEpoch 20/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.4795 - accuracy: 0.7156 - val_loss: 5.3879 - val_accuracy: 0.7307\nEpoch 21/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.3891 - accuracy: 0.7240 - val_loss: 5.3091 - val_accuracy: 0.7331\nEpoch 22/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.2931 - accuracy: 0.7317 - val_loss: 5.3475 - val_accuracy: 0.7036\nEpoch 23/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.2075 - accuracy: 0.7438 - val_loss: 5.1032 - val_accuracy: 0.7557\nEpoch 24/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.1150 - accuracy: 0.7473 - val_loss: 5.0559 - val_accuracy: 0.7528\nEpoch 25/100\n625/625 [==============================] - 31s 50ms/step - loss: 5.0402 - accuracy: 0.7512 - val_loss: 4.9973 - val_accuracy: 0.7533\nEpoch 26/100\n625/625 [==============================] - 31s 49ms/step - loss: 4.9393 - accuracy: 0.7636 - val_loss: 4.9755 - val_accuracy: 0.7384\nEpoch 27/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.8677 - accuracy: 0.7698 - val_loss: 4.8760 - val_accuracy: 0.7510\nEpoch 28/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.7959 - accuracy: 0.7714 - val_loss: 4.7670 - val_accuracy: 0.7667\nEpoch 29/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.7227 - accuracy: 0.7733 - val_loss: 4.6693 - val_accuracy: 0.7750\nEpoch 30/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.6357 - accuracy: 0.7841 - val_loss: 4.6067 - val_accuracy: 0.7793\nEpoch 31/100\n625/625 [==============================] - 31s 49ms/step - loss: 4.5622 - accuracy: 0.7871 - val_loss: 4.5422 - val_accuracy: 0.7785\nEpoch 32/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.4951 - accuracy: 0.7914 - val_loss: 4.4828 - val_accuracy: 0.7828\nEpoch 33/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.4245 - accuracy: 0.7938 - val_loss: 4.4464 - val_accuracy: 0.7754\nEpoch 34/100\n625/625 [==============================] - 31s 49ms/step - loss: 4.3538 - accuracy: 0.7981 - val_loss: 4.3466 - val_accuracy: 0.7907\nEpoch 35/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.2840 - accuracy: 0.8064 - val_loss: 4.3889 - 
val_accuracy: 0.7623\nEpoch 36/100\n625/625 [==============================] - 31s 49ms/step - loss: 4.2116 - accuracy: 0.8104 - val_loss: 4.2095 - val_accuracy: 0.7983\nEpoch 37/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.1627 - accuracy: 0.8077 - val_loss: 4.1196 - val_accuracy: 0.8120\nEpoch 38/100\n625/625 [==============================] - 31s 49ms/step - loss: 4.0915 - accuracy: 0.8144 - val_loss: 4.1543 - val_accuracy: 0.7832\nEpoch 39/100\n625/625 [==============================] - 31s 50ms/step - loss: 4.0231 - accuracy: 0.8218 - val_loss: 4.0156 - val_accuracy: 0.8105\nEpoch 40/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.9657 - accuracy: 0.8242 - val_loss: 4.0132 - val_accuracy: 0.7995\nEpoch 41/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.9077 - accuracy: 0.8263 - val_loss: 3.9284 - val_accuracy: 0.8052\nEpoch 42/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.8487 - accuracy: 0.8287 - val_loss: 3.8862 - val_accuracy: 0.8048\nEpoch 43/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.7895 - accuracy: 0.8308 - val_loss: 3.8060 - val_accuracy: 0.8210\nEpoch 44/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.7454 - accuracy: 0.8320 - val_loss: 3.7656 - val_accuracy: 0.8178\nEpoch 45/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.6673 - accuracy: 0.8427 - val_loss: 3.7415 - val_accuracy: 0.8094\nEpoch 46/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.6141 - accuracy: 0.8443 - val_loss: 3.6448 - val_accuracy: 0.8286\nEpoch 47/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.5728 - accuracy: 0.8443 - val_loss: 3.6892 - val_accuracy: 0.8016\nEpoch 48/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.5107 - accuracy: 0.8450 - val_loss: 3.5734 - val_accuracy: 0.8213\nEpoch 49/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.4649 - accuracy: 0.8488 - val_loss: 3.5123 - val_accuracy: 0.8288\nEpoch 50/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.4038 - accuracy: 0.8527 - val_loss: 3.4465 - val_accuracy: 0.8330\nEpoch 51/100\n625/625 [==============================] - 31s 49ms/step - loss: 3.3639 - accuracy: 0.8561 - val_loss: 3.4088 - val_accuracy: 0.8345\nEpoch 52/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.3223 - accuracy: 0.8576 - val_loss: 3.4425 - val_accuracy: 0.8154\nEpoch 53/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.2613 - accuracy: 0.8610 - val_loss: 3.3146 - val_accuracy: 0.8361\nEpoch 54/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.2210 - accuracy: 0.8605 - val_loss: 3.2887 - val_accuracy: 0.8339\nEpoch 55/100\n625/625 [==============================] - 32s 50ms/step - loss: 3.1830 - accuracy: 0.8644 - val_loss: 3.2565 - val_accuracy: 0.8345\nEpoch 56/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.1260 - accuracy: 0.8677 - val_loss: 3.2492 - val_accuracy: 0.8246\nEpoch 57/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.0931 - accuracy: 0.8672 - val_loss: 3.1819 - val_accuracy: 0.8323\nEpoch 58/100\n625/625 [==============================] - 31s 50ms/step - loss: 3.0427 - accuracy: 0.8690 - val_loss: 3.1512 - val_accuracy: 0.8329\nEpoch 59/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.9897 - accuracy: 0.8743 - val_loss: 3.0740 - 
val_accuracy: 0.8430\nEpoch 60/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.9580 - accuracy: 0.8711 - val_loss: 3.0980 - val_accuracy: 0.8272\nEpoch 61/100\n625/625 [==============================] - 32s 50ms/step - loss: 2.9113 - accuracy: 0.8749 - val_loss: 3.0232 - val_accuracy: 0.8414\nEpoch 62/100\n625/625 [==============================] - 32s 51ms/step - loss: 2.8815 - accuracy: 0.8758 - val_loss: 2.9533 - val_accuracy: 0.8483\nEpoch 63/100\n625/625 [==============================] - 32s 50ms/step - loss: 2.8444 - accuracy: 0.8797 - val_loss: 2.9582 - val_accuracy: 0.8399\nEpoch 64/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.8007 - accuracy: 0.8788 - val_loss: 2.9483 - val_accuracy: 0.8313\nEpoch 65/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.7637 - accuracy: 0.8814 - val_loss: 2.8676 - val_accuracy: 0.8443\nEpoch 66/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.7140 - accuracy: 0.8873 - val_loss: 2.8358 - val_accuracy: 0.8432\nEpoch 67/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.6867 - accuracy: 0.8858 - val_loss: 2.7931 - val_accuracy: 0.8490\nEpoch 68/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.6443 - accuracy: 0.8913 - val_loss: 2.7262 - val_accuracy: 0.8601\nEpoch 69/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.6141 - accuracy: 0.8895 - val_loss: 2.7391 - val_accuracy: 0.8516\nEpoch 70/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.5702 - accuracy: 0.8951 - val_loss: 2.7065 - val_accuracy: 0.8490\nEpoch 71/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.5313 - accuracy: 0.8974 - val_loss: 2.7089 - val_accuracy: 0.8376\nEpoch 72/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.4985 - accuracy: 0.8974 - val_loss: 2.7466 - val_accuracy: 0.8255\nEpoch 73/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.4711 - accuracy: 0.8971 - val_loss: 2.6271 - val_accuracy: 0.8460\nEpoch 74/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.4442 - accuracy: 0.8979 - val_loss: 2.5957 - val_accuracy: 0.8496\nEpoch 75/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.4085 - accuracy: 0.9016 - val_loss: 2.5309 - val_accuracy: 0.8625\nEpoch 76/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.3778 - accuracy: 0.8994 - val_loss: 2.5961 - val_accuracy: 0.8406\nEpoch 77/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.3380 - accuracy: 0.9047 - val_loss: 2.5015 - val_accuracy: 0.8557\nEpoch 78/100\n625/625 [==============================] - 32s 51ms/step - loss: 2.3148 - accuracy: 0.9040 - val_loss: 2.4950 - val_accuracy: 0.8441\nEpoch 79/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.2835 - accuracy: 0.9056 - val_loss: 2.5653 - val_accuracy: 0.8273\nEpoch 80/100\n625/625 [==============================] - 32s 50ms/step - loss: 2.2577 - accuracy: 0.9038 - val_loss: 2.4306 - val_accuracy: 0.8546\nEpoch 81/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.2208 - accuracy: 0.9091 - val_loss: 2.3725 - val_accuracy: 0.8613\nEpoch 82/100\n625/625 [==============================] - 32s 51ms/step - loss: 2.1852 - accuracy: 0.9121 - val_loss: 2.3552 - val_accuracy: 0.8647\nEpoch 83/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.1593 - accuracy: 0.9124 - val_loss: 2.3515 - 
val_accuracy: 0.8583\nEpoch 84/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.1384 - accuracy: 0.9121 - val_loss: 2.3379 - val_accuracy: 0.8511\nEpoch 85/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.1062 - accuracy: 0.9170 - val_loss: 2.2916 - val_accuracy: 0.8584\nEpoch 86/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.0812 - accuracy: 0.9161 - val_loss: 2.3001 - val_accuracy: 0.8509\nEpoch 87/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.0497 - accuracy: 0.9172 - val_loss: 2.2513 - val_accuracy: 0.8584\nEpoch 88/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.0342 - accuracy: 0.9174 - val_loss: 2.2018 - val_accuracy: 0.8676\nEpoch 89/100\n625/625 [==============================] - 31s 50ms/step - loss: 2.0081 - accuracy: 0.9170 - val_loss: 2.1919 - val_accuracy: 0.8665\nEpoch 90/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.9852 - accuracy: 0.9174 - val_loss: 2.2318 - val_accuracy: 0.8485\nEpoch 91/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.9467 - accuracy: 0.9222 - val_loss: 2.1839 - val_accuracy: 0.8561\nEpoch 92/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.9304 - accuracy: 0.9223 - val_loss: 2.1263 - val_accuracy: 0.8601\nEpoch 93/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.8977 - accuracy: 0.9262 - val_loss: 2.0964 - val_accuracy: 0.8669\nEpoch 94/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.8788 - accuracy: 0.9243 - val_loss: 2.0660 - val_accuracy: 0.8716\nEpoch 95/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.8585 - accuracy: 0.9248 - val_loss: 2.0604 - val_accuracy: 0.8652\nEpoch 96/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.8400 - accuracy: 0.9225 - val_loss: 2.0354 - val_accuracy: 0.8725\nEpoch 97/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.8082 - accuracy: 0.9282 - val_loss: 2.0693 - val_accuracy: 0.8592\nEpoch 98/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.7905 - accuracy: 0.9260 - val_loss: 2.0144 - val_accuracy: 0.8655\nEpoch 99/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.7738 - accuracy: 0.9274 - val_loss: 2.0211 - val_accuracy: 0.8610\nEpoch 100/100\n625/625 [==============================] - 31s 50ms/step - loss: 1.7480 - accuracy: 0.9281 - val_loss: 1.9626 - val_accuracy: 0.8699\n" ], [ "elapsed_time = datetime.timedelta(seconds=(time.perf_counter() - t))\n\nprint('Tiempo de entrenamiento:', elapsed_time)", "Tiempo de entrenamiento: 0:52:12.653343\n" ] ], [ [ "## 8. 
Evaluamos los resultados\n", "_____no_output_____" ] ], [ [ "_, acc = model.evaluate(x_test, y_test_encoded, verbose=0)\nprint('> %.3f' % (acc * 100.0))", "> 87.290\n" ], [ "plt.title('Cross Entropy Loss')\nplt.plot(history.history['loss'], color='blue', label='train')\nplt.plot(history.history['val_loss'], color='orange', label='test')\nplt.show()\n\nplt.title('Classification Accuracy')\nplt.plot(history.history['accuracy'], color='blue', label='train')\nplt.plot(history.history['val_accuracy'], color='orange', label='test')\nplt.show()", "_____no_output_____" ], [ "predictions = model.predict(x_test)", "_____no_output_____" ], [ "def plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array, true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(predicted_label,\n 100*np.max(predictions_array),\n true_label[0]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array, true_label[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label[0]].set_color('blue')", "_____no_output_____" ] ], [ [ "Dibujamos las primeras imágenes:", "_____no_output_____" ] ], [ [ "i = 0\nfor l in cifar10_labels:\n print(i, l)\n i += 1\n\nnum_rows = 5\nnum_cols = 4\nstart = 650\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n plot_image(i+start, predictions[i+start], y_test, x_test)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n plot_value_array(i+start, predictions[i+start], y_test)\nplt.tight_layout()\nplt.show()", "0 airplane\n1 automobile\n2 bird\n3 cat\n4 deer\n5 dog\n6 frog\n7 horse\n8 ship\n9 truck\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
hexsha: d0693baec83a41daa2d6295a9430f13270d2c848
size: 632,117
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: MNIST-image-classification-using-TF.ipynb
max_stars_repo_name: jpnevrones/Digit-Recognizer
max_stars_repo_head_hexsha: 573f3f1320a18ced40c02e65ecbe6ab365dbe5bd
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: MNIST-image-classification-using-TF.ipynb
max_issues_repo_name: jpnevrones/Digit-Recognizer
max_issues_repo_head_hexsha: 573f3f1320a18ced40c02e65ecbe6ab365dbe5bd
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: MNIST-image-classification-using-TF.ipynb
max_forks_repo_name: jpnevrones/Digit-Recognizer
max_forks_repo_head_hexsha: 573f3f1320a18ced40c02e65ecbe6ab365dbe5bd
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 379.421969
max_line_length: 148,708
alphanum_fraction: 0.915562
cells:
[ [ [ "__author__ = \"Jithin Pradeep\"\n__copyright__ = \"Copyright (C) 2018 Jithin Pradeep\"\n__license__ = \"MIT License\"\n__version__ = \"1.0\"", "_____no_output_____" ] ], [ [ "# Summary \n\n## About the Dataset\nThe data files train.csv and test.csv contain gray-scale images of hand-drawn digits, from zero through nine.\n\nEach image is 28 pixels in height and 28 pixels in width, for a total of 784 pixels in total. Each pixel has a single pixel-value associated with it, indicating the lightness or darkness of that pixel, with higher numbers meaning darker. This pixel-value is an integer between 0 and 255, inclusive.\n\nThe training data set, (train.csv), has 785 columns. The first column, called \"label\", is the digit that was drawn by the user. The rest of the columns contain the pixel-values of the associated image.\n\nEach pixel column in the training set has a name like pixelx, where x is an integer between 0 and 783, inclusive. To locate this pixel on the image, suppose that we have decomposed x as x = i * 28 + j, where i and j are integers between 0 and 27, inclusive. Then pixelx is located on row i and column j of a 28 x 28 matrix, (indexing by zero).\n\nFor example, pixel31 indicates the pixel that is in the fourth column from the left, and the second row from the top, as in the ascii-diagram below.\n\nVisually, the pixels make up the image like this:\n\n000 001 002 003 ... 026 027\n028 029 030 031 ... 054 055\n056 057 058 059 ... 082 083\n | | | | ... | |\n728 729 730 731 ... 754 755\n756 757 758 759 ... 782 783 \n\nThe test data set, (test.csv), is the same as the training set, except that it does not contain the \"label\" column.\n\n[More about MNIST Dataset can be found here](http://yann.lecun.com/exdb/mnist/)\n[Wiki Link](https://en.wikipedia.org/wiki/MNIST_database)\n\n## Method\n\nIn this post I will be describing my solution to classify handwritten digits(MNIST Dataset). Below is a deep neural network(Convolution neural network) consisting of convolution and fully connected layers.\n![Model image](png.PNG)\n\nGo ahead and use the tensorboard for deatiled visualization saved from the model.\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport keras.preprocessing.image\nimport sklearn.preprocessing\nimport sklearn.model_selection\nimport sklearn.metrics\nimport sklearn.linear_model\nimport sklearn.naive_bayes\nimport sklearn.tree\nimport sklearn.ensemble\nimport os;\nimport datetime \nimport cv2 \nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm \n%matplotlib inline\n\nimport platform\nprint(\"Platform deatils {0} \\nPython version {1}\".format(\n platform.platform(), platform.python_version()))", "Platform deatils Windows-10-10.0.15063-SP0 \nPython version 3.6.2\n" ] ], [ [ "Additional info: I am going to use the Kaggle csv based data set but MNIST Data set can also be downloaded and extracted using the below functions. ", "_____no_output_____" ], [ "#### Function to downlaod and Extract MNIST Dataset ", "_____no_output_____" ] ], [ [ "url = 'http://commondatastorage.googleapis.com/books1000/'\nlast_percent_reported = None\n\ndef download_progress_hook(count, blockSize, totalSize):\n \"\"\"A hook to report the progress of a download. This is mostly intended for users with\n slow internet connections. 
Reports every 1% change in download progress.\n \"\"\"\n global last_percent_reported\n percent = int(count * blockSize * 100 / totalSize)\n\n if last_percent_reported != percent:\n if percent % 5 == 0:\n sys.stdout.write(\"%s%%\" % percent)\n sys.stdout.flush()\n else:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n last_percent_reported = percent\n \ndef maybe_download(filename, expected_bytes, force=False):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n if force or not os.path.exists(filename):\n print('Attempting to download:', filename) \n filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\ntrain_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\ntest_filename = maybe_download('notMNIST_small.tar.gz', 8458043)", "_____no_output_____" ], [ "num_classes = 10\nnp.random.seed(133)\n\ndef maybe_extract(filename, force=False):\n root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz\n if os.path.isdir(root) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping extraction of %s.' % (root, filename))\n else:\n print('Extracting data for %s. This may take a while. Please wait.' % root)\n tar = tarfile.open(filename)\n sys.stdout.flush()\n tar.extractall()\n tar.close()\n data_folders = [\n os.path.join(root, d) for d in sorted(os.listdir(root))\n if os.path.isdir(os.path.join(root, d))]\n if len(data_folders) != num_classes:\n raise Exception(\n 'Expected %d folders, one per class. Found %d instead.' 
% (\n num_classes, len(data_folders)))\n print(data_folders)\n return data_folders\n \ntrain_folders = maybe_extract(train_filename)\ntest_folders = maybe_extract(test_filename)", "_____no_output_____" ], [ "#Load the input file from the folder\n\nif os.path.isfile('MNISTdatacsv/train.csv'):\n data_df = pd.read_csv('MNISTdatacsv/train.csv') \n print('train.csv loaded: data_df({0[0]},{0[1]})'.format(data_df.shape))\nelse:\n print('Error: train.csv not found')\n \n## read test data\n\n# read test data from CSV file \nif os.path.isfile('MNISTdatacsv/test.csv'):\n test_df = pd.read_csv('MNISTdatacsv/test.csv') \n print('test.csv loaded: test_df{0}'.format(test_df.shape))\nelse:\n print('Error: test.csv not found')\n \n# transforma and normalize test data\nx_test = test_df.iloc[:,0:].values.reshape(-1,28,28,1) # (28000,28,28,1) array\nx_test = x_test.astype(np.float)\nx_test = normalize_data(x_test)\nprint('x_test.shape = ', x_test.shape)\n\n# for saving results\ny_test_pred = {}\ny_test_pred_labels = {}", "train.csv loaded: data_df(42000,785)\ntest.csv loaded: test_df(28000, 784)\nx_test.shape = (28000, 28, 28, 1)\n" ] ], [ [ "### Preprocessing\n\n#### Normalize data and split into training and validation sets\n- In order to scale feature that robust to outlier you can use sklearn.preprocessing.RobustScaler()\n - rtoo = sklearn.preprocessing.RobustScaler()\n - rtoo.fit(data)\n - data = rtoo.transform(data) \n- or you can do standraization by using mean and std dev\n - data = (data-data.mean())/(data.std()) (Try different normalization techniques \n - to understand more about normalization, these are just few) \n- Another idea might be is to convert the rgb range to -1 to 1 from 255 to 0\n - data = ((data / 255.)-0.5)*2.\n - I am converting the range to 0 to 1\n \n[One hot encoding my notes](http://jp.jithinjp.in/2018/Representing-Categorical-values-in-Machine-learning) ", "_____no_output_____" ] ], [ [ "# function to normalize data\ndef normalize_data(data): \n data = data / data.max() # convert from [0:255] to [0.:1.] \n return data\n\n# class labels to one-hot vectors e.g. 
1 => [0 1 0 0 0 0 0 0 0 0]\ndef dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n# one-hot encodings into labels\ndef one_hot_to_dense(labels_one_hot):\n return np.argmax(labels_one_hot,1)\n\n# accuracy o\ndef accuracy_from_dense_labels(y_target, y_pred):\n y_target = y_target.reshape(-1,)\n y_pred = y_pred.reshape(-1,)\n return np.mean(y_target == y_pred)\n\n# accuracy of one-hot encoded predictions\ndef accuracy_from_one_hot_labels(y_target, y_pred):\n y_target = one_hot_to_dense(y_target).reshape(-1,)\n y_pred = one_hot_to_dense(y_pred).reshape(-1,)\n return np.mean(y_target == y_pred)\n\n# extract and normalize images\nx_train_valid = data_df.iloc[:,1:].values.reshape(-1,28,28,1) # (42000,28,28,1) array\nx_train_valid = x_train_valid.astype(np.float) # convert from int64 to float32\nx_train_valid = normalize_data(x_train_valid)\nimage_width = image_height = 28\nimage_size = 784\n\n# extract image labels\ny_train_valid_labels = data_df.iloc[:,0].values # (42000,1) array\nlabels_count = np.unique(y_train_valid_labels).shape[0]; # number of different labels = 10\n\n#plot some images and labels\nplt.figure(figsize=(15,9))\nfor i in range(50):\n plt.subplot(5,10,1+i)\n plt.title(y_train_valid_labels[i])\n plt.imshow(x_train_valid[i].reshape(28,28), cmap=cm.inferno)\n \n# labels in one hot representation\ny_train_valid = dense_to_one_hot(y_train_valid_labels, labels_count).astype(np.uint8)\n\n# dictionaries for saving results\ny_valid_pred = {}\ny_train_pred = {}\ny_test_pred = {}\ntrain_loss, valid_loss = {}, {}\ntrain_acc, valid_acc = {}, {}\n\nprint('x_train_valid.shape = ', x_train_valid.shape)\nprint('y_train_valid_labels.shape = ', y_train_valid_labels.shape)\nprint('image_size = ', image_size )\nprint('image_width = ', image_width)\nprint('image_height = ', image_height)\nprint('labels_count = ', labels_count)", "x_train_valid.shape = (42000, 28, 28, 1)\ny_train_valid_labels.shape = (42000,)\nimage_size = 784\nimage_width = 28\nimage_height = 28\nlabels_count = 10\n" ] ], [ [ "#### Data augmenttaion\nlets stick to basics like rotations, translations, zoom using keras ", "_____no_output_____" ] ], [ [ "def generate_images(imgs): \n # rotations, translations, zoom\n image_generator = keras.preprocessing.image.ImageDataGenerator(\n rotation_range = 10, width_shift_range = 0.1 , height_shift_range = 0.1,\n zoom_range = 0.1)\n\n # get transformed images\n imgs = image_generator.flow(imgs.copy(), np.zeros(len(imgs)),\n batch_size=len(imgs), shuffle = False).next() \n \n return imgs[0]\n\n# Visulizing the image augmnettaion\nfig,axs = plt.subplots(5,10, figsize=(15,9))\nfor i in range(5):\n n = np.random.randint(0,x_train_valid.shape[0]-2)\n axs[i,0].imshow(x_train_valid[n:n+1].reshape(28,28),cmap=cm.inferno)\n for j in range(1,10):\n axs[i,j].imshow(generate_images(x_train_valid[n:n+1]).reshape(28,28), cmap=cm.inferno)\n", "_____no_output_____" ] ], [ [ "### Benchmarking on some basic ML models\nAs we have our training data ready lets run couple of basic machine elarning model, I would consider these models to kind of create a baseline which would help me later own to generlize the performance of my model. 
In simple word these would give me datapoints to compare the performance across models.\n\nlets use Logistic regression, Extra tress regressor and Random forest model along with cross validation for benmarking.", "_____no_output_____" ] ], [ [ "logistic_regression = sklearn.linear_model.LogisticRegression(verbose=0, solver='lbfgs',multi_class='multinomial')\nextra_trees = sklearn.ensemble.ExtraTreesClassifier(verbose=0)\nrandom_forest = sklearn.ensemble.RandomForestClassifier(verbose=0)\n\nbench_markingDict = {'logistic_regression': logistic_regression, \n 'extra_trees': extra_trees,\n 'random_forest': random_forest }\n \nbench_marking = ['logistic_regression', 'extra_trees','random_forest'] \nfor bm_model in bench_marking:\n train_acc[bm_model] = []\n valid_acc[bm_model] = []\n\ncv_num = 10 # cross validations default = 20 => 5% validation set\nkfold = sklearn.model_selection.KFold(cv_num, shuffle=True, random_state=123)\n\nfor i,(train_index, valid_index) in enumerate(kfold.split(x_train_valid)):\n\n # start timer\n start = datetime.datetime.now();\n\n # train and validation data of original images\n x_train = x_train_valid[train_index].reshape(-1,784)\n y_train = y_train_valid[train_index]\n x_valid = x_train_valid[valid_index].reshape(-1,784)\n y_valid = y_train_valid[valid_index]\n\n for bm_model in bench_marking:\n\n # create cloned model from base models\n model = sklearn.base.clone(bench_markingDict[bm_model])\n model.fit(x_train, one_hot_to_dense(y_train))\n\n # predictions\n y_train_pred[bm_model] = model.predict_proba(x_train)\n y_valid_pred[bm_model] = model.predict_proba(x_valid)\n train_acc[bm_model].append(accuracy_from_one_hot_labels(y_train_pred[bm_model], y_train))\n valid_acc[bm_model].append(accuracy_from_one_hot_labels(y_valid_pred[bm_model], y_valid))\n\n print(i+1,': '+bm_model+' train/valid accuracy = %.3f/%.3f'%(train_acc[bm_model][-1], \n valid_acc[bm_model][-1]))\n # only one iteration\n if False:\n break;\n\nprint(bm_model+': averaged train/valid accuracy = %.3f/%.3f'%(np.mean(train_acc[bm_model]),\n np.mean(valid_acc[bm_model])))\n\n", "1 : logistic_regression train/valid accuracy = 0.940/0.920\n1 : extra_trees train/valid accuracy = 1.000/0.947\n1 : random_forest train/valid accuracy = 0.999/0.941\n2 : logistic_regression train/valid accuracy = 0.940/0.922\n2 : extra_trees train/valid accuracy = 1.000/0.949\n2 : random_forest train/valid accuracy = 0.999/0.941\n3 : logistic_regression train/valid accuracy = 0.939/0.928\n3 : extra_trees train/valid accuracy = 1.000/0.944\n3 : random_forest train/valid accuracy = 0.999/0.945\n4 : logistic_regression train/valid accuracy = 0.939/0.924\n4 : extra_trees train/valid accuracy = 1.000/0.945\n4 : random_forest train/valid accuracy = 0.999/0.941\n5 : logistic_regression train/valid accuracy = 0.940/0.920\n5 : extra_trees train/valid accuracy = 1.000/0.939\n5 : random_forest train/valid accuracy = 0.999/0.941\n6 : logistic_regression train/valid accuracy = 0.939/0.919\n6 : extra_trees train/valid accuracy = 1.000/0.948\n6 : random_forest train/valid accuracy = 0.999/0.941\n7 : logistic_regression train/valid accuracy = 0.941/0.916\n7 : extra_trees train/valid accuracy = 1.000/0.943\n7 : random_forest train/valid accuracy = 0.999/0.937\n8 : logistic_regression train/valid accuracy = 0.941/0.911\n8 : extra_trees train/valid accuracy = 1.000/0.942\n8 : random_forest train/valid accuracy = 0.999/0.933\n9 : logistic_regression train/valid accuracy = 0.940/0.925\n9 : extra_trees train/valid accuracy = 1.000/0.950\n9 : 
random_forest train/valid accuracy = 0.999/0.941\n10 : logistic_regression train/valid accuracy = 0.940/0.918\n10 : extra_trees train/valid accuracy = 1.000/0.945\n10 : random_forest train/valid accuracy = 0.999/0.937\nrandom_forest: averaged train/valid accuracy = 0.999/0.940\n" ] ], [ [ "### Neural network -\nLets get to the fun part Neural network", "_____no_output_____" ] ], [ [ "class nn_class:\n# class that implements the neural network\n\n # constructor\n def __init__(self, nn_name = 'nn_1'):\n\n # hyperparameters \n self.s_f_conv1 = 3; # filter size of first convolution layer (default = 3)\n self.n_f_conv1 = 36; # number of features of first convolution layer (default = 36)\n self.s_f_conv2 = 3; # filter size of second convolution layer (default = 3)\n self.n_f_conv2 = 36; # number of features of second convolution layer (default = 36)\n self.s_f_conv3 = 3; # filter size of third convolution layer (default = 3)\n self.n_f_conv3 = 36; # number of features of third convolution layer (default = 36)\n self.n_n_fc1 = 576; # number of neurons of first fully connected layer (default = 576)\n\n # hyperparameters for training\n self.mb_size = 50 # mini batch size\n self.keep_prob = 0.33 # keeping probability with dropout regularization \n self.learn_rate_array = [10*1e-4, 7.5*1e-4, 5*1e-4, 2.5*1e-4, 1*1e-4, 1*1e-4,\n 1*1e-4,0.75*1e-4, 0.5*1e-4, 0.25*1e-4, 0.1*1e-4, \n 0.1*1e-4, 0.075*1e-4,0.050*1e-4, 0.025*1e-4, 0.01*1e-4, \n 0.0075*1e-4, 0.0050*1e-4,0.0025*1e-4,0.001*1e-4]\n self.learn_rate_step_size = 3 # in terms of epochs\n \n # parameters\n self.learn_rate = self.learn_rate_array[0]\n self.learn_rate_pos = 0 # current position pointing to current learning rate\n self.index_in_epoch = 0 \n self.current_epoch = 0\n self.log_step = 0.2 # log results in terms of epochs\n self.n_log_step = 0 # counting current number of mini batches trained on\n self.use_tb_summary = False # True = use tensorboard visualization\n self.use_tf_saver = False # True = use saver to save the model\n self.nn_name = nn_name # name of the neural network\n \n # permutation array\n self.perm_array = np.array([])\n \n # get the next mini batch\n def next_mini_batch(self):\n\n start = self.index_in_epoch\n self.index_in_epoch += self.mb_size\n self.current_epoch += self.mb_size/len(self.x_train) \n \n # adapt length of permutation array\n if not len(self.perm_array) == len(self.x_train):\n self.perm_array = np.arange(len(self.x_train))\n \n # shuffle once at the start of epoch\n if start == 0:\n np.random.shuffle(self.perm_array)\n\n # at the end of the epoch\n if self.index_in_epoch > self.x_train.shape[0]:\n np.random.shuffle(self.perm_array) # shuffle data\n start = 0 # start next epoch\n self.index_in_epoch = self.mb_size # set index to mini batch size\n \n if self.train_on_augmented_data:\n # use augmented data for the next epoch\n self.x_train_aug = normalize_data(self.generate_images(self.x_train))\n self.y_train_aug = self.y_train\n \n end = self.index_in_epoch\n \n if self.train_on_augmented_data:\n # use augmented data\n x_tr = self.x_train_aug[self.perm_array[start:end]]\n y_tr = self.y_train_aug[self.perm_array[start:end]]\n else:\n # use original data\n x_tr = self.x_train[self.perm_array[start:end]]\n y_tr = self.y_train[self.perm_array[start:end]]\n \n return x_tr, y_tr\n \n # generate new images via rotations, translations, zoom using keras\n def generate_images(self, imgs):\n \n print('generate new set of images')\n \n # rotations, translations, zoom\n image_generator = 
keras.preprocessing.image.ImageDataGenerator(\n rotation_range = 10, width_shift_range = 0.1 , height_shift_range = 0.1,\n zoom_range = 0.1)\n\n # get transformed images\n imgs = image_generator.flow(imgs.copy(), np.zeros(len(imgs)),\n batch_size=len(imgs), shuffle = False).next() \n\n return imgs[0]\n\n # weight initialization\n def weight_variable(self, shape, name = None):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name = name)\n\n # bias initialization\n def bias_variable(self, shape, name = None):\n initial = tf.constant(0.1, shape=shape) # positive bias\n return tf.Variable(initial, name = name)\n\n # 2D convolution\n def conv2d(self, x, W, name = None):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name = name)\n\n # max pooling\n def max_pool_2x2(self, x, name = None):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME', name = name)\n\n # attach summaries to a tensor for TensorBoard visualization\n def summary_variable(self, var, var_name):\n with tf.name_scope(var_name):\n mean = tf.reduce_mean(var)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('mean', mean)\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n \n # function to create the graph\n def create_graph(self):\n\n # reset default graph\n tf.reset_default_graph()\n\n # variables for input and output \n self.x_data_tf = tf.placeholder(dtype=tf.float32, shape=[None,28,28,1], \n name='x_data_tf')\n self.y_data_tf = tf.placeholder(dtype=tf.float32, shape=[None,10], name='y_data_tf')\n\n # 1.layer: convolution + max pooling\n self.W_conv1_tf = self.weight_variable([self.s_f_conv1, self.s_f_conv1, 1,\n self.n_f_conv1], \n name = 'W_conv1_tf') # (5,5,1,32)\n self.b_conv1_tf = self.bias_variable([self.n_f_conv1], name = 'b_conv1_tf') # (32)\n self.h_conv1_tf = tf.nn.relu(self.conv2d(self.x_data_tf, \n self.W_conv1_tf) + self.b_conv1_tf, \n name = 'h_conv1_tf') # (.,28,28,32)\n self.h_pool1_tf = self.max_pool_2x2(self.h_conv1_tf, \n name = 'h_pool1_tf') # (.,14,14,32)\n\n # 2.layer: convolution + max pooling\n self.W_conv2_tf = self.weight_variable([self.s_f_conv2, self.s_f_conv2, \n self.n_f_conv1, self.n_f_conv2], \n name = 'W_conv2_tf')\n self.b_conv2_tf = self.bias_variable([self.n_f_conv2], name = 'b_conv2_tf')\n self.h_conv2_tf = tf.nn.relu(self.conv2d(self.h_pool1_tf, \n self.W_conv2_tf) + self.b_conv2_tf, \n name ='h_conv2_tf') #(.,14,14,32)\n self.h_pool2_tf = self.max_pool_2x2(self.h_conv2_tf, name = 'h_pool2_tf') #(.,7,7,32)\n\n # 3.layer: convolution + max pooling\n self.W_conv3_tf = self.weight_variable([self.s_f_conv3, self.s_f_conv3, \n self.n_f_conv2, self.n_f_conv3], \n name = 'W_conv3_tf')\n self.b_conv3_tf = self.bias_variable([self.n_f_conv3], name = 'b_conv3_tf')\n self.h_conv3_tf = tf.nn.relu(self.conv2d(self.h_pool2_tf, \n self.W_conv3_tf) + self.b_conv3_tf, \n name = 'h_conv3_tf') #(.,7,7,32)\n self.h_pool3_tf = self.max_pool_2x2(self.h_conv3_tf, \n name = 'h_pool3_tf') # (.,4,4,32)\n\n # 4.layer: fully connected\n self.W_fc1_tf = self.weight_variable([4*4*self.n_f_conv3,self.n_n_fc1], \n name = 'W_fc1_tf') # (4*4*32, 1024)\n self.b_fc1_tf = self.bias_variable([self.n_n_fc1], name = 'b_fc1_tf') # (1024)\n self.h_pool3_flat_tf = tf.reshape(self.h_pool3_tf, [-1,4*4*self.n_f_conv3], \n name = 'h_pool3_flat_tf') # (.,1024)\n self.h_fc1_tf = 
tf.nn.relu(tf.matmul(self.h_pool3_flat_tf, \n self.W_fc1_tf) + self.b_fc1_tf, \n name = 'h_fc1_tf') # (.,1024)\n \n # add dropout\n self.keep_prob_tf = tf.placeholder(dtype=tf.float32, name = 'keep_prob_tf')\n self.h_fc1_drop_tf = tf.nn.dropout(self.h_fc1_tf, self.keep_prob_tf, \n name = 'h_fc1_drop_tf')\n\n # 5.layer: fully connected\n self.W_fc2_tf = self.weight_variable([self.n_n_fc1, 10], name = 'W_fc2_tf')\n self.b_fc2_tf = self.bias_variable([10], name = 'b_fc2_tf')\n self.z_pred_tf = tf.add(tf.matmul(self.h_fc1_drop_tf, self.W_fc2_tf), \n self.b_fc2_tf, name = 'z_pred_tf')# => (.,10)\n\n # cost function\n self.cross_entropy_tf = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=self.y_data_tf, logits=self.z_pred_tf), name = 'cross_entropy_tf')\n \n # optimisation function\n self.learn_rate_tf = tf.placeholder(dtype=tf.float32, name=\"learn_rate_tf\")\n self.train_step_tf = tf.train.AdamOptimizer(self.learn_rate_tf).minimize(\n self.cross_entropy_tf, name = 'train_step_tf')\n\n # predicted probabilities in one-hot encoding\n self.y_pred_proba_tf = tf.nn.softmax(self.z_pred_tf, name='y_pred_proba_tf') \n \n # tensor of correct predictions\n self.y_pred_correct_tf = tf.equal(tf.argmax(self.y_pred_proba_tf, 1),\n tf.argmax(self.y_data_tf, 1),\n name = 'y_pred_correct_tf') \n \n # accuracy \n self.accuracy_tf = tf.reduce_mean(tf.cast(self.y_pred_correct_tf, dtype=tf.float32),\n name = 'accuracy_tf')\n\n # tensors to save intermediate accuracies and losses during training\n self.train_loss_tf = tf.Variable(np.array([]), dtype=tf.float32, \n name='train_loss_tf', validate_shape = False)\n self.valid_loss_tf = tf.Variable(np.array([]), dtype=tf.float32, \n name='valid_loss_tf', validate_shape = False)\n self.train_acc_tf = tf.Variable(np.array([]), dtype=tf.float32, \n name='train_acc_tf', validate_shape = False)\n self.valid_acc_tf = tf.Variable(np.array([]), dtype=tf.float32, \n name='valid_acc_tf', validate_shape = False)\n \n # number of weights and biases\n num_weights = (self.s_f_conv1**2*self.n_f_conv1 \n + self.s_f_conv2**2*self.n_f_conv1*self.n_f_conv2 \n + self.s_f_conv3**2*self.n_f_conv2*self.n_f_conv3 \n + 4*4*self.n_f_conv3*self.n_n_fc1 + self.n_n_fc1*10)\n num_biases = self.n_f_conv1 + self.n_f_conv2 + self.n_f_conv3 + self.n_n_fc1\n print('num_weights =', num_weights)\n print('num_biases =', num_biases)\n \n return None \n \n def attach_summary(self, sess):\n \n # create summary tensors for tensorboard\n self.use_tb_summary = True\n self.summary_variable(self.W_conv1_tf, 'W_conv1_tf')\n self.summary_variable(self.b_conv1_tf, 'b_conv1_tf')\n self.summary_variable(self.W_conv2_tf, 'W_conv2_tf')\n self.summary_variable(self.b_conv2_tf, 'b_conv2_tf')\n self.summary_variable(self.W_conv3_tf, 'W_conv3_tf')\n self.summary_variable(self.b_conv3_tf, 'b_conv3_tf')\n self.summary_variable(self.W_fc1_tf, 'W_fc1_tf')\n self.summary_variable(self.b_fc1_tf, 'b_fc1_tf')\n self.summary_variable(self.W_fc2_tf, 'W_fc2_tf')\n self.summary_variable(self.b_fc2_tf, 'b_fc2_tf')\n tf.summary.scalar('cross_entropy_tf', self.cross_entropy_tf)\n tf.summary.scalar('accuracy_tf', self.accuracy_tf)\n\n # merge all summaries for tensorboard\n self.merged = tf.summary.merge_all()\n\n # initialize summary writer \n timestamp = datetime.datetime.now().strftime('%d-%m-%Y_%H-%M-%S')\n filepath = os.path.join(os.getcwd(), 'logs', (self.nn_name+'_'+timestamp))\n self.train_writer = tf.summary.FileWriter(os.path.join(filepath,'train'), sess.graph)\n self.valid_writer = 
tf.summary.FileWriter(os.path.join(filepath,'valid'), sess.graph)\n\n def attach_saver(self):\n # initialize tensorflow saver\n self.use_tf_saver = True\n self.saver_tf = tf.train.Saver()\n\n # train \n def train_graph(self, sess, x_train, y_train, x_valid, y_valid, n_epoch = 1, \n train_on_augmented_data = False):\n\n # train on original or augmented data\n self.train_on_augmented_data = train_on_augmented_data\n \n # training and validation data\n self.x_train = x_train\n self.y_train = y_train\n self.x_valid = x_valid\n self.y_valid = y_valid\n \n # use augmented data\n if self.train_on_augmented_data:\n print('generate new set of images')\n self.x_train_aug = normalize_data(self.generate_images(self.x_train))\n self.y_train_aug = self.y_train\n \n # parameters\n mb_per_epoch = self.x_train.shape[0]/self.mb_size\n train_loss, train_acc, valid_loss, valid_acc = [],[],[],[]\n \n # start timer\n start = datetime.datetime.now();\n print(datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S'),': start training')\n print('learnrate = ',self.learn_rate,', n_epoch = ', n_epoch,\n ', mb_size = ', self.mb_size)\n # looping over mini batches\n for i in range(int(n_epoch*mb_per_epoch)+1):\n\n # adapt learn_rate\n self.learn_rate_pos = int(self.current_epoch // self.learn_rate_step_size)\n if not self.learn_rate == self.learn_rate_array[self.learn_rate_pos]:\n self.learn_rate = self.learn_rate_array[self.learn_rate_pos]\n print(datetime.datetime.now()-start,': set learn rate to %.6f'%self.learn_rate)\n \n # get new batch\n x_batch, y_batch = self.next_mini_batch() \n\n # run the graph\n sess.run(self.train_step_tf, feed_dict={self.x_data_tf: x_batch, \n self.y_data_tf: y_batch, \n self.keep_prob_tf: self.keep_prob, \n self.learn_rate_tf: self.learn_rate})\n \n \n # store losses and accuracies\n if i%int(self.log_step*mb_per_epoch) == 0 or i == int(n_epoch*mb_per_epoch):\n \n self.n_log_step += 1 # for logging the results\n \n feed_dict_train = {\n self.x_data_tf: self.x_train[self.perm_array[:len(self.x_valid)]], \n self.y_data_tf: self.y_train[self.perm_array[:len(self.y_valid)]], \n self.keep_prob_tf: 1.0}\n \n feed_dict_valid = {self.x_data_tf: self.x_valid, \n self.y_data_tf: self.y_valid, \n self.keep_prob_tf: 1.0}\n \n # summary for tensorboard\n if self.use_tb_summary:\n train_summary = sess.run(self.merged, feed_dict = feed_dict_train)\n valid_summary = sess.run(self.merged, feed_dict = feed_dict_valid)\n self.train_writer.add_summary(train_summary, self.n_log_step)\n self.valid_writer.add_summary(valid_summary, self.n_log_step)\n \n train_loss.append(sess.run(self.cross_entropy_tf,\n feed_dict = feed_dict_train))\n\n train_acc.append(self.accuracy_tf.eval(session = sess, \n feed_dict = feed_dict_train))\n \n valid_loss.append(sess.run(self.cross_entropy_tf,\n feed_dict = feed_dict_valid))\n\n valid_acc.append(self.accuracy_tf.eval(session = sess, \n feed_dict = feed_dict_valid))\n\n print('%.2f epoch: train/val loss = %.4f/%.4f, train/val acc = %.4f/%.4f'%(\n self.current_epoch, train_loss[-1], valid_loss[-1],\n train_acc[-1], valid_acc[-1]))\n \n # concatenate losses and accuracies and assign to tensor variables\n tl_c = np.concatenate([self.train_loss_tf.eval(session=sess), train_loss], axis = 0)\n vl_c = np.concatenate([self.valid_loss_tf.eval(session=sess), valid_loss], axis = 0)\n ta_c = np.concatenate([self.train_acc_tf.eval(session=sess), train_acc], axis = 0)\n va_c = np.concatenate([self.valid_acc_tf.eval(session=sess), valid_acc], axis = 0)\n \n 
sess.run(tf.assign(self.train_loss_tf, tl_c, validate_shape = False))\n sess.run(tf.assign(self.valid_loss_tf, vl_c , validate_shape = False))\n sess.run(tf.assign(self.train_acc_tf, ta_c , validate_shape = False))\n sess.run(tf.assign(self.valid_acc_tf, va_c , validate_shape = False))\n \n print('running time for training: ', datetime.datetime.now() - start)\n return None\n \n # save summaries\n def save_model(self, sess):\n \n # tf saver\n if self.use_tf_saver:\n #filepath = os.path.join(os.getcwd(), 'logs' , self.nn_name)\n filepath = os.path.join(os.getcwd(), self.nn_name)\n self.saver_tf.save(sess, filepath)\n \n # tb summary\n if self.use_tb_summary:\n self.train_writer.close()\n self.valid_writer.close()\n \n return None\n \n # prediction \n def forward(self, sess, x_data):\n y_pred_proba = self.y_pred_proba_tf.eval(session = sess, \n feed_dict = {self.x_data_tf: x_data,\n self.keep_prob_tf: 1.0})\n return y_pred_proba\n \n # load tensors from a saved graph\n def load_tensors(self, graph):\n \n # input tensors\n self.x_data_tf = graph.get_tensor_by_name(\"x_data_tf:0\")\n self.y_data_tf = graph.get_tensor_by_name(\"y_data_tf:0\")\n \n # weights and bias tensors\n self.W_conv1_tf = graph.get_tensor_by_name(\"W_conv1_tf:0\")\n self.W_conv2_tf = graph.get_tensor_by_name(\"W_conv2_tf:0\")\n self.W_conv3_tf = graph.get_tensor_by_name(\"W_conv3_tf:0\")\n self.W_fc1_tf = graph.get_tensor_by_name(\"W_fc1_tf:0\")\n self.W_fc2_tf = graph.get_tensor_by_name(\"W_fc2_tf:0\")\n self.b_conv1_tf = graph.get_tensor_by_name(\"b_conv1_tf:0\")\n self.b_conv2_tf = graph.get_tensor_by_name(\"b_conv2_tf:0\")\n self.b_conv3_tf = graph.get_tensor_by_name(\"b_conv3_tf:0\")\n self.b_fc1_tf = graph.get_tensor_by_name(\"b_fc1_tf:0\")\n self.b_fc2_tf = graph.get_tensor_by_name(\"b_fc2_tf:0\")\n \n # activation tensors\n self.h_conv1_tf = graph.get_tensor_by_name('h_conv1_tf:0') \n self.h_pool1_tf = graph.get_tensor_by_name('h_pool1_tf:0')\n self.h_conv2_tf = graph.get_tensor_by_name('h_conv2_tf:0')\n self.h_pool2_tf = graph.get_tensor_by_name('h_pool2_tf:0')\n self.h_conv3_tf = graph.get_tensor_by_name('h_conv3_tf:0')\n self.h_pool3_tf = graph.get_tensor_by_name('h_pool3_tf:0')\n self.h_fc1_tf = graph.get_tensor_by_name('h_fc1_tf:0')\n self.z_pred_tf = graph.get_tensor_by_name('z_pred_tf:0')\n \n # training and prediction tensors\n self.learn_rate_tf = graph.get_tensor_by_name(\"learn_rate_tf:0\")\n self.keep_prob_tf = graph.get_tensor_by_name(\"keep_prob_tf:0\")\n self.cross_entropy_tf = graph.get_tensor_by_name('cross_entropy_tf:0')\n self.train_step_tf = graph.get_operation_by_name('train_step_tf')\n self.z_pred_tf = graph.get_tensor_by_name('z_pred_tf:0')\n self.y_pred_proba_tf = graph.get_tensor_by_name(\"y_pred_proba_tf:0\")\n self.y_pred_correct_tf = graph.get_tensor_by_name('y_pred_correct_tf:0')\n self.accuracy_tf = graph.get_tensor_by_name('accuracy_tf:0')\n \n # tensor of stored losses and accuricies during training\n self.train_loss_tf = graph.get_tensor_by_name(\"train_loss_tf:0\")\n self.train_acc_tf = graph.get_tensor_by_name(\"train_acc_tf:0\")\n self.valid_loss_tf = graph.get_tensor_by_name(\"valid_loss_tf:0\")\n self.valid_acc_tf = graph.get_tensor_by_name(\"valid_acc_tf:0\")\n \n return None\n \n # get losses of training and validation sets\n def get_loss(self, sess):\n train_loss = self.train_loss_tf.eval(session = sess)\n valid_loss = self.valid_loss_tf.eval(session = sess)\n return train_loss, valid_loss \n \n # get accuracies of training and validation sets\n def get_accuracy(self, 
sess):\n train_acc = self.train_acc_tf.eval(session = sess)\n valid_acc = self.valid_acc_tf.eval(session = sess)\n return train_acc, valid_acc \n \n # get weights\n def get_weights(self, sess):\n W_conv1 = self.W_conv1_tf.eval(session = sess)\n W_conv2 = self.W_conv2_tf.eval(session = sess)\n W_conv3 = self.W_conv3_tf.eval(session = sess)\n W_fc1_tf = self.W_fc1_tf.eval(session = sess)\n W_fc2_tf = self.W_fc2_tf.eval(session = sess)\n return W_conv1, W_conv2, W_conv3, W_fc1_tf, W_fc2_tf\n \n # get biases\n def get_biases(self, sess):\n b_conv1 = self.b_conv1_tf.eval(session = sess)\n b_conv2 = self.b_conv2_tf.eval(session = sess)\n b_conv3 = self.b_conv3_tf.eval(session = sess)\n b_fc1_tf = self.b_fc1_tf.eval(session = sess)\n b_fc2_tf = self.b_fc2_tf.eval(session = sess)\n return b_conv1, b_conv2, b_conv3, b_fc1_tf, b_fc2_tf\n \n # load session from file, restore graph, and load tensors\n def load_session_from_file(self, filename):\n tf.reset_default_graph()\n filepath = os.path.join(os.getcwd(), filename + '.meta')\n #filepath = os.path.join(os.getcwd(),'logs', filename + '.meta')\n saver = tf.train.import_meta_graph(filepath)\n print(filepath)\n sess = tf.Session()\n saver.restore(sess, instance)\n graph = tf.get_default_graph()\n self.load_tensors(graph)\n return sess\n \n # receive activations given the input\n def get_activations(self, sess, x_data):\n feed_dict = {self.x_data_tf: x_data, self.keep_prob_tf: 1.0}\n h_conv1 = self.h_conv1_tf.eval(session = sess, feed_dict = feed_dict)\n h_pool1 = self.h_pool1_tf.eval(session = sess, feed_dict = feed_dict)\n h_conv2 = self.h_conv2_tf.eval(session = sess, feed_dict = feed_dict)\n h_pool2 = self.h_pool2_tf.eval(session = sess, feed_dict = feed_dict)\n h_conv3 = self.h_conv3_tf.eval(session = sess, feed_dict = feed_dict)\n h_pool3 = self.h_pool3_tf.eval(session = sess, feed_dict = feed_dict)\n h_fc1 = self.h_fc1_tf.eval(session = sess, feed_dict = feed_dict)\n h_fc2 = self.z_pred_tf.eval(session = sess, feed_dict = feed_dict)\n return h_conv1,h_pool1,h_conv2,h_pool2,h_conv3,h_pool3,h_fc1,h_fc2", "_____no_output_____" ], [ "## train the neural network graph\n\nModel_instance_list = ['CNN1'] # use full when you would want to run diffrent \n#instamnce of same model with diffrent parameter\n# we wont be doing it but you can try, we just ahve one\n\n# cross validations\ncv_num = 10 # cross validations default = 20 => 5% validation set\nkfold = sklearn.model_selection.KFold(cv_num, shuffle=True, random_state=123)\n\nfor i,(train_index, valid_index) in enumerate(kfold.split(x_train_valid)):\n \n # start timer\n start = datetime.datetime.now();\n \n # train and validation data of original images\n x_train = x_train_valid[train_index]\n y_train = y_train_valid[train_index]\n x_valid = x_train_valid[valid_index]\n y_valid = y_train_valid[valid_index]\n \n # create neural network graph\n nn_graph = nn_class(nn_name = Model_instance_list[i]) # instance of nn_class\n nn_graph.create_graph() # create graph\n nn_graph.attach_saver() # attach saver tensors\n \n # start tensorflow session\n with tf.Session() as sess:\n \n # attach summaries\n nn_graph.attach_summary(sess) \n \n # variable initialization of the default graph\n sess.run(tf.global_variables_initializer()) \n \n # training on original data\n nn_graph.train_graph(sess, x_train, y_train, x_valid, y_valid, n_epoch = 1.0)\n \n # training on augmented data\n nn_graph.train_graph(sess, x_train, y_train, x_valid, y_valid, n_epoch = 14.0,\n train_on_augmented_data = True)\n\n # save tensors and 
summaries of model\n nn_graph.save_model(sess)\n \n # only one iteration\n if True:\n break;\n \n \nprint('total running time for training: ', datetime.datetime.now() - start)\n ", "num_weights = 361188\nnum_biases = 684\n02-04-2018 15:22:31 : start training\nlearnrate = 0.001 , n_epoch = 1.0 , mb_size = 50\n0.00 epoch: train/val loss = 2.5636/2.5635, train/val acc = 0.1107/0.1148\n0.20 epoch: train/val loss = 0.2069/0.2036, train/val acc = 0.9405/0.9374\n0.40 epoch: train/val loss = 0.1601/0.1727, train/val acc = 0.9495/0.9445\n0.60 epoch: train/val loss = 0.1002/0.1013, train/val acc = 0.9712/0.9698\n0.80 epoch: train/val loss = 0.0884/0.0823, train/val acc = 0.9707/0.9762\n1.00 epoch: train/val loss = 0.0799/0.0822, train/val acc = 0.9762/0.9748\n1.00 epoch: train/val loss = 0.0673/0.0821, train/val acc = 0.9783/0.9750\nrunning time for training: 0:01:02.407611\ngenerate new set of images\ngenerate new set of images\n02-04-2018 15:23:37 : start training\nlearnrate = 0.001 , n_epoch = 14.0 , mb_size = 50\n1.00 epoch: train/val loss = 0.0642/0.0784, train/val acc = 0.9807/0.9760\n1.20 epoch: train/val loss = 0.0712/0.0764, train/val acc = 0.9790/0.9757\n1.40 epoch: train/val loss = 0.0643/0.0649, train/val acc = 0.9800/0.9764\n1.60 epoch: train/val loss = 0.0559/0.0574, train/val acc = 0.9843/0.9814\n1.80 epoch: train/val loss = 0.0531/0.0498, train/val acc = 0.9848/0.9819\ngenerate new set of images\n2.00 epoch: train/val loss = 0.0816/0.0709, train/val acc = 0.9767/0.9760\n2.20 epoch: train/val loss = 0.0516/0.0459, train/val acc = 0.9843/0.9840\n2.40 epoch: train/val loss = 0.0517/0.0454, train/val acc = 0.9857/0.9857\n2.60 epoch: train/val loss = 0.0612/0.0574, train/val acc = 0.9810/0.9829\n2.80 epoch: train/val loss = 0.0476/0.0418, train/val acc = 0.9871/0.9862\n3.00 epoch: train/val loss = 0.0452/0.0395, train/val acc = 0.9871/0.9855\ngenerate new set of images\n0:01:54.182973 : set learn rate to 0.000750\n3.20 epoch: train/val loss = 0.0552/0.0468, train/val acc = 0.9848/0.9826\n3.40 epoch: train/val loss = 0.0400/0.0351, train/val acc = 0.9876/0.9871\n3.60 epoch: train/val loss = 0.0428/0.0365, train/val acc = 0.9869/0.9862\n3.80 epoch: train/val loss = 0.0469/0.0358, train/val acc = 0.9855/0.9890\n4.00 epoch: train/val loss = 0.0505/0.0375, train/val acc = 0.9852/0.9867\ngenerate new set of images\n4.20 epoch: train/val loss = 0.0281/0.0341, train/val acc = 0.9921/0.9881\n4.40 epoch: train/val loss = 0.0308/0.0332, train/val acc = 0.9902/0.9888\n4.60 epoch: train/val loss = 0.0314/0.0340, train/val acc = 0.9902/0.9890\n4.80 epoch: train/val loss = 0.0335/0.0404, train/val acc = 0.9898/0.9852\n5.00 epoch: train/val loss = 0.0335/0.0375, train/val acc = 0.9898/0.9867\ngenerate new set of images\n5.20 epoch: train/val loss = 0.0272/0.0276, train/val acc = 0.9921/0.9905\n5.40 epoch: train/val loss = 0.0307/0.0287, train/val acc = 0.9910/0.9900\n5.60 epoch: train/val loss = 0.0329/0.0303, train/val acc = 0.9895/0.9893\n5.80 epoch: train/val loss = 0.0311/0.0276, train/val acc = 0.9910/0.9910\n6.00 epoch: train/val loss = 0.0293/0.0264, train/val acc = 0.9905/0.9907\n0:04:33.773779 : set learn rate to 0.000500\ngenerate new set of images\n6.20 epoch: train/val loss = 0.0247/0.0230, train/val acc = 0.9898/0.9917\n6.40 epoch: train/val loss = 0.0281/0.0227, train/val acc = 0.9883/0.9926\n6.60 epoch: train/val loss = 0.0296/0.0269, train/val acc = 0.9888/0.9905\n6.79 epoch: train/val loss = 0.0291/0.0275, train/val acc = 0.9893/0.9907\n6.99 epoch: train/val loss = 0.0259/0.0236, 
train/val acc = 0.9905/0.9921\ngenerate new set of images\n7.19 epoch: train/val loss = 0.0193/0.0233, train/val acc = 0.9938/0.9926\n7.39 epoch: train/val loss = 0.0232/0.0218, train/val acc = 0.9933/0.9924\n7.59 epoch: train/val loss = 0.0210/0.0233, train/val acc = 0.9931/0.9926\n7.79 epoch: train/val loss = 0.0187/0.0212, train/val acc = 0.9931/0.9936\n7.99 epoch: train/val loss = 0.0271/0.0331, train/val acc = 0.9917/0.9912\ngenerate new set of images\n8.19 epoch: train/val loss = 0.0228/0.0221, train/val acc = 0.9933/0.9919\n8.39 epoch: train/val loss = 0.0259/0.0237, train/val acc = 0.9926/0.9912\n8.59 epoch: train/val loss = 0.0216/0.0225, train/val acc = 0.9943/0.9929\n8.79 epoch: train/val loss = 0.0206/0.0194, train/val acc = 0.9936/0.9936\n8.99 epoch: train/val loss = 0.0246/0.0232, train/val acc = 0.9921/0.9921\n0:07:16.482874 : set learn rate to 0.000250\ngenerate new set of images\n9.19 epoch: train/val loss = 0.0186/0.0171, train/val acc = 0.9940/0.9938\n9.39 epoch: train/val loss = 0.0198/0.0189, train/val acc = 0.9936/0.9943\n9.59 epoch: train/val loss = 0.0265/0.0217, train/val acc = 0.9914/0.9919\n9.79 epoch: train/val loss = 0.0206/0.0200, train/val acc = 0.9940/0.9926\n9.99 epoch: train/val loss = 0.0202/0.0184, train/val acc = 0.9936/0.9936\ngenerate new set of images\n10.19 epoch: train/val loss = 0.0198/0.0193, train/val acc = 0.9938/0.9926\n10.39 epoch: train/val loss = 0.0205/0.0197, train/val acc = 0.9952/0.9938\n10.59 epoch: train/val loss = 0.0230/0.0245, train/val acc = 0.9933/0.9914\n10.79 epoch: train/val loss = 0.0227/0.0207, train/val acc = 0.9931/0.9933\n10.99 epoch: train/val loss = 0.0237/0.0213, train/val acc = 0.9929/0.9926\ngenerate new set of images\n11.19 epoch: train/val loss = 0.0143/0.0188, train/val acc = 0.9943/0.9945\n11.39 epoch: train/val loss = 0.0156/0.0189, train/val acc = 0.9948/0.9943\n11.59 epoch: train/val loss = 0.0157/0.0182, train/val acc = 0.9945/0.9945\n11.79 epoch: train/val loss = 0.0172/0.0215, train/val acc = 0.9943/0.9921\n11.99 epoch: train/val loss = 0.0158/0.0188, train/val acc = 0.9948/0.9933\n0:10:00.029841 : set learn rate to 0.000100\ngenerate new set of images\n12.19 epoch: train/val loss = 0.0165/0.0188, train/val acc = 0.9945/0.9929\n12.39 epoch: train/val loss = 0.0172/0.0193, train/val acc = 0.9948/0.9936\n12.59 epoch: train/val loss = 0.0162/0.0197, train/val acc = 0.9950/0.9933\n12.79 epoch: train/val loss = 0.0167/0.0184, train/val acc = 0.9952/0.9936\n12.99 epoch: train/val loss = 0.0183/0.0201, train/val acc = 0.9948/0.9936\ngenerate new set of images\n13.19 epoch: train/val loss = 0.0161/0.0194, train/val acc = 0.9955/0.9945\n13.39 epoch: train/val loss = 0.0159/0.0183, train/val acc = 0.9955/0.9936\n13.59 epoch: train/val loss = 0.0164/0.0174, train/val acc = 0.9950/0.9936\n13.79 epoch: train/val loss = 0.0160/0.0179, train/val acc = 0.9940/0.9936\n13.99 epoch: train/val loss = 0.0161/0.0182, train/val acc = 0.9945/0.9938\ngenerate new set of images\n14.19 epoch: train/val loss = 0.0134/0.0164, train/val acc = 0.9964/0.9933\n14.38 epoch: train/val loss = 0.0142/0.0180, train/val acc = 0.9957/0.9933\n14.58 epoch: train/val loss = 0.0134/0.0190, train/val acc = 0.9957/0.9931\n14.78 epoch: train/val loss = 0.0143/0.0180, train/val acc = 0.9960/0.9931\n14.98 epoch: train/val loss = 0.0142/0.0190, train/val acc = 0.9957/0.9933\ngenerate new set of images\n15.00 epoch: train/val loss = 0.0164/0.0184, train/val acc = 0.9945/0.9933\nrunning time for training: 0:12:54.299752\ntotal running time for training: 
0:14:01.069572\n" ], [ "instance = Model_instance_list[0]\nnn_graph = nn_class()\nsess = nn_graph.load_session_from_file(instance)\ny_valid_pred[instance] = nn_graph.forward(sess, x_valid)\nsess.close()\n\ncnf_matrix = sklearn.metrics.confusion_matrix(\n one_hot_to_dense(y_valid_pred[instance]), one_hot_to_dense(y_valid)).astype(np.float32)\n\nlabels_array = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nfig, ax = plt.subplots(1,figsize=(10,10))\nax = sns.heatmap(cnf_matrix, ax=ax, cmap=plt.cm.PuBuGn, annot=True)\nax.set_xticklabels(labels_array)\nax.set_yticklabels(labels_array)\nplt.title('Confusion matrix of validation set')\nplt.ylabel('True digit')\nplt.xlabel('Predicted digit')\nplt.show();", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\n" ], [ "## loss and accuracy curves\n\nnn_graph = nn_class()\nsess = nn_graph.load_session_from_file(instance)\ntrain_loss[instance], valid_loss[instance] = nn_graph.get_loss(sess)\ntrain_acc[instance], valid_acc[instance] = nn_graph.get_accuracy(sess)\nsess.close()\n\nprint('final train/valid loss = %.4f/%.4f, train/valid accuracy = %.4f/%.4f'%(\n train_loss[instance][-1], valid_loss[instance][-1], train_acc[instance][-1], valid_acc[instance][-1]))\n\nplt.figure(figsize=(10, 5));\nplt.subplot(1,2,1);\nplt.plot(np.arange(0,len(train_acc[instance])), train_acc[instance],'-b', label='Training')\nplt.plot(np.arange(0,len(valid_acc[instance])), valid_acc[instance],'-g', label='Validation')\nplt.legend(loc='lower right', frameon=False)\nplt.ylim(ymax = 1.1, ymin = 0.0)\nplt.ylabel('accuracy')\nplt.xlabel('log steps');\n\nplt.subplot(1,2,2)\nplt.plot(np.arange(0,len(train_loss[instance])), train_loss[instance],'-b', label='Training')\nplt.plot(np.arange(0,len(valid_loss[instance])), valid_loss[instance],'-g', label='Validation')\nplt.legend(loc='lower right', frameon=False)\nplt.ylim(ymax = 3.0, ymin = 0.0)\nplt.ylabel('loss')\nplt.xlabel('log steps');", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\nfinal train/valid loss = 0.0164/0.0184, train/valid accuracy = 0.9945/0.9933\n" ], [ "## visualize weights\n\nnn_graph = nn_class()\nsess = nn_graph.load_session_from_file(instance)\nW_conv1, W_conv2, W_conv3, _, _ = nn_graph.get_weights(sess)\nsess.close()\n\nprint('W_conv1: min = ' + str(np.min(W_conv1)) + ' max = ' + str(np.max(W_conv1))\n + ' mean = ' + str(np.mean(W_conv1)) + ' std = ' + str(np.std(W_conv1)))\nprint('W_conv2: min = ' + str(np.min(W_conv2)) + ' max = ' + str(np.max(W_conv2))\n + ' mean = ' + str(np.mean(W_conv2)) + ' std = ' + str(np.std(W_conv2)))\nprint('W_conv3: min = ' + str(np.min(W_conv3)) + ' max = ' + str(np.max(W_conv3))\n + ' mean = ' + str(np.mean(W_conv3)) + ' std = ' + str(np.std(W_conv3)))\n\ns_f_conv1 = nn_graph.s_f_conv1\ns_f_conv2 = nn_graph.s_f_conv2\ns_f_conv3 = nn_graph.s_f_conv3\n\nW_conv1 = np.reshape(W_conv1,(s_f_conv1,s_f_conv1,1,6,6))\nW_conv1 = np.transpose(W_conv1,(3,0,4,1,2))\nW_conv1 = np.reshape(W_conv1,(s_f_conv1*6,s_f_conv1*6,1))\n\nW_conv2 = np.reshape(W_conv2,(s_f_conv2,s_f_conv2,6,6,36))\nW_conv2 = np.transpose(W_conv2,(2,0,3,1,4))\nW_conv2 = np.reshape(W_conv2,(6*s_f_conv2,6*s_f_conv2,6,6))\nW_conv2 = np.transpose(W_conv2,(2,0,3,1))\nW_conv2 = np.reshape(W_conv2,(6*6*s_f_conv2,6*6*s_f_conv2))\n\nW_conv3 = np.reshape(W_conv3,(s_f_conv3,s_f_conv3,6,6,36))\nW_conv3 = 
np.transpose(W_conv3,(2,0,3,1,4))\nW_conv3 = np.reshape(W_conv3,(6*s_f_conv3,6*s_f_conv3,6,6))\nW_conv3 = np.transpose(W_conv3,(2,0,3,1))\nW_conv3 = np.reshape(W_conv3,(6*6*s_f_conv3,6*6*s_f_conv3))\n\nplt.figure(figsize=(15,5))\nplt.subplot(1,3,1)\nplt.gca().set_xticks(np.arange(-0.5, s_f_conv1*6, s_f_conv1), minor = False);\nplt.gca().set_yticks(np.arange(-0.5, s_f_conv1*6, s_f_conv1), minor = False);\nplt.grid(which = 'minor', color='b', linestyle='-', linewidth=1)\nplt.title('W_conv1 ' + str(W_conv1.shape))\nplt.colorbar(plt.imshow(W_conv1[:,:,0], cmap=cm.inferno));\n\nplt.subplot(1,3,2)\nplt.gca().set_xticks(np.arange(-0.5, 6*6*s_f_conv2, 6*s_f_conv2), minor = False);\nplt.gca().set_yticks(np.arange(-0.5, 6*6*s_f_conv2, 6*s_f_conv2), minor = False);\nplt.grid(which = 'minor', color='b', linestyle='-', linewidth=1)\nplt.title('W_conv2 ' + str(W_conv2.shape))\nplt.colorbar(plt.imshow(W_conv2[:,:], cmap=cm.inferno));\n\nplt.subplot(1,3,3)\nplt.gca().set_xticks(np.arange(-0.5, 6*6*s_f_conv3, 6*s_f_conv3), minor = False);\nplt.gca().set_yticks(np.arange(-0.5, 6*6*s_f_conv3, 6*s_f_conv3), minor = False);\nplt.grid(which = 'minor', color='b', linestyle='-', linewidth=1)\nplt.title('W_conv3 ' + str(W_conv3.shape))\nplt.colorbar(plt.imshow(W_conv3[:,:], cmap=cm.inferno));", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\nW_conv1: min = -0.40554228 max = 0.30480886 mean = -0.011088983 std = 0.14781868\nW_conv2: min = -0.5984618 max = 0.35811886 mean = -0.013514044 std = 0.105048046\nW_conv3: min = -0.4627795 max = 0.36970696 mean = -0.016451234 std = 0.106902294\n" ], [ "## visualize activations\n\nimg_no = 143;\nnn_graph = nn_class()\nsess = nn_graph.load_session_from_file(instance)\n(h_conv1, h_pool1, h_conv2, h_pool2,h_conv3, h_pool3, h_fc1,\n h_fc2) = nn_graph.get_activations(sess, x_train_valid[img_no:img_no+1])\nsess.close()\n \n# original image\nplt.figure(figsize=(15,9))\nplt.subplot(2,4,1)\nplt.imshow(x_train_valid[img_no].reshape(28,28),cmap=cm.inferno);\n\n# 1. convolution\nplt.subplot(2,4,2)\nplt.title('h_conv1 ' + str(h_conv1.shape))\nh_conv1 = np.reshape(h_conv1,(-1,28,28,6,6))\nh_conv1 = np.transpose(h_conv1,(0,3,1,4,2))\nh_conv1 = np.reshape(h_conv1,(-1,6*28,6*28))\nplt.imshow(h_conv1[0], cmap=cm.inferno);\n\n# 1. max pooling\nplt.subplot(2,4,3)\nplt.title('h_pool1 ' + str(h_pool1.shape))\nh_pool1 = np.reshape(h_pool1,(-1,14,14,6,6))\nh_pool1 = np.transpose(h_pool1,(0,3,1,4,2))\nh_pool1 = np.reshape(h_pool1,(-1,6*14,6*14))\nplt.imshow(h_pool1[0], cmap=cm.inferno);\n\n# 2. convolution\nplt.subplot(2,4,4)\nplt.title('h_conv2 ' + str(h_conv2.shape))\nh_conv2 = np.reshape(h_conv2,(-1,14,14,6,6))\nh_conv2 = np.transpose(h_conv2,(0,3,1,4,2))\nh_conv2 = np.reshape(h_conv2,(-1,6*14,6*14))\nplt.imshow(h_conv2[0], cmap=cm.inferno);\n\n# 2. max pooling\nplt.subplot(2,4,5)\nplt.title('h_pool2 ' + str(h_pool2.shape))\nh_pool2 = np.reshape(h_pool2,(-1,7,7,6,6))\nh_pool2 = np.transpose(h_pool2,(0,3,1,4,2))\nh_pool2 = np.reshape(h_pool2,(-1,6*7,6*7))\nplt.imshow(h_pool2[0], cmap=cm.inferno);\n\n# 3. convolution\nplt.subplot(2,4,6)\nplt.title('h_conv3 ' + str(h_conv3.shape))\nh_conv3 = np.reshape(h_conv3,(-1,7,7,6,6))\nh_conv3 = np.transpose(h_conv3,(0,3,1,4,2))\nh_conv3 = np.reshape(h_conv3,(-1,6*7,6*7))\nplt.imshow(h_conv3[0], cmap=cm.inferno);\n\n# 3. 
max pooling\nplt.subplot(2,4,7)\nplt.title('h_pool2 ' + str(h_pool3.shape))\nh_pool3 = np.reshape(h_pool3,(-1,4,4,6,6))\nh_pool3 = np.transpose(h_pool3,(0,3,1,4,2))\nh_pool3 = np.reshape(h_pool3,(-1,6*4,6*4))\nplt.imshow(h_pool3[0], cmap=cm.inferno);\n\n# 4. FC layer\nplt.subplot(2,4,8)\nplt.title('h_fc1 ' + str(h_fc1.shape))\nh_fc1 = np.reshape(h_fc1,(-1,24,24))\nplt.imshow(h_fc1[0], cmap=cm.inferno);\n\n# 5. FC layer\nnp.set_printoptions(precision=2)\nprint('h_fc2 = ', h_fc2)", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\nh_fc2 = [[-6.41 -1.45 -4.37 -9.91 10.5 -7.01 -3.35 -4.34 -0.6 -1.2 ]]\n" ], [ "## show misclassified images\n\nnn_graph = nn_class()\nsess = nn_graph.load_session_from_file(instance)\ny_valid_pred[instance] = nn_graph.forward(sess, x_valid)\nsess.close()\n\ny_valid_pred_label = one_hot_to_dense(y_valid_pred[instance])\ny_valid_label = one_hot_to_dense(y_valid)\ny_val_false_index = []\n\nfor i in range(y_valid_label.shape[0]):\n if y_valid_pred_label[i] != y_valid_label[i]:\n y_val_false_index.append(i)\n\nprint('# false predictions: ', len(y_val_false_index),'out of', len(y_valid))\n\nplt.figure(figsize=(10,15))\nfor j in range(0,5):\n for i in range(0,10):\n if j*10+i<len(y_val_false_index):\n plt.subplot(10,10,j*10+i+1)\n plt.title('%d/%d'%(y_valid_label[y_val_false_index[j*10+i]],\n y_valid_pred_label[y_val_false_index[j*10+i]]))\n plt.imshow(x_valid[y_val_false_index[j*10+i]].reshape(28,28),cmap=cm.inferno) ", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\n# false predictions: 28 out of 4200\n" ], [ "nn_graph = nn_class() # create instance\nsess = nn_graph.load_session_from_file(instance) # receive session \ny_test_pred = {}\ny_test_pred_labels = {}\n\n# split evaluation of test predictions into batches\nkfold = sklearn.model_selection.KFold(40, shuffle=False) \nfor i,(train_index, valid_index) in enumerate(kfold.split(x_test)):\n if i==0:\n y_test_pred[instance] = nn_graph.forward(sess, x_test[valid_index])\n else: \n y_test_pred[instance] = np.concatenate([y_test_pred[instance],\n nn_graph.forward(sess, x_test[valid_index])])\n \nsess.close()\n\ny_test_pred_labels[instance] = one_hot_to_dense(y_test_pred[instance])\n\nprint(instance +': y_test_pred_labels[instance].shape = ', y_test_pred_labels[instance].shape)\nunique, counts = np.unique(y_test_pred_labels[instance], return_counts=True)\nprint(dict(zip(unique, counts)))", "C:\\Users\\jxp161430\\Documents\\Jithin\\review\\Two Sigma\\Exploratory Data Analysis\\CNN1.meta\nINFO:tensorflow:Restoring parameters from CNN1\nCNN1: y_test_pred_labels[instance].shape = (28000,)\n{0: 2763, 1: 3188, 2: 2811, 3: 2816, 4: 2765, 5: 2500, 6: 2749, 7: 2898, 8: 2743, 9: 2767}\n" ], [ "plt.figure(figsize=(10,15))\nfor j in range(0,5):\n for i in range(0,10):\n plt.subplot(10,10,j*10+i+1)\n plt.title('%d'%y_test_pred_labels[instance][j*10+i])\n plt.imshow(x_test[j*10+i].reshape(28,28), cmap=cm.inferno)", "_____no_output_____" ], [ "# Suppose I have 4 models, how would I stack them up\nModel_instance_list = ['CNN1', 'CNN2', 'CNN3', 'CNN4']\n\n# cross validations\n# choose the same seed as was done for training the neural nets\nkfold = sklearn.model_selection.KFold(len(Model_instance_list), shuffle=True, random_state = 123)\n\n# train and test data for meta model\nx_train_meta = np.array([]).reshape(-1,10)\ny_train_meta = 
np.array([]).reshape(-1,10)\nx_test_meta = np.zeros((x_test.shape[0], 10))\n\nprint('Out-of-folds predictions:')\n\n# make out-of-folds predictions from base models\nfor i,(train_index, valid_index) in enumerate(kfold.split(x_train_valid)):\n\n # training and validation data\n x_train = x_train_valid[train_index]\n y_train = y_train_valid[train_index]\n x_valid = x_train_valid[valid_index]\n y_valid = y_train_valid[valid_index]\n\n # load neural network and make predictions\n instance = Model_instance_list[i] \n nn_graph = nn_class()\n sess = nn_graph.load_session_from_file(instance)\n y_train_pred[instance] = nn_graph.forward(sess, x_train[:len(x_valid)])\n y_valid_pred[instance] = nn_graph.forward(sess, x_valid)\n y_test_pred[instance] = nn_graph.forward(sess, x_test)\n sess.close()\n\n # collect train and test data for meta model \n x_train_meta = np.concatenate([x_train_meta, y_valid_pred[instance]])\n y_train_meta = np.concatenate([y_train_meta, y_valid]) \n x_test_meta += y_test_pred[instance]\n\n print(Model_instance_list[i],': train/valid accuracy = %.4f/%.4f'%(\n accuracy_from_one_hot_labels(y_train_pred[instance], y_train[:len(x_valid)]),\n accuracy_from_one_hot_labels(y_valid_pred[instance], y_valid)))\n\n if False:\n break;\n\n# take average of test predictions\nx_test_meta = x_test_meta/(i+1)\ny_test_pred['stacked_models'] = x_test_meta\n\nprint('Stacked models: valid accuracy = %.4f'%accuracy_from_one_hot_labels(x_train_meta,\n y_train_meta))\n ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0693f35b8c6b661c2663269d56e60d6c0e866f5
106,926
ipynb
Jupyter Notebook
plan_pole_transect.ipynb
csherwood-usgs/DUNEX
995ae6336adadffb3cb6bd7a66b95ae8cd4ffd16
[ "CC0-1.0" ]
null
null
null
plan_pole_transect.ipynb
csherwood-usgs/DUNEX
995ae6336adadffb3cb6bd7a66b95ae8cd4ffd16
[ "CC0-1.0" ]
null
null
null
plan_pole_transect.ipynb
csherwood-usgs/DUNEX
995ae6336adadffb3cb6bd7a66b95ae8cd4ffd16
[ "CC0-1.0" ]
1
2021-09-02T00:30:14.000Z
2021-09-02T00:30:14.000Z
300.353933
45,672
0.917709
[ [ [ "## plan_pole_transect\nVisualize pole locations on Pea Island beach transect.\n\nProfiles were extracted from SfM maps by Jenna on 31 August 2021 - Provisional Data.", "_____no_output_____" ], [ "#### Read in profiles\nUse pandas to read profiles; pull out arrays of x, y (UTM meters, same for all profiles) and z (m NAVD88). \nCalculate distance along profile from arbitrary starting point.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfnames = ['crossShore_profile_2019_preDorian.xyz', 'crossShore_profile_2019_postDorian.xyz',\n 'crossShore_profile_2020_Sep.xyz', 'crossShore_profile_2021_Apr.xyz']\ndf0 = pd.read_csv(fnames[0],skiprows=1,sep=',',header=None,names=['x','y','z'])\ndf1 = pd.read_csv(fnames[1],skiprows=1,sep=',',header=None,names=['x','y','z'])\ndf2 = pd.read_csv(fnames[2],skiprows=1,sep=',',header=None,names=['x','y','z'])\ndf3 = pd.read_csv(fnames[3],skiprows=1,sep=',',header=None,names=['x','y','z'])\ndf0.describe()\nx = df0['x'].values\ny = df0['y'].values\nz0 = df0['z'].values\nz1 = df1['z'].values\nz2 = df2['z'].values\nz3 = df3['z'].values\ndist = np.sqrt((x - x[0])**2+(y-y[0])**2)", "_____no_output_____" ] ], [ [ "#### Use Stockdon equation to calculate runup for slope on upper beach and offshore waves\n", "_____no_output_____" ] ], [ [ "def calcR2(H,T,slope,igflag=0):\n \"\"\"\n %\n % [R2,S,setup, Sinc, SIG, ir] = calcR2(H,T,slope,igflag);\n %\n % Calculated 2% runup (R2), swash (S), setup (setup), incident swash (Sinc)\n % and infragravity swash (SIG) elevations based on parameterizations from runup paper\n % also Iribarren (ir)\n % August 2010 - Included 15% runup (R16) statistic that, for a Guassian distribution, \n % represents mean+sigma. It is calculated as R16 = setup + swash/4. \n % In a wave tank, Palmsten et al (2010) found this statistic represented initiation of dune erosion. \n %\n %\n % H = significant wave height, reverse shoaled to deep water\n % T = deep-water peak wave period\n % slope = radians\n % igflag = 0 (default)use full equation for all data\n % = 1 use dissipative-specific calculations when dissipative conditions exist (Iribarren < 0.3)\n % = 2 use dissipative-specific (IG energy) calculation for all data\n %\n % based on:\n % Stockdon, H. F., R. A. Holman, P. A. Howd, and J. Sallenger A. H. 
(2006),\n % Empirical parameterization of setup, swash, and runup,\n % Coastal Engineering, 53, 573-588.\n % author: [email protected]\n # Converted to Python by [email protected]\n \"\"\"\n g = 9.81\n\n # make slopes positive!\n slope = np.abs(slope)\n\n # compute wavelength and Iribarren\n L = (g*T**2) / (2.*np.pi)\n sqHL = np.sqrt(H*L)\n ir = slope/np.sqrt(H/L)\n\n if igflag == 2: # use dissipative equations (IG) for ALL data\n R2 = 1.1*(0.039 * sqHL)\n S = 0.046*sqHL\n setup = 0.016*sqHL\n\n elif igflag == 1 and ir < 0.3: # if dissipative site use diss equations\n R2 = 1.1*(0.039 * sqHL)\n S = 0.046*sqHL\n setup = 0.016*sqHL\n\n else: # if int/ref site, use full equations\n setup = 0.35*slope*sqHL\n Sinc = 0.75*slope*sqHL\n SIG = 0.06*sqHL\n S = np.sqrt(Sinc**2 + SIG**2)\n R2 = 1.1*(setup + S/2.)\n R16 = 1.1*(setup + S/4.)\n\n return R2, S, setup, Sinc, SIG, ir, R16\n\nH = 2.\nT = 17.\nslp = .05\n\nR2, S, setup, Sinc, SIG, ir, R16 = calcR2(H,T,slp,igflag=0)\nmllw = -0.6 #NAVD88\nhigh_water = 1.6 + mllw # high water estimates from Duck and Jenettes\nmaxHW = R2 + high_water\nprint('R2: {:.2f}, max HW: {:.2f}'.format(R2, maxHW))", "R2: 1.75, max HW: 2.75\n" ] ], [ [ "#### Plot profiles and pole locations\nApply arbitrary vertical offset to profiles to collapse them. The range of these offsets suggests fairly big uncertainty in the elevation data. \nDefine a function to plot pole at ground level with 2 m embedded and 3 m above ground. \nMake plot with vertical exaggeration of 2.1 bazillion.\n\n\n`edist` - Horizontal retreat of hypothetical eroded profile. \n`pole_locations` - Locations of the pole along the transect...fiddle with this. \n`polz` - Function to plot the poles at the specified locations, with 2 m buried below local ground elev. and 3 m proud.", "_____no_output_____" ] ], [ [ "# eyeball offsets to make plot easier to interpret (note this elevates May profile)\nioff1 = -.25\nioff2 = +.3\nioff3 = +.25\nmhw = 0.77 # estimated from VDatum\nedist = -5 # distance to offset eroded profile\n#pole_locations = [96, 89, 82, 75, 68, 55, 42] # Chris's original\npole_locations = [104, 95, 84, 76, 68, 55, 42] # Katherine's idea to stretch the array seaward; less overlap\nlidar_res_left = 6 # m, depends on orientation\nlidar_res_right = 4 # m, depends on orientation\n\n# function to plot pole at ground level, given a distance (pdist) along a profile (dist and z)\ndef polz(pdist,dist,z,x,y):\n idx = (dist>=pdist).argmax()\n plt.plot([dist[idx],dist[idx]],[z[idx]-2.,z[idx]+3],'-',c='gray',linewidth=3)\n print('dist, z: {:.1f}, {:.1f} utmx, utmy: {:.1f}, {:.1f}'.format(dist[idx],z[idx],x[idx],y[idx]))\n plt.hlines(np.min(z), pz-lidar_res_left, pz+lidar_res_right, alpha=0.5)\n\nplt.figure(figsize=(12,3))\nplt.plot([dist[0],dist[-1]],[mhw,mhw],'--k',alpha=0.3,label='MHW')\nplt.plot([dist[0],dist[-1]],[maxHW,maxHW],'--r',alpha=0.3,label='Max HW')\nplt.plot(dist,z0,alpha=0.3,label='pre Dorian')\nplt.plot(dist,z1+ioff1,alpha=0.3,label='Post Dorian')\nplt.plot(dist,z2+ioff2,alpha=0.3,label='Sep 2020')\nplt.plot(dist,z3+ioff3,'-k',linewidth=2,label='May 2021')\nplt.plot(dist[500:]+edist,z3[500:]+ioff3,'--r',linewidth=2,label='Eroded')\nfor pz in pole_locations:\n polz(pz,dist,z3+ioff3,x,y)\n\nplt.grid()\nplt.legend()\nplt.ylabel('Elevation (m NAVD88)')\n_ = plt.xlabel('Distance along transect (m)')\n", "dist, z: 104.0, 0.9 utmx, utmy: 456574.3, 3948281.3\ndist, z: 95.0, 1.4 utmx, utmy: 456565.4, 3948279.8\ndist, z: 84.0, 2.2 utmx, utmy: 456554.6, 3948278.0\ndist, z: 76.0, 3.5 utmx, utmy: 456546.7, 
3948276.7\ndist, z: 68.0, 4.5 utmx, utmy: 456538.8, 3948275.4\ndist, z: 55.0, 4.9 utmx, utmy: 456526.0, 3948273.2\ndist, z: 42.0, 2.6 utmx, utmy: 456513.1, 3948271.1\n" ] ], [ [ "**Comments from Katherine here:** \n\nHow much overlap do we really need? Why is this important? Are there severe edge effects? \n\nIt seems to me that we should either 1) try to cover as much of the profile as we can with the LiDARs since you're interested in runup (i.e., minimal to no overlap) or 2) cluster poles in areas where we expect high gradients in bed-level changes or impacts (i.e., where interpolations in bed-level change between sensors may be a bad assumption: around the \"dune toe\"(100 m?) and near the dune face). The whole profile looks steeper right now than pre-Dorian, so maybe we'll get more erosion/collision at the dune?\n\nI plotted the horizontal lidar resolution because I was having a hard time visualizing.", "_____no_output_____" ] ], [ [ "# plot beach slope\nslope = np.diff(z3)/np.diff(dist)\nplt.plot(dist,0.1*(z3+ioff3),'-k',linewidth=2,label='May 2021')\nplt.plot(dist[1:],slope)\nplt.ylim((-.5,.5))", "_____no_output_____" ], [ "# plot smoothed slope v. index\ndef running_mean(x, N):\n return np.convolve(x, np.ones((N,))/N)[(N-1):]\nN = int(2/.12478)\nprint(N)\nsslope = running_mean(slope,N)\nplt.plot(0.1*(z3+ioff3),'-k',linewidth=2,label='May 2021')\nplt.plot(sslope)", "16\n" ], [ "print(np.median(sslope[690:700]))\nprint(np.std(sslope[690:700]))", "-0.05149328538733515\n0.0008375655872101676\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0694b972c113af95b2da6c243a99a1275d90cbd
1,036,689
ipynb
Jupyter Notebook
Project/KNN AND NB Project.ipynb
foday1989/FODAY-DS.Portfolio.io
64f32a8ebaded9d25347fddf07df00d71ee304fe
[ "Unlicense" ]
null
null
null
Project/KNN AND NB Project.ipynb
foday1989/FODAY-DS.Portfolio.io
64f32a8ebaded9d25347fddf07df00d71ee304fe
[ "Unlicense" ]
null
null
null
Project/KNN AND NB Project.ipynb
foday1989/FODAY-DS.Portfolio.io
64f32a8ebaded9d25347fddf07df00d71ee304fe
[ "Unlicense" ]
1
2021-08-02T07:39:09.000Z
2021-08-02T07:39:09.000Z
518,344.5
1,036,688
0.945498
[ [ [ "import pandas as pd\nimport numpy as np\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set(style='white', color_codes=True)", "_____no_output_____" ], [ "dataset = pd.read_csv(\"/content/drive/MyDrive/Colab Notebooks/ortho_knnnb.csv\")", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "print(\"Dimension of dataset:\", dataset.shape)\nprint(\"Number of rows in the dataset:\", dataset.shape[0])\nprint(\"Number of columns in the dataset:\", dataset.shape[1])", "Dimension of dataset: (310, 7)\nNumber of rows in the dataset: 310\nNumber of columns in the dataset: 7\n" ], [ "print(\"Column Names:\",dataset.columns.values)", "Column Names: ['pelvic_incidence' 'pelvic_tilt numeric' 'lumbar_lordosis_angle'\n 'sacral_slope' 'pelvic_radius' 'degree_spondylolisthesis' 'class']\n" ] ], [ [ "We are trying to predict weather the classification is normal or abnormal.", "_____no_output_____" ] ], [ [ "dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 310 entries, 0 to 309\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pelvic_incidence 310 non-null float64\n 1 pelvic_tilt numeric 310 non-null float64\n 2 lumbar_lordosis_angle 310 non-null float64\n 3 sacral_slope 310 non-null float64\n 4 pelvic_radius 310 non-null float64\n 5 degree_spondylolisthesis 310 non-null float64\n 6 class 310 non-null object \ndtypes: float64(6), object(1)\nmemory usage: 17.1+ KB\n" ], [ "dataset.describe()", "_____no_output_____" ], [ "import seaborn as sns\n\nsns.set_style(\"whitegrid\");\nsns.FacetGrid(dataset, hue=\"class\", size=5.5) \\\n .map(plt.scatter, \"pelvic_incidence\", \"pelvic_tilt numeric\") \\\n .add_legend();\nplt.show();", "_____no_output_____" ], [ "sns.set_style(\"whitegrid\");\nsns.pairplot(dataset, hue=\"class\", size=3);\nplt.show()", "_____no_output_____" ], [ "for name in dataset.columns.values[:-1]:\n sns.FacetGrid(dataset, hue=\"class\", size=5).map(sns.distplot, name).add_legend()\nplt.show()", "_____no_output_____" ], [ "X = dataset.iloc[:, :-1]\ndisplay(X)\n\nY = dataset.iloc[:, -1]\ndisplay(Y)", "_____no_output_____" ], [ "symptom_class = ['Abnormal:1', 'Normal:0']", "_____no_output_____" ], [ "dataset['symptom_class'] \ndataset.head()", "_____no_output_____" ], [ "from sklearn import preprocessing", "_____no_output_____" ], [ "label_encoder=preprocessing.LabelEncoder()\ndataset['symptom_class']=label_encoder.fit_transform(dataset['class'])", "_____no_output_____" ], [ "dataset.head()", "_____no_output_____" ], [ "dataset= dataset.drop('class', axis=1)\ndataset.head()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\ntrain, test = train_test_split(dataset, test_size=0.20,random_state = 1)", "_____no_output_____" ], [ "train_x = train.drop(['symptom_class'], axis = 1)\ntrain_y = train['symptom_class'] \n\ntest_x = test.drop(['symptom_class'],axis = 1)\ntest_y = test['symptom_class']", "_____no_output_____" ], [ "print('Dimension of train_x :',train_x.shape)\nprint('Dimension of train_y :',train_y.shape)\nprint('Dimension of test_x :',test_x.shape)\nprint('Dimension of test_y :',test_y.shape)", "Dimension of train_x : (248, 6)\nDimension of train_y : (248,)\nDimension of test_x : (62, 6)\nDimension of test_y : (62,)\n" ], [ "from sklearn.neighbors import KNeighborsClassifier\nKNN = KNeighborsClassifier(n_neighbors=3)\nKNN.fit(train_x, train_y)", "_____no_output_____" ], [ 
"pred = KNN.predict(test_x)\npred", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\n\nprint('The accuracy of the KNN with K=3 is {}%'.format(round(accuracy_score(pred,test_y)*100,2)))", "The accuracy of the KNN with K=3 is 83.87%\n" ], [ "from sklearn.metrics import accuracy_score\n\nprint('The accuracy of the KNN with K=5 is {}%'.format(round(accuracy_score(pred,test_y)*100,2)))", "The accuracy of the KNN with K=5 is 83.87%\n" ], [ "train_accuracy =[]\ntest_accuracy = []\nfor k in range(1,15):\n \n KNN = KNeighborsClassifier(n_neighbors=k)\n \n KNN.fit(train_x, train_y)\n \n train_pred = KNN.predict(train_x)\n train_score = accuracy_score(train_pred, train_y)\n train_accuracy.append(train_score)\n \n test_pred = KNN.predict(test_x)\n test_score = accuracy_score(test_pred, test_y)\n test_accuracy.append(test_score)\n\nprint(\"Best accuracy is {} with K = {}\".format(max(test_accuracy),1+test_accuracy.index(max(test_accuracy))))", "Best accuracy is 0.8548387096774194 with K = 1\n" ], [ "plt.figure(figsize=[8,5]) #Accuracy Plot\nplt.plot(range(1,15), test_accuracy, label = 'Testing Accuracy')\nplt.plot(range(1,15), train_accuracy, label = 'Training Accuracy')\nplt.legend()\nplt.title('\\nTrain Accuracy Vs Test Accuracy\\n',fontsize=15)\nplt.xlabel('Value of K',fontsize=15)\nplt.ylabel('Accuracy',fontsize=15)\nplt.xticks(range(1,15))\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "from sklearn.model_selection import GridSearchCV\n\nknn_params = {\"n_neighbors\": list(range(1,15,1)), 'metric': ['euclidean','manhattan']}\ngrid_knn = GridSearchCV(KNeighborsClassifier(), knn_params, cv=5)\ngrid_knn.fit(train_x, train_y)", "_____no_output_____" ], [ "knn_besthypr = grid_knn.best_estimator_ #KNN best estimator\nknn_besthypr", "_____no_output_____" ], [ "print(\"Tuned hyperparameter: {}\".format(grid_knn.best_params_)) \nprint(\"Best score: {}\".format(grid_knn.best_score_))", "Tuned hyperparameter: {'metric': 'manhattan', 'n_neighbors': 10}\nBest score: 0.8546122448979592\n" ], [ "knn = knn_besthypr.fit(train_x,train_y) #Using best hyperparameter\ny_pred = knn.predict(test_x)\nacc = accuracy_score(y_pred,test_y)\nprint('The accuracy of the KNN with K = {} is {}%'.format(knn_besthypr.n_neighbors,round(acc*100,2)))", "The accuracy of the KNN with K = 10 is 80.65%\n" ], [ "test = test.reset_index(drop = True) #actual value and predicted value\ntest[\"pred_value\"] = y_pred\ntest", "_____no_output_____" ], [ "from sklearn.naive_bayes import GaussianNB\nnvclassifier = GaussianNB()\nnvclassifier.fit(train_x, train_y)", "_____no_output_____" ], [ "y_pred = nvclassifier.predict(test_x)\nprint(y_pred)", "[0 0 0 0 0 0 1 0 0 0 1 1 1 1 1 0 1 1 1 0 0 1 1 0 1 0 0 0 0 0 0 1 1 1 1 0 1\n 1 0 0 1 1 0 1 0 0 1 1 0 1 0 1 1 0 0 0 0 0 0 0 1 0]\n" ], [ "test = test.reset_index(drop = True)\ntest[\"pred_value\"] = y_pred\ntest.head()", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(test_y, y_pred)\nplt.figure(figsize=(6,5))\nsns.heatmap(cm, annot=True)\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()", "_____no_output_____" ], [ "a = cm.shape\ncorrPred = 0\nfalsePred = 0\n\nfor row in range(a[0]):\n for c in range(a[1]):\n if row == c:\n corrPred += cm[row,c]\n else:\n falsePred += cm[row,c]\nprint(\"*\"*70)\nprint('Correct predictions: ', corrPred)\nprint('False predictions', falsePred)\nprint(\"*\"*70)\nacc = corrPred/cm.sum()\nprint ('Accuracy of the Naive Bayes Clasification is {}% 
'.format(round(acc*100,2)))\nprint(\"*\"*70)", "**********************************************************************\nCorrect predictions: 50\nFalse predictions 12\n**********************************************************************\nAccuracy of the Naive Bayes Clasification is 80.65% \n**********************************************************************\n" ], [ "from sklearn.metrics import accuracy_score\n\nprint('The accuracy of the NB is {}%'.format(round(accuracy_score(y_pred,test_y)*100,2)))", "The accuracy of the NB is 80.65%\n" ], [ "nvclassifier.predict_proba(test_x)[:10]", "_____no_output_____" ] ], [ [ "I will recommend KNN than NB because it has a high accuracy than NB.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0695a87c6fd889e4212a333975f805628a94b07
8,931
ipynb
Jupyter Notebook
kurstag6_API_Scraping1/BeautifulSoup_Intro.ipynb
Priskawa/kurstag2
028d5b07011d7ddc2b2416aa40b7f94dee134614
[ "MIT" ]
null
null
null
kurstag6_API_Scraping1/BeautifulSoup_Intro.ipynb
Priskawa/kurstag2
028d5b07011d7ddc2b2416aa40b7f94dee134614
[ "MIT" ]
null
null
null
kurstag6_API_Scraping1/BeautifulSoup_Intro.ipynb
Priskawa/kurstag2
028d5b07011d7ddc2b2416aa40b7f94dee134614
[ "MIT" ]
null
null
null
30.172297
920
0.498376
[ [ [ "!ls", "\u001b[34m06 APIs, Scraping I\u001b[m\u001b[m die_ersten_zehn_erdbeben\r\nAPI_uebung1_komb_Abfrage.ipynb die_ersten_zehn_erdbeben.csv\r\nUntitled.ipynb test.htm\r\nÜbung Erdbeben.ipynb\r\n" ], [ "file = open(\"test.htm\", \"r\")", "_____no_output_____" ], [ "file", "_____no_output_____" ], [ "file.read()", "_____no_output_____" ], [ "f = open(\"test.htm\", \"r\").read()", "_____no_output_____" ], [ "f", "_____no_output_____" ], [ "from bs4 import BeautifulSoup", "_____no_output_____" ], [ "soup = BeautifulSoup(f,\"lxml\") #wenn das nicht geht, dann untenstehende Zeile benutzen...", "_____no_output_____" ], [ "soup = BeautifulSoup(f,\"html.parser\")", "_____no_output_____" ], [ "soup", "_____no_output_____" ], [ "soup.find_all(\"li\")", "_____no_output_____" ], [ "lst = []\nfor elem in soup.find_all(\"li\"):\n lst.append(elem.text)", "_____no_output_____" ], [ "lst", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06961746c21864d0d86a61305f48f55f1ee3856
5,673
ipynb
Jupyter Notebook
content/lessons/07/Untitled.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
content/lessons/07/Untitled.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
content/lessons/07/Untitled.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
5
2018-09-17T03:54:06.000Z
2019-10-17T02:47:20.000Z
17.783699
36
0.410541
[ [ [ "name = \"michael fudge\"\n\nname.count('e',4,7)", "_____no_output_____" ], [ "len(name)", "_____no_output_____" ], [ "name.startswith('mi')", "_____no_output_____" ], [ "x = 14\ndir(x)\n", "_____no_output_____" ], [ "import random", "_____no_output_____" ], [ "choice(['yes','no'])", "_____no_output_____" ], [ "input()", "s\n" ], [ "name = 'mike'\nfor character in name:\n print(character)", "m\ni\nk\ne\n" ], [ "print(name,name[::-1])", "mike ekim\n" ], [ "band = 'abba'\nprint(band, band[::-1])", "abba abba\n" ], [ "state = 'california'\nstate[::3]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06974ad6c9b52eb153878b2cccf6a0d7ef3b9fd
1,047,925
ipynb
Jupyter Notebook
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
e4f05bd3224b5e15a45da52e864bcefe757a41d6
[ "MIT" ]
null
null
null
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
e4f05bd3224b5e15a45da52e864bcefe757a41d6
[ "MIT" ]
null
null
null
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
e4f05bd3224b5e15a45da52e864bcefe757a41d6
[ "MIT" ]
null
null
null
772.236551
48,318
0.94214
[ [ [ "# Analysis of how mentions of a stock on WSB relates to stock prices\n\nWallStreetBets is a popular forum on reddit known for going to the moon, apes and stonks. Jokes aside, despite all of the ridiculous bad trades, undecipherable jargon and love for memes, it's effect on the stock market is undeniable. Therefore in this project, we want to investigate how the reaction of reddit users on the forum relate to actual changes in the stock market.\n", "_____no_output_____" ] ], [ [ "import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt \nimport warnings\nimport os \nimport tensorflow as tf\n\nfrom datetime import datetime\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ] ], [ [ "### Reddit Post Data\n\nSource: https://huggingface.co/datasets/SocialGrep/reddit-wallstreetbets-aug-2021\n", "_____no_output_____" ] ], [ [ "# TODO: add shortcut from shared drive for:\n# wsb-aug-2021-comments.csv\n\ndef load_data(filename, path=\"/content/drive/MyDrive/\"):\n # read csv file and drop indices \n df = pd.read_csv(os.path.join(path, filename))\n df = df.dropna(axis=0)\n # convert utc to datetime format \n df[\"date\"] = pd.to_datetime(df[\"created_utc\"],unit=\"s\").dt.date\n return df \n\nfilename = \"wsb-aug-2021-comments.csv\"\ndf = load_data(filename)", "_____no_output_____" ] ], [ [ "#### Overal Sentiment on the Subreddit", "_____no_output_____" ] ], [ [ "def sentiment_bins(df):\n \n # extract sentiment \n sent_df = df[[\"date\",\"sentiment\"]]\n bins = {} \n bins[\"positive\"] = sent_df.loc[sent_df[\"sentiment\"] > 0.25,:]\n bins[\"negative\"] = sent_df.loc[sent_df[\"sentiment\"] < -0.25,:]\n bins[\"neutral\"] = sent_df.loc[sent_df[\"sentiment\"].between(-0.25,0.25),:]\n \n # count the posts in each bin for each day \n for name in bins:\n bins[name] = bins[name].groupby(['date']).count()\n counts = sent_df.groupby(['date']).count()\n return bins, counts \n\ndef plot_sentiment(df,normalize=True, title=None):\n\n # collect sentiment into three bins \n bins, counts = sentiment_bins(df)\n\n # plot counts of each bin every day \n colours = [\"lightgreen\", \"coral\", \"grey\"]\n for i, name in enumerate([\"positive\", \"negative\", \"neutral\"]):\n dates = bins[name].index \n total_counts = counts.loc[dates,:].values.reshape(-1)\n bin_counts = bins[name][\"sentiment\"].values\n if not normalize: \n total_counts = 1 \n plt.plot(dates, bin_counts / total_counts, \n alpha=0.7, c=colours[i])\n \n plt.legend([\"positive\", \"negative\", \"neutral\"])\n if title:\n plt.title(title)\n plt.xticks(rotation=20)\n plt.show() ", "_____no_output_____" ], [ "plot_sentiment(df,normalize=False, title=\"overall-unnormalized\")\nplot_sentiment(df,normalize=True, title=\"overall-normalized\")", "_____no_output_____" ], [ "# distribution of sentiment of the posts \nplt.hist(df[\"sentiment\"], color=\"coral\", \n alpha=0.5)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Sentiment for individual stocks", "_____no_output_____" ] ], [ [ "stocks = [\"GME\", \"AMC\", \"AMD\",\"AMZN\", \"PLTR\", \"NVDA\"]\nfor stock in stocks:\n gme_posts = df.loc[df[\"body\"].str.contains(stock),:] \n plot_sentiment(gme_posts,title=f\"{stock}-normalized\", normalize=True)\n plot_sentiment(gme_posts,title=f\"{stock}-unnormalized\", normalize=False)", "_____no_output_____" ] ], [ [ "### Analyzing Stock Data ", "_____no_output_____" ] ], [ [ "def get_daily_sentiment(df, stock):\n\n # intialize df with all 
dates in august \n datelist = pd.date_range(datetime(2021,8,1), periods=31).tolist()\n sentiment_df = pd.DataFrame({\"date\":datelist})\n sentiment_df = sentiment_df.set_index(\"date\")\n \n # get all posts mentioning stock \n posts = df.loc[df[\"body\"].str.contains(stock),:]\n bins, counts = sentiment_bins(posts)\n\n # get number of posts in each bin \n for name,values in bins.items():\n values = values \n values.index = pd.to_datetime(values.index)\n values = values.rename(columns={\"sentiment\":name})\n sentiment_df = sentiment_df.join(values)\n\n # get the total number of posts for each day \n counts.index = pd.to_datetime(counts.index)\n counts = counts.rename(columns={\"sentiment\":\"count\"})\n\n sentiment_df = sentiment_df.join(counts)\n sentiment_df = sentiment_df.fillna(0)\n return sentiment_df\n\ndef load_stocks(filename=\"drive/MyDrive/Stock Prices.csv\"):\n\n stonks = pd.read_csv(filename)\n\n # add missing dates to df \n stonks.Date = pd.to_datetime(stonks.Date)\n datelist = pd.date_range(datetime(2021,8,1), periods=31).tolist()\n dates = pd.DataFrame({\"Date\":datelist})\n stonks_df = pd.merge(dates, stonks, on=\"Date\", how=\"left\")\n\n # fill the null values using closest date\n stonks_df = stonks_df.interpolate(method='nearest')\n stonks_df = stonks_df.set_index('Date')\n return stonks_df", "_____no_output_____" ], [ "# we identified the forum's favourite stocks \nstocks = [\"GME\", \"AMC\", \"AMD\",\"AMZN\", \"PLTR\", \"NVDA\"]\n\n# retrieve the sentiment informationn for each stock \nsentiment_df = {stock: get_daily_sentiment(df, stock) for stock in stocks}\nstonks_df = load_stocks() ", "_____no_output_____" ] ], [ [ "#### visualize stock prices ", "_____no_output_____" ] ], [ [ "def scale(x):\n minx = np.min(x); maxx = np.max(x)\n return (x-minx) / (maxx-minx)\n\nfor stock in stocks:\n # plot scaled price against the number of posts\n price = stonks_df.dropna(axis=0)[[stock]]\n num_posts = sentiment_df[stock][\"count\"].loc[price.index,].values\n plt.plot(price.index, scale(price), alpha=0.7)\n plt.plot(price.index, scale(num_posts), alpha=0.7)\n plt.xticks(rotation=20)\n plt.title(f\"{stock}: scaled stock price vs number of posts\")\n plt.legend([\"stock price\", \"posts count\"])\n plt.show()", "_____no_output_____" ], [ "stonks_log = np.log(stonks_df)\nfor stock in stocks:\n returns = stonks_df.diff().dropna(axis=0)[[stock]]\n log_returns = stonks_log.diff().dropna(axis=0)[[stock]]\n num_posts = sentiment_df[stock][\"count\"].loc[returns.index,].values\n plt.plot(returns.index, scale(returns), alpha=0.7)\n plt.plot(returns.index, scale(log_returns),alpha=0.7)\n plt.plot(returns.index, scale(num_posts),alpha=0.7)\n plt.xticks(rotation=20)\n plt.title(f\"{stock}: returns vs number of posts, scaled\")\n plt.legend([\"returns\", \"log returns\", \"posts count\"])\n plt.show()", "_____no_output_____" ] ], [ [ "#### sentiment vs stock prices", "_____no_output_____" ] ], [ [ "# from sklearn.linear_model import LinearRegression\nimport statsmodels.api as sm\n\nfor stock in stocks:\n\n # get stock prices \n y = stonks_df.dropna(axis=0)[[stock]]\n print(\"=\"*50)\n print(f'name: {stock}, total:{sum(sentiment_df[stock][\"count\"])}')\n print(\"=\"*50)\n\n for col in sentiment_df[stock].columns:\n\n # fit a linear model using the number of posts in each bin \n X = sentiment_df[stock][col].loc[y.index,].values\n X = sm.add_constant(X)\n mod = sm.OLS(y,X)\n res = mod.fit()\n print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')", 
"==================================================\nname: GME, total:7867\n==================================================\npositive: 0.228, pval: 0.009\nnegative: 0.183, pval: 0.021\nneutral: 0.252, pval: 0.006\ncount: 0.229, pval: 0.009\n==================================================\nname: AMC, total:5130\n==================================================\npositive: 0.068, pval: 0.170\nnegative: 0.024, pval: 0.423\nneutral: 0.162, pval: 0.030\ncount: 0.096, pval: 0.102\n==================================================\nname: AMD, total:3060\n==================================================\npositive: 0.445, pval: 0.000\nnegative: 0.362, pval: 0.001\nneutral: 0.415, pval: 0.000\ncount: 0.424, pval: 0.000\n==================================================\nname: AMZN, total:1330\n==================================================\npositive: 0.032, pval: 0.351\nnegative: 0.077, pval: 0.145\nneutral: 0.064, pval: 0.187\ncount: 0.062, pval: 0.191\n==================================================\nname: PLTR, total:3223\n==================================================\npositive: 0.003, pval: 0.761\nnegative: 0.001, pval: 0.845\nneutral: 0.000, pval: 0.939\ncount: 0.001, pval: 0.851\n==================================================\nname: NVDA, total:1799\n==================================================\npositive: 0.043, pval: 0.282\nnegative: 0.001, pval: 0.855\nneutral: 0.033, pval: 0.348\ncount: 0.028, pval: 0.387\n" ], [ "norm_df = {}\nfor stock in sentiment_df:\n norm_df[stock] = sentiment_df[stock].copy()\n for col in norm_df[stock].columns:\n if col != \"count\":\n norm_df[stock][col] = norm_df[stock][col]/ norm_df[stock][\"count\"]", "_____no_output_____" ], [ "import statsmodels.api as sm\n\nnames = [\"intercept\"] + list(norm_df[\"GME\"].columns)\nfor stock in stocks:\n\n # get stock prices \n y = stonks_df.dropna(axis=0)[[stock]]\n print(\"=\"*50)\n print(f'name: {stock}, total:{sum(sentiment_df[stock][\"count\"])}')\n print(\"=\"*50)\n\n # fit a linear model using the number of posts in each bin \n X = norm_df[stock].loc[y.index,:].values\n X = sm.add_constant(X)\n mod = sm.OLS(y,X)\n res = mod.fit()\n print(f'{\"rsquared\"}: {res.rsquared:.3f}')\n for name, pval in zip(names, res.pvalues):\n print(f'{name}: {pval:.3f}')", "==================================================\nname: GME, total:7867\n==================================================\nrsquared: 0.381\nintercept: 0.000\npositive: 0.111\nnegative: 0.100\nneutral: 0.028\ncount: 0.056\n==================================================\nname: AMC, total:5130\n==================================================\nrsquared: 0.206\nintercept: 0.000\npositive: 0.178\nnegative: 0.371\nneutral: 0.061\ncount: 0.229\n==================================================\nname: AMD, total:3060\n==================================================\nrsquared: 0.511\nintercept: 0.000\npositive: 0.000\nnegative: 0.003\nneutral: 0.000\ncount: 0.000\n==================================================\nname: AMZN, total:1330\n==================================================\nrsquared: 0.081\nintercept: 0.000\npositive: 0.000\nnegative: 0.000\nneutral: 0.000\ncount: 0.313\n==================================================\nname: PLTR, total:3223\n==================================================\nrsquared: 0.021\nintercept: 0.000\npositive: 0.001\nnegative: 0.135\nneutral: 0.054\ncount: 0.822\n==================================================\nname: NVDA, 
total:1799\n==================================================\nrsquared: 0.104\nintercept: 0.000\npositive: 0.000\nnegative: 0.117\nneutral: 0.000\ncount: 0.278\n" ] ], [ [ "#### sentiment vs stock direction ", "_____no_output_____" ] ], [ [ "from sklearn.metrics import roc_auc_score\n\nstonks_diff = stonks_df.diff()\nfor stock in stocks:\n\n # check if stock increased \n y = (stonks_diff.dropna(axis=0)[[stock]] > 0) * 1\n print(\"=\"*50)\n print(f'name: {stock}, total:{sum(sentiment_df[stock][\"count\"])}')\n print(\"=\"*50)\n\n for col in sentiment_df[stock].columns:\n\n # fit a linear model using the number of posts in each bin \n X = sentiment_df[stock][col].loc[y.index,].values\n X = sm.add_constant(X)\n log_reg = sm.Logit(y, X).fit(disp=False)\n ypred = log_reg.predict(X)\n score = roc_auc_score(y.values, ypred)\n acc = np.mean((ypred > 0.5) == y.values)\n print(f'{col}: auc:{score:.3f}, acc:{acc:.3f}')\n", "==================================================\nname: GME, total:7867\n==================================================\npositive: auc:0.478, acc:0.622\nnegative: auc:0.461, acc:0.633\nneutral: auc:0.428, acc:0.622\ncount: auc:0.475, acc:0.622\n==================================================\nname: AMC, total:5130\n==================================================\npositive: auc:0.481, acc:0.607\nnegative: auc:0.553, acc:0.607\nneutral: auc:0.548, acc:0.607\ncount: auc:0.548, acc:0.607\n==================================================\nname: AMD, total:3060\n==================================================\npositive: auc:0.564, acc:0.612\nnegative: auc:0.508, acc:0.622\nneutral: auc:0.550, acc:0.612\ncount: auc:0.536, acc:0.612\n==================================================\nname: AMZN, total:1330\n==================================================\npositive: auc:0.544, acc:0.643\nnegative: auc:0.556, acc:0.643\nneutral: auc:0.606, acc:0.612\ncount: auc:0.531, acc:0.643\n==================================================\nname: PLTR, total:3223\n==================================================\npositive: auc:0.497, acc:0.551\nnegative: auc:0.612, acc:0.546\nneutral: auc:0.513, acc:0.566\ncount: auc:0.516, acc:0.551\n==================================================\nname: NVDA, total:1799\n==================================================\npositive: auc:0.698, acc:0.599\nnegative: auc:0.671, acc:0.569\nneutral: auc:0.663, acc:0.599\ncount: auc:0.684, acc:0.584\n" ], [ "from sklearn.metrics import roc_auc_score\n\nnames = [\"intercept\"] + list(norm_df[\"GME\"].columns)\nstonks_diff = stonks_df.diff()\nfor stock in stocks:\n\n # get stock prices \n y = (stonks_diff.dropna(axis=0)[[stock]] > 0) * 1\n print(\"=\"*50)\n print(f'name: {stock}, total:{sum(sentiment_df[stock][\"count\"])}')\n print(\"=\"*50)\n\n # fit a linear model using the number of posts in each bin \n X = norm_df[stock].loc[y.index,:].values\n X = sm.add_constant(X)\n log_reg = sm.Logit(y, X).fit(disp=False)\n ypred = log_reg.predict(X)\n score = roc_auc_score(y.values, ypred)\n acc = np.mean((ypred > 0.5) == y.values)\n print(f'{col}: auc:{score:.3f}, acc:{acc:.3f}')", "==================================================\nname: GME, total:7867\n==================================================\ncount: auc:0.639, acc:0.612\n==================================================\nname: AMC, total:5130\n==================================================\ncount: auc:0.561, acc:0.592\n==================================================\nname: AMD, 
total:3060\n==================================================\ncount: auc:0.644, acc:0.612\n==================================================\nname: AMZN, total:1330\n==================================================\ncount: auc:0.839, acc:0.571\n==================================================\nname: PLTR, total:3223\n==================================================\ncount: auc:0.740, acc:0.515\n==================================================\nname: NVDA, total:1799\n==================================================\ncount: auc:0.626, acc:0.584\n" ] ], [ [ "#### sentiment vs returns", "_____no_output_____" ] ], [ [ "stonks_diff = stonks_df.diff()\nfor stock in stocks:\n y = stonks_diff.dropna(axis=0)[[stock]]\n print(\"=\"*50)\n print(f'name: {stock}')\n print(\"=\"*50)\n for col in sentiment_df[stock].columns:\n X = sentiment_df[stock][col].loc[y.index,].values\n X = sm.add_constant(X)\n mod = sm.OLS(y,X)\n res = mod.fit()\n print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')", "==================================================\nname: GME\n==================================================\npositive: 0.297, pval: 0.003\nnegative: 0.220, pval: 0.012\nneutral: 0.298, pval: 0.003\ncount: 0.282, pval: 0.004\n==================================================\nname: AMC\n==================================================\npositive: 0.047, pval: 0.270\nnegative: 0.006, pval: 0.706\nneutral: 0.070, pval: 0.175\ncount: 0.045, pval: 0.280\n==================================================\nname: AMD\n==================================================\npositive: 0.076, pval: 0.157\nnegative: 0.051, pval: 0.247\nneutral: 0.086, pval: 0.130\ncount: 0.076, pval: 0.157\n==================================================\nname: AMZN\n==================================================\npositive: 0.013, pval: 0.562\nnegative: 0.055, pval: 0.229\nneutral: 0.002, pval: 0.806\ncount: 0.016, pval: 0.520\n==================================================\nname: PLTR\n==================================================\npositive: 0.299, pval: 0.003\nnegative: 0.206, pval: 0.015\nneutral: 0.206, pval: 0.015\ncount: 0.247, pval: 0.007\n==================================================\nname: NVDA\n==================================================\npositive: 0.029, pval: 0.388\nnegative: 0.045, pval: 0.276\nneutral: 0.040, pval: 0.311\ncount: 0.039, pval: 0.312\n" ], [ "# incorporate the sentiment for each day as well \nnames = [\"intercept\"] + list(norm_df[\"GME\"].columns)\nstonks_diff = stonks_df.diff()\nfor stock in stocks:\n\n # get stock prices \n y = stonks_diff.dropna(axis=0)[[stock]]\n print(\"=\"*50)\n print(f'name: {stock}, total:{sum(sentiment_df[stock][\"count\"])}')\n print(\"=\"*50)\n\n # fit a linear model using the number of posts in each bin \n X = norm_df[stock].loc[y.index,:].values\n X = sm.add_constant(X)\n mod = sm.OLS(y,X)\n res = mod.fit()\n print(f'{\"rsquared\"}: {res.rsquared:.3f}')\n for name, pval in zip(names, res.pvalues):\n print(f'{name}: {pval:.3f}')", "==================================================\nname: GME, total:7867\n==================================================\nrsquared: 0.314\nintercept: 0.271\npositive: 0.314\nnegative: 0.761\nneutral: 0.503\ncount: 0.005\n==================================================\nname: AMC, total:5130\n==================================================\nrsquared: 0.059\nintercept: 0.453\npositive: 0.603\nnegative: 0.601\nneutral: 0.885\ncount: 
0.291\n==================================================\nname: AMD, total:3060\n==================================================\nrsquared: 0.082\nintercept: 0.481\npositive: 0.763\nnegative: 0.698\nneutral: 0.903\ncount: 0.163\n==================================================\nname: AMZN, total:1330\n==================================================\nrsquared: 0.165\nintercept: 0.770\npositive: 0.436\nnegative: 0.251\nneutral: 0.046\ncount: 0.308\n==================================================\nname: PLTR, total:3223\n==================================================\nrsquared: 0.284\nintercept: 0.531\npositive: 0.355\nnegative: 0.895\nneutral: 0.466\ncount: 0.007\n==================================================\nname: NVDA, total:1799\n==================================================\nrsquared: 0.049\nintercept: 0.855\npositive: 0.642\nnegative: 0.634\nneutral: 0.981\ncount: 0.300\n" ] ], [ [ "#### sentiment vs log returns ", "_____no_output_____" ] ], [ [ "stonks_log = np.log(stonks_df)\nfor stock in stocks:\n y = stonks_log.diff().dropna(axis=0)[[stock]]\n print(\"=\"*50)\n print(f'name: {stock}')\n print(\"=\"*50)\n for col in sentiment_df[stock].columns:\n X = sentiment_df[stock][col].loc[y.index,].values\n X = sm.add_constant(X)\n mod = sm.OLS(y,X)\n res = mod.fit()\n print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')", "==================================================\nname: GME\n==================================================\npositive: 0.287, pval: 0.003\nnegative: 0.212, pval: 0.014\nneutral: 0.289, pval: 0.003\ncount: 0.272, pval: 0.004\n==================================================\nname: AMC\n==================================================\npositive: 0.035, pval: 0.341\nnegative: 0.003, pval: 0.775\nneutral: 0.057, pval: 0.219\ncount: 0.035, pval: 0.342\n==================================================\nname: AMD\n==================================================\npositive: 0.071, pval: 0.171\nnegative: 0.047, pval: 0.270\nneutral: 0.081, pval: 0.143\ncount: 0.071, pval: 0.171\n==================================================\nname: AMZN\n==================================================\npositive: 0.014, pval: 0.554\nnegative: 0.056, pval: 0.225\nneutral: 0.003, pval: 0.797\ncount: 0.017, pval: 0.512\n==================================================\nname: PLTR\n==================================================\npositive: 0.294, pval: 0.003\nnegative: 0.202, pval: 0.016\nneutral: 0.202, pval: 0.016\ncount: 0.243, pval: 0.008\n==================================================\nname: NVDA\n==================================================\npositive: 0.031, pval: 0.372\nnegative: 0.050, pval: 0.253\nneutral: 0.041, pval: 0.300\ncount: 0.042, pval: 0.297\n" ] ], [ [ "The ", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "### Further Areas of Interest ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d06986ab818ac5e64930014c5e875a14c5903c4d
48,562
ipynb
Jupyter Notebook
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
d21137f45201ad00ff6056b78c1a6d76162c9182
[ "CC-BY-3.0" ]
3
2015-09-11T17:22:54.000Z
2021-11-10T09:20:53.000Z
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
d21137f45201ad00ff6056b78c1a6d76162c9182
[ "CC-BY-3.0" ]
2
2015-08-18T19:50:46.000Z
2015-08-31T23:09:00.000Z
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
d21137f45201ad00ff6056b78c1a6d76162c9182
[ "CC-BY-3.0" ]
1
2020-12-02T13:20:37.000Z
2020-12-02T13:20:37.000Z
47.891519
28,410
0.751719
[ [ [ "# NumPy arrays", "_____no_output_____" ], [ "Nikolay Koldunov\n\[email protected]", "_____no_output_____" ], [ "This is part of [**Python for Geosciences**](https://github.com/koldunovn/python_for_geosciences) notes.", "_____no_output_____" ], [ "================", "_____no_output_____" ], [ "<img height=\"100\" src=\"files/numpy.png\" >", "_____no_output_____" ], [ "- a powerful N-dimensional array object\n- sophisticated (broadcasting) functions\n- tools for integrating C/C++ and Fortran code\n- useful linear algebra, Fourier transform, and random number capabilities\n", "_____no_output_____" ] ], [ [ "#allow graphics inline\n%matplotlib inline \nimport matplotlib.pylab as plt #import plotting library\nimport numpy as np #import numpy library\nnp.set_printoptions(precision=3) # this is just to make the output look better", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ], [ "I am going to use some real data as an example of array manipulations. This will be the AO index downloaded by wget through a system call (you have to be on Linux of course):", "_____no_output_____" ] ], [ [ "!wget www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii", "_____no_output_____" ] ], [ [ "This is how data in the file look like (we again use system call for *head* command):", "_____no_output_____" ] ], [ [ "!head monthly.ao.index.b50.current.ascii", " 1950 1 -0.60310E-01\r\n 1950 2 0.62681E+00\r\n 1950 3 -0.81275E-02\r\n 1950 4 0.55510E+00\r\n 1950 5 0.71577E-01\r\n 1950 6 0.53857E+00\r\n 1950 7 -0.80248E+00\r\n 1950 8 -0.85101E+00\r\n 1950 9 0.35797E+00\r\n 1950 10 -0.37890E+00\r\n" ] ], [ [ "Load data in to a variable:", "_____no_output_____" ] ], [ [ "ao = np.loadtxt('monthly.ao.index.b50.current.ascii')", "_____no_output_____" ], [ "ao", "_____no_output_____" ], [ "ao.shape", "_____no_output_____" ] ], [ [ "So it's a *row-major* order. Matlab and Fortran use *column-major* order for arrays.", "_____no_output_____" ] ], [ [ "type(ao)", "_____no_output_____" ] ], [ [ "Numpy arrays are statically typed, which allow faster operations", "_____no_output_____" ] ], [ [ "ao.dtype", "_____no_output_____" ] ], [ [ "You can't assign value of different type to element of the numpy array:", "_____no_output_____" ] ], [ [ "ao[0,0] = 'Year'", "_____no_output_____" ] ], [ [ "Slicing works similarly to Matlab:", "_____no_output_____" ] ], [ [ "ao[0:5,:]", "_____no_output_____" ] ], [ [ "One can look at the data. This is done by matplotlib.pylab module that we have imported in the beggining as `plt`. We will plot only first 780 poins:", "_____no_output_____" ] ], [ [ "plt.plot(ao[:780,2])", "_____no_output_____" ] ], [ [ "## Index slicing", "_____no_output_____" ], [ "In general it is similar to Matlab", "_____no_output_____" ], [ "First 12 elements of **second** column (months). Remember that indexing starts with 0:", "_____no_output_____" ] ], [ [ "ao[0:12,1]", "_____no_output_____" ] ], [ [ "First raw:", "_____no_output_____" ] ], [ [ "ao[0,:]", "_____no_output_____" ] ], [ [ "We can create mask, selecting all raws where values in second raw (months) equals 10 (October):", "_____no_output_____" ] ], [ [ "mask = (ao[:,1]==10)", "_____no_output_____" ] ], [ [ "Here we apply this mask and show only first 5 rowd of the array:", "_____no_output_____" ] ], [ [ "ao[mask][:5,:]", "_____no_output_____" ] ], [ [ "You don't have to create separate variable for mask, but apply it directly. 
Here instead of first five rows I show five last rows:", "_____no_output_____" ] ], [ [ "ao[ao[:,1]==10][-5:,:]", "_____no_output_____" ] ], [ [ "You can combine conditions. In this case we select October-December data (only first 10 elements are shown):", "_____no_output_____" ] ], [ [ "ao[(ao[:,1]>=10)&(ao[:,1]<=12)][0:10,:]", "_____no_output_____" ] ], [ [ "You can assighn values to subset of values (*thi expression fixes the problem with very small value at 2015-04*)", "_____no_output_____" ] ], [ [ "ao[ao<-10]=0", "_____no_output_____" ] ], [ [ "## Basic operations", "_____no_output_____" ], [ "Create example array from first 12 values of second column and perform some basic operations:", "_____no_output_____" ] ], [ [ "months = ao[0:12,1]\nmonths", "_____no_output_____" ], [ "months+10", "_____no_output_____" ], [ "months*20", "_____no_output_____" ], [ "months*months", "_____no_output_____" ] ], [ [ "## Basic statistics", "_____no_output_____" ], [ "Create *ao_values* that will contain onlu data values:", "_____no_output_____" ] ], [ [ "ao_values = ao[:,2]", "_____no_output_____" ] ], [ [ "Simple statistics:", "_____no_output_____" ] ], [ [ "ao_values.min()", "_____no_output_____" ], [ "ao_values.max()", "_____no_output_____" ], [ "ao_values.mean()", "_____no_output_____" ], [ "ao_values.std()", "_____no_output_____" ], [ "ao_values.sum()", "_____no_output_____" ] ], [ [ "You can also use *np.sum* function:", "_____no_output_____" ] ], [ [ "np.sum(ao_values)", "_____no_output_____" ] ], [ [ "One can make operations on the subsets:", "_____no_output_____" ] ], [ [ "np.mean(ao[ao[:,1]==1,2]) # January monthly mean", "_____no_output_____" ] ], [ [ "Result will be the same if we use method on our selected data:", "_____no_output_____" ] ], [ [ "ao[ao[:,1]==1,2].mean()", "_____no_output_____" ] ], [ [ "## Saving data", "_____no_output_____" ], [ "You can save your data as a text file", "_____no_output_____" ] ], [ [ "np.savetxt('ao_only_values.csv',ao[:, 2], fmt='%.4f')", "_____no_output_____" ] ], [ [ "Head of resulting file:", "_____no_output_____" ] ], [ [ "!head ao_only_values.csv", "-0.0603\r\n0.6268\r\n-0.0081\r\n0.5551\r\n0.0716\r\n0.5386\r\n-0.8025\r\n-0.8510\r\n0.3580\r\n-0.3789\r\n" ] ], [ [ "You can also save it as binary:", "_____no_output_____" ] ], [ [ "f=open('ao_only_values.bin', 'w')\nao[:,2].tofile(f)\nf.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0698e9fa46e4149335c7cc72d8ef63f36d01eeb
71,332
ipynb
Jupyter Notebook
examples/FittingTheGrid.ipynb
SmirnGreg/pyradex
e520c2a3a63853d07624e9a7cb90be843a0b1f57
[ "BSD-3-Clause" ]
12
2016-01-26T13:39:56.000Z
2021-09-01T07:38:04.000Z
examples/FittingTheGrid.ipynb
SmirnGreg/pyradex
e520c2a3a63853d07624e9a7cb90be843a0b1f57
[ "BSD-3-Clause" ]
27
2015-05-29T16:01:31.000Z
2022-01-31T23:41:36.000Z
examples/FittingTheGrid.ipynb
SmirnGreg/pyradex
e520c2a3a63853d07624e9a7cb90be843a0b1f57
[ "BSD-3-Clause" ]
13
2015-01-13T10:40:50.000Z
2022-01-25T22:24:46.000Z
217.47561
22,437
0.893119
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06997ea3566a051b45160dd3623cdd3bc39c833
14,547
ipynb
Jupyter Notebook
MidTermPart2.ipynb
moonryul/course-v3
e5b13732fcbdbc75992ceef6681d00f52a8be4c2
[ "Apache-2.0" ]
null
null
null
MidTermPart2.ipynb
moonryul/course-v3
e5b13732fcbdbc75992ceef6681d00f52a8be4c2
[ "Apache-2.0" ]
null
null
null
MidTermPart2.ipynb
moonryul/course-v3
e5b13732fcbdbc75992ceef6681d00f52a8be4c2
[ "Apache-2.0" ]
3
2020-03-03T03:24:32.000Z
2020-09-20T11:44:38.000Z
28.579568
396
0.491442
[ [ [ "<a href=\"https://colab.research.google.com/github/moonryul/course-v3/blob/master/MidTermPart2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Creating your own dataset from Google Images\n\n*by: Francisco Ingham and Jeremy Howard. Inspired by [Adrian Rosebrock](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)*", "_____no_output_____" ], [ "In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g once for dogs and once for cats).", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "# You need to mount your google drive to the /content/gdrive folder of your virtual computer\n# located in the colab server\n\nfrom google.colab import drive\ndrive.mount(\"/content/gdrive\")\n#drive.mount(\"/content/gdrive\", force_remount=True)\n", "Mounted at /content/gdrive\n" ], [ "from fastai.vision import *", "_____no_output_____" ] ], [ [ "## Get a list of URLs", "_____no_output_____" ], [ "### Search and scroll", "_____no_output_____" ], [ "Question 1: (1.1) Please download 3 categories of animal images from google. Download about 100 images for each category. ", "_____no_output_____" ], [ "Go to [Google Images](http://images.google.com) and search for the images you are interested in. The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do.\n\nScroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700.\n\nIt is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, \"canis lupus lupus\", it might be a good idea to exclude other variants:\n\n \"canis lupus lupus\" -dog -arctos -familiaris -baileyi -occidentalis\n\nYou can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown.", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "### Download into file", "_____no_output_____" ], [ "Question 1 (1.2) Move the downloaded files to your google dirve and make the names of the files in the form of *.csv.", "_____no_output_____" ], [ "Now you must run some Javascript code in your browser which will save the URLs of all the images you want for you dataset.\n\nIn Google Chrome press <kbd>Ctrl</kbd><kbd>+Shift</kbd><kbd>+j</kbd> on Windows/Linux and <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>j</kbd> on macOS, and a small window the javascript 'Console' will appear. In Firefox press <kbd>Ctrl</kbd><kbd>Shift</kbd><kbd>k</kbd> on Windows/Linux or <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>k</kbd> on macOS. That is where you will paste the JavaScript commands.\n\nYou will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. 
Then you can run the following commands:\n\n```javascript\nurls=Array.from(document.querySelectorAll('.rg_i')).map(el=> el.hasAttribute('data-src')?el.getAttribute('data-src'):el.getAttribute('data-iurl'));\nwindow.open('data:text/csv;charset=utf-8,' + escape(urls.join('\\n')));\n```", "_____no_output_____" ], [ "### upload urls file into /content folder", "_____no_output_____" ], [ "You will need to run this cell once for each category. The following is an illustration.", "_____no_output_____" ] ], [ [ "path = Path('gdrive/My Drive/fastai-v3/data/bears')\n", "_____no_output_____" ] ], [ [ "## Download images", "_____no_output_____" ], [ "Now you will need to download your images from their respective urls.\n\nfast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved.\n\nLet's download our images! Notice you can choose a maximum number of images to be downloaded. In this case we will not download all the urls.\n\nYou will need to run this line once for every category. The following is an illustration.", "_____no_output_____" ] ], [ [ "classes = ['teddys','grizzly','black']", "_____no_output_____" ], [ "# For example, do this when you download the 'urls_black.csv' file:\nfolder = 'teddys'\ndest = path/folder\nfile = 'urls_teddy.csv'\ndownload_images(dest/file, dest, max_pics=100)\n# Question 2: Explain what happens when you execute the download_images() statement.\n", "_____no_output_____" ], [ "for c in classes:\n print(c)\n verify_images(path/c, delete=True, max_size=500)", "teddys\n" ] ], [ [ "## View data", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "np.random.seed(42)\ndata = ImageDataBunch.from_folder(path, train=\".\", valid_pct=0.2,\n ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)\n \n# Question 3: Explain how the categories of the images are extracted when you execute the above statement.\n", "_____no_output_____" ] ], [ [ "Good! Let's take a look at some of our pictures then.", "_____no_output_____" ], [ "## Train model", "_____no_output_____" ] ], [ [ "learn = cnn_learner(data, models.resnet34, metrics=error_rate)\n# Question 4: 4.1) cnn_learner() has input parameters other than those shown above.\n# One of them is pretrained, which is True by default when you do not specify it. \n# What happens when you specify pretrained=False as in \n# learn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False)? ", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "interp = ClassificationInterpretation.from_learner(learn)", "_____no_output_____" ], [ "interp.plot_confusion_matrix()\n# Question 5: What does your confusion matrix tell you about the prediction capability of your neural network?\n# Explain in a concise manner but do not omit important points.", "_____no_output_____" ], [ "# Question 6: Use interp.plot_top_losses() to find out the prediction capability of your neural network.\n# Explain in a concise manner but do not omit important points.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d069b08716c5b981ec61fe454e52522cb7a0d316
240,920
ipynb
Jupyter Notebook
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
81d1b5017f27f72299a1a0adab5b5ab6c3eeb223
[ "CNRI-Python" ]
1
2021-12-01T05:08:20.000Z
2021-12-01T05:08:20.000Z
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
81d1b5017f27f72299a1a0adab5b5ab6c3eeb223
[ "CNRI-Python" ]
null
null
null
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
81d1b5017f27f72299a1a0adab5b5ab6c3eeb223
[ "CNRI-Python" ]
1
2021-10-09T17:17:48.000Z
2021-10-09T17:17:48.000Z
36.552875
36,012
0.5478
[ [ [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>", "_____no_output_____" ], [ "# Python for Finance (2nd ed.)\n\n**Mastering Data-Driven Finance**\n\n&copy; Dr. Yves J. Hilpisch | The Python Quants GmbH\n\n<img src=\"http://hilpisch.com/images/py4fi_2nd_shadow.png\" width=\"300px\" align=\"left\">", "_____no_output_____" ], [ "# Data Analysis with pandas", "_____no_output_____" ], [ "## pandas Basics", "_____no_output_____" ], [ "### First Steps with DataFrame Class", "_____no_output_____" ] ], [ [ "import pandas as pd ", "_____no_output_____" ], [ "df = pd.DataFrame([10, 20, 30, 40], \n columns=['numbers'], \n index=['a', 'b', 'c', 'd']) ", "_____no_output_____" ], [ "df ", "_____no_output_____" ], [ "df.index ", "_____no_output_____" ], [ "df.columns ", "_____no_output_____" ], [ "df.loc['c'] ", "_____no_output_____" ], [ "df.loc[['a', 'd']] ", "_____no_output_____" ], [ "df.iloc[1:3] ", "_____no_output_____" ], [ "df.sum() ", "_____no_output_____" ], [ "df.apply(lambda x: x ** 2) ", "_____no_output_____" ], [ "df ** 2 ", "_____no_output_____" ], [ "df['floats'] = (1.5, 2.5, 3.5, 4.5) ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df['floats'] ", "_____no_output_____" ], [ "df['names'] = pd.DataFrame(['Yves', 'Sandra', 'Lilli', 'Henry'],\n index=['d', 'a', 'b', 'c']) ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.append({'numbers': 100, 'floats': 5.75, 'names': 'Jil'},\n ignore_index=True) ", "_____no_output_____" ], [ "df = df.append(pd.DataFrame({'numbers': 100, 'floats': 5.75,\n 'names': 'Jil'}, index=['y',])) ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = df.append(pd.DataFrame({'names': 'Liz'}, index=['z',]), sort=False) ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.dtypes ", "_____no_output_____" ], [ "df[['numbers', 'floats']].mean() ", "_____no_output_____" ], [ "df[['numbers', 'floats']].std() ", "_____no_output_____" ] ], [ [ "### Second Steps with DataFrame Class", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "np.random.seed(100)", "_____no_output_____" ], [ "a = np.random.standard_normal((9, 4))", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "df = pd.DataFrame(a) ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.columns = ['No1', 'No2', 'No3', 'No4'] ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df['No2'].mean() ", "_____no_output_____" ], [ "dates = pd.date_range('2019-1-1', periods=9, freq='M') ", "_____no_output_____" ], [ "dates", "_____no_output_____" ], [ "df.index = dates", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.values", "_____no_output_____" ], [ "np.array(df)", "_____no_output_____" ] ], [ [ "## Basic Analytics", "_____no_output_____" ] ], [ [ "df.info() ", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 9 entries, 2019-01-31 to 2019-09-30\nFreq: M\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 No1 9 non-null float64\n 1 No2 9 non-null float64\n 2 No3 9 non-null float64\n 3 No4 9 non-null float64\ndtypes: float64(4)\nmemory usage: 360.0 bytes\n" ], [ "df.describe() ", "_____no_output_____" ], [ "df.sum() ", "_____no_output_____" ], [ "df.mean() ", "_____no_output_____" ], [ "df.mean(axis=0) ", "_____no_output_____" ], [ "df.mean(axis=1) ", "_____no_output_____" ], [ "df.cumsum() ", "_____no_output_____" ], [ "np.mean(df) ", 
"_____no_output_____" ], [ "# raises warning\nnp.log(df) ", "/Users/yves/Python/lib/python3.7/site-packages/ipykernel_launcher.py:2: RuntimeWarning: invalid value encountered in log\n \n" ], [ "np.sqrt(abs(df)) ", "_____no_output_____" ], [ "np.sqrt(abs(df)).sum() ", "_____no_output_____" ], [ "100 * df + 100 ", "_____no_output_____" ] ], [ [ "## Basic Visualization", "_____no_output_____" ] ], [ [ "from pylab import plt, mpl \nplt.style.use('seaborn') \nmpl.rcParams['font.family'] = 'serif' \n%matplotlib inline", "_____no_output_____" ], [ "df.cumsum().plot(lw=2.0, figsize=(10, 6)); \n# plt.savefig('../../images/ch05/pd_plot_01.png')", "_____no_output_____" ], [ "df.plot.bar(figsize=(10, 6), rot=30); \n# df.plot(kind='bar', figsize=(10, 6)) \n# plt.savefig('../../images/ch05/pd_plot_02.png')", "_____no_output_____" ] ], [ [ "## Series Class", "_____no_output_____" ] ], [ [ "type(df)", "_____no_output_____" ], [ "S = pd.Series(np.linspace(0, 15, 7), name='series')", "_____no_output_____" ], [ "S", "_____no_output_____" ], [ "type(S)", "_____no_output_____" ], [ "s = df['No1']", "_____no_output_____" ], [ "s", "_____no_output_____" ], [ "type(s)", "_____no_output_____" ], [ "s.mean()", "_____no_output_____" ], [ "s.plot(lw=2.0, figsize=(10, 6));\n# plt.savefig('../../images/ch05/pd_plot_03.png')", "_____no_output_____" ] ], [ [ "## GroupBy Operations", "_____no_output_____" ] ], [ [ "df['Quarter'] = ['Q1', 'Q1', 'Q1', 'Q2', 'Q2',\n 'Q2', 'Q3', 'Q3', 'Q3']\ndf", "_____no_output_____" ], [ "groups = df.groupby('Quarter') ", "_____no_output_____" ], [ "groups.size() ", "_____no_output_____" ], [ "groups.mean() ", "_____no_output_____" ], [ "groups.max() ", "_____no_output_____" ], [ "groups.aggregate([min, max]).round(2) ", "_____no_output_____" ], [ "df['Odd_Even'] = ['Odd', 'Even', 'Odd', 'Even', 'Odd', 'Even',\n 'Odd', 'Even', 'Odd']", "_____no_output_____" ], [ "groups = df.groupby(['Quarter', 'Odd_Even'])", "_____no_output_____" ], [ "groups.size()", "_____no_output_____" ], [ "groups[['No1', 'No4']].aggregate([sum, np.mean])", "_____no_output_____" ] ], [ [ "## Complex Selection", "_____no_output_____" ] ], [ [ "data = np.random.standard_normal((10, 2)) ", "_____no_output_____" ], [ "df = pd.DataFrame(data, columns=['x', 'y']) ", "_____no_output_____" ], [ "df.info() ", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10 entries, 0 to 9\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 x 10 non-null float64\n 1 y 10 non-null float64\ndtypes: float64(2)\nmemory usage: 288.0 bytes\n" ], [ "df.head() ", "_____no_output_____" ], [ "df.tail() ", "_____no_output_____" ], [ "df['x'] > 0.5 ", "_____no_output_____" ], [ "(df['x'] > 0) & (df['y'] < 0) ", "_____no_output_____" ], [ "(df['x'] > 0) | (df['y'] < 0) ", "_____no_output_____" ], [ "df[df['x'] > 0] ", "_____no_output_____" ], [ "df.query('x > 0') ", "_____no_output_____" ], [ "df[(df['x'] > 0) & (df['y'] < 0)] ", "_____no_output_____" ], [ "df.query('x > 0 & y < 0') ", "_____no_output_____" ], [ "df[(df.x > 0) | (df.y < 0)] ", "_____no_output_____" ], [ "df > 0 ", "_____no_output_____" ], [ "df[df > 0] ", "_____no_output_____" ] ], [ [ "## Concatenation, Joining and Merging", "_____no_output_____" ] ], [ [ "df1 = pd.DataFrame(['100', '200', '300', '400'], \n index=['a', 'b', 'c', 'd'],\n columns=['A',])", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df2 = pd.DataFrame(['200', '150', '50'], \n index=['f', 'b', 'd'],\n columns=['B',])", "_____no_output_____" ], [ 
"df2", "_____no_output_____" ] ], [ [ "#### Concatenation", "_____no_output_____" ] ], [ [ "df1.append(df2, sort=False) ", "_____no_output_____" ], [ "df1.append(df2, ignore_index=True, sort=False) ", "_____no_output_____" ], [ "pd.concat((df1, df2), sort=False) ", "_____no_output_____" ], [ "pd.concat((df1, df2), ignore_index=True, sort=False) ", "_____no_output_____" ] ], [ [ "#### Joining", "_____no_output_____" ] ], [ [ "df1.join(df2) ", "_____no_output_____" ], [ "df2.join(df1) ", "_____no_output_____" ], [ "df1.join(df2, how='left') ", "_____no_output_____" ], [ "df1.join(df2, how='right') ", "_____no_output_____" ], [ "df1.join(df2, how='inner') ", "_____no_output_____" ], [ "df1.join(df2, how='outer') ", "_____no_output_____" ], [ "df = pd.DataFrame()", "_____no_output_____" ], [ "df['A'] = df1['A'] ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df['B'] = df2 ", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = pd.DataFrame({'A': df1['A'], 'B': df2['B']}) ", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "#### Merging", "_____no_output_____" ] ], [ [ "c = pd.Series([250, 150, 50], index=['b', 'd', 'c'])\ndf1['C'] = c\ndf2['C'] = c", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "pd.merge(df1, df2) ", "_____no_output_____" ], [ "pd.merge(df1, df2, on='C') ", "_____no_output_____" ], [ "pd.merge(df1, df2, how='outer') ", "_____no_output_____" ], [ "pd.merge(df1, df2, left_on='A', right_on='B')", "_____no_output_____" ], [ "pd.merge(df1, df2, left_on='A', right_on='B', how='outer') ", "_____no_output_____" ], [ "pd.merge(df1, df2, left_index=True, right_index=True)", "_____no_output_____" ], [ "pd.merge(df1, df2, on='C', left_index=True)", "_____no_output_____" ], [ "pd.merge(df1, df2, on='C', right_index=True)", "_____no_output_____" ], [ "pd.merge(df1, df2, on='C', left_index=True, right_index=True)", "_____no_output_____" ] ], [ [ "## Performance Aspects", "_____no_output_____" ] ], [ [ "data = np.random.standard_normal((1000000, 2)) ", "_____no_output_____" ], [ "data.nbytes ", "_____no_output_____" ], [ "df = pd.DataFrame(data, columns=['x', 'y']) ", "_____no_output_____" ], [ "df.info() ", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000000 entries, 0 to 999999\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 x 1000000 non-null float64\n 1 y 1000000 non-null float64\ndtypes: float64(2)\nmemory usage: 15.3 MB\n" ], [ "%time res = df['x'] + df['y'] ", "CPU times: user 10.5 ms, sys: 14.9 ms, total: 25.4 ms\nWall time: 6.78 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = df.sum(axis=1) ", "CPU times: user 55.5 ms, sys: 19.5 ms, total: 75 ms\nWall time: 73.6 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = df.values.sum(axis=1) ", "CPU times: user 21 ms, sys: 2.25 ms, total: 23.2 ms\nWall time: 20.6 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = np.sum(df, axis=1) ", "CPU times: user 51.9 ms, sys: 11.2 ms, total: 63.1 ms\nWall time: 60.6 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = np.sum(df.values, axis=1) ", "CPU times: user 20.5 ms, sys: 1.48 ms, total: 22 ms\nWall time: 20 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = df.eval('x + y') ", "CPU times: user 12 ms, sys: 16.9 ms, total: 28.9 ms\nWall time: 11.8 ms\n" ], [ "res[:3]", "_____no_output_____" ], [ "%time res = df.apply(lambda row: row['x'] + row['y'], axis=1) ", "CPU times: user 26.8 s, 
sys: 109 ms, total: 26.9 s\nWall time: 27 s\n" ], [ "res[:3]", "_____no_output_____" ] ], [ [ "<img src=\"http://hilpisch.com/tpq_logo.png\" alt=\"The Python Quants\" width=\"35%\" align=\"right\" border=\"0\"><br>\n\n<a href=\"http://tpq.io\" target=\"_blank\">http://tpq.io</a> | <a href=\"http://twitter.com/dyjh\" target=\"_blank\">@dyjh</a> | <a href=\"mailto:[email protected]\">[email protected]</a>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d069b62bc5c0e05fbce6a2b0c7d0d950d630db50
20,140
ipynb
Jupyter Notebook
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
acba48bfede415afd99c89ff2859346e1eb4f96c
[ "MIT" ]
null
null
null
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
acba48bfede415afd99c89ff2859346e1eb4f96c
[ "MIT" ]
null
null
null
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
acba48bfede415afd99c89ff2859346e1eb4f96c
[ "MIT" ]
null
null
null
26.997319
90
0.421152
[ [ [ "## RAC/DVR step 1: diagonalize **H**($\\lambda$)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n%matplotlib qt5\nimport pandas as pd\n#\n# extend path by location of the dvr package\n#\nsys.path.append('../../Python_libs')\nimport dvr\nimport jolanta", "_____no_output_____" ], [ "amu_to_au=1822.888486192\nau2cm=219474.63068\nau2eV=27.211386027\nAngs2Bohr=1.8897259886", "_____no_output_____" ], [ "#\n# Jolanata-3D parameters a, b, c: (0.028, 1.0, 0.028)\n#\n# CS-DVR: \n# bound state: -7.17051 eV\n# resonance (3.1729556 - 0.16085j) eV\n#\njparam=(0.028, 1.0, 0.028)", "_____no_output_____" ], [ "#\n# compute DVR of T and V\n# then show the density of states\n# in a potential + energy-levels plot\n# the standard 3D-Jolanta is used (resonance at 1.75 -0.2i eV)\n#\nrmin=0\nrmax=12 # grid from 0 to rmax\nthresh = 8 # maximum energy for plot\nppB = 15 # grid points per Bohr\n\nnGrid=int((rmax-rmin)*ppB)\nrs = dvr.DVRGrid(rmin, rmax, nGrid)\nVs = jolanta.Jolanta_3D(rs, jparam)\nTs = dvr.KineticEnergy(1, rmin, rmax, nGrid)\n[energy, wf] = dvr.DVRDiag2(nGrid, Ts, Vs, wf=True)\n\nn_ene=0\nfor i in range(nGrid):\n print(\"%3d %12.8f au = %12.5f eV\" % (i+1, energy[i], energy[i]*au2eV))\n n_ene += 1\n if energy[i]*au2eV > thresh:\n break\n\n# \"DVR normalization\", sum(wf[:,0]**2)\n# this is correct for plotting\n\nc=[\"orange\", \"blue\"]\n#h=float(xmax) / (nGrid+1.0)\nscale=3*au2eV\n\nplt.cla()\nplt.plot(rs,Vs*au2eV, '-', color=\"black\")\nfor i in range(n_ene):\n plt.plot(rs, scale*wf[:,i]**2+energy[i]*au2eV, '-', color=c[i%len(c)])\nplt.ylim(-8, 1.5*thresh)\nplt.xlabel('$r$ [Bohr]')\nplt.ylabel('$E$ [eV]')\nplt.show()", " 1 -0.26351095 au = -7.17050 eV\n 2 0.11989697 au = 3.26256 eV\n 3 0.28142119 au = 7.65786 eV\n 4 0.52212147 au = 14.20765 eV\n" ] ], [ [ "## RAC by increasing $b$\n\nThe last energy needs to be about $7E_r \\approx 22$eV", "_____no_output_____" ] ], [ [ "#\n# show the potential\n#\na_ref, b_ref, c_ref = jparam\nplt.cla()\nfor b_curr in [1.1, 1.3, 1.5, 1.7]:\n param = [a_ref, b_curr, c_ref]\n plt.plot(rs, jolanta.Jolanta_3D(rs, param)*au2eV)\n\nplt.ylim(-30, 10)\nplt.show()", "_____no_output_____" ], [ "a_ref, b_ref, c_ref = jparam\n\nb_min=b_ref\nb_max=2.5\nnEs_keep=4 # how many energies are kept\n\nn_b=101\n\nbs=np.linspace(b_min, b_max, num=n_b, endpoint=True)\n\nrun_data = np.zeros((n_b, nEs_keep+1)) # array used to collect all eta-run data\nrun_data[:,0]=bs\n\nfor l, b_curr in enumerate(bs):\n param = [a_ref, b_curr, c_ref]\n Vs = jolanta.Jolanta_3D(rs, param)\n energy = dvr.DVRDiag2(nGrid, Ts, Vs)\n run_data[l,1:] = au2eV*energy[0:nEs_keep]\n print(l+1, end=\" \")\n if (l+1)%10==0:\n print()\n\nprint(run_data[-1,:])", "1 2 3 4 5 6 7 8 9 10 \n11 12 13 14 15 16 17 18 19 20 \n21 22 23 24 25 26 27 28 29 30 \n31 32 33 34 35 36 37 38 39 40 \n41 42 43 44 45 46 47 48 49 50 \n51 52 53 54 55 56 57 58 59 60 \n61 62 63 64 65 66 67 68 69 70 \n71 72 73 74 75 76 77 78 79 80 \n81 82 83 84 85 86 87 88 89 90 \n91 92 93 94 95 96 97 98 99 100 \n101 [ 2.5 -40.14360549 -21.72431982 -7.33018023 1.92800864]\n" ], [ "plt.cla()\nfor i in range(0, nEs_keep):\n plt.plot(bs, run_data[:,i+1], 'o-')\nplt.ylim(-25,5)\nplt.show()", "_____no_output_____" ], [ "cols = ['z']\nfor i in range(nEs_keep):\n cols.append('E'+str(i+1))\ndf = pd.DataFrame(run_data, columns=cols)\ndf.to_csv('rac_DVR_3D_b-scale_rmax_12.csv', index=False)\ndf.head(5)", "_____no_output_____" ] ], [ [ "## RAC with Coulomb potential", "_____no_output_____" ] ], [ [ "#\n# show the 
potential\n#\ndef coulomb(r, lbd=1.0):\n \"\"\" attractive Coulomb potential with strength lbd = lamda \"\"\"\n return -lbd/r\n \nplt.cla()\nfor l_curr in [0, 0.5, 1.0, 1.5, 2.0]:\n plt.plot(rs, (jolanta.Jolanta_3D(rs, jparam)+coulomb(rs, lbd=l_curr))*au2eV)\n\n#plt.xlim(0,15)\nplt.ylim(-30, 10)\nplt.show()", "_____no_output_____" ], [ "l_min=0.0\nl_max=2.6\nnEs_keep=4 # how many energies are kept\n\nnpts=101\n\nls=np.linspace(l_min, l_max, num=npts, endpoint=True)\n\nrun_data = np.zeros((npts, nEs_keep+1)) # array used to collect all eta-run data\nrun_data[:,0]=ls\n\nVJs = jolanta.Jolanta_3D(rs, jparam)\nWs = coulomb(rs, lbd=1.0)\n\nfor j, l_curr in enumerate(ls):\n Vs = VJs + l_curr*Ws\n energy = dvr.DVRDiag2(nGrid, Ts, Vs)\n run_data[j,1:] = au2eV*energy[0:nEs_keep]\n print(j+1, end=\" \")\n if (j+1)%10==0:\n print()\n\nprint(run_data[-1,:])", "1 2 3 4 5 6 7 8 9 10 \n11 12 13 14 15 16 17 18 19 20 \n21 22 23 24 25 26 27 28 29 30 \n31 32 33 34 35 36 37 38 39 40 \n41 42 43 44 45 46 47 48 49 50 \n51 52 53 54 55 56 57 58 59 60 \n61 62 63 64 65 66 67 68 69 70 \n71 72 73 74 75 76 77 78 79 80 \n81 82 83 84 85 86 87 88 89 90 \n91 92 93 94 95 96 97 98 99 100 \n101 [ 2.6 -45.3121785 -21.77660986 -7.6859924 -0.87078918]\n" ], [ "plt.cla()\nfor i in range(0, nEs_keep):\n plt.plot(ls, run_data[:,i+1], 'o-')\nplt.ylim(-25,5)\nplt.show()", "_____no_output_____" ], [ "cols = ['z']\nfor i in range(nEs_keep):\n cols.append('E'+str(i+1))\ndf = pd.DataFrame(run_data, columns=cols)\ndf.to_csv('rac_DVR_3D_coulomb_rmax_12.csv', index=False)\ndf.head(5)", "_____no_output_____" ] ], [ [ "## RAC with soft-box", "_____no_output_____" ] ], [ [ "#\n# show the box potential\n#\ndef softbox(r, rcut=1.0, lbd=1.0):\n \"\"\" \n Softbox: \n -1 at the origin, rises at r0 softly to asymptotic 0\n based on Gaussian with inverted scale\n \"\"\"\n return lbd*(np.exp(-(2*rcut)**2/r**2) - 1)\n\nplt.cla()\nfor l_curr in [0.1, 0.2, 0.3, 0.4, 0.5]:\n Vs = jolanta.Jolanta_3D(rs, jparam)\n Ws = softbox(rs, rcut=5.0, lbd=l_curr)\n plt.plot(rs, Ws*au2eV)\n\nplt.xlim(0,20)\nplt.ylim(-15, 0)\nplt.show()", "_____no_output_____" ], [ "#\n# show the full potential\n#\nplt.cla()\nfor l_curr in [0.1, 0.2, 0.3, 0.4, 0.5]:\n Vs = jolanta.Jolanta_3D(rs, jparam)\n Ws = softbox(rs, rcut=3.0, lbd=l_curr)\n plt.plot(rs, (Vs+Ws)*au2eV)\n\n#plt.xlim(0,20)\nplt.ylim(-30, 8)\nplt.show()", "_____no_output_____" ], [ "l_min=0.0\nl_max=1.2\nnEs_keep=4 # how many energies are kept\n\nnpts=101\n\nls=np.linspace(l_min, l_max, num=npts, endpoint=True)\n\nrun_data = np.zeros((npts, nEs_keep+1)) # array used to collect all eta-run data\nrun_data[:,0]=ls\n\nVJs = jolanta.Jolanta_3D(rs, jparam)\nWs = softbox(rs, rcut=3.0, lbd=1.0)\n\nfor j, l_curr in enumerate(ls):\n Vs = VJs + l_curr*Ws\n energy = dvr.DVRDiag2(nGrid, Ts, Vs)\n run_data[j,1:] = au2eV*energy[0:nEs_keep]\n print(j+1, end=\" \")\n if (j+1)%10==0:\n print()\n\nprint(run_data[-1,:])", "1 2 3 4 5 6 7 8 9 10 \n11 12 13 14 15 16 17 18 19 20 \n21 22 23 24 25 26 27 28 29 30 \n31 32 33 34 35 36 37 38 39 40 \n41 42 43 44 45 46 47 48 49 50 \n51 52 53 54 55 56 57 58 59 60 \n61 62 63 64 65 66 67 68 69 70 \n71 72 73 74 75 76 77 78 79 80 \n81 82 83 84 85 86 87 88 89 90 \n91 92 93 94 95 96 97 98 99 100 \n101 [ 1.2 -38.57676846 -22.90126541 -10.86827086 -3.4565116 ]\n" ], [ "plt.cla()\nfor i in range(0, nEs_keep):\n plt.plot(ls, run_data[:,i+1], 'o-')\nplt.ylim(-25,5)\nplt.show()", "_____no_output_____" ], [ "cols = ['z']\nfor i in range(nEs_keep):\n cols.append('E'+str(i+1))\ndf = pd.DataFrame(run_data, 
columns=cols)\ndf.to_csv('rac_DVR_3D_softbox_rmax_12.csv', index=False)\ndf.head(5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d069caf2f5febf181c79ae3877a04eac2baac3e8
30,352
ipynb
Jupyter Notebook
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
73fde98a48bcc2c8bf5e17cee0b24e5dbdef0c52
[ "Apache-2.0" ]
1
2016-07-21T19:31:51.000Z
2016-07-21T19:31:51.000Z
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
73fde98a48bcc2c8bf5e17cee0b24e5dbdef0c52
[ "Apache-2.0" ]
null
null
null
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
73fde98a48bcc2c8bf5e17cee0b24e5dbdef0c52
[ "Apache-2.0" ]
null
null
null
41.922652
1,334
0.591757
[ [ [ "Deep Learning\n=============\n\nAssignment 4\n------------\n\nPreviously in `2_fullyconnected.ipynb` and `3_regularization.ipynb`, we trained fully connected networks to classify [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) characters.\n\nThe goal of this assignment is make the neural network convolutional.", "_____no_output_____" ] ], [ [ "# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n", "_____no_output_____" ], [ "pickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)", "Training set (200000, 28, 28) (200000,)\nValidation set (10000, 28, 28) (10000,)\nTest set (10000, 28, 28) (10000,)\n" ] ], [ [ "Reformat into a TensorFlow-friendly shape:\n- convolutions need the image data formatted as a cube (width by height by #channels)\n- labels as float 1-hot encodings.", "_____no_output_____" ] ], [ [ "image_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\n\nimport numpy as np\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)", "Training set (200000, 28, 28, 1) (200000, 10)\nValidation set (10000, 28, 28, 1) (10000, 10)\nTest set (10000, 28, 28, 1) (10000, 10)\n" ], [ "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])", "_____no_output_____" ] ], [ [ "Let's build a small network with two convolutional layers, followed by one fully connected layer. 
Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.", "_____no_output_____" ] ], [ [ "batch_size = 16\npatch_size = 5\ndepth = 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n logits = model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))", "_____no_output_____" ], [ "num_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print('Initialized')\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))", "Initialized\nMinibatch loss at step 0 : 3.51275\nMinibatch accuracy: 6.2%\nValidation accuracy: 12.8%\nMinibatch loss at step 50 : 1.48703\nMinibatch accuracy: 43.8%\nValidation accuracy: 50.4%\nMinibatch loss at step 100 : 1.04377\nMinibatch accuracy: 68.8%\nValidation accuracy: 67.4%\nMinibatch loss at step 150 : 0.601682\nMinibatch accuracy: 68.8%\nValidation accuracy: 73.0%\nMinibatch loss at step 200 : 
0.898649\nMinibatch accuracy: 75.0%\nValidation accuracy: 77.8%\nMinibatch loss at step 250 : 1.3637\nMinibatch accuracy: 56.2%\nValidation accuracy: 75.4%\nMinibatch loss at step 300 : 1.41968\nMinibatch accuracy: 62.5%\nValidation accuracy: 76.0%\nMinibatch loss at step 350 : 0.300648\nMinibatch accuracy: 81.2%\nValidation accuracy: 80.2%\nMinibatch loss at step 400 : 1.32092\nMinibatch accuracy: 56.2%\nValidation accuracy: 80.4%\nMinibatch loss at step 450 : 0.556701\nMinibatch accuracy: 81.2%\nValidation accuracy: 79.4%\nMinibatch loss at step 500 : 1.65595\nMinibatch accuracy: 43.8%\nValidation accuracy: 79.6%\nMinibatch loss at step 550 : 1.06995\nMinibatch accuracy: 75.0%\nValidation accuracy: 81.2%\nMinibatch loss at step 600 : 0.223684\nMinibatch accuracy: 100.0%\nValidation accuracy: 82.3%\nMinibatch loss at step 650 : 0.619602\nMinibatch accuracy: 87.5%\nValidation accuracy: 81.8%\nMinibatch loss at step 700 : 0.812091\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.4%\nMinibatch loss at step 750 : 0.276302\nMinibatch accuracy: 87.5%\nValidation accuracy: 82.3%\nMinibatch loss at step 800 : 0.450241\nMinibatch accuracy: 81.2%\nValidation accuracy: 82.3%\nMinibatch loss at step 850 : 0.137139\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.3%\nMinibatch loss at step 900 : 0.52664\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.2%\nMinibatch loss at step 950 : 0.623835\nMinibatch accuracy: 87.5%\nValidation accuracy: 82.1%\nMinibatch loss at step 1000 : 0.243114\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.9%\nTest accuracy: 90.0%\n" ] ], [ [ "---\nProblem 1\n---------\n\nThe convolutional model above uses convolutions with stride 2 to reduce the dimensionality. Replace the strides by a max pooling operation (`nn.max_pool()`) of stride 2 and kernel size 2.\n\n---", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "---\nProblem 2\n---------\n\nTry to get the best performance you can using a convolutional net. 
Look for example at the classic [LeNet5](http://yann.lecun.com/exdb/lenet/) architecture, adding Dropout, and/or adding learning rate decay.\n\n---", "_____no_output_____" ] ], [ [ "batch_size = 16\npatch_size = 3\ndepth = 16\nnum_hidden = 705\nnum_hidden_last = 205\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layerconv1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layerconv1_biases = tf.Variable(tf.zeros([depth]))\n layerconv2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth * 2], stddev=0.1))\n layerconv2_biases = tf.Variable(tf.zeros([depth * 2]))\n \n layerconv3_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth * 2, depth * 4], stddev=0.03))\n layerconv3_biases = tf.Variable(tf.zeros([depth * 4]))\n \n layerconv4_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth * 4, depth * 4], stddev=0.03))\n layerconv4_biases = tf.Variable(tf.zeros([depth * 4]))\n \n\n layerconv5_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth * 4, depth * 16], stddev=0.03))\n layerconv5_biases = tf.Variable(tf.zeros([depth * 16]))\n\n \n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size / 7 * image_size / 7 * (depth * 4), num_hidden], stddev=0.03))\n layer3_biases = tf.Variable(tf.zeros([num_hidden]))\n layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_hidden_last], stddev=0.0532))\n layer4_biases = tf.Variable(tf.zeros([num_hidden_last]))\n \n layer5_weights = tf.Variable(tf.truncated_normal(\n [num_hidden_last, num_labels], stddev=0.1))\n layer5_biases = tf.Variable(tf.zeros([num_labels]))\n \n\n # Model.\n def model(data, use_dropout=False):\n conv = tf.nn.conv2d(data, layerconv1_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.elu(conv + layerconv1_biases)\n pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n \n conv = tf.nn.conv2d(pool, layerconv2_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.elu(conv + layerconv2_biases)\n #pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n \n\n conv = tf.nn.conv2d(hidden, layerconv3_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.elu(conv + layerconv3_biases)\n pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n # norm1\n # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n conv = tf.nn.conv2d(pool, layerconv4_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.elu(conv + layerconv4_biases)\n pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n\n \n conv = tf.nn.conv2d(pool, layerconv5_weights, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.elu(conv + layerconv5_biases)\n pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n \n shape = pool.get_shape().as_list()\n #print(shape)\n reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.elu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n \n if use_dropout:\n 
hidden = tf.nn.dropout(hidden, 0.75)\n \n nn_hidden_layer = tf.matmul(hidden, layer4_weights) + layer4_biases\n hidden = tf.nn.elu(nn_hidden_layer)\n \n if use_dropout:\n hidden = tf.nn.dropout(hidden, 0.75)\n \n \n return tf.matmul(hidden, layer5_weights) + layer5_biases\n \n # Training computation.\n logits = model(tf_train_dataset, True)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n \n global_step = tf.Variable(0) # count the number of steps taken.\n learning_rate = tf.train.exponential_decay(0.1, global_step, 3000, 0.86, staircase=True)\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\n\nnum_steps = 45001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print(\"Initialized\")\n for step in xrange(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 500 == 0):\n print(\"Minibatch loss at step\", step, \":\", l)\n print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels))\n print(time.ctime())\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))", "Initialized\nMinibatch loss at step 0 : 2.30135\nMinibatch accuracy: 6.2%\nValidation accuracy: 11.4%\nThu Jul 14 19:41:34 2016\nMinibatch loss at step 500 : 0.77839\nMinibatch accuracy: 87.5%\nValidation accuracy: 85.0%\nThu Jul 14 19:42:07 2016\nMinibatch loss at step 1000 : 0.239152\nMinibatch accuracy: 93.8%\nValidation accuracy: 86.5%\nThu Jul 14 19:42:50 2016\nMinibatch loss at step 1500 : 0.642659\nMinibatch accuracy: 81.2%\nValidation accuracy: 86.7%\nThu Jul 14 19:43:30 2016\nMinibatch loss at step 2000 : 0.194781\nMinibatch accuracy: 87.5%\nValidation accuracy: 87.4%\nThu Jul 14 19:44:08 2016\nMinibatch loss at step 2500 : 1.07727\nMinibatch accuracy: 62.5%\nValidation accuracy: 87.4%\nThu Jul 14 19:44:53 2016\nMinibatch loss at step 3000 : 0.656757\nMinibatch accuracy: 87.5%\nValidation accuracy: 88.3%\nThu Jul 14 19:45:32 2016\nMinibatch loss at step 3500 : 0.417028\nMinibatch accuracy: 87.5%\nValidation accuracy: 88.4%\nThu Jul 14 19:46:10 2016\nMinibatch loss at step 4000 : 0.498826\nMinibatch accuracy: 81.2%\nValidation accuracy: 89.3%\nThu Jul 14 19:46:51 2016\nMinibatch loss at step 4500 : 0.501579\nMinibatch accuracy: 87.5%\nValidation accuracy: 89.3%\nThu Jul 14 19:47:36 2016\nMinibatch loss at step 5000 : 0.852857\nMinibatch accuracy: 75.0%\nValidation accuracy: 88.5%\nThu Jul 14 19:48:23 2016\nMinibatch loss at step 5500 : 0.468938\nMinibatch accuracy: 87.5%\nValidation accuracy: 88.9%\nThu Jul 14 19:49:09 2016\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d069d17a6916a06cecaf6b0bf5700ed10032ed18
82,612
ipynb
Jupyter Notebook
archive/cleanlab-trial3.ipynb
PathwayCommons/pathway-abstract-classifier
fe63fa9fcb817810207e213ee5487846a7e40b16
[ "MIT" ]
null
null
null
archive/cleanlab-trial3.ipynb
PathwayCommons/pathway-abstract-classifier
fe63fa9fcb817810207e213ee5487846a7e40b16
[ "MIT" ]
null
null
null
archive/cleanlab-trial3.ipynb
PathwayCommons/pathway-abstract-classifier
fe63fa9fcb817810207e213ee5487846a7e40b16
[ "MIT" ]
null
null
null
33.084501
267
0.560294
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session", "/kaggle/input/all-labelled/all_labelled.tsv\n" ], [ "# Read in data\ndf=pd.read_csv(\"../input/all-labelled/all_labelled.tsv\", delimiter=\"\\t\")", "_____no_output_____" ], [ "!pip install -q ktrain ", "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nyellowbrick 1.3.post1 requires numpy<1.20,>=1.16.0, but you have numpy 1.20.3 which is incompatible.\r\npdpbox 0.2.1 requires matplotlib==3.1.1, but you have matplotlib 3.5.1 which is incompatible.\r\nimbalanced-learn 0.9.0 requires scikit-learn>=1.0.1, but you have scikit-learn 0.24.2 which is incompatible.\r\nhypertools 0.7.0 requires scikit-learn!=0.22,<0.24,>=0.19.1, but you have scikit-learn 0.24.2 which is incompatible.\r\nfeaturetools 1.4.1 requires numpy>=1.21.0, but you have numpy 1.20.3 which is incompatible.\u001b[0m\r\n\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\r\n" ], [ "df['text']=df['abstract']\ndf['label']=df['class']\ndf.drop(['abstract', 'journal', 'title', 'uid', 'class'], axis=1, inplace=True)\ndf.dropna(inplace=True)\ndf=df.drop_duplicates()", "_____no_output_____" ], [ "import tensorflow as tf \nimport ktrain \nfrom ktrain import text \nfrom sklearn.model_selection import train_test_split\n# Enable AMP\nfrom tensorflow.keras.mixed_precision import experimental as mixed_precision\npolicy = mixed_precision.Policy('mixed_float16')\nmixed_precision.set_policy(policy)", "_____no_output_____" ], [ "df.dropna(inplace=True)\nX = df['text'].tolist()\ny = df['label'].tolist() \npos = y.count(1)\nneg=y.count(0)\ntotal=len(y)\nprint(pos)\nprint(neg)\nprint(total)", "988\n7640\n8628\n" ], [ "t_mod = text.Transformer('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', maxlen=500, class_names = [0,1])", "_____no_output_____" ], [ "from sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nimport copy", "_____no_output_____" ], [ "#weight_for_0 = (1 / y.count(0)) * (len(y) / 2.0)\n#weight_for_1 = (1 / y.count(1)) * (len(y) / 2.0)\n\n#class_weight = {0: weight_for_0, 1: weight_for_1}\nkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)\ny=np.asarray(y)\nX=np.asarray(X)", "_____no_output_____" ], [ "# Re-run cell if one of the classifiers predicts all 0s\n\n# Initialize empty array for prediction probabilities/confidence\npsx = np.zeros((len(y), 2))\n\n# Fill in this array using k fold cross validation \nfor k, (cv_train_idx, cv_holdout_idx) in enumerate(kf.split(X, y)):\n t_mod = text.Transformer('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', maxlen=500, class_names = [0,1])\n\n # Select the training and holdout cross-validated sets.\n X_train_cv, X_holdout_cv = X[cv_train_idx], X[cv_holdout_idx]\n y_train_cv, y_holdout_cv = y[cv_train_idx], y[cv_holdout_idx]\n # Preprocess train \n data= t_mod.preprocess_train(X_train_cv,y_train_cv)\n \n # Get new classifier for each iteration\n model = t_mod.get_classifier()\n \n # Fit model\n learner = ktrain.get_learner(model, train_data=data, batch_size=16)\n learning_rate= 5e-5\n epochs=3\n learner.fit_onecycle(learning_rate, epochs)\n predictor=ktrain.get_predictor(learner.model, preproc=t_mod)\n \n \n # Check performance of model \n predictions=predictor.predict((X_holdout_cv))\n print(accuracy_score((y_holdout_cv), predictions))\n print(classification_report((y_holdout_cv), predictions))\n\n \n # Get probabilities\n psx_cv = predictor.predict_proba(X_holdout_cv) # P(s = k|x) # [:,1]\n psx[cv_holdout_idx] = psx_cv\n", "preprocessing train...\nlanguage: en\ntrain sequence lengths:\n\tmean : 170\n\t95percentile : 249\n\t99percentile : 271\n" ], [ "!pip install -q cleanlab", "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\nTo disable this warning, you can either:\n\t- Avoid using `tokenizers` before the fork if possible\n\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\r\n" ], [ "from cleanlab.pruning import get_noise_indices\n\nordered_label_errors = get_noise_indices(\n s=y,\n psx=psx,\n sorted_index_method='normalized_margin', # Orders label errors\n )", "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\nTo disable this warning, you can either:\n\t- Avoid using `tokenizers` before the fork if possible\n\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\nhuggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\nTo disable this warning, you can either:\n\t- Avoid using `tokenizers` before the fork if possible\n\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" ], [ "print(len(ordered_label_errors))\nprint(ordered_label_errors)", "509\n[7562 2752 165 4701 3843 1857 2800 1404 3650 4125 2499 5258 2751 1542\n 492 7515 3799 2766 3662 4565 3375 2724 4136 57 2886 3818 1321 2097\n 4713 1515 4690 7586 1821 557 8156 8305 2632 8342 7920 3991 1226 3773\n 4124 4018 1041 1302 4193 3794 3699 976 205 3802 1735 3944 2662 1015\n 2656 1629 1964 7923 1728 951 6426 987 5554 2120 1928 4564 325 5118\n 3174 2787 6450 3803 5490 2697 3518 7078 1068 1002 5530 1307 4148 4471\n 4536 1759 2767 2378 3865 1413 1552 3630 994 8386 7786 4418 4827 3187\n 8146 1174 4681 4375 849 2599 1854 190 5138 2224 1230 4469 1613 3471\n 1914 107 700 4722 3327 4260 2758 5322 2208 1416 4446 114 3258 2940\n 2400 5585 3018 1282 1272 2056 5408 2199 1705 2107 2467 1711 4555 710\n 2139 1284 397 250 1704 3738 152 1156 461 5946 5489 4918 7567 63\n 5292 2567 3517 1968 401 835 2765 4752 241 1582 1421 3084 5360 4712\n 2368 2187 3550 318 255 2277 1179 433 2449 436 352 3221 1676 4494\n 4627 749 883 2821 2900 6456 599 6432 5392 274 664 77 296 782\n 625 409 853 661 1604 1479 644 1102 3768 1138 5122 573 19 171\n 28 5283 5326 1349 658 6492 4123 4938 4453 7379 6449 3591 4417 1576\n 4090 2273 3231 704 355 358 230 581 411 615 1235 432 7269 222\n 1760 3284 47 373 3501 334 297 3894 628 3441 4465 2253 467 1488\n 102 3362 4629 1754 261 3367 5610 4645 345 4660 1960 598 4501 692\n 2096 4703 1992 3088 2232 4529 116 1185 6349 886 2265 524 349 687\n 1752 516 2055 1526 6287 8188 136 323 1395 968 1040 588 696 7222\n 8071 100 5486 3520 6412 179 670 3529 7155 368 5513 272 2118 3825\n 1989 5864 3412 1659 4764 5144 726 7928 8419 1346 93 786 3031 4680\n 59 8597 2398 601 277 587 681 2678 2269 3185 603 4414 2292 2562\n 2684 3620 6062 5290 387 4710 1675 988 202 3207 7 602 4524 752\n 861 120 4091 150 112 7417 204 4456 705 4112 1696 6685 1448 3990\n 420 683 89 1995 348 514 388 317 1599 3387 5128 3353 5936 708\n 8541 2052 5830 300 8364 1919 3754 429 2393 1424 74 79 426 1807\n 7309 3049 402 372 3325 488 180 1761 901 2537 3319 104 2201 564\n 3331 1736 279 4628 6746 773 630 2676 7290 1334 8230 577 2205 580\n 5638 1149 3302 3824 7675 6626 366 416 2836 6853 379 3197 7550 306\n 2341 478 2264 5022 3137 138 90 5852 6123 536 106 4962 2679 985\n 3334 534 1672 4094 2937 1370 3003 2479 2418 7738 5948 160 5076 5877\n 1381 4878 5062 2009 1216 568 2335 2685 2125 3460 96 1858 2425 362\n 8370 1165 6124 405 7207 2793 5311 3988 1298 4613 3200 1318 103 5865\n 7424 1596 790 7889 149 2029 224 217 5616 789 693 3842 4864 3942\n 132 636 832 3544 1932]\n" ], [ "list_abs=[]\nfor i in ordered_label_errors: \n 
list_abs.append(X[i])\nprint(len(list_abs))", "509\n" ], [ "df_o=pd.read_csv(\"../input/all-labelled/all_labelled.tsv\", delimiter=\"\\t\")\ndf_o.dropna(inplace=True)\ndf_o=df_o.drop_duplicates()\ndf_top_errors= df_o[df_o['abstract'].isin(list_abs)]\nprint(len(df_top_errors))", "529\n" ], [ "print(df_top_errors)\n", " uid abstract \\\n7 31969690 Neonatal mammalian heart maintains a transient... \n20 32127658 Autophagy is a cellular catabolic process that... \n29 32015503 Cells experiencing delays in mitotic progressi... \n50 32376875 We have previously reported that Monoglyceride... \n60 32332916 Although the roles of the Hippo pathway in org... \n... ... ... \n8442 33591274 Chromosome segregation during cell division re... \n8458 33587037 Piwi-interacting RNAs (piRNAs) play essential ... \n8491 33555257 The Hippo (Hpo) pathway regulates tissue growt... \n8613 33459596 Germline mutations in the Folliculin (FLCN) tu... \n8669 33416496 The oncoprotein transcription factor MYC is a ... \n\n title \\\n7 Targeting LncDACH1 promotes cardiac repair and... \n20 Deleting key autophagy elongation proteins ind... \n29 MARCH5-dependent degradation of MCL1/NOXA comp... \n50 Monoglyceride lipase mediates tumor-suppressiv... \n60 Hippo kinases MST1 and MST2 control the differ... \n... ... \n8442 Structural basis of Stu2 recruitment to yeast ... \n8458 SNPC-1.3 is a sex-specific transcription facto... \n8491 Negative feedback couples Hippo pathway activa... \n8613 Loss of FLCN-FNIP1/2 induces a non-canonical i... \n8669 MYC regulates ribosome biogenesis and mitochon... \n\n journal class \n7 Cell death and differentiation 0 \n20 Cell death and differentiation 0 \n29 Cell death and differentiation 0 \n50 Cell death and differentiation 0 \n60 Cell death and differentiation 1 \n... ... ... \n8442 eLife 0 \n8458 eLife 1 \n8491 eLife 1 \n8613 eLife 0 \n8669 eLife 0 \n\n[529 rows x 5 columns]\n" ], [ "df_top_errors.to_csv('top_losses_cl.tsv', sep= \"\\t\", index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d069ded8d50c0d0b3d732bf640dc7e526ad983c3
17,437
ipynb
Jupyter Notebook
learning_DAN/base_arima.ipynb
HaTT2018/NET_louvain_DAN
f77ac0e846c3274535dff1928a0b2ce3915ff573
[ "MIT" ]
3
2021-11-19T08:07:33.000Z
2022-01-06T08:30:59.000Z
learning_DAN/base_arima.ipynb
HaTT2018/NET_louvain_DAN
f77ac0e846c3274535dff1928a0b2ce3915ff573
[ "MIT" ]
null
null
null
learning_DAN/base_arima.ipynb
HaTT2018/NET_louvain_DAN
f77ac0e846c3274535dff1928a0b2ce3915ff573
[ "MIT" ]
null
null
null
71.757202
1,751
0.653667
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nimport ipdb\nimport dan_utils\n\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "from statsmodels.graphics.tsaplots import plot_acf\nfrom statsmodels.graphics.tsaplots import plot_pacf\nfrom statsmodels.tsa.stattools import adfuller as ADF\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\nfrom statsmodels.tsa.arima_model import ARIMA", "_____no_output_____" ], [ "def base_arima(data, pre_step):\n # data should be 1-D DataFrame\n D_data=data.diff(periods=1).dropna()\n data = np.array(data)\n\n model=ARIMA(data,(1,1,1)).fit()\n forecast=model.forecast(pre_step)\n return (forecast[0])", "_____no_output_____" ], [ "randseed = 25\ndan_utils.setup_seed(randseed)\nres = 11\n\nv = pd.read_csv('../data/q_20_aggragated.csv')\nv = v.rename(columns={'Unnamed: 0': 'id'})\ndet_with_class = pd.read_csv('../res/%i_res%i_id_402_withclass.csv'%(randseed, res), index_col=0)\n\nv['class_i'] = ''\nfor i in range(len(v)):\n v.loc[i, 'class_i'] = det_with_class[det_with_class['id']==v.loc[i, 'id']].iloc[0, 5] # 5 stands for 'class_i'\n\nnum_class = det_with_class['class_i'].drop_duplicates().size\n\nv_class = []\nfor i in range(num_class):\n v_class.append(v[v['class_i']==i])\n\nprint('There are %i class(es)'%num_class)\n\ndist_mat = pd.read_csv('../data/dist_mat.csv', index_col=0)\nid_info = pd.read_csv('../data/id2000.csv', index_col=0)\ndist_mat.index = id_info['id2']\ndist_mat.columns = id_info['id2']\nfor i in range(len(dist_mat)):\n for j in range(len(dist_mat)):\n if i==j:\n dist_mat.iloc[i, j] = 0\n\nnear_id = pd.DataFrame(np.argsort(np.array(dist_mat)), index = id_info['id2'], columns = id_info['id2'])\n\nseg = pd.read_csv('../data/segement.csv', header=None)\nnum_dets = 25\n\ndet_list_class = []\nfor i in range(num_class):\n det_list_class_temp, v_class_temp = dan_utils.get_class_with_node(seg, v_class[i])\n det_list_class.append(det_list_class_temp)\n v_class_temp = v_class_temp[v_class_temp['id'].isin(det_list_class_temp[:num_dets])]\n v_class[i] = v_class_temp\n \nnear_road_set = []\nfor i in range(num_class):\n near_road_set.append(dan_utils.rds_mat(dist_mat, det_list_class[i][:num_dets], seg))", "There are 5 class(es)\n" ], [ "# ind, class\n# 0 , blue\n# 1 , green\n# 2 , yellow <--\n# 3 , black <--\n# 4 , red <--\nclass_color_set = ['b', 'g', 'y', 'black', 'r']\nclass_i = 4\n\n# v_class[4].iloc[:, 2:-1]", "_____no_output_____" ], [ "data = np.array(v_class[4].iloc[:, 2:-1])\nwindow = 100\npred_num = 6\n\npred_mat_all = []\nlabel_mat_all = []\nfor i in range(data.shape[0]): # iterate over detectors\n pred_mat = []\n label_mat = []\n for j in range(data.shape[1] - window - pred_num):\n data_temp = data[i, j:j+window]\n label = data[i, j:j+window+pred_num]\n pred = base_arima(pd.DataFrame(data_temp), pred_num)\n pred_mat.append(pred)\n label_mat.append(label)\n pred_mat_all.append(np.array(pred_mat))\n label_mat_all.append(np.array(label_mat))", "C:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params 
'\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:566: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n warnings.warn(\"Maximum Likelihood optimization failed to \"\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\nC:\\Users\\10169\\anaconda3\\envs\\dan_traff\\lib\\site-packages\\statsmodels\\base\\model.py:547: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n warnings.warn('Inverting hessian failed, no bse or cov_params '\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d069ed27df121472ca86e5f105b1661276deb41e
21,553
ipynb
Jupyter Notebook
data/get-imagelist.ipynb
linflytang/databook
04ced06c85aff50f5e8b8aef1e94bdec788abff5
[ "Apache-2.0" ]
20
2018-07-27T15:14:44.000Z
2022-03-10T06:44:46.000Z
data/get-imagelist.ipynb
linflytang/databook
04ced06c85aff50f5e8b8aef1e94bdec788abff5
[ "Apache-2.0" ]
1
2020-11-18T22:15:54.000Z
2020-11-18T22:15:54.000Z
data/get-imagelist.ipynb
linflytang/databook
04ced06c85aff50f5e8b8aef1e94bdec788abff5
[ "Apache-2.0" ]
19
2018-07-27T07:42:22.000Z
2021-05-12T01:36:10.000Z
129.837349
1,356
0.677122
[ [ [ "# coding:utf-8\n# 引入requests包和正则表达式包re\nimport requests\nimport re\nimport pprint\nfrom bs4 import BeautifulSoup", "_____no_output_____" ], [ "def load_page(url):\n response=requests.get(url)\n data=response.content\n return data", "_____no_output_____" ], [ "def get_list(url_region):\n # 定义爬取页面的链接\n # 调用load_page函数,下载页面内容\n html = load_page(url_region)\n #pprint.pprint(html)\n \n regx=r'_src=\"https://pan[\\S]*\"' # 定义图片正则表达式\n pattern=re.compile(regx) # 编译表达式构造匹配模式\n get_images=re.findall(pattern,repr(html)) # 在页面中匹配图片链接\n \n print(\"发现图像,共计:\",len(get_images))\n return get_images", "_____no_output_____" ], [ "def save_list(k,image_list):\n with open(\"list.txt\",\"a\") as flist:\n flist.writelines(\"#\",k,\"\\n\")\n for aurl in get_images:\n aurl = aurl[6:53] + \"\\n\"\n pprint.pprint(str(aurl))\n flist.writelines(aurl)", "_____no_output_____" ], [ "# Read region list from file to dict. \nwith open(\"list.txt\", \"w\") as flist:\n flist.writelines(\"#list file for SJZ images.\\n\")\n\n# For each region, get image list.\nwith open('region.txt', 'r') as fregion:\n region_list = eval(fregion.read())\n # print(region_list,\"\\n\")\n \n for k in region_list:\n print(\"Get image list of region: \", k,\",\",region_list[k])\n image_list = get_list(region_list[k])\n save_list(k,image_list)\n print(\"ok.\\n\")", "Get image list of region: 2109-浙江省卫星地图离线包下载 , http://www.rivermap.cn/help/show-2109.html\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d069fc57a26f23d0f1c8ba92c05cd2c7990d74a3
125,953
ipynb
Jupyter Notebook
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
951d09424b93c76093bab51ed6aaa75eb545152e
[ "MIT" ]
null
null
null
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
951d09424b93c76093bab51ed6aaa75eb545152e
[ "MIT" ]
null
null
null
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
951d09424b93c76093bab51ed6aaa75eb545152e
[ "MIT" ]
null
null
null
57.512785
21,872
0.552492
[ [ [ "# STEP 4 - Making DRL PySC2 Agent", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "import sys; sys.path.append('..')", "_____no_output_____" ], [ "### unfortunately, PySC2 uses Abseil, which treats python code as if its run like an app\n# This does not play well with jupyter notebook\n# So we will need to monkeypatch sys.argv\n\n\nimport sys\n#sys.argv = [\"python\", \"--map\", \"AbyssalReef\"]\nsys.argv = [\"python\", \"--map\", \"Simple64\"]", "_____no_output_____" ] ], [ [ "## 0. Runnning 'Agent code' on jupyter notebook ", "_____no_output_____" ] ], [ [ "\n\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run an agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport threading\n\nfrom absl import app\nfrom absl import flags\nfrom future.builtins import range # pylint: disable=redefined-builtin\n\nfrom pysc2 import maps\nfrom pysc2.env import available_actions_printer\nfrom pysc2.env import run_loop\nfrom pysc2.env import sc2_env\nfrom pysc2.lib import point_flag\nfrom pysc2.lib import stopwatch\nfrom pysc2.lib import actions\n\nFLAGS = flags.FLAGS\n\n# because of Abseil's horrible design for running code underneath Colabs\n# We have to pull out this ugly hack from the hat\nif \"flags_defined\" not in globals():\n flags.DEFINE_bool(\"render\", False, \"Whether to render with pygame.\")\n point_flag.DEFINE_point(\"feature_screen_size\", \"84\",\n \"Resolution for screen feature layers.\")\n point_flag.DEFINE_point(\"feature_minimap_size\", \"64\",\n \"Resolution for minimap feature layers.\")\n point_flag.DEFINE_point(\"rgb_screen_size\", None,\n \"Resolution for rendered screen.\")\n point_flag.DEFINE_point(\"rgb_minimap_size\", None,\n \"Resolution for rendered minimap.\")\n flags.DEFINE_enum(\"action_space\", \"RAW\", sc2_env.ActionSpace._member_names_, # pylint: disable=protected-access\n \"Which action space to use. 
Needed if you take both feature \"\n \"and rgb observations.\")\n flags.DEFINE_bool(\"use_feature_units\", False,\n \"Whether to include feature units.\")\n flags.DEFINE_bool(\"use_raw_units\", True,\n \"Whether to include raw units.\")\n flags.DEFINE_integer(\"raw_resolution\", 64, \"Raw Resolution.\")\n flags.DEFINE_bool(\"disable_fog\", True, \"Whether to disable Fog of War.\")\n\n flags.DEFINE_integer(\"max_agent_steps\", 0, \"Total agent steps.\")\n flags.DEFINE_integer(\"game_steps_per_episode\", None, \"Game steps per episode.\")\n flags.DEFINE_integer(\"max_episodes\", 0, \"Total episodes.\")\n flags.DEFINE_integer(\"step_mul\", 8, \"Game steps per agent step.\")\n flags.DEFINE_float(\"fps\", 22.4, \"Frames per second to run the game.\")\n\n #flags.DEFINE_string(\"agent\", \"sc2.agent.BasicAgent.ZergBasicAgent\",\n # \"Which agent to run, as a python path to an Agent class.\")\n #flags.DEFINE_enum(\"agent_race\", \"zerg\", sc2_env.Race._member_names_, # pylint: disable=protected-access\n # \"Agent 1's race.\")\n flags.DEFINE_string(\"agent\", \"TerranRLAgentWithRawActsAndRawObs\",\n \"Which agent to run, as a python path to an Agent class.\")\n flags.DEFINE_enum(\"agent_race\", \"terran\", sc2_env.Race._member_names_, # pylint: disable=protected-access\n \"Agent 1's race.\")\n\n flags.DEFINE_string(\"agent2\", \"Bot\", \"Second agent, either Bot or agent class.\")\n flags.DEFINE_enum(\"agent2_race\", \"random\", sc2_env.Race._member_names_, # pylint: disable=protected-access\n \"Agent 2's race.\")\n flags.DEFINE_enum(\"difficulty\", \"hard\", sc2_env.Difficulty._member_names_, # pylint: disable=protected-access\n \"If agent2 is a built-in Bot, it's strength.\")\n\n flags.DEFINE_bool(\"profile\", False, \"Whether to turn on code profiling.\")\n flags.DEFINE_bool(\"trace\", False, \"Whether to trace the code execution.\")\n flags.DEFINE_integer(\"parallel\", 1, \"How many instances to run in parallel.\")\n\n flags.DEFINE_bool(\"save_replay\", True, \"Whether to save a replay at the end.\")\n\n flags.DEFINE_string(\"map\", None, \"Name of a map to use.\")\n flags.mark_flag_as_required(\"map\")\n\nflags_defined = True\n\ndef run_thread(agent_classes, players, map_name, visualize):\n \"\"\"Run one thread worth of the environment with agents.\"\"\"\n with sc2_env.SC2Env(\n map_name=map_name,\n players=players,\n agent_interface_format=sc2_env.parse_agent_interface_format(\n feature_screen=FLAGS.feature_screen_size,\n feature_minimap=FLAGS.feature_minimap_size,\n rgb_screen=FLAGS.rgb_screen_size,\n rgb_minimap=FLAGS.rgb_minimap_size,\n action_space=FLAGS.action_space,\n use_raw_units=FLAGS.use_raw_units,\n raw_resolution=FLAGS.raw_resolution),\n step_mul=FLAGS.step_mul,\n game_steps_per_episode=FLAGS.game_steps_per_episode,\n disable_fog=FLAGS.disable_fog,\n visualize=visualize) as env:\n #env = available_actions_printer.AvailableActionsPrinter(env)\n agents = [agent_cls() for agent_cls in agent_classes]\n run_loop.run_loop(agents, env, FLAGS.max_agent_steps, FLAGS.max_episodes)\n if FLAGS.save_replay:\n env.save_replay(agent_classes[0].__name__)\n\ndef main(unused_argv):\n \"\"\"Run an agent.\"\"\"\n #stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace\n #stopwatch.sw.trace = FLAGS.trace\n\n map_inst = maps.get(FLAGS.map)\n\n agent_classes = []\n players = []\n\n #agent_module, agent_name = FLAGS.agent.rsplit(\".\", 1)\n #agent_cls = getattr(importlib.import_module(agent_module), agent_name)\n #agent_classes.append(agent_cls)\n agent_classes.append(TerranRLAgentWithRawActsAndRawObs)\n 
players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent_race]))\n\n if map_inst.players >= 2:\n if FLAGS.agent2 == \"Bot\":\n players.append(sc2_env.Bot(sc2_env.Race[FLAGS.agent2_race],\n sc2_env.Difficulty[FLAGS.difficulty]))\n else:\n #agent_module, agent_name = FLAGS.agent2.rsplit(\".\", 1)\n #agent_cls = getattr(importlib.import_module(agent_module), agent_name)\n agent_classes.append(TerranRandomAgent)\n players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent2_race]))\n\n threads = []\n for _ in range(FLAGS.parallel - 1):\n t = threading.Thread(target=run_thread,\n args=(agent_classes, players, FLAGS.map, False))\n threads.append(t)\n t.start()\n\n run_thread(agent_classes, players, FLAGS.map, FLAGS.render)\n\n for t in threads:\n t.join()\n\n if FLAGS.profile:\n pass\n #print(stopwatch.sw)", "_____no_output_____" ] ], [ [ "## 1. Creating a PySC2 Agent with Raw Actions & Observations\n\n![StarCraft2 PySC2 interfaces](./images/StarCraft2_PySC2_interfaces.png)\n\nref : https://on-demand.gputechconf.com/gtc/2018/presentation/s8739-machine-learning-with-starcraft-II.pdf", "_____no_output_____" ], [ "### < PySC2 Interfaces 3가지 종류 >\n\n### 1st, Rendered\n* Decomposed :\n - Screen, minimap, resources, available actions\n* Same control as humans :\n - Pixel coordinates\n - Move camera\n - Select unit/rectangle\n* Great for Deep Learning, but hard\n\n### 2nd, Feature Layer\n* Same actions : still in pixel space\n* Same decomposed observations, but more abstract\n - Orthogonal camera \n* Layers:\n - unit type\n - unit owner\n - selection\n - health\n - unit density\n - etc\n \n### 3rd, Raw\n* List of units and state\n* Control each unit individually in world coordinates\n* Gives all observable state (no camera)\n* Great for scripted agents and programmatic replay analysis", "_____no_output_____" ], [ "### < Raw Actions & Observations 을 사용하는 이유>\n* Raw Actions & Observations 은 world cordinates를 사용하므로 전체 Map을 한번에 관찰하고 Camera를 이동하지 않고도 Map 상의 어느 곳에서도 Action을 취할 수 있는 새로운 형태의 Feature 이다.\n* 이번 과정에 SL(Supervised Learning, 지도학습)을 활용한 학습은 없지만 스타크래프트 2 리플레이를 활용한 SL은 Raw Actions & Observations를 활용한 \"programmatic replay analysis\"가 필요하다.\n* 인간 플레이어를 이긴 DeepMind의 AlphaStar의 주요 변경사항 중의 하나는 Raw Actions & Observations 의 활용이다.", "_____no_output_____" ], [ "### DRL 모델의 성능 추이를 보기위해 Reward의 평균 추이를 이용한다. 이때 단순이동평균 보다는 지수이동평균이 적절하다.\n\n### 지수이동평균(EMA:Exponential Moving Average) 란?\n지수이동평균(Exponential Moving Average)은 과거의 모든 기간을 계산대상으로 하며 최근의 데이타에 더 높은 가중치를 두는 일종의 가중이동평균법이다.\n\n단순이동평균의 계산법에 비하여 원리가 복잡해 보이지만 실제로 이동평균을 산출하는 방법은 Previous Step의 지수이동평균값과 평활계수(smoothing constant) 그리고 당일의 가격만으로 구할 수 있으므로 Previous Step의 지수이동평균값만 구해진다면 오히려 간단한 편이다.\n\n따라서 지수이동평균은 단순이동평균에 비해 몇가지 중요한 강점을 가진다.\n\n첫째는 가장 최근의 Step에 가장 큰 가중치를 둠으로 해서 최근의 Episode들을 잘 반영한다는 점이고, 둘째는 단순이동평균에서와 같이 오래된 데이타를 갑자기 제외하지 않고 천천히 그 영향력을 사라지게 한다는 점이다.\n또한 전 기간의 데이타를 분석대상으로 함으로써 가중이동평균에서 문제되는 특정 기간의 데이타만을 분석대상으로 한다는 단점도 보완하고 있다.\n\n### 지수이동평균(EMA:Exponential Moving Average) 계산\n\n지수이동평균은 가장 최근의 값에 많은 가중치를 부여하고 오래 된 값에는 적은 가중치를 부여한다. 비록 오래 된 값이라고 할지라도 완전히 무시하지는 않고 적게나마 반영시켜 계산한다는 장점이 있다. 단기 변동성을 포착하려는 것이 목적이다.\n\nEMA=Previous Step 지수이동평균+(k∗(Current Step Reward − Previous Step 지수이동평균))\n", "_____no_output_____" ], [ "## 3. Applying Vanilla DQN to a PySC2 Agent\n\n구현된 기능\n\n- Implementing 'Experience Replay' : \n - 'Maximization Bias' 문제를 발생시키는 원인 중 하나인 'Sample간의 시간적 연관성'을 해결하기 위한 방법\n - Online Learning 에서 Batch Learning 으로 학습방법 바뀜 : Online update 는 Batch update 보다 일반적으로 Validation loss 가 더 높게 나타남.\n - Reinforcement Learning for Robots. Using Neural Networks. 
Long -Ji Lin. January 6, 1993. 논문에서 최초로 연구됨 http://isl.anthropomatik.kit.edu/pdf/Lin1993.pdf\n\n- Implementing 'Fixed Q-Target' : \n - 'Moving Q-Target' 문제 해결하기 위한 방법\n - 2015년 Nature 버전 DQN 논문에서 처음 제안됨. https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning \n\n\n구현되지 않은 기능\n\n- Implementing 'Sensory Input Feature-Extraction' :\n - 게임의 Raw Image 를 Neural Net에 넣기 위한 Preprocessing(전처리) 과정\n - Raw Image 의 Sequence중 '최근 4개의 이미지'(과거 정보)를 하나의 새로운 State로 정의하여 non-MDP를 MDP 문제로 바꾸는 Preprocessing 과정 \n - CNN(합성곱 신경망)을 활용한 '차원의 저주' 극복", "_____no_output_____" ] ], [ [ "import random\nimport time\nimport math\nimport os.path\n\nimport numpy as np\nimport pandas as pd\nfrom collections import deque\nimport pickle\n\nfrom pysc2.agents import base_agent\nfrom pysc2.env import sc2_env\nfrom pysc2.lib import actions, features, units, upgrades\nfrom absl import app\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom skdrl.pytorch.model.mlp import NaiveMultiLayerPerceptron\nfrom skdrl.common.memory.memory import ExperienceReplayMemory", "_____no_output_____" ], [ "DATA_FILE_QNET = 'rlagent_with_vanilla_dqn_qnet'\nDATA_FILE_QNET_TARGET = 'rlagent_with_vanilla_dqn_qnet_target'\nSCORE_FILE = 'rlagent_with_vanilla_dqn_score'\n\nscores = [] # list containing scores from each episode\nscores_window = deque(maxlen=100) # last 100 scores\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nwriter = SummaryWriter()", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\n\n\nclass NaiveMultiLayerPerceptron(nn.Module):\n\n def __init__(self,\n input_dim: int,\n output_dim: int,\n num_neurons: list = [64, 32],\n hidden_act_func: str = 'ReLU',\n out_act_func: str = 'Identity'):\n super(NaiveMultiLayerPerceptron, self).__init__()\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.num_neurons = num_neurons\n self.hidden_act_func = getattr(nn, hidden_act_func)()\n self.out_act_func = getattr(nn, out_act_func)()\n\n input_dims = [input_dim] + num_neurons\n output_dims = num_neurons + [output_dim]\n\n self.layers = nn.ModuleList()\n for i, (in_dim, out_dim) in enumerate(zip(input_dims, output_dims)):\n is_last = True if i == len(input_dims) - 1 else False\n self.layers.append(nn.Linear(in_dim, out_dim))\n if is_last:\n self.layers.append(self.out_act_func)\n else:\n self.layers.append(self.hidden_act_func)\n\n def forward(self, xs):\n for layer in self.layers:\n xs = layer(xs)\n return xs\n\n\nif __name__ == '__main__':\n net = NaiveMultiLayerPerceptron(10, 1, [20, 12], 'ReLU', 'Identity')\n print(net)\n\n xs = torch.randn(size=(12, 10))\n ys = net(xs)\n print(ys)\n", "NaiveMultiLayerPerceptron(\n (hidden_act_func): ReLU()\n (out_act_func): Identity()\n (layers): ModuleList(\n (0): Linear(in_features=10, out_features=20, bias=True)\n (1): ReLU()\n (2): Linear(in_features=20, out_features=12, bias=True)\n (3): ReLU()\n (4): Linear(in_features=12, out_features=1, bias=True)\n (5): Identity()\n )\n)\ntensor([[-0.3301],\n [-0.4388],\n [-0.4118],\n [-0.3924],\n [-0.3717],\n [-0.4231],\n [-0.4617],\n [-0.3912],\n [-0.3118],\n [-0.3619],\n [-0.3262],\n [-0.4291]], grad_fn=<AddmmBackward>)\n" ] ], [ [ "### Q-update 공식\n\n#### 1. Online Q-learning\n![Online Q-learning](./images/q-update-experience-replay.png)\n\n#### 2. Online Q-learning with Function Approximation\n![Online Q-learning with Function Approximation](./images/q-update-function-approximation.png)\n\n#### 3. 
Batch Q-learning with Function Approximation & Experience Replay\n![Batch Q-learning with Function Approximation & Experience Replay](./images/q-update-online.png)", "_____no_output_____" ] ], [ [ "from random import sample\n\n\nclass ExperienceReplayMemory:\n def __init__(self, max_size):\n # deque object that we've used for 'episodic_memory' is not suitable for random sampling\n # here, we instead use a fix-size array to implement 'buffer'\n self.buffer = [None] * max_size\n self.max_size = max_size\n self.index = 0\n self.size = 0\n\n def push(self, obj):\n self.buffer[self.index] = obj\n self.size = min(self.size + 1, self.max_size)\n self.index = (self.index + 1) % self.max_size\n\n def sample(self, batch_size):\n indices = sample(range(self.size), batch_size)\n return [self.buffer[index] for index in indices]\n\n def __len__(self):\n return self.size", "_____no_output_____" ] ], [ [ "### Moving target problem\n\n#### 1. Function Approximation을 사용하지 않는 Q-learning 의 경우 : 특정한 Q(s,a) update가 다른 Q(s,a)에 영향을 주지 않는다.\n![Moving target Q-learning](./images/moving-target_q-learing_case.png)\n\n#### 2. Function Approximation을 사용하는 Q-learnig 의 경우 : 특정한 Q(s,a) update가 다른 Q(s,a)에 영향을 준다.\n![Moving target Q-learning with Function Approximation](./images/moving-target_q-learing_with_function_approximation_case.png)\n\n### Moving target 문제는 Deep Neural Network를 사용하는 Function Approximation 기법인 경우 심해지는 경향성이 있음.\n\nimage ref : Fast Campus RL online courese", "_____no_output_____" ], [ "### `nn.SmoothL1Loss()` = Huber loss 란?\n\nMean-squared Error (MSE) Loss 는 데이터의 outlier에 매우 취약하다.\n어떤 이유로 타겟하는 레이블 y (이 경우는 q-learning target)이 noisy 할때를 가정하면, 잘못된 y 값을 맞추기 위해 파라미터들이 너무 sensitive 하게 움직이게 된다.\n\n이런 현상은 q-learning 의 학습초기에 매우 빈번해 나타난다. 이러한 문제를 조금이라도 완화하기 위해서 outlier에 덜 민감한 Huber loss 함수를 사용한다.\n\n### SmoothL1Loss (aka Huber loss)\n\n$$loss(x,y) = \\frac{1}{n}\\sum_i z_i$$\n$|x_i - y_i| <1$ 일때,\n$$z_i = 0.5(x_i - y_i)^2$$\n$|x_i - y_i| \\geq1$ 일때,\n$$z_i = |x_i - y_i|-0.5$$\n\nref : https://pytorch.org/docs/master/generated/torch.nn.SmoothL1Loss.html", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\n\nclass DQN(nn.Module):\n\n def __init__(self,\n state_dim: int,\n action_dim: int,\n qnet: nn.Module,\n qnet_target: nn.Module,\n lr: float,\n gamma: float,\n epsilon: float):\n \"\"\"\n :param state_dim: input state dimension\n :param action_dim: action dimension\n :param qnet: main q network\n :param qnet_target: target q network\n :param lr: learning rate\n :param gamma: discount factor of MDP\n :param epsilon: E-greedy factor\n \"\"\"\n\n super(DQN, self).__init__()\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.qnet = qnet\n self.lr = lr\n self.gamma = gamma\n self.opt = torch.optim.Adam(params=self.qnet.parameters(), lr=lr)\n self.register_buffer('epsilon', torch.ones(1) * epsilon)\n\n # target network related\n qnet_target.load_state_dict(qnet.state_dict())\n self.qnet_target = qnet_target\n self.criteria = nn.SmoothL1Loss()\n\n def choose_action(self, state):\n qs = self.qnet(state)\n #prob = np.random.uniform(0.0, 1.0, 1)\n #if torch.from_numpy(prob).float() <= self.epsilon: # random\n if random.random() <= self.epsilon: # random\n action = np.random.choice(range(self.action_dim))\n else: # greedy\n action = qs.argmax(dim=-1)\n return int(action)\n\n def learn(self, state, action, reward, next_state, done):\n s, a, r, ns = state, action, reward, next_state\n# print(\"state: \", s)\n# print(\"action: \", a)\n# print(\"reward: \", 
reward)\n# print(\"next_state: \", ns)\n \n\n # compute Q-Learning target with 'target network'\n with torch.no_grad():\n q_max, _ = self.qnet_target(ns).max(dim=-1, keepdims=True)\n q_target = r + self.gamma * q_max * (1 - done)\n\n q_val = self.qnet(s).gather(1, a)\n loss = self.criteria(q_val, q_target)\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n\ndef prepare_training_inputs(sampled_exps, device='cpu'):\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n for sampled_exp in sampled_exps:\n states.append(sampled_exp[0])\n actions.append(sampled_exp[1])\n rewards.append(sampled_exp[2])\n next_states.append(sampled_exp[3])\n dones.append(sampled_exp[4])\n\n states = torch.cat(states, dim=0).float().to(device)\n actions = torch.cat(actions, dim=0).to(device)\n rewards = torch.cat(rewards, dim=0).float().to(device)\n next_states = torch.cat(next_states, dim=0).float().to(device)\n dones = torch.cat(dones, dim=0).float().to(device)\n return states, actions, rewards, next_states, dones", "_____no_output_____" ] ], [ [ "# Action 함수 정의", "_____no_output_____" ] ], [ [ "class TerranAgentWithRawActsAndRawObs(base_agent.BaseAgent):\n # actions 추가 및 함수 정의(hirerachy하게)\n \n actions = (\"do_nothing\",\n \"train_scv\",\n \"harvest_minerals\",\n \"harvest_gas\",\n \"build_commandcenter\",\n \n \"build_refinery\",\n \"build_supply_depot\",\n \"build_barracks\",\n \"train_marine\",\n \n \"build_factorys\",\n \"build_techlab_factorys\",\n \"train_tank\",\n \n \"build_armorys\",\n \n \"build_starports\",\n \"build_techlab_starports\",\n \"train_banshee\",\n \n \"attack\",\n \"attack_all\",\n \n \"tank_control\"\n )\n\n\n \n def unit_type_is_selected(self, obs, unit_type):\n if (len(obs.observation.single_select) > 0 and\n obs.observation.single_select[0].unit_type == unit_type):\n return True\n\n if (len(obs.observation.multi_select) > 0 and\n obs.observation.multi_select[0].unit_type == unit_type):\n return True\n\n return False\n\n def get_my_units_by_type(self, obs, unit_type):\n if unit_type == units.Neutral.VespeneGeyser: # 가스 일 때만\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type]\n \n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.SELF]\n\n def get_enemy_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n def get_my_completed_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.SELF]\n\n def get_enemy_completed_units_by_type(self, obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n def get_distances(self, obs, units, xy):\n units_xy = [(unit.x, unit.y) for unit in units]\n return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)\n\n def step(self, obs):\n super(TerranAgentWithRawActsAndRawObs, self).step(obs)\n if obs.first():\n command_center = self.get_my_units_by_type(\n obs, units.Terran.CommandCenter)[0]\n self.base_top_left = (command_center.x < 32)\n self.top_left_gas_xy = [(14, 25), (21,19), (46,23), (39,16)]\n self.bottom_right_gas_xy = [(44, 43), (37,50), (12,46), (19,53)]\n \n \n self.cloaking_flag = 1\n 
\n self.TerranVehicleWeaponsLevel1 = False\n self.TerranVehicleWeaponsLevel2 = False\n self.TerranVehicleWeaponsLevel3 = False\n \n\n def do_nothing(self, obs):\n return actions.RAW_FUNCTIONS.no_op()\n \n def train_scv(self, obs):\n completed_commandcenterses = self.get_my_completed_units_by_type(\n obs, units.Terran.CommandCenter)\n \n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (len(completed_commandcenterses) > 0 and obs.observation.player.minerals >= 100\n and len(scvs) < 35):\n commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)\n \n ccs =[commandcenter for commandcenter in commandcenters if commandcenter.assigned_harvesters < 18]\n \n if ccs:\n ccs = ccs[0]\n if ccs.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_SCV_quick(\"now\", ccs.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n def harvest_minerals(self, obs):\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter) # 최적 자원 할당 유닛 구현\n \n cc = [commandcenter for commandcenter in commandcenters if commandcenter.assigned_harvesters < 18]\n \n if cc:\n cc = cc[0]\n\n idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n\n if len(idle_scvs) > 0 and cc.assigned_harvesters < 18:\n\n mineral_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.BattleStationMineralField,\n units.Neutral.BattleStationMineralField750,\n units.Neutral.LabMineralField,\n units.Neutral.LabMineralField750,\n units.Neutral.MineralField,\n units.Neutral.MineralField750,\n units.Neutral.PurifierMineralField,\n units.Neutral.PurifierMineralField750,\n units.Neutral.PurifierRichMineralField,\n units.Neutral.PurifierRichMineralField750,\n units.Neutral.RichMineralField,\n units.Neutral.RichMineralField750\n ]]\n scv = random.choice(idle_scvs)\n distances = self.get_distances(obs, mineral_patches, (scv.x, scv.y))\n mineral_patch = mineral_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, mineral_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n \n def harvest_gas(self, obs):\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n refs = self.get_my_units_by_type(obs, units.Terran.Refinery)\n \n refs = [refinery for refinery in refs if refinery.assigned_harvesters < 3]\n \n if refs:\n ref = refs[0]\n if len(scvs) > 0 and ref.ideal_harvesters:\n scv = random.choice(scvs)\n distances = self.get_distances(obs, refs, (scv.x, scv.y))\n ref = refs[np.argmin(distances)]\n\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, ref.tag)\n \n return actions.RAW_FUNCTIONS.no_op()\n \n def build_commandcenter(self,obs):\n commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter)\n \n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if len(commandcenters) == 0 and obs.observation.player.minerals >= 400 and len(scvs) > 0:\n # 본진 commandcenter가 파괴된 경우\n ccs_xy = (19, 23) if self.base_top_left else (39,45)\n distances = self.get_distances(obs, scvs, ccs_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(\n \"now\", scv.tag, ccs_xy)\n \n if ( len(commandcenters) < 2 and obs.observation.player.minerals >= 400 and\n len(scvs) > 0):\n ccs_xy = (41, 21) if self.base_top_left else (17, 48)\n \n if len(commandcenters) == 1 and ( (commandcenters[0].x,commandcenters[0].y) == (41,21) or\n (commandcenters[0].x,commandcenters[0].y) == (17,48)):\n # 본진 commandcenter가 파괴된 경우\n 
ccs_xy = (19, 23) if self.base_top_left else (39,45)\n \n distances = self.get_distances(obs, scvs, ccs_xy)\n scv = scvs[np.argmin(distances)]\n\n return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(\n \"now\", scv.tag, ccs_xy)\n return actions.RAW_FUNCTIONS.no_op()\n \n ################################################################################################\n ####################################### refinery ###############################################\n \n def build_refinery(self,obs):\n refinerys = self.get_my_units_by_type(obs,units.Terran.Refinery)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (obs.observation.player.minerals >= 100 and\n len(scvs) > 0):\n gas = self.get_my_units_by_type(obs, units.Neutral.VespeneGeyser)[0]\n \n if self.base_top_left:\n gases = self.top_left_gas_xy\n else:\n gases = self.bottom_right_gas_xy\n \n rc = np.random.choice([0,1,2,3])\n gas_xy = gases[rc]\n if (gas.x, gas.y) == gas_xy:\n distances = self.get_distances(obs, scvs, gas_xy)\n scv = scvs[np.argmin(distances)]\n\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, gas.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n def build_supply_depot(self, obs):\n supply_depots = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n \n if (obs.observation.player.minerals >= 100 and\n len(scvs) > 0 and free_supply < 8):\n \n ccs = self.get_my_units_by_type(obs, units.Terran.CommandCenter)\n if ccs:\n for cc in ccs:\n cc_x, cc_y = cc.x, cc.y\n \n rand1,rand2 = random.randint(0,10),random.randint(-10,0)\n supply_depot_xy = (cc_x + rand1, cc_y + rand2) if self.base_top_left else (cc_x - rand1, cc_y - rand2)\n if 0 < supply_depot_xy[0] < 64 and 0 < supply_depot_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n \n \n distances = self.get_distances(obs, scvs, supply_depot_xy)\n scv = scvs[np.argmin(distances)]\n \n return actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(\n \"now\", scv.tag, supply_depot_xy)\n \n return actions.RAW_FUNCTIONS.no_op()\n\n def build_barracks(self, obs):\n completed_supply_depots = self.get_my_completed_units_by_type(\n obs, units.Terran.SupplyDepot)\n barrackses = self.get_my_units_by_type(obs, units.Terran.Barracks)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (len(completed_supply_depots) > 0 and\n obs.observation.player.minerals >= 150 and len(scvs) > 0 and\n len(barrackses)< 3):\n \n brks = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)\n \n completed_command_center = self.get_my_completed_units_by_type(\n obs, units.Terran.CommandCenter)\n \n if len(barrackses) >= 1 and len(completed_command_center) == 1:\n # double commands\n \n commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n\n if ( len(commandcenters) < 2 and obs.observation.player.minerals >= 400 and\n len(scvs) > 0):\n ccs_xy = (41, 21) if self.base_top_left else (17, 48)\n\n distances = self.get_distances(obs, scvs, ccs_xy)\n scv = scvs[np.argmin(distances)]\n\n return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(\n \"now\", scv.tag, ccs_xy)\n \n if brks:\n for brk in brks:\n brk_x,brk_y = brk.x, brk.y\n \n\n rand1, rand2 = random.randint(1,3),random.randint(1,3)\n barracks_xy = (brk_x + rand1, brk_y + rand2) if self.base_top_left else (brk_x - rand1, brk_y - rand2)\n if 0 < barracks_xy[0] < 64 and 0 < 
barracks_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n \n\n distances = self.get_distances(obs, scvs, barracks_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Barracks_pt(\n \"now\", scv.tag, barracks_xy)\n \n \n \n return actions.RAW_FUNCTIONS.no_op()\n\n def train_marine(self, obs):\n \n ################# 아머리가 완성된 후 부터 토르생산 ######################\n completed_barrackses = self.get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n \n completed_factorys = self.get_my_completed_units_by_type(\n obs, units.Terran.Factory)\n \n completed_armorys = self.get_my_completed_units_by_type(\n obs, units.Terran.Armory)\n \n marines = self.get_my_units_by_type(obs, units.Terran.Marine)\n \n \n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n \n \n if (len(completed_barrackses) > 0 and obs.observation.player.minerals >= 100\n and free_supply > 0 and len(completed_armorys) == 0):\n barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)[0]\n if barracks.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barracks.tag)\n \n elif free_supply > 0 and len(completed_factorys) > 0 and len(completed_armorys) > 0:\n factory = completed_factorys[0]\n if factory.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Thor_quick(\"now\", factory.tag)\n \n return actions.RAW_FUNCTIONS.no_op()\n \n ###############################################################################################\n ###################################### Factorys ###############################################\n ###############################################################################################\n \n def build_factorys(self, obs):\n completed_barrackses = self.get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n \n factorys = self.get_my_units_by_type(obs, units.Terran.Factory)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n ref = self.get_my_completed_units_by_type(obs,units.Terran.Refinery)\n # print(\"gas: \", obs.observation.player.minerals)\n # print(\"gas: \", obs.observation.player.gas)\n if (len(completed_barrackses) > 0 and\n obs.observation.player.minerals >= 200 and\n len(factorys) < 3 and\n len(scvs) > 0):\n \n if len(factorys) >= 1 and len(ref) < 4: # 가스부족시 가스 건설\n refinerys = self.get_my_units_by_type(obs,units.Terran.Refinery)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n\n if (obs.observation.player.minerals >= 100 and\n len(scvs) > 0):\n gas = self.get_my_units_by_type(obs, units.Neutral.VespeneGeyser)[0]\n\n if self.base_top_left:\n gases = self.top_left_gas_xy\n else:\n gases = self.bottom_right_gas_xy\n\n rc = np.random.choice([0,1,2,3])\n gas_xy = gases[rc]\n if (gas.x, gas.y) == gas_xy:\n distances = self.get_distances(obs, scvs, gas_xy)\n scv = scvs[np.argmin(distances)]\n\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, gas.tag)\n \n if len(factorys) >= 1:\n rand1 = random.randint(-5,5)\n fx, fy = factorys[0].x, factorys[0].y\n factorys_xy = (fx + rand1, fy + rand1) if self.base_top_left else (fx - rand1, fy - rand1)\n \n else:\n rand1, rand2 = random.randint(-2,2), random.randint(-2,2) # x, y\n factorys_xy = (39 + rand1, 25 + rand2) if self.base_top_left else (17 - rand1, 40 - rand2)\n\n \n if 0 < factorys_xy[0] < 64 and 0 < factorys_xy[1] < 64 and factorys_xy != (17,48) and factorys_xy != (41,21):\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n\n\n distances = self.get_distances(obs, scvs, factorys_xy)\n scv 
= scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Factory_pt(\n \"now\", scv.tag, factorys_xy)\n return actions.RAW_FUNCTIONS.no_op()\n \n def build_techlab_factorys(self, obs):\n completed_factorys = self.get_my_completed_units_by_type(\n obs, units.Terran.Factory)\n\n \n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (len(completed_factorys) > 0 and \n obs.observation.player.minerals >= 200):\n \n ftrs = self.get_my_units_by_type(obs, units.Terran.Factory)\n \n if ftrs:\n for ftr in ftrs:\n ftr_x,ftr_y = ftr.x, ftr.y\n \n factorys_xy = (ftr_x,ftr_y)\n if 0 < factorys_xy[0] < 64 and 0 < factorys_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n\n return actions.RAW_FUNCTIONS.Build_TechLab_Factory_pt(\n \"now\", ftr.tag, factorys_xy)\n \n return actions.RAW_FUNCTIONS.no_op()\n \n def train_tank(self, obs):\n completed_factorytechlab = self.get_my_completed_units_by_type(\n obs, units.Terran.FactoryTechLab)\n \n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n \n if (len(completed_factorytechlab) > 0 and obs.observation.player.minerals >= 200):\n \n factorys = self.get_my_units_by_type(obs, units.Terran.Factory)[0]\n \n if factorys.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_SiegeTank_quick(\"now\", factorys.tag)\n return actions.RAW_FUNCTIONS.no_op()\n \n ###############################################################################\n ############################ Build Armory ##################################\n \n def build_armorys(self, obs):\n completed_factory = self.get_my_completed_units_by_type(\n obs, units.Terran.Factory)\n \n armorys = self.get_my_units_by_type(obs, units.Terran.Armory)\n \n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (len(completed_factory) > 0 and\n obs.observation.player.minerals >= 200 and\n len(armorys) < 2 and\n len(scvs) > 0):\n \n rand1, rand2 = random.randint(-2,2),random.randint(-2,2)\n armorys_xy = (36 + rand1, 20 + rand2) if self.base_top_left else ( 20 - rand1, 50 - rand2)\n if 0 < armorys_xy[0] < 64 and 0 < armorys_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n\n\n distances = self.get_distances(obs, scvs, armorys_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Armory_pt(\n \"now\", scv.tag, armorys_xy)\n\n elif (len(completed_factory) > 0 and\n obs.observation.player.minerals >= 200 and\n 1 <= len(armorys) and\n len(scvs) > 0):\n # armory upgrade 추가\n armory = armorys[0]\n \n armory_xy = (armory.x, armory.y)\n #cloak_field = self.get_my_units_by_type(obs, upgrades.Upgrades.CloakingField)[0]\n if self.TerranVehicleWeaponsLevel1 == False:\n self.TerranVehicleWeaponsLevel1 = True\n return actions.RAW_FUNCTIONS.Research_TerranVehicleWeapons_quick(\"now\", armory.tag)\n \n elif self.TerranVehicleWeaponsLevel1 == True and self.TerranVehicleWeaponsLevel2 == False:\n self.TerranVehicleWeaponsLevel2 = True\n return actions.RAW_FUNCTIONS.Research_TerranVehicleWeaponsLevel2_quick(\"now\", armory.tag)\n \n elif self.TerranVehicleWeaponsLevel1 == True and self.TerranVehicleWeaponsLevel2 == True and self.TerranVehicleWeaponsLevel3 == False:\n self.TerranVehicleWeaponsLevel3 = True\n return actions.RAW_FUNCTIONS.Research_TerranVehicleWeaponsLevel3_quick(\"now\", armory.tag)\n \n \n \n return actions.RAW_FUNCTIONS.no_op()\n \n \n ############################################################################################\n #################################### StarPort 
##############################################\n def build_starports(self, obs):\n completed_factorys = self.get_my_completed_units_by_type(\n obs, units.Terran.Factory)\n \n starports = self.get_my_units_by_type(obs, units.Terran.Starport)\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n \n if (len(completed_factorys) > 0 and\n obs.observation.player.minerals >= 200 and \n len(starports) < 1 and\n len(scvs) > 0):\n \n # stp_x,stp_y = (22,22), (36,46) # minerals기준 중앙부쪽 좌표\n \n if len(starports) >= 1:\n rand1 = random.randint(-5,5)\n sx, sy = starports[0].x, starports[0].y\n starport_xy = (sx + rand1, sy + rand1) if self.base_top_left else (sx - rand1, sy - rand1)\n else:\n rand1, rand2 = random.randint(-5,5),random.randint(-5,5)\n starport_xy = (22 + rand1, 22 + rand2) if self.base_top_left else (36 - rand1, 46 - rand2)\n\n \n if 0 < starport_xy[0] < 64 and 0 < starport_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n\n distances = self.get_distances(obs, scvs, starport_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Starport_pt(\n \"now\", scv.tag, starport_xy)\n \n ####################### 스타포트 건설 후 팩토리 증설 #########################\n elif (len(starports) >= 1 and obs.observation.player.minerals >= 200 and\n len(completed_factorys) < 4 and len(scvs) > 0):\n \n if len(starports) >= 1:\n rand1 = random.randint(-5,5)\n sx, sy = starports[0].x, starports[0].y\n factory_xy = (sx + rand1, sy + rand1) if self.base_top_left else (sx - rand1, sy - rand1)\n else:\n rand1, rand2 = random.randint(-5,5),random.randint(-5,5)\n factory_xy = (22 + rand1, 22 + rand2) if self.base_top_left else (36 - rand1, 46 - rand2)\n\n \n if 0 < factory_xy[0] < 64 and 0 < factory_xy[1] < 64:\n pass\n else:\n return actions.RAW_FUNCTIONS.no_op()\n\n distances = self.get_distances(obs, scvs, factory_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Factory_pt(\n \"now\", scv.tag, factory_xy)\n \n else:\n completed_barrackses = self.get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n marines = self.get_my_units_by_type(obs, units.Terran.Marine)\n\n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n\n if (len(completed_barrackses) > 0 and obs.observation.player.minerals >= 100\n and free_supply > 0):\n barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)[0]\n if barracks.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barracks.tag)\n \n \n \n \n return actions.RAW_FUNCTIONS.no_op()\n \n def build_techlab_starports(self, obs):\n completed_starports = self.get_my_completed_units_by_type(\n obs, units.Terran.Starport)\n \n completed_starport_techlab = self.get_my_completed_units_by_type(\n obs, units.Terran.StarportTechLab)\n \n if (len(completed_starports) < 3 and \n obs.observation.player.minerals >= 200):\n stps = self.get_my_units_by_type(obs, units.Terran.Starport)\n \n if stps:\n for stp in stps:\n stp_x,stp_y = stp.x, stp.y\n \n starport_xy = (stp_x,stp_y)\n\n return actions.RAW_FUNCTIONS.Build_TechLab_Starport_pt(\n \"now\", stp.tag, starport_xy)\n \n ############ Cloak upgrade #########################\n if len(completed_starport_techlab) > 0 and self.cloaking_flag:\n # self.cloaking_flag = 0\n cloaking = upgrades.Upgrades.CloakingField\n \n stp_techlab = self.get_my_units_by_type(obs, units.Terran.StarportTechLab)\n if stp_techlab:\n stp_tech_xy = (stp_techlab[0].x, stp_techlab[0].y)\n cloak_field = self.get_my_units_by_type(obs, 
upgrades.Upgrades.CloakingField)[0]\n \n# print(\"stp_tech_xy: \", stp_tech_xy)\n# print(\"cloaking upgrade: \",cloak_field.tag)\n return actions.FUNCTIONS.Research_BansheeCloakingField_quick(\"now\", cloaking )\n \n return actions.RAW_FUNCTIONS.no_op()\n \n def train_banshee(self, obs):\n completed_starporttechlab = self.get_my_completed_units_by_type(\n obs, units.Terran.StarportTechLab)\n \n ravens = self.get_my_units_by_type(obs, units.Terran.Raven)\n \n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n \n \n if (len(completed_starporttechlab) > 0 and obs.observation.player.minerals >= 200\n and free_supply > 3):\n \n starports = self.get_my_units_by_type(obs, units.Terran.Starport)[0]\n \n ############################### cloaking detecting을 위한 Raven 생산 #######################\n if starports.order_length < 2 and len(ravens) < 3 :\n return actions.RAW_FUNCTIONS.Train_Raven_quick(\"now\", starports.tag)\n \n #########################################################################################\n \n if starports.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Banshee_quick(\"now\", starports.tag)\n return actions.RAW_FUNCTIONS.no_op()\n \n \n \n \n ############################################################################################\n \n def attack(self, obs):\n marines = self.get_my_units_by_type(obs, units.Terran.Marine)\n if 20 < len(marines):\n \n flag = random.randint(0,2)\n if flag == 1:\n attack_xy = (38, 44) if self.base_top_left else (19, 23)\n else:\n attack_xy = (16, 45) if self.base_top_left else (42, 19)\n \n \n distances = self.get_distances(obs, marines, attack_xy)\n marine = marines[np.argmax(distances)]\n #marine = marines\n \n x_offset = random.randint(-5, 5)\n y_offset = random.randint(-5, 5)\n return actions.RAW_FUNCTIONS.Attack_pt(\n \"now\", marine.tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n else:\n barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)\n if len(barracks) > 0:\n barracks = barracks[0]\n if barracks.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barracks.tag)\n\n return actions.RAW_FUNCTIONS.no_op()\n \n def attack_all(self,obs):\n # 추가 유닛 생길 때 마다 추가\n marines = self.get_my_units_by_type(obs, units.Terran.Marine)\n tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTank)\n banshees = self.get_my_units_by_type(obs, units.Terran.Banshee)\n raven = self.get_my_units_by_type(obs, units.Terran.Raven)\n thor = self.get_my_units_by_type(obs, units.Terran.Thor)\n \n sieged_tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTankSieged)\n total_tanks = tanks + sieged_tanks\n \n all_units = marines + total_tanks + banshees + raven + thor\n \n if 25 < len(all_units):\n \n flag = random.randint(0,1000)\n \n if flag%4 == 0:\n attack_xy = (39, 45) if self.base_top_left else (19, 23)\n elif flag%4 == 1:\n \n attack_xy = (39, 45) if self.base_top_left else (19, 23)\n \n if len(tanks) > 0:\n distances = self.get_distances(obs, tanks, attack_xy)\n tank = tanks[np.argmax(distances)]\n x_offset = random.randint(-1, 1)\n y_offset = random.randint(-1, 1)\n return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(\n \"now\", tank.tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n \n elif flag%4 == 2:\n attack_xy = (39, 45) if self.base_top_left else (19, 23)\n #### siegeMode 제거 ####\n if len(total_tanks) > 0:\n all_tanks_tag = [tank.tag for tank in total_tanks]\n\n return actions.RAW_FUNCTIONS.Morph_Unsiege_quick(\n \"now\", all_tanks_tag)\n \n else:\n 
attack_xy = (17, 48) if self.base_top_left else (41, 21)\n \n x_offset = random.randint(-5, 5)\n y_offset = random.randint(-5, 5)\n \n all_tag = [unit.tag for unit in all_units]\n \n return actions.RAW_FUNCTIONS.Attack_pt(\n \"now\", all_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n else:\n flag = random.randint(0,1000)\n if flag%4 == 0:\n attack_xy = (35, 25) if self.base_top_left else (25, 40)\n elif flag%4 == 1:\n attack_xy = (35, 25) if self.base_top_left else (25, 40)\n\n if len(tanks) > 0:\n tanks_tag = [tank.tag for tank in tanks]\n x_offset = random.randint(-1, 1)\n y_offset = random.randint(-1, 1)\n return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(\n \"now\", tanks_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n \n \n elif flag%4 == 2:\n attack_xy = (35, 25) if self.base_top_left else (25, 40)\n \n else:\n attack_xy = (30, 25) if self.base_top_left else (33, 40)\n \n x_offset = random.randint(-1, 1)\n y_offset = random.randint(-1, 1)\n \n \n \n all_units = marines + banshees + raven + thor\n all_tag = [unit.tag for unit in all_units]\n if all_tag:\n return actions.RAW_FUNCTIONS.Attack_pt(\n \"now\", all_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n \n return actions.RAW_FUNCTIONS.no_op()\n \n ###################################################################################\n ############################### Unit Controls #####################################\n \n def tank_control(self, obs):\n tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTank)\n sieged_tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTankSieged)\n \n total_tanks = tanks + sieged_tanks\n \n if len(total_tanks) < 8:\n \n if tanks:\n \n attack_xy = (40, 25) if self.base_top_left else (25, 40)\n\n distances = self.get_distances(obs, tanks, attack_xy)\n distances.sort()\n \n tank_tag = [t.tag for t in tanks[:4]]\n\n x_offset = random.randint(-5, 5)\n y_offset = random.randint(-5, 5)\n return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(\n \"now\", tank_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))\n else:\n #### siegeMode 제거 ####\n all_tanks_tag = [tank.tag for tank in total_tanks]\n return actions.RAW_FUNCTIONS.Morph_Unsiege_quick(\n \"now\", all_tanks_tag)\n \n return actions.RAW_FUNCTIONS.no_op()\n \n ", "_____no_output_____" ], [ "class TerranRandomAgent(TerranAgentWithRawActsAndRawObs):\n def step(self, obs):\n super(TerranRandomAgent, self).step(obs)\n action = random.choice(self.actions)\n \n return getattr(self, action)(obs)", "_____no_output_____" ] ], [ [ "### Hyperparameter\n\n하이퍼파라미터는 심층강화학습 알고리즘에서 성능에 매우 큰 영향을 미칩니다.\n이 실험에 쓰인 하이퍼파라미터는 https://github.com/chucnorrisful/dqn 실험에서 제안된 값들을 참고하였습니다.\n\n\n- self.s_dim = 21\n- self.a_dim = 6\n\n- self.lr = 1e-4 * 1\n- self.batch_size = 32\n- self.gamma = 0.99\n- self.memory_size = 200000\n- self.eps_max = 1.0\n- self.eps_min = 0.01\n- self.epsilon = 1.0\n- self.init_sampling = 4000\n- self.target_update_interval = 10\n\n- self.epsilon = max(self.eps_min, self.eps_max - self.eps_min * (self.episode_count / 50))\n\n\n![Winning rate graph](./images/rlagent_with_vanilla_dqn_score-Terran-Terran-495_Eps.png)", "_____no_output_____" ] ], [ [ "class TerranRLAgentWithRawActsAndRawObs(TerranAgentWithRawActsAndRawObs):\n def __init__(self):\n super(TerranRLAgentWithRawActsAndRawObs, self).__init__()\n\n self.s_dim = 21\n self.a_dim = 19\n \n self.lr = 1e-4 * 1\n self.batch_size = 32\n self.gamma = 0.99\n self.memory_size = 200000\n self.eps_max = 1.0\n self.eps_min = 0.01\n self.epsilon = 1.0\n 
self.init_sampling = 4000\n self.target_update_interval = 10\n\n self.data_file_qnet = DATA_FILE_QNET\n self.data_file_qnet_target = DATA_FILE_QNET_TARGET\n self.score_file = SCORE_FILE\n \n self.qnetwork = NaiveMultiLayerPerceptron(input_dim=self.s_dim,\n output_dim=self.a_dim,\n num_neurons=[128],\n hidden_act_func='ReLU',\n out_act_func='Identity').to(device)\n \n self.qnetwork_target = NaiveMultiLayerPerceptron(input_dim=self.s_dim,\n output_dim=self.a_dim,\n num_neurons=[128],\n hidden_act_func='ReLU',\n out_act_func='Identity').to(device)\n \n ############################################ qnet 로드하면 이전 모델이라 학습모델 인풋 아웃풋차원이 바뀜 #########\n if os.path.isfile(self.data_file_qnet + '.pt'):\n self.qnetwork.load_state_dict(torch.load(self.data_file_qnet + '.pt'))\n \n if os.path.isfile(self.data_file_qnet_target + '.pt'):\n self.qnetwork_target.load_state_dict(torch.load(self.data_file_qnet_target + '.pt'))\n \n # initialize target network same as the main network.\n self.qnetwork_target.load_state_dict(self.qnetwork.state_dict())\n\n self.dqn = DQN(state_dim=self.s_dim,\n action_dim=self.a_dim,\n qnet=self.qnetwork,\n qnet_target=self.qnetwork_target,\n lr=self.lr,\n gamma=self.gamma,\n epsilon=self.epsilon).to(device)\n \n self.memory = ExperienceReplayMemory(self.memory_size)\n \n self.print_every = 1\n self.cum_reward = 0\n self.cum_loss = 0\n self.episode_count = 0\n \n self.new_game()\n\n\n def reset(self):\n super(TerranRLAgentWithRawActsAndRawObs, self).reset()\n self.new_game()\n\n def new_game(self):\n self.base_top_left = None\n self.previous_state = None\n self.previous_action = None\n self.cum_reward = 0\n self.cum_loss = 0\n \n # epsilon scheduling\n # slowly decaying_epsilon\n self.epsilon = max(self.eps_min, self.eps_max - self.eps_min * (self.episode_count / 50))\n self.dqn.epsilon = torch.tensor(self.epsilon).to(device)\n \n\n def get_state(self, obs):\n scvs = self.get_my_units_by_type(obs, units.Terran.SCV)\n idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n command_centers = self.get_my_units_by_type(obs, units.Terran.CommandCenter)\n supply_depots = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)\n completed_supply_depots = self.get_my_completed_units_by_type(\n obs, units.Terran.SupplyDepot)\n barrackses = self.get_my_units_by_type(obs, units.Terran.Barracks)\n completed_barrackses = self.get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n marines = self.get_my_units_by_type(obs, units.Terran.Marine)\n\n queued_marines = (completed_barrackses[0].order_length\n if len(completed_barrackses) > 0 else 0)\n\n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n can_afford_supply_depot = obs.observation.player.minerals >= 100\n can_afford_barracks = obs.observation.player.minerals >= 150\n can_afford_marine = obs.observation.player.minerals >= 100\n\n enemy_scvs = self.get_enemy_units_by_type(obs, units.Terran.SCV)\n enemy_idle_scvs = [scv for scv in enemy_scvs if scv.order_length == 0]\n enemy_command_centers = self.get_enemy_units_by_type(\n obs, units.Terran.CommandCenter)\n enemy_supply_depots = self.get_enemy_units_by_type(\n obs, units.Terran.SupplyDepot)\n enemy_completed_supply_depots = self.get_enemy_completed_units_by_type(\n obs, units.Terran.SupplyDepot)\n enemy_barrackses = self.get_enemy_units_by_type(obs, units.Terran.Barracks)\n enemy_completed_barrackses = self.get_enemy_completed_units_by_type(\n obs, units.Terran.Barracks)\n enemy_marines = self.get_enemy_units_by_type(obs, 
units.Terran.Marine)\n\n return (len(command_centers),\n len(scvs),\n len(idle_scvs),\n len(supply_depots),\n len(completed_supply_depots),\n len(barrackses),\n len(completed_barrackses),\n len(marines),\n queued_marines,\n free_supply,\n can_afford_supply_depot,\n can_afford_barracks,\n can_afford_marine,\n len(enemy_command_centers),\n len(enemy_scvs),\n len(enemy_idle_scvs),\n len(enemy_supply_depots),\n len(enemy_completed_supply_depots),\n len(enemy_barrackses),\n len(enemy_completed_barrackses),\n len(enemy_marines))\n\n def step(self, obs):\n super(TerranRLAgentWithRawActsAndRawObs, self).step(obs)\n \n #time.sleep(0.5)\n \n state = self.get_state(obs)\n state = torch.tensor(state).float().view(1, self.s_dim).to(device)\n action_idx = self.dqn.choose_action(state)\n action = self.actions[action_idx]\n done = True if obs.last() else False\n\n if self.previous_action is not None:\n experience = (self.previous_state.to(device),\n torch.tensor(self.previous_action).view(1, 1).to(device),\n torch.tensor(obs.reward).view(1, 1).to(device),\n state.to(device),\n torch.tensor(done).view(1, 1).to(device))\n self.memory.push(experience)\n \n self.cum_reward += obs.reward\n self.previous_state = state\n self.previous_action = action_idx\n \n if obs.last():\n self.episode_count = self.episode_count + 1\n \n if len(self.memory) >= self.init_sampling:\n # training dqn\n sampled_exps = self.memory.sample(self.batch_size)\n sampled_exps = prepare_training_inputs(sampled_exps, device)\n self.dqn.learn(*sampled_exps)\n\n if self.episode_count % self.target_update_interval == 0:\n self.dqn.qnet_target.load_state_dict(self.dqn.qnet.state_dict())\n\n if self.episode_count % self.print_every == 0:\n msg = (self.episode_count, self.cum_reward, self.epsilon)\n print(\"Episode : {:4.0f} | Cumulative Reward : {:4.0f} | Epsilon : {:.3f}\".format(*msg))\n \n torch.save(self.dqn.qnet.state_dict(), self.data_file_qnet + '.pt')\n torch.save(self.dqn.qnet_target.state_dict(), self.data_file_qnet_target + '.pt')\n\n scores_window.append(obs.reward) # save most recent reward\n win_rate = scores_window.count(1)/len(scores_window)*100\n tie_rate = scores_window.count(0)/len(scores_window)*100\n lost_rate = scores_window.count(-1)/len(scores_window)*100\n \n scores.append([win_rate, tie_rate, lost_rate]) # save most recent score(win_rate, tie_rate, lost_rate)\n with open(self.score_file + '.txt', \"wb\") as fp:\n pickle.dump(scores, fp)\n \n #writer.add_scalar(\"Loss/train\", self.cum_loss/obs.observation.game_loop, self.episode_count)\n writer.add_scalar(\"Score\", self.cum_reward, self.episode_count)\n\n return getattr(self, action)(obs)", "_____no_output_____" ], [ "if __name__ == \"__main__\":\n app.run(main)", "I0922 23:23:02.756312 4616515008 sc_process.py:135] Launching SC2: /Applications/StarCraft II/Versions/Base81102/SC2.app/Contents/MacOS/SC2 -listen 127.0.0.1 -port 19112 -dataDir /Applications/StarCraft II/ -tempDir /var/folders/r1/x6k135_915z463fc7lc4hkp40000gn/T/sc-m9gntgxu/ -displayMode 0 -windowwidth 640 -windowheight 480 -windowx 50 -windowy 50\nI0922 23:23:02.777318 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 0, running: True\nI0922 23:23:03.782161 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 1, running: True\nI0922 23:23:04.785537 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 2, running: True\nI0922 23:23:05.792152 4616515008 remote_controller.py:167] Connecting to: 
ws://127.0.0.1:19112/sc2api, attempt: 3, running: True\nI0922 23:23:06.797986 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 4, running: True\nI0922 23:23:07.802450 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 5, running: True\nI0922 23:23:08.808930 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 6, running: True\nI0922 23:23:09.812354 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 7, running: True\nI0922 23:23:10.816801 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 8, running: True\nI0922 23:23:11.818795 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 9, running: True\nI0922 23:23:12.820305 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 10, running: True\nI0922 23:23:13.825726 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 11, running: True\nI0922 23:23:14.828522 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 12, running: True\nI0922 23:23:15.832547 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 13, running: True\nI0922 23:23:16.834920 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 14, running: True\nI0922 23:23:17.841595 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 15, running: True\nI0922 23:23:18.845139 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 16, running: True\nI0922 23:23:19.846853 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 17, running: True\nI0922 23:23:20.848951 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 18, running: True\nI0922 23:23:21.852277 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 19, running: True\nI0922 23:23:22.856023 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 20, running: True\nI0922 23:23:23.861757 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 21, running: True\nI0922 23:23:33.645176 4616515008 sc2_env.py:314] Environment is ready\nI0922 23:23:33.653440 4616515008 sc2_env.py:507] Starting episode 1: [terran, random] on Simple64\nI0922 23:23:35.562937 4616515008 sc2_env.py:752] Environment Close\nI0922 23:25:39.113759 4616515008 sc2_env.py:725] Episode 1 finished after 15944 game steps. 
Outcome: [1], reward: [1], score: [11323]\n" ] ], [ [ "### [Winning rate graph]", "_____no_output_____" ] ], [ [ "!pip install matplotlib", "Requirement already satisfied: matplotlib in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (3.0.3)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from matplotlib) (2.4.7)\nRequirement already satisfied: numpy>=1.10.0 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from matplotlib) (1.18.5)\nRequirement already satisfied: python-dateutil>=2.1 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from matplotlib) (2.8.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from matplotlib) (1.1.0)\nRequirement already satisfied: cycler>=0.10 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: six>=1.5 in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from python-dateutil>=2.1->matplotlib) (1.15.0)\nRequirement already satisfied: setuptools in /Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages (from kiwisolver>=1.0.1->matplotlib) (47.1.1)\n\u001b[33mWARNING: You are using pip version 20.1.1; however, version 20.2.3 is available.\nYou should consider upgrading via the '/Library/Frameworks/Python.framework/Versions/3.5/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" ], [ "import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nSCORE_FILE = 'rlagent_with_vanilla_dqn_score'", "_____no_output_____" ], [ "with open(SCORE_FILE + '.txt', \"rb\") as fp:\n scores = pickle.load(fp)", "_____no_output_____" ], [ "np_scores = np.array(scores)\nnp_scores", "_____no_output_____" ], [ "# plot the scores\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(len(np_scores)), np_scores.T[0], color='r', label='win rate')\nplt.plot(np.arange(len(np_scores)), np_scores.T[1], color='g', label='tie rate')\nplt.plot(np.arange(len(np_scores)), np_scores.T[2], color='b', label='lose rate')\nplt.ylabel('Score %')\nplt.xlabel('Episode #')\nplt.legend(loc='best')\nplt.show()", "_____no_output_____" ], [ "f = file.open()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d069fca84d29c88c10ab0e43e9315788260dd776
7,191
ipynb
Jupyter Notebook
notebooks/Chapter_19/03_MGFs_Normal_and_the_CLT.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
notebooks/Chapter_19/03_MGFs_Normal_and_the_CLT.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
notebooks/Chapter_19/03_MGFs_Normal_and_the_CLT.ipynb
choldgraf/prob140
0750fc62fb114220035278ed2161e4b82ddca15f
[ "MIT" ]
null
null
null
40.173184
385
0.535391
[ [ [ "# HIDDEN\nfrom datascience import *\nfrom prob140 import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n%matplotlib inline\nimport math\nfrom scipy import stats", "_____no_output_____" ] ], [ [ "## MGFs, the Normal, and the CLT ##", "_____no_output_____" ], [ "Let $Z$ be standard normal. Then the mgf of $Z$ is given by\n\n$$\nM_Z(t) ~ = ~ e^{t^2/2} ~~~ \\text{for all } t\n$$\n\nTo see this, just work out the integral:\n\n\\begin{align*}\nM_Z(t) ~ &= ~ \\int_{-\\infty}^\\infty e^{tz} \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{1}{2}z^2} dz \\\\ \\\\\n&= ~ \\int_{-\\infty}^\\infty \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{1}{2}(z^2 - 2tz)} dz \\\\ \\\\\n&= ~ e^{t^2/2} \\int_{-\\infty}^\\infty \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{1}{2}(z^2 - 2tz + t^2)} dz \\\\ \\\\\n&= ~ e^{t^2/2} \\int_{-\\infty}^\\infty \\frac{1}{\\sqrt{2\\pi}} e^{-\\frac{1}{2}(z- t)^2} dz \\\\ \\\\\n&= ~ e^{t^2/2}\n\\end{align*}\n\nbecause the integral is 1. It is the normal $(t, 1)$ density integrated over the whole real line.\n\n### Normal $(\\mu, \\sigma^2)$ ###\nIt's a good idea to first note that moment generating functions behave well under linear transformations.\n\n$$\nM_{aX+b}(t) ~ = ~ E(e^{t(aX + b)}) ~ = ~ e^{bt}E(e^{atX}) ~ = ~ e^{bt}M_X(at)\n$$\n\nSince a normal $(\\mu, \\sigma^2)$ variable can be written as $\\sigma Z + \\mu$ where $Z$ is standard normal, its m.g.f. is\n\n$$\nM_{\\sigma Z + \\mu} (t) ~ = ~ e^{\\mu t}M_Z(\\sigma t) ~ = ~ e^{\\mu t +\\sigma^2 t^2/2}\n$$\n\nDetails aside, what this formula is saying is that if a moment generating function is $\\exp(c_1t + c_2t^2)$ for any constant $c_1$ and any positive constant $c_2$, then it is the moment generating function of a normally distributed random variable.", "_____no_output_____" ], [ "### Sums of Independent Normal Variables ###\nWe can now show that sums of independent normal variables are normal.\n\nLet $X$ have normal $(\\mu_X, \\sigma_X^2)$ distribution, and let $Y$ independent of $X$ have normal $(\\mu_Y, \\sigma_Y^2)$ distribution. Then\n\n$$\nM_{X+Y} (t) ~ = ~ e^{\\mu_X t + \\sigma_X^2 t^2/2} \\cdot e^{\\mu_Y t + \\sigma_Y^2 t^2/2} ~ = ~ e^{(\\mu_X + \\mu_Y)t + (\\sigma_X^2 + \\sigma_Y^2)t^2/2}\n$$\n\nThat's the m.g.f. of the normal distribution with mean $\\mu_X + \\mu_Y$ and variance $\\sigma_X^2 + \\sigma_Y^2$.", "_____no_output_____" ], [ "### \"Proof\" of the Central Limit Theorem ###\nAnother important reason for studying mgf's is that they can help us identify the limit of a sequence of distributions. \n\nThe main example of convergence that we have seen is the Central Limit Theorem. Now we can indicate a proof.\n\nLet $X_1, X_2, \\ldots$ be i.i.d. random variables with expectation $\\mu$ and SD $\\sigma$. For every $n \\ge 1$ let $S_n = X_1 + X_2 + \\cdots + X_n$.\n\nThe Central Limit Theorem says that for large $n$, the distribution of the standardized sum\n\n$$\nS_n^* ~ = ~ \\frac{S_n - n\\mu}{\\sqrt{n}\\sigma}\n$$\n\nis approximately standard normal.\n\nTo show this, we will assume a major result whose proof is well beyond the scope of this class. Suppose $Y_1, Y_2, \\ldots $ are random variables and we want to show that the the distribution of the $Y_n$'s converges to the distribution of some random variable $Y$. The result says that it is enough to show that the mgf's of the $Y_n$'s converge to the mgf of $Y$. \n\nThe result requires a careful statement and the proof requires considerable attention to detail. We won't go into that in this course. 
Instead we'll just point out that it should seem reasonable. Since mgf's determine distributions, it's not difficult to accept that if two mgf's are close to each other then the corresponding distributions should also be close to each other.\n\nLet's use this result to \"prove\" the CLT. The quotes are because we will use the above result without proof, and also because the argument below involves some hand-waving about approximations.\n\nFirst, write the standardized sum in terms of the standardized $X$'s.\n\n$$\nS_n^* ~ = ~ \\frac{S_n - n\\mu}{\\sqrt{n}\\sigma} ~ = ~ \\sum_{i=1}^n \\frac{1}{\\sqrt{n}} \\big{(} \\frac{X_i - \\mu}{\\sigma} \\big{)} ~ = ~ \\sum_{i=1}^n \\frac{1}{\\sqrt{n}} X_i^*\n$$\n\nwhere for each $i$, the random variable $X_i^*$ is $X_i$ in standard units. \n\nThe random variables $X_i^*$ are i.i.d., so let $M_{X^*}$ denote the mgf of any one of them. By the linear transformation property proved above, the mgf of each $\\frac{1}{\\sqrt{n}}X_i^*$ is given by\n\n$$\nM_{\\frac{1}{\\sqrt{n}}X_i^*} (t) ~ = ~ M_{X^*} (t/\\sqrt{n})\n$$\n\nTherefore\n\n\\begin{align*}\nM_{S_n^*} (t) ~ &= ~ \\big{(} M_{X^*}(t/\\sqrt{n}) \\big{)}^n \\\\ \\\\\n&= ~ \\Big{(} 1 ~ + ~ \\frac{t}{\\sqrt{n}} \\cdot \\frac{E(X^*)}{1!} ~ + ~ \\frac{t^2}{n} \\cdot \\frac{E({X^*}^2)}{2!} ~ + ~ \\frac{t^3}{n^{3/2}} \\cdot \\frac{E({X^*}^3)}{3!} ~ + ~ \\cdots \\Big{)}^n \\\\ \\\\\n&\\approx ~ \\Big{(} 1 ~ + ~ \\frac{t^2}{2n}\\Big{)}^n ~~~ \\text{for large } n\\\\ \\\\\n\\end{align*}\n\nby ignoring small terms and using the fact that for any standardized random variable $X^*$ we have $E(X^*) = 0$ and $E({X^*}^2) = 1$.\n\nThus for large $n$,\n\n$$\nM_{S_n^*} (t) ~ \\approx ~ \\Big{(} 1 ~ + ~ \\frac{t^2}{2n}\\Big{)}^n \n~ \\to ~ e^{\\frac{t^2}{2}} ~~ \\text{as } n \\to \\infty\n$$\n\nThe limit is the moment generating function of the standard normal distribution. ", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d069fe2ff5a4044f0ae8f2ca664a857acc6e7c69
14,533
ipynb
Jupyter Notebook
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
3
2020-01-28T11:36:06.000Z
2020-01-28T12:15:04.000Z
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
1
2020-02-20T14:49:33.000Z
2020-02-20T14:49:33.000Z
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
1
2020-03-04T00:12:25.000Z
2020-03-04T00:12:25.000Z
33.797674
536
0.532719
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Custom training: basics", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/tutorials/eager/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/eager/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.\nIn this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.\n\nTensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow.compat.v1 as tf", "_____no_output_____" ] ], [ [ "## Variables\n\nTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:\n", "_____no_output_____" ] ], [ [ "# Using python state\nx = tf.zeros([10, 10])\nx += 2 # This is equivalent to x = x + 2, which does not mutate the original\n # value of x\nprint(x)", "_____no_output_____" ] ], [ [ "TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.\n\nA Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. 
There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable.", "_____no_output_____" ] ], [ [ "v = tf.Variable(1.0)\nassert v.numpy() == 1.0\n\n# Re-assign the value\nv.assign(3.0)\nassert v.numpy() == 3.0\n\n# Use `v` in a TensorFlow operation like tf.square() and reassign\nv.assign(tf.square(v))\nassert v.numpy() == 9.0", "_____no_output_____" ] ], [ [ "Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.\n\nUsing Variables is also a way to quickly let a reader of your code know that this piece of state is mutable.", "_____no_output_____" ], [ "## Example: Fitting a linear model\n\nLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:\n\n1. Define the model.\n2. Define a loss function.\n3. Obtain training data.\n4. Run through the training data and use an \"optimizer\" to adjust the variables to fit the data.\n\nIn this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`.", "_____no_output_____" ], [ "### Define the model\n\nLet's define a simple class to encapsulate the variables and the computation.", "_____no_output_____" ] ], [ [ "class Model(object):\n def __init__(self):\n # Initialize variable to (5.0, 0.0)\n # In practice, these should be initialized to random values.\n self.W = tf.Variable(5.0)\n self.b = tf.Variable(0.0)\n\n def __call__(self, x):\n return self.W * x + self.b\n\nmodel = Model()\n\nassert model(3.0).numpy() == 15.0", "_____no_output_____" ] ], [ [ "### Define a loss function\n\nA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss.", "_____no_output_____" ] ], [ [ "def loss(predicted_y, desired_y):\n return tf.reduce_mean(tf.square(predicted_y - desired_y))", "_____no_output_____" ] ], [ [ "### Obtain training data\n\nLet's synthesize the training data with some noise.", "_____no_output_____" ] ], [ [ "TRUE_W = 3.0\nTRUE_b = 2.0\nNUM_EXAMPLES = 1000\n\ninputs = tf.random_normal(shape=[NUM_EXAMPLES])\nnoise = tf.random_normal(shape=[NUM_EXAMPLES])\noutputs = inputs * TRUE_W + TRUE_b + noise", "_____no_output_____" ] ], [ [ "Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.scatter(inputs, outputs, c='b')\nplt.scatter(inputs, model(inputs), c='r')\nplt.show()\n\nprint('Current loss: '),\nprint(loss(model(inputs), outputs).numpy())", "_____no_output_____" ] ], [ [ "### Define a training loop\n\nWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. 
We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves.", "_____no_output_____" ] ], [ [ "def train(model, inputs, outputs, learning_rate):\n with tf.GradientTape() as t:\n current_loss = loss(model(inputs), outputs)\n dW, db = t.gradient(current_loss, [model.W, model.b])\n model.W.assign_sub(learning_rate * dW)\n model.b.assign_sub(learning_rate * db)", "_____no_output_____" ] ], [ [ "Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.", "_____no_output_____" ] ], [ [ "model = Model()\n\n# Collect the history of W-values and b-values to plot later\nWs, bs = [], []\nepochs = range(10)\nfor epoch in epochs:\n Ws.append(model.W.numpy())\n bs.append(model.b.numpy())\n current_loss = loss(model(inputs), outputs)\n\n train(model, inputs, outputs, learning_rate=0.1)\n print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %\n (epoch, Ws[-1], bs[-1], current_loss))\n\n# Let's plot it all\nplt.plot(epochs, Ws, 'r',\n epochs, bs, 'b')\nplt.plot([TRUE_W] * len(epochs), 'r--',\n [TRUE_b] * len(epochs), 'b--')\nplt.legend(['W', 'b', 'true W', 'true_b'])\nplt.show()\n", "_____no_output_____" ] ], [ [ "## Next Steps\n\nIn this tutorial we covered `Variable`s and built and trained a simple linear model using the TensorFlow primitives discussed so far.\n\nIn theory, this is pretty much all you need to use TensorFlow for your machine learning research.\nIn practice, particularly for neural networks, the higher level APIs like `tf.keras` will be much more convenient since it provides higher level building blocks (called \"layers\"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies etc.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d06a0643431015a41ab47764cc2c5b8d940da1a3
1,724
ipynb
Jupyter Notebook
Bit Manipulation/1222/389. Find the Difference.ipynb
YuHe0108/Leetcode
90d904dde125dd35ee256a7f383961786f1ada5d
[ "Apache-2.0" ]
1
2020-08-05T11:47:47.000Z
2020-08-05T11:47:47.000Z
Bit Manipulation/1222/389. Find the Difference.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
Bit Manipulation/1222/389. Find the Difference.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
19.590909
64
0.464617
[ [ [ "class Solution:\n def findTheDifference(self, s: str, t: str) -> str:\n s = sorted(s)\n t = sorted(t)\n for i in range(len(s)):\n if s[i] != t[i]:\n return t[i]\n return t[-1]", "_____no_output_____" ], [ "solution = Solution()\nsolution.findTheDifference(s = \"abcd\", t = \"abcde\")", "_____no_output_____" ], [ "class Solution:\n def findTheDifference(self, s: str, t: str) -> str:\n return chr(sum(map(ord, t)) - sum(map(ord, s)))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d06a1da6ede5fa4c2c6468287e9c1e073d6bba99
58,165
ipynb
Jupyter Notebook
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
3e7e31a32a1896fcb1fd82b573daa5248e465a36
[ "CC0-1.0" ]
null
null
null
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
3e7e31a32a1896fcb1fd82b573daa5248e465a36
[ "CC0-1.0" ]
null
null
null
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
3e7e31a32a1896fcb1fd82b573daa5248e465a36
[ "CC0-1.0" ]
null
null
null
68.752955
27,216
0.720536
[ [ [ "# ENGR 1330 Exam 1 Sec 003/004 Fall 2020\nTake Home Portion of Exam 1\n<hr>\n\n## Full name\n## R#:\n## HEX:\n## ENGR 1330 Exam 1 Sec 003/004\n## Date:\n<hr>", "_____no_output_____" ], [ "## Question 1 (1 pts):\nRun the cell below, and leave the results in your notebook (Windows users may get an error, leave the error in place) ", "_____no_output_____" ] ], [ [ "#### RUN! the Cell ####\nimport sys\n! hostname\n! whoami\nprint(sys.executable) # OK if generates an exception message on Windows machines", "atomickitty.aws\ncompthink\n/opt/conda/envs/python/bin/python\n" ] ], [ [ "<hr>", "_____no_output_____" ], [ "## Question 2 (9 pts):\n- __When it is 8:00 in Lubbock,__\n - __It is 9:00 in New York__\n - __It is 14:00 in London__\n - __It is 15:00 in Cairo__\n - __It is 16:00 in Istanbul__\n - __It is 19:00 in Hyderabad__\n - __It is 22:00 in Tokyo__ <br>\n\n__Write a function that reports the time in New York, London, Cairo, Istanbul, Hyderabad, and Tokyo based on the time in Lubbock. Use a 24-hour time format. Include error trapping that:__<br>\n\n1- Issues a message like \"Please Enter A Number from 00 to 23\" if the first input is numeric but outside the range of [0,23].<br>\n2- Takes any numeric input for \"Lubbock time\" selection , and forces it into an integer.<br>\n3- Issues an appropriate message if the user's selection is non-numeric.<br>\n\n__Check your function for these times:__\n- 8:00\n- 17:00\n- 0:00", "_____no_output_____" ] ], [ [ "def LBBtime():\n try:\n LBK = int(input('What hour is it in Lubbock?- Please enter a number from 0 to 23'))\n if LBK>23:\n print('Please Enter A Number from 00 to 23')\n if LBK<23 and LBK>=0:\n if LBK+1>23:\n print(\"Time in New York is\",(LBK+1)-24,\":00\")\n else:\n print(\"Time in New York is\",(LBK+1),\":00\")\n if LBK+6>23:\n print(\"Time in London is\",(LBK+6)-24,\":00\")\n else:\n print(\"Time in London is\",(LBK+6),\":00\")\n if LBK+7>23:\n print(\"Time in Cairo is\",(LBK+7)-24,\":00\")\n else:\n print(\"Time in Cairo is\",(LBK+7),\":00\")\n if LBK+8>23:\n print(\"Time in Istanbul is\",(LBK+8)-24,\":00\")\n else:\n print(\"Time in Istanbul is\",(LBK+8),\":00\")\n if LBK+11>23:\n print(\"Time in Hyderabad is\",(LBK+11)-24,\":00\")\n else:\n print(\"Time in Hyderabad is\",(LBK+11),\":00\")\n if LBK+14>23:\n print(\"Time in Tokyo is\",(LBK+14)-24,\":00\")\n else:\n print(\"Time in Tokyo is\",(LBK+14),\":00\")\n return #null return\n except:\n print(\"Please Enter an Appropriate Input\")", "_____no_output_____" ], [ "LBBtime()", "What hour is it in Lubbock?- Please enter a number from 0 to 23 8\n" ], [ "LBBtime()", "What hour is it in Lubbock?- Please enter a number from 0 to 23 17\n" ], [ "LBBtime()", "What hour is it in Lubbock?- Please enter a number from 0 to 23 0\n" ] ], [ [ "<hr>\n\n## Question 3 (28 pts): \nFollow the steps below. Add comments to your script and signify when each step and each task is done. *hint: For this problem you will need the numpy and pandas libraries.\n- __STEP1: There are 8 digits in your R#. Define a 2x4 array with these 8 digits, name it \"Rarray\", and print it__\n- __STEP2: Find the maximum value of the \"Rarray\" and its position__\n- __STEP3: Sort the \"Rarray\" along the rows, store it in a new array named \"Rarraysort\", and print the new array out__\n- __STEP4: Define and print a 4x4 array that has the \"Rarray\" as its two first rows, and \"Rarraysort\" as its next rows. 
Name this new array \"DoubleRarray\"__\n- __STEP5: Slice and print a 4x3 array from the \"DoubleRarray\" that contains the last three columns of it. Name this new array \"SliceRarray\".__\n- __STEP6: Define the \"SliceRarray\" as a panda dataframe:__\n - name it \"Rdataframe\",\n - name the rows as \"Row A\",\"Row B\",\"Row C\", and \"Row D\"\n - name the columns as \"Column 1\", \"Column 2\", and \"Column 3\"\n- __STEP7: Print the first few rows of the \"Rdataframe\".__\n- __STEP8: Create a new dataframe object (\"R2dataframe\") by adding a column to the \"Rdataframe\", name it \"Column X\" and fill it with \"None\" values. Then, use the appropriate descriptor function and print the data model (data column count, names, data types) of the \"R2dataframe\"__\n- __STEP9: Replace the **'None'** in the \"R2dataframe\" with 0. Then, print the summary statistics of each numeric column in the data frame.__\n- __STEP10: Define a function based on the equation below, apply on the entire \"R2dataframe\", store the results in a new dataframe (\"R3dataframe\"), and print the results and the summary statistics again.__ \n$$ y = x^2 - 5x +7 $$\n- __STEP11: Print the number of occurrences of each unique value in \"Column 3\"__\n- __STEP12: Sort the data frame with respect to \"Column 1\" with a descending order and print it__\n- __STEP13: Write the final format of the \"R3dataframe\" on a CSV file, named \"Rfile.csv\"__\n- __STEP14: Read the \"Rfile.csv\" and print its content.__<br>\n** __Make sure to attach the \"Rfile.csv\" file to your midterm exam submission.__", "_____no_output_____" ] ], [ [ "# Code and Run your solution here:\n\nprint('#Step0: Install Dependencies')\nimport numpy as np\nimport pandas as pd\nprint('#Step1: Create the array')\nRarray = np.array([[1,6,7,4],[5,2,3,8]]) #Define Rarray\nprint(Rarray)\nprint('#Step2: find max and its position ')\nprint(np.max(Rarray)) #Find the maximum Value\nprint(np.argmax(Rarray)) #Find the posirtion of the maximum value\nprint('#Step3: Sort the array')\nRarraysort = np.sort(Rarray,axis = 1) #Sort Rarray along the rows and define a new array\nprint(Rarraysort) \nprint('#Step4: Create the double array - manual entry')\nDoubleRarray = np.array([[1,6,7,4],[5,2,3,8],[1,4,6,7],[2,3,5,8]]) #Define DoubleRarray\nprint(DoubleRarray)\nprint('#Step5: Slice the array')\nSliceRarray = DoubleRarray[0:4,1:4] #Slice DoubleRarray and Define SliceRarray\nprint(SliceRarray)\nprint('#Step6: Make a dataframey')\nmyrowname = [\"Row A\",\"Row B\",\"Row C\",\"Row D\"]\nmycolname = [\"Column 1\", \"Column 2\",\"Column 3\"]\nRdataframe = pd.DataFrame(SliceRarray,myrowname,mycolname) #Define Rdataframe\nprint('#Step7: head method on dataframe')\nprint(Rdataframe.head()) #Print the first few rows of the Rdataframe\nprint('#Step8: add column to a dataframe')\nRdataframe['Column X']= None #Add a new column, \"Column X\"\nR2dataframe = Rdataframe #Define R2dataframe\nprint(R2dataframe.info()) #Get the info\nprint('#Step9: Replace NA')\nR2dataframe = R2dataframe.fillna(0) #Replace NAs with 0\nprint(R2dataframe.describe()) #Get the summary statistics\nprint('#Step10: Define a function, apply to a dataframe')\ndef myfunc(x): # A user-built function\n y = (x**2) - (10*x) +7\n return(y)\nR3dataframe = R2dataframe.apply(myfunc) #Apply the function on the entire R2dataframe\nprint(R3dataframe)\nprint(R3dataframe.describe())\nprint('#Step11: Descriptors')\nprint(R3dataframe['Column 3'].value_counts()) #Returns the number of occurences of each unique value in Column 3\nprint('#Step12: 
Sort on values')\nprint(R3dataframe.sort_values('Column 1', ascending = False)) #Sorting based on Column 1\nprint('#Step13: Write to an external file')\nR3dataframe.to_csv('Rfile.csv') #Write R3dataframe on a CSV file\nprint('#Step14: Verify the write')\nreadfilecsv = pd.read_csv('Rfile.csv') #Read the Rfile.csv\nprint(readfilecsv) #Print the contents of the Rfile.csv", "#Step0: Install Dependencies\n#Step1: Create the array\n[[1 6 7 4]\n [5 2 3 8]]\n#Step2: find max and its position \n8\n7\n#Step3: Sort the array\n[[1 4 6 7]\n [2 3 5 8]]\n#Step4: Create the double array - manual entry\n[[1 6 7 4]\n [5 2 3 8]\n [1 4 6 7]\n [2 3 5 8]]\n#Step5: Slice the array\n[[6 7 4]\n [2 3 8]\n [4 6 7]\n [3 5 8]]\n#Step6: Make a dataframey\n#Step7: head method on dataframe\n Column 1 Column 2 Column 3\nRow A 6 7 4\nRow B 2 3 8\nRow C 4 6 7\nRow D 3 5 8\n#Step8: add column to a dataframe\n<class 'pandas.core.frame.DataFrame'>\nIndex: 4 entries, Row A to Row D\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Column 1 4 non-null int64 \n 1 Column 2 4 non-null int64 \n 2 Column 3 4 non-null int64 \n 3 Column X 0 non-null object\ndtypes: int64(3), object(1)\nmemory usage: 160.0+ bytes\nNone\n#Step9: Replace NA\n Column 1 Column 2 Column 3 Column X\ncount 4.000000 4.000000 4.000000 4.0\nmean 3.750000 5.250000 6.750000 0.0\nstd 1.707825 1.707825 1.892969 0.0\nmin 2.000000 3.000000 4.000000 0.0\n25% 2.750000 4.500000 6.250000 0.0\n50% 3.500000 5.500000 7.500000 0.0\n75% 4.500000 6.250000 8.000000 0.0\nmax 6.000000 7.000000 8.000000 0.0\n#Step10: Define a function, apply to a dataframe\n Column 1 Column 2 Column 3 Column X\nRow A -17 -14 -17 7\nRow B -9 -14 -9 7\nRow C -17 -17 -14 7\nRow D -14 -18 -9 7\n Column 1 Column 2 Column 3 Column X\ncount 4.000000 4.000000 4.000000 4.0\nmean -14.250000 -15.750000 -12.250000 7.0\nstd 3.774917 2.061553 3.947573 0.0\nmin -17.000000 -18.000000 -17.000000 7.0\n25% -17.000000 -17.250000 -14.750000 7.0\n50% -15.500000 -15.500000 -11.500000 7.0\n75% -12.750000 -14.000000 -9.000000 7.0\nmax -9.000000 -14.000000 -9.000000 7.0\n#Step11: Descriptors\n-9 2\n-14 1\n-17 1\nName: Column 3, dtype: int64\n#Step12: Sort on values\n Column 1 Column 2 Column 3 Column X\nRow B -9 -14 -9 7\nRow D -14 -18 -9 7\nRow A -17 -14 -17 7\nRow C -17 -17 -14 7\n#Step13: Write to an external file\n#Step14: Verify the write\n Unnamed: 0 Column 1 Column 2 Column 3 Column X\n0 Row A -17 -14 -17 7\n1 Row B -9 -14 -9 7\n2 Row C -17 -17 -14 7\n3 Row D -14 -18 -9 7\n" ] ], [ [ "<hr>\n\n## Problem 4 (32 pts)\n\nGraphing Functions Special Functions \n\nConsider the two functions listed below:\n\n\\begin{equation}\nf(x) = e^{-\\alpha x}\n\\label{eqn:fofx}\n\\end{equation}\n\n\\begin{equation}\ng(x) = \\gamma sin(\\beta x)\n\\label{eqn:gofx}\n\\end{equation}\n\nPrepare a plot of the two functions on the same graph. \n\nUse the values in Table below for $\\alpha$, $\\beta$, and $\\gamma$.\n\n|Parameter|Value|\n|:---|---:|\n|$\\alpha$|0.50|\n|$\\beta$|3.00|\n|$\\gamma$|$\\frac{\\pi}{2}$|\n\n\nThe plot should have $x$ values ranging from $0$ to $10$ (inclusive) in sufficiently small increments to see curvature in the two functions as well as to identify the number and approximate locations of intersections. 
In this problem, intersections are locations in the $x-y$ plane where the two \"curves\" cross one another of the two plots.", "_____no_output_____" ], [ "#### By-hand evaluate f(x) for x=1, alpha = 1/2 (Simply enter your answer from a calculator)\nf(x) = 0.61", "_____no_output_____" ], [ "#### By-hand evaluate g(x) for x=3.14, beta = 1/2, gamma = 2 (Simply enter your answer from a calculator)\ng(x) = 1.99", "_____no_output_____" ] ], [ [ "# Define the first function f(x,alpha), test the function using your by hand answer\n# Define the first function f(x,alpha), test the function using your by hand answer\ndef f(x,alpha):\n import math\n f = math.exp(-1.0*alpha*x)\n return f\n\nf(1,0.5)", "_____no_output_____" ], [ "# Define the second function g(x,beta,gamma), test the function using your by hand answer\ndef g(x,beta,gamma):\n import math\n f = gamma*math.sin(beta*x)\n return f\n\ng(3.14,0.5,2.0)", "_____no_output_____" ], [ "# Built a list for x that ranges from 0 to 10, inclusive, with adjustable step sizes for plotting later on\nhowMany = 100\nscale = 10.0/howMany\nxvector = []\nfor i in range(0,howMany+1):\n xvector.append(scale*i)\n#xvector # activate to display", "_____no_output_____" ], [ "# xvector", "_____no_output_____" ], [ "# Build a plotting function that plots both functions on the same chart\n# Build a plotting function that plots both functions on the same chart\nimport mplcursors\nalpha = 0.5\nbeta = 3.0\ngamma = 1.57\nyf = []\nyg = []\nfor i in range(0,howMany+1):\n yf.append(f(xvector[i],alpha))\n yg.append(g(xvector[i],beta,gamma))\n\ndef plot2lines(list11,list21,list12,list22,strx,stry,strtitle): # plot list1 on x, list2 on y, xlabel, ylabel, title\n from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show()\n plt.plot( list11, list21, color ='green', marker ='s', linestyle ='dashdot' , label = \"Observed\" ) # create a line chart, years on x-axis, gdp on y-axis\n plt.plot( list12, list22, color ='red', marker ='o', linestyle ='solid' , label = \"Model\") # create a line chart, years on x-axis, gdp on y-axis\n plt.legend()\n plt.title(strtitle)# add a title\n plt.ylabel(stry)# add a label to the x and y-axes\n plt.xlabel(strx)\n mplcursors.cursor()\n plt.show() # display the plot\n return #null return\n\nplot2lines(xvector,yf,xvector,yg,'x-value','y-value','plot of f and g')", "_____no_output_____" ], [ "\n\n\n", "_____no_output_____" ], [ "# Using the plot as a guide, find the approximate values of x where the two curves intercept (i.e. f(x) = g(x))\n# You can either use interactive input, or direct specify x values, but need to show results\n# Using the plot as a guide, find the values of x where the two curves intercept (i.e. f(x) = g(x))\n\n#xguess = float(input('my guess for x')) # ~0.7, and 6.25\nalpha = 0.5\nbeta = 0.5\ngamma = 2.0\nxguess = 1\nresult = f(xguess,alpha) - g(xguess,beta,gamma)\nprint('f(x) - g(x) =', result,' at x = ', xguess)\nxguess = 2\nresult = f(xguess,alpha) - g(xguess,beta,gamma)\nprint('f(x) - g(x) =', result,' at x = ', xguess)", "f(x) - g(x) = -0.3523204174957726 at x = 1\nf(x) - g(x) = -1.3150625284443507 at x = 2\n" ] ], [ [ "<hr>\n\n## Bonus Problem 1. 
Extra Credit (You must complete the regular problems)!\n__create a class to compute the average grade (out of 10) of the students based on their grades in Quiz1, Quiz2, the Mid-term, Quiz3, and the Final exam.__\n\n| Student Name | Quiz 1 | Quiz 2 | Mid-term | Quiz 3 | Final Exam |\n| ------------- | -----------| -----------| -------------| -----------| -------------|\n| Harry | 8 | 9 | 8 | 10 | 9 |\n| Ron | 7 | 8 | 8 | 7 | 9 |\n| Hermione | 10 | 10 | 9 | 10 | 10 |\n| Draco | 8 | 7 | 9 | 8 | 9 |\n| Luna | 9 | 8 | 7 | 6 | 5 |\n\n1. __Use docstrings to describe the purpose of the class.__\n2. __Create an object for each car brand and display the output as shown below.__\n\n\"Student Name\": **Average Grade** \n\n3. __Create and print out a dictionary with the student names as keys and their average grades as data.__\n", "_____no_output_____" ] ], [ [ "#Code and run your solution here:\n\n#Suggested Solution:\n\nclass Hogwarts:\n \"\"\"This class calculates the average grade of the students\"\"\"\n def __init__(self, Name,Quiz1,Quiz2,MidTerm,Quiz3,Final):\n self.Name = Name\n self.Quiz1 = Quiz1\n self.Quiz2 = Quiz2\n self.MidTerm = MidTerm\n self.Quiz3 = Quiz3\n self.Final= Final\n \n def average(self):\n return (self.Quiz1 + self.Quiz2 + self.MidTerm + self.Quiz3 + self.Final) /5\n\n\nS1 = Hogwarts('Harry',8,9,8,10,9) #Fill the instances\nS2 = Hogwarts('Ron',7,8,8,7,9)\nS3 = Hogwarts('Hermione',10,10,9,10,10)\nS4 = Hogwarts('Draco',8,7,9,8,9)\nS5 = Hogwarts('Luna',9,8,7,6,5)\n\nprint(\"Harry\", S1.average())\nprint(\"Ron\", S2.average())\nprint(\"Hermione\", S3.average())\nprint(\"Draco\", S4.average())\nprint(\"Luna\", S5.average())\n\nGradeDict = {\"Harry\":S1.average(),\"Ron\":S2.average(),\"Hermione\":S3.average(),\"Draco\":S4.average(),\"Luna\":S5.average()}\nprint(GradeDict)", "Harry 8.8\nRon 7.8\nHermione 9.8\nDraco 8.2\nLuna 7.0\n{'Harry': 8.8, 'Ron': 7.8, 'Hermione': 9.8, 'Draco': 8.2, 'Luna': 7.0}\n" ] ], [ [ "<hr>\n\n## Bonus 2 Extra credit (You must complete the regular problems)!\n#### Write the VOLUME Function to compute the volume of Cylinders, Spheres, Cones, and Rectangular Boxes. This function should:\n- First, ask the user about __the shape of the object__ of interest using this statement:<br>\n**\"Please choose the shape of the object. Enter 1 for \"Cylinder\", 2 for \"Sphere\", 3 for \"Cone\", or 4 for \"Rectangular Box\"\"**<br>\n- Second, based on user's choice in the previous step, __ask for the right inputs__.\n- Third, print out an statement with __the input values and the calculated volumes__.\n\n#### Include error trapping that:\n\n1. Issues a message that **\"The object should be either a Cylinder, a Sphere, a Cone, or a Rectangular Box. Please Enter A Number from 1,2,3, and 4!\"** if the first input is non-numeric.\n2. Takes any numeric input for the initial selection , and force it into an integer.\n4. Issues an appropriate message if the user's selection is numeric but outside the range of [1,4]\n3. Takes any numeric input for the shape characteristics , and force it into a float.\n4. Issues an appropriate message if the object characteristics are as non-numerics.\n\n#### Test the script for:\n1. __Sphere, r=10__\n2. __r=10 , Sphere__\n3. 
__Rectangular Box, w=5, h=10, l=0.5__\n\n\n- <font color=orange>__Volume of a Cylinder = πr²h__</font>\n- <font color=orange>__Volume of a Sphere = 4(πr3)/3__</font>\n- <font color=orange>__Volume of a Cone = (πr²h)/3__</font>\n- <font color=orange>__Volume of a Rectangular Box = whl__</font>", "_____no_output_____" ] ], [ [ "#Code and Run your solution here\n\n#Suggested Solution:\n\nimport numpy as np # import NumPy: for large, multi-dimensional arrays and matrices, along with high-level mathematical functions to operate on these arrays.\npi = np.pi #pi value from the np package\n\ndef VOLUME():\n try:\n UI = input('Please choose the shape of the object. Enter 1 for \"Cylinder\", 2 for \"Sphere\", 3 for \"Cone\", or 4 for \"Rectangular Box\"')\n UI =int(UI)\n if UI==1:\n try:\n UI2 = input('Please enter the radius of the Cylinder')\n r= float(UI2)\n UI3 = input('Please enter the height of the Cylinder')\n h= float(UI3)\n V= pi*h*r**2\n print(\"The volume of the Cylinder with the radius of \",r,\" and the height of \",h,\" is equal to\", V)\n except:\n print(\"The radius and height of the Cylinder must be numerics. Please Try Again!\")\n elif UI==2:\n try:\n UI2 = input('Please enter the radius of the Sphere')\n r= float(UI2)\n V= (4*pi*r**3)/3\n print(\"The volume of the Sphere with the radius of \",r,\" is equal to\", V)\n except:\n print(\"The radius of the Sphere must be numeric. Please Try Again!\")\n elif UI==3:\n try:\n UI2 = input('Please enter the radius of the Cone')\n r= float(UI2)\n UI3 = input('Please enter the height of the Cone')\n h= float(UI3)\n V= (pi*h*r**2)/3\n print(\"The volume of the Cone with the radius of \",r,\" and the height of \",h,\" is equal to\", V)\n except:\n print(\"The radius and height of the Cone must be numerics. Please Try Again!\")\n elif UI==4:\n try:\n UI2 = input('Please enter the width of the Rectangular Box')\n w= float(UI2)\n UI3 = input('Please enter the height of the Rectangular Box')\n h= float(UI3)\n UI4 = input('Please enter the length of the Rectangular Box')\n l= float(UI4)\n V= w*h*l\n print(\"The volume of the Rectangular Box with the width of \",w,\" and the height of \",h,\" and the length of \",l,\" is equal to\", V)\n except:\n print(\"The width, height, and length of the Rectangular Box must be numerics. Please Try Again!\")\n else:\n print(\"Please Enter A Number from 1,2,3, and 4!\")\n\n except:\n print(\"The object should be either a Cylinder, a Sphere, a Cone, or a Rectangular Box. Please Enter A Number from 1,2,3, and 4!\")\n ", "_____no_output_____" ], [ "VOLUME()", "Please choose the shape of the object. Enter 1 for \"Cylinder\", 2 for \"Sphere\", 3 for \"Cone\", or 4 for \"Rectangular Box\" 1\nPlease enter the radius of the Cylinder 1\nPlease enter the height of the Cylinder 1\n" ] ] ]
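The Problem 4 cells above only eyeball where the two curves intercept. As a rough sketch (not part of the original exam solution, and assuming the parameter values from the problem table: alpha = 0.50, beta = 3.00, gamma = pi/2), the same 0-to-10 grid used for plotting can be scanned for sign changes of f(x) - g(x) and each bracket refined by bisection:

```python
import math

alpha, beta, gamma = 0.5, 3.0, math.pi / 2

def f(x):
    return math.exp(-alpha * x)

def g(x):
    return gamma * math.sin(beta * x)

def h(x):
    # difference of the two curves; each root of h is an intersection
    return f(x) - g(x)

def bisect(lo, hi, tol=1e-8):
    # plain bisection on a bracket [lo, hi] where h changes sign
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if h(lo) * h(mid) <= 0:
            hi = mid
        else:
            lo = mid
    return 0.5 * (lo + hi)

# scan the plotting range for sign changes and refine each one
xs = [10.0 * i / 1000 for i in range(1001)]
crossings = [bisect(a, b) for a, b in zip(xs, xs[1:]) if h(a) * h(b) < 0]
print([round(c, 4) for c in crossings])
```

The printed list gives the approximate x-locations of every crossing on [0, 10], which can be checked against the plot.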
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06a2d0046b95c1118a12c3201a3cff248da2c2a
11,198
ipynb
Jupyter Notebook
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
3e440d88111627827456aa8672516eb389a68e98
[ "MIT" ]
2
2021-12-11T07:19:34.000Z
2022-03-11T09:29:49.000Z
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
3e440d88111627827456aa8672516eb389a68e98
[ "MIT" ]
null
null
null
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
3e440d88111627827456aa8672516eb389a68e98
[ "MIT" ]
null
null
null
22.531187
152
0.489998
[ [ [ "# 转置卷积\n:label:`sec_transposed_conv`\n\n到目前为止,我们所见到的卷积神经网络层,例如卷积层( :numref:`sec_conv_layer`)和汇聚层( :numref:`sec_pooling`),通常会减少下采样输入图像的空间维度(高和宽)。\n然而如果输入和输出图像的空间维度相同,在以像素级分类的语义分割中将会很方便。\n例如,输出像素所处的通道维可以保有输入像素在同一位置上的分类结果。\n\n为了实现这一点,尤其是在空间维度被卷积神经网络层缩小后,我们可以使用另一种类型的卷积神经网络层,它可以增加上采样中间层特征图的空间维度。\n在本节中,我们将介绍\n*转置卷积*(transposed convolution) :cite:`Dumoulin.Visin.2016`,\n用于逆转下采样导致的空间尺寸减小。\n", "_____no_output_____" ] ], [ [ "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\n\nnpx.set_np()", "_____no_output_____" ] ], [ [ "## 基本操作\n\n让我们暂时忽略通道,从基本的转置卷积开始,设步幅为1且没有填充。\n假设我们有一个$n_h \\times n_w$的输入张量和一个$k_h \\times k_w$的卷积核。\n以步幅为1滑动卷积核窗口,每行$n_w$次,每列$n_h$次,共产生$n_h n_w$个中间结果。\n每个中间结果都是一个$(n_h + k_h - 1) \\times (n_w + k_w - 1)$的张量,初始化为0。\n为了计算每个中间张量,输入张量中的每个元素都要乘以卷积核,从而使所得的$k_h \\times k_w$张量替换中间张量的一部分。\n请注意,每个中间张量被替换部分的位置与输入张量中元素的位置相对应。\n最后,所有中间结果相加以获得最终结果。\n\n例如, :numref:`fig_trans_conv`解释了如何为$2\\times 2$的输入张量计算卷积核为$2\\times 2$的转置卷积。\n\n![卷积核为 $2\\times 2$ 的转置卷积。阴影部分是中间张量的一部分,也是用于计算的输入和卷积核张量元素。 ](../img/trans_conv.svg)\n:label:`fig_trans_conv`\n\n我们可以对输入矩阵`X`和卷积核矩阵`K`(**实现基本的转置卷积运算**)`trans_conv`。\n", "_____no_output_____" ] ], [ [ "def trans_conv(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] + h - 1, X.shape[1] + w - 1))\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n Y[i: i + h, j: j + w] += X[i, j] * K\n return Y", "_____no_output_____" ] ], [ [ "与通过卷积核“减少”输入元素的常规卷积(在 :numref:`sec_conv_layer`中)相比,转置卷积通过卷积核“广播”输入元素,从而产生大于输入的输出。\n我们可以通过 :numref:`fig_trans_conv`来构建输入张量`X`和卷积核张量`K`从而[**验证上述实现输出**]。\n此实现是基本的二维转置卷积运算。\n", "_____no_output_____" ] ], [ [ "X = np.array([[0.0, 1.0], [2.0, 3.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ntrans_conv(X, K)", "_____no_output_____" ] ], [ [ "或者,当输入`X`和卷积核`K`都是四维张量时,我们可以[**使用高级API获得相同的结果**]。\n", "_____no_output_____" ] ], [ [ "X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)\ntconv = nn.Conv2DTranspose(1, kernel_size=2)\ntconv.initialize(init.Constant(K))\ntconv(X)", "_____no_output_____" ] ], [ [ "## [**填充、步幅和多通道**]\n\n与常规卷积不同,在转置卷积中,填充被应用于的输出(常规卷积将填充应用于输入)。\n例如,当将高和宽两侧的填充数指定为1时,转置卷积的输出中将删除第一和最后的行与列。\n", "_____no_output_____" ] ], [ [ "tconv = nn.Conv2DTranspose(1, kernel_size=2, padding=1)\ntconv.initialize(init.Constant(K))\ntconv(X)", "_____no_output_____" ] ], [ [ "在转置卷积中,步幅被指定为中间结果(输出),而不是输入。\n使用 :numref:`fig_trans_conv`中相同输入和卷积核张量,将步幅从1更改为2会增加中间张量的高和权重,因此输出张量在 :numref:`fig_trans_conv_stride2`中。\n\n![卷积核为$2\\times 2$,步幅为2的转置卷积。阴影部分是中间张量的一部分,也是用于计算的输入和卷积核张量元素。](../img/trans_conv_stride2.svg)\n:label:`fig_trans_conv_stride2`\n\n以下代码可以验证 :numref:`fig_trans_conv_stride2`中步幅为2的转置卷积的输出。\n", "_____no_output_____" ] ], [ [ "tconv = nn.Conv2DTranspose(1, kernel_size=2, strides=2)\ntconv.initialize(init.Constant(K))\ntconv(X)", "_____no_output_____" ] ], [ [ "对于多个输入和输出通道,转置卷积与常规卷积以相同方式运作。\n假设输入有$c_i$个通道,且转置卷积为每个输入通道分配了一个$k_h\\times k_w$的卷积核张量。\n当指定多个输出通道时,每个输出通道将有一个$c_i\\times k_h\\times k_w$的卷积核。\n\n同样,如果我们将$\\mathsf{X}$代入卷积层$f$来输出$\\mathsf{Y}=f(\\mathsf{X})$,并创建一个与$f$具有相同的超参数、但输出通道数量是$\\mathsf{X}$中通道数的转置卷积层$g$,那么$g(Y)$的形状将与$\\mathsf{X}$相同。\n下面的示例可以解释这一点。\n", "_____no_output_____" ] ], [ [ "X = np.random.uniform(size=(1, 10, 16, 16))\nconv = nn.Conv2D(20, kernel_size=5, padding=2, strides=3)\ntconv = nn.Conv2DTranspose(10, kernel_size=5, padding=2, strides=3)\nconv.initialize()\ntconv.initialize()\ntconv(conv(X)).shape == X.shape", "_____no_output_____" ] ], [ [ "## [**与矩阵变换的联系**]\n:label:`subsec-connection-to-mat-transposition`\n\n转置卷积为何以矩阵变换命名呢?\n让我们首先看看如何使用矩阵乘法来实现卷积。\n在下面的示例中,我们定义了一个$3\\times 
3$的输入`X`和$2\\times 2$卷积核`K`,然后使用`corr2d`函数计算卷积输出`Y`。\n", "_____no_output_____" ] ], [ [ "X = np.arange(9.0).reshape(3, 3)\nK = np.array([[1.0, 2.0], [3.0, 4.0]])\nY = d2l.corr2d(X, K)\nY", "_____no_output_____" ] ], [ [ "接下来,我们将卷积核`K`重写为包含大量0的稀疏权重矩阵`W`。\n权重矩阵的形状是($4$,$9$),其中非0元素来自卷积核`K`。\n", "_____no_output_____" ] ], [ [ "def kernel2matrix(K):\n k, W = np.zeros(5), np.zeros((4, 9))\n k[:2], k[3:5] = K[0, :], K[1, :]\n W[0, :5], W[1, 1:6], W[2, 3:8], W[3, 4:] = k, k, k, k\n return W\n\nW = kernel2matrix(K)\nW", "_____no_output_____" ] ], [ [ "逐行连结输入`X`,获得了一个长度为9的矢量。\n然后,`W`的矩阵乘法和向量化的`X`给出了一个长度为4的向量。\n重塑它之后,可以获得与上面的原始卷积操作所得相同的结果`Y`:我们刚刚使用矩阵乘法实现了卷积。\n", "_____no_output_____" ] ], [ [ "Y == np.dot(W, X.reshape(-1)).reshape(2, 2)", "_____no_output_____" ] ], [ [ "同样,我们可以使用矩阵乘法来实现转置卷积。\n在下面的示例中,我们将上面的常规卷积$2 \\times 2$的输出`Y`作为转置卷积的输入。\n想要通过矩阵相乘来实现它,我们只需要将权重矩阵`W`的形状转置为$(9, 4)$。\n", "_____no_output_____" ] ], [ [ "Z = trans_conv(Y, K)\nZ == np.dot(W.T, Y.reshape(-1)).reshape(3, 3)", "_____no_output_____" ] ], [ [ "抽象来看,给定输入向量$\\mathbf{x}$和权重矩阵$\\mathbf{W}$,卷积的前向传播函数可以通过将其输入与权重矩阵相乘并输出向量$\\mathbf{y}=\\mathbf{W}\\mathbf{x}$来实现。\n由于反向传播遵循链式法则和$\\nabla_{\\mathbf{x}}\\mathbf{y}=\\mathbf{W}^\\top$,卷积的反向传播函数可以通过将其输入与转置的权重矩阵$\\mathbf{W}^\\top$相乘来实现。\n因此,转置卷积层能够交换卷积层的正向传播函数和反向传播函数:它的正向传播和反向传播函数将输入向量分别与$\\mathbf{W}^\\top$和$\\mathbf{W}$相乘。\n\n## 小结\n\n* 与通过卷积核减少输入元素的常规卷积相反,转置卷积通过卷积核广播输入元素,从而产生形状大于输入的输出。\n* 如果我们将$\\mathsf{X}$输入卷积层$f$来获得输出$\\mathsf{Y}=f(\\mathsf{X})$并创造一个与$f$有相同的超参数、但输出通道数是$\\mathsf{X}$中通道数的转置卷积层$g$,那么$g(Y)$的形状将与$\\mathsf{X}$相同。\n* 我们可以使用矩阵乘法来实现卷积。转置卷积层能够交换卷积层的正向传播函数和反向传播函数。\n\n## 练习\n\n1. 在 :numref:`subsec-connection-to-mat-transposition`中,卷积输入`X`和转置的卷积输出`Z`具有相同的形状。他们的数值也相同吗?为什么?\n1. 使用矩阵乘法来实现卷积是否有效率?为什么?\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/3301)\n", "_____no_output_____" ] ] ]
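The first exercise above can be probed numerically. The sketch below is my own and uses plain NumPy rather than MXNet, so the notebook's `corr2d` and `trans_conv` are re-implemented to keep it self-contained; it shows that running the convolution output back through the transposed convolution restores the shape of `X` but not its values, because the round trip computes $\mathbf{W}^\top\mathbf{W}\mathbf{x}$ rather than $\mathbf{x}$.

```python
import numpy as np

def corr2d(X, K):
    # 2-D cross-correlation, mirroring d2l.corr2d
    h, w = K.shape
    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

def trans_conv(X, K):
    # the same basic transposed convolution defined earlier in the notebook
    h, w = K.shape
    Y = np.zeros((X.shape[0] + h - 1, X.shape[1] + w - 1))
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            Y[i:i + h, j:j + w] += X[i, j] * K
    return Y

X = np.arange(9.0).reshape(3, 3)
K = np.array([[1.0, 2.0], [3.0, 4.0]])

Z = trans_conv(corr2d(X, K), K)   # convolution followed by transposed convolution
print(Z.shape == X.shape)         # True: the spatial size is restored
print(np.allclose(Z, X))          # False: the values are not restored
```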
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d06a3aa157b3ca9bca2c78566482a4f4d694dbd4
294
ipynb
Jupyter Notebook
notebooks/book1/10/iris_logreg.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
notebooks/book1/10/iris_logreg.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
notebooks/book1/10/iris_logreg.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
18.375
123
0.591837
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06a4232aed451abdf70855132f238d9170a445e
854
ipynb
Jupyter Notebook
Untitled1.ipynb
goldang01/python_study
b8be539f64935182ea07d29067d0dbef54cc14ab
[ "MIT" ]
1
2021-03-11T12:11:19.000Z
2021-03-11T12:11:19.000Z
Untitled1.ipynb
goldang01/python_study
b8be539f64935182ea07d29067d0dbef54cc14ab
[ "MIT" ]
null
null
null
Untitled1.ipynb
goldang01/python_study
b8be539f64935182ea07d29067d0dbef54cc14ab
[ "MIT" ]
1
2021-03-08T10:07:14.000Z
2021-03-08T10:07:14.000Z
16.423077
34
0.490632
[ [ [ "print(\"hello\")", "hello\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
d06a4a0abc21b5b690e865fa361e4ffc6738738e
237,484
ipynb
Jupyter Notebook
Prediction.ipynb
Kunal614/Pocket-Medical-CNN-Model
125258a5f01b640c7551f39152f76b970591a958
[ "Apache-2.0" ]
null
null
null
Prediction.ipynb
Kunal614/Pocket-Medical-CNN-Model
125258a5f01b640c7551f39152f76b970591a958
[ "Apache-2.0" ]
null
null
null
Prediction.ipynb
Kunal614/Pocket-Medical-CNN-Model
125258a5f01b640c7551f39152f76b970591a958
[ "Apache-2.0" ]
null
null
null
821.743945
132,052
0.950679
[ [ [ "from tensorflow.keras import models\nfrom keras.preprocessing.image import img_to_array , load_img , ImageDataGenerator\nimport matplotlib.pyplot as plt", "Using TensorFlow backend.\n" ], [ "model = models.load_model('wound_aug.h5')", "WARNING:tensorflow:From /home/iiitk/miniconda3/lib/python3.7/site-packages/tensorflow_core/python/ops/init_ops.py:97: calling GlorotUniform.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\nWARNING:tensorflow:From /home/iiitk/miniconda3/lib/python3.7/site-packages/tensorflow_core/python/ops/init_ops.py:97: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\nInstructions for updating:\nCall initializer instance with the dtype argument instead of passing it to the constructor\nWARNING:tensorflow:From /home/iiitk/miniconda3/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nWARNING:tensorflow:From /home/iiitk/miniconda3/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n" ], [ "ls", " \u001b[0m\u001b[01;35mcut.jpg\u001b[0m \u001b[01;34mnon-severe\u001b[0m/\r\n \u001b[01;34mdata\u001b[0m/ Prediction.ipynb\r\n'Deep Learning with tensorflow and keras.ipynb' \u001b[01;34msevere\u001b[0m/\r\n Image_aug.ipynb Wound_agumented_model.ipynb\r\n \u001b[01;35mmax_cut.jpg\u001b[0m wound_aug.h5\r\n \u001b[01;34mModel_Script\u001b[0m/\r\n" ], [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 148, 148, 32) 896 \n_________________________________________________________________\nactivation (Activation) (None, 148, 148, 32) 0 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 74, 74, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 74, 74, 32) 9248 \n_________________________________________________________________\nactivation_1 (Activation) (None, 74, 74, 32) 0 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 37, 37, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 37, 37, 64) 18496 \n_________________________________________________________________\nactivation_2 (Activation) (None, 37, 37, 64) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 19, 19, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 23104) 0 \n_________________________________________________________________\ndense (Dense) (None, 64) 1478720 \n_________________________________________________________________\nactivation_3 (Activation) (None, 64) 0 
\n_________________________________________________________________\ndropout (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 65 \n_________________________________________________________________\nactivation_4 (Activation) (None, 1) 0 \n=================================================================\nTotal params: 1,507,425\nTrainable params: 1,507,425\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "from PIL import Image\nimport numpy as np\nfrom skimage import transform\nnp_image = Image.open('cut.jpg')\nplt.imshow(np_image)\nnp_image = np.array(np_image).astype('float32')/255\nnp_image = transform.resize(np_image, (150, 150, 3))\nnp_image = np.expand_dims(np_image, axis=0)\nnp_image.shape\n", "_____no_output_____" ], [ "p = model.predict(np_image) #less than 0.5 so Non-Severe\np", "_____no_output_____" ], [ "np_image = Image.open('max_cut.jpg')\nplt.imshow(np_image)\nnp_image = np.array(np_image).astype('float32')/255\nnp_image = transform.resize(np_image, (150, 150, 3))\nnp_image = np.expand_dims(np_image, axis=0)\nnp_image.shape", "_____no_output_____" ], [ "p = model.predict(np_image) #More than 0.5 Severe\np", "_____no_output_____" ] ] ]
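The two prediction cells above repeat the same preprocessing by hand, and the 0.5 cut-off only appears in comments. A small helper, sketched here under the assumption that the model keeps the 150x150x3 input and the severe-above-0.5 convention used above, keeps those steps in one place:

```python
import numpy as np
from PIL import Image
from skimage import transform

def classify_wound(path, model, threshold=0.5):
    # load -> scale to [0, 1] -> resize to the model's 150x150x3 input -> add batch axis
    img = np.array(Image.open(path)).astype('float32') / 255
    img = transform.resize(img, (150, 150, 3))
    img = np.expand_dims(img, axis=0)
    score = float(model.predict(img)[0][0])
    # scores above the threshold are treated as "Severe", as in the cells above
    label = 'Severe' if score > threshold else 'Non-Severe'
    return label, score

# usage: classify_wound('cut.jpg', model) or classify_wound('max_cut.jpg', model)
```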
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06a5377066f4b492eaa467ee3461eb77d95992a
169,654
ipynb
Jupyter Notebook
Hwk01.ipynb
ucdavis-are254/Hwk01
f70ba5b02ff26b793e01849361623aa9abc5df9b
[ "MIT" ]
null
null
null
Hwk01.ipynb
ucdavis-are254/Hwk01
f70ba5b02ff26b793e01849361623aa9abc5df9b
[ "MIT" ]
2
2019-09-28T01:01:39.000Z
2019-09-30T00:49:50.000Z
Hwk01.ipynb
ucdavis-are254/Hwk01
f70ba5b02ff26b793e01849361623aa9abc5df9b
[ "MIT" ]
11
2019-09-25T17:08:52.000Z
2019-09-30T06:26:37.000Z
78.145555
339
0.785086
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06a601dd1e248dcd1d0cb51781d43c2b995a8cd
2,504
ipynb
Jupyter Notebook
Day_039_HW.ipynb
semishen/ML100Days
423ee8fc4beeae43694a33143b9a94bf5e15fd92
[ "MIT" ]
null
null
null
Day_039_HW.ipynb
semishen/ML100Days
423ee8fc4beeae43694a33143b9a94bf5e15fd92
[ "MIT" ]
null
null
null
Day_039_HW.ipynb
semishen/ML100Days
423ee8fc4beeae43694a33143b9a94bf5e15fd92
[ "MIT" ]
null
null
null
26.083333
227
0.497204
[ [ [ "<a href=\"https://colab.research.google.com/github/semishen/ML100Days/blob/master/Day_039_HW.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## [作業重點]\n清楚了解 L1, L2 的意義與差異為何,並了解 LASSO 與 Ridge 之間的差異與使用情境", "_____no_output_____" ], [ "## 作業", "_____no_output_____" ], [ "請閱讀相關文獻,並回答下列問題\n\n[脊回歸 (Ridge Regression)](https://blog.csdn.net/daunxx/article/details/51578787)\n[Linear, Ridge, Lasso Regression 本質區別](https://www.zhihu.com/question/38121173)\n\n### Q1: LASSO 回歸可以被用來作為 Feature selection 的工具,請了解 LASSO 模型為什麼可用來作 Feature selection\n### A1: LASSO 基於 L1 正則,隨著 alpha 的提升數值為0的參數數量隨之提升,而參數為0的特徵可以視為對模型沒有影響力,理當剔除,因此 LASSO 具有 Feature Selection 的功能。\n\n<br/>\n\n\n### Q2: 當自變數 (X) 存在高度共線性時,Ridge Regression 可以處理這樣的問題嗎?\n### A2: 可以。當自變數存在高度共線性時,模型對 noise 的敏感度會提高,Ridge Regression 基於 L2 正則,可以減緩參數的劇烈變動,降低 noise 的影響性。", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
d06a644a11be596ae94f3be98d1eca7679a74bc5
18,368
ipynb
Jupyter Notebook
Untitled.ipynb
Gabs0102/Mi-primer-Proyecto
6261564ea42d9137bff93a66e39633068cf1dde9
[ "MIT" ]
null
null
null
Untitled.ipynb
Gabs0102/Mi-primer-Proyecto
6261564ea42d9137bff93a66e39633068cf1dde9
[ "MIT" ]
null
null
null
Untitled.ipynb
Gabs0102/Mi-primer-Proyecto
6261564ea42d9137bff93a66e39633068cf1dde9
[ "MIT" ]
null
null
null
75.588477
13,552
0.838251
[ [ [ "### PRESENTACIÓN", "_____no_output_____" ], [ "**nombre:** Gabriela Ivonne Montoya Ortiz \n- **Profesión**: <font color = pink> **Estudiante** </font>\n- **Edad**: 19 años \n- **Pasa tiempos**: bailar diferentes estilos, leer, ver peliculas. \n- **Educación**: colegio salesiano anahuac revolucion ", "_____no_output_____" ], [ "Foto:\n <img src=\"https://mujerpandora.com/media/thumbs/uploads/articles/images/eres-bailarina-3-tips-para-red-jpg_800x0-jpg_626x0.jpg\" width=\"150px\" height=\"50px\" />", "_____no_output_____" ], [ "## Hola", "_____no_output_____" ], [ "Ecuaciones...", "_____no_output_____" ], [ "$f(x) = \\sin(x)$", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline", "_____no_output_____" ], [ "x = np.linspace(-2*np.pi, 2*np.pi, 100)\nplt.figure(figsize = (4,3))\nplt.plot(x, np.sin(x));", "_____no_output_____" ] ], [ [ "$$\\frac{df}{dx} = -\\nabla\\psi $$", "_____no_output_____" ], [ "** Minichatbot, Gaby **", "_____no_output_____" ] ], [ [ "q1 = \"¿Cómo te llamas?\"\nq2 = \"¿Qué edad tienes?\"\nq3 = \"¿Donde vienes?\"\nq4 = \"¿Sexo?\"", "_____no_output_____" ], [ "qs = [q1, q2, q3, q4]\nqs", "_____no_output_____" ], [ "ans1 = \"Mucho gusto, me llamo Gaby. \"\nans2 = \"Legal!\"\nans3 = \"Orale, muy bien.\"\nans4 = \"NO.\"", "_____no_output_____" ], [ "anss = [ans1, ans2, ans3, ans4]\nanss", "_____no_output_____" ], [ "def chatGaby():\n print(\"Hola, bienvenite!, tengo algunas preguntas para ti.\")\n print(qs[0])\n nombre = input(\">>> \")\n print(anss[0] + \"Me gusta tu nombre, %s\" % nombre)\n print(\"segunda pregunta\")\n print(qs[1])\n edad = input(\">>> \")\n print(\"ohh, ya vas a cumplir %s\" % (int(edad+1)))\n print(anss[1])\n print(\"Je, y \" + qs[2])\n print(anss[2])\n print(qs[3])\n print(anss[3])\n ", "_____no_output_____" ], [ "chatGaby()", "Hola, bienvenite!, tengo algunas preguntas para ti.\n¿Cómo te llamas?\n" ], [ "%run welcome.py", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d06a71f0afcede64fad7bebd80a5d31d2454df6b
239,673
ipynb
Jupyter Notebook
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
eaabe4bf9dae64a96a3f8a90db9ad9834ab122b1
[ "MIT" ]
null
null
null
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
eaabe4bf9dae64a96a3f8a90db9ad9834ab122b1
[ "MIT" ]
null
null
null
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
eaabe4bf9dae64a96a3f8a90db9ad9834ab122b1
[ "MIT" ]
null
null
null
154.528046
31,600
0.882281
[ [ [ "# **LSTM - Time Series Prediction**", "_____no_output_____" ], [ "## **Importing libraries**", "_____no_output_____" ] ], [ [ "import pandas\nimport matplotlib.pyplot as plt\nimport numpy\nimport math\nfrom tqdm import tqdm\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error", "_____no_output_____" ], [ "# fix random seed for reproducibility\nnumpy.random.seed(7)", "_____no_output_____" ] ], [ [ "## **Load the data**", "_____no_output_____" ] ], [ [ "! rm /content/airline-passengers.csv\n! wget https://raw.githubusercontent.com/jbrownlee/Datasets/master/airline-passengers.csv", "_____no_output_____" ], [ "dataset = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')", "_____no_output_____" ], [ "plt.plot(dataset)\nplt.show()", "_____no_output_____" ], [ "# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)", "_____no_output_____" ] ], [ [ "## **LSTM Network for Regression**", "_____no_output_____" ] ], [ [ "# load the dataset\ndataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\nprint(len(train), len(test))", "96 48\n" ], [ "# reshape into X=t and Y=t+1\nlook_back = 1\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))", "_____no_output_____" ], [ "# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(4, input_shape=(1, look_back)))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)", "_____no_output_____" ], [ "# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 23.04 RMSE\nTest Score: 47.18 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", 
"_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ], [ [ "## **LSTM for Regression Using the Window Method**", "_____no_output_____" ] ], [ [ "# load the dataset\ndataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]", "_____no_output_____" ], [ "# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))", "_____no_output_____" ], [ "# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(4, input_shape=(1, look_back)))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)", "_____no_output_____" ], [ "# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 21.34 RMSE\nTest Score: 57.22 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", "_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ], [ [ "## **LSTM for Regression with Time Steps**", "_____no_output_____" ] ], [ [ "# load the dataset\ndataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size 
= int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]", "_____no_output_____" ], [ "# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))", "_____no_output_____" ], [ "# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(4, input_shape=(look_back, 1)))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0)", "_____no_output_____" ], [ "# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 25.03 RMSE\nTest Score: 63.80 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", "_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ], [ [ "## **LSTM with Memory Between Batches**", "_____no_output_____" ] ], [ [ "# load the dataset\ndataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]", "_____no_output_____" ], [ "# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))", "_____no_output_____" ], [ "# create and fit the LSTM network\nbatch_size = 1\nmodel = Sequential()\nmodel.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nfor i in range(100):\n\tmodel.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, 
shuffle=False)\n\tmodel.reset_states()", "_____no_output_____" ], [ "# make predictions\ntrainPredict = model.predict(trainX, batch_size=batch_size)\nmodel.reset_states()\ntestPredict = model.predict(testX, batch_size=batch_size)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 22.35 RMSE\nTest Score: 83.01 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", "_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ], [ [ "## **Stacked LSTMs with Memory Between Batches**", "_____no_output_____" ] ], [ [ "# load the dataset\ndataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]", "_____no_output_____" ], [ "# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))", "_____no_output_____" ], [ "# create and fit the LSTM network\nbatch_size = 1\nmodel = Sequential()\nmodel.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))\nmodel.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nfor i in tqdm(range(100)):\n\tmodel.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n\tmodel.reset_states()", "100%|██████████| 100/100 [00:37<00:00, 2.64it/s]\n" ], [ "# make predictions\ntrainPredict = model.predict(trainX, batch_size=batch_size)\nmodel.reset_states()\ntestPredict = model.predict(testX, batch_size=batch_size)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = 
math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 25.07 RMSE\nTest Score: 77.12 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", "_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ], [ [ "## **Time series prediction of TESLA closing stock price**", "_____no_output_____" ] ], [ [ "# Importing libraries\nimport numpy\nimport matplotlib.pyplot as plt\nfrom pandas import read_csv\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n! pip install nsepy\nfrom nsepy import get_history\nfrom datetime import date\nfrom tqdm import tqdm", "_____no_output_____" ], [ "# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)", "_____no_output_____" ], [ "# load the dataset\ndataframe = pandas.read_csv('TSLA.csv', usecols=[4], engine='python')\ndataset = dataframe.values\ndataset = dataset.astype('float32')", "_____no_output_____" ], [ "# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)", "_____no_output_____" ], [ "# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]", "_____no_output_____" ], [ "# reshape into X=t and Y=t+1\nlook_back = 3\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)", "_____no_output_____" ], [ "# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))", "_____no_output_____" ], [ "# create and fit the LSTM network\nbatch_size = 1\nmodel = Sequential()\nmodel.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))\nmodel.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))\nmodel.add(Dense(1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nfor i in tqdm(range(300)):\n\tmodel.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n\tmodel.reset_states()", "100%|██████████| 300/300 [12:55<00:00, 2.59s/it]\n" ], [ "# make predictions\ntrainPredict = model.predict(trainX, batch_size=batch_size)\nmodel.reset_states()\ntestPredict = model.predict(testX, batch_size=batch_size)", "_____no_output_____" ], [ "# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = 
scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])", "_____no_output_____" ], [ "# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))", "Train Score: 4.43 RMSE\nTest Score: 39.16 RMSE\n" ], [ "# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict", "_____no_output_____" ], [ "# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict", "_____no_output_____" ], [ "# plot baseline and predictions\nplt.plot(scaler.inverse_transform(dataset))\nplt.plot(trainPredictPlot)\nplt.plot(testPredictPlot)\nplt.show()", "_____no_output_____" ] ] ]
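Every variant in this notebook rests on the same `create_dataset` windowing helper, so a toy trace of it (my own example on a six-point series) makes the windows explicit before the reshapes:

```python
import numpy as np

def create_dataset(dataset, look_back=1):
    # identical to the helper defined above
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

toy = np.arange(6, dtype='float32').reshape(-1, 1)   # series 0, 1, 2, 3, 4, 5
X, y = create_dataset(toy, look_back=3)
print(X)   # [[0. 1. 2.]
           #  [1. 2. 3.]]   sliding windows of length look_back
print(y)   # [3. 4.]         the value immediately after each window
# the "window" sections reshape X to (samples, 1, look_back);
# the "time steps" sections reshape it to (samples, look_back, 1)
```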
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06a782639a3d5f9465a0e8dc2f148f89bed19a9
63,682
ipynb
Jupyter Notebook
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-x.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-x.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
BCNcode/0_vibratioon_signal/1450/DNN/DNN_1450-09-512-x.ipynb
Decaili98/BCN-code-2022
ab0ce085cb29fbf12b6d773861953cb2cef23e20
[ "MulanPSL-1.0" ]
null
null
null
139.96044
26,500
0.825759
[ [ [ "from tensorflow import keras\nfrom tensorflow.keras import *\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.regularizers import l2#正则化L2\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "normal = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.12-未发生缠绕前\\2013-9.12振动\\2013-9-12振动-1450rmin-mat\\1450r_normalvibx.txt', delimiter=',')\nchanrao = np.loadtxt(r'F:\\张老师课题学习内容\\code\\数据集\\试验数据(包括压力脉动和振动)\\2013.9.17-发生缠绕后\\振动\\9-17下午振动1450rmin-mat\\1450r_chanraovibx.txt', delimiter=',')\nprint(normal.shape,chanrao.shape,\"***************************************************\")\ndata_normal=normal[16:18] #提取前两行\ndata_chanrao=chanrao[16:18] #提取前两行\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")\ndata_normal=data_normal.reshape(1,-1)\ndata_chanrao=data_chanrao.reshape(1,-1)\nprint(data_normal.shape,data_chanrao.shape)\nprint(data_normal,\"\\r\\n\",data_chanrao,\"***************************************************\")", "(22, 32768) (22, 32768) ***************************************************\n(2, 32768) (2, 32768)\n[[ 1.2027 2.7489 -0.32088 ... -0.36631 -0.059784 0.88198 ]\n [-2.3527 2.4643 0.95492 ... 4.1248 -0.4956 -0.2093 ]] \r\n [[ 0.59238 -3.0357 -0.24356 ... 0.48481 1.8901 0.21009 ]\n [-2.2471 -1.5225 0.74835 ... -1.5349 0.37456 0.054544]] ***************************************************\n(1, 65536) (1, 65536)\n[[ 1.2027 2.7489 -0.32088 ... 4.1248 -0.4956 -0.2093 ]] \r\n [[ 0.59238 -3.0357 -0.24356 ... -1.5349 0.37456 0.054544]] ***************************************************\n" ], [ "#水泵的两种故障类型信号normal正常,chanrao故障\ndata_normal=data_normal.reshape(-1, 512)#(65536,1)-(128, 515)\ndata_chanrao=data_chanrao.reshape(-1,512)\nprint(data_normal.shape,data_chanrao.shape)\n", "(128, 512) (128, 512)\n" ], [ "import numpy as np\ndef yuchuli(data,label):#(4:1)(51:13)\n #打乱数据顺序\n np.random.shuffle(data)\n train = data[0:102,:]\n test = data[102:128,:]\n label_train = np.array([label for i in range(0,102)])\n label_test =np.array([label for i in range(0,26)])\n return train,test ,label_train ,label_test\ndef stackkk(a,b,c,d,e,f,g,h):\n aa = np.vstack((a, e))\n bb = np.vstack((b, f))\n cc = np.hstack((c, g))\n dd = np.hstack((d, h))\n return aa,bb,cc,dd\nx_tra0,x_tes0,y_tra0,y_tes0 = yuchuli(data_normal,0)\nx_tra1,x_tes1,y_tra1,y_tes1 = yuchuli(data_chanrao,1)\ntr1,te1,yr1,ye1=stackkk(x_tra0,x_tes0,y_tra0,y_tes0 ,x_tra1,x_tes1,y_tra1,y_tes1)\n\nx_train=tr1\nx_test=te1\ny_train = yr1\ny_test = ye1\n\n#打乱数据\nstate = np.random.get_state()\nnp.random.shuffle(x_train)\nnp.random.set_state(state)\nnp.random.shuffle(y_train)\n\nstate = np.random.get_state()\nnp.random.shuffle(x_test)\nnp.random.set_state(state)\nnp.random.shuffle(y_test)\n\n\n#对训练集和测试集标准化\ndef ZscoreNormalization(x):\n \"\"\"Z-score normaliaztion\"\"\"\n x = (x - np.mean(x)) / np.std(x)\n return x\nx_train=ZscoreNormalization(x_train)\nx_test=ZscoreNormalization(x_test)\n# print(x_test[0])\n\n\n#转化为一维序列\nx_train = x_train.reshape(-1,512,1)\nx_test = x_test.reshape(-1,512,1)\nprint(x_train.shape,x_test.shape)\n\ndef to_one_hot(labels,dimension=2):\n results = np.zeros((len(labels),dimension))\n for i,label in enumerate(labels):\n results[i,label] = 1\n return results\none_hot_train_labels = to_one_hot(y_train)\none_hot_test_labels = to_one_hot(y_test)\n", "(204, 512, 1) (52, 512, 1)\n" ], [ "x = 
layers.Input(shape=[512,1,1])\nFlatten=layers.Flatten()(x)\nDense1=layers.Dense(12, activation='relu')(Flatten)\nDense2=layers.Dense(6, activation='relu')(Dense1)\nDense3=layers.Dense(2, activation='softmax')(Dense2)\nmodel = keras.Model(x, Dense3) \nmodel.summary() ", "Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 512, 1, 1)] 0 \n_________________________________________________________________\nflatten (Flatten) (None, 512) 0 \n_________________________________________________________________\ndense (Dense) (None, 12) 6156 \n_________________________________________________________________\ndense_1 (Dense) (None, 6) 78 \n_________________________________________________________________\ndense_2 (Dense) (None, 2) 14 \n=================================================================\nTotal params: 6,248\nTrainable params: 6,248\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "#定义优化\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',metrics=['accuracy']) ", "_____no_output_____" ], [ "import time\ntime_begin = time.time()\nhistory = model.fit(x_train,one_hot_train_labels,\n validation_split=0.1,\n epochs=50,batch_size=10,\n shuffle=True)\ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)", "Epoch 1/50\n19/19 [==============================] - 3s 91ms/step - loss: 0.9257 - accuracy: 0.4729 - val_loss: 0.7630 - val_accuracy: 0.4762\nEpoch 2/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.6430 - accuracy: 0.6512 - val_loss: 0.7600 - val_accuracy: 0.5238\nEpoch 3/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.5330 - accuracy: 0.7538 - val_loss: 0.7673 - val_accuracy: 0.4762\nEpoch 4/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.4818 - accuracy: 0.7948 - val_loss: 0.7822 - val_accuracy: 0.4762\nEpoch 5/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.4700 - accuracy: 0.8367 - val_loss: 0.7917 - val_accuracy: 0.4762\nEpoch 6/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.4265 - accuracy: 0.8063 - val_loss: 0.8034 - val_accuracy: 0.5238\nEpoch 7/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.4152 - accuracy: 0.7737 - val_loss: 0.8214 - val_accuracy: 0.4762\nEpoch 8/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.3861 - accuracy: 0.7798 - val_loss: 0.8264 - val_accuracy: 0.5238\nEpoch 9/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.3414 - accuracy: 0.8068 - val_loss: 0.8344 - val_accuracy: 0.5238\nEpoch 10/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.3029 - accuracy: 0.8304 - val_loss: 0.8566 - val_accuracy: 0.5238\nEpoch 11/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.3038 - accuracy: 0.7962 - val_loss: 0.8976 - val_accuracy: 0.5238\nEpoch 12/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2767 - accuracy: 0.8088 - val_loss: 0.9182 - val_accuracy: 0.5238\nEpoch 13/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.2173 - accuracy: 0.8506 - val_loss: 0.9345 - val_accuracy: 0.5238\nEpoch 14/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.2040 - accuracy: 0.8925 - val_loss: 0.9432 - val_accuracy: 0.5238\nEpoch 15/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1664 - 
accuracy: 0.9345 - val_loss: 0.9422 - val_accuracy: 0.5238\nEpoch 16/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1820 - accuracy: 0.9153 - val_loss: 0.9682 - val_accuracy: 0.4762\nEpoch 17/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1602 - accuracy: 0.9407 - val_loss: 1.0068 - val_accuracy: 0.4762\nEpoch 18/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.1324 - accuracy: 0.9572 - val_loss: 1.0200 - val_accuracy: 0.4762\nEpoch 19/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1329 - accuracy: 0.9122 - val_loss: 1.0236 - val_accuracy: 0.4762\nEpoch 20/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.1079 - accuracy: 0.9481 - val_loss: 1.0398 - val_accuracy: 0.4762\nEpoch 21/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.1050 - accuracy: 0.9640 - val_loss: 1.0407 - val_accuracy: 0.4762\nEpoch 22/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0922 - accuracy: 0.9671 - val_loss: 1.0353 - val_accuracy: 0.5238\nEpoch 23/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0732 - accuracy: 0.9876 - val_loss: 1.0444 - val_accuracy: 0.5238\nEpoch 24/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0701 - accuracy: 0.9858 - val_loss: 1.0539 - val_accuracy: 0.5238\nEpoch 25/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0575 - accuracy: 0.9683 - val_loss: 1.0738 - val_accuracy: 0.5238\nEpoch 26/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0733 - accuracy: 0.9769 - val_loss: 1.0974 - val_accuracy: 0.3810\nEpoch 27/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0701 - accuracy: 0.9860 - val_loss: 1.1145 - val_accuracy: 0.3810\nEpoch 28/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0547 - accuracy: 0.9961 - val_loss: 1.1131 - val_accuracy: 0.3810\nEpoch 29/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0480 - accuracy: 0.9870 - val_loss: 1.1263 - val_accuracy: 0.3810\nEpoch 30/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0489 - accuracy: 0.9820 - val_loss: 1.1299 - val_accuracy: 0.4762\nEpoch 31/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0481 - accuracy: 0.9866 - val_loss: 1.1375 - val_accuracy: 0.3810\nEpoch 32/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0332 - accuracy: 0.9867 - val_loss: 1.1462 - val_accuracy: 0.4286\nEpoch 33/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0539 - accuracy: 0.9942 - val_loss: 1.1532 - val_accuracy: 0.4762\nEpoch 34/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0309 - accuracy: 0.9903 - val_loss: 1.1671 - val_accuracy: 0.5238\nEpoch 35/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0431 - accuracy: 0.9966 - val_loss: 1.2201 - val_accuracy: 0.3810\nEpoch 36/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0321 - accuracy: 0.9961 - val_loss: 1.2366 - val_accuracy: 0.3810\nEpoch 37/50\n19/19 [==============================] - 0s 3ms/step - loss: 0.0340 - accuracy: 0.9992 - val_loss: 1.2568 - val_accuracy: 0.3810\nEpoch 38/50\n19/19 [==============================] - 0s 6ms/step - loss: 0.0368 - accuracy: 0.9820 - val_loss: 1.2699 - val_accuracy: 0.4286\nEpoch 39/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0348 - accuracy: 1.0000 - val_loss: 1.2904 - val_accuracy: 0.4286\nEpoch 40/50\n19/19 [==============================] - 0s 
6ms/step - loss: 0.0343 - accuracy: 0.9870 - val_loss: 1.2767 - val_accuracy: 0.4286\nEpoch 41/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0306 - accuracy: 1.0000 - val_loss: 1.2878 - val_accuracy: 0.4286\nEpoch 42/50\n19/19 [==============================] - 0s 6ms/step - loss: 0.0173 - accuracy: 1.0000 - val_loss: 1.3011 - val_accuracy: 0.4286\nEpoch 43/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0247 - accuracy: 1.0000 - val_loss: 1.3205 - val_accuracy: 0.4286\nEpoch 44/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0173 - accuracy: 1.0000 - val_loss: 1.3305 - val_accuracy: 0.4286\nEpoch 45/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0310 - accuracy: 1.0000 - val_loss: 1.3640 - val_accuracy: 0.3810\nEpoch 46/50\n19/19 [==============================] - 0s 4ms/step - loss: 0.0216 - accuracy: 0.9950 - val_loss: 1.3607 - val_accuracy: 0.3810\nEpoch 47/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0205 - accuracy: 0.9885 - val_loss: 1.3779 - val_accuracy: 0.4286\nEpoch 48/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0197 - accuracy: 1.0000 - val_loss: 1.3891 - val_accuracy: 0.4286\nEpoch 49/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0084 - accuracy: 1.0000 - val_loss: 1.3908 - val_accuracy: 0.4286\nEpoch 50/50\n19/19 [==============================] - 0s 5ms/step - loss: 0.0256 - accuracy: 1.0000 - val_loss: 1.3597 - val_accuracy: 0.4286\ntime: 6.8046839237213135\n" ], [ "import time\ntime_begin = time.time()\nscore = model.evaluate(x_test,one_hot_test_labels, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n \ntime_end = time.time()\ntime = time_end - time_begin\nprint('time:', time)", "Test loss: 1.3608510494232178\nTest accuracy: 0.5\ntime: 0.08136534690856934\n" ], [ "#绘制acc-loss曲线\nimport matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['val_loss'],color='g')\nplt.plot(history.history['accuracy'],color='b')\nplt.plot(history.history['val_accuracy'],color='k')\nplt.title('model loss and acc')\nplt.ylabel('Accuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'test_loss','train_acc', 'test_acc'], loc='center right')\n# plt.legend(['train_loss','train_acc'], loc='upper left')\n#plt.savefig('1.png')\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.plot(history.history['loss'],color='r')\nplt.plot(history.history['accuracy'],color='b')\nplt.title('model loss and sccuracy ')\nplt.ylabel('loss/sccuracy')\nplt.xlabel('epoch')\nplt.legend(['train_loss', 'train_sccuracy'], loc='center right')\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06a7d7a9af63138deebc85b22778a794cef3e37
79,573
ipynb
Jupyter Notebook
YUSAG_FBS_football_linear_model.ipynb
mc-robinson/YUSAG_football_model
47769971870ca047597ba5123a226bcf3bc8b309
[ "MIT" ]
2
2017-08-17T14:42:35.000Z
2017-08-17T15:38:25.000Z
YUSAG_FBS_football_linear_model.ipynb
Engy-22/YUSAG_football_model
47769971870ca047597ba5123a226bcf3bc8b309
[ "MIT" ]
null
null
null
YUSAG_FBS_football_linear_model.ipynb
Engy-22/YUSAG_football_model
47769971870ca047597ba5123a226bcf3bc8b309
[ "MIT" ]
1
2020-03-27T15:32:59.000Z
2020-03-27T15:32:59.000Z
31.576587
613
0.398791
[ [ [ "# The YUSAG Football Model\n by Matt Robinson, [email protected], Yale Undergraduate Sports Analytics Group\n \nThis notebook introduces the model we at the Yale Undergraduate Sports Analytics Group (YUSAG) use for our college football rankings. This specific notebook details our FBS rankings at the beginning of the 2017 season.\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport math", "_____no_output_____" ] ], [ [ "Let's start by reading in the NCAA FBS football data from 2013-2016:", "_____no_output_____" ] ], [ [ "df_1 = pd.read_csv('NCAA_FBS_Results_2013_.csv')\ndf_2 = pd.read_csv('NCAA_FBS_Results_2014_.csv')\ndf_3 = pd.read_csv('NCAA_FBS_Results_2015_.csv')\ndf_4 = pd.read_csv('NCAA_FBS_Results_2016_.csv')\n\ndf = pd.concat([df_1,df_2,df_3,df_4],ignore_index=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "As you can see, the `OT` column has some `NaN` values that we will replace with 0.", "_____no_output_____" ] ], [ [ "# fill missing data with 0\ndf = df.fillna(0)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "I'm also going to make some weights for when we run our linear regression. I have found that using the factorial of the difference between the year and 2012 seems to work decently well. Clearly, the most recent seasons are weighted quite heavily in this scheme.", "_____no_output_____" ] ], [ [ "# update the weights based on a factorial scheme\ndf['weights'] = (df['year']-2012)\ndf['weights'] = df['weights'].apply(lambda x: math.factorial(x))", "_____no_output_____" ] ], [ [ "And now, we also are going to make a `scorediff` column that we can use in our linear regression.", "_____no_output_____" ] ], [ [ "df['scorediff'] = (df['teamscore']-df['oppscore'])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "Since we need numerical values for the linear regression algorithm, I am going to replace the locations with what seem like reasonable numbers:\n* Visiting = -1\n* Neutral = 0\n* Home = 1\n\nThe reason we picked these exact numbers will become clearer in a little bit.", "_____no_output_____" ] ], [ [ "df['location'] = df['location'].replace('V',-1)\ndf['location'] = df['location'].replace('N',0)\ndf['location'] = df['location'].replace('H',1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "The way our linear regression model works is a little tricky to code up in scikit-learn. It's much easier to do in R, but then you don't have a full understanding of what's happening when we make the model.\n\nIn simplest terms, our model predicts the score differential (`scorediff`) of each game based on three things: the strength of the `team`, the strength of the `opponent`, and the `location`.\n\nYou'll notice that the `team` and `opponent` features are categorical, and thus are not currently ripe for use with linear regression. However, we can use what is called 'one hot encoding' in order to transform these features into a usable form. One hot encoding works by taking the `team` feature, for example, and transforming it into many features such as `team_Yale` and `team_Harvard`. This `team_Yale` feature will usually equal zero, except when the team is actually Yale, then `team_Yale` will equal 1. 
In this way, it's a binary encoding (which is actually very useful for us as we'll see later).\n\nOne can use `sklearn.preprocessing.OneHotEncoder` for this task, but I am going to use Pandas instead: ", "_____no_output_____" ] ], [ [ "# create dummy variables, need to do this in python b/c does not handle automatically like R\nteam_dummies = pd.get_dummies(df.team, prefix='team')\nopponent_dummies = pd.get_dummies(df.opponent, prefix='opponent')\n\ndf = pd.concat([df, team_dummies, opponent_dummies], axis=1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "Now let's make our training data, so that we can construct the model. At this point, I am going to use all the available data to train the model, using our predetermined hyperparameters. This way, the model is ready to make predictions for the 2017 season.", "_____no_output_____" ] ], [ [ "# make the training data\nX = df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)\ny = df['scorediff']\nweights = df['weights']", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "y.head()", "_____no_output_____" ], [ "weights.head()", "_____no_output_____" ] ], [ [ "Now let's train the linear regression model. You'll notice that I'm actually using ridge regression (adds an l2 penalty with alpha = 1.0) because that prevents the model from overfitting and also limits the values of the coefficients to be more interpretable. If I did not add this penalty, the coefficients would be huge.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Ridge\nridge_reg = Ridge()\nridge_reg.fit(X, y, sample_weight=weights)", "_____no_output_____" ], [ "# get the R^2 value\nr_squared = ridge_reg.score(X, y, sample_weight=weights)\nprint('R^2 on the training data:')\nprint(r_squared)", "R^2 on the training data:\n0.495412735743\n" ] ], [ [ "Now that the model is trained, we can use it to provide our rankings. Note that in this model, a team's ranking is simply defined as its linear regression coefficient, which we call the YUSAG coefficient. \n\nWhen predicting a game's score differential on a neutral field, the predicted score differential (`scorediff`) is just the difference in YUSAG coefficients. The reason this works is the binary encoding we did earlier.\n\n#### More details below on how it actually works\n\nOk, so you may have noticed that every game in our dataframe is actually duplicated, just with the `team` and `opponent` variables switched. This may have seemed like a mistake but it is actually useful for making the model more interpretable. \n\nWhen we run the model, we get a coefficient for the `team_Yale` variable, which we call the YUSAG coefficient, and a coefficient for the `opponent_Yale` variable. Since we allow every game to be repeated, these variables end up just being negatives of each other. \n\nSo let's think about what we are doing when we predict the score differential for the Harvard-Penn game with `team` = Harvard and `opponent` = Penn.\n\nIn our model, the coefficients are as follows:\n- team_Harvard_coef = 7.78\n- opponent_Harvard_coef = -7.78\n- team_Penn_coef = 6.68\n- opponent_Penn_coef = -6.68\n\nwhen we go to use the model for this game, it looks like this:\n\n`scorediff` = (location_coef $*$ `location`) + (team_Harvard_coef $*$ `team_Harvard`) + (opponent_Harvard_coef $*$ `opponent_Harvard`) + (team_Penn_coef $*$ `team_Penn`) + (opponent_Penn_coef $*$ `opponent_Penn`) + (team_Yale_coef $*$ `team_Yale`) + (opponent_Yale_coef $*$ `opponent_Yale`) + $\\cdots$\n\nwhere the $\\cdots$ represent data for many other teams, which will all just equal $0$.\n\nTo put numbers in for the variables, the model looks like this:\n\n`scorediff` = (location_coef $*$ $0$) + (team_Harvard_coef $*$ $1$) + (opponent_Harvard_coef $*$ $0$) + (team_Penn_coef $*$ $0$) + (opponent_Penn_coef $*$ $1$) + (team_Yale_coef $*$ $0$) + (opponent_Yale_coef $*$ $0$) + $\\cdots$\n\nWhich is just:\n\n`scorediff` = (location_coef $*$ $0$) + (7.78 $*$ $1$) + (-6.68 $*$ $1$) = $7.78 - 6.68$ = Harvard_YUSAG_coef - Penn_YUSAG_coef\n\nThus showing how the difference in YUSAG coefficients is the same as the predicted score differential. Furthermore, the higher YUSAG coefficient a team has, the better they are.\n\nLastly, if the Harvard-Penn game was to be home at Harvard, we would just add the location_coef:\n\n`scorediff` = (location_coef $*$ $1$) + (team_Harvard_coef $*$ $1$) + (opponent_Penn_coef $*$ $1$) = $1.77 + 7.78 - 6.68$ = Location_coef + Harvard_YUSAG_coef - Penn_YUSAG_coef\n", "_____no_output_____" ] ], [ [ "# get the coefficients for each feature\ncoef_data = list(zip(X.columns,ridge_reg.coef_))\ncoef_df = pd.DataFrame(coef_data,columns=['feature','feature_coef'])\ncoef_df.head()", "_____no_output_____" ] ], [ [ "Let's get only the team variables, so that it is a proper ranking", "_____no_output_____" ] ], [ [ "# first get rid of opponent_ variables\nteam_df = coef_df[~coef_df['feature'].str.contains(\"opponent\")]\n\n# get rid of the location variable\nteam_df = team_df.iloc[1:]", "_____no_output_____" ], [ "team_df.head()", "_____no_output_____" ], [ "# rank them by coef, not alphabetical order\nranked_team_df = team_df.sort_values(['feature_coef'],ascending=False)\n# reset the indices at 0\nranked_team_df = ranked_team_df.reset_index(drop=True);", "_____no_output_____" ], [ "ranked_team_df.head()", "_____no_output_____" ] ], [ [ "I'm going to change the name of the columns and remove the 'team_' part of every string:", "_____no_output_____" ] ], [ [ "ranked_team_df.rename(columns={'feature':'team', 'feature_coef':'YUSAG_coef'}, inplace=True)\nranked_team_df['team'] = ranked_team_df['team'].str.replace('team_', '')", "_____no_output_____" ], [ "ranked_team_df.head()", "_____no_output_____" ] ], [ [ "Lastly, I'm just going to shift the index to start at 1, so that it corresponds to the ranking.", "_____no_output_____" ] ], [ [ "ranked_team_df.index = ranked_team_df.index + 1 ", "_____no_output_____" ], [ "ranked_team_df.to_csv(\"FBS_power_rankings.csv\")", "_____no_output_____" ] ], [ [ "## Additional stuff: Testing the model\n\nThis section is mostly about how one could test the performance of the model or how one could choose appropriate hyperparameters.\n\n#### Creating a new dataframe\n\nFirst let's take the original dataframe and sort it by date, so that the order of games in the dataframe 
matches the order the games were played.", "_____no_output_____" ] ], [ [ "# sort by date and reset the indices to 0\ndf_dated = df.sort_values(['year', 'month','day'], ascending=[True, True, True])\ndf_dated = df_dated.reset_index(drop=True)", "_____no_output_____" ], [ "df_dated.head()", "_____no_output_____" ] ], [ [ "Let's first make a dataframe with training data (the first three years of results)", "_____no_output_____" ] ], [ [ "thirteen_df = df_dated.loc[df_dated['year']==2013]\nfourteen_df = df_dated.loc[df_dated['year']==2014]\nfifteen_df = df_dated.loc[df_dated['year']==2015]\n\ntrain_df = pd.concat([thirteen_df,fourteen_df,fifteen_df], ignore_index=True)", "_____no_output_____" ] ], [ [ "Now let's make an initial testing dataframe with the data from this past year.", "_____no_output_____" ] ], [ [ "sixteen_df = df_dated.loc[df_dated['year']==2016]\nseventeen_df = df_dated.loc[df_dated['year']==2017]\n\ntest_df = pd.concat([sixteen_df,seventeen_df], ignore_index=True)", "_____no_output_____" ] ], [ [ "I am now going to set up a testing/validation scheme for the model. It works like this:\n\nFirst I start off where my training data is all games from 2012-2015. Using the model trained on this data, I then predict games from the first week of the 2016 season and look at the results.\n\nNext, I add that first week's worth of games to the training data, and now I train on all 2012-2015 results plus the first week from 2016. After training the model on this data, I then test on the second week of games. I then add that week's games to the training data and repeat the same procedure week after week.\n\nIn this way, I am never testing on a result that I have trained on. Though, it should be noted that I have also used this as a validation scheme, so I have technically done some sloppy 'data snooping' and this is not a great predictor of my generalization error. 
", "_____no_output_____" ] ], [ [ "def train_test_model(train_df, test_df):\n\n # make the training data\n X_train = train_df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)\n y_train = train_df['scorediff']\n weights_train = train_df['weights']\n \n # train the model\n ridge_reg = Ridge()\n ridge_reg.fit(X_train, y_train, weights_train)\n fit = ridge_reg.score(X_train,y_train,sample_weight=weights_train)\n print('R^2 on the training data:')\n print(fit)\n \n # get the test data\n X_test = test_df.drop(['year','month','day','team','opponent','teamscore','oppscore','D1','OT','weights','scorediff'], axis=1)\n y_test = test_df['scorediff']\n \n # get the metrics\n compare_data = list(zip(ridge_reg.predict(X_test),y_test))\n \n right_count = 0\n for tpl in compare_data:\n if tpl[0] >= 0 and tpl[1] >=0:\n right_count = right_count + 1\n elif tpl[0] <= 0 and tpl[1] <=0:\n right_count = right_count + 1\n accuracy = right_count/len(compare_data)\n print('accuracy on this weeks games')\n print(right_count/len(compare_data))\n \n total_squared_error = 0.0\n for tpl in compare_data:\n total_squared_error = total_squared_error + (tpl[0]-tpl[1])**2\n RMSE = (total_squared_error / float(len(compare_data)))**(0.5)\n print('RMSE on this weeks games:')\n print(RMSE)\n \n return fit, accuracy, RMSE, right_count, total_squared_error\n ", "_____no_output_____" ], [ "#Now the code for running the week by week testing.\nbase_df = train_df\nnew_indices = []\n# this is the hash for the first date\nlast_date_hash = 2018\n\nfit_list = []\naccuracy_list = []\nRMSE_list = []\ntotal_squared_error = 0\ntotal_right_count = 0\n\nfor index, row in test_df.iterrows():\n \n year = row['year']\n month = row['month']\n day = row['day']\n date_hash = year+month+day \n \n if date_hash != last_date_hash:\n last_date_hash = date_hash\n test_week = test_df.iloc[new_indices]\n fit, accuracy, RMSE, correct_calls, squared_error = train_test_model(base_df,test_week)\n \n fit_list.append(fit)\n accuracy_list.append(accuracy)\n RMSE_list.append(RMSE)\n \n total_squared_error = total_squared_error + squared_error\n total_right_count = total_right_count + correct_calls\n \n base_df = pd.concat([base_df,test_week],ignore_index=True)\n new_indices = [index]\n \n else:\n new_indices.append(index)", "R^2 on the training data:\n0.514115428151\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n24.6130049653\nR^2 on the training data:\n0.515062529455\naccuracy on this weeks games\n0.75\nRMSE on this weeks games:\n8.50571284957\nR^2 on the training data:\n0.515637252787\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n1.79624117389\nR^2 on the training data:\n0.515707166397\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n7.80308140843\nR^2 on the training data:\n0.516755093582\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n8.30907342967\nR^2 on the training data:\n0.530413242372\naccuracy on this weeks games\n0.6\nRMSE on this weeks games:\n24.0421530547\nR^2 on the training data:\n0.524662107499\naccuracy on this weeks games\n0.7307692307692307\nRMSE on this weeks games:\n18.2597176436\nR^2 on the training data:\n0.537844378003\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n13.2061739345\nR^2 on the training data:\n0.537113491933\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n15.1910124649\nR^2 on the training data:\n0.536473420445\naccuracy on this weeks games\n1.0\nRMSE on this weeks 
games:\n18.3382261414\nR^2 on the training data:\n0.538934298666\naccuracy on this weeks games\n0.8636363636363636\nRMSE on this weeks games:\n12.8189519944\nR^2 on the training data:\n0.582035291629\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n15.2449819288\nR^2 on the training data:\n0.582403376696\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n13.7766715096\nR^2 on the training data:\n0.582348351767\naccuracy on this weeks games\n0.803921568627451\nRMSE on this weeks games:\n18.5962963562\nR^2 on the training data:\n0.576990606623\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n13.4224118815\nR^2 on the training data:\n0.577056821057\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n5.92939129455\nR^2 on the training data:\n0.578427562218\naccuracy on this weeks games\n0.71875\nRMSE on this weeks games:\n16.897906967\nR^2 on the training data:\n0.581714005608\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n7.55193185348\nR^2 on the training data:\n0.584794100169\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n30.1301766226\nR^2 on the training data:\n0.581440590766\naccuracy on this weeks games\n0.7272727272727273\nRMSE on this weeks games:\n16.1664380396\nR^2 on the training data:\n0.578568738472\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n9.11724202195\nR^2 on the training data:\n0.578354531742\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n9.1525069259\nR^2 on the training data:\n0.577954295715\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n17.3677270374\nR^2 on the training data:\n0.579515211939\naccuracy on this weeks games\n0.6222222222222222\nRMSE on this weeks games:\n18.943978217\nR^2 on the training data:\n0.560749642658\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n17.418457086\nR^2 on the training data:\n0.559489012116\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n8.30652563022\nR^2 on the training data:\n0.560151718908\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n10.0183622685\nR^2 on the training data:\n0.559715821378\naccuracy on this weeks games\n0.7395833333333334\nRMSE on this weeks games:\n15.1001843603\nR^2 on the training data:\n0.552759304195\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n9.323045051\nR^2 on the training data:\n0.55277779786\naccuracy on this weeks games\n0.3333333333333333\nRMSE on this weeks games:\n16.843194639\nR^2 on the training data:\n0.553059868385\naccuracy on this weeks games\n0.6458333333333334\nRMSE on this weeks games:\n18.8013361361\nR^2 on the training data:\n0.535598591728\naccuracy on this weeks games\n0.6\nRMSE on this weeks games:\n20.61518416\nR^2 on the training data:\n0.532368051828\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n14.5105539461\nR^2 on the training data:\n0.532028713189\naccuracy on this weeks games\n0.7045454545454546\nRMSE on this weeks games:\n16.2453946003\nR^2 on the training data:\n0.528823598752\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n13.7369937882\nR^2 on the training data:\n0.530091839609\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n18.7196494009\nR^2 on the training data:\n0.530527443418\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n9.48914028039\nR^2 on the training data:\n0.530984672174\naccuracy on this weeks games\n0.6666666666666666\nRMSE on this weeks games:\n19.0850939921\nR^2 on the training 
data:\n0.529741175256\naccuracy on this weeks games\n0.7142857142857143\nRMSE on this weeks games:\n20.5915862474\nR^2 on the training data:\n0.528232831181\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n10.3353909178\nR^2 on the training data:\n0.528134292082\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n10.0419047515\nR^2 on the training data:\n0.527876234945\naccuracy on this weeks games\n0.3333333333333333\nRMSE on this weeks games:\n18.2126521661\nR^2 on the training data:\n0.526435237116\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n16.860315415\nR^2 on the training data:\n0.527531214743\naccuracy on this weeks games\n0.72\nRMSE on this weeks games:\n16.0888824765\nR^2 on the training data:\n0.531795377598\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n23.3404627122\nR^2 on the training data:\n0.530989109732\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n3.06469863764\nR^2 on the training data:\n0.531354622878\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n37.642639928\nR^2 on the training data:\n0.527616335683\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n12.767855154\nR^2 on the training data:\n0.528072988352\naccuracy on this weeks games\n0.66\nRMSE on this weeks games:\n19.5032226995\nR^2 on the training data:\n0.51956298128\naccuracy on this weeks games\n0.6666666666666666\nRMSE on this weeks games:\n6.29851898254\nR^2 on the training data:\n0.519399027004\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n12.6891606688\nR^2 on the training data:\n0.51933076814\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n18.6831736447\nR^2 on the training data:\n0.514092027371\naccuracy on this weeks games\n0.6744186046511628\nRMSE on this weeks games:\n19.9184797468\nR^2 on the training data:\n0.508382085155\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n17.3466492005\nR^2 on the training data:\n0.50829394128\naccuracy on this weeks games\n0.7142857142857143\nRMSE on this weeks games:\n15.2730317412\nR^2 on the training data:\n0.507597963676\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n19.7940944079\nR^2 on the training data:\n0.506922020404\naccuracy on this weeks games\n0.4\nRMSE on this weeks games:\n17.4197006173\nR^2 on the training data:\n0.505249755542\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n35.1730998402\nR^2 on the training data:\n0.50510966421\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n19.932364329\nR^2 on the training data:\n0.504805556879\naccuracy on this weeks games\n1.0\nRMSE on this weeks games:\n12.6520570426\nR^2 on the training data:\n0.504528049513\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n24.1761431639\nR^2 on the training data:\n0.503626569575\naccuracy on this weeks games\n0.6666666666666666\nRMSE on this weeks games:\n4.37386376777\nR^2 on the training data:\n0.503573419863\naccuracy on this weeks games\n0.0\nRMSE on this weeks games:\n24.6680126856\nR^2 on the training data:\n0.502805914088\naccuracy on this weeks games\n0.6666666666666666\nRMSE on this weeks games:\n17.8733082884\nR^2 on the training data:\n0.501751732482\naccuracy on this weeks games\n0.25\nRMSE on this weeks games:\n17.0832779251\nR^2 on the training data:\n0.500232184871\naccuracy on this weeks games\n0.5\nRMSE on this weeks games:\n11.2641158141\nR^2 on the training data:\n0.499704516896\naccuracy on this weeks games\n0.6666666666666666\nRMSE on this weeks 
games:\n20.5540234472\nR^2 on the training data:\n0.49857541638\naccuracy on this weeks games\n0.6\nRMSE on this weeks games:\n9.71536849142\nR^2 on the training data:\n0.498602786947\naccuracy on this weeks games\n0.75\nRMSE on this weeks games:\n22.0752019224\nR^2 on the training data:\n0.497056280421\naccuracy on this weeks games\n0.75\nRMSE on this weeks games:\n15.8007124148\n" ], [ "# get the number of games it called correctly in 2016\ntotal_accuracy = total_right_count/test_df.shape[0]\ntotal_accuracy", "_____no_output_____" ], [ "# get the Root Mean Squared Error\noverall_RMSE = (total_squared_error/test_df.shape[0])**(0.5)\noverall_RMSE", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
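The YUSAG record above explains that a matchup prediction reduces to a difference of fitted coefficients (plus the location coefficient for the home team). As a minimal editorial sketch — not part of the original dataset record — this is what that lookup could look like in code, assuming the `ridge_reg` model and feature matrix `X` from that notebook; the helper name is an illustrative assumption.

```python
import pandas as pd

def predicted_margin(ridge_reg, feature_names, team, opponent, location=0):
    # location uses the notebook's coding: -1 visitor, 0 neutral, 1 home
    coefs = pd.Series(ridge_reg.coef_, index=feature_names)
    return (location * coefs['location']
            + coefs['team_' + team]           # the team's YUSAG coefficient
            + coefs['opponent_' + opponent])  # equals minus the opponent's YUSAG coefficient

# e.g. predicted_margin(ridge_reg, X.columns, 'Harvard', 'Penn', location=1)
```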
d06a884621e9f64a838470856c552405995b97d0
3,864
ipynb
Jupyter Notebook
code/01_preprocessing.ipynb
maxim-belkin/SDC-BIDS-fMRI
920a587981787a147b2f55c94a0e80d1ea58e842
[ "CC-BY-4.0" ]
1
2019-11-14T21:46:48.000Z
2019-11-14T21:46:48.000Z
code/01_preprocessing.ipynb
maxim-belkin/SDC-BIDS-fMRI
920a587981787a147b2f55c94a0e80d1ea58e842
[ "CC-BY-4.0" ]
null
null
null
code/01_preprocessing.ipynb
maxim-belkin/SDC-BIDS-fMRI
920a587981787a147b2f55c94a0e80d1ea58e842
[ "CC-BY-4.0" ]
null
null
null
39.835052
392
0.649327
[ [ [ "### Using fmriprep", "_____no_output_____" ], [ "[fmriprep](https://fmriprep.readthedocs.io/en/stable/) is a package developed by the Poldrack lab to do the minimal preprocessing of fMRI data required. It covers brain extraction, motion correction, field unwarping, and registration. It uses a combination of well-known software packages (e.g., FSL, SPM, ANTS, AFNI) and selects the 'best' implementation of each preprocessing step.\n\nOnce installed, `fmriprep` can be invoked from the command line. We can even run it inside this notebook! The following command should work after you remove the 'hashtag' `#`. \n\nHowever, running fmriprep takes quite some time (we included the hashtag to prevent you from accidentally running it). You'll most likely want to run it in parallel on a computing cluster.", "_____no_output_____" ] ], [ [ "#!fmriprep \\\n# --ignore slicetiming \\\n# --ignore fieldmaps \\\n# --output-space template \\\n# --template MNI152NLin2009cAsym \\\n# --template-resampling-grid 2mm \\\n# --fs-no-reconall \\\n# --fs-license-file \\\n# ../license.txt \\\n# ../data/ds000030 ../data/ds000030/derivatives/fmriprep participant", "_____no_output_____" ] ], [ [ "The command above consists of the following parts:\n- \\\"fmriprep\\\" calls fmriprep\n- `--ignore slicetiming` tells fmriprep to _not_ perform slice timing correction\n- `--ignore fieldmaps` tells fmriprep to _not_ perform distortion correction (unfortunately, there are no field maps available in this data set)\n- `--output-space template` tells fmriprep to normalize (register) data to a template\n- `--template MNI152NLin2009cAsym` tells fmriprep that the template should be MNI152 version 6 (2009c)\n- `--template-resampling-grid 2mm` tells fmriprep to resample the output images to 2mm isotropic resolution\n- `--fs-license-file ../../license.txt` tells fmriprep where to find the license.txt-file for freesurfer - you can ignore this\n- `bids` is the name of the folder containing the data in bids format\n- `output_folder` is the name of the folder where we want the preprocessed data to be stored,\n- `participant` tells fmriprep to run only at the participant level (and not, for example, at the group level - you can forget about this)\n\nThe [official documentation](http://fmriprep.readthedocs.io/) contains all possible arguments you can pass.", "_____no_output_____" ], [ "### Using nipype", "_____no_output_____" ], [ "fmriprep makes use of [Nipype](https://nipype.readthedocs.io/en/latest/), a pipelining tool for preprocessing neuroimaging data. Nipype makes it easy to share and document pipelines and run them in parallel on a computing cluster. If you would like to build your own preprocessing pipelines, a good resource to get started is [this tutorial](https://miykael.github.io/nipype_tutorial/).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
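The preprocessing record above spells out each flag of the fmriprep call. A hedged sketch — not part of the original record — of the same invocation assembled with `subprocess` so it can run outside a notebook; it reuses only the flags and paths shown in that notebook, which you would adjust for your own data layout.

```python
import subprocess

cmd = [
    "fmriprep",
    "--ignore", "slicetiming",            # skip slice timing correction
    "--ignore", "fieldmaps",              # skip distortion correction
    "--output-space", "template",
    "--template", "MNI152NLin2009cAsym",
    "--template-resampling-grid", "2mm",
    "--fs-no-reconall",
    "--fs-license-file", "../license.txt",
    "../data/ds000030",                            # BIDS input folder
    "../data/ds000030/derivatives/fmriprep",       # output folder
    "participant",                                 # analysis level
]
subprocess.run(cmd, check=True)  # long-running; usually submitted to a computing cluster instead
```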
d06a9910b662812695823a328f14e83ca84cb520
676,971
ipynb
Jupyter Notebook
homework2.ipynb
x-sile/made_ml
fa82f119ea6cce746595d7183ff5d75c5d42d0c5
[ "MIT" ]
null
null
null
homework2.ipynb
x-sile/made_ml
fa82f119ea6cce746595d7183ff5d75c5d42d0c5
[ "MIT" ]
null
null
null
homework2.ipynb
x-sile/made_ml
fa82f119ea6cce746595d7183ff5d75c5d42d0c5
[ "MIT" ]
1
2021-05-20T07:07:11.000Z
2021-05-20T07:07:11.000Z
777.234214
169,720
0.952191
[ [ [ "Скачайте данные в формате csv, выберите из таблицы данные по России, начиная с 3 марта 2020 г. (в этот момент впервые стало больше 2 заболевших). В качестве целевой переменной возьмём число случаев заболевания (столбцы total_cases и new_cases); для упрощения обработки можно заменить в столбце new_cases все нули на единицы. Для единообразия давайте зафиксируем тренировочный набор в виде первых 50 отсчётов (дней), начиная с 3 марта; остальные данные можно использовать в качестве тестового набора (и он даже будет увеличиваться по мере выполнения задания).\n- Постройте графики целевых переменных. Вы увидите, что число заболевших растёт очень быстро, на первый взгляд экспоненциально. Для первого подхода к снаряду давайте это и используем.\n- Используя линейную регрессию, обучите модель с экспоненциальным ростом числа заболевших: y ~ exp(линейная функция от x), где x — номер текущего дня.\n- Найдите апостериорное распределение параметров этой модели для достаточно широкого априорного распределения. Требующееся для этого значение дисперсии шума в данных оцените, исходя из вашей же максимальной апостериорной модели (это фактически первый шаг эмпирического Байеса).\n- Посэмплируйте много разных экспонент, постройте графики. Сколько, исходя из этих сэмплов, предсказывается случаев коронавируса в России к 1 мая? к 1 июня? к 1 сентября? Постройте предсказательные распределения (можно эмпирически, исходя из данных сэмплирования).\n\nПредсказания экспоненциальной модели наверняка получились грустными. Но это, конечно, чересчур пессимистично — экспоненциальный рост в природе никак не может продолжаться вечно. Кривая общего числа заболевших во время эпидемии в реальности имеет сигмоидальный вид: после начальной фазы экспоненциального роста неизбежно происходит насыщение. В качестве конкретной формы такой сигмоиды давайте возьмём форму функции распределения для гауссиана. Естественно, в нашем случае сигмоида стремится не к единице, т.е. константа перед интегралом может быть произвольной (и её можно внести в экспоненту), а в экспоненте под интегралом может быть произвольная квадратичная функция от t.\n- Предложите способ обучать параметры такой сигмоидальной функции при помощи линейной регрессии.\n- Обучите эти параметры на датасете случаев коронавируса в России. Найдите апостериорное распределение параметров этой модели для достаточно широкого априорного распределения. Требующееся для этого значение дисперсии шума в данных оцените, исходя из вашей же максимальной апостериорной модели.\n- Посэмплируйте много разных сигмоид из апостериорного распределения, постройте графики. Сколько, исходя из этих сэмплов, будет всего случаев коронавируса в России? Постройте эмпирическое предсказательное распределение, нарисуйте графики. Каков ваш прогноз числа случаев коронавируса в пессимистичном сценарии (90-й процентиль в выборке числа случаев)? В оптимистичном сценарии (10-й процентиль)?\n\nБонус: проведите такой же анализ для других стран (здесь придётся руками подобрать дни начала моделирования — коронавирус приходил в разные страны в разное время). Насколько разные параметры получаются? Можно ли разделить страны на кластеры (хотя бы чисто визуально) в зависимости от этих параметров?\n\n[Эта часть задания не оценивается, здесь нет правильных и неправильных ответов, но буду рад узнать, что вы думаете]\nЧто вы поняли из этого упражнения? Что можно сказать про коронавирус по итогам такого моделирования? 
Как принять решение, например, о том, нужно ли вводить карантин?\n", "_____no_output_____" ] ], [ [ "from datetime import datetime\n\nimport pandas as pd\nimport numpy as np\nimport scipy\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.linear_model import LinearRegression\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = 16, 6", "_____no_output_____" ] ], [ [ "### Загрузка и предобработка данных", "_____no_output_____" ] ], [ [ "# загрузим данные\ndf = pd.read_csv('full_data.csv')\ndf = df[(df['location'] == 'Russia') & (df['date'] >= '2020-03-03')].reset_index(drop=True)\ndf.loc[df['new_cases'] == 0, 'new_cases'] = 1\ndf['day'] = df.index", "_____no_output_____" ], [ "start_day = datetime.strptime('2020-03-03', '%Y-%m-%d')\nmay_first = datetime.strptime('2020-05-01', '%Y-%m-%d')\njune_first = datetime.strptime('2020-06-01', '%Y-%m-%d')\nsept_first = datetime.strptime('2020-09-01', '%Y-%m-%d')\nyear_end = datetime.strptime('2020-12-31', '%Y-%m-%d')\n\ntill_may = (may_first - start_day).days\ntill_june = (june_first - start_day).days\ntill_sept = (sept_first - start_day).days\ntill_year_end = (year_end - start_day).days", "_____no_output_____" ] ], [ [ "### Разделим на трейн и тест", "_____no_output_____" ] ], [ [ "# разделим на трейн и тест. Возьмем 60! дней, т.к. результаты получаются более адекватные\nTRAIN_DAYS = 60\n\ntrain = df[:TRAIN_DAYS]\ntest = df[TRAIN_DAYS:]", "_____no_output_____" ] ], [ [ "### Код для байесовской регрессии", "_____no_output_____" ] ], [ [ "class BayesLR(BaseEstimator, TransformerMixin):\n \n def __init__(self, mu, sigma, noise=None):\n self.mu = mu\n self.sigma = sigma\n self.noise = None\n \n def _estimate_noise(self, X, y):\n return np.std(y - X.dot(np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y))) # linear regression\n \n def _add_intercept(self, X):\n return np.hstack((np.ones((len(X), 1)), X))\n \n def fit(self, X, y):\n \"\"\"\n X: (n_samples, n_features)\n y: (n_samples, )\n \"\"\"\n X = self._add_intercept(X)\n \n if self.noise is None:\n self.noise = self._estimate_noise(X, y) \n beta = 1 / self.noise ** 2\n \n mu_prev = self.mu\n sigma_prev = self.sigma\n self.sigma = np.linalg.inv(np.linalg.inv(sigma_prev) + beta * np.dot(X.T, X))\n self.mu = np.dot(self.sigma, np.dot(np.linalg.inv(sigma_prev), mu_prev) + beta * np.dot(X.T, y))\n \n return self\n \n def predict(self, X):\n X = self._add_intercept(X)\n return X.dot(self.mu)\n \n def sample_w(self, n_samples=1000):\n return np.random.multivariate_normal(self.mu, self.sigma, n_samples)\n \n def sample(self, X, n_samples=1000):\n X = self._add_intercept(X)\n w = self.sample_w(n_samples)\n return X.dot(w.T)", "_____no_output_____" ], [ "def plot_sampled(sampled, true=None):\n for i in range(sampled.shape[1]):\n plt.plot(sampled[:, i], 'k-', lw=.4)", "_____no_output_____" ] ], [ [ "## Часть 1: моделирование экспонентной", "_____no_output_____" ], [ "### 1.1 Графики", "_____no_output_____" ] ], [ [ "plt.plot(train['total_cases'], label='общее число зараженных')\nplt.plot(train['new_cases'], label='количество новых случаев за день')\nplt.title('Графики целевых переменных')\nplt.legend();", "_____no_output_____" ] ], [ [ "### 1.2 Линейная регрессия y ~ exp(wX)", "_____no_output_____" ], [ "Чтобы построить линейную регрессию для такого случая, прологарифмируем целевую переменную (общее количество зараженных).", "_____no_output_____" ] ], [ [ "X_tr = train[['day']].values\ny_tr = np.log(train['total_cases'].values)\n\nX_te = 
test[['day']].values\ny_te = np.log(test['total_cases'].values)\n\nX_full = np.arange(till_year_end + 1).reshape(-1, 1) # до конца года", "_____no_output_____" ], [ "# Выберем uninformative prior\nmu_prior = np.array([0, 0])\nsigma_prior = 100 * np.array([[1, 0], \n [0, 1]])\n\nbayes_lr = BayesLR(mu_prior, sigma_prior)\nbayes_lr.fit(X_tr, y_tr)\n\nprint(bayes_lr.mu)\nprint(bayes_lr.sigma)", "[1.81476765 0.18447833]\n[[ 1.70391846e-02 -4.29559261e-04]\n [-4.29559261e-04 1.45619669e-05]]\n" ], [ "# Семплируем параметры модели\nw = bayes_lr.sample_w(n_samples=10000)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 2)\n\nax[0].hist(w[:, 0], bins=100)\nax[0].set_title('Распределение свободного члена')\n\nax[1].hist(w[:, 1], bins=100)\nax[1].set_title('Распределение коэффициента наклона')\n\nplt.show()", "_____no_output_____" ] ], [ [ "### 1.3 Предсказания", "_____no_output_____" ] ], [ [ "# Семплируем экспоненты для трейна\nsampled_train = np.exp(bayes_lr.sample(X_tr))", "_____no_output_____" ], [ "plot_sampled(sampled_train)\nplt.plot(np.exp(y_tr), color='red', label='Реальное число зараженных')\nplt.legend()\nplt.title('Предсказания для трейна');", "_____no_output_____" ], [ "# Посемплируем экспоненты для теста\nsampled_test = np.exp(bayes_lr.sample(X_te, n_samples=10000))\n\n# Делаем предсказания\npreds_full = np.exp(bayes_lr.predict(X_full))", "_____no_output_____" ], [ "plot_sampled(sampled_test)\nplt.plot(np.exp(y_te), color='red', label='Реальное число зараженных')\nplt.legend()\nplt.title('Предсказания для теста');\n\nprint(f'1 мая: {preds_full[till_may] / 1_000_000:.4f} млн зараженных')\nprint(f'1 июня: {preds_full[till_june] / 1_000_000:.4f} млн зараженных')\nprint(f'1 сентября: {preds_full[till_sept] / 1_000_000:.4f} млн зараженных')", "1 мая: 0.3274 млн зараженных\n1 июня: 99.7141 млн зараженных\n1 сентября: 2342098539.3834 млн зараженных\n" ] ], [ [ "Получается, что к 1 июня 2/3 России вымрет, не очень реалистично.", "_____no_output_____" ] ], [ [ "# Посемплируем экспоненты на будущее\nsampled_full = np.exp(bayes_lr.sample(X_full, n_samples=10000))", "_____no_output_____" ], [ "fig, ax = plt.subplots(2, 2, figsize=(16, 10))\n\nax[0][0].hist(sampled_full[till_may], bins=50)\nax[0][0].set_title('Предсказательное распределение количества зараженных к маю')\n\nax[0][1].hist(sampled_full[till_june], bins=50)\nax[0][1].set_title('Предсказательное распределение количества зараженных к июню')\n\nax[1][0].hist(sampled_full[till_sept], bins=50)\nax[1][0].set_title('Предсказательное распределение количества зараженных к сентябрю')\n\nax[1][1].hist(sampled_test.mean(0), bins=30)\nax[1][1].set_title('Распределение среднего числа зараженных для тестовой выборки')\n\nplt.show()", "_____no_output_____" ] ], [ [ "Вывод: моделирование экспонентой - это шляпа =)", "_____no_output_____" ], [ "## Часть 2: моделирование сигмоидой", "_____no_output_____" ], [ "### 2.1 Как такое обучать", "_____no_output_____" ], [ "Справа у нас интеграл - можем взять производную, а затем прологарифмировать, в итоге получим:\n\n$ln$($\\Delta$y) = w_2 * x^2 + w_1 * x + w_0 \n\nДругими словами, мы можем замоделировать количество новых случаев заражения с помощью плотности нормального распределения. 
В качестве функции в экспоненте возьмет квадратичную функцию от дня.", "_____no_output_____" ], [ "### 2.2 Обучаем", "_____no_output_____" ] ], [ [ "# Функция для приведения наших предсказаний приростов к общему числу зараженных\ndef to_total(preds):\n return 2 + np.cumsum(np.exp(preds), axis=0)", "_____no_output_____" ], [ "X_tr = np.hstack([X_tr, X_tr ** 2])\ny_tr = np.log(train['new_cases'].values)\n\nX_te = np.hstack([X_te, X_te ** 2])\ny_te = np.log(test['new_cases'].values)\n\nX_full = np.hstack([X_full, X_full ** 2])", "_____no_output_____" ], [ "# Выберем uninformative prior\nmu_prior = np.array([0, 0, 0])\nsigma_prior = 1000 * np.array([[1, 0, 0], \n [0, 1, 0],\n [0, 0, 1]])\n\nbayes_lr = BayesLR(mu_prior, sigma_prior)\nbayes_lr.fit(X_tr, y_tr)\n\nprint(bayes_lr.mu)\nprint(bayes_lr.sigma)", "[-0.7822012 0.28569776 -0.00200843]\n[[ 7.54223479e-02 -5.06981264e-03 7.10057749e-05]\n [-5.06981264e-03 4.63235833e-04 -7.34561947e-06]\n [ 7.10057749e-05 -7.34561947e-06 1.24503088e-07]]\n" ], [ "# Семплируем параметры модели\nw = bayes_lr.sample_w(n_samples=10000)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 3)\n\nax[0].hist(w[:, 0], bins=100)\nax[0].set_title('Распределение свободного члена')\n\nax[1].hist(w[:, 1], bins=100)\nax[1].set_title('Распределение коэффициента при X')\n\nax[2].hist(w[:, 2], bins=100)\nax[2].set_title('Распределение коэффициента при X^2')\n\nplt.show()", "_____no_output_____" ] ], [ [ "### 2.3 Предсказываем", "_____no_output_____" ] ], [ [ "# Семплируем сигмоиды для трейна\nsampled_train = to_total(bayes_lr.sample(X_tr))", "_____no_output_____" ], [ "plot_sampled(sampled_train)\nplt.plot(to_total(y_tr), color='red', label='Реальное число зараженных')\nplt.legend()\nplt.title('Предсказания для трейна');", "_____no_output_____" ], [ "# Посемплируем сигмоиды для теста\nsampled_test = to_total(bayes_lr.sample(X_te))\n\n# Делаем предсказания\npreds_full = to_total(bayes_lr.predict(X_full))", "_____no_output_____" ], [ "plt.plot(preds_full)\nplt.plot(to_total(np.hstack([y_tr, y_te])), color='red', label='Реальное известное число зараженных')\nplt.legend()\nplt.title('Среднее наших предсказаний по числу зараженных до конца года');", "_____no_output_____" ], [ "plot_sampled(sampled_test)\nplt.plot(to_total(y_te), color='red', label='Реальное число зараженных')\nplt.legend()\nplt.title('Предсказания для теста');\n\nprint(f'1 мая: {preds_full[till_may] / 1_000_000:.4f} млн зараженных')\nprint(f'1 июня: {preds_full[till_june] / 1_000_000:.4f} млн зараженных')\nprint(f'1 сентября: {preds_full[till_sept] / 1_000_000:.4f} млн зараженных')", "1 мая: 0.1078 млн зараженных\n1 июня: 0.4163 млн зараженных\n1 сентября: 0.4676 млн зараженных\n" ], [ "# Посемплируем сигмоиды на будущее\nsampled_full = to_total(bayes_lr.sample(X_full, n_samples=100))", "_____no_output_____" ], [ "plot_sampled(sampled_full)\nplt.ylim(0, 1_000_000)\nplt.title('Предсказания до конца года');", "_____no_output_____" ], [ "# Посемплируем больше сигмоид на будущее\nsampled_full = to_total(bayes_lr.sample(X_full, n_samples=10000))", "_____no_output_____" ], [ "fig, ax = plt.subplots(3, 2, figsize=(16, 16))\n\nSHOW_THR = 3_000_000\n\nax[0][0].hist(sampled_full[till_may], bins=50)\nax[0][0].set_title('Предсказательное распределение количества зараженных к маю')\n\nax[0][1].hist(sampled_full[till_june][sampled_full[till_june] < SHOW_THR], bins=50)\nax[0][1].set_title('Предсказательное распределение количества зараженных к июню')\n\nax[1][0].hist(sampled_full[till_sept][sampled_full[till_sept] < 
SHOW_THR], bins=50)\nax[1][0].set_title('Предсказательное распределение количества зараженных к сентябрю')\n\nax[1][1].hist(sampled_full[-1][sampled_full[-1] < SHOW_THR], bins=50)\nax[1][1].set_title('Предсказательное распределение количества зараженных к концу года')\n\nax[2][0].hist(sampled_test.mean(0), bins=30)\nax[2][0].set_title('Распределение среднего числа зараженных для тестовой выборки')\n\nax[2][1].hist(sampled_full.mean(0)[sampled_full.mean(0) < SHOW_THR], bins=30)\nax[2][1].set_title('Распределение среднего числа зараженных до конца года')\n\nplt.show()", "_____no_output_____" ], [ "print(f'Оптимистичный прогноз к концу года: {int(np.quantile(sampled_full[-1], 0.1)) / 1_000_000:.4f} млн человек')\nprint(f'Пессимистичный прогноз к концу года: {int(np.quantile(sampled_full[-1], 0.9)) / 1_000_000:.4f} млн человек')", "Оптимистичный прогноз к концу года: 0.2409 млн человек\nПессимистичный прогноз к концу года: 1.3295 млн человек\n" ] ], [ [ "Если смотреть на пессимистичный прогноз, то он кажется уже чуть более реальным.", "_____no_output_____" ], [ "#### Что я понял", "_____no_output_____" ], [ "- Разобрался с байесовским выводом, понял (надеюсь), как обучать сигмоиды\n- Параметры априорных распределений не играют большой роли, когда имеется уже 50 точек\n- Моделировать экспонентой - шляпа, сигмоидой получше, хотя такие модели кажутся здесь все равно слишком неточными, и почти все зависит от выбора точки начала и конца моделирования\n- Решение вводить или не вводить карантин, наверное, можно принять, оценив результат от его введения (быстрее ли затухает сигмоида) в других странах", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
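The (Russian-language) notebook above models the cumulative case curve as a sigmoid by fitting the log of daily new cases with a quadratic in the day index and then cumulatively summing the exponent. A minimal English sketch of that idea — not part of the original record — with illustrative variable names and a plain least-squares fit standing in for the notebook's Bayesian ridge:

```python
import numpy as np

# `train` is assumed to be the notebook's training dataframe (columns: day, new_cases)
t = train['day'].values
log_new_cases = np.log(train['new_cases'].values)

# log(new cases) ~ w2*t^2 + w1*t + w0 -> the cumulative sum of exp(...) is S-shaped
w2, w1, w0 = np.polyfit(t, log_new_cases, deg=2)

t_future = np.arange(0, 304)                 # day 0 = 2020-03-03, through the end of the year
daily_pred = np.exp(w2 * t_future**2 + w1 * t_future + w0)
total_pred = 2 + np.cumsum(daily_pred)       # starts from the 2 initial cases, as in the notebook
```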
d06aa7cdc8183de517843326e02ed5ff14d328fb
3,918
ipynb
Jupyter Notebook
GPUtest.ipynb
bjarnidk/MRnet
58805b704a9882a54bd034770e15eb2a2b72e359
[ "MIT" ]
null
null
null
GPUtest.ipynb
bjarnidk/MRnet
58805b704a9882a54bd034770e15eb2a2b72e359
[ "MIT" ]
null
null
null
GPUtest.ipynb
bjarnidk/MRnet
58805b704a9882a54bd034770e15eb2a2b72e359
[ "MIT" ]
null
null
null
30.372093
518
0.543389
[ [ [ "torch.cuda.current_device()", "_____no_output_____" ], [ "# setting device on GPU if available, else CPU\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device:', device)\nprint()\n\n#Additional Info when using cuda\nif device.type == 'cuda':\n print(torch.cuda.get_device_name(0))\n print('Memory Usage:')\n print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')\n print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')", "Using device: cuda\n\nGeForce RTX 2070 SUPER\nMemory Usage:\nAllocated: 0.0 GB\nCached: 0.0 GB\n" ], [ "import torch\ntorch.cuda.is_available()", "_____no_output_____" ], [ "# assuming that 'a' is a tensor created somewhere else\na.device # returns the device where the tensor is allocated", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d06ab970cfabfd8a2ee4a802eacaa2c59a31eb35
24,660
ipynb
Jupyter Notebook
SVM/SVM_wesad_eda.ipynb
aiLocsRnD/classification
39d2cd8dc2fae176cfc657f706226e28ff78b45d
[ "MIT" ]
1
2021-04-11T09:44:51.000Z
2021-04-11T09:44:51.000Z
SVM/SVM_wesad_eda.ipynb
aiLocsRnD/classification
39d2cd8dc2fae176cfc657f706226e28ff78b45d
[ "MIT" ]
null
null
null
SVM/SVM_wesad_eda.ipynb
aiLocsRnD/classification
39d2cd8dc2fae176cfc657f706226e28ff78b45d
[ "MIT" ]
null
null
null
36.533333
489
0.423966
[ [ [ "SVM", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn import svm, metrics\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "wesad_eda = pd.read_csv('D:\\data\\wesad-chest-combined-classification-eda.csv') # need to adjust a path of dataset", "_____no_output_____" ], [ "wesad_eda.columns", "_____no_output_____" ], [ "original_column_list = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD',\n 'STD_1ST_GRAD', 'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC',\n 'RMSC', 'subject id', 'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG',\n 'RANGE_LOG', 'ALSC_LOG', 'MIN_LOG', 'MEAN_1ST_GRAD_LOG',\n 'MEAN_2ND_GRAD_LOG', 'MIN_LOG_LOG', 'MEAN_1ST_GRAD_LOG_LOG',\n 'MEAN_2ND_GRAD_LOG_LOG', 'APSC_LOG_LOG', 'ALSC_LOG_LOG', 'APSC_BOXCOX',\n 'RMSC_BOXCOX', 'RANGE_BOXCOX', 'MEAN_YEO_JONSON', 'SKEW_YEO_JONSON',\n 'KURT_YEO_JONSON', 'APSC_YEO_JONSON', 'MIN_YEO_JONSON',\n 'MAX_YEO_JONSON', 'MEAN_1ST_GRAD_YEO_JONSON', 'RMSC_YEO_JONSON',\n 'STD_1ST_GRAD_YEO_JONSON', 'RANGE_SQRT', 'RMSC_SQUARED',\n 'MEAN_2ND_GRAD_CUBE', 'INSC_APSC', 'condition', 'SSSQ class',\n 'SSSQ Label', 'condition label'] ", "_____no_output_____" ], [ "original_column_list_withoutString = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD',\n 'STD_1ST_GRAD', 'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC',\n 'RMSC', 'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG',\n 'RANGE_LOG', 'ALSC_LOG', 'MIN_LOG', 'MEAN_1ST_GRAD_LOG',\n 'MEAN_2ND_GRAD_LOG', 'MIN_LOG_LOG', 'MEAN_1ST_GRAD_LOG_LOG',\n 'MEAN_2ND_GRAD_LOG_LOG', 'APSC_LOG_LOG', 'ALSC_LOG_LOG', 'APSC_BOXCOX',\n 'RMSC_BOXCOX', 'RANGE_BOXCOX', 'MEAN_YEO_JONSON', 'SKEW_YEO_JONSON',\n 'KURT_YEO_JONSON', 'APSC_YEO_JONSON', 'MIN_YEO_JONSON',\n 'MAX_YEO_JONSON', 'MEAN_1ST_GRAD_YEO_JONSON', 'RMSC_YEO_JONSON',\n 'STD_1ST_GRAD_YEO_JONSON', 'RANGE_SQRT', 'RMSC_SQUARED',\n 'MEAN_2ND_GRAD_CUBE', 'INSC_APSC']", "_____no_output_____" ], [ "selected_colum_list = ['MEAN', 'MAX', 'MIN', 'RANGE', 'KURT', 'SKEW', 'MEAN_1ST_GRAD',\n 'STD_1ST_GRAD', 'MEAN_2ND_GRAD', 'STD_2ND_GRAD', 'ALSC', 'INSC', 'APSC',\n 'RMSC', 'subject id', 'MEAN_LOG', 'INSC_LOG', 'APSC_LOG', 'RMSC_LOG',\n 'RANGE_LOG', 'ALSC_LOG', 'MIN_LOG'] ", "_____no_output_____" ], [ "stress_data = wesad_eda[original_column_list_withoutString]\nstress_label = wesad_eda['condition label']\n\nstress_data", "_____no_output_____" ], [ "train_data, test_data, train_label, test_label = train_test_split(stress_data, stress_label)", "_____no_output_____" ], [ "from sklearn.decomposition import PCA\npca = PCA(n_components=2)\npca.fit(train_data)\nX_t_train = pca.transform(train_data)\nX_t_test = pca.transform(test_data)", "_____no_output_____" ], [ "model = svm.SVC()\nmodel.fit(X_t_train, train_label)\npredict = model.predict(X_t_test)", "_____no_output_____" ], [ "acc_score = metrics.accuracy_score(test_label, predict)\nprint(acc_score)", "0.4856296598917373\n" ], [ "import pickle\nfrom sklearn.externals import joblib", "_____no_output_____" ], [ "saved_model = pickle.dumps(model)", "_____no_output_____" ], [ "joblib.dump(model, 'SVMmodel1.pkl') ", "_____no_output_____" ], [ "model_from_pickle = joblib.load('SVMmodel1.pkl')", "_____no_output_____" ], [ "predict = model_from_pickle.predict(test_data)\nacc_score = metrics.accuracy_score(test_label, predict)\nprint(acc_score)", "0.9998672250025533\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06ad0c277e22325a1ddc08a98148740162e0f42
55,047
ipynb
Jupyter Notebook
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
9eb5f2254c4abdf21363b319f2f62afd593a8492
[ "MIT" ]
2
2022-02-04T23:10:25.000Z
2022-03-21T18:17:42.000Z
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
9eb5f2254c4abdf21363b319f2f62afd593a8492
[ "MIT" ]
null
null
null
Chapter01/chapter1-time-series-analysis-overview.ipynb
PacktPublishing/Time-Series-Analysis-on-AWS
9eb5f2254c4abdf21363b319f2f62afd593a8492
[ "MIT" ]
2
2022-01-30T00:12:44.000Z
2022-02-08T09:49:56.000Z
33.101022
552
0.560685
[ [ [ "# Time series analysis on AWS\n*Chapter 1 - Time series analysis overview*", "_____no_output_____" ], [ "## Initializations\n---", "_____no_output_____" ] ], [ [ "!pip install --quiet tqdm kaggle tsia ruptures", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ] ], [ [ "import matplotlib.colors as mpl_colors\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport ruptures as rpt\nimport sys\nimport tsia\nimport warnings\nimport zipfile\n\nfrom matplotlib import gridspec\nfrom sklearn.preprocessing import normalize\nfrom tqdm import tqdm\nfrom urllib.request import urlretrieve", "_____no_output_____" ] ], [ [ "### Parameters", "_____no_output_____" ] ], [ [ "RAW_DATA = os.path.join('..', 'Data', 'raw')\nDATA = os.path.join('..', 'Data')\nwarnings.filterwarnings(\"ignore\")\nos.makedirs(RAW_DATA, exist_ok=True)\n\n%matplotlib inline\n# plt.style.use('Solarize_Light2')\nplt.style.use('fivethirtyeight')\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\n\nplt.rcParams['figure.dpi'] = 300\nplt.rcParams['lines.linewidth'] = 0.3\nplt.rcParams['axes.titlesize'] = 6\nplt.rcParams['axes.labelsize'] = 6\nplt.rcParams['xtick.labelsize'] = 4.5\nplt.rcParams['ytick.labelsize'] = 4.5\nplt.rcParams['grid.linewidth'] = 0.2\nplt.rcParams['legend.fontsize'] = 5", "_____no_output_____" ] ], [ [ "### Helper functions", "_____no_output_____" ] ], [ [ "def progress_report_hook(count, block_size, total_size):\n mb = int(count * block_size // 1048576)\n if count % 500 == 0:\n sys.stdout.write(\"\\r{} MB downloaded\".format(mb))\n sys.stdout.flush()", "_____no_output_____" ] ], [ [ "### Downloading datasets", "_____no_output_____" ], [ "#### **Dataset 1:** Household energy consumption", "_____no_output_____" ] ], [ [ "ORIGINAL_DATA = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip'\nARCHIVE_PATH = os.path.join(RAW_DATA, 'energy-consumption.zip')\nFILE_NAME = 'energy-consumption.csv'\nFILE_PATH = os.path.join(DATA, 'energy', FILE_NAME)\nFILE_DIR = os.path.dirname(FILE_PATH)\n\nif not os.path.isfile(FILE_PATH):\n print(\"Downloading dataset (258MB), can take a few minutes depending on your connection\")\n urlretrieve(ORIGINAL_DATA, ARCHIVE_PATH, reporthook=progress_report_hook)\n os.makedirs(os.path.join(DATA, 'energy'), exist_ok=True)\n\n print(\"\\nExtracting data archive\")\n zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')\n zip_ref.extractall(FILE_DIR + '/')\n zip_ref.close()\n \n !rm -Rf $FILE_DIR/__MACOSX\n !mv $FILE_DIR/LD2011_2014.txt $FILE_PATH\n \nelse:\n print(\"File found, skipping download\")", "_____no_output_____" ] ], [ [ "#### **Dataset 2:** Nasa Turbofan remaining useful lifetime", "_____no_output_____" ] ], [ [ "ok = True\nok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'train_FD001.txt'))\nok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'test_FD001.txt'))\nok = ok and os.path.exists(os.path.join(DATA, 'turbofan', 'RUL_FD001.txt'))\n\nif (ok):\n print(\"File found, skipping download\")\n\nelse:\n print('Some datasets are missing, create working directories and download original dataset from the NASA repository.')\n \n # Making sure the directory already exists:\n os.makedirs(os.path.join(DATA, 'turbofan'), exist_ok=True)\n\n # Download the dataset from the NASA repository, unzip it and set\n # aside the first training file to work on:\n !wget https://ti.arc.nasa.gov/c/6/ 
--output-document=$RAW_DATA/CMAPSSData.zip\n !unzip $RAW_DATA/CMAPSSData.zip -d $RAW_DATA\n !cp $RAW_DATA/train_FD001.txt $DATA/turbofan/train_FD001.txt\n !cp $RAW_DATA/test_FD001.txt $DATA/turbofan/test_FD001.txt\n !cp $RAW_DATA/RUL_FD001.txt $DATA/turbofan/RUL_FD001.txt", "_____no_output_____" ] ], [ [ "#### **Dataset 3:** Human heartbeat", "_____no_output_____" ] ], [ [ "ECG_DATA_SOURCE = 'http://www.timeseriesclassification.com/Downloads/ECG200.zip'\nARCHIVE_PATH = os.path.join(RAW_DATA, 'ECG200.zip')\nFILE_NAME = 'ecg.csv'\nFILE_PATH = os.path.join(DATA, 'ecg', FILE_NAME)\nFILE_DIR = os.path.dirname(FILE_PATH)\n\nif not os.path.isfile(FILE_PATH):\n urlretrieve(ECG_DATA_SOURCE, ARCHIVE_PATH)\n os.makedirs(os.path.join(DATA, 'ecg'), exist_ok=True)\n\n print(\"\\nExtracting data archive\")\n zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')\n zip_ref.extractall(FILE_DIR + '/')\n zip_ref.close()\n \n !mv $DATA/ecg/ECG200_TRAIN.txt $FILE_PATH\n \nelse:\n print(\"File found, skipping download\")", "_____no_output_____" ] ], [ [ "#### **Dataset 4:** Industrial pump data\nTo download this dataset from Kaggle, you will need to have an account and create a token that you install on your machine. You can follow [**this link**](https://www.kaggle.com/docs/api) to get started with the Kaggle API. Once generated, make sure your Kaggle token is stored in the `~/.kaggle/kaggle.json` file, or the next cells will issue an error. In some cases, you may still have an error while using this location. Try moving your token in this location instead: `~/kaggle/kaggle.json` (not the absence of the `.` in the folder name).\n\nTo get a Kaggle token, go to kaggle.com and create an account. Then navigate to **My account** and scroll down to the API section. There, click the **Create new API token** button:\n\n<img src=\"../Assets/kaggle_api.png\" />\n", "_____no_output_____" ] ], [ [ "FILE_NAME = 'pump-sensor-data.zip'\nARCHIVE_PATH = os.path.join(RAW_DATA, FILE_NAME)\nFILE_PATH = os.path.join(DATA, 'pump', 'sensor.csv')\nFILE_DIR = os.path.dirname(FILE_PATH)\n\nif not os.path.isfile(FILE_PATH):\n if not os.path.exists('/home/ec2-user/.kaggle/kaggle.json'):\n os.makedirs('/home/ec2-user/.kaggle/', exist_ok=True)\n raise Exception('The kaggle.json token was not found.\\nCreating the /home/ec2-user/.kaggle/ directory: put your kaggle.json file there once you have generated it from the Kaggle website')\n else:\n print('The kaggle.json token file was found: making sure it is not readable by other users on this system.')\n !chmod 600 /home/ec2-user/.kaggle/kaggle.json\n\n os.makedirs(os.path.join(DATA, 'pump'), exist_ok=True)\n !kaggle datasets download -d nphantawee/pump-sensor-data -p $RAW_DATA\n\n print(\"\\nExtracting data archive\")\n zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')\n zip_ref.extractall(FILE_DIR + '/')\n zip_ref.close()\n \nelse:\n print(\"File found, skipping download\")", "_____no_output_____" ] ], [ [ "#### **Dataset 5:** London household energy consumption with weather data", "_____no_output_____" ] ], [ [ "FILE_NAME = 'smart-meters-in-london.zip'\nARCHIVE_PATH = os.path.join(RAW_DATA, FILE_NAME)\nFILE_PATH = os.path.join(DATA, 'energy-london', 'smart-meters-in-london.zip')\nFILE_DIR = os.path.dirname(FILE_PATH)\n\n# Checks if the data were already downloaded:\nif os.path.exists(os.path.join(DATA, 'energy-london', 'acorn_details.csv')):\n print(\"File found, skipping download\")\n \nelse:\n # Downloading and unzipping datasets from Kaggle:\n print(\"Downloading dataset (2.26G), can take a few minutes 
depending on your connection\")\n os.makedirs(os.path.join(DATA, 'energy-london'), exist_ok=True)\n !kaggle datasets download -d jeanmidev/smart-meters-in-london -p $RAW_DATA\n \n print('Unzipping files...')\n zip_ref = zipfile.ZipFile(ARCHIVE_PATH, 'r')\n zip_ref.extractall(FILE_DIR + '/')\n zip_ref.close()\n \n !rm $DATA/energy-london/*zip\n !rm $DATA/energy-london/*gz\n !mv $DATA/energy-london/halfhourly_dataset/halfhourly_dataset/* $DATA/energy-london/halfhourly_dataset\n !rm -Rf $DATA/energy-london/halfhourly_dataset/halfhourly_dataset\n !mv $DATA/energy-london/daily_dataset/daily_dataset/* $DATA/energy-london/daily_dataset\n !rm -Rf $DATA/energy-london/daily_dataset/daily_dataset", "_____no_output_____" ] ], [ [ "## Dataset visualization\n---", "_____no_output_____" ], [ "### **1.** Household energy consumption", "_____no_output_____" ] ], [ [ "%%time\n\nFILE_PATH = os.path.join(DATA, 'energy', 'energy-consumption.csv')\nenergy_df = pd.read_csv(FILE_PATH, sep=';', decimal=',')\nenergy_df = energy_df.rename(columns={'Unnamed: 0': 'Timestamp'})\nenergy_df['Timestamp'] = pd.to_datetime(energy_df['Timestamp'])\nenergy_df = energy_df.set_index('Timestamp')\nenergy_df.iloc[100000:, 1:5].head()", "_____no_output_____" ], [ "fig = plt.figure(figsize=(5, 1.876))\nplt.plot(energy_df['MT_002'])\nplt.title('Energy consumption for household MT_002')\nplt.show()", "_____no_output_____" ] ], [ [ "### **2.** NASA Turbofan data", "_____no_output_____" ] ], [ [ "FILE_PATH = os.path.join(DATA, 'turbofan', 'train_FD001.txt')\nturbofan_df = pd.read_csv(FILE_PATH, header=None, sep=' ')\nturbofan_df.dropna(axis='columns', how='all', inplace=True)\nprint('Shape:', turbofan_df.shape)\nturbofan_df.head(5)", "_____no_output_____" ], [ "columns = [\n 'unit_number',\n 'cycle',\n 'setting_1',\n 'setting_2',\n 'setting_3',\n] + ['sensor_{}'.format(s) for s in range(1,22)]\nturbofan_df.columns = columns\nturbofan_df.head()", "_____no_output_____" ], [ "# Add a RUL column and group the data by unit_number:\nturbofan_df['rul'] = 0\ngrouped_data = turbofan_df.groupby(by='unit_number')\n\n# Loops through each unit number to get the lifecycle counts:\nfor unit, rul in enumerate(grouped_data.count()['cycle']):\n current_df = turbofan_df[turbofan_df['unit_number'] == (unit+1)].copy()\n current_df['rul'] = rul - current_df['cycle']\n turbofan_df[turbofan_df['unit_number'] == (unit+1)] = current_df", "_____no_output_____" ], [ "df = turbofan_df.iloc[:, [0,1,2,3,4,5,6,25,26]].copy()\ndf = df[df['unit_number'] == 1]\n\ndef highlight_cols(s):\n return f'background-color: rgba(0, 143, 213, 0.3)'\n\ndf.head(10).style.applymap(highlight_cols, subset=['rul'])", "_____no_output_____" ] ], [ [ "### **3.** ECG Data", "_____no_output_____" ] ], [ [ "FILE_PATH = os.path.join(DATA, 'ecg', 'ecg.csv')\necg_df = pd.read_csv(FILE_PATH, header=None, sep=' ')\nprint('Shape:', ecg_df.shape)\necg_df.head()", "_____no_output_____" ], [ "plt.rcParams['lines.linewidth'] = 0.7\nfig = plt.figure(figsize=(5,2))\nlabel_normal = False\nlabel_ischemia = False\nfor i in range(0,100):\n label = ecg_df.iloc[i, 0]\n if (label == -1):\n color = colors[1]\n \n if label_ischemia:\n plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, linestyle='--', linewidth=0.5)\n else:\n plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, label='Ischemia', linestyle='--')\n label_ischemia = True\n \n else:\n color = colors[0]\n \n if label_normal:\n plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5)\n else:\n plt.plot(ecg_df.iloc[i,1:96], color=color, alpha=0.5, 
label='Normal')\n label_normal = True\n \nplt.title('Human heartbeat activity')\nplt.legend(loc='upper right', ncol=2)\nplt.show()", "_____no_output_____" ] ], [ [ "### **4.** Industrial pump data", "_____no_output_____" ] ], [ [ "FILE_PATH = os.path.join(DATA, 'pump', 'sensor.csv')\npump_df = pd.read_csv(FILE_PATH, sep=',')\npump_df.drop(columns={'Unnamed: 0'}, inplace=True)\npump_df['timestamp'] = pd.to_datetime(pump_df['timestamp'], format='%Y-%m-%d %H:%M:%S')\npump_df = pump_df.set_index('timestamp')\n\npump_df['machine_status'].replace(to_replace='NORMAL', value=np.nan, inplace=True)\npump_df['machine_status'].replace(to_replace='BROKEN', value=1, inplace=True)\npump_df['machine_status'].replace(to_replace='RECOVERING', value=1, inplace=True)\n\nprint('Shape:', pump_df.shape)\npump_df.head()", "_____no_output_____" ], [ "file_structure_df = pump_df.iloc[:, 0:10].resample('5D').mean()", "_____no_output_____" ], [ "plt.rcParams['hatch.linewidth'] = 0.5\nplt.rcParams['lines.linewidth'] = 0.5\n\nfig = plt.figure(figsize=(5,1))\nax1 = fig.add_subplot(1,1,1)\nplot1 = ax1.plot(pump_df['sensor_00'], label='Healthy pump')\n\nax2 = ax1.twinx()\nplot2 = ax2.fill_between(\n x=pump_df.index, \n y1=0.0, \n y2=pump_df['machine_status'], \n color=colors[1], \n linewidth=0.0,\n edgecolor='#000000',\n alpha=0.5, \n hatch=\"//////\", \n label='Broken pump'\n)\nax2.grid(False)\nax2.set_yticks([])\n\nlabels = [plot1[0].get_label(), plot2.get_label()]\n\nplt.legend(handles=[plot1[0], plot2], labels=labels, loc='lower center', ncol=2, bbox_to_anchor=(0.5, -.4))\nplt.title('Industrial pump sensor data')\nplt.show()", "_____no_output_____" ] ], [ [ "### **5.** London household energy consumption with weather data", "_____no_output_____" ], [ "We want to filter out households that are are subject to the dToU tariff and keep only the ones with a known ACORN (i.e. not in the ACORN-U group): this will allow us to better model future analysis by adding the Acorn detail informations (which by definitions, won't be available for the ACORN-U group).", "_____no_output_____" ] ], [ [ "household_filename = os.path.join(DATA, 'energy-london', 'informations_households.csv')\nhousehold_df = pd.read_csv(household_filename)\nhousehold_df = household_df[(household_df['stdorToU'] == 'Std') & (household_df['Acorn'] == 'ACORN-E')]\nprint(household_df.shape)\nhousehold_df.head()", "_____no_output_____" ] ], [ [ "#### Associating households with they energy consumption data\nEach household (with an ID starting by `MACxxxxx` in the table above) has its consumption data stored in a block file name `block_xx`. This file is also available from the `informations_household.csv` file extracted above. We have the association between `household_id` and `block_file`: we can open each of them and keep the consumption for the households of interest. All these data will be concatenated into an `energy_df` dataframe:", "_____no_output_____" ] ], [ [ "%%time\n\nhousehold_ids = household_df['LCLid'].tolist()\nconsumption_file = os.path.join(DATA, 'energy-london', 'hourly_consumption.csv')\nmin_data_points = ((pd.to_datetime('2020-12-31') - pd.to_datetime('2020-01-01')).days + 1)*24*2\n\nif os.path.exists(consumption_file):\n print('Half-hourly consumption file already exists, loading from disk...')\n energy_df = pd.read_csv(consumption_file)\n energy_df['timestamp'] = pd.to_datetime(energy_df['timestamp'], format='%Y-%m-%d %H:%M:%S.%f')\n print('Done.')\n \nelse:\n print('Half-hourly consumption file not found. 
We need to generate it.')\n \n # We know have the block number we can use to open the right file:\n energy_df = pd.DataFrame()\n target_block_files = household_df['file'].unique().tolist()\n print('- {} block files to process: '.format(len(target_block_files)), end='')\n df_list = []\n for block_file in tqdm(target_block_files):\n # Reads the current block file:\n current_filename = os.path.join(DATA, 'energy-london', 'halfhourly_dataset', '{}.csv'.format(block_file))\n df = pd.read_csv(current_filename)\n \n # Set readable column names and adjust data types:\n df.columns = ['household_id', 'timestamp', 'energy']\n df = df.replace(to_replace='Null', value=0.0)\n df['energy'] = df['energy'].astype(np.float64)\n df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S.%f')\n \n # We filter on the households sampled earlier:\n df_list.append(df[df['household_id'].isin(household_ids)].reset_index(drop=True))\n \n # Concatenate with the main dataframe:\n energy_df = pd.concat(df_list, axis='index', ignore_index=True)\n \n datapoints = energy_df.groupby(by='household_id').count()\n datapoints = datapoints[datapoints['timestamp'] < min_data_points]\n hhid_to_remove = datapoints.index.tolist()\n energy_df = energy_df[~energy_df['household_id'].isin(hhid_to_remove)]\n\n # Let's save this dataset to disk, we will use it from now on:\n print('Saving file to disk... ', end='')\n energy_df.to_csv(consumption_file, index=False)\n print('Done.')", "_____no_output_____" ], [ "start = np.min(energy_df['timestamp'])\nend = np.max(energy_df['timestamp'])\nweather_filename = os.path.join(DATA, 'energy-london', 'weather_hourly_darksky.csv')\n\nweather_df = pd.read_csv(weather_filename)\nweather_df['time'] = pd.to_datetime(weather_df['time'], format='%Y-%m-%d %H:%M:%S')\nweather_df = weather_df.drop(columns=['precipType', 'icon', 'summary'])\nweather_df = weather_df.sort_values(by='time')\nweather_df = weather_df.set_index('time')\nweather_df = weather_df[start:end]\n\n# Let's make sure we have one datapoint per hour to match \n# the frequency used for the household energy consumption data:\nweather_df = weather_df.resample(rule='1H').mean() # This will generate NaN values timestamp missing data\nweather_df = weather_df.interpolate(method='linear') # This will fill the missing values with the average \n\nprint(weather_df.shape)\nweather_df", "_____no_output_____" ], [ "energy_df = energy_df.set_index(['household_id', 'timestamp'])\nenergy_df", "_____no_output_____" ], [ "hhid = household_ids[2]\nhh_energy = energy_df.loc[hhid, :]\nstart = '2012-07-01'\nend = '2012-07-15'\n\nfig = plt.figure(figsize=(5,1))\nax1 = fig.add_subplot(1,1,1)\nplot2 = ax1.fill_between(\n x=weather_df.loc[start:end, 'temperature'].index, \n y1=0.0, \n y2=weather_df.loc[start:end, 'temperature'], \n color=colors[1], \n linewidth=0.0,\n edgecolor='#000000',\n alpha=0.25, \n hatch=\"//////\", \n label='Temperature'\n)\nax1.set_ylim((0,40))\nax1.grid(False)\n\nax2 = ax1.twinx()\nax2.plot(hh_energy[start:end], label='Energy consumption', linewidth=2, color='#FFFFFF', alpha=0.5)\nplot1 = ax2.plot(hh_energy[start:end], label='Energy consumption', linewidth=0.7)\nax2.set_title(f'Energy consumption for household {hhid}')\n\nlabels = [plot1[0].get_label(), plot2.get_label()]\nplt.legend(handles=[plot1[0], plot2], labels=labels, loc='upper left', fontsize=3, ncol=2)\n\nplt.show()", "_____no_output_____" ], [ "acorn_filename = os.path.join(DATA, 'energy-london', 'acorn_details.csv')\nacorn_df = pd.read_csv(acorn_filename, 
encoding='ISO-8859-1')\nacorn_df = acorn_df.sample(10).loc[:, ['MAIN CATEGORIES', 'CATEGORIES', 'REFERENCE', 'ACORN-A', 'ACORN-B', 'ACORN-E']]\nacorn_df", "_____no_output_____" ] ], [ [ "## File structure exploration\n---", "_____no_output_____" ] ], [ [ "from IPython.display import display_html\n\ndef display_multiple_dataframe(*args, max_rows=None, max_cols=None):\n html_str = ''\n for df in args:\n html_str += df.to_html(max_cols=max_cols, max_rows=max_rows)\n \n display_html(html_str.replace('table','table style=\"display:inline\"'), raw=True)", "_____no_output_____" ], [ "display_multiple_dataframe(\n file_structure_df[['sensor_00']],\n file_structure_df[['sensor_01']],\n file_structure_df[['sensor_03']],\n max_rows=10, max_cols=None\n)", "_____no_output_____" ], [ "display_multiple_dataframe(\n file_structure_df.loc['2018-04', :].head(6),\n file_structure_df.loc['2018-05', :].head(6),\n file_structure_df.loc['2018-06', :].head(6),\n max_rows=None, max_cols=2\n)", "_____no_output_____" ], [ "display_multiple_dataframe(\n file_structure_df.loc['2018-04', ['sensor_00']].head(6),\n file_structure_df.loc['2018-05', ['sensor_00']].head(6),\n file_structure_df.loc['2018-06', ['sensor_00']].head(6),\n max_rows=10, max_cols=None\n)\ndisplay_multiple_dataframe(\n file_structure_df.loc['2018-04', ['sensor_01']].head(6),\n file_structure_df.loc['2018-05', ['sensor_01']].head(6),\n file_structure_df.loc['2018-06', ['sensor_01']].head(6),\n max_rows=10, max_cols=None\n)\nprint('.\\n.\\n.')\ndisplay_multiple_dataframe(\n file_structure_df.loc['2018-04', ['sensor_09']].head(6),\n file_structure_df.loc['2018-05', ['sensor_09']].head(6),\n file_structure_df.loc['2018-06', ['sensor_09']].head(6),\n max_rows=10, max_cols=None\n)", "_____no_output_____" ], [ "df1 = pump_df.iloc[:, [0]].resample('5D').mean()\ndf2 = pump_df.iloc[:, [1]].resample('2D').mean()\ndf3 = pump_df.iloc[:, [2]].resample('7D').mean()\n\ndisplay_multiple_dataframe(\n df1.head(10), df2.head(10), df3.head(10),\n pd.merge(pd.merge(df1, df2, left_index=True, right_index=True, how='outer'), df3, left_index=True, right_index=True, how='outer').head(10),\n max_rows=None, max_cols=None\n)", "_____no_output_____" ], [ "pd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', 10)\npd.merge(pd.merge(df1, df2, left_index=True, right_index=True, how='outer'), df3, left_index=True, right_index=True, how='outer').head(10)", "_____no_output_____" ], [ "plt.figure(figsize=(5,1))\nfor i in range(len(colors)):\n plt.plot(file_structure_df[f'sensor_0{i}'], linewidth=2, alpha=0.5, label=colors[i])\n\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## Visualization\n---", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(5,1))\nax1 = fig.add_subplot(1,1,1)\nax2 = ax1.twinx()\n\nplot_sensor_0 = ax1.plot(pump_df['sensor_00'], label='Sensor 0', color=colors[0], linewidth=1, alpha=0.8)\nplot_sensor_1 = ax2.plot(pump_df['sensor_01'], label='Sensor 1', color=colors[1], linewidth=1, alpha=0.8)\nax2.grid(False)\nplt.title('Pump sensor values (2 sensors)')\nplt.legend(handles=[plot_sensor_0[0], plot_sensor_1[0]], ncol=2, loc='lower right')\nplt.show()", "_____no_output_____" ], [ "reduced_pump_df = pump_df.loc[:, 'sensor_00':'sensor_14']\nreduced_pump_df = reduced_pump_df.replace([np.inf, -np.inf], np.nan)\nreduced_pump_df = reduced_pump_df.fillna(0.0)\nreduced_pump_df = reduced_pump_df.astype(np.float32)\nscaled_pump_df = pd.DataFrame(normalize(reduced_pump_df), index=reduced_pump_df.index, 
columns=reduced_pump_df.columns)\nscaled_pump_df", "_____no_output_____" ], [ "fig = plt.figure(figsize=(5,1))\n\nfor i in range(0,15):\n plt.plot(scaled_pump_df.iloc[:, i], alpha=0.6)\n\nplt.title('Pump sensor values (15 sensors)')\nplt.show()", "_____no_output_____" ], [ "pump_df2 = pump_df.copy()\n\npump_df2 = pump_df2.replace([np.inf, -np.inf], np.nan)\npump_df2 = pump_df2.fillna(0.0)\npump_df2 = pump_df2.astype(np.float32)\n\npump_description = pump_df2.describe().T\nconstant_signals = pump_description[pump_description['min'] == pump_description['max']].index.tolist()\npump_df2 = pump_df2.drop(columns=constant_signals)\n\nfeatures = pump_df2.columns.tolist()", "_____no_output_____" ], [ "def hex_to_rgb(hex_color):\n \"\"\"\n Converts a color string in hexadecimal format to RGB format.\n \n PARAMS\n ======\n hex_color: string\n A string describing the color to convert from hexadecimal. It can\n include the leading # character or not\n \n RETURNS\n =======\n rgb_color: tuple\n Each color component of the returned tuple will be a float value\n between 0.0 and 1.0\n \"\"\"\n hex_color = hex_color.lstrip('#')\n rgb_color = tuple(int(hex_color[i:i+2], base=16) / 255.0 for i in [0, 2, 4])\n return rgb_color\n\ndef plot_timeseries_strip_chart(binned_timeseries, signal_list, fig_width=12, signal_height=0.15, dates=None, day_interval=7):\n # Build a suitable colormap:\n colors_list = [\n hex_to_rgb('#DC322F'), \n hex_to_rgb('#B58900'), \n hex_to_rgb('#2AA198')\n ]\n cm = mpl_colors.LinearSegmentedColormap.from_list('RdAmGr', colors_list, N=len(colors_list))\n \n fig = plt.figure(figsize=(fig_width, signal_height * binned_timeseries.shape[0]))\n ax = fig.add_subplot(1,1,1)\n \n # Devising the extent of the actual plot:\n if dates is not None:\n dnum = mdates.date2num(dates)\n start = dnum[0] - (dnum[1]-dnum[0])/2.\n stop = dnum[-1] + (dnum[1]-dnum[0])/2.\n extent = [start, stop, 0, signal_height * (binned_timeseries.shape[0])]\n \n else:\n extent = None\n \n # Plot the matrix:\n im = ax.imshow(binned_timeseries, \n extent=extent, \n aspect=\"auto\", \n cmap=cm, \n origin='lower')\n \n # Adjusting the x-axis if we provide dates:\n if dates is not None:\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(4)\n tick.label.set_rotation(60)\n tick.label.set_fontweight('bold')\n\n ax.tick_params(axis='x', which='major', pad=7, labelcolor='#000000')\n plt.xticks(ha='right')\n \n # Adjusting the y-axis:\n ax.yaxis.set_major_locator(ticker.MultipleLocator(signal_height))\n ax.set_yticklabels(signal_list, verticalalignment='bottom', fontsize=4)\n ax.set_yticks(np.arange(len(signal_list)) * signal_height)\n\n plt.grid()\n return ax", "_____no_output_____" ], [ "from IPython.display import display, Markdown, Latex\n\n# Build a list of dataframes, one per sensor:\ndf_list = []\nfor f in features[:1]:\n df_list.append(pump_df2[[f]])\n\n# Discretize each signal in 3 bins:\narray = tsia.markov.discretize_multivariate(df_list)\n\nfig = plt.figure(figsize=(5.5, 0.6))\nplt.plot(pump_df2['sensor_00'], linewidth=0.7, alpha=0.6)\nplt.title('Line plot of the pump sensor 0')\nplt.show()\n\ndisplay(Markdown('<img src=\"arrow.png\" align=\"left\" style=\"padding-left: 730px\"/>'))\n\n\n# Plot the strip chart:\nax = plot_timeseries_strip_chart(\n array, \n signal_list=features[:1],\n fig_width=5.21,\n signal_height=0.2,\n dates=df_list[0].index.to_pydatetime(),\n 
day_interval=2\n)\nax.set_title('Strip chart of the pump sensor 0');", "_____no_output_____" ], [ "# Build a list of dataframes, one per sensor:\ndf_list = []\nfor f in features:\n df_list.append(pump_df2[[f]])\n\n# Discretize each signal in 3 bins:\narray = tsia.markov.discretize_multivariate(df_list)\n\n# Plot the strip chart:\nfig = plot_timeseries_strip_chart(\n array, \n signal_list=features,\n fig_width=5.5,\n signal_height=0.1,\n dates=df_list[0].index.to_pydatetime(),\n day_interval=2\n)", "_____no_output_____" ] ], [ [ "### Recurrence plot", "_____no_output_____" ] ], [ [ "from pyts.image import RecurrencePlot\nfrom pyts.image import GramianAngularField\nfrom pyts.image import MarkovTransitionField", "_____no_output_____" ], [ "hhid = household_ids[2]\nhh_energy = energy_df.loc[hhid, :]\npump_extract_df = pump_df.iloc[:800, 0].copy()\n\nrp = RecurrencePlot(threshold='point', percentage=30)\nweather_rp = rp.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))\nenergy_rp = rp.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))\npump_rp = rp.fit_transform(pump_extract_df.values.reshape(1, -1))\n\n\nfig = plt.figure(figsize=(5.5, 2.4))\ngs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)\n\n# Pump sensor 0:\nax = fig.add_subplot(gs[0])\nax.plot(pump_extract_df, label='Pump sensor 0')\nax.set_title(f'Pump sensor 0')\n\nax = fig.add_subplot(gs[1])\nax.imshow(pump_rp[0], cmap='binary', origin='lower')\nax.axis('off')\n\n# Energy consumption line plot and recurrence plot:\nax = fig.add_subplot(gs[2])\nplot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])\nax.set_title(f'Energy consumption for household {hhid}')\n\nax = fig.add_subplot(gs[3])\nax.imshow(energy_rp[0], cmap='binary', origin='lower')\nax.axis('off')\n\n# Daily temperature line plot and recurrence plot:\nax = fig.add_subplot(gs[4])\nstart = '2012-07-01'\nend = '2012-07-15'\nax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])\nax.set_title(f'Daily temperature')\n\nax = fig.add_subplot(gs[5])\nax.imshow(weather_rp[0], cmap='binary', origin='lower')\nax.axis('off')\n\nplt.show()", "_____no_output_____" ], [ "hhid = household_ids[2]\nhh_energy = energy_df.loc[hhid, :]\npump_extract_df = pump_df.iloc[:800, 0].copy()\n\ngaf = GramianAngularField(image_size=48, method='summation')\nweather_gasf = gaf.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))\nenergy_gasf = gaf.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))\npump_gasf = gaf.fit_transform(pump_extract_df.values.reshape(1, -1))\n\nfig = plt.figure(figsize=(5.5, 2.4))\ngs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)\n\n# Pump sensor 0:\nax = fig.add_subplot(gs[0])\nax.plot(pump_extract_df, label='Pump sensor 0')\nax.set_title(f'Pump sensor 0')\n\nax = fig.add_subplot(gs[1])\nax.imshow(pump_gasf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\n# Energy consumption line plot and recurrence plot:\nax = fig.add_subplot(gs[2])\nplot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])\nax.set_title(f'Energy consumption for household {hhid}')\n\nax = fig.add_subplot(gs[3])\nax.imshow(energy_gasf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\n# Daily temperature line plot and recurrence plot:\nax = fig.add_subplot(gs[4])\nstart = '2012-07-01'\nend = 
'2012-07-15'\nax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])\nax.set_title(f'Daily temperature')\n\nax = fig.add_subplot(gs[5])\nax.imshow(weather_gasf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\nplt.show()", "_____no_output_____" ], [ "mtf = MarkovTransitionField(image_size=48)\n\nweather_mtf = mtf.fit_transform(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1))\nenergy_mtf = mtf.fit_transform(hh_energy['2012-07-01':'2012-07-15'].values.reshape(1, -1))\npump_mtf = mtf.fit_transform(pump_extract_df.values.reshape(1, -1))\n\nfig = plt.figure(figsize=(5.5, 2.4))\ngs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)\n\n# Pump sensor 0:\nax = fig.add_subplot(gs[0])\nax.plot(pump_extract_df, label='Pump sensor 0')\nax.set_title(f'Pump sensor 0')\n\nax = fig.add_subplot(gs[1])\nax.imshow(pump_mtf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\n# Energy consumption line plot and recurrence plot:\nax = fig.add_subplot(gs[2])\nplot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])\nax.set_title(f'Energy consumption for household {hhid}')\n\nax = fig.add_subplot(gs[3])\nax.imshow(energy_mtf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\n# Daily temperature line plot and recurrence plot:\nax = fig.add_subplot(gs[4])\nstart = '2012-07-01'\nend = '2012-07-15'\nax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])\nax.set_title(f'Daily temperature')\n\nax = fig.add_subplot(gs[5])\nax.imshow(weather_mtf[0], cmap='RdBu_r', origin='lower')\nax.axis('off')\n\nplt.show()", "_____no_output_____" ], [ "import matplotlib\nimport matplotlib.cm as cm\nimport networkx as nx\nimport community\n\ndef compute_network_graph(markov_field):\n G = nx.from_numpy_matrix(markov_field[0])\n\n # Uncover the communities in the current graph:\n communities = community.best_partition(G)\n nb_communities = len(pd.Series(communities).unique())\n cmap = 'autumn'\n\n # Compute node colors and edges colors for the modularity encoding:\n edge_colors = [matplotlib.colors.to_hex(cm.get_cmap(cmap)(communities.get(v)/(nb_communities - 1))) for u,v in G.edges()]\n node_colors = [communities.get(node) for node in G.nodes()]\n node_size = [nx.average_clustering(G, [node])*90 for node in G.nodes()]\n\n # Builds the options set to draw the network graph in the \"modularity\" configuration:\n options = {\n 'node_size': 10,\n 'edge_color': edge_colors,\n 'node_color': node_colors,\n 'linewidths': 0,\n 'width': 0.1,\n 'alpha': 0.6,\n 'with_labels': False,\n 'cmap': cmap\n }\n \n return G, options", "_____no_output_____" ], [ "fig = plt.figure(figsize=(5.5, 2.4))\ngs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[3,1], hspace=0.8, wspace=0.0)\n\n# Pump sensor 0:\nax = fig.add_subplot(gs[0])\nax.plot(pump_extract_df, label='Pump sensor 0')\nax.set_title(f'Pump sensor 0')\n\nax = fig.add_subplot(gs[1])\nG, options = compute_network_graph(weather_mtf)\nnx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)\nax.axis('off')\n\n# Energy consumption line plot and recurrence plot:\nax = fig.add_subplot(gs[2])\nplot1 = ax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])\nax.set_title(f'Energy consumption for household {hhid}')\n\nax = fig.add_subplot(gs[3])\nG, options = compute_network_graph(energy_mtf)\nnx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)\nax.axis('off')\n\n# Daily temperature line plot and recurrence plot:\nax = fig.add_subplot(gs[4])\nstart 
= '2012-07-01'\nend = '2012-07-15'\nax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])\nax.set_title(f'Daily temperature')\n\nax = fig.add_subplot(gs[5])\nG, options = compute_network_graph(weather_mtf)\nnx.draw_networkx(G, **options, pos=nx.spring_layout(G), ax=ax)\nax.axis('off')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Symbolic representation\n---", "_____no_output_____" ] ], [ [ "from pyts.bag_of_words import BagOfWords\n\nwindow_size, word_size = 30, 5\nbow = BagOfWords(window_size=window_size, word_size=word_size, window_step=window_size, numerosity_reduction=False)\nX = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.reshape(1, -1)\nX_bow = bow.transform(X)\ntime_index = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].index\nlen(X_bow[0].replace(' ', ''))", "_____no_output_____" ], [ "# Plot the considered subseries\nplt.figure(figsize=(5, 2))\nsplits_series = np.linspace(0, X.shape[1], 1 + X.shape[1] // window_size, dtype='int64')\nfor start, end in zip(splits_series[:-1], np.clip(splits_series[1:] + 1, 0, X.shape[1])):\n plt.plot(np.arange(start, end), X[0, start:end], 'o-', linewidth=0.5, ms=0.1)\n\n# Plot the corresponding letters\nsplits_letters = np.linspace(0, X.shape[1], 1 + word_size * X.shape[1] // window_size)\nsplits_letters = ((splits_letters[:-1] + splits_letters[1:]) / 2)\nsplits_letters = splits_letters.astype('int64')\n\nfor i, (x, text) in enumerate(zip(splits_letters, X_bow[0].replace(' ', ''))):\n t = plt.text(x, X[0, x], text, color=\"C{}\".format(i // 5), fontsize=3.5)\n t.set_bbox(dict(facecolor='#FFFFFF', alpha=0.5, edgecolor=\"C{}\".format(i // 5), boxstyle='round4'))\n\nplt.title('Bag-of-words representation for weather temperature')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "from pyts.transformation import WEASEL\nfrom sklearn.preprocessing import LabelEncoder", "_____no_output_____" ], [ "X_train = ecg_df.iloc[:, 1:].values\ny_train = ecg_df.iloc[:, 0]\ny_train = LabelEncoder().fit_transform(y_train)\nweasel = WEASEL(word_size=3, n_bins=3, window_sizes=[10, 25], sparse=False)\nX_weasel = weasel.fit_transform(X_train, y_train)\nvocabulary_length = len(weasel.vocabulary_)", "_____no_output_____" ], [ "plt.figure(figsize=(5,1.5))\nwidth = 0.4\nx = np.arange(vocabulary_length) - width / 2\nfor i in range(len(X_weasel[y_train == 0])):\n if i == 0:\n plt.bar(x, X_weasel[y_train == 0][i], width=width, alpha=0.25, color=colors[1], label='Time series for Ischemia')\n else:\n plt.bar(x, X_weasel[y_train == 0][i], width=width, alpha=0.25, color=colors[1])\n \nfor i in range(len(X_weasel[y_train == 1])):\n if i == 0:\n plt.bar(x+width, X_weasel[y_train == 1][i], width=width, alpha=0.25, color=colors[0], label='Time series for Normal heartbeat')\n else:\n plt.bar(x+width, X_weasel[y_train == 1][i], width=width, alpha=0.25, color=colors[0])\n \nplt.xticks(\n np.arange(vocabulary_length),\n np.vectorize(weasel.vocabulary_.get)(np.arange(X_weasel[0].size)),\n fontsize=2,\n rotation=60\n)\n \nplt.legend(loc='upper right')\nplt.show()", "_____no_output_____" ] ], [ [ "## Statistics\n---", "_____no_output_____" ] ], [ [ "plt.rcParams['xtick.labelsize'] = 3\n\nimport statsmodels.api as sm\n\nfig = plt.figure(figsize=(5.5, 3))\ngs = gridspec.GridSpec(nrows=3, ncols=2, width_ratios=[1,1], hspace=0.8)\n\n# Pump\nax = fig.add_subplot(gs[0])\nax.plot(pump_extract_df, label='Pump sensor 0')\nax.set_title(f'Pump sensor 0')\nax.tick_params(axis='x', which='both', labelbottom=False)\n\nax = 
fig.add_subplot(gs[1])\nsm.graphics.tsa.plot_acf(pump_extract_df.values.squeeze(), ax=ax, markersize=1, title='')\nax.set_ylim(-1.2, 1.2)\nax.tick_params(axis='x', which='major', labelsize=4)\n\n# Energy consumption\nax = fig.add_subplot(gs[2])\nax.plot(hh_energy['2012-07-01':'2012-07-15'], color=colors[1])\nax.set_title(f'Energy consumption for household {hhid}')\nax.tick_params(axis='x', which='both', labelbottom=False)\n\nax = fig.add_subplot(gs[3])\nsm.graphics.tsa.plot_acf(hh_energy['2012-07-01':'2012-07-15'].values.squeeze(), ax=ax, markersize=1, title='')\nax.set_ylim(-0.3, 0.3)\nax.tick_params(axis='x', which='major', labelsize=4)\n\n# Daily temperature:\nax = fig.add_subplot(gs[4])\nstart = '2012-07-01'\nend = '2012-07-15'\nax.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2])\nax.set_title(f'Daily temperature')\nax.tick_params(axis='x', which='both', labelbottom=False)\n\nax = fig.add_subplot(gs[5])\nsm.graphics.tsa.plot_acf(weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.squeeze(), ax=ax, markersize=1, title='')\nax.set_ylim(-1.2, 1.2)\nax.tick_params(axis='x', which='major', labelsize=4)\n\nplt.show()", "_____no_output_____" ], [ "from statsmodels.tsa.seasonal import STL\n\nendog = endog.resample('30T').mean()", "_____no_output_____" ], [ "plt.rcParams['lines.markersize'] = 1\n\ntitle = f'Energy consumption for household {hhid}'\nendog = hh_energy['2012-07-01':'2012-07-15']\nendog.columns = [title]\nendog = endog[title]\nstl = STL(endog, period=48)\nres = stl.fit()\nfig = res.plot()\n\nfig = plt.gcf()\nfig.set_size_inches(5.5, 4)\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Binary segmentation\n---", "_____no_output_____" ] ], [ [ "signal = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].values.squeeze()\nalgo = rpt.Binseg(model='l2').fit(signal)\nmy_bkps = algo.predict(n_bkps=3)", "_____no_output_____" ], [ "my_bkps = [0] + my_bkps\nmy_bkps", "_____no_output_____" ], [ "fig = plt.figure(figsize=(5.5,1))\nstart = '2012-07-01'\nend = '2012-07-15'\nplt.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color='#FFFFFF', linewidth=1.2, alpha=0.8)\nplt.plot(weather_df.loc['2013-01-01':'2013-01-31']['temperature'], color=colors[2], linewidth=0.7)\n\nplt.title(f'Daily temperature')\nplt.xticks(rotation=60, fontsize=4)\n\nweather_index = weather_df.loc['2013-01-01':'2013-01-31']['temperature'].index\n\nfor index, bkps in enumerate(my_bkps[:-1]):\n x1 = weather_index[my_bkps[index]]\n x2 = weather_index[np.clip(my_bkps[index+1], 0, len(weather_index)-1)]\n \n plt.axvspan(x1, x2, color=colors[index % 5], alpha=0.2)\n\nplt.title('Daily temperature segmentation')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06ad4cfd1c5325791db1942fb01262d6356f604
3,054
ipynb
Jupyter Notebook
iJulia/ConstantTPH2Combustion.ipynb
hwpang/ReactionMechanismSimulator.jl
1fba41e212860607a1340300b798b41c05aa7049
[ "MIT" ]
null
null
null
iJulia/ConstantTPH2Combustion.ipynb
hwpang/ReactionMechanismSimulator.jl
1fba41e212860607a1340300b798b41c05aa7049
[ "MIT" ]
null
null
null
iJulia/ConstantTPH2Combustion.ipynb
hwpang/ReactionMechanismSimulator.jl
1fba41e212860607a1340300b798b41c05aa7049
[ "MIT" ]
null
null
null
22.291971
164
0.555665
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06ae3d05e2e6563e9e32fefb6d69a56f37989e4
312,963
ipynb
Jupyter Notebook
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
003b098e9ea895de9baf5a8e9d7dda17a6b4ce8c
[ "Apache-2.0" ]
null
null
null
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
003b098e9ea895de9baf5a8e9d7dda17a6b4ce8c
[ "Apache-2.0" ]
null
null
null
climatology_analysis_notebooks/mohid_viz.ipynb
MIDOSS/analysis-ashutosh
003b098e9ea895de9baf5a8e9d7dda17a6b4ce8c
[ "Apache-2.0" ]
null
null
null
333.29393
124,824
0.928202
[ [ [ "# MOHID visualisation tools", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\n\nHTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to toggle on/off the raw code.\"></form>''')", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport xarray as xr\nimport numpy as np\nimport cmocean\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## How to Parse time into datetime64 string format", "_____no_output_____" ] ], [ [ "from datetime import datetime, timedelta\nfrom dateutil.parser import parse", "_____no_output_____" ], [ "def to_datetime64(time):\n \"\"\"Convert string to string in datetime64[s] format\n :arg time: string\n :return datetime64: str in datetime64[s] format\n \"\"\"\n time = parse(time) # parse to datetime format\n # now just take care of formatting\n year, month, day, hour, minute, second = str(time.year), str(time.month), str(time.day), str(time.hour), str(time.minute), str(time.second)\n if len(month) < 2:\n month = '0' + month\n if len(day) < 2:\n day = '0' + day\n if len(hour) < 2:\n hour = '0' + hour\n if len(minute) < 2:\n minute = '0' + minute\n if len(second) < 2:\n second = '0' + second\n datetime64 = '{}-{}-{}T{}:{}:{}'.format(year, month, day, hour, minute, second)\n return datetime64", "_____no_output_____" ] ], [ [ "### Usage:", "_____no_output_____" ] ], [ [ "to_datetime64('1 Jan 2016')", "_____no_output_____" ] ], [ [ "<h2>Generate heat maps of vertical velocities</h2>", "_____no_output_____" ], [ "<h3>Getting depth slices</h3>", "_____no_output_____" ] ], [ [ "# load a profile\nsog2015 = xr.open_dataset('Vertical_velocity_profiles/sog2015.nc')", "_____no_output_____" ], [ "sog2015", "_____no_output_____" ], [ "# slice by layer index\nsog2015.vovecrtz.isel(depthw = slice(0,11))", "_____no_output_____" ], [ "# slice explicitly by layer depth\n\n# print depth with corresponding index\nfor i in zip(range(40), sog2015.depthw.values):\n print(i)", "(0, 0.0)\n(1, 1.0000012)\n(2, 2.0000064)\n(3, 3.0000193)\n(4, 4.0000467)\n(5, 5.000104)\n(6, 6.000217)\n(7, 7.0004406)\n(8, 8.000879)\n(9, 9.001736)\n(10, 10.003407)\n(11, 11.006662)\n(12, 12.013008)\n(13, 13.025366)\n(14, 14.049429)\n(15, 15.096255)\n(16, 16.187304)\n(17, 17.364035)\n(18, 18.705973)\n(19, 20.363474)\n(20, 22.613064)\n(21, 25.937412)\n(22, 31.101034)\n(23, 39.11886)\n(24, 50.963238)\n(25, 67.05207)\n(26, 86.96747)\n(27, 109.73707)\n(28, 134.34593)\n(29, 160.02956)\n(30, 186.30528)\n(31, 212.89656)\n(32, 239.65305)\n(33, 266.4952)\n(34, 293.3816)\n(35, 320.29077)\n(36, 347.2116)\n(37, 374.1385)\n(38, 401.06845)\n(39, 428.0)\n" ], [ "sog2015.vovecrtz.sel(depthw = slice(0.0, 10.003407))", "_____no_output_____" ] ], [ [ "### Getting time slices using parsing", "_____no_output_____" ] ], [ [ "# this is where to_datetime64 comes in handy\n# getting the first week in january\nsog2015.sel(time_counter = slice(to_datetime64('1 jan 2015'), to_datetime64('7 jan 2015')))", "_____no_output_____" ] ], [ [ "### Slicing by time and depth at the same time", "_____no_output_____" ] ], [ [ "slice_example = sog2015.vovecrtz.sel(time_counter = slice(to_datetime64('1 jan 2015'), to_datetime64('7 jan 2015'))).isel(depthw = slice(0,11))", "_____no_output_____" ], [ "slice_example", "_____no_output_____" ] ], [ [ "### Plotting the slice", 
"_____no_output_____" ] ], [ [ "slice_example.T.plot(cmap = 'RdBu') # transposed to have depth on y axis. cmap specified as RdBu.\nplt.gca().invert_yaxis()", "_____no_output_____" ] ], [ [ "<h3>Extracting the data you just visualised</h3>", "_____no_output_____" ] ], [ [ "a_slice.data()", "_____no_output_____" ] ], [ [ "## Plotting the trend of the depth of maximum vertical change", "_____no_output_____" ] ], [ [ "def find_bottom(array):\n \"\"\"Find the bottom depth layer index\n :arg array: one dimesional array (profile at giventime stamp)\n :returns bottom: int, 1 + index of sea floor layer\n \"\"\"\n i=-1\n for value in np.flip(array):\n if value != 0:\n bottom = 39-i\n return bottom\n else:\n i=i+1", "_____no_output_____" ], [ "def max_delta(depths, truncated_array):\n \"\"\"return raw plot data for depth of maximum delta\n \"\"\"\n # time is axis 0, depth is axis 1\n difference = np.abs(np.diff(truncated_array, axis=1))\n data = (depths[np.argmax(difference, axis=1)])\n return data, difference", "_____no_output_____" ], [ "depths = sog2015.depthw.values\narray = sog2015.vovecrtz.sel(time_counter = slice(convert_timestamp('1 Jan 2015'), convert_timestamp('7 Jan 2015')))\nbottom_index = find_bottom(array[0].values)", "_____no_output_____" ], [ "truncated_array = array.isel(depthw = slice(0,bottom_index)).values\ntimes = array.time_counter.values", "_____no_output_____" ], [ "delta, difference = max_delta(depths,truncated_array)", "_____no_output_____" ], [ "fig = plt.figure(figsize=(10,5))\nplt.plot(times, delta)\nplt.xlim(times[0], times[-1])\nplt.ylim(depths[0], depths[-1])\nplt.hlines(depths[bottom_index-1], times[0], times[-1], label = 'sea floor')\nplt.hlines(depths[0:bottom_index], times[0], times[-1], linewidth = 0.25, label='layer depths')\nplt.gca().invert_yaxis()\nplt.ylabel('layer depth (m)')\nplt.title('Timeseries of depth of maximum chnage in vertical velocity')\nplt.legend()", "_____no_output_____" ] ], [ [ "## Salinity profiles with shaded range region", "_____no_output_____" ] ], [ [ "import seaborn as sns", "_____no_output_____" ], [ "palette = sns.color_palette(\"Reds\", n_colors = 14)", "_____no_output_____" ], [ "sal_sog2015 = xr.open_dataset('salinity_profiles/salinity_sog2015.nc')", "_____no_output_____" ], [ "A = sal_sog2015.sel(time_counter = slice(to_datetime64('1 Jan 2015'),to_datetime64('8 Jan 2015')))", "_____no_output_____" ], [ "fig = plt.figure(figsize = (10,10))\nax = plt.subplot(111)\ndepths = A.deptht.values.T\n#bottom = find_bottom(A.isel(time_counter= 0).vosaline.values)\nbottom = 11\ntry:\n for i in range(14):\n plt.plot(A.vosaline.isel(time_counter = 12*i).values[0: bottom],depths[0: bottom], label = A.time_counter.values[12*i], color = palette[i])\nexcept IndexError:\n pass\n# find the fill_between values\nlow, high = np.min(A.vosaline.values,axis = 0)[0: bottom], np.max(A.vosaline.values, axis=0)[0:bottom]\nmean = np.average(A.vosaline.values,axis = 0)[0: bottom]\nstddev = np.std(A.vosaline.values,axis = 0)[0: bottom]\nplt.plot(mean,depths[0: bottom], 'k--',label = 'Average Salinity')\nplt.fill_betweenx(depths[0:bottom],low, high, facecolor = 'lightgray', label = 'Range')\nplt.fill_betweenx(depths[0:bottom], mean-stddev, mean+stddev,facecolor = 'deepskyblue', label = '1 Std. 
Dev')\nax.set_ylim(depths[0], depths[bottom-1])\nplt.gca().invert_yaxis()\nplt.legend(loc='lower left')\nplt.ylabel('Ocean Depth [m]')\nplt.xlabel('Salinity [g kg-1]')\nplt.title('Salinity profiles over a week, showing profile every 12th hour')", "_____no_output_____" ] ], [ [ "<h2>Heat maps of Salinity</h2>", "_____no_output_____" ] ], [ [ "salinity_slice = sal_sog2015.sel(time_counter=slice(to_datetime64('1 Jan 2015'), to_datetime64('7 jan 2015')))", "_____no_output_____" ], [ "salinity_slice.vosaline.T.plot(cmap = cmocean.cm.haline)\nplt.gca().invert_yaxis()", "_____no_output_____" ] ], [ [ "## Difference between surface and bottom salinity", "_____no_output_____" ] ], [ [ "salinity_slice = sal_sog2015.sel(time_counter=slice(to_datetime64('1 Jan 2015'), to_datetime64('7 jan 2015')))", "_____no_output_____" ], [ "bottom = find_bottom(sal_sog2015.vosaline.isel(time_counter=0).values)", "_____no_output_____" ], [ "# plot the difference between the surface and bottom salinity\ndiff = salinity_slice.isel(deptht = 0) - salinity_slice.isel(deptht = bottom-1)\ndiff.vosaline.plot()\nplt.title('(Surface Salinity - Bottom Salinity) [g kg-1]')\nplt.ylabel('(Surface - Bottom Salinity) [g kg-1]')", "_____no_output_____" ], [ "depths = sal_sog2015.deptht.values\narray = sal_sog2015.vosaline.sel(time_counter = slice(to_datetime64('1 Jan 2015'), to_datetime64('7 Jan 2015')))\nbottom_index = find_bottom(array[0].values)\ntruncated_array = array.isel(deptht = slice(0,bottom_index)).values\ntimes = array.time_counter.values\ndelta, difference = max_delta(depths,truncated_array)\nfig = plt.figure(figsize=(10,5))\nplt.plot(times, delta)\nplt.xlim(times[0], times[-1])\nplt.ylim(depths[0], depths[-1])\nplt.hlines(depths[bottom_index-1], times[0], times[-1], label = 'sea floor')\nplt.hlines(depths[0:bottom_index], times[0], times[-1], linewidth = 0.25, label='layer depths')\nplt.gca().invert_yaxis()\nplt.ylabel('layer depth (m)')\nplt.title('Timeseries of Halocline depth')\nplt.legend()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06ae453dfc7137ebd69ec6afa10dc809f2c40b7
191,526
ipynb
Jupyter Notebook
data output/data until 04-08-2017/clean/merge sensor data + occupancy csv.ipynb
georgetown-analytics/crash-severity
990d736a43a3aba06ac41d92d7310307e9d9dc09
[ "MIT" ]
7
2017-03-28T19:53:57.000Z
2018-08-16T13:18:15.000Z
data output/data until 04-08-2017/clean/merge sensor data + occupancy csv.ipynb
georgetown-analytics/crash-severity
990d736a43a3aba06ac41d92d7310307e9d9dc09
[ "MIT" ]
null
null
null
data output/data until 04-08-2017/clean/merge sensor data + occupancy csv.ipynb
georgetown-analytics/crash-severity
990d736a43a3aba06ac41d92d7310307e9d9dc09
[ "MIT" ]
9
2017-03-30T17:43:52.000Z
2021-11-13T03:32:14.000Z
38.482218
143
0.338847
[ [ [ "import pandas as pd\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "_____no_output_____" ], [ "ls", "door_data.csv\r\nmerge sensor data + occupancy csv.ipynb\r\noccupancy_data.csv\r\nsensor_data.csv\r\n" ], [ "# Load occupance data as a dataframe with the 'datetime' column as its index\npeeps = 'occupancy_data.csv'\ndf = pd.read_csv(peeps, index_col='datetime', parse_dates=True)\n# add new datetime column \"dt\" which is a copy of datetime\ndf['dt'] = df.index", "_____no_output_____" ], [ "print(df.head())", " location count_operation count_change \\\ndatetime \n2017-03-18 08:59:09.769 Georgetown + 1 \n2017-03-18 08:59:13.994 Georgetown + 1 \n2017-03-18 08:59:15.326 Georgetown + 1 \n2017-03-18 08:59:15.977 Georgetown + 1 \n2017-03-18 08:59:17.561 Georgetown + 1 \n\n count_total dt \ndatetime \n2017-03-18 08:59:09.769 1 2017-03-18 08:59:09.769 \n2017-03-18 08:59:13.994 2 2017-03-18 08:59:13.994 \n2017-03-18 08:59:15.326 3 2017-03-18 08:59:15.326 \n2017-03-18 08:59:15.977 4 2017-03-18 08:59:15.977 \n2017-03-18 08:59:17.561 5 2017-03-18 08:59:17.561 \n" ], [ "df.describe()", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "# create a new column truncated to minute precision\ndf['dtm'] = df['dt'].values.astype('<M8[m]')\ndf", "_____no_output_____" ], [ "# show only datetime truncated by minute and count_total \ndf1 = df[['dtm','count_total']]\ndf1", "_____no_output_____" ], [ "# drop duplicates, keeping the row with the highest value\ndf1 = df1.groupby('dtm', group_keys=False).apply(lambda x: x.ix[x.count_total.idxmax()])\ndf1", "_____no_output_____" ], [ "# drop dtm column\ndel df1['dtm']\n# save df1 to csv\ndf1.to_csv('count_total.csv', sep=',')\ndf1", "_____no_output_____" ], [ "#Load sensor data as a dataframe with the 'datetime' column as its index\nsensor = 'sensor_data.csv'\nsensor_data = pd.read_csv(sensor, index_col='datetime', parse_dates=True)\nsensor_data['dt'] = sensor_data.index", "_____no_output_____" ], [ "print(sensor_data.head())", " location temperature humidity co2 light \\\ndatetime \n2017-03-25 09:05:58 Georgetown-default 22.6 36.9 781.0 430.0 \n2017-03-25 09:06:04 Georgetown-default 23.8 39.0 767.0 448.0 \n2017-03-25 09:06:10 Georgetown-default 23.8 39.0 754.0 423.0 \n2017-03-25 09:06:15 Georgetown-default 23.8 39.0 768.0 412.0 \n2017-03-25 09:06:21 Georgetown-default 23.8 39.0 758.0 428.0 \n\n noise bluetooth_devices bluetooth_non_personal_devices \\\ndatetime \n2017-03-25 09:05:58 511.0 1 NaN \n2017-03-25 09:06:04 510.0 8 NaN \n2017-03-25 09:06:10 511.0 8 NaN \n2017-03-25 09:06:15 492.0 8 NaN \n2017-03-25 09:06:21 491.0 9 NaN \n\n dt \ndatetime \n2017-03-25 09:05:58 2017-03-25 09:05:58 \n2017-03-25 09:06:04 2017-03-25 09:06:04 \n2017-03-25 09:06:10 2017-03-25 09:06:10 \n2017-03-25 09:06:15 2017-03-25 09:06:15 \n2017-03-25 09:06:21 2017-03-25 09:06:21 \n" ], [ "sensor_data.describe()", "_____no_output_____" ], [ "# create a new column truncated to minute precision\nsensor_data['dtm'] = sensor_data['dt'].values.astype('<M8[m]')", "_____no_output_____" ], [ "# show only datetime truncated by minute and count_total of people\nsensor_data1 = sensor_data[['dtm','temperature','humidity','co2','light','noise','bluetooth_devices','bluetooth_non_personal_devices']]\n", "_____no_output_____" ], [ "# drop duplicates, keeping the row with the highest value of temp\nsensor_data1 = sensor_data1.groupby('dtm', group_keys=False).apply(lambda x: 
x.ix[x.temperature.idxmax()])\nsensor_data1", "_____no_output_____" ], [ "# drop dtm column\ndel sensor_data1['dtm']\n# save sensor_data1 to csv\nsensor_data1.to_csv('sensor_data1.csv', sep=',')", "_____no_output_____" ], [ "ls", "count_total.csv\r\ndoor_data.csv\r\nmerge sensor data + occupancy csv.ipynb\r\noccupancy_data.csv\r\nsensor_data.csv\r\nsensor_data1.csv\r\nsensor_data1_count_total.csv\r\n" ], [ "#merge newly saved csv files\ncsv1 = pd.read_csv('sensor_data1.csv')\ncsv2 = pd.read_csv('count_total.csv')\nmerged = csv1.merge(csv2, on=\"dtm\", how=\"outer\").fillna(method='ffill')\nmerged.to_csv(\"sensor_data1_count_total.csv\", index=False)", "_____no_output_____" ], [ "# Import merged data as a dataframe with the 'datetime' column as its index\nmerged_data = 'sensor_data1_count_total.csv'\nmerged = pd.read_csv(merged_data, index_col='dtm', parse_dates=True)", "_____no_output_____" ], [ "merged", "_____no_output_____" ], [ "merged['count_total'].isnull().sum()", "_____no_output_____" ], [ "# save merged to csv\nmerged.to_csv('merged.csv', sep=',')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06afa4d4e9094e922bd79bab035a412ab5d6183
255,870
ipynb
Jupyter Notebook
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
127311c533635280be8ee98194c6a608532d06c6
[ "Apache-2.0" ]
18
2018-09-17T19:04:42.000Z
2021-08-08T03:59:56.000Z
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
127311c533635280be8ee98194c6a608532d06c6
[ "Apache-2.0" ]
null
null
null
lecture_3/lab_3_solutions.ipynb
jakirkham/JaneliaMLCourse
127311c533635280be8ee98194c6a608532d06c6
[ "Apache-2.0" ]
16
2018-09-17T17:12:13.000Z
2020-08-29T15:14:32.000Z
307.536058
52,332
0.922242
[ [ [ "## ML Lab 3\n### Neural Networks\n\nIn the following exercise class we explore how to design and train neural networks in various ways.\n\n#### Prerequisites:\n\nIn order to follow the exercises you need to:\n1. Activate your conda environment from last week via: `source activate <env-name>` \n2. Install tensorflow (https://www.tensorflow.org) via: `pip install tensorflow` (CPU-only)\n3. Install keras (provides high level wrapper for tensorflow) (https://keras.io) via: `pip install keras`", "_____no_output_____" ], [ "## Exercise 1: Create a 2 layer network that acts as an XOR gate using numpy.\n\nXOR is a fundamental logic gate that outputs a one whenever there is an odd parity of ones in its input and zero otherwise. For two inputs this can be thought of as an exclusive or operation and the associated boolean function is fully characterized by the following truth table.\n\n| X | Y | XOR(X,Y) |\n|---|---|----------|\n| 0 | 0 | 0 |\n| 0 | 1 | 1 |\n| 1 | 0 | 1 |\n| 1 | 1 | 0 |\n\nThe function of an XOR gate can also be understood as a classification problem on $v \\in \\{0,1\\}^2$ and we can think about designing a classifier acting as an XOR gate. It turns out that this problem is not solvable by any single layer perceptron (https://en.wikipedia.org/wiki/Perceptron) because the set of points $\\{(0,0), (0,1), (1,0), (1,1)\\}$ is not linearly seperable.\n\n**Design a two layer perceptron using basic numpy matrix operations that implements an XOR Gate on two inputs. Think about the flow of information and accordingly set the weight values by hand.**", "_____no_output_____" ], [ "### Data", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef generate_xor_data():\n X = [(i,j) for i in [0,1] for j in [0,1]]\n y = [int(np.logical_xor(x[0], x[1])) for x in X]\n return X, y\n \nprint(generate_xor_data())", "([(0, 0), (0, 1), (1, 0), (1, 1)], [0, 1, 1, 0])\n" ] ], [ [ "### Hints\nA single layer in a multilayer perceptron can be described by the equation $y = f(\\vec{b} + W\\vec{x})$ with $f$ the logistic function, a smooth and differentiable version of the step function, and defined as $f(z) = \\frac{1}{1+e^{-z}}$. $\\vec{b}$ is the so called bias, a constant offset vector and $W$ is the weight matrix. However, since we set the weights by hand feel free to use hard thresholding instead of using the logistic function. Write down the equation for a two layer MLP and implement it with numpy. 
For documentation see https://docs.scipy.org/doc/numpy-1.13.0/reference/ ", "_____no_output_____" ] ], [ [ "\"\"\"\nImplement your solution here.\n\"\"\"", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ], [ "| X | Y | AND(NOT X, Y) | AND(X,NOT Y) | OR[AND(NOT X, Y), AND(X, NOT Y)]| XOR(X,Y) |\n|---|---|---------------|--------------|---------------------------------|----------|\n| 0 | 0 | 0 | 0 | 0 | 0 |\n| 0 | 1 | 1 | 0 | 1 | 1 |\n| 1 | 0 | 0 | 1 | 1 | 1 |\n| 1 | 1 | 0 | 0 | 0 | 0 |\n\nImplement XOR as a combination of 2 AND Gates and 1 OR gate where each neuron in the network acts as one of these gates.", "_____no_output_____" ] ], [ [ "\"\"\"\nDefinitions:\n\nInput = np.array([X,Y])\n\n0 if value < 0.5\n1 if value >= 0.5\n\"\"\"\n\ndef threshold(vector):\n return (vector>=0.5).astype(float)\n\ndef mlp(x, W0, W1, b0, b1, f):\n x0 = f(np.dot(W0, x) + b0)\n x1 = f(np.dot(W1, x0) + b1)\n return x1\n\n# AND(NOT X, Y)\nw_andnotxy = np.array([-1.0, 1.0])\n# AND(X, NOT Y)\nw_andxnoty = np.array([1.0, -1.0])\n# W0 weight matrix:\nW0 = np.vstack([w_andnotxy, w_andxnoty])\n\n# OR(X,Y)\nw_or = np.array([1., 1.])\nW1 = w_or\n\n# No biases needed\nb0 = np.array([0.0,0.0])\nb1 = 0.0\n\nprint(\"Input\", \"Output\", \"XOR\")\nxx,yy = generate_xor_data()\nfor x,y in zip(xx, yy):\n print(x, int(mlp(x, W0, W1, b0, b1, threshold)),\" \", y)", "Input Output XOR\n(0, 0) 0 0\n(0, 1) 1 1\n(1, 0) 1 1\n(1, 1) 0 0\n" ] ], [ [ "## Exercise 2: Use Keras to design, train and evaluate a neural network that can classify points on a 2D plane.", "_____no_output_____" ], [ "### Data generator", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef generate_spiral_data(n_points, noise=1.0):\n n = np.sqrt(np.random.rand(n_points,1)) * 780 * (2*np.pi)/360\n d1x = -np.cos(n)*n + np.random.rand(n_points,1) * noise\n d1y = np.sin(n)*n + np.random.rand(n_points,1) * noise\n return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))), \n np.hstack((np.zeros(n_points),np.ones(n_points))))", "_____no_output_____" ] ], [ [ "### Training data", "_____no_output_____" ] ], [ [ "X_train, y_train = generate_spiral_data(1000)\n\nplt.title('Training set')\nplt.plot(X_train[y_train==0,0], X_train[y_train==0,1], '.', label='Class 1')\nplt.plot(X_train[y_train==1,0], X_train[y_train==1,1], '.', label='Class 2')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "### Test data", "_____no_output_____" ] ], [ [ "X_test, y_test = generate_spiral_data(1000)\n\nplt.title('Test set')\nplt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.', label='Class 1')\nplt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.', label='Class 2')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "### 2.1. Design and train your model\nThe current model performs badly, try to find a more advanced architecture that is able to solve the classification problem. Read the following code snippet and understand the involved functions. Vary width and depth of the network and play around with activation functions, loss functions and optimizers to achieve a better result. 
Read up on parameters and functions for sequential models at https://keras.io/getting-started/sequential-model-guide/.", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense\n\n\"\"\"\nReplace the following model with yours and try to achieve better classification performance\n\"\"\"\nbad_model = Sequential()\nbad_model.add(Dense(12, input_dim=2, activation='tanh'))\nbad_model.add(Dense(1, activation='sigmoid'))\n\nbad_model.compile(loss='mean_squared_error',\n optimizer='SGD', # SGD = Stochastic Gradient Descent\n metrics=['accuracy'])\n\n# Train the model\nbad_model.fit(X_train, y_train, epochs=150, batch_size=10, verbose=0)", "_____no_output_____" ] ], [ [ "### Predict", "_____no_output_____" ] ], [ [ "bad_prediction = np.round(bad_model.predict(X_test).T[0])", "_____no_output_____" ] ], [ [ "### Visualize", "_____no_output_____" ] ], [ [ "plt.subplot(1,2,1)\n\nplt.title('Test set')\nplt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.')\nplt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.')\nplt.subplot(1,2,2)\n\nplt.title('Bad model classification')\nplt.plot(X_test[bad_prediction==0,0], X_test[bad_prediction==0,1], '.')\nplt.plot(X_test[bad_prediction==1,0], X_test[bad_prediction==1,1], '.')\nplt.show()", "_____no_output_____" ] ], [ [ "### 2.2. Visualize the decision boundary of your model.", "_____no_output_____" ] ], [ [ "\"\"\"\nImplement your solution here.\n\"\"\"", "_____no_output_____" ] ], [ [ "## Solution", "_____no_output_____" ], [ "### Model design and training", "_____no_output_____" ] ], [ [ "from keras.layers import Dense, Dropout\n\ngood_model = Sequential()\ngood_model.add(Dense(64, input_dim=2, activation='relu'))\ngood_model.add(Dense(64, activation='relu'))\ngood_model.add(Dense(64, activation='relu'))\ngood_model.add(Dense(1, activation='sigmoid'))\n\ngood_model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\ngood_model.fit(X_train, y_train, epochs=150, batch_size=10, verbose=0)", "_____no_output_____" ] ], [ [ "### Prediction", "_____no_output_____" ] ], [ [ "good_prediction = np.round(good_model.predict(X_test).T[0])", "_____no_output_____" ] ], [ [ "### Visualization", "_____no_output_____" ], [ "#### Performance", "_____no_output_____" ] ], [ [ "plt.subplot(1,2,1)\nplt.title('Test set')\nplt.plot(X_test[y_test==0,0], X_test[y_test==0,1], '.')\nplt.plot(X_test[y_test==1,0], X_test[y_test==1,1], '.')\nplt.subplot(1,2,2)\nplt.title('Good model classification')\nplt.plot(X_test[good_prediction==0,0], X_test[good_prediction==0,1], '.')\nplt.plot(X_test[good_prediction==1,0], X_test[good_prediction==1,1], '.')\nplt.show()", "_____no_output_____" ] ], [ [ "#### Decision boundary", "_____no_output_____" ] ], [ [ "# Generate grid:\nline = np.linspace(-15,15)\nxx, yy = np.meshgrid(line,line)\ngrid = np.stack((xx,yy))\n\n# Reshape to fit model input size:\ngrid = grid.T.reshape(-1,2)\n\n# Predict:\ngood_prediction = good_model.predict(grid)\nbad_prediction = bad_model.predict(grid)\n\n# Reshape to grid for visualization:\nplt.title(\"Good Decision Boundary\")\ngood_prediction = good_prediction.T[0].reshape(len(line),len(line))\nplt.contourf(xx,yy,good_prediction)\nplt.show()\n\nplt.title(\"Bad Decision Boundary\")\nbad_prediction = bad_prediction.T[0].reshape(len(line),len(line))\nplt.contourf(xx,yy,bad_prediction)\nplt.show()\n", "_____no_output_____" ] ], [ [ "## Design, train and test a neural network that is able to classify MNIST digits using Keras.", "_____no_output_____" ], [ 
"### Data", "_____no_output_____" ] ], [ [ "from keras.datasets import mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\"\"\"\nReturns:\n2 tuples:\n\nx_train, x_test: uint8 array of grayscale image data with shape (num_samples, 28, 28).\ny_train, y_test: uint8 array of digit labels (integers in range 0-9) with shape (num_samples,).\n\"\"\"\n\n# Show example data\nplt.subplot(1,4,1)\nplt.imshow(x_train[0], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,2)\nplt.imshow(x_train[1], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,3)\nplt.imshow(x_train[2], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,4)\nplt.imshow(x_train[3], cmap=plt.get_cmap('gray'))\nplt.show()", "_____no_output_____" ], [ "\"\"\"\nImplement your solution here.\n\"\"\"", "_____no_output_____" ] ], [ [ "### Solution", "_____no_output_____" ] ], [ [ "from keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D\n\n\"\"\"\nWe need to add a channel dimension\nto the image input.\n\"\"\"\nx_train = x_train.reshape(x_train.shape[0],\n x_train.shape[1],\n x_train.shape[2],\n 1)\nx_test = x_test.reshape(x_test.shape[0],\n x_test.shape[1],\n x_test.shape[2],\n 1)\n\"\"\"\nTrain the image using 32-bit floats normalized\nbetween 0 and 1 for numerical stability.\n\"\"\"\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\ninput_shape = (x_train.shape[1], x_train.shape[2], 1)\n\n\"\"\"\nOutput should be a 10 dimensional 1-hot vector,\nnot just an integer denoting the digit.\nThis is due to our use of softmax to \"squish\" network\noutput for classification.\n\"\"\"\ny_train = to_categorical(y_train, 10)\ny_test = to_categorical(y_test, 10)\n\n\n\"\"\"\nWe construct a CNN with 2 convolution layers \nand use max-pooling between each convolution layer;\nwe finish with two dense layers for classification.\n\"\"\"\ncnn_model = Sequential()\ncnn_model.add(Conv2D(filters=32,\n kernel_size=(3,3),\n activation='relu',\n input_shape=input_shape))\ncnn_model.add(MaxPooling2D(pool_size=(2, 2)))\ncnn_model.add(Conv2D(filters=32,\n kernel_size=(3, 3),\n activation='relu'))\ncnn_model.add(MaxPooling2D(pool_size=(2, 2)))\ncnn_model.add(Flatten())\ncnn_model.add(Dense(64, activation='relu'))\ncnn_model.add(Dense(10, activation='softmax')) # softmax for classification\n\ncnn_model.compile(loss='categorical_crossentropy',\n optimizer='adagrad', # adaptive optimizer (still similar to SGD)\n metrics=['accuracy'])\n\n\"\"\"Train the CNN model and evaluate test accuracy.\"\"\"\ncnn_model.fit(x_train,\n y_train,\n batch_size=128,\n epochs=10,\n verbose=1,\n validation_data=(x_test, y_test)) # never actually validate using test data!\n\n\nscore = cnn_model.evaluate(x_test, y_test, verbose=0)\nprint('MNIST test set accuracy:', score[1])\n\n\"\"\"Visualize some test data and network output.\"\"\"\ny_predict = cnn_model.predict(x_test, verbose=0)\ny_predict_digits = [np.argmax(y_predict[i]) for i in range(y_predict.shape[0])]\nplt.subplot(1,4,1)\nplt.imshow(x_test[0,:,:,0], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,2)\nplt.imshow(x_test[1,:,:,0], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,3)\nplt.imshow(x_test[2,:,:,0], cmap=plt.get_cmap('gray'))\nplt.subplot(1,4,4)\nplt.imshow(x_test[3,:,:,0], cmap=plt.get_cmap('gray'))\nplt.show()\n\nprint(\"CNN predictions: {0}, {1}, {2}, {3}\".format(y_predict_digits[0],\n y_predict_digits[1],\n y_predict_digits[2],\n y_predict_digits[3]))", "Train on 60000 samples, validate 
on 10000 samples\nEpoch 1/10\n60000/60000 [==============================] - 38s 630us/step - loss: 0.1783 - acc: 0.9452 - val_loss: 0.0650 - val_acc: 0.9800\nEpoch 2/10\n60000/60000 [==============================] - 38s 636us/step - loss: 0.0683 - acc: 0.9798 - val_loss: 0.0501 - val_acc: 0.9847\nEpoch 3/10\n60000/60000 [==============================] - 36s 597us/step - loss: 0.0536 - acc: 0.9844 - val_loss: 0.0448 - val_acc: 0.9855\nEpoch 4/10\n60000/60000 [==============================] - 50s 839us/step - loss: 0.0457 - acc: 0.9867 - val_loss: 0.0391 - val_acc: 0.9873\nEpoch 5/10\n60000/60000 [==============================] - 40s 668us/step - loss: 0.0407 - acc: 0.9878 - val_loss: 0.0392 - val_acc: 0.9876\nEpoch 6/10\n60000/60000 [==============================] - 35s 586us/step - loss: 0.0366 - acc: 0.9888 - val_loss: 0.0391 - val_acc: 0.9869\nEpoch 7/10\n60000/60000 [==============================] - 38s 640us/step - loss: 0.0333 - acc: 0.9903 - val_loss: 0.0364 - val_acc: 0.9883\nEpoch 8/10\n60000/60000 [==============================] - 39s 645us/step - loss: 0.0310 - acc: 0.9910 - val_loss: 0.0345 - val_acc: 0.9876\nEpoch 9/10\n60000/60000 [==============================] - 38s 629us/step - loss: 0.0288 - acc: 0.9913 - val_loss: 0.0325 - val_acc: 0.9898\nEpoch 10/10\n60000/60000 [==============================] - 35s 579us/step - loss: 0.0266 - acc: 0.9923 - val_loss: 0.0318 - val_acc: 0.9889\nMNIST test set accuracy: 0.9889\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d06b08c855f73f38eca5a584d57e14d7a250e8a4
131,512
ipynb
Jupyter Notebook
src/reference/Python_Stock/Technical_Indicators/Linear_Weighted_Moving_Average.ipynb
sumukshashidhar/toreda
5ffb1810a1dac448c417a6e8aab7f5213c701cba
[ "BSD-3-Clause" ]
3
2020-04-30T17:26:24.000Z
2021-12-29T19:00:45.000Z
src/reference/Python_Stock/Technical_Indicators/Linear_Weighted_Moving_Average.ipynb
sumukshashidhar/toreda
5ffb1810a1dac448c417a6e8aab7f5213c701cba
[ "BSD-3-Clause" ]
null
null
null
src/reference/Python_Stock/Technical_Indicators/Linear_Weighted_Moving_Average.ipynb
sumukshashidhar/toreda
5ffb1810a1dac448c417a6e8aab7f5213c701cba
[ "BSD-3-Clause" ]
4
2020-08-01T03:17:27.000Z
2021-12-29T10:09:51.000Z
195.702381
59,772
0.88068
[ [ [ "# Linearly Weighted Moving Average ", "_____no_output_____" ], [ "https://www.investopedia.com/terms/l/linearlyweightedmovingaverage.asp", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# fix_yahoo_finance is used to fetch data \nimport fix_yahoo_finance as yf\nyf.pdr_override()", "_____no_output_____" ], [ "# input\nsymbol = 'AAPL'\nstart = '2018-08-01'\nend = '2019-01-01'\n\n# Read data \ndf = yf.download(symbol,start,end)\n\n# View Columns\ndf.head()", "[*********************100%***********************] 1 of 1 downloaded\n" ], [ "def linear_weight_moving_average(close, n):\n lwma = [np.nan] * n\n for i in range(n, len(close)):\n lwma.append((close[i - n : i] * (np.arange(n) + 1)).sum()/(np.arange(n + 1).sum()))\n return lwma", "_____no_output_____" ], [ "df['LWMA'] = linear_weight_moving_average(df['Adj Close'], 5)", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ], [ "fig = plt.figure(figsize=(14,10))\nax1 = plt.subplot(2, 1, 1)\nax1.plot(df['Adj Close'])\nax1.set_title('Stock '+ symbol +' Closing Price')\nax1.set_ylabel('Price')\n\nax2 = plt.subplot(2, 1, 2)\nax2.plot(df['LWMA'], label='Linearly Weighted Moving Average', color='red')\n#ax2.axhline(y=0, color='blue', linestyle='--')\n#ax2.axhline(y=0.5, color='darkblue')\n#ax2.axhline(y=-0.5, color='darkblue')\nax2.grid()\nax2.set_ylabel('Linearly Weighted Moving Average')\nax2.set_xlabel('Date')\nax2.legend(loc='best')", "_____no_output_____" ] ], [ [ "## Candlestick with Linearly Weighted Moving Average ", "_____no_output_____" ] ], [ [ "from matplotlib import dates as mdates\nimport datetime as dt\n\ndfc = df.copy()\ndfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']\n#dfc = dfc.dropna()\ndfc = dfc.reset_index()\ndfc['Date'] = pd.to_datetime(dfc['Date'])\ndfc['Date'] = dfc['Date'].apply(mdates.date2num)\ndfc.head()", "_____no_output_____" ], [ "from mpl_finance import candlestick_ohlc\n\nfig = plt.figure(figsize=(14,10))\nax1 = plt.subplot(2, 1, 1)\ncandlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)\nax1.xaxis_date()\nax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\nax1.grid(True, which='both')\nax1.minorticks_on()\nax1v = ax1.twinx()\ncolors = dfc.VolumePositive.map({True: 'g', False: 'r'})\nax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)\nax1v.axes.yaxis.set_ticklabels([])\nax1v.set_ylim(0, 3*df.Volume.max())\nax1.set_title('Stock '+ symbol +' Closing Price')\nax1.set_ylabel('Price')\n\nax2 = plt.subplot(2, 1, 2)\nax2.plot(df['LWMA'], label='Linearly Weighted Moving Average', color='red')\nax2.grid()\nax2.set_ylabel('Linearly Weighted Moving Average')\nax2.set_xlabel('Date')\nax2.legend(loc='best')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06b10263ae185f813bb4a9ba6178da14021e52b
587,230
ipynb
Jupyter Notebook
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
f3c75e0e9e047c01ad7a74b6f24655406bbd0dc6
[ "MIT" ]
null
null
null
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
f3c75e0e9e047c01ad7a74b6f24655406bbd0dc6
[ "MIT" ]
null
null
null
assignments/hw2/hw2.ipynb
jschmidtnj/ee627
f3c75e0e9e047c01ad7a74b6f24655406bbd0dc6
[ "MIT" ]
null
null
null
56.687904
38,784
0.593795
[ [ [ "# data acquisition / processing homework 2\n\n> I pledge my Honor that I have abided by the Stevens Honor System. - Joshua Schmidt 2/27/21\n", "_____no_output_____" ], [ "## Problem 1\n\na. For a stationary AR(1) time series x(t), x(t) is uncorrelated to x(t-l) for l>=2.\n\nThis is false. For AR(1), $x(t) = a_0 + a_1 \\cdot x(t - 1) + \\epsilon_t$. In this expression, $x(t)$ is correlated to $x(t - 1)$, with a value of $a_1$. $x(t - 1)$ can be expanded to $a_0 + a_1 \\cdot x(t - 2) + \\epsilon_{t - 1}$, with a correlation of $a_1^2$. Subsequent members or the series can be expanded, for any value of l. Therefore, for all values of l>=2, $x(t)$ is correlated to $x(t-l)$.\n\nb. For a stationary MA(1) time series x(t), you will observe a coefficient cliff after time lag l>=1 in the ACF plot.\n\nThis is true. In the ACF plot, there is decrease in the coefficients when lagging the time by 1>=1 in the plot. This is because noise is uncorrelated, and contains no information. ACF(K) = 0.", "_____no_output_____" ], [ "## Problem 2\n\nFind the best predictive model for each of the time series, using the techniques in the lecture.\n", "_____no_output_____" ] ], [ [ "# imports\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.tsa.arima.model import ARIMA", "_____no_output_____" ], [ "q2_data = pd.read_csv('./q2.csv', header=None)\nprint('question 2 samples:')\nq2_data.head()", "question 2 samples:\n" ], [ "q2_plot = sns.lineplot(data=q2_data)\nq2_plot.set_title('q2 data')\nq2_plot.set(xlabel='count', ylabel='value')\nplt.show()\n# graph looks stationary, not much variance", "_____no_output_____" ], [ "plot_acf(q2_data, title='q2 acf')\nplt.show()", "_____no_output_____" ], [ "plot_pacf(q2_data, title='q2 pacf', zero=False)\nplt.show()", "_____no_output_____" ] ], [ [ "Looking at these plots, the acf quickly converges towards 0 (like a cliff), but the pacf takes a lag of 9 before finally converging towards 0 (it is gradual). Therefore, the best predictive model of this time series is most likely an MA model, maybe moving average of 2.", "_____no_output_____" ] ], [ [ "q2_model = ARIMA(q2_data, order=(0, 0, 4))\nq2_model_fit = q2_model.fit()\nq2_model_fit.summary()", "/home/joshua/anaconda3/lib/python3.8/site-packages/statsmodels/base/model.py:567: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n warn(\"Maximum Likelihood optimization failed to converge. \"\n" ], [ "q2_residuals = pd.DataFrame(q2_model_fit.resid)\nplot_acf(q2_residuals, title='q2 residuals acf')\nplt.show()", "_____no_output_____" ], [ "plot_pacf(q2_residuals, title='q2 residuals pacf', zero=False)\nplt.show()", "_____no_output_____" ], [ "q3_data = pd.read_csv('./q3.csv', header=None)\nprint('question 3 samples:')\nq3_data.head()", "question 3 samples:\n" ], [ "q3_plot = sns.lineplot(data=q3_data)\nq3_plot.set_title('q3 data')\nq3_plot.set(xlabel='count', ylabel='value')\nplt.show()\n# graph does not look stationary", "_____no_output_____" ], [ "plot_acf(q3_data, title='q3 acf')\nplt.show()", "_____no_output_____" ], [ "plot_pacf(q3_data, title='q3 pacf', zero=False)\nplt.show()", "_____no_output_____" ] ], [ [ "Looking at these plots, the acf does not converge to 0, but instead slowly decreases in value while the pacf quickly converges towards 0 (like a cliff). 
This suggests the correlations are genuine rather than a statistical fluke and point to an autoregressive process; since the series did not look stationary, the data are differenced once before fitting (the d=1 term in the ARIMA order below).", "_____no_output_____" ] ], [ [ "q3_model = ARIMA(q3_data, order=(3, 1, 2))\nq3_model_fit = q3_model.fit()\nq3_model_fit.summary()", "/home/joshua/anaconda3/lib/python3.8/site-packages/statsmodels/base/model.py:567: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals\n warn(\"Maximum Likelihood optimization failed to converge. \"\n" ], [ "q3_residuals = pd.DataFrame(q3_model_fit.resid)\nplot_acf(q3_residuals, title='q3 residuals acf')\nplt.show()", "_____no_output_____" ], [ "plot_pacf(q3_residuals, title='q3 residuals pacf', zero=False)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06b1389ea7426bfba040b156109b6b6a581d04a
30,330
ipynb
Jupyter Notebook
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
4ee77146675631175866137e4bec6a424cfb8c3b
[ "Apache-2.0" ]
null
null
null
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
4ee77146675631175866137e4bec6a424cfb8c3b
[ "Apache-2.0" ]
null
null
null
SO runner.ipynb
EngTurtle/VISSIM_Routing_Thesis
4ee77146675631175866137e4bec6a424cfb8c3b
[ "Apache-2.0" ]
null
null
null
28.00554
1,258
0.434685
[ [ [ "## Initiate the vissim instance", "_____no_output_____" ] ], [ [ "# COM-Server\nimport win32com.client as com\nimport igraph\nimport qgrid\nfrom VISSIM_helpers import VissimRoadNet\nfrom os.path import abspath, join, exists\nimport os\nfrom shutil import copyfile\nimport pandas as pd\nimport math\nfrom pythoncom import com_error", "_____no_output_____" ] ], [ [ "Add autocompletion for VISSIM COM Object", "_____no_output_____" ] ], [ [ "from IPython.utils.generics import complete_object\n\n@complete_object.register(com.DispatchBaseClass)\ndef complete_dispatch_base_class(obj, prev_completions):\n try:\n ole_props = set(obj._prop_map_get_).union(set(obj._prop_map_put_))\n return list(ole_props) + prev_completions\n except AttributeError:\n pass", "_____no_output_____" ] ], [ [ "Start Vissim and load constants", "_____no_output_____" ] ], [ [ "Vissim = com.gencache.EnsureDispatch(\"Vissim.Vissim\")\nfrom win32com.client import constants as c", "_____no_output_____" ] ], [ [ "Setting the parameters used for simulation", "_____no_output_____" ] ], [ [ "DTA_Parameters = dict(\n # DTA Parameters\n EvalInt = 600, # seconds\n ScaleTotVol = False,\n ScaleTotVolPerc = 1,\n CostFile = 'costs.bew',\n ChkEdgOnReadingCostFile = True,\n PathFile = 'paths.weg',\n ChkEdgOnReadingPathFile = True,\n CreateArchiveFiles = True,\n VehClasses = '',\n)\n\n# Simulation parameters\nSim_Parameters = dict(\n NumRuns = 1,\n RandSeedIncr = 0,\n UseMaxSimSpeed = True,\n SimBreakAt = 600,\n NumCores = 8,\n)\n\nFileName = abspath(r\"..\\SO sim files\\Vol100per.inpx\")\nWorkingFolder = abspath(r\"..\\SO sim files\")", "_____no_output_____" ], [ "def current_period():\n return int(math.ceil(Vissim.Simulation.SimulationSecond / DTA_Parameters['EvalInt']))", "_____no_output_____" ] ], [ [ "Resetting edge and path cost files", "_____no_output_____" ] ], [ [ "default_cost_file = abspath('..\\SO sim files\\costs_020.bew')\ndefualt_path_file = abspath('..\\SO sim files\\paths_020.weg')\n\ncurrent_cost_file = abspath(join(WorkingFolder, DTA_Parameters['CostFile']))\nif exists(current_cost_file):\n os.remove(current_cost_file)\ncopyfile(default_cost_file, current_cost_file)\n\ncurrent_path_file = abspath(join(WorkingFolder, DTA_Parameters['PathFile']))\nif exists(current_path_file):\n os.remove(current_path_file)\ncopyfile(defualt_path_file, current_path_file)", "_____no_output_____" ] ], [ [ "Load the test network", "_____no_output_____" ] ], [ [ "Vissim.LoadNet(FileName)", "_____no_output_____" ] ], [ [ "Read dynamic assignment network", "_____no_output_____" ] ], [ [ "vis_net = Vissim.Net\nvis_net.Paths.ReadDynAssignPathFile()", "_____no_output_____" ], [ "network_graph = VissimRoadNet(vis_net)", "_____no_output_____" ] ], [ [ "Check if dynamic assignment graph has changed", "_____no_output_____" ] ], [ [ "ref_edge_list = pd.read_pickle(\"edges_attr.pkl.gz\")\nassert (network_graph.visedges['ToNode'] == ref_edge_list['ToNode']).all()\nnetwork_graph.save(join(WorkingFolder, \"network_graph.pkl.gz\"), format=\"picklez\")", "_____no_output_____" ] ], [ [ "We start by opening the network to be tested and adjust its settings", "_____no_output_____" ] ], [ [ "DynamicAssignment = Vissim.Net.DynamicAssignment\nfor attname, attvalue in DTA_Parameters.items():\n DynamicAssignment.SetAttValue(attname, attvalue)\n \nSimulation = Vissim.Net.Simulation\nfor attname, attvalue in Sim_Parameters.items():\n Simulation.SetAttValue(attname, attvalue)", "_____no_output_____" ] ], [ [ "Run first DTA period as usual", "_____no_output_____" ] ], 
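[ [ "The loop below advances the simulation step by step while the first evaluation period is still running, i.e. until `current_period()` reaches 2. `current_period()` maps the current simulation second to a 1-based period of length `EvalInt` (600 s as set above) using a ceiling division; a minimal standalone sketch of that arithmetic, independent of the VISSIM COM objects:", "_____no_output_____" ] ], [ [ "# Illustrative sketch only - mirrors the ceiling division used by current_period(), assuming EvalInt = 600\nimport math\n\ndef period_of(second, eval_int=600):\n    # 1-based evaluation period that a given simulation second falls into\n    return int(math.ceil(second / eval_int))\n\n# seconds 1..600 fall in period 1, 601..1200 in period 2, and so on\nprint(period_of(1), period_of(600), period_of(601), period_of(4499))  # 1 1 2 8", "_____no_output_____" ] ], 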
[ [ "Vissim.Graphics.CurrentNetworkWindow.SetAttValue(\"QuickMode\", 1)\nSimulation.RunSingleStep()\nwhile current_period() < 2:\n network_graph.update_volume(vis_net)\n Simulation.RunSingleStep()", "_____no_output_____" ] ], [ [ "Run simulation with custom route assignment", "_____no_output_____" ] ], [ [ "bad_paths = []\nwhile True:\n network_graph.update_weights(vis_net)\n new_vehs = vis_net.Vehicles.GetDeparted()\n for veh in new_vehs:\n origin_lot = int(veh.AttValue('OrigParkLot'))\n destination_lot = int(veh.AttValue('DestParkLot'))\n node_paths, edge_paths = network_graph.parking_lot_routes(origin_lot, destination_lot)\n try:\n vis_path = vis_net.Paths.AddPath(origin_lot, destination_lot, [str(node) for node in node_paths[0]])\n veh.AssignPath(vis_path)\n except com_error:\n bad_paths.append((node_paths[0], edge_paths[0]))\n network_graph.update_volume(vis_net)\n if Vissim.Simulation.SimulationSecond > 4499:\n break\n Vissim.Simulation.RunSingleStep()\n\nVissim.Simulation.RunContinuous()", "_____no_output_____" ], [ "vis_net.Paths.AddPath(origin_lot, destination_lot, [str(node) for node in node_paths[0]])", "_____no_output_____" ], [ "veh.AttValue('No')", "_____no_output_____" ], [ "from pythoncom import com_error", "_____no_output_____" ], [ "node_paths[0]", "_____no_output_____" ], [ "edge_weights = network_graph.es[[ed - 1 for ed in edge_paths[0]]]['weight']\nprint(sum(edge_weights))\npd.DataFrame(list(zip(edge_paths[0], edge_weights)), columns=['edge', 'edge_weights'])", "752.8113709490541\n" ], [ "edges = [int(ed) for ed in veh.Path.AttValue('EdgeSeq').split(',')]\nedge_weights = network_graph.es[[ed - 1 for ed in edges]]['weight']\nprint(sum(edge_weights))\npd.DataFrame(list(zip(edges, edge_weights)), columns=['edge', 'edge_weights'])", "1302.5067067757254\n" ], [ "Vissim.Simulation.RunContinuous()", "_____no_output_____" ], [ "Vissim.Exit()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06b13c1a31316343116e199fab62c518a3dedc5
187,567
ipynb
Jupyter Notebook
feature-engineering-and-data-transformations/coordinate-transformations/demo.ipynb
minesh1291/Keep-It-Up
232e917012197aaca4fdf25faee81ac5e85575e0
[ "MIT" ]
null
null
null
feature-engineering-and-data-transformations/coordinate-transformations/demo.ipynb
minesh1291/Keep-It-Up
232e917012197aaca4fdf25faee81ac5e85575e0
[ "MIT" ]
null
null
null
feature-engineering-and-data-transformations/coordinate-transformations/demo.ipynb
minesh1291/Keep-It-Up
232e917012197aaca4fdf25faee81ac5e85575e0
[ "MIT" ]
null
null
null
952.116751
99,708
0.953713
[ [ [ "from sklearn.datasets import make_circles\nfrom matplotlib import pyplot\nfrom pandas import DataFrame\nimport seaborn as sns\n# generate 2d classification dataset\nX, y = make_circles(n_samples=1000, noise=0.05)\n# scatter plot, dots colored by class value\ndf = DataFrame(dict(x=X[:,0], y=X[:,1], label=y))", "_____no_output_____" ], [ "sns.scatterplot(x=\"x\", y=\"y\", hue=\"label\", data=df)", "_____no_output_____" ], [ "df[:3]", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "# convert to polar coordinates: radius and angle of each point\nr = np.sqrt(df[\"x\"]**2 + df[\"y\"]**2)\n\nth = np.arctan2(df[\"y\"], df[\"x\"])\n\n# in (r, theta) space the two circles become separable along the radius\nsns.scatterplot(x=r, y=th, hue=df[\"label\"])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d06b17151d09c673722c29149852b8db3963c020
12,217
ipynb
Jupyter Notebook
notebooks/session4_inclass_rdkm.ipynb
agnesbn/cds-language
b943915331cf0ca3281434c9aee186569b07ac3d
[ "MIT" ]
null
null
null
notebooks/session4_inclass_rdkm.ipynb
agnesbn/cds-language
b943915331cf0ca3281434c9aee186569b07ac3d
[ "MIT" ]
null
null
null
notebooks/session4_inclass_rdkm.ipynb
agnesbn/cds-language
b943915331cf0ca3281434c9aee186569b07ac3d
[ "MIT" ]
null
null
null
22.293796
240
0.542523
[ [ [ "## Some more on ```spaCy``` and ```pandas```", "_____no_output_____" ], [ "First we want to import some of the packages we need.", "_____no_output_____" ] ], [ [ "import os\nimport spacy\n\n# Remember we need to initialise spaCy\nnlp = spacy.load(\"en_core_web_sm\")", "_____no_output_____" ] ], [ [ "We can inspect this object and see that it's what we've been called a ```spaCy``` object. ", "_____no_output_____" ] ], [ [ "type(nlp)", "_____no_output_____" ] ], [ [ "We use this ```spaCy``` object to create annotated outputs, what we call a ```Doc``` object.", "_____no_output_____" ] ], [ [ "example = \"This is a sentence written in English\"", "_____no_output_____" ], [ "doc = nlp(example)", "_____no_output_____" ], [ "type(doc)", "_____no_output_____" ] ], [ [ "```Doc``` objects are sequences of tokens, meaning we can iterate over the tokens and output specific annotations that we want such as POS tag or lemma.", "_____no_output_____" ] ], [ [ "for token in doc:\n print(token.text, token.pos_, token.tag_, token.lemma_)", "_____no_output_____" ] ], [ [ "__Reading data with ```pandas```__", "_____no_output_____" ], [ "```pandas``` is the main library in Python for working with DataFrames. These are tabular objects of mixed data types, comprising rows and columns.\n\nIn ```pandas``` vocabulary, a column is called a ```Series```, which is like a sophisticated list. I'll be using the names ```Series``` and column pretty interchangably.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "in_file = os.path.join(\"..\", \"data\", \"labelled_data\", \"fake_or_real_news.csv\")", "_____no_output_____" ], [ "data = pd.read_csv(in_file)", "_____no_output_____" ] ], [ [ "We can use ```.sample()``` to take random samples of the dataframe.", "_____no_output_____" ] ], [ [ "data.sample(5)", "_____no_output_____" ] ], [ [ "To delete unwanted columns, we can do the following:", "_____no_output_____" ] ], [ [ "del data[\"Unnamed: 0\"]", "_____no_output_____" ], [ "type(data[\"label\"])", "_____no_output_____" ] ], [ [ "We can count the distribution of possible values in our data using ```.value_counts()``` - e.g. how many REAL and FAKE news entries do we have in our DataFrame?", "_____no_output_____" ] ], [ [ "data[\"label\"].value_counts()", "_____no_output_____" ] ], [ [ "__Filter on columns__", "_____no_output_____" ], [ "To filter on columns, we define a condition on which we want to filter and use that to filer our DataFrame. 
We use the square-bracket syntax, just as if we were slicing a list or string.", "_____no_output_____" ] ], [ [ "data[\"label\"]==\"FAKE\"", "_____no_output_____" ], [ "data[\"label\"]==\"REAL\"", "_____no_output_____" ] ], [ [ "Here we create two new dataframes, one with only fake news text, and one with only real news text.", "_____no_output_____" ] ], [ [ "fake_news_df = data[data[\"label\"]==\"FAKE\"]\nreal_news_df = data[data[\"label\"]==\"REAL\"]", "_____no_output_____" ], [ "fake_news_df[\"label\"].value_counts()", "_____no_output_____" ], [ "real_news_df[\"label\"].value_counts()", "_____no_output_____" ] ], [ [ "__Counters__", "_____no_output_____" ], [ "In the following cell, you can see how to use a 'counter' to count how many entries are in a list.\n\nThe += operator adds 1 to the variable ```counter``` for every entry in the list.", "_____no_output_____" ] ], [ [ "counter = 0\ntest_list = range(0,100)\n\nfor entry in test_list:\n counter += 1", "_____no_output_____" ] ], [ [ "__Counting features in data__", "_____no_output_____" ], [ "Using the same logic, we can count how often adjectives (```JJ```) appear in our data. \n\nThis is useful from a lingustic perspective; we could now, for example, figure out how many of each part of speech can be found in our data.", "_____no_output_____" ] ], [ [ "# create counters\nadj_count = 0\n\n# process texts in batch\nfor doc in nlp.pipe(fake_news_df[\"title\"], batch_size=500):\n for token in doc:\n if token.tag_ == \"JJ\":\n adj_count += 1", "_____no_output_____" ] ], [ [ "In this case, we're using ```nlp.pipe``` from ```spaCy``` to group the entries together into batches of 500 at a time.\n\nWhy?\n\nEverytime we execute ```nlp(text)``` it incurs a small computational overhead which means that scaling becomes an issue. An overhead of 0.01s per document becomes an issue when dealing with 1,000,000 or 10,000,000 or 100,000,000...\n\nIf we batch, we can therefore be a bit more efficient. It also allows us to keep our ```spaCy``` logic compact and together, which becomes useful for more complex tasks.", "_____no_output_____" ] ], [ [ "print(adj_count)", "_____no_output_____" ] ], [ [ "## Sentiment with ```spaCy```", "_____no_output_____" ], [ "To work with spaCyTextBlob, we need to make sure that we are working with ```spacy==2.3.5```. 
\n\nFollow the separate instructions posted to Slack to make this work.", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport spacy\nfrom spacytextblob.spacytextblob import SpacyTextBlob\n# initialise spacy\nnlp = spacy.load(\"en_core_web_sm\")", "_____no_output_____" ] ], [ [ "Here, we initialise spaCyTextBlob and add it as a new component to our ```spaCy``` nlp pipeline.", "_____no_output_____" ] ], [ [ "spacy_text_blob = SpacyTextBlob()\nnlp.add_pipe(spacy_text_blob)", "_____no_output_____" ] ], [ [ "Let's test spaCyTextBlob on a single text, specifically Virgian Woolf's _To The Lighthouse_, published in 1927.", "_____no_output_____" ] ], [ [ "text_file = os.path.join(\"..\", \"data\", \"100_english_novels\", \"corpus\", \"Woolf_Lighthouse_1927.txt\")", "_____no_output_____" ], [ "with open(text_file, \"r\", encoding=\"utf-8\") as file:\n text = file.read()", "_____no_output_____" ], [ "print(text[:1000])", "_____no_output_____" ] ], [ [ "We use ```spaCy``` to create a ```Doc``` object for the entire text (how might you do this in batch?)", "_____no_output_____" ] ], [ [ "doc = nlp(text)", "_____no_output_____" ] ], [ [ "We can extract the polarity for each sentence in the novel and create list of scores per sentence.", "_____no_output_____" ] ], [ [ "polarity = []\n\nfor sentence in doc.sents:\n score = sentence._.sentiment.polarity\n polarity.append(score)", "_____no_output_____" ], [ "polarity[:10]", "_____no_output_____" ] ], [ [ "We can create a quick and cheap plot using matplotlib - this is only fine in Jupyter Notebooks, don't do this in the wild!", "_____no_output_____" ] ], [ [ "plt.plot(polarity)", "_____no_output_____" ] ], [ [ "We can the use some fancy methods from ```pandas``` to calculate a rolling mean over a certain window length.\n\nFor example, we group together our polarity scores into a window of 100 sentences at a time and calculate an average on that window.", "_____no_output_____" ] ], [ [ "smoothed_sentiment = pd.Series(polarity).rolling(100).mean()", "_____no_output_____" ] ], [ [ "This plot with a rolling average shows us a 'smoothed' output showing the rolling average over time, helping to cut through the noise.", "_____no_output_____" ] ], [ [ "plt.plot(smoothed_sentiment)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06b1742af233d532353afb5b8dfc2733366ba5a
23,158
ipynb
Jupyter Notebook
examples/#05 - Exploring Utils.ipynb
synapticarbors/PyMove
10bf8fcd4b60b31d812a12a31a14b0ea52ac4464
[ "MIT" ]
null
null
null
examples/#05 - Exploring Utils.ipynb
synapticarbors/PyMove
10bf8fcd4b60b31d812a12a31a14b0ea52ac4464
[ "MIT" ]
null
null
null
examples/#05 - Exploring Utils.ipynb
synapticarbors/PyMove
10bf8fcd4b60b31d812a12a31a14b0ea52ac4464
[ "MIT" ]
null
null
null
19.981018
167
0.512782
[ [ [ "# #05 - Exploring Utils\n\n\nFalar sobre para se trabalhar com trajetórias pode ser necessária algumas c onversões envolvendo tempo e data, distância e etc, fora outros utilitários.\n\nFalar dos módulos presentes no pacote utils\n- constants\n- conversions\n- datetime\n- distances\n- math\n- mem\n- trajectories\n- transformations\n\n\n---\n\n### Imports", "_____no_output_____" ] ], [ [ "import pymove.utils as utils\nimport pymove\nfrom pymove import MoveDataFrame", "_____no_output_____" ] ], [ [ "---\n### Load data", "_____no_output_____" ] ], [ [ "move_data = pymove.read_csv(\"geolife_sample.csv\")", "_____no_output_____" ] ], [ [ "---\n### Conversions\n\nTo transform latitude degree to meters, you can use function **lat_meters**. For example, you can convert Fortaleza's latitude -3.8162973555:", "_____no_output_____" ] ], [ [ "utils.conversions.lat_meters(-3.8162973555)", "_____no_output_____" ] ], [ [ "To concatenates list elements, joining them by the separator specified by the parameter \"delimiter\", you can use **list_to_str**", "_____no_output_____" ] ], [ [ "utils.conversions.list_to_str([\"a\", \"b\", \"c\", \"d\"], \"-\")", "_____no_output_____" ] ], [ [ "To concatenates the elements of the list, joining them by \",\", , you can use **list_to_csv_str**", "_____no_output_____" ] ], [ [ "utils.conversions.list_to_csv_str([\"a\", \"b\", \"c\", \"d\"])", "_____no_output_____" ] ], [ [ "To concatenates list elements in consecutive element pairs, you can use **list_to_svm_line**", "_____no_output_____" ] ], [ [ "utils.conversions.list_to_svm_line([\"a\", \"b\", \"c\", \"d\"])", "_____no_output_____" ] ], [ [ "To convert longitude to X EPSG:3857 WGS 84/Pseudo-Mercator, you can use **lon_to_x_spherical**", "_____no_output_____" ] ], [ [ "utils.conversions.lon_to_x_spherical(-38.501597)", "_____no_output_____" ] ], [ [ "To convert latitude to Y EPSG:3857 WGS 84/Pseudo-Mercator, you can use **lat_to_y_spherical**", "_____no_output_____" ] ], [ [ "utils.conversions.lat_to_y_spherical(-3.797864)", "_____no_output_____" ] ], [ [ "To convert X EPSG:3857 WGS 84/Pseudo-Mercator to longitude, you can use **x_to_lon_spherical**", "_____no_output_____" ] ], [ [ "utils.conversions.x_to_lon_spherical(-4285978.172767829)", "_____no_output_____" ] ], [ [ "To convert Y EPSG:3857 WGS 84/Pseudo-Mercator to latitude, you can use **y_to_lat_spherical**", "_____no_output_____" ] ], [ [ "utils.conversions.y_to_lat_spherical(-423086.2213610324)", "_____no_output_____" ] ], [ [ "To convert values, in ms, in label_speed column to kmh, you can use **ms_to_kmh**", "_____no_output_____" ] ], [ [ "utils.conversions.ms_to_kmh(move_data)", "\nCreating or updating distance, time and speed features in meters by seconds\n\n...Sorting by id and datetime to increase performance\n\n...Set id as index to a higher peformance\n\n" ] ], [ [ "To convert values, in kmh, in label_speed column to ms, you can use **kmh_to_ms**", "_____no_output_____" ] ], [ [ "utils.conversions.kmh_to_ms(move_data)", "_____no_output_____" ] ], [ [ "To convert values, in meters, in label_distance column to kilometer, you can use **meters_to_kilometers**", "_____no_output_____" ] ], [ [ "utils.conversions.meters_to_kilometers(move_data)", "_____no_output_____" ] ], [ [ "To convert values, in kilometers, in label_distance column to meters, you can use **kilometers_to_meters**", "_____no_output_____" ] ], [ [ "utils.conversions.kilometers_to_meters(move_data)", "_____no_output_____" ] ], [ [ "To convert values, in seconds, in label_distance 
column to minutes, you can use **seconds_to_minutes**", "_____no_output_____" ] ], [ [ "utils.conversions.seconds_to_minutes(move_data)", "_____no_output_____" ] ], [ [ "To convert values, in minutes, in label_distance column to seconds, you can use **minute_to_seconds**", "_____no_output_____" ] ], [ [ "utils.conversions.minute_to_seconds(move_data)", "_____no_output_____" ] ], [ [ "To convert in minutes, in label_distance column to hours, you can use **minute_to_hours**", "_____no_output_____" ] ], [ [ "utils.conversions.minute_to_hours(move_data)", "_____no_output_____" ] ], [ [ "To convert in hours, in label_distance column to minute, you can use **hours_to_minute**", "_____no_output_____" ] ], [ [ "utils.conversions.hours_to_minute(move_data)", "_____no_output_____" ] ], [ [ "To convert in seconds, in label_distance column to hours, you can use **seconds_to_hours**", "_____no_output_____" ] ], [ [ "utils.conversions.seconds_to_hours(move_data)", "_____no_output_____" ] ], [ [ "To convert in seconds, in label_distance column to hours, you can use **hours_to_seconds**", "_____no_output_____" ] ], [ [ "utils.conversions.hours_to_seconds(move_data)", "_____no_output_____" ] ], [ [ "---\n\n## Datetime\n\n", "_____no_output_____" ], [ "To converts a datetime in string\"s format \"%Y-%m-%d\" or \"%Y-%m-%d %H:%M:%S\" to datetime\"s format, you can use **str_to_datetime**.", "_____no_output_____" ] ], [ [ "utils.datetime.str_to_datetime('2018-06-29 08:15:27')", "_____no_output_____" ] ], [ [ "To get date, in string's format, from timestamp, you can use **date_to_str**.", "_____no_output_____" ] ], [ [ "utils.datetime.date_to_str(utils.datetime.str_to_datetime('2018-06-29 08:15:27'))", "_____no_output_____" ] ], [ [ "To converts a date in datetime's format to string's format, you can use **to_str**.", "_____no_output_____" ] ], [ [ "import datetime\nutils.datetime.to_str(datetime.datetime(2018, 6, 29, 8, 15, 27))", "_____no_output_____" ] ], [ [ "To converts a datetime to an int representation in minutes, you can use **to_min**.", "_____no_output_____" ] ], [ [ "utils.datetime.to_min(datetime.datetime(2018, 6, 29, 8, 15, 27))", "_____no_output_____" ] ], [ [ "To do the reverse use: **min_to_datetime**", "_____no_output_____" ] ], [ [ "utils.datetime.min_to_datetime(25504335)", "_____no_output_____" ] ], [ [ "To get day of week of a date, you can use **to_day_of_week_int**, where 0 represents Monday and 6 is Sunday.", "_____no_output_____" ] ], [ [ "utils.datetime.to_day_of_week_int(datetime.datetime(2018, 6, 29, 8, 15, 27))", "_____no_output_____" ] ], [ [ "To indices if a day specified by the user is a working day, you can use **working_day**.", "_____no_output_____" ] ], [ [ "utils.datetime.working_day(datetime.datetime(2018, 6, 29, 8, 15, 27), country='BR')", "_____no_output_____" ], [ "utils.datetime.working_day(datetime.datetime(2018, 4, 21, 8, 15, 27), country='BR')", "_____no_output_____" ] ], [ [ "To get datetime of now, you can use **now_str**.", "_____no_output_____" ] ], [ [ "utils.datetime.now_str()", "_____no_output_____" ] ], [ [ "To convert time in a format appropriate of time, you can use **deltatime_str**.", "_____no_output_____" ] ], [ [ "utils.datetime.deltatime_str(1082.7180936336517)", "_____no_output_____" ] ], [ [ "To converts a local datetime to a POSIX timestamp in milliseconds, you can use **timestamp_to_millis**.", "_____no_output_____" ] ], [ [ "utils.datetime.timestamp_to_millis(\"2015-12-12 08:00:00.123000\")", "_____no_output_____" ] ], [ [ "To converts 
milliseconds to timestamp, you can use **millis_to_timestamp**.", "_____no_output_____" ] ], [ [ "utils.datetime.millis_to_timestamp(1449907200123)", "_____no_output_____" ] ], [ [ "To get time, in string's format, from timestamp, you can use **time_to_str**.", "_____no_output_____" ] ], [ [ "utils.datetime.time_to_str(datetime.datetime(2018, 6, 29, 8, 15, 27))", "_____no_output_____" ] ], [ [ "To converts a time in string's format \"%H:%M:%S\" to datetime's format, you can use **str_to_time**.", "_____no_output_____" ] ], [ [ "utils.datetime.str_to_time(\"08:00:00\")", "_____no_output_____" ] ], [ [ "To computes the elapsed time from a specific start time to the moment the function is called, you can use **elapsed_time_dt**.", "_____no_output_____" ] ], [ [ "utils.datetime.elapsed_time_dt(utils.datetime.str_to_time(\"08:00:00\"))", "_____no_output_____" ] ], [ [ "To computes the elapsed time from the start time to the end time specifed by the user, you can use **diff_time**.", "_____no_output_____" ] ], [ [ "utils.datetime.diff_time(utils.datetime.str_to_time(\"08:00:00\"), utils.datetime.str_to_time(\"12:00:00\"))", "_____no_output_____" ] ], [ [ "--- \n\n## Distances", "_____no_output_____" ], [ "To calculate the great circle distance between two points on the earth, you can use **haversine**.", "_____no_output_____" ] ], [ [ "utils.distances.haversine(-3.797864,-38.501597,-3.797890, -38.501681)", "_____no_output_____" ] ], [ [ "---\n<!-- Ver com a arina se é válido fazer a doc dessas 2 -->\n<!-- ## Trajectories --> \n<!-- ## Transformations -->\n\n## Math", "_____no_output_____" ], [ "To compute standard deviation, you can use **std**.", "_____no_output_____" ] ], [ [ "utils.math.std(600, 20, 5)", "_____no_output_____" ] ], [ [ "To compute the average of standard deviation, you can use **avg_std**.", "_____no_output_____" ] ], [ [ "# utils.math.avg_std(600, 600, 20)", "_____no_output_____" ] ], [ [ "To compute the standard deviation of sample, you can use **std_sample**.", "_____no_output_____" ] ], [ [ "utils.math.std_sample(600, 20, 5)", "_____no_output_____" ] ], [ [ "To compute the average of standard deviation of sample, you can use **avg_std_sample**.", "_____no_output_____" ] ], [ [ "# utils.math.avg_std_sample(600, 20, 5)", "_____no_output_____" ] ], [ [ "To computes the sum of the elements of the array, you can use **array_sum**.", "_____no_output_____" ] ], [ [ "utils.math.array_sum([600, 20, 5])", "_____no_output_____" ] ], [ [ "To computes the sum of all the elements in the array, the sum of the square of each element and the number of elements of the array, you can use **array_stats**.", "_____no_output_____" ] ], [ [ "utils.math.array_stats([600, 20, 5])", "_____no_output_____" ] ], [ [ "To perfomers interpolation and extrapolation, you can use **interpolation**.", "_____no_output_____" ] ], [ [ "utils.math.interpolation(15, 20, 65, 86, 5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06b17b33a3cfa3007838c9c813624990aec256a
119,529
ipynb
Jupyter Notebook
notebooks/1_initial_model.ipynb
NowanIlfideme/kaggle_ni_mafia_gametype
3ad47ba91b58d4541d94d85bb01f2dd8286a5531
[ "Apache-2.0" ]
2
2019-02-13T21:55:10.000Z
2019-02-14T07:40:51.000Z
notebooks/1_initial_model.ipynb
NowanIlfideme/kaggle_ni_mafia_gametype
3ad47ba91b58d4541d94d85bb01f2dd8286a5531
[ "Apache-2.0" ]
null
null
null
notebooks/1_initial_model.ipynb
NowanIlfideme/kaggle_ni_mafia_gametype
3ad47ba91b58d4541d94d85bb01f2dd8286a5531
[ "Apache-2.0" ]
1
2021-10-10T07:35:36.000Z
2021-10-10T07:35:36.000Z
42.673688
373
0.473182
[ [ [ "# Initial Modelling notebook", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "import warnings", "_____no_output_____" ], [ "import bay12_solution_eposts as solution", "_____no_output_____" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "post, thread = solution.prepare.load_dfs('train')", "_____no_output_____" ], [ "post.head(2)", "_____no_output_____" ], [ "thread.head(2)", "_____no_output_____" ] ], [ [ "I will set the thread number to be the index, to simplify matching in the future:", "_____no_output_____" ] ], [ [ "thread = thread.set_index('thread_num')\nthread.head(2)", "_____no_output_____" ] ], [ [ "We'll load the label map as well, which tells us which index goes to which label", "_____no_output_____" ] ], [ [ "label_map = solution.prepare.load_label_map()\nlabel_map", "_____no_output_____" ] ], [ [ "## Create features from thread dataframe", "_____no_output_____" ], [ "We will fit a CountVectorizer, which is a simple transformation that counts the number of times the word was found.\n\nThe parameter `min_df` sets the minimum number of occurances in our set that will allow a word to join our vocabulary.", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(ngram_range=(1, 1), min_df=3)", "_____no_output_____" ], [ "word_vectors_raw = cv.fit_transform(thread['thread_name'])", "_____no_output_____" ] ], [ [ "To save space, this outputs a sparse matrix:", "_____no_output_____" ] ], [ [ "word_vectors_raw", "_____no_output_____" ] ], [ [ "However, since we'll be using it with a DataFrame, we need to convert it into a Pandas DataFrame:", "_____no_output_____" ] ], [ [ "word_df = pd.DataFrame(word_vectors_raw.toarray(), columns=cv.get_feature_names(), index=thread.index)\nword_df.head()", "_____no_output_____" ] ], [ [ "The only other feature we have from our thread data is the number of replies. Let's add one to get the number of replies. Also, let's use the logarithm of post count as well, just for fun.\n\nWe'll concatenate those into our X dataframe (Note that I'm renaming the columns, to keep track more easily):", "_____no_output_____" ] ], [ [ "X = pd.concat([\n (thread['thread_replies'] + 1).rename('posts'), \n np.log(thread['thread_replies'] + 1).rename('log_posts'), \n word_df,\n ], axis='columns')\nX.head()", "_____no_output_____" ] ], [ [ "Our target is the category number. Remember that this isn't a regression task - there is no actual order between these categories! Also, our Y is one-dimensional, so we'll keep it as a Series (even though it prints less prettily).", "_____no_output_____" ] ], [ [ "y = thread['thread_label_id']\ny.head()", "_____no_output_____" ] ], [ [ "## Split dataset into \"training\" and \"validation\"", "_____no_output_____" ], [ "In order to check the quality of our model in a more realistic setting, we will split all our input (training) data into a \"training set\" (which our model will see and learn from) and a \"validation set\" (where we see how well our model generalized). 
[Relevant link](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html).", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# NOTE: setting the `random_state` lets you get the same results with the pseudo-random generator\nvalidation_pct = 0.25\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=validation_pct, random_state=99)", "_____no_output_____" ], [ "X_train.shape, y_train.shape", "_____no_output_____" ], [ "X_val.shape, y_val.shape", "_____no_output_____" ] ], [ [ "## Fit a model", "_____no_output_____" ], [ "Since we are fitting a multiclass model, [this scikit-learn link](https://scikit-learn.org/stable/modules/multiclass.html) is very relevant. To simplify things, we will be using an algorithm that is inherently multi-class.", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\n\n# Just using default parameters... what can do wrong?\ncls = DecisionTreeClassifier(random_state=1337)", "_____no_output_____" ], [ "# Fit\ncls.fit(X_train, y_train)", "_____no_output_____" ], [ "# In-sample and out-of-sample predictions\n# NOTE: we \ny_train_pred = pd.Series(\n cls.predict(X_train), \n index=X_train.index, \n)\ny_val_pred = pd.Series(\n cls.predict(X_val), \n index=X_val.index, \n)", "_____no_output_____" ], [ "y_val_pred.head()", "_____no_output_____" ] ], [ [ "## Score the model", "_____no_output_____" ], [ "To find out how well the model did, we'll use the [model evaluation functionality of sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html); specifically, the [multiclass classification metrics](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-metrics).", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix, accuracy_score, classification_report", "_____no_output_____" ] ], [ [ "The [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix) shows how our predictions differ from the actual values.\n\nIt's important to note how strongly our in-sample (training) and out-of-sample (validation/test) metrics differ.", "_____no_output_____" ] ], [ [ "def confusion_df(y_actual, y_pred):\n res = pd.DataFrame(\n confusion_matrix(y_actual, y_pred, labels=label_map.values),\n index=label_map.index.rename('predicted'),\n columns=label_map.index.rename('actual'),\n )\n return res", "_____no_output_____" ], [ "confusion_df(y_train, y_train_pred).style.highlight_max()", "_____no_output_____" ], [ "confusion_df(y_val, y_val_pred).style.highlight_max()", "_____no_output_____" ] ], [ [ "Oh boy. That's pretty bad - we didn't predict anything for several columns! 
\n\nLet's look at the metrics to confirm that it is indeed bad.", "_____no_output_____" ] ], [ [ "print(\"Test accuracy:\", accuracy_score(y_train, y_train_pred))\nprint(\"Validation accuracy:\", accuracy_score(y_val, y_val_pred))", "Test accuracy: 1.0\nValidation accuracy: 0.6888888888888889\n" ], [ "report = classification_report(y_val, y_val_pred, labels=label_map.values, target_names=label_map.index)\nprint(report)", " precision recall f1-score support\n\n bastard 1.00 0.40 0.57 5\nbeginners-mafia 0.67 0.50 0.57 4\n byor 0.00 0.00 0.00 2\n classic 0.40 0.25 0.31 8\n closed-setup 0.33 0.71 0.45 7\n cybrid 0.00 0.00 0.00 1\n kotm 0.00 0.00 0.00 1\n non-mafia-game 0.00 0.00 0.00 0\n other 0.88 0.84 0.86 55\n paranormal 0.60 1.00 0.75 3\n supernatural 0.00 0.00 0.00 0\n vanilla 0.00 0.00 0.00 1\n vengeful 0.67 0.67 0.67 3\n\n micro avg 0.69 0.69 0.69 90\n macro avg 0.35 0.34 0.32 90\n weighted avg 0.73 0.69 0.69 90\n\n" ] ], [ [ "Well, that's pretty bad. We seriously overfit our training set... which is sort-of what I expected. Oh well.\n\nBy the way, the warnings at the bottom say that we have no real Precision or F-score to use, with no predictions for some classes. ", "_____no_output_____" ], [ "# Predict with the model\n\nHere, we will predict on the test set (predicitions to send in), then save the results and the model.\n\n**IMPORTANT NOTE**: In reality, you need to re-train your same model on the entire set to predict! However, I'm just using the same model as before, as it will bad anyways. ;)", "_____no_output_____" ] ], [ [ "post_test, thread_test = solution.prepare.load_dfs('test')", "_____no_output_____" ], [ "thread_test = thread_test.set_index('thread_num')\nthread_test.head(2)", "_____no_output_____" ] ], [ [ "We need to attach a `thread_label_id` column, as given in the training set:", "_____no_output_____" ] ], [ [ "thread.head(2)", "_____no_output_____" ] ], [ [ "Use the fitted CountVectorizer and other features to make our X dataframe:", "_____no_output_____" ] ], [ [ "word_vectors_raw_test = cv.transform(thread_test['thread_name'])", "_____no_output_____" ], [ "word_df_test = pd.DataFrame(word_vectors_raw_test.toarray(), columns=cv.get_feature_names(), index=thread_test.index)\nword_df_test.head()", "_____no_output_____" ], [ "X_test = pd.concat([\n (thread_test['thread_replies'] + 1).rename('posts'), \n np.log(thread_test['thread_replies'] + 1).rename('log_posts'), \n word_df_test,\n ], axis='columns')\nX_test.head()", "_____no_output_____" ] ], [ [ "Now we predict with our model, then paste it to a copy of `thread_test` as column `thread_label_id`.", "_____no_output_____" ] ], [ [ "y_test_pred = pd.Series(\n cls.predict(X_test), \n index=X_test.index, \n)\ny_test_pred.head()", "_____no_output_____" ], [ "result = thread_test.copy()\nresult['thread_label_id'] = y_test_pred\nresult.head()", "_____no_output_____" ] ], [ [ "We need to reshape to conform to the submission format specified [here](https://www.kaggle.com/c/ni-mafia-gametype#evaluation).", "_____no_output_____" ] ], [ [ "result = result.reset_index()[['thread_num', 'thread_label_id']]\nresult.head()", "_____no_output_____" ] ], [ [ "# Export predictions, model\n\nOur model consists of the text vectorizer `cv` and classifier `cls`. 
We already formatted our results, we just need to make sure not to write an extra index column.", "_____no_output_____" ] ], [ [ "# NOTE: Exporting next to the notebooks - the files are small, but usually you don't want to do this.\nout_dir = os.path.abspath('1_output')\nos.makedirs(out_dir, exist_ok=True)", "_____no_output_____" ], [ "result.to_csv(\n os.path.join(out_dir, 'baseline_predict.csv'),\n index=False, header=True, encoding='utf-8', \n)", "_____no_output_____" ], [ "import joblib\n\njoblib.dump(cv, os.path.join(out_dir, 'cv.joblib'))\njoblib.dump(cls, os.path.join(out_dir, 'cls.joblib'))\nprint(\"Done. :)\")", "Done. :)\n" ] ], [ [ "# Final Remarks\n\nI'd like to mention that the above notebook is here JUST TO GET YOU STARTED. Feel free to change anything or everything above.\n\nIt may be a good idea to keep a piece of paper with you, and draw out your entire pipeline there, to keep organized.\n\nThis model is severely overfit because of a huge number of features from the names. Some ways to combat this are PCA and lowering dimensionality, increasing regularization, using a more feature-limited classifier, etc. You can also split this into two sub-problems: a classifier to tell whether it is a game or `\"other\"`, then classify game type if it's a game.", "_____no_output_____" ] ] ]
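The final remarks above suggest regularization or a more feature-limited classifier as remedies for the overfitting; the sketch below is one illustrative way to try that, reusing the notebook's `X_train`/`y_train`/`X_val`/`y_val` split. The `max_depth` and `min_samples_leaf` values are assumptions for demonstration, not tuned settings.

```python
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

# A depth- and leaf-size-limited tree as a cheap form of regularization.
cls_limited = DecisionTreeClassifier(max_depth=8, min_samples_leaf=5, random_state=1337)
cls_limited.fit(X_train, y_train)

print("train accuracy:     ", accuracy_score(y_train, cls_limited.predict(X_train)))
print("validation accuracy:", accuracy_score(y_val, cls_limited.predict(X_val)))
```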
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d06b2a9da54ef069d6d7f3cdfb8e1d14b4467a7f
20,538
ipynb
Jupyter Notebook
notebooks/multiple_quantile_regression.ipynb
GenoM87/osics_pulmonary_kaggle
f6419a6f0150d43f435802804e837eb806e3d2c2
[ "MIT" ]
null
null
null
notebooks/multiple_quantile_regression.ipynb
GenoM87/osics_pulmonary_kaggle
f6419a6f0150d43f435802804e837eb806e3d2c2
[ "MIT" ]
null
null
null
notebooks/multiple_quantile_regression.ipynb
GenoM87/osics_pulmonary_kaggle
f6419a6f0150d43f435802804e837eb806e3d2c2
[ "MIT" ]
null
null
null
33.340909
140
0.386406
[ [ [ "import numpy as np\nimport pandas as pd\nimport pydicom\nimport os\nimport random\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom PIL import Image\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import KFold\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "import torch.nn as nn\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import DataLoader", "_____no_output_____" ], [ "def seed_everything(seed=2020):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)#set all gpus seed\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False#if input data type and channels' changes arent' large use it improve train efficient\n torch.backends.cudnn.enabled = True\n \nseed_everything(42)", "_____no_output_____" ], [ "class cfgOsic:\n ROOT = \"../data/\"\n device = torch.device('cuda')", "_____no_output_____" ], [ "tr = pd.read_csv(f\"{cfgOsic.ROOT}/train.csv\")\ntr.drop_duplicates(keep=False, inplace=True, subset=['Patient','Weeks'])\nchunk = pd.read_csv(f\"{cfgOsic.ROOT}/test.csv\")\n\nprint(\"add infos\")\nsub = pd.read_csv(f\"{cfgOsic.ROOT}/sample_submission.csv\")\nsub['Patient'] = sub['Patient_Week'].apply(lambda x:x.split('_')[0])\nsub['Weeks'] = sub['Patient_Week'].apply(lambda x: int(x.split('_')[-1]))\nsub = sub[['Patient','Weeks','Confidence','Patient_Week']]\nsub = sub.merge(chunk.drop('Weeks', axis=1), on=\"Patient\")", "add infos\n" ], [ "tr['WHERE'] = 'train'\nchunk['WHERE'] = 'val'\nsub['WHERE'] = 'test'\ndata = tr.append([chunk, sub])", "_____no_output_____" ], [ "data['min_week'] = data['Weeks']\ndata.loc[data.WHERE=='test','min_week'] = np.nan\ndata['min_week'] = data.groupby('Patient')['min_week'].transform('min')", "_____no_output_____" ], [ "base = data.loc[data.Weeks == data.min_week]\nbase = base[['Patient','FVC']].copy()\nbase.columns = ['Patient','min_FVC']\nbase['nb'] = 1\nbase['nb'] = base.groupby('Patient')['nb'].transform('cumsum')\nbase = base[base.nb==1]\nbase.drop('nb', axis=1, inplace=True)", "_____no_output_____" ], [ "data = data.merge(base, on='Patient', how='left')\ndata['base_week'] = data['Weeks'] - data['min_week']\ndel base", "_____no_output_____" ], [ "#aggiunta altezza\n\ndef calculate_height(row):\n if row['Sex'] == 'Male':\n return row['min_FVC'] / (27.63 - 0.112 * row['Age'])\n else:\n return row['min_FVC'] / (21.78 - 0.101 * row['Age'])\n\ndata['Height'] = data.apply(calculate_height, axis=1)\n\ndata['WeeksPassed'] = data['Weeks'] - data['min_week']", "_____no_output_____" ], [ "COLS = ['Sex','SmokingStatus'] #,'Age'\nFE = []\nfor col in COLS:\n for mod in data[col].unique():\n FE.append(mod)\n data[mod] = (data[col] == mod).astype(int)", "_____no_output_____" ], [ "def scale_feature(series):\n return (series - series.min()) / (series.max() - series.min())\n\n\ndata['age'] = scale_feature(data['Age'])\ndata['BASE'] = scale_feature(data['min_FVC'])\ndata['week'] = scale_feature(data['base_week'])\ndata['percent'] = scale_feature(data['Percent'])\ndata['height'] = scale_feature(data['Height'])\ndata['week_passed'] = scale_feature(data['WeeksPassed'])\nFE += ['age','percent','week','BASE', 'height', 'week_passed']", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "\ntr = data.loc[data.WHERE=='train']\nchunk = 
data.loc[data.WHERE=='val']\nsub = data.loc[data.WHERE=='test']\ndel data", "_____no_output_____" ] ] ]
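A quick standalone check of the `calculate_height` logic defined above. The 27.63/0.112 and 21.78/0.101 coefficients are the ones used in the notebook; the patient values are made up for illustration.

```python
# Hypothetical patient: 70-year-old male with a baseline FVC of 3000.
age, min_fvc = 70, 3000.0

# Same expression as calculate_height() for 'Male': Height = FVC / (27.63 - 0.112 * Age)
height = min_fvc / (27.63 - 0.112 * age)
print(round(height, 1))  # ~151.6, the height-like feature later min-max scaled into 'height'
```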
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06b2cc67f29c580efc138292550aa19ee055b1d
5,366
ipynb
Jupyter Notebook
notebook/5_multilayer_perceptron.ipynb
goddoe/tensorflow_practice
ef47202b2cd6a5bce8a122ea44ecb2c5c44c4544
[ "MIT" ]
3
2016-11-07T11:26:44.000Z
2016-12-02T13:05:54.000Z
notebook/5_multilayer_perceptron.ipynb
goddoe/tensorflow_practice
ef47202b2cd6a5bce8a122ea44ecb2c5c44c4544
[ "MIT" ]
null
null
null
notebook/5_multilayer_perceptron.ipynb
goddoe/tensorflow_practice
ef47202b2cd6a5bce8a122ea44ecb2c5c44c4544
[ "MIT" ]
null
null
null
26.964824
99
0.532613
[ [ [ "import tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\",one_hot=True)", "Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" ], [ "learning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\ndisplay_step =1\n\n\nn_input = 784\nn_classes = 10\n\nn_hidden_1 = 256\nn_hidden_2 = 256", "_____no_output_____" ], [ "X = tf.placeholder(tf.float32, [None, n_input], name=\"input\")\nY = tf.placeholder(tf.float32, [None, n_classes], name=\"output\")", "_____no_output_____" ], [ "def multilayer_perceptron(X, weights, biases):\n \n layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n \n layer_2 = tf.add(tf.matmul(layer_1,weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n \n out_layer = tf.matmul(layer_2,weights['out']) + biases['out']\n \n return out_layer", "_____no_output_____" ], [ "weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out':tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\n\nbiases = {\n 'b1' : tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2' : tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n \n}\n\npred = multilayer_perceptron(X, weights, biases)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,Y))\noptimizer = tf.train.AdamOptimizer(learning_rate= learning_rate).minimize(cost)\n\ninit = tf.initialize_all_variables()", "_____no_output_____" ], [ "with tf.Session() as sess:\n sess.run(init)\n \n for epoch in range(training_epochs):\n avg_cost = 0.\n \n total_batch = int(mnist.train.num_examples/batch_size)\n for i in range(total_batch):\n batch_X, batch_Y = mnist. train.next_batch(batch_size)\n \n _, c = sess.run([optimizer, cost], feed_dict={X: batch_X, Y: batch_Y})\n \n avg_cost += c/total_batch\n if epoch % display_step == 0:\n print(\"Epoch:\"+'%04d' % (epoch+1), \"cost=\" + \"{:.9f}\".format(avg_cost))\n\n print(\"Optimization Finished!\")\n \n correct_prediction = tf.equal(tf.argmax(pred,1), tf.argmax(Y,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction,\"float\"))\n print(\"Accuracy : \" +str(accuracy.eval({X:mnist.test.images, Y:mnist.test.labels})))\n ", "Epoch:0001 cost=195.111316407\nEpoch:0002 cost=43.858165465\nEpoch:0003 cost=27.529450860\nEpoch:0004 cost=19.268953726\nEpoch:0005 cost=14.040415219\nEpoch:0006 cost=10.463342705\nEpoch:0007 cost=7.890293884\nEpoch:0008 cost=5.833286557\nEpoch:0009 cost=4.502147053\nEpoch:0010 cost=3.285491708\nEpoch:0011 cost=2.423698032\nEpoch:0012 cost=1.770643686\nEpoch:0013 cost=1.418311109\nEpoch:0014 cost=1.040065616\nEpoch:0015 cost=0.925381133\nOptimization Finished!\nAccuracy : 0.946\n" ] ] ]
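A quick arithmetic check, independent of TensorFlow, of how many trainable parameters the network defined above has (two 256-unit hidden layers plus the 10-class output layer):

```python
n_input, n_hidden_1, n_hidden_2, n_classes = 784, 256, 256, 10

params_h1 = n_input * n_hidden_1 + n_hidden_1      # weights + biases of hidden layer 1
params_h2 = n_hidden_1 * n_hidden_2 + n_hidden_2   # weights + biases of hidden layer 2
params_out = n_hidden_2 * n_classes + n_classes    # weights + biases of the output layer

print(params_h1, params_h2, params_out, params_h1 + params_h2 + params_out)
# 200960 65792 2570 269322
```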
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d06b2f4d9f5005641f25ec8b188a33e23cb7b2dd
5,602
ipynb
Jupyter Notebook
01_Introduction/Introduction.ipynb
ABellesis/QMMM_study_group
3d2bc84381b531f8b59f166287827967a640e8ed
[ "BSD-3-Clause" ]
null
null
null
01_Introduction/Introduction.ipynb
ABellesis/QMMM_study_group
3d2bc84381b531f8b59f166287827967a640e8ed
[ "BSD-3-Clause" ]
null
null
null
01_Introduction/Introduction.ipynb
ABellesis/QMMM_study_group
3d2bc84381b531f8b59f166287827967a640e8ed
[ "BSD-3-Clause" ]
null
null
null
21.629344
430
0.526241
[ [ [ "# Introduction Notebook\n\nHere we will cover common python libraries.\n\n1. [Numpy](#numpy) \n\n2. [Scipy](#scipy) \n\n3. [Matplotlib](#matplotlib) \n\n4. [PySCF](#pyscf)\n\n5. [Psi4](#psi4)", "_____no_output_____" ], [ "### Extra Practice\nFor a more hands-on introduction notebook, check out the notebook at [this link](https://hub.mybinder.org/user/amandadumi-nume-methods_release-une1joqv/tree/IPython_notebooks/01_Introduction). This will take you to a web-hosted Jupyter notebook on Binder. If you would prefer to clone the notebook to use locally, you can find it [here](https://github.com/amandadumi/numerical_methods_release/tree/master/IPython_notebooks).", "_____no_output_____" ], [ "<a id='numpy'></a>\n## Numpy\nFundamental package for scientific computing with Python", "_____no_output_____" ] ], [ [ "import numpy as np\n\na = np.array((4, 5, 6, 6, 7, 8))\nb = np.array((8, 9, 2, 4, 6, 7))\n\nc = np.dot(a, b)\nprint(c)", "_____no_output_____" ] ], [ [ "<a id='scipy'></a>\n## Scipy\n\nProvides many user-friendly and efficient numerical routines such as routines for numerical integration and optimization", "_____no_output_____" ] ], [ [ "import scipy as sp\nimport scipy.linalg as la\n\nmat = np.random.rand(5, 5)\neig_val, eig_vec = la.eig(mat)\n\nprint('eigenvalues:\\n {}\\n'.format(eig_val))\nprint('eigenvectors:\\n {}'.format(eig_vec))", "_____no_output_____" ] ], [ [ "## Matplotlib\n\nPython library for 2- and 3-D visualization.\n\nPyplot provides convenient functions to generate plots.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nx = np.linspace(0, 5, 100)\ny = np.sin(x)\nplt.plot(x, y)\nplt.show()", "_____no_output_____" ] ], [ [ "## Psi4Numpy\n\nPsi4 is an open source quantum chemistry package.\n\nRecently introduced [Psi4Numpy](https://github.com/psi4/psi4numpy), a collections of notebooks for teaching quantum chemistry. \n\n", "_____no_output_____" ], [ "The cell below runs an SCF cyle for water with the cc-pvdz basis using Psi4Numpy\n", "_____no_output_____" ] ], [ [ "import psi4\n\n# read in geometry for water\nh2o = psi4.geometry(\"\"\"\nO 0.0000000 0.0000000 0.0000000\nH 0.7569685 0.0000000 -0.5858752\nH -0.7569685 0.0000000 -0.5858752\n\"\"\")\n\n# set basis set\npsi4.set_options({'basis': 'cc-pvdz'})\n\n# run an scf calculation\nscf_e, scf_wfn = psi4.energy('scf', return_wfn=True)\nprint('converged SCF energy: {}'.format(scf_e))", "_____no_output_____" ] ], [ [ "## PySCF\n\nPython-based quantum simulations", "_____no_output_____" ], [ "The cell below runs an SCF cycle for water with the cc-pvdz basis using PySCF", "_____no_output_____" ] ], [ [ "from pyscf import gto, scf\n\n# read in geometry\nmol = gto.M(atom='O 0.0000000 0.0000000 0.0000000; H 0.7569685 0.0000000 -0.5858752; H -0.7569685 0.0000000 -0.5858752')\nmol.basis = 'ccpvdz'\n# run an scf calculation\nmol_scf = scf.RHF(mol)\nmol_scf.kernel()", "_____no_output_____" ] ] ]
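A small follow-up to the SciPy eigenvalue example above: the decomposition can be verified by checking that `mat @ v` equals `λ * v` for every eigenpair. This is a self-contained sketch that builds its own random matrix.

```python
import numpy as np
import scipy.linalg as la

mat = np.random.rand(5, 5)
eig_val, eig_vec = la.eig(mat)

# Eigenvectors are the columns of eig_vec, so iterate over the transpose.
for lam, v in zip(eig_val, eig_vec.T):
    assert np.allclose(mat @ v, lam * v)
print("all eigenpairs satisfy A v = lambda v")
```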
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d06b30153fc4f28cf27be7b76cfbeb148fd60bf6
6,252
ipynb
Jupyter Notebook
archive/financial_analysis_python.ipynb
jhustles/python_financial_records_analysis
249896532c9587b0aa94f227016f22e0b29b404d
[ "MIT" ]
1
2020-04-30T07:02:05.000Z
2020-04-30T07:02:05.000Z
archive/financial_analysis_python.ipynb
jhustles/python_financial_records_analysis
249896532c9587b0aa94f227016f22e0b29b404d
[ "MIT" ]
null
null
null
archive/financial_analysis_python.ipynb
jhustles/python_financial_records_analysis
249896532c9587b0aa94f227016f22e0b29b404d
[ "MIT" ]
null
null
null
33.794595
130
0.524312
[ [ [ "# Import Dependencies\nimport os\nimport csv\n", "_____no_output_____" ], [ "# Establish filepath\nbudget_csv = os.path.join(\".\", \"resources\", \"budget_data.csv\")\noutput_file = os.path.join(\".\", \"financial_analysis.txt\")", "_____no_output_____" ], [ "# Index Reference for the Profit and Loss List\n\n# Track Financial Parameters\n\n# Open and read csv file\nwith open(budget_csv, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n \n # Captures and removes the header row (list) into csvheader\n csvheader = next(csvreader) \n \n # Set up Counter, had to circle back to adjust bc of using next(csvreader) twice\n total_months = 0\n total_months = total_months + 1\n \n # Setup for change analysis and calculations\n financial_data = [867884]\n \n # Calculating the \"Average of Changes\" and Tracking the Month\n netchange_list = []\n month_of_change_list = []\n \n # Greatest Increase / Decrease- use list, save spot for Period and Value\n # counter intuitive\n greatest_increase = [\"\", 0]\n greatest_decrease = [\"\", 999999]\n \n # Captures and removes the next row into first_row (Python knows to go to the next line / list down in the csvreader)\n first_row = next(csvreader) # first whole row is a list month & value\n \n # Isolate the first value of \"Profit/Losses\"\n # Note: the first_row[0] is Jan-10\n prev_net = int(first_row[1])\n \n for row in csvreader:\n #print(f\"{row[0]} , {row[1]}\")\n \n # Loop Thru and count the total number of months included in the dataset\n total_months += 1\n \n # The net total amount of “Profit/Losses” over the entire period\n financial_data.append(int(row[1]))\n \n # Average of the changes in “Profit/Losses” over the entire period\n #Part one: \"Numberator\" Net Change\n \n # Track the net change\n # This calculates Month to Month (differences) aka changes\n net_change = int(row[1]) - int(prev_net) # @ this point prev_net = first value\n # This appends those changes to the list\n netchange_list.append(net_change) #- JG initial thought\n prev_net = int(row[1])\n #netchange_list.append(net_change) # solution. 
test after\n \n # Track month of change as well\n #month_of_change_list = month_of_change_list + [row[0]] # concatenate row[0] to the list\n month_of_change_list.append(row[0]) # add the month of change to list\n # will not need this for calculations\n \n # Greatest increase and decrease in the dataset caculations\n \n if net_change > greatest_increase[1]:\n greatest_increase[1] = net_change\n greatest_increase[0] = row[0] #capture the month\n \n if net_change < greatest_decrease[1]:\n greatest_decrease[1] = net_change\n greatest_decrease[0] = row[0]\n \n \n net = sum(financial_data)\n print(f\"Financial Analysis\")\n print(\"=\"*60)\n print(f\"Total Months: {total_months}\")\n print(f\"Total: ${net}\")\n print(f\"Average Change: {sum(netchange_list)/len(netchange_list)}\")\n print(f\"Greatest Increase in Profits: {greatest_increase[0]} '({greatest_increase[1]})'\")\n print(f\"Greatest Decrease in Profits: {greatest_decrease[0]} '({greatest_decrease[1]})'\")\n print(\"=\"*60)\n \noutput = (\n f\"\\nFinancial Analysis\\n\"\n f\"----------------------------\\n\"\n f\"Total Months: {total_months}\\n\"\n f\"Total: ${net}\\n\"\n f\"Average Change: {sum(netchange_list)/len(netchange_list)}\\n\"\n f\"Greatest Increase in Profits: {greatest_increase[0]} '({greatest_increase[1]})'\\n\"\n f\"Greatest Decrease in Profits: {greatest_decrease[0]} '({greatest_decrease[1]})'\\n\"\n )\n\nwith open (\"financial_analysis.txt\", 'w') as txt_file:\n txt_file.write(output)", "_____no_output_____" ], [ "# Test Cells\n\nwith open(budget_csv, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csvheader = next(csvreader) \n \n total_months = 0\n financial_data = []\n rolling_average = []\n \n first_row = next(csvreader)\n \n print(first_row[1])", "_____no_output_____" ], [ "# test cells below.", "_____no_output_____" ] ] ]
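The csv-loop analysis above can be cross-checked with pandas in a few lines. This is only a sketch: it assumes the same `resources/budget_data.csv` layout (a date column followed by a Profit/Losses column) and is not part of the original notebook.

```python
import pandas as pd

df = pd.read_csv("resources/budget_data.csv")
date_col, pnl_col = df.columns[0], df.columns[1]   # assumed: date first, Profit/Losses second

change = df[pnl_col].diff()                        # month-over-month change (NaN for the first row)

print("Total Months:", len(df))
print("Total:", df[pnl_col].sum())
print("Average Change:", change.mean())
print("Greatest Increase in Profits:", df.loc[change.idxmax(), date_col], change.max())
print("Greatest Decrease in Profits:", df.loc[change.idxmin(), date_col], change.min())
```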
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d06b4f9c8461ac0f5896e60612787aace144623c
3,571
ipynb
Jupyter Notebook
Week2/03. How to Construct a Linear Model_seungju.ipynb
Seungju182/pytorch-basic
89051631484ed9ecb0fa17917158e0d63d13addc
[ "MIT" ]
null
null
null
Week2/03. How to Construct a Linear Model_seungju.ipynb
Seungju182/pytorch-basic
89051631484ed9ecb0fa17917158e0d63d13addc
[ "MIT" ]
null
null
null
Week2/03. How to Construct a Linear Model_seungju.ipynb
Seungju182/pytorch-basic
89051631484ed9ecb0fa17917158e0d63d13addc
[ "MIT" ]
null
null
null
17.004762
46
0.462896
[ [ [ "# 3. How to Construct a Linear Model", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.optim as optim", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 3.1 Problem #1", "_____no_output_____" ] ], [ [ "X = torch.rand(100, 20)\nY = torch.rand(100, 1)", "_____no_output_____" ], [ "model = nn.Linear(20, 1)\nmodel(X.view(100,20)).shape == Y.shape", "_____no_output_____" ] ], [ [ "## 3.2 Problem #2", "_____no_output_____" ] ], [ [ "X = torch.rand(500, 30)\nY = torch.rand(500, 2)", "_____no_output_____" ], [ "model = nn.Linear(30, 2)\nmodel(X.view(500, 30)).shape == Y.shape", "_____no_output_____" ] ], [ [ "## 3.3 Problem #3", "_____no_output_____" ] ], [ [ "X = torch.rand(500, 40)\nY = torch.rand(1000, 1)", "_____no_output_____" ], [ "model = nn.Linear(40, 1)\nmodel(X.view(500, 40)).shape == Y.shape", "_____no_output_____" ] ], [ [ "## 3.4 Problem #4", "_____no_output_____" ] ], [ [ "X = torch.rand(1000, 200, 20)\nY = torch.rand(1000, 2)", "_____no_output_____" ], [ "model = nn.Linear(200*20, 2)\nmodel(X.view(1000, -1)).shape == Y.shape", "_____no_output_____" ] ] ]
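The notebook imports `torch.optim` but only checks output shapes; the sketch below shows how one of these linear models could actually be fitted, using Problem #1's shapes. The loss, learning rate, and epoch count are arbitrary choices for illustration.

```python
import torch
import torch.nn as nn
import torch.optim as optim

X = torch.rand(100, 20)
Y = torch.rand(100, 1)

model = nn.Linear(20, 1)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

for _ in range(100):
    optimizer.zero_grad()
    loss = criterion(model(X), Y)  # forward pass + mean squared error
    loss.backward()                # backpropagate
    optimizer.step()               # gradient descent update

print(loss.item())
```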
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06b51c21218e64a208a05b8c283920733468b94
56,365
ipynb
Jupyter Notebook
pandas/3.0.pandas_methods_features.ipynb
enesonmez/data-science-tutorial-turkish
755c539f44b9f5571cd3efc3e0358f14f96bc096
[ "CC0-1.0" ]
1
2022-02-02T08:41:23.000Z
2022-02-02T08:41:23.000Z
pandas/3.0.pandas_methods_features.ipynb
enesonmez/data-science-tutorial-turkish
755c539f44b9f5571cd3efc3e0358f14f96bc096
[ "CC0-1.0" ]
null
null
null
pandas/3.0.pandas_methods_features.ipynb
enesonmez/data-science-tutorial-turkish
755c539f44b9f5571cd3efc3e0358f14f96bc096
[ "CC0-1.0" ]
null
null
null
25.49299
115
0.344735
[ [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Pandas Methods and Features", "_____no_output_____" ], [ "### Important Topics for Data Analysis", "_____no_output_____" ], [ "#### Missing Values", "_____no_output_____" ] ], [ [ "data = {'Istanbul':[30,29,np.nan],'Ankara':[20,np.nan,25],'Izmir':[40,39,38],'Antalya':[40,np.nan,np.nan]}\nweather = pd.DataFrame(data,index=['pzt','sali','car'])\nweather", "_____no_output_____" ] ], [ [ "The **dropna** function is used to drop rows or columns that contain missing values.", "_____no_output_____" ] ], [ [ "weather.dropna()", "_____no_output_____" ], [ "weather.dropna(axis=1)", "_____no_output_____" ], [ "# keeps only the columns that have at least 2 non-NaN values (thresh=2)\nweather.dropna(axis=1, thresh=2)", "_____no_output_____" ] ], [ [ "We use the **fillna** function to fill in the missing values.", "_____no_output_____" ] ], [ [ "weather.fillna(22)", "_____no_output_____" ] ], [ [ "#### Grouping (Group By)", "_____no_output_____" ] ], [ [ "data = {'Departman':['Yazılım','Pazarlama','Yazılım','Pazarlama','Hukuk','Hukuk'],\n 'Calisanlar':['Ahmet','Mehmet','Enes','Burak','Zeynep','Fatma'],\n 'Maas':[150,100,200,300,400,500]}", "_____no_output_____" ], [ "workers = pd.DataFrame(data)\nworkers", "_____no_output_____" ], [ "groupbyobje = workers.groupby('Departman')", "_____no_output_____" ], [ "groupbyobje.count()", "_____no_output_____" ], [ "groupbyobje.mean()", "_____no_output_____" ], [ "groupbyobje.min()", "_____no_output_____" ], [ "groupbyobje.max()", "_____no_output_____" ], [ "groupbyobje.describe()", "_____no_output_____" ] ], [ [ "#### Concatenation", "_____no_output_____" ] ], [ [ "data1 = {'Isim':['Ahmet','Mehmet','Zeynep','Enes'],\n 'Spor':['Koşu','Yüzme','Koşu','Basketbol'],\n 'Kalori':[100,200,300,400]}\n\ndata2 = {'Isim':['Osman','Levent','Atlas','Fatma'],\n 'Spor':['Koşu','Yüzme','Koşu','Basketbol'],\n 'Kalori':[200,200,30,400]}\n\ndata3 = {'Isim':['Ayse','Mahmut','Duygu','Nur'],\n 'Spor':['Koşu','Yüzme','Badminton','Tenis'],\n 'Kalori':[150,200,350,400]}", "_____no_output_____" ], [ "df1 = pd.DataFrame(data1)\ndf2 = pd.DataFrame(data2)\ndf3 = pd.DataFrame(data3)", "_____no_output_____" ], [ "pd.concat([df1,df2,df3], ignore_index=True, axis=0)", "_____no_output_____" ] ], [ [ "#### Merging", "_____no_output_____" ] ], [ [ "mdata1 = {'Isim':['Ahmet','Mehmet','Zeynep','Enes'],\n 'Spor':['Koşu','Yüzme','Koşu','Basketbol']}\n\nmdata2 = {'Isim':['Ahmet','Mehmet','Zeynep','Enes'],\n 'Kalori':[100,200,300,400]}", "_____no_output_____" ], [ "mdf1 = pd.DataFrame(mdata1)\nmdf1", "_____no_output_____" ], [ "mdf2 = pd.DataFrame(mdata2)\nmdf2", "_____no_output_____" ], [ "pd.merge(mdf1,mdf2,on='Isim')", "_____no_output_____" ] ], [ [ "### Important Methods and Features", "_____no_output_____" ] ], [ [ "data = {'Departman' : ['Yazılım','Pazarlama','Yazılım','Pazarlama','Hukuk','Hukuk'],\n 'Isim' : ['Ahmet','Mehmet','Enes','Burak','Zeynep','Fatma'],\n 'Maas' : [150,100,200,300,400,500]}", "_____no_output_____" ], [ "workerdf = pd.DataFrame(data)\nworkerdf", "_____no_output_____" ] ], [ [ "#### Listing Unique Values and Counting Them", "_____no_output_____" ] ], [ [ "workerdf['Departman'].unique()", "_____no_output_____" ], [ "workerdf['Departman'].nunique()", "_____no_output_____" ] ], [ [ "#### How Many of Each Value Does a Column Contain?", "_____no_output_____" ] ], [ [ "workerdf['Departman'].value_counts()", "_____no_output_____" ] ],
[ [ "#### Performing Operations on Values with a Function", "_____no_output_____" ] ], [ [ "workerdf['Maas'].apply(lambda maas : maas*0.66)", "_____no_output_____" ] ], [ [ "#### Are There Null Values in the Dataframe?", "_____no_output_____" ] ], [ [ "workerdf.isnull()", "_____no_output_____" ] ], [ [ "#### Pivot Table", "_____no_output_____" ] ], [ [ "characters = {'Karakter Sınıfı':['South Park','South Park','Simpson','Simpson','Simpson'],\n 'Karakter Ismi':['Cartman','Kenny','Homer','Bart','Bart'],\n 'Puan':[9,10,50,20,10]}", "_____no_output_____" ], [ "dfcharacters = pd.DataFrame(characters)\ndfcharacters", "_____no_output_____" ], [ "dfcharacters.pivot_table(values='Puan',index=['Karakter Sınıfı','Karakter Ismi'],aggfunc=np.sum)", "_____no_output_____" ] ], [ [ "#### Sorting Values by a Specific Column", "_____no_output_____" ] ], [ [ "workerdf.sort_values(by='Maas', ascending=False)", "_____no_output_____" ] ], [ [ "#### Duplicate Data", "_____no_output_____" ] ], [ [ "employees = [('Stuti', 28, 'Varanasi'),\n ('Saumya', 32, 'Delhi'),\n ('Aaditya', 25, 'Mumbai'),\n ('Saumya', 32, 'Delhi'),\n ('Saumya', 32, 'Delhi'),\n ('Saumya', 32, 'Mumbai'),\n ('Aaditya', 40, 'Dehradun'),\n ('Seema', 32, 'Delhi')]\n \ndf = pd.DataFrame(employees, columns = ['Name', 'Age', 'City'])", "_____no_output_____" ], [ "duplicate = df[df.duplicated()] \nprint(\"Duplicate Rows :\")\nduplicate", "Duplicate Rows :\n" ], [ "duplicate = df[df.duplicated('City')]\nprint(\"Duplicate Rows based on City :\")\nduplicate", "Duplicate Rows based on City :\n" ], [ "df.drop_duplicates()", "_____no_output_____" ] ] ]
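One small addition to the Group By section above: `agg()` can compute several of the statistics shown (count, mean, min, max) in a single call. This sketch reuses the same `workers` data defined in the notebook.

```python
import pandas as pd

data = {'Departman': ['Yazılım', 'Pazarlama', 'Yazılım', 'Pazarlama', 'Hukuk', 'Hukuk'],
        'Calisanlar': ['Ahmet', 'Mehmet', 'Enes', 'Burak', 'Zeynep', 'Fatma'],
        'Maas': [150, 100, 200, 300, 400, 500]}
workers = pd.DataFrame(data)

# One groupby call, several aggregations per department at once.
workers.groupby('Departman')['Maas'].agg(['count', 'mean', 'min', 'max'])
```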
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06b5566ce1c6637aa297665e27cc05044a2fc26
8,075
ipynb
Jupyter Notebook
ML Problems/Newswires Classification with Reuters - DLP/Models/Newswires_Classification_with_Reuters.ipynb
keivanipchihagh/Intro_to_DS_and_ML
2739b58455cac48366dc6be10d3daa494f35eb37
[ "Apache-2.0" ]
7
2021-02-03T18:57:34.000Z
2021-02-09T10:37:15.000Z
ML Problems/Newswires Classification with Reuters - DLP/Models/Newswires_Classification_with_Reuters.ipynb
keivanipchihagh/Intro_to_Machine_Learning
2739b58455cac48366dc6be10d3daa494f35eb37
[ "Apache-2.0" ]
null
null
null
ML Problems/Newswires Classification with Reuters - DLP/Models/Newswires_Classification_with_Reuters.ipynb
keivanipchihagh/Intro_to_Machine_Learning
2739b58455cac48366dc6be10d3daa494f35eb37
[ "Apache-2.0" ]
1
2020-07-22T00:21:29.000Z
2020-07-22T00:21:29.000Z
28.942652
283
0.496099
[ [ [ "<a href=\"https://colab.research.google.com/github/keivanipchihagh/Intro_To_MachineLearning/blob/master/Models/Newswires_Classification_with_Reuters.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Newswires Classification with Reuters", "_____no_output_____" ], [ "##### Imports", "_____no_output_____" ] ], [ [ "import numpy as np # Numpy\nfrom matplotlib import pyplot as plt # Matplotlib\nimport keras # Keras\nimport pandas as pd # Pandas\nfrom keras.datasets import reuters # Reuters Dataset\nfrom keras.utils.np_utils import to_categorical # Categirical Classifier\nimport random # Random", "_____no_output_____" ] ], [ [ "##### Load dataset", "_____no_output_____" ] ], [ [ "(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words = 10000)\nprint('Size:', len(train_data))\nprint('Training Data:', train_data[0])", "_____no_output_____" ] ], [ [ "##### Get the feel of data", "_____no_output_____" ] ], [ [ "def decode(index): # Decoding the sequential integers into the corresponding words\n word_index = reuters.get_word_index()\n reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in test_data[0]])\n return decoded_newswire\n\nprint(\"Decoded test data sample [0]: \", decode(0))", "_____no_output_____" ] ], [ [ "##### Data Prep (One-Hot Encoding)", "_____no_output_____" ] ], [ [ "def vectorize_sequences(sequences, dimension = 10000): # Encoding the integer sequences into a binary matrix\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n results[i, sequence] = 1.\n return results\n\ntrain_data = vectorize_sequences(train_data)\ntest_data = vectorize_sequences(test_data)\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)", "_____no_output_____" ] ], [ [ "##### Building the model", "_____no_output_____" ] ], [ [ "model = keras.models.Sequential()\nmodel.add(keras.layers.Dense(units = 64, activation = 'relu', input_shape = (10000,)))\nmodel.add(keras.layers.Dense(units = 64, activation = 'relu'))\nmodel.add(keras.layers.Dense(units = 46, activation = 'softmax'))\nmodel.compile( optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['accuracy'])\nmodel.summary()", "_____no_output_____" ] ], [ [ "##### Training the model", "_____no_output_____" ] ], [ [ "x_val = train_data[:1000]\ntrain_data = train_data[1000:]\ny_val = train_labels[:1000]\ntrain_labels = train_labels[1000:]\n\nhistory = model.fit(train_data, train_labels, batch_size = 512, epochs = 10, validation_data = (x_val, y_val), verbose = False)", "_____no_output_____" ] ], [ [ "##### Evaluating the model", "_____no_output_____" ] ], [ [ "result = model.evaluate(train_data, train_labels)\nprint('Loss:', result[0])\nprint('Accuracy:', result[1] * 100)", "_____no_output_____" ] ], [ [ "##### Statistics", "_____no_output_____" ] ], [ [ "epochs = range(1, len(history.history['loss']) + 1)\nplt.plot(epochs, history.history['loss'], 'b', label = 'Training Loss')\nplt.plot(epochs, history.history['val_loss'], 'r', label = 'Validation Loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nplt.clf()\nplt.plot(epochs, history.history['accuracy'], 'b', label = 'Training Accuracy')\nplt.plot(epochs, history.history['val_accuracy'], 'r', label = 'Validation 
Accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "##### Making predictions", "_____no_output_____" ] ], [ [ "prediction_index = random.randint(0, len(test_data))\nprediction_data = test_data[prediction_index]\ndecoded_prediction_data = decode(prediction_index)\n\n# Info\nprint('Random prediction index:', prediction_index)\nprint('Original prediction Data:', prediction_data)\nprint('Decoded prediction Data:', decoded_prediction_data)\nprint('Expected prediction label:', np.argmax(test_labels[prediction_index]))\n\n# Prediction\npredictions = model.predict(test_data)\nprint('Prediction index: ', np.argmax(predictions[prediction_index]))", "_____no_output_____" ] ] ]
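A tiny worked example of what `vectorize_sequences` does, shrunk to a 10-word vocabulary so the multi-hot rows are easy to read:

```python
import numpy as np

def vectorize_sequences(sequences, dimension=10):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.  # set every word index that occurs in the sequence to 1
    return results

print(vectorize_sequences([[1, 3, 3, 7], [0, 2]]))
# [[0. 1. 0. 1. 0. 0. 0. 1. 0. 0.]
#  [1. 0. 1. 0. 0. 0. 0. 0. 0. 0.]]
```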
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06b584b8d2c374bf2148227ef8c6d45075eab5d
221,244
ipynb
Jupyter Notebook
IMDB_TorchText_Interpret.ipynb
matthiaszimmermann/pytorch_torchtext_captum
66f76c261b9edb5ed9b1e7a6d01fb15da0df6b0b
[ "Apache-2.0" ]
null
null
null
IMDB_TorchText_Interpret.ipynb
matthiaszimmermann/pytorch_torchtext_captum
66f76c261b9edb5ed9b1e7a6d01fb15da0df6b0b
[ "Apache-2.0" ]
null
null
null
IMDB_TorchText_Interpret.ipynb
matthiaszimmermann/pytorch_torchtext_captum
66f76c261b9edb5ed9b1e7a6d01fb15da0df6b0b
[ "Apache-2.0" ]
null
null
null
310.30014
184,164
0.895283
[ [ [ "# Interpreting text models: IMDB sentiment analysis", "_____no_output_____" ], [ "This notebook loads pretrained CNN model for sentiment analysis on IMDB dataset. It makes predictions on test samples and interprets those predictions using integrated gradients method.\n\nThe model was trained using an open source sentiment analysis tutorials described in: https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb\n\n**Note:** Before running this tutorial, please install: \n- spacy package, and its NLP modules for English language (https://spacy.io/usage)\n- sentencpiece (https://pypi.org/project/sentencepiece/)", "_____no_output_____" ] ], [ [ "import spacy", "_____no_output_____" ], [ "import torch\nimport torchtext\nimport torchtext.data", "_____no_output_____" ], [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchtext.vocab import Vocab\n\nfrom captum.attr import LayerIntegratedGradients, TokenReferenceBase, visualization\n\nnlp = spacy.load('en')\n", "_____no_output_____" ], [ "device = torch.device(\"cuda:5\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ] ], [ [ "The dataset used for training this model can be found in: https://ai.stanford.edu/~amaas/data/sentiment/\n\nRedefining the model in order to be able to load it.\n", "_____no_output_____" ] ], [ [ "class CNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, \n dropout, pad_idx):\n \n super().__init__()\n \n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)\n \n self.convs = nn.ModuleList([\n nn.Conv2d(in_channels = 1, \n out_channels = n_filters, \n kernel_size = (fs, embedding_dim)) \n for fs in filter_sizes\n ])\n \n self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)\n\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, text):\n \n #text = [sent len, batch size]\n \n #text = text.permute(1, 0)\n \n #text = [batch size, sent len]\n \n embedded = self.embedding(text)\n\n #embedded = [batch size, sent len, emb dim]\n \n embedded = embedded.unsqueeze(1)\n \n #embedded = [batch size, 1, sent len, emb dim]\n \n conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]\n \n #conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]\n \n pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]\n \n #pooled_n = [batch size, n_filters]\n \n cat = self.dropout(torch.cat(pooled, dim = 1))\n\n #cat = [batch size, n_filters * len(filter_sizes)]\n \n return self.fc(cat)\n", "_____no_output_____" ] ], [ [ "Loads pretrained model and sets the model to eval mode. The model is already in the provided in the 'models' folder. \n\nDownload source: https://github.com/pytorch/captum/blob/master/tutorials/models/imdb-model-cnn.pt", "_____no_output_____" ] ], [ [ "model = torch.load('models/imdb-model-cnn.pt')\nmodel.eval()\nmodel = model.to(device)", "/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.sparse.Embedding' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.container.ModuleList' has changed. 
you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.conv.Conv2d' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.linear.Linear' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.activation.ReLU' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n/opt/anaconda3/lib/python3.7/site-packages/torch/serialization.py:593: SourceChangeWarning: source code of class 'torch.nn.modules.dropout.Dropout' has changed. you can retrieve the original source code by accessing the object's source attribute or set `torch.nn.Module.dump_patches = True` and use the patch tool to revert the changes.\n warnings.warn(msg, SourceChangeWarning)\n" ] ], [ [ "Load a small subset of test data using torchtext from IMDB dataset.", "_____no_output_____" ] ], [ [ "TEXT = torchtext.data.Field(lower=True, tokenize='spacy')\nLabel = torchtext.data.LabelField(dtype = torch.float)\n", "_____no_output_____" ] ], [ [ "Download IMDB file 'aclImdb_v1.tar.gz' from https://ai.stanford.edu/~amaas/data/sentiment/ in a 'data' subfolder where this notebook is saved.\n\nThen unpack file using 'tar -xzf aclImdb_v1.tar.gz'", "_____no_output_____" ] ], [ [ "train, test = torchtext.datasets.IMDB.splits(text_field=TEXT,\n label_field=Label,\n train='train',\n test='test',\n path='data/aclImdb')\ntest, _ = test.split(split_ratio = 0.04)", "_____no_output_____" ], [ "len(test.examples)\n\n# expected result: 1000", "_____no_output_____" ] ], [ [ "Loading and setting up vocabulary for word embeddings using torchtext.", "_____no_output_____" ] ], [ [ "from torchtext import vocab\n\nloaded_vectors = vocab.GloVe(name='6B', dim=50)\n\n# If you prefer to use pre-downloaded glove vectors, you can load them with the following two command line\n# loaded_vectors = torchtext.vocab.Vectors('data/glove.6B.50d.txt')\n\n# source for downloading: https://github.com/uclnlp/inferbeddings/tree/master/data/glove\n\nTEXT.build_vocab(train, vectors=loaded_vectors, max_size=len(loaded_vectors.stoi))\n \nTEXT.vocab.set_vectors(stoi=loaded_vectors.stoi, vectors=loaded_vectors.vectors, dim=loaded_vectors.dim)\nLabel.build_vocab(train)\n", "_____no_output_____" ], [ "print('Vocabulary Size: ', len(TEXT.vocab))\n\n# expected result: 101982", "Vocabulary Size: 101982\n" ] ], [ [ "Define the padding token. The padding token will also serve as the reference/baseline token used for the application of the Integrated Gradients. 
The padding token is used for this since it is one of the most commonly used references for tokens.\n\nThis is then used with the Captum helper class `TokenReferenceBase` further down to generate a reference for each input text using the number of tokens in the text and a reference token index.", "_____no_output_____" ] ], [ [ "PAD = 'pad'\nPAD_INDEX = TEXT.vocab.stoi[PAD]\n\nprint(PAD, PAD_INDEX)", "pad 6976\n" ] ], [ [ "Let's create an instance of `LayerIntegratedGradients` using forward function of our model and the embedding layer.\nThis instance of layer integrated gradients will be used to interpret movie rating review.\n\nLayer Integrated Gradients will allow us to assign an attribution score to each word/token embedding tensor in the movie review text. We will ultimately sum the attribution scores across all embedding dimensions for each word/token in order to attain a word/token level attribution score.\n\nNote that we can also use `IntegratedGradients` class instead, however in that case we need to precompute the embeddings and wrap Embedding layer with `InterpretableEmbeddingBase` module. This is necessary because we cannot perform input scaling and subtraction on the level of word/token indices and need access to the embedding layer.", "_____no_output_____" ] ], [ [ "lig = LayerIntegratedGradients(model, model.embedding)", "_____no_output_____" ] ], [ [ "In the cell below, we define a generic function that generates attributions for each movie rating and stores them in a list using `VisualizationDataRecord` class. This will ultimately be used for visualization purposes.", "_____no_output_____" ] ], [ [ "def interpret_sentence(model, sentence, min_len = 7, label = 0):\n \n # create input tensor from sentence\n text_list = sentence_to_wordlist(sentence, min_len)\n text_tensor, reference_tensor = wordlist_to_tensors(text_list)\n \n # apply model forward function with sigmoid\n model.zero_grad()\n pred = torch.sigmoid(model(text_tensor)).item()\n pred_ind = round(pred)\n \n # compute attributions and approximation delta using layer integrated gradients\n attributions, delta = lig.attribute(text_tensor, reference_tensor, \\\n n_steps=500, return_convergence_delta = True)\n\n print('pred: ', Label.vocab.itos[pred_ind], '(', '%.2f'%pred, ')', ', delta: ', abs(delta))\n\n attributions = attributions.sum(dim=2).squeeze(0)\n attributions = attributions / torch.norm(attributions)\n attributions = attributions.cpu().detach().numpy()\n\n # create and return data visualization record\n return visualization.VisualizationDataRecord(\n attributions,\n pred,\n Label.vocab.itos[pred_ind],\n Label.vocab.itos[label],\n Label.vocab.itos[1],\n attributions.sum(), \n text_list,\n delta)\n \n # add_attributions_to_visualizer(attributions, text_list, pred, pred_ind, label, delta, vis_data_records_ig)\n\ndef sentence_to_wordlist(sentence, min_len = 7):\n # convert sentence into list of word/tokens (using spacy tokenizer)\n text = [tok.text for tok in nlp.tokenizer(sentence)]\n \n # fill text up with 'pad' tokens\n if len(text) < min_len:\n text += [PAD] * (min_len - len(text))\n \n return text\n\ndef wordlist_to_tensors(text):\n # get list of token/word indices using the vocabulary\n sentence_indices = [TEXT.vocab.stoi[t] for t in text]\n \n # transform token indices list into torch tensor\n sentence_tensor = torch.tensor(sentence_indices, device=device)\n sentence_tensor = sentence_tensor.unsqueeze(0)\n\n # create reference tensor using the padding token (one of the most frequently used tokens)\n 
token_reference = TokenReferenceBase(reference_token_idx = PAD_INDEX)\n reference_tensor = token_reference.generate_reference(len(text), device=device).unsqueeze(0)\n \n return sentence_tensor, reference_tensor\n", "_____no_output_____" ] ], [ [ "Below cells call `interpret_sentence` to interpret a couple handcrafted review phrases.", "_____no_output_____" ] ], [ [ "# reset accumulated data\nvis_records = []\n\nvis_records.append(interpret_sentence(model, 'It was a fantastic performance !', label=1))\nvis_records.append(interpret_sentence(model, 'Best film ever', label=1))\nvis_records.append(interpret_sentence(model, 'Such a great show!', label=1))\nvis_records.append(interpret_sentence(model, 'I\\'ve never watched something as bad', label=0))\nvis_records.append(interpret_sentence(model, 'It is a disgusting movie!', label=0))\nvis_records.append(interpret_sentence(model, 'Makes a poorly convincing argument', label=0))\nvis_records.append(interpret_sentence(model, 'Makes a fairly convincing argument', label=1))\nvis_records.append(interpret_sentence(model, 'Skyfall is one of the best action film in recent years but is just too long', min_len=18, label=1))\n", "pred: pos ( 0.99 ) , delta: tensor([0.0007])\npred: pos ( 0.71 ) , delta: tensor([0.0001])\npred: pos ( 0.95 ) , delta: tensor([0.0003])\npred: neg ( 0.22 ) , delta: tensor([0.0012])\npred: neg ( 0.38 ) , delta: tensor([0.0005])\npred: neg ( 0.01 ) , delta: tensor([0.0005])\npred: pos ( 0.66 ) , delta: tensor([0.0003])\npred: pos ( 0.91 ) , delta: tensor([0.0034])\n" ] ], [ [ "Below is an example of how we can visualize attributions for the text tokens. Feel free to visualize it differently if you choose to have a different visualization method.", "_____no_output_____" ] ], [ [ "vis_example = vis_records[-1]\n# print(dir(vis_example))\n\nprint('raw input: ', vis_example.raw_input)\nprint('true class: ', vis_example.true_class)\nprint('pred class (prob): ', vis_example.pred_class, '(', vis_example.pred_prob, ')')\nprint('attr score (sum over word attributions): ', vis_example.attr_score)\nprint('word attributions\\n', vis_example.word_attributions)", "raw input: ['Skyfall', 'is', 'one', 'of', 'the', 'best', 'action', 'film', 'in', 'recent', 'years', 'but', 'is', 'just', 'too', 'long', 'pad', 'pad']\ntrue class: pos\npred class (prob): pos ( 0.912595808506012 )\nattr score (sum over word attributions): 0.7669921\nword attributions\n [ 0.01238924 0.06156125 0.12204969 0.01363888 -0.00212131 0.8445496\n 0.12998587 0.2656981 -0.05432949 -0.01857009 0.17297307 -0.08786825\n -0.08167404 -0.2637743 -0.13738513 -0.21013089 0. 0. ]\n" ], [ "print('Visualize attributions based on Integrated Gradients')\nvisualization.visualize_text(vis_records)\n", "Visualize attributions based on Integrated Gradients\n" ] ], [ [ "Above cell generates an output similar to this:", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage(filename='img/sentiment_analysis.png')\n", "_____no_output_____" ] ] ]
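A small numeric illustration of the attribution post-processing inside `interpret_sentence` (sum over the embedding dimension, then divide by the L2 norm), on a toy 1 sentence × 3 tokens × 4 dimensions tensor with made-up values:

```python
import torch

# Toy attributions: one sentence, 3 tokens, 4 embedding dimensions.
attributions = torch.tensor([[[0.1, 0.2, -0.1, 0.3],
                              [0.0, -0.2, -0.1, -0.1],
                              [0.4, 0.1, 0.2, 0.1]]])

token_scores = attributions.sum(dim=2).squeeze(0)       # one raw score per token: [0.5, -0.4, 0.8]
token_scores = token_scores / torch.norm(token_scores)  # rescale to unit L2 norm
print(token_scores)  # approximately tensor([ 0.4880, -0.3904,  0.7807])
```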
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d06b630c0ec14ecf7b9aeca720e8561b5df1c8c8
271,520
ipynb
Jupyter Notebook
iris0-seed-0/iris0_sample_generation -seed-0.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
iris0-seed-0/iris0_sample_generation -seed-0.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
iris0-seed-0/iris0_sample_generation -seed-0.ipynb
Arnab9Codes/LSTM-based-oversampling
1277c506392a29e337d027bda99a2168abfd68ca
[ "MIT" ]
null
null
null
44.358765
262
0.424772
[ [ [ "import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.model_selection import train_test_split\n\n%config InlineBackend.figure_format='svg'", "G:\\newage2\\envs\\tensorflow\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "df=pd.read_csv('train_Data.csv')", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df_min=df[df['4']==1]", "_____no_output_____" ], [ "df_min.to_csv('iris0_minority_train.csv',index=False)\ndf_min=pd.read_csv('iris0_minority_train.csv')", "_____no_output_____" ], [ "df_majority=df[df['4']==0]\ndf_majority.to_csv('iris0_majority_train.csv',index=False)\ndf_majority=pd.read_csv('iris0_majority_train.csv')", "_____no_output_____" ], [ "%matplotlib inline\nsns.countplot(x='4',data=df)", "_____no_output_____" ], [ "def create_dataset(dataset,look_back=1):\n \n datax,datay=[],[]\n \n for i in range(len(dataset)-look_back-1):\n a=dataset[i:(i+look_back),:]\n datax.append(a)\n datay.append(dataset[i+look_back,:])\n \n return np.array(datax),np.array(datay)", "_____no_output_____" ], [ "df_minor=np.array(df_min)\nscaler=MinMaxScaler(feature_range=(0,1))\n\ndf_minor=scaler.fit_transform(df_min)\n\nx,y=create_dataset(df_minor,5)#5\nprint(x.shape)\nprint(y.shape)", "(28, 5, 5)\n(28, 5)\n" ], [ "Xtrain,xtest,Ytrain,ytest=train_test_split(x,y,test_size=0.40,random_state=60)", "_____no_output_____" ], [ "model=Sequential()\nmodel.add(LSTM(20,input_shape=(Xtrain.shape[1],Xtrain.shape[2])))#5\nmodel.add(Dense(5))\n\nprint(model.summary())", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm_2 (LSTM) (None, 20) 2080 \n_________________________________________________________________\ndense_2 (Dense) (None, 5) 105 \n=================================================================\nTotal params: 2,185\nTrainable params: 2,185\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ], [ "model.compile(loss='mse',optimizer='adam')", "_____no_output_____" ], [ "history=model.fit(Xtrain,Ytrain,epochs=500,verbose=1)", "Epoch 1/500\n16/16 [==============================] - 17s 1s/step - loss: 0.2987\nEpoch 2/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2931\nEpoch 3/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2868\nEpoch 4/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2804\nEpoch 5/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2740\nEpoch 6/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2677\nEpoch 7/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2615\nEpoch 8/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2555\nEpoch 9/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.2496\nEpoch 10/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2439\nEpoch 11/500\n16/16 [==============================] - 
0s 1ms/step - loss: 0.2384\nEpoch 12/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.2331\nEpoch 13/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2279\nEpoch 14/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2228\nEpoch 15/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.2179\nEpoch 16/500\n16/16 [==============================] - 0s 3ms/step - loss: 0.2131\nEpoch 17/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2084\nEpoch 18/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.2039\nEpoch 19/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1994\nEpoch 20/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1951\nEpoch 21/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1908\nEpoch 22/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1867\nEpoch 23/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1826\nEpoch 24/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1786\nEpoch 25/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1747\nEpoch 26/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1708\nEpoch 27/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1670\nEpoch 28/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1632\nEpoch 29/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1595\nEpoch 30/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1558\nEpoch 31/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1521\nEpoch 32/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1485\nEpoch 33/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1449\nEpoch 34/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1413\nEpoch 35/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1378\nEpoch 36/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1342\nEpoch 37/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1307\nEpoch 38/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1272\nEpoch 39/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1237\nEpoch 40/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1202\nEpoch 41/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1167\nEpoch 42/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.1133\nEpoch 43/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1098\nEpoch 44/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1064\nEpoch 45/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.1030\nEpoch 46/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0996\nEpoch 47/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0962\nEpoch 48/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0929\nEpoch 49/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0896\nEpoch 50/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0864\nEpoch 51/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0831\nEpoch 52/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0800\nEpoch 53/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0769\nEpoch 54/500\n16/16 
[==============================] - 0s 1ms/step - loss: 0.0739\nEpoch 55/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0709\nEpoch 56/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0680\nEpoch 57/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0653\nEpoch 58/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0626\nEpoch 59/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0600\nEpoch 60/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0576\nEpoch 61/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0552\nEpoch 62/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0530\nEpoch 63/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0510\nEpoch 64/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0491\nEpoch 65/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0473\nEpoch 66/500\n16/16 [==============================] - 0s 940us/step - loss: 0.0457\nEpoch 67/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0442\nEpoch 68/500\n16/16 [==============================] - 0s 940us/step - loss: 0.0429\nEpoch 69/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0418\nEpoch 70/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0408\nEpoch 71/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0400\nEpoch 72/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0393\nEpoch 73/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0387\nEpoch 74/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0383\nEpoch 75/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0379\nEpoch 76/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0377\nEpoch 77/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0375\nEpoch 78/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0374\nEpoch 79/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0373\nEpoch 80/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0373\nEpoch 81/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0373\nEpoch 82/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0372\nEpoch 83/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0372\nEpoch 84/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0372\nEpoch 85/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0372\nEpoch 86/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0372\nEpoch 87/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0372\nEpoch 88/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0372\nEpoch 89/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0371\nEpoch 90/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0371\nEpoch 91/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0370\nEpoch 92/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0369\nEpoch 93/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0369\nEpoch 94/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0368\nEpoch 95/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0367\nEpoch 96/500\n16/16 [==============================] - 0s 1ms/step - loss: 
0.0366\nEpoch 97/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0365\nEpoch 98/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0364\nEpoch 99/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0363\nEpoch 100/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0362\nEpoch 101/500\n16/16 [==============================] - 0s 2ms/step - loss: 0.0362\nEpoch 102/500\n16/16 [==============================] - 0s 1ms/step - loss: 0.0361\n" ], [ "model.save('7-24-2019-iris0-v1.h5')", "_____no_output_____" ], [ "Xtrain.shape", "_____no_output_____" ], [ "plt.plot(history.history['loss'],label='train')\n#plt.plot(history.history['val_loss'],label='test')\nplt.xlabel('number of epochs')\nplt.ylabel('val_loss')\nplt.legend()\n#pyplot.savefig('LSTM training.png',dpi=300)\nplt.show()", "_____no_output_____" ], [ "prediction=model.predict(xtest)", "_____no_output_____" ], [ "def draw_prediction(ytest,d,columns):\n \n _,axes=plt.subplots(len(columns),1,figsize=(10,20))\n \n for i,cols in enumerate(columns):\n \n axes[i].plot(ytest[:,i],label='real',color='blue')\n axes[i].plot(d[:,i],label='prediction',color='orange')\n #axes[i].set_xlabel='index'\n #axes[i].set_ylabel=cols\n axes[i].xlabel='index'\n axes[i].ylabel=cols", "_____no_output_____" ], [ "clmns=df.columns\ndraw_prediction(ytest,prediction,clmns)", "_____no_output_____" ], [ "prediction", "_____no_output_____" ], [ "prediction2=scaler.inverse_transform(prediction)", "_____no_output_____" ], [ "ytest2=scaler.inverse_transform(ytest)", "_____no_output_____" ], [ "draw_prediction(ytest2,prediction2,clmns)", "_____no_output_____" ], [ "prediction", "_____no_output_____" ], [ "new_data=pd.DataFrame(prediction2)", "_____no_output_____" ], [ "new_data.to_csv('new_corrected_data-v1-7-24-2019.csv',index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06b718a1014243d87c36038744efff4fdd6272f
5,011
ipynb
Jupyter Notebook
sdk/python-autotimeseries/nbs/index.ipynb
pietroppeter/nixtla
74e4560f1bdb6bf64445f3c45005fe74c0a0a427
[ "MIT" ]
176
2021-10-16T21:53:30.000Z
2022-03-31T22:34:13.000Z
sdk/python-autotimeseries/nbs/index.ipynb
pietroppeter/nixtla
74e4560f1bdb6bf64445f3c45005fe74c0a0a427
[ "MIT" ]
7
2021-11-18T00:24:11.000Z
2022-03-18T17:02:17.000Z
sdk/python-autotimeseries/nbs/index.ipynb
pietroppeter/nixtla
74e4560f1bdb6bf64445f3c45005fe74c0a0a427
[ "MIT" ]
18
2021-11-03T18:49:40.000Z
2022-03-31T22:34:54.000Z
29.476471
218
0.529635
[ [ [ "# autotimeseries\n\n> Nixtla SDK. Time Series Forecasting pipeline at scale.", "_____no_output_____" ], [ "[![CI python sdk](https://github.com/Nixtla/nixtla/actions/workflows/python-sdk.yml/badge.svg)](https://github.com/Nixtla/nixtla/actions/workflows/python-sdk.yml)\n[![Python](https://img.shields.io/pypi/pyversions/autotimeseries)](https://pypi.org/project/autotimeseries/)\n[![PyPi](https://img.shields.io/pypi/v/autotimeseries?color=blue)](https://pypi.org/project/autotimeseries/)\n[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Nixtla/nixtla/blob/main/sdk/python-autotimeseries/LICENSE)", "_____no_output_____" ], [ "**autotimeseries** is a python SDK to consume the APIs developed in https://github.com/Nixtla/nixtla.", "_____no_output_____" ], [ "## Install", "_____no_output_____" ], [ "### PyPI\n\n`pip install autotimeseries`", "_____no_output_____" ], [ "## How to use\n\nCheck the following examples for a full pipeline:\n\n- [M5 state-of-the-art reproduction](https://github.com/Nixtla/autotimeseries/tree/main/examples/m5).\n- [M5 state-of-the-art reproduction in Colab](https://colab.research.google.com/drive/1pmp4rqiwiPL-ambxTrJGBiNMS-7vm3v6?ts=616700c4)", "_____no_output_____" ], [ "### Basic usage", "_____no_output_____" ], [ "```python\nimport os\n\nfrom autotimeseries.core import AutoTS\n\nautotimeseries = AutoTS(bucket_name=os.environ['BUCKET_NAME'],\n api_id=os.environ['API_ID'], \n api_key=os.environ['API_KEY'],\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], \n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])\n```", "_____no_output_____" ], [ "#### Upload dataset to S3\n\n```python\ntrain_dir = '../data/m5/parquet/train'\n# File with target variables\nfilename_target = autotimeseries.upload_to_s3(f'{train_dir}/target.parquet')\n# File with static variables\nfilename_static = autotimeseries.upload_to_s3(f'{train_dir}/static.parquet')\n# File with temporal variables\nfilename_temporal = autotimeseries.upload_to_s3(f'{train_dir}/temporal.parquet')\n```", "_____no_output_____" ], [ "Each time series of the uploaded datasets is defined by the column `item_id`. Meanwhile the time column is defined by `timestamp` and the target column by `demand`. We need to pass this arguments to each call.\n\n```python\ncolumns = dict(unique_id_column='item_id',\n ds_column='timestamp',\n y_column='demand')\n```", "_____no_output_____" ], [ "#### Send the job to make forecasts\n\n```python\nresponse_forecast = autotimeseries.tsforecast(filename_target=filename_target,\n freq='D',\n horizon=28, \n filename_static=filename_static,\n filename_temporal=filename_temporal,\n objective='tweedie',\n metric='rmse',\n n_estimators=170,\n **columns)\n```", "_____no_output_____" ], [ "#### Download forecasts\n\n```python\nautotimeseries.download_from_s3(filename='forecasts_2021-10-12_19-04-32.csv', filename_output='../data/forecasts.csv')\n```", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06b72531c39742c2c8457ff28eaed8df3b1dc39
14,537
ipynb
Jupyter Notebook
Lime/cam_test.ipynb
Wenhao-Yang/DeepSpeaker-pytorch
99eb8de3357c85e2b7576da2a742be2ffd773ead
[ "MIT" ]
8
2020-08-26T13:32:56.000Z
2022-01-18T21:05:46.000Z
Lime/cam_test.ipynb
Wenhao-Yang/DeepSpeaker-pytorch
99eb8de3357c85e2b7576da2a742be2ffd773ead
[ "MIT" ]
1
2020-07-24T17:06:16.000Z
2020-07-24T17:06:16.000Z
Lime/cam_test.ipynb
Wenhao-Yang/DeepSpeaker-pytorch
99eb8de3357c85e2b7576da2a742be2ffd773ead
[ "MIT" ]
5
2020-12-11T03:31:15.000Z
2021-11-23T15:57:55.000Z
37.955614
261
0.508977
[ [ [ "#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@Author: yangwenhao\n@Contact: [email protected]\n@Software: PyCharm\n@File: cam_2.py\n@Time: 2021/4/12 21:47\n@Overview:\n Created on 2019/8/4 上午9:37\n @author: mick.yi\n\"\"\"\n\nimport os\nimport pdb\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel.distributed import DistributedDataParallel\n\nfrom Define_Model.ResNet import ThinResNet", "_____no_output_____" ], [ "os.environ['CUDA_VISIBLE_DEVICES'] = \"0,1\"\ntorch.distributed.init_process_group(backend=\"nccl\", init_method='tcp://localhost:12556', rank=0,\n world_size=1)", "_____no_output_____" ], [ "class GradCAM(object):\n \"\"\"\n 1: 网络不更新梯度,输入需要梯度更新\n 2: 使用目标类别的得分做反向传播\n \"\"\"\n\n def __init__(self, net, layer_name):\n self.net = net\n self.layer_name = layer_name\n self.feature = {}\n self.gradient = {}\n self.net.eval()\n self.handlers = []\n self._register_hook()\n\n def _get_features_hook(self, module, input, output):\n print(type(module))\n if isinstance(self.net, DistributedDataParallel):\n self.feature[input[0].device] = output[0]\n else:\n self.feature = output[0]\n# print(\"Device {}, forward out feature shape:{}\".format(input[0].device, output[0].size()))\n\n def _get_grads_hook(self, module, input_grad, output_grad):\n \"\"\"\n :param input_grad: tuple, input_grad[0]: None\n input_grad[1]: weight\n input_grad[2]: bias\n :param output_grad:tuple,长度为1\n :return:\n \"\"\"\n if isinstance(self.net, DistributedDataParallel):\n if input_grad[0].device not in self.gradient:\n self.gradient[input_grad[0].device] = output_grad[0]\n else:\n self.gradient[input_grad[0].device] += output_grad[0]\n else:\n self.gradient += output_grad[0]\n \n# print(output_grad[0])\n# print(\"Device {}, backward out gradient shape:{}\".format(input_grad[0].device, output_grad[0].size()))\n\n def _register_hook(self):\n\n if isinstance(self.net, DistributedDataParallel):\n modules = self.net.module.named_modules()\n else:\n modules = self.net.named_modules()\n\n for (name, module) in modules:\n if name == self.layer_name:\n self.handlers.append(module.register_backward_hook(self._get_features_hook))\n self.handlers.append(module.register_backward_hook(self._get_grads_hook))\n\n def remove_handlers(self):\n for handle in self.handlers:\n handle.remove()\n\n def __call__(self, inputs, index):\n \"\"\"\n :param inputs: [1,3,H,W]\n :param index: class id\n :return:\n \"\"\"\n# self.net.zero_grad()\n\n output, _ = self.net(inputs) # [1,num_classes]\n pdb.set_trace()\n\n if index is None:\n index = torch.argmax(output)\n \n target = output.gather(1, index)# .mean()\n\n # target = output[0][index]\n for i in target:\n i.backward(retain_graph=True)\n \n if isinstance(self.net, DistributedDataParallel):\n feature = []\n gradient = []\n for d in self.gradient:\n feature.append(self.feature[d])\n gradient.append(self.gradient[d])\n\n feature = torch.cat(feature, dim=0)\n gradient = torch.cat(gradient, dim=0)\n else:\n feature = self.feature\n gradient = self.gradient\n \n return feature, gradient\n \n # gradient = self.gradient[0].cpu().data.numpy() # [C,H,W]\n # weight = np.mean(gradient, axis=(1, 2)) # [C]\n # feature = self.feature[0].cpu().data.numpy() # [C,H,W]\n\n # cam = feature * weight[:, np.newaxis, np.newaxis] # [C,H,W]\n # cam = np.sum(cam, axis=0) # [H,W]\n # cam = np.maximum(cam, 0) # ReLU\n #\n # # 数值归一化\n # cam -= np.min(cam)\n # cam /= np.max(cam)\n # # resize to 224*224\n # cam = cv2.resize(cam, (224, 224))\n # return cam\n \n# print(\"gradient shape: \", 
gradient.shape)\n# print(\"feature shape: \", feature.shape)\n\nclass Sum_GradCAM(object):\n \"\"\"\n 1: 网络不更新梯度,输入需要梯度更新\n 2: 使用目标类别的得分做反向传播\n \"\"\"\n\n def __init__(self, net, layer_name):\n self.net = net\n self.layer_name = layer_name\n self.feature = {}\n self.gradient = {}\n self.net.eval()\n self.handlers = []\n self._register_hook()\n\n def _get_features_hook(self, module, input, output):\n \n if isinstance(self.net, DistributedDataParallel):\n self.feature[input[0].device] = output[0]\n else:\n self.feature = output[0]\n# print(\"Device {}, forward out feature shape:{}\".format(input[0].device, output[0].size()))\n\n def _get_grads_hook(self, module, input_grad, output_grad):\n \"\"\"\n :param input_grad: tuple, input_grad[0]: None\n input_grad[1]: weight\n input_grad[2]: bias\n :param output_grad:tuple,长度为1\n :return:\n \"\"\"\n if isinstance(self.net, DistributedDataParallel):\n if input_grad[0].device not in self.gradient:\n self.gradient[input_grad[0].device] = output_grad[0]\n else:\n self.gradient[input_grad[0].device] += output_grad[0]\n else:\n self.gradient = output_grad[0]\n \n# print(output_grad[0])\n# print(\"Device {}, backward out gradient shape:{}\".format(input_grad[0].device, output_grad[0].size()))\n\n def _register_hook(self):\n\n if isinstance(self.net, DistributedDataParallel):\n modules = self.net.module.named_modules()\n else:\n modules = self.net.named_modules()\n\n for (name, module) in modules:\n if name == self.layer_name:\n self.handlers.append(module.register_backward_hook(self._get_features_hook))\n self.handlers.append(module.register_backward_hook(self._get_grads_hook))\n\n def remove_handlers(self):\n for handle in self.handlers:\n handle.remove()\n\n def __call__(self, inputs, index):\n \"\"\"\n :param inputs: [1,3,H,W]\n :param index: class id\n :return:\n \"\"\"\n# self.net.zero_grad()\n\n output, _ = self.net(inputs) # [1,num_classes]\n pdb.set_trace()\n\n if index is None:\n index = torch.argmax(output)\n \n target = output.gather(1, index).mean()\n target.backward(retain_graph=True)\n \n if isinstance(self.net, DistributedDataParallel):\n feature = []\n gradient = []\n for d in self.gradient:\n feature.append(self.feature[d])\n gradient.append(self.gradient[d])\n\n feature = torch.cat(feature, dim=0)\n gradient = torch.cat(gradient, dim=0)\n else:\n feature = self.feature\n gradient = self.gradient\n \n return feature, gradient\n \n# print(\"gradient shape: \", gradient.shape)\n# print(\"feature shape: \", feature.shape)", "_____no_output_____" ], [ "model = ThinResNet()\nmodel = model.cuda()\nmodel = DistributedDataParallel(model)\ngc = GradCAM(model, 'layer4')", "/home/yangwenhao/local/project/SpeakerVerification-pytorch/Define_Model/ResNet.py:374: UserWarning: nn.init.normal is now deprecated in favor of nn.init.normal_.\n nn.init.normal(m.weight, mean=0., std=1.)\n/home/yangwenhao/local/project/SpeakerVerification-pytorch/Define_Model/ResNet.py:376: UserWarning: nn.init.constant is now deprecated in favor of nn.init.constant_.\n nn.init.constant(m.weight, 1)\n/home/yangwenhao/local/project/SpeakerVerification-pytorch/Define_Model/ResNet.py:377: UserWarning: nn.init.constant is now deprecated in favor of nn.init.constant_.\n nn.init.constant(m.bias, 0)\n" ], [ "x = torch.randn((20, 1, 224, 224)).cuda() # *1.2 +1.\nl = torch.range(0, 19).long().unsqueeze(1).cuda()\ny = model(x)\n\n#\ncam = gc(x, l)\n# print(cam.shape)", "/home/yangwenhao/anaconda3/envs/py35/lib/python3.5/site-packages/ipykernel_launcher.py:2: UserWarning: torch.range is 
deprecated in favor of torch.arange and will be removed in 0.5. Note that arange generates values in [start; end), not [start; end].\n \n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d06b7778ab40d51902c40cccf06704dcbf0b39bf
39,911
ipynb
Jupyter Notebook
we-geometry-benson/class-notebook.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
94
2020-06-27T19:04:11.000Z
2022-03-28T00:44:44.000Z
we-geometry-benson/class-notebook.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
13
2020-07-23T02:11:40.000Z
2020-09-09T21:28:36.000Z
we-geometry-benson/class-notebook.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
50
2020-07-15T03:37:49.000Z
2022-02-27T23:07:14.000Z
35.82675
973
0.617173
[ [ [ "# The Structure and Geometry of the Human Brain\n\n[Noah C. Benson](https://nben.net/) &lt;[[email protected]](mailto:[email protected])&gt; \n[eScience Institute](https://escience.washingtonn.edu/) \n[University of Washington](https://www.washington.edu/) \n[Seattle, WA 98195](https://seattle.gov/)", "_____no_output_____" ], [ "## Introduction", "_____no_output_____" ], [ "This notebook is designed to accompany the lecture \"Introduction to the Strugure and Geometry of the Human Brain\" as part of the Neurohackademt 2020 curriculum. It can be run either in Neurohackademy's Jupyterhub environment, or using the `docker-compose.yml` file (see the `README.md` file for instructions).\n\nIn this notebook we will examine various structural and geometric data used commonly in neuroscience. These demos will primarily use [FreeSurfer](http://surfer.nmr.mgh.harvard.edu/) subjects. In the lecture and the Neurohackademy Jupyterhub environment, we will look primarily at a subject named `nben`; however, you can alternately use the subject `bert`, which is an example subject that comes with FreeSurfer. Optionally, this notebook can be used with subject from the [Human Connectome Project (HCP)](https://db.humanconnectome.org/)--see the `README.md` file for instructions on getting credentials for use with the HCP.\n\nWe will look at these data using both the [`nibabel`](https://nipy.org/nibabel/), which is an excellent core library for importing various kinds of neuroimaging data, as well as [`neuropythy`](https://github.com/noahbenson/neuropythy), which builds on `nibabel` to provide a user-friendly API for interacting with subjects. At its core, `neuropythy` is a library for interacting with neuroscientific data in the context of brain structure.\n\nThis notebook itself consists of this introduction as well as four sections that follow the topic areas in the slide-deck from the lecture. These sections are intended to be explored in order.", "_____no_output_____" ], [ "### Libraries", "_____no_output_____" ], [ "Before running any of the code in this notebook, we need to start by importing a few libraries and making sure we have configured those that need to be configured (mainly, `matplotlib`).", "_____no_output_____" ] ], [ [ "# We will need os for paths:\nimport os\n# Numpy, Scipy, and Matplotlib are effectively standard libraries.\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n# Ipyvolume is a 3D plotting library that is used by neuropythy.\nimport ipyvolume as ipv\n# Nibabel is the library that understands various neuroimaging file\n# formats; it is also used by neuropythy.\nimport nibabel as nib\n\n# Neuropythy is the main library we will be using in this notebook.\nimport neuropythy as ny", "_____no_output_____" ], [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "## MRI and Volumetric Data", "_____no_output_____" ], [ "The first section of this notebook will deal with MR images and volumetric data. We will start by loading in an MRImage. 
We will use the same image that was visualized in the lecture (if you are not using the Jupyterhub, you won't have access to this subject, but you can use the subject `'bert'` instead).\n\n---", "_____no_output_____" ], [ "### Load a subject.", "_____no_output_____" ], [ "---\n\nFor starters, we will load the subject.", "_____no_output_____" ] ], [ [ "subject_id = 'nben'\n\nsubject = ny.freesurfer_subject(subject_id)\n\n# If you have configured the HCP credentials and wish to use an HCP\n# subject instead of nben:\n#\n#subject_id = 111312\n#subject = ny.hcp_subject(subject_id)", "_____no_output_____" ] ], [ [ "The `freesurfer_subject` function returns a `neuropythy` `Subject` object.", "_____no_output_____" ] ], [ [ "subject", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Load an MRImage file.", "_____no_output_____" ], [ "---\n\nLet's load in an image file. FreeSurfer directories contain a subdirectory `mri/` that contains all of the volumetric/image data for the subject. This includes images that have been preprocessed as well as copies of the original T1-weighted image. We will load an image called `T1.mgz`.", "_____no_output_____" ] ], [ [ "# This function will load data from a subject's directory using neuropythy's\n# builtin ny.load() function; in most cases, this calls down to nibabel's own\n# nib.load() function.\nim = subject.load('mri/T1.mgz')\n\n# For an HCP subject, use this file instead:\n#im = subject.load(\"T1w/T1w_acpc_dc.nii.gz\")\n\n# The return value should be a nibabel image object.\nim", "_____no_output_____" ], [ "# In fact, we could just as easily have loaded the same object using nibabel:\nim_from_nibabel = nib.load(subject.path + '/mri/T1.mgz')\nprint('From neuropythy: ', im.get_filename())\nprint('From nibabel: ', im_from_nibabel.get_filename())", "_____no_output_____" ], [ "# And neuropythy manages this image as part of the subject-data. Neuropythy's\n# name for it is 'intensity_normalized', which is due to its position as an \n# output in FreeSurfer's processing pipeline.\nny_im = subject.images['intensity_normalized']\n(ny_im.dataobj == im.dataobj).all()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Visualize some slices of the image.", "_____no_output_____" ], [ "---\n\nNext, we will make 2D plots of some of the image slices. Feel free to change which slices you visualize; I have just chosen some defaults.", "_____no_output_____" ] ], [ [ "# What axis do we want to plot slices along? 0, 1, or 2 (for the first, second,\n# or third 3D image axis).\naxis = 2\n# Which slices along this axis should we plot? These must be at least 0 and at\n# most 255 (There are 256 slices in each dimension of these images).\nslices = [75, 125, 175]\n\n# Make a figure and axes using matplotlib.pyplot:\n(fig, axes) = plt.subplots(1, len(slices), figsize=(5, 5/len(slices)), dpi=144)\n# Plot each of the slices:\nfor (ax, slice_num) in zip(axes, slices):\n # Get the slice:\n if axis == 0:\n imslice = im.dataobj[slice_num,:,:]\n elif axis == 1:\n imslice = im.dataobj[:,slice_num,:]\n else:\n imslice = im.dataobj[:,:,slice_num]\n ax.imshow(imslice, cmap='gray')\n # Turn off labels:\n ax.axis('off')", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Visualize the 3D Image as a whole.", "_____no_output_____" ], [ "---\n\nNext we will use `ipyvolume` to render a 3D View of the volume. 
The volume plotting function is part of `ipyvolume` and has a variety of options that are beyond the scope of this demo.", "_____no_output_____" ] ], [ [ "# Note that this will generate a warning, which can be safely ignored.\nfig = ipv.figure()\nipv.quickvolshow(subject.images['intensity_normalized'].dataobj)\nipv.show()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Load and visualize anatomical segments.", "_____no_output_____" ], [ "---\n\nFreeSurfer creates a segmentation image file called `aseg.mgz`, which we can load and use to identify ROIs. First, we will load this file and plot some slices from it.", "_____no_output_____" ] ], [ [ "# First load the file; any of these lines will work:\n#aseg = subject.load('mri/aseg.mgz')\n#aseg = nib.load(subject.path + '/mri/aseg.mgz')\naseg = subject.images['segmentation']", "_____no_output_____" ] ], [ [ "We can plot this as-is, but we don't know what the values in the numbers correspond to. Nonetheless, let's go ahead. This code block is the same as the block we used to plot slices above except that it uses the new image `aseg` we just loaded.", "_____no_output_____" ] ], [ [ "# What axis do we want to plot slices along? 0, 1, or 2 (for the first, second,\n# or third 3D image axis).\naxis = 2\n# Which slices along this axis should we plot? These must be at least 0 and at\n# most 255 (There are 256 slices in each dimension of these images).\nslices = [75, 125, 175]\n\n# Make a figure and axes using matplotlib.pyplot:\n(fig, axes) = plt.subplots(1, len(slices), figsize=(5, 5/len(slices)), dpi=144)\n# Plot each of the slices:\nfor (ax, slice_num) in zip(axes, slices):\n # Get the slice:\n if axis == 0:\n imslice = aseg.dataobj[slice_num,:,:]\n elif axis == 1:\n imslice = aseg.dataobj[:,slice_num,:]\n else:\n imslice = aseg.dataobj[:,:,slice_num]\n ax.imshow(imslice, cmap='gray')\n # Turn off labels:\n ax.axis('off')", "_____no_output_____" ] ], [ [ "Clearly, the balues in the plots above are discretized, but it's not clear what they correspond to. The map of numbers to characters and colors can be found in the various FreeSurfer color LUT files. These are all located in the FreeSurfer home directory and end with `LUT.txt`. They are essentially spreadsheets and are loaded by `neuropythy` as `pandas.DataFrame` objects. In `neuropythy`, the LUT objects are associated with the `'freesurfer_home'` configuration variable. This has been setup automatically in the course and the `neuropythy` docker-image.", "_____no_output_____" ] ], [ [ "ny.config['freesurfer_home'].luts['aseg']", "_____no_output_____" ] ], [ [ "So suppose we want to look at left cerebral cortex. In the table, this has value 3. 
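Rather than scanning the table by eye, we could also look the ID up programmatically. The following is only a small sketch and assumes the LUT DataFrame keeps its region names in a column called `name` (check `lut.columns` for the actual column name):\n\n```python\nlut = ny.config['freesurfer_home'].luts['aseg']\n# Hypothetical lookup: select the row whose name matches, then take its index (the label ID).\nlabel_id = lut.index[lut['name'] == 'Left-Cerebral-Cortex'][0]\nprint(label_id)  # should print 3 if the table matches the standard FreeSurfer LUT\n```\n\n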
We can find this value in the images we are plotting and plot only it to see the ROI in each the slices we plot.", "_____no_output_____" ] ], [ [ "# We want to plot left cerebral cortex (label ID = 3, per the LUT)\nlabel = 3\n\n(fig, axes) = plt.subplots(1, len(slices), figsize=(5, 5/len(slices)), dpi=144)\n# Plot each of the slices:\nfor (ax, slice_num) in zip(axes, slices):\n # Get the slice:\n if axis == 0:\n imslice = aseg.dataobj[slice_num,:,:]\n elif axis == 1:\n imslice = aseg.dataobj[:,slice_num,:]\n else:\n imslice = aseg.dataobj[:,:,slice_num]\n # Plot only the values that are equal to the label ID.\n imslice = (imslice == label)\n ax.imshow(imslice, cmap='gray')\n # Turn off labels:\n ax.axis('off')", "_____no_output_____" ] ], [ [ "By plotting the LH cortex specifically, we can see that LEFT is in the direction of increasing rows (down the image slices, if you used `axis = 2`), thus RIGHT must be in the direction of decreasing rows in the image.", "_____no_output_____" ], [ "Let's also make some images from these slices in which we replace each of the pixels in each slice with the color recommended by the color LUT.", "_____no_output_____" ] ], [ [ "# We are using this color LUT:\nlut = ny.config['freesurfer_home'].luts['aseg']\n# The axis:\naxis = 2\n\n(fig, axes) = plt.subplots(1, len(slices), figsize=(5, 5/len(slices)), dpi=144)\n# Plot each of the slices:\nfor (ax, slice_num) in zip(axes, slices):\n # Get the slice:\n if axis == 0:\n imslice = aseg.dataobj[slice_num,:,:]\n elif axis == 1:\n imslice = aseg.dataobj[:,slice_num,:]\n else:\n imslice = aseg.dataobj[:,:,slice_num]\n # Convert the slice into an RGBA image using the color LUT:\n rgba_im = np.zeros(imslice.shape + (4,))\n for (label_id, row) in lut.iterrows():\n rgba_im[imslice == label_id,:] = row['color']\n ax.imshow(rgba_im)\n # Turn off labels:\n ax.axis('off')", "_____no_output_____" ] ], [ [ "## Cortical Surface Data", "_____no_output_____" ], [ "Cortical surface data is handled and represented much differently than volumetric data. 
This section demonstrates how to interact with cortical surface data in a Jupyter notebook, primarily using `neuropythy`.\n\nTo start off, however, we will just load a surface file using `nibabel` to see what one contains.\n\n---", "_____no_output_____" ], [ "### Load a Surface-Geometry File Using `nibabel`", "_____no_output_____" ], [ "---", "_____no_output_____" ] ], [ [ "# Each subject has a number of surface files; we will look at the\n# left hemisphere, white surface.\nhemi = 'lh'\nsurf = 'white'\n# Feel free to change hemi to 'rh' for the RH and surf to 'pial'\n# or 'inflated'.\n\n# We load the surface from the subject's 'surf' directory in FreeSurfer.\n# Nibabel refers to these files as \"geometry\" files.\nfilename = subject.path + f'/surf/{hemi}.{surf}'\n# If you are using an HCP subject, you should instead load from this path:\n#relpath = f'T1w/{subject.name}/surf/{hemi}.{surf}'\n#filename = subject.pseudo_path.local_path(relpath)\n\n# Read the file, using nibabel.\nsurface_data = nib.freesurfer.read_geometry(filename)\n\n# What does this return?\nsurface_data", "_____no_output_____" ] ], [ [ "So when `nibabel` reads in one of these surface files, what we get back is an `n x 3` matrix of real numbers (coordiantes) and an `m x 3` matrix of integers (triangle indices).\n\nThe `ipyvolume` module has support for plotting triangle meshes--let's see how it works.", "_____no_output_____" ] ], [ [ "# Extract the coordinates and triangle-faces.\n(coords, faces) = surface_data\n# And get the (x,y,z) from coordinates.\n(x, y, z) = coords.T\n\n# Now, plot the triangle mesh.\nfig = ipv.figure()\nipv.plot_trisurf(x, y, z, triangles=faces)\n# Adjust the plot limits (making them equal makes the plot look good).\nipv.pylab.xlim(-100,100)\nipv.pylab.ylim(-100,100)\nipv.pylab.zlim(-100,100)\n# Generally, one must call show() with ipyvolume.\nipv.show()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Hemisphere (`neuropythy.mri.Cortex`) objects", "_____no_output_____" ], [ "---\n\nAlthough one can load and plot cortical surfaces with `nibabel`, `neuropythy` builds on `nibabel` by providing a framework around which the cortical surface can be represented. It includes a number of utilities related specifically to cortical surface analysis, and allows much of the power of FreeSurfer to be leveraged through simple Python data structures.\n\nTo start with, we will look at our subject's hemispheres (`neuropythy.mri.Cortex` objects) and how they represent surfaces.", "_____no_output_____" ] ], [ [ "# Grab the hemisphere for our subject.\ncortex = subject.hemis[hemi]\n# Note that `cortex = subject.lh` and `cortex = subject.rh` are equivalent\n# to `cortex = subject.hemis['lh']` and `cortex = subject.hemis['rh']`.\n\n# What is cortex?\ncortex", "_____no_output_____" ] ], [ [ "From this we can see which hemisphere we have selected, the number of triangle faces that it has, and the number of vertices that it has. Let's look at a few of its' properties.", "_____no_output_____" ], [ "#### Surfaces\n\nEach hemisphere has a number of surfaces; we can view them through the `cortex.surfaces` dictionary.", "_____no_output_____" ] ], [ [ "cortex.surfaces.keys()", "_____no_output_____" ], [ "cortex.surfaces['white_smooth']", "_____no_output_____" ] ], [ [ "The `'white_smooth'` mesh is a well-processed mesh of the white surface that has been well-smoothed. You might notice that there is a `'midgray'` surface, even though FreeSurfer does not include a mid-gray mesh file. 
The `'midgray'` mesh, however, can be made by averaging the white and pial mesh vertices.\n\nRecall that all surfaces of a hemisphere have equivalent vertices and identical triangles. We can test that here.", "_____no_output_____" ] ], [ [ "np.array_equal(cortex.surfaces['white'].tess.faces,\n               cortex.surfaces['pial'].tess.faces)", "_____no_output_____" ] ], [ [ "Surfaces track a large amount of data about their meshes and vertices and inherit most of the properties of hemispheres that are discussed below. In addition, surfaces uniquely carry data about cortical distances and surface areas. For example:", "_____no_output_____" ] ], [ [ "# The area of each of the triangle-faces in the white surface mesh, in mm^2.\ncortex.surfaces['white'].face_areas", "_____no_output_____" ], [ "# The length of each edge in the white surface mesh, in mm.\ncortex.surfaces['white'].edge_lengths", "_____no_output_____" ], [ "# And the edges themselves, as indices like the faces.\ncortex.surfaces['white'].tess.edges", "_____no_output_____" ] ], [ [ "#### Vertex Properties", "_____no_output_____" ], [ "Properties are values assigned to each surface vertex. They can include anatomical or geometric properties, such as ROI labels (i.e., a vector of values for each vertex: `True` if the vertex is in the ROI and `False` if not), cortical thickness (in mm), the vertex surface-area (in square mm), the curvature, or data from other functional measurements, such as BOLD-time-series data or source-localized MEG data.\n\nThe properties of a hemisphere are stored in the `properties` value. `Cortex.properties` is a kind of dictionary object and can generally be treated as a dictionary. One can also access property vectors via `cortex.prop(property_name)` rather than `cortex.properties[property_name]`; the former is largely short-hand for the latter.", "_____no_output_____" ] ], [ [ "sorted(cortex.properties.keys())", "_____no_output_____" ] ], [ [ "A few things worth noting: First, not all FreeSurfer subjects will have all of the properties listed. This is because different versions of FreeSurfer include different files, and sometimes subjects are distributed without their full set of files (e.g., to save storage space). However, rather than go and try to load all of these files right away, `neuropythy` makes place-holders for them and loads them only when first requested (this saves on loading time drastically). Accordingly, if you try to use a property whose file doesn't exist, an exception will be raised.\n\nAdditionally, notice that the first several properties are for Brodmann Area labels. The ones ending in `_label` are `True` / `False` boolean labels indicating whether the vertex is in the given ROI (according to an estimation based on anatomy). The subject we are using in the Jupyterhub environment does not actually have these files included, but they do have, for example, `BA1_weight` files. The weights represent the probability that a vertex is in the associated ROI, so we can make a label from this.", "_____no_output_____" ] ], [ [ "ba1_label = cortex.prop('BA1_weight') >= 0.5", "_____no_output_____" ] ], [ [ "We can now plot this property using `neuropythy`'s `cortex_plot()` function.", "_____no_output_____" ] ], [ [ "ny.cortex_plot(cortex.surfaces['white'], color=ba1_label)", "_____no_output_____" ] ], [ [ "**Improving this plot.** While this plot shows us where the ROI is, it's rather hard to interpret. Rather, we would prefer to plot the ROI in red and the rest of the brain using a binarized curvature map. 
`neuropythy` supports this kind of binarized curvature map as a default underlay, so, in fact, the easiest way to accomplish this is to tell `cortex_plot` to color the surface red, but to add a vertex mask that instructs the function to *only* color the ROI vertices.\n\nAdditionally, it is easier to see the inflated surface, so we will switch to that.", "_____no_output_____" ] ], [ [ "ny.cortex_plot(cortex.surfaces['inflated'], color='r', mask=ba1_label)", "_____no_output_____" ] ], [ [ "We can optionally make this red ROI plot a little bit transparent as well.", "_____no_output_____" ] ], [ [ "ny.cortex_plot(cortex.surfaces['inflated'], color='r', mask=ba1_label, alpha=0.4)", "_____no_output_____" ] ], [ [ "**Plotting the weight instead of the label.** Alternately, we might have wanted to plot the weight / probability of the ROI. Continuous properties like probability can be plotted using color-maps, similar to how they are plotted in `matplotlib`.", "_____no_output_____" ] ], [ [ "ny.cortex_plot(cortex.surfaces['inflated'], color='BA1_weight',\n cmap='hot', vmin=0, vmax=1, alpha=0.6)", "_____no_output_____" ] ], [ [ "**Another property.** Other properties can be very informative. For example, the cortical thickness property, which is stored in mm. This can tell us the parts of the brain that are thick or not thick.", "_____no_output_____" ] ], [ [ "ny.cortex_plot(cortex.surfaces['inflated'], color='thickness',\n cmap='hot', vmin=1, vmax=6)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Interpolation (Surface to Image and Image to Surface)", "_____no_output_____" ], [ "---\n\nHemisphere/Cortex objects also manage interpolation, both to/from image volumes as well as to/from the cortical surfaces of other subjects (we will demo interpolation between subjects in the last section). Here we will focus on the former: interpolation to and from images.\n\n**Cortex to Image Interpolation.**\nBecause our subjects only have structural data and do not have functional data, we do not have anything handy to interpolate out of a volume onto a surface. So instead, we will start by innterpolating from the cortex into the volume. A good property for this is the subject's cortical thickness. Thickness is difficult to calculate in the volume, so if one wants thickness data in a volume, it would typically be calculated using surface meshes then projected back into the volume. We will do that now.\n\nNote that in order to create a new image, we have to provide the interpolation method with some information about how the image is oriented and shaped. This includes two critical pieces of information: the `'image_shape'` (i.e., the `numpy.shape` of the image's array) and the `'affine'`, which is simply the affine-transformation that aligns the image with the subject. Usually, it is easiest to provide this information in the form of a template image. For all kinds of subjects (HCP and FreeSurfer), an image is correctly aligned with a subject and thus the subject's cortical surfaces if its affine transfomation correctly aligns it with `subject.images['brain']`. 
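If we ever want to sanity-check that an image sits on the same grid as the subject, we can compare the affines directly. This is only a sketch for the common case where the two images share a voxel grid; here `candidate_im` stands in for whatever nibabel image we want to test:\n\n```python\nimport numpy as np\n# True when the candidate image shares the brain image's voxel-to-world affine.\nnp.allclose(candidate_im.affine, subject.images['brain'].affine)\n```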
", "_____no_output_____" ] ], [ [ "# We need a template image; the new image will have the same shape,\n# affine, image type, and hader as the template image.\ntemplate_im = subject.images['brain']\n# We can use just the template's header for this.\ntemplate = template_im.header\n# We can alternately just provide information about the image geometry:\n#template = {'image_shape': (256,256,256), 'affine': template_im.affine}\n# Alternately, we can provide an actual image into which the data will\n# be inserted. In this case, we would want to make a cleared-duplicate\n# of the brain image (i.e. all voxels set to 0)\n#template = ny.image_clear(template_im)\n# All of the above templates should provide the same result.\n\n# We are going to save the property from both hemispheres into an image.\nlh_prop = subject.lh.prop('thickness')\nrh_prop = subject.rh.prop('thickness')\n\n# This may be either 'linear' or 'nearest'; for thickness 'linear'\n# is probably best, but the difference will be small.\nmethod = 'linear'\n\n# Do the interpolation. This may take a few minutes the first time it is run.\nnew_im = subject.cortex_to_image((lh_prop, rh_prop), template, method=method,\n # The template is integer, so we override it.\n dtype='float')", "_____no_output_____" ] ], [ [ "Now that we have made this new image, let's take a look at it by plotting some slices from it, once again.", "_____no_output_____" ] ], [ [ "# What axis do we want to plot slices along? 0, 1, or 2 (for the first, second,\n# or third 3D image axis).\naxis = 2\n# Which slices along this axis should we plot? These must be at least 0 and at\n# most 255 (There are 256 slices in each dimension of these images).\nslices = [75, 125, 175]\n\n# Make a figure and axes using matplotlib.pyplot:\n(fig, axes) = plt.subplots(1, len(slices), figsize=(5, 5/len(slices)), dpi=144)\n# Plot each of the slices:\nfor (ax, slice_num) in zip(axes, slices):\n # Get the slice:\n if axis == 0:\n imslice = new_im.dataobj[slice_num,:,:]\n elif axis == 1:\n imslice = new_im.dataobj[:,slice_num,:]\n else:\n imslice = new_im.dataobj[:,:,slice_num]\n ax.imshow(imslice, cmap='hot', vmin=0, vmax=6)\n # Turn off labels:\n ax.axis('off')", "_____no_output_____" ] ], [ [ "**Image to Cortex Interpolation.** A good test of our interpolation methods is now to ensure that, when we interpolate data from the image we just created back to the cortex, we get approximately the same values. 
The values we interpolate back out of the volume will not be identical to the volumes we started with because the resolution of the image is finite, but they should be close.\n\nThe `image_to_cortex()` method of the `Subject` class is capable of interpolating from an image to the cortical surface(s), based on the alignment of the image with the cortex.", "_____no_output_____" ] ], [ [ "(lh_prop_interp, rh_prop_interp) = subject.image_to_cortex(new_im, method=method)", "_____no_output_____" ] ], [ [ "We can plot the hemispheres together to visualize the difference between the original thickenss and the thickness that was interpolated into an image then back onto the cortex.", "_____no_output_____" ] ], [ [ "fig = ny.cortex_plot(subject.lh, surface='midgray',\n color=(lh_prop_interp - lh_prop)**2,\n cmap='hot', vmin=0, vmax=2)\nfig = ny.cortex_plot(subject.rh, surface='midgray',\n color=(rh_prop_interp - rh_prop)**2,\n cmap='hot', vmin=0, vmax=2,\n figure=fig)\n\nipv.show()", "_____no_output_____" ] ], [ [ "## Intersubject Surface Alignment", "_____no_output_____" ], [ "Comparison between multiple subjects is usually accomplished by first aligning each subject's cortical surface with that of a template surface (*fsaverage* in FreeSurfer, *fs_LR* in the HCP), then interpolating between vertices in the aligned arrangements. The alignment to the template are calculated and saved by FreeSurfer, the HCPpipelines, and various other utilities, but as of when this tutorial was written, `neuropythy` only supports these first two formats. Alignments are calculated by warping the vertices of the subject's spherical (fully inflated) hemisphere in a diffeomorphic fashion with the goal of minimizing the difference between the sulcal topology (curvature and depth) of the subject's vertices and that of the nearby *fsaverage* vertices. The process involves a number of steps, and any who are interested should follow up with the various documentations and papers published by the [FreeSurfer group](https://surfer.nmr.mgh.harvard.edu/).\n\nFor practical purposes, it is not necessary to understand the details of this algorithm--FreeSurfer is a large complex collection of software that has been under development for decades. However, to better understand what is produced by FreeSurfer's alignment procedure, let us start by looking at its outputs.\n\n---", "_____no_output_____" ], [ "### Compare Subject Registrations", "_____no_output_____" ], [ "---\n\nTo better understand the various spherical surfaces produced by FreeSurfer, let's start by plotting three spherical surfaces in 3D. The first will be the subject's \"native\" inflated spherical surface. The next will be the subjects \"fsaverage\"-aligned sphere. The last will be The *fsaverage* subject's native sphere.\n\nThese spheres are accessed not through the `subject.surfaces` dictionary but through the `subject.registrations` dictionary. 
This is simply a design decision--registrations and surfaces are not fundamentally different except that registrations can be used for interpolation between subjects (more below).\n\nNote that you may need to zoom out once the plot has been made.", "_____no_output_____" ] ], [ [ "# Get the fsaverage subject.\nfsaverage = ny.freesurfer_subject('fsaverage')\n\n# Get the hemispheres we will be examining.\nfsa_hemi = fsaverage.hemis[hemi]\nsub_hemi = subject.hemis[hemi]\n\n# Next, get the three registrations we want to plot.\nsub_native_reg = sub_hemi.registrations['native']\nsub_fsaverage_reg = sub_hemi.registrations['fsaverage']\nfsa_native_reg = fsa_hemi.registrations['native']\n\n# We want to plot them all three together in one scene, so to do this\n# we need to translate two of them a bit along the x-axis.\nsub_native_reg = sub_native_reg.translate([-225,0,0])\nfsa_native_reg = fsa_native_reg.translate([ 225,0,0])\n\n# Now plot them all.\nfig = ipv.figure(width=900, height=300)\nny.cortex_plot(sub_native_reg, figure=fig)\nny.cortex_plot(fsa_native_reg, figure=fig)\nny.cortex_plot(sub_fsaverage_reg, figure=fig)\n\nipv.show()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Interpolate Between Subjects", "_____no_output_____" ], [ "---\n\nInterpolation between subjects requires interpolating between a shared registration. For a subject and the *fsaverage*, this is the subject's *fsaverage*-aligned registration and *fsaverage*'s native. However, for two non-meta subjects, the *fsaverage*-aligned registration of both subjects are used.\n\nWe will first show how to interpolate from a subject over to the **fsaverage**. This is a very valuable operation to be able to do as it allows you to compute statistics across subejcts of cortical surface data (such as BOLD activation data or source-localized MEG data).", "_____no_output_____" ] ], [ [ "# The property we're going to interpolate over to fsaverage:\nsub_prop = sub_hemi.prop('thickness')\n\n# The method we use ('nearest' or 'linear'):\nmethod = 'linear'\n\n# Interpolate the subject's thickness onto the fsaverage surface.\nfsa_prop = sub_hemi.interpolate(fsa_hemi, sub_prop, method=method)\n\n# Let's make a plot of this:\nny.cortex_plot(fsa_hemi, surface='inflated',\n color=fsa_prop, cmap='hot', vmin=0, vmax=6)", "_____no_output_____" ] ], [ [ "Okay, for our last exercise, let's interpolate back from the *fsaverage* subject to our subject. It is occasionally nice to be able to plot the *fsaverage*'s average curvature map as an underlay, so let's do that.", "_____no_output_____" ] ], [ [ "# This time we are going to interpolate curvature from the fsaverage\n# back to the subject. When the property we are interpolating is a\n# named property of the hemisphere, we can actually just specify it\n# by name in the interpolation call.\nfsa_curv_on_sub = fsa_hemi.interpolate(sub_hemi, 'curvature')\n\n# We can make a duplicate subject hemisphere with this new property\n# so that it's easy to plot this curvature map.\nsub_hemi_fsacurv = sub_hemi.with_prop(curvature=fsa_curv_on_sub)\n\n# Great, let's see what this looks like:\nny.cortex_plot(sub_hemi_fsacurv, surface='inflated')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06b996ca0b01d981709cf68d979e398f78d064b
8,929
ipynb
Jupyter Notebook
notebooks/Baye's Theorem Notebook.ipynb
Ritu7683/Statistics-and-Econometrics-for-Data-Science
e46f3a8db0ae83f5a403eba81ca78a4a5b531023
[ "MIT" ]
41
2020-12-05T20:15:47.000Z
2022-02-07T06:00:01.000Z
notebooks/Baye's Theorem Notebook.ipynb
Econometrics/Statistics-and-Econometrics-for-Data-Science
c7bf6ac053f0f7791b1fdbd5ed0e0d06387544a1
[ "MIT" ]
173
2020-11-29T18:37:22.000Z
2022-03-06T04:03:08.000Z
notebooks/Baye's Theorem Notebook.ipynb
Econometrics/Statistics-and-Econometrics-for-Data-Science
c7bf6ac053f0f7791b1fdbd5ed0e0d06387544a1
[ "MIT" ]
112
2020-12-04T12:40:23.000Z
2021-12-16T17:29:15.000Z
37.049793
446
0.534662
[ [ [ "# Baye's Theorem", "_____no_output_____" ], [ "### Introduction\n\nBefor starting with *Bayes Theorem* we can have a look at some definitions.\n\n**Conditional Probability :**\nConditional Probability is the Probability of one event occuring with some Relationship to one or more events.\nLet A and B be the two interdependent event,where A has already occured then the probabilty of B will be \n $$ P(B|A) = P(A \\cap B)|P(A) $$\n \n**Joint Probability :**\nJoint Probability is a Statistical measure that Calculates the Likehood of two events occuring together and at the same point in time.\n $$ P(A \\cap B) = P(A|B) * P(B) $$", "_____no_output_____" ], [ "### Bayes Theorem\n\nBayes Theorem was named after **Thomas Bayes**,who discovered it in **1763** and worked in the field of Decision Theory.\n\nBayes Theorem is a mathematical formula used to determine the **Conditional Probability** of events without the **Joint Probability**.\n\n**Statement**\n\nIf B$_{1}$, B$_{2}$, B$_{3}$,.....,B$_{n}$ are Mutually exclusive event with P(B$_{i}$) $\\not=$ 0 ,( i=1,2,3,...,n) of Random Experiment then for any Arbitrary event A of the Sample Space of the above Experiment with P(A)>0,we have\n\n$$ P(B_{i}|A) = P(B_{i})P(A|B_{i})/ \\sum\\limits_{i=1}^{n} P(B_{i})P(A|B_{i}) $$\n\n**Proof**\n\nLet S be the Sample Space of the Random Experiment.The Event B$_{1}$, B$_{2}$, B$_{3}$,.....,B$_{n}$ being Exhaustive\n$$ S = (B_{1} \\cup B_{2} \\cup ...\\cup B_{n}) \\hspace{1cm} \\hspace{0.1cm} [\\therefore A \\subset S] $$\n$$ A = A \\cap S = A \\cap ( B_{1} \\cup B_{2} \\cup B_{3},.....,\\cup B_{n}) $$\n$$ = (A \\cap B_{1}) \\cup (A \\cap B_{2}) \\cup ... \\cup (A \\cap B_{n}) $$\n\n$$ P(A) = P(A \\cap B_{1}) + P (A \\cap B_{2}) + ...+ P(A \\cap B_{n}) $$\n$$ \\hspace{3cm} \\hspace{0.1cm} = P(B_{1})P(A|B_{1}) + P(B_{2})P(A|B_{2}) + ... 
+P(B_{n})P(A|B_{n}) $$\n$$ = \\sum\\limits_{i=1}^{n} P(B_{i})P(A|B_{i}) $$\n\nNow,\n$$ P(A \\cap B_{i}) = P(A)P(B_{i}|A) $$\n$$ P(B_{i}|A) = P(A \\cap B_{i})/P(A) = P(B_{i})P(A|B_{i})/\\sum\\limits_{i=1}^{n} P(B_{i})P(A|B_{i}) $$\n\n**P(B)** is the Probability of occurence **B**.If we know that the event **A** has already occured.On knowing about the event **A**,**P(B)** is changed to **P(B|A)**.With the help of **Bayes Theorem we can Calculate P(B|A)**.\n\n**Naming Conventions :**\n\n<br>\nP(A/B) : Posterior Probability \n<br>\nP(A) : Prior Probability\n<br>\nP(B/A) : Likelihood\n<br>\nP(B) : Evidence \n<br>\nSo, Bayes Theorem can be Restated as :\n$$ Posterior = Likelihood * Prior / Evidence $$\n\n", "_____no_output_____" ], [ " Now we will be looking at some problem examples on Bayes Theorem.\n \n**Example 1** :Suppose that the reliability of a Covid-19 test is specified as follows:\n<br>\nOf Population having Covid-19 , 90% of the test detect the desire but 10% go undetected.Of Population free of Covid-19 , 99% of the test are judged Covid-19 -tive but 1% are diagnosed showing Covid-19 +tive.From a large population of which only 0.1% have Covid-19,one person is selected at Random,given the Covid-19 test,and the pathologist Report him/her as Covid-19 positive.What is the Probability that the person actually have Covid-19?", "_____no_output_____" ], [ "**Solution**<br>\nLet, <br>\nB$_{1}$ = The Person Selected is Actually having Covid-19.<br>\nB$_{2}$ = The Person Selected is not having Covid-19.<br>\nA = The Person Covid-19 Test is Diagnosed as Positive.<br>\n\nP(B$_{1}$) = 0.1% = 0.1/100 = 0.001<br>\nP(B$_{2}$) = 1-P(B$_{1}$) = 1-0.001 = 0.999<br>\nP(A|B$_{1}$) = Probability that the person tested Covid-19 +tive given that he / she is actually having Covid-19.= 90/100 = 0.9 <br>\nP(A|B$_{2}$) = Probability that the person tested Covid-19 +tive given that he / she is actually not having Covid-19.= 1/100 = 0.01 <br>\n\nRequired Probability = P(B$_{1}$|A) = P(B$_{1}$) * P(A|B$_{1}$)/ (((P(B$_{1}$) * P(A|B$_{1}$))+((P(B$_{2}$) * P(A|B$_{2}$)))<br>\n = (0.001 * 0.9)/(0.001 * 0.9+0.999 * 0.01) = 90/1089 =0.08264\n", "_____no_output_____" ], [ "We will Now use Python to calculate the same.", "_____no_output_____" ] ], [ [ "#calculate P(B1|A) given P(B1),P(A|B1),P(A|B2),P(B2)\ndef bayes_theorem(p_b1,p_a_given_b1,p_a_given_b2,p_b2):\n p_b1_given_a=(p_b1*p_a_given_b1)/((p_b1*p_a_given_b1)+(p_b2*p_a_given_b2))\n return p_b1_given_a\n\n#P(B1)\np_b1=0.001\n#P(B2)\np_b2=0.999\n#P(A|B1)\np_a_given_b1=0.9\n#P(A|B2)\np_a_given_b2=0.01\nresult=bayes_theorem(p_b1,p_a_given_b1,p_a_given_b2,p_b2)\nprint('P(B1|A)=% .3f %%'%(result*100))\n ", "P(B1|A)= 8.264 %\n" ] ], [ [ "**Example 2 :** In a Quiz,a contestant either guesses or cheat or knows the answer to a multiple choice question with four choices.The Probability that he/she makes a guess is 1/3 and the Probability that he /she cheats the answer is 1/6.The Probability that his answer is correct,given that he cheated it,is 1/8.Find the Probability that he knows the answer to the question,given that he/she correctly answered it.", "_____no_output_____" ], [ "**Solution**<br>\nLet, <br>\nB$_{1}$ = Contestant guesses the answer.<br>\nB$_{2}$ = Contestant cheated the answer.<br>\nB$_{3}$ = Contestant knows the answer.<br>\nA = Contestant answer correctly.<br>\nclearly,<br>\nP(B$_{1}$) = 1/3 , P(B$_{2}$) =1/6<br>\n\nSince B$_{1}$ ,B$_{2}$, B$_{3}$ are mutually exclusive and exhaustive event.\nP(B$_{1}$) + P(B$_{2}$) + P(B$_{3}$) = 1 => P(B$_{3}$) = 1 - 
(P(B$_{1}$) + P(B$_{2}$))\n=1-1/3-1/6=1/2\n\n\nIf B$_{1}$ has already occured,the contestant guesses,the there are four choices out of which only one is correct.<br>\n$\\therefore$ the Probability that he answers correctly given that he/she has made a guess is 1/4 i.e. **P(A|B$-{1}$) = 1/4**<br>\nIt is given that he knew the answer = 1<br>\nBy Bayes Theorem,<br>\nRequired Probability = P(B$_{3}$|A)<br>\n\n= P(B$_{3}$)P(A|B$_{3}$)/(P(B$_{1}$)P(A|B$_{1}$)+P(B$_{2}$)P(A|B$_{2}$)+P(B$_{3}$)P(A|B$_{3}$))\n= (1/2 * 1) / ((1/3 * 1/4) + (1/6 * 1/8) + (1/2 * 1))=24/29\n\n", "_____no_output_____" ] ], [ [ "#calculate P(B1|A) given P(B1),P(A|B1),P(A|B2),P(B2),P(B3),P(A|B3)\ndef bayes_theorem(p_b1,p_a_given_b1,p_a_given_b2,p_b2,p_b3,p_a_given_b3):\n p_b3_given_a=(p_b3*p_a_given_b3)/((p_b1*p_a_given_b1)+(p_b2*p_a_given_b2)+(p_b3*p_a_given_b3))\n return p_b3_given_a\n\n#P(B1)\np_b1=1/3\n#P(B2)\np_b2=1/6\n#P(B3)\np_b3=1/2\n#P(A|B1)\np_a_given_b1=1/4\n#P(A|B2)\np_a_given_b2=1/8\n#P(A|B3)\np_a_given_b3=1\nresult=bayes_theorem(p_b1,p_a_given_b1,p_a_given_b2,p_b2,p_b3,p_a_given_b3)\nprint('P(B3|A)=% .3f %%'%(result*100))\n ", "P(B3|A)= 82.759 %\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d06ba7ef01a6f838a53206aff884c3b7fdcbd3b8
60,432
ipynb
Jupyter Notebook
learning-python3.ipynb
lsst-epo/jupyter-presentation
2ec9b320e2074b4eb725fcae16be9a72b9c36026
[ "MIT" ]
1
2017-09-01T21:14:25.000Z
2017-09-01T21:14:25.000Z
learning-python3.ipynb
lsst-epo/jupyter-presentation
2ec9b320e2074b4eb725fcae16be9a72b9c36026
[ "MIT" ]
null
null
null
learning-python3.ipynb
lsst-epo/jupyter-presentation
2ec9b320e2074b4eb725fcae16be9a72b9c36026
[ "MIT" ]
null
null
null
22.168745
395
0.518484
[ [ [ "[learning-python3.ipynb]: https://gist.githubusercontent.com/kenjyco/69eeb503125035f21a9d/raw/learning-python3.ipynb\n\nRight-click -> \"save link as\" [https://gist.githubusercontent.com/kenjyco/69eeb503125035f21a9d/raw/learning-python3.ipynb][learning-python3.ipynb] to get most up-to-date version of this notebook file.\n\n## Quick note about Jupyter cells\n\nWhen you are editing a cell in Jupyter notebook, you need to re-run the cell by pressing **`<Shift> + <Enter>`**. This will allow changes you made to be available to other cells.\n\nUse **`<Enter>`** to make new lines inside a cell you are editing.\n\n#### Code cells\n\nRe-running will execute any statements you have written. To edit an existing code cell, click on it.\n\n#### Markdown cells\n\nRe-running will render the markdown text. To edit an existing markdown cell, double-click on it.\n\n<hr>", "_____no_output_____" ], [ "## Common Jupyter operations\n\nNear the top of the https://try.jupyter.org page, Jupyter provides a row of menu options (`File`, `Edit`, `View`, `Insert`, ...) and a row of tool bar icons (disk, plus sign, scissors, 2 files, clipboard and file, up arrow, ...).\n\n#### Inserting and removing cells\n\n- Use the \"plus sign\" icon to insert a cell below the currently selected cell\n- Use \"Insert\" -> \"Insert Cell Above\" from the menu to insert above\n\n#### Clear the output of all cells\n\n- Use \"Kernel\" -> \"Restart\" from the menu to restart the kernel\n - click on \"clear all outputs & restart\" to have all the output cleared\n\n#### Save your notebook file locally\n\n- Clear the output of all cells\n- Use \"File\" -> \"Download as\" -> \"IPython Notebook (.ipynb)\" to download a notebook file representing your https://try.jupyter.org session\n\n#### Load your notebook file in try.jupyter.org\n\n1. Visit https://try.jupyter.org\n2. Click the \"Upload\" button near the upper right corner\n3. Navigate your filesystem to find your `*.ipynb` file and click \"open\"\n4. Click the new \"upload\" button that appears next to your file name\n5. Click on your uploaded notebook file\n\n<hr>", "_____no_output_____" ], [ "## References\n\n- https://try.jupyter.org\n- https://docs.python.org/3/tutorial/index.html\n- https://docs.python.org/3/tutorial/introduction.html\n- https://daringfireball.net/projects/markdown/syntax\n\n<hr>", "_____no_output_____" ], [ "## Python objects, basic types, and variables\n\nEverything in Python is an **object** and every object in Python has a **type**. Some of the basic types include:\n\n- **`int`** (integer; a whole number with no decimal place)\n - `10`\n - `-3`\n- **`float`** (float; a number that has a decimal place)\n - `7.41`\n - `-0.006`\n- **`str`** (string; a sequence of characters enclosed in single quotes, double quotes, or triple quotes)\n - `'this is a string using single quotes'`\n - `\"this is a string using double quotes\"`\n - `'''this is a triple quoted string using single quotes'''`\n - `\"\"\"this is a triple quoted string using double quotes\"\"\"`\n- **`bool`** (boolean; a binary value that is either true or false)\n - `True`\n - `False`\n- **`NoneType`** (a special type representing the absence of a value)\n - `None`\n\nIn Python, a **variable** is a name you specify in your code that maps to a particular **object**, object **instance**, or value.\n\nBy defining variables, we can refer to things by names that make sense to us. Names for variables can only contain letters, underscores (`_`), or numbers (no spaces, dashes, or other characters). 
Variable names must start with a letter or underscore.\n\n<hr>", "_____no_output_____" ], [ "## Basic operators\n\nIn Python, there are different types of **operators** (special symbols) that operate on different values. Some of the basic operators include:\n\n- arithmetic operators\n - **`+`** (addition)\n - **`-`** (subtraction)\n - **`*`** (multiplication)\n - **`/`** (division)\n - __`**`__ (exponent)\n- assignment operators\n - **`=`** (assign a value)\n - **`+=`** (add and re-assign; increment)\n - **`-=`** (subtract and re-assign; decrement)\n - **`*=`** (multiply and re-assign)\n- comparison operators (return either `True` or `False`)\n - **`==`** (equal to)\n - **`!=`** (not equal to)\n - **`<`** (less than)\n - **`<=`** (less than or equal to)\n - **`>`** (greater than)\n - **`>=`** (greater than or equal to)\n\nWhen multiple operators are used in a single expression, **operator precedence** determines which parts of the expression are evaluated in which order. Operators with higher precedence are evaluated first (like PEMDAS in math). Operators with the same precedence are evaluated from left to right.\n\n- `()` parentheses, for grouping\n- `**` exponent\n- `*`, `/` multiplication and division\n- `+`, `-` addition and subtraction\n- `==`, `!=`, `<`, `<=`, `>`, `>=` comparisons\n\n> See https://docs.python.org/3/reference/expressions.html#operator-precedence", "_____no_output_____" ] ], [ [ "# Assigning some numbers to different variables\nnum1 = 10\nnum2 = -3\nnum3 = 7.41\nnum4 = -.6\nnum5 = 7\nnum6 = 3\nnum7 = 11.11", "_____no_output_____" ], [ "# Addition\nnum1 + num2", "_____no_output_____" ], [ "# Subtraction\nnum2 - num3", "_____no_output_____" ], [ "# Multiplication\nnum3 * num4", "_____no_output_____" ], [ "# Division\nnum4 / num5", "_____no_output_____" ], [ "# Exponent\nnum5 ** num6", "_____no_output_____" ], [ "# Increment existing variable\nnum7 += 4\nnum7", "_____no_output_____" ], [ "# Decrement existing variable\nnum6 -= 2\nnum6", "_____no_output_____" ], [ "# Multiply & re-assign\nnum3 *= 5\nnum3", "_____no_output_____" ], [ "# Assign the value of an expression to a variable\nnum8 = num1 + num2 * num3\nnum8", "_____no_output_____" ], [ "# Are these two expressions equal to each other?\nnum1 + num2 == num5", "_____no_output_____" ], [ "# Are these two expressions not equal to each other?\nnum3 != num4", "_____no_output_____" ], [ "# Is the first expression less than the second expression?\nnum5 < num6", "_____no_output_____" ], [ "# Is this expression True?\n5 > 3 > 1", "_____no_output_____" ], [ "# Is this expression True?\n5 > 3 < 4 == 3 + 1", "_____no_output_____" ], [ "# Assign some strings to different variables\nsimple_string1 = 'an example'\nsimple_string2 = \"oranges \"", "_____no_output_____" ], [ "# Addition\nsimple_string1 + ' of using the + operator'", "_____no_output_____" ], [ "# Notice that the string was not modified\nsimple_string1", "_____no_output_____" ], [ "# Multiplication\nsimple_string2 * 4", "_____no_output_____" ], [ "# This string wasn't modified either\nsimple_string2", "_____no_output_____" ], [ "# Are these two expressions equal to each other?\nsimple_string1 == simple_string2", "_____no_output_____" ], [ "# Are these two expressions equal to each other?\nsimple_string1 == 'an example'", "_____no_output_____" ], [ "# Add and re-assign\nsimple_string1 += ' that re-assigned the original string'\nsimple_string1", "_____no_output_____" ], [ "# Multiply and re-assign\nsimple_string2 *= 3\nsimple_string2", "_____no_output_____" ], [ "# Note: 
Subtraction, division, and decrement operators do not apply to strings.", "_____no_output_____" ] ], [ [ "## Basic containers\n\n> Note: **mutable** objects can be modified after creation and **immutable** objects cannot.\n\nContainers are objects that can be used to group other objects together. The basic container types include:\n\n- **`str`** (string: immutable; indexed by integers; items are stored in the order they were added)\n- **`list`** (list: mutable; indexed by integers; items are stored in the order they were added)\n - `[3, 5, 6, 3, 'dog', 'cat', False]`\n- **`tuple`** (tuple: immutable; indexed by integers; items are stored in the order they were added)\n - `(3, 5, 6, 3, 'dog', 'cat', False)`\n- **`set`** (set: mutable; not indexed at all; items are NOT stored in the order they were added; can only contain immutable objects; does NOT contain duplicate objects)\n - `{3, 5, 6, 3, 'dog', 'cat', False}`\n- **`dict`** (dictionary: mutable; key-value pairs are indexed by immutable keys; items are NOT stored in the order they were added)\n - `{'name': 'Jane', 'age': 23, 'fav_foods': ['pizza', 'fruit', 'fish']}`\n\nWhen defining lists, tuples, or sets, use commas (,) to separate the individual items. When defining dicts, use a colon (:) to separate keys from values and commas (,) to separate the key-value pairs.\n\nStrings, lists, and tuples are all **sequence types** that can use the `+`, `*`, `+=`, and `*=` operators.", "_____no_output_____" ] ], [ [ "# Assign some containers to different variables\nlist1 = [3, 5, 6, 3, 'dog', 'cat', False]\ntuple1 = (3, 5, 6, 3, 'dog', 'cat', False)\nset1 = {3, 5, 6, 3, 'dog', 'cat', False}\ndict1 = {'name': 'Jane', 'age': 23, 'fav_foods': ['pizza', 'fruit', 'fish']}", "_____no_output_____" ], [ "# Items in the list object are stored in the order they were added\nlist1", "_____no_output_____" ], [ "# Items in the tuple object are stored in the order they were added\ntuple1", "_____no_output_____" ], [ "# Items in the set object are not stored in the order they were added\n# Also, notice that the value 3 only appears once in this set object\nset1", "_____no_output_____" ], [ "# Items in the dict object are not stored in the order they were added\ndict1", "_____no_output_____" ], [ "# Add and re-assign\nlist1 += [5, 'grapes']\nlist1", "_____no_output_____" ], [ "# Add and re-assign\ntuple1 += (5, 'grapes')\ntuple1", "_____no_output_____" ], [ "# Multiply\n[1, 2, 3, 4] * 2", "_____no_output_____" ], [ "# Multiply\n(1, 2, 3, 4) * 3", "_____no_output_____" ] ], [ [ "## Accessing data in containers\n\nFor strings, lists, tuples, and dicts, we can use **subscript notation** (square brackets) to access data at an index.\n\n- strings, lists, and tuples are indexed by integers, **starting at 0** for first item\n - these sequence types also support accesing a range of items, known as **slicing**\n - use **negative indexing** to start at the back of the sequence\n- dicts are indexed by their keys\n\n> Note: sets are not indexed, so we cannot use subscript notation to access data elements.", "_____no_output_____" ] ], [ [ "# Access the first item in a sequence\nlist1[0]", "_____no_output_____" ], [ "# Access the last item in a sequence\ntuple1[-1]", "_____no_output_____" ], [ "# Access a range of items in a sequence\nsimple_string1[3:8]", "_____no_output_____" ], [ "# Access a range of items in a sequence\ntuple1[:-3]", "_____no_output_____" ], [ "# Access a range of items in a sequence\nlist1[4:]", "_____no_output_____" ], [ "# Access an item in a 
dictionary\ndict1['name']", "_____no_output_____" ], [ "# Access an element of a sequence in a dictionary\ndict1['fav_foods'][2]", "_____no_output_____" ] ], [ [ "## Python built-in functions and callables\n\nA **function** is a Python object that you can \"call\" to **perform an action** or compute and **return another object**. You call a function by placing parentheses to the right of the function name. Some functions allow you to pass **arguments** inside the parentheses (separating multiple arguments with a comma). Internal to the function, these arguments are treated like variables.\n\nPython has several useful built-in functions to help you work with different objects and/or your environment. Here is a small sample of them:\n\n- **`type(obj)`** to determine the type of an object\n- **`len(container)`** to determine how many items are in a container\n- **`callable(obj)`** to determine if an object is callable\n- **`sorted(container)`** to return a new list from a container, with the items sorted\n- **`sum(container)`** to compute the sum of a container of numbers\n- **`min(container)`** to determine the smallest item in a container\n- **`max(container)`** to determine the largest item in a container\n- **`abs(number)`** to determine the absolute value of a number\n- **`repr(obj)`** to return a string representation of an object\n\n> Complete list of built-in functions: https://docs.python.org/3/library/functions.html\n\nThere are also different ways of defining your own functions and callable objects that we will explore later.", "_____no_output_____" ] ], [ [ "# Use the type() function to determine the type of an object\ntype(simple_string1)", "_____no_output_____" ], [ "# Use the len() function to determine how many items are in a container\nlen(dict1)", "_____no_output_____" ], [ "# Use the len() function to determine how many items are in a container\nlen(simple_string2)", "_____no_output_____" ], [ "# Use the callable() function to determine if an object is callable\ncallable(len)", "_____no_output_____" ], [ "# Use the callable() function to determine if an object is callable\ncallable(dict1)", "_____no_output_____" ], [ "# Use the sorted() function to return a new list from a container, with the items sorted\nsorted([10, 1, 3.6, 7, 5, 2, -3])", "_____no_output_____" ], [ "# Use the sorted() function to return a new list from a container, with the items sorted\n# - notice that capitalized strings come first\nsorted(['dogs', 'cats', 'zebras', 'Chicago', 'California', 'ants', 'mice'])", "_____no_output_____" ], [ "# Use the sum() function to compute the sum of a container of numbers\nsum([10, 1, 3.6, 7, 5, 2, -3])", "_____no_output_____" ], [ "# Use the min() function to determine the smallest item in a container\nmin([10, 1, 3.6, 7, 5, 2, -3])", "_____no_output_____" ], [ "# Use the min() function to determine the smallest item in a container\nmin(['g', 'z', 'a', 'y'])", "_____no_output_____" ], [ "# Use the max() function to determine the largest item in a container\nmax([10, 1, 3.6, 7, 5, 2, -3])", "_____no_output_____" ], [ "# Use the max() function to determine the largest item in a container\nmax('gibberish')", "_____no_output_____" ], [ "# Use the abs() function to determine the absolute value of a number\nabs(10)", "_____no_output_____" ], [ "# Use the abs() function to determine the absolute value of a number\nabs(-12)", "_____no_output_____" ], [ "# Use the repr() function to return a string representation of an object\nrepr(set1)", "_____no_output_____" ] ], [ [ "## 
Python object attributes (methods and properties)\n\nDifferent types of objects in Python have different **attributes** that can be referred to by name (similar to a variable). To access an attribute of an object, use a dot (`.`) after the object, then specify the attribute (i.e. `obj.attribute`)\n\nWhen an attribute of an object is a callable, that attribute is called a **method**. It is the same as a function, only this function is bound to a particular object.\n\nWhen an attribute of an object is not a callable, that attribute is called a **property**. It is just a piece of data about the object, that is itself another object.\n\nThe built-in `dir()` function can be used to return a list of an object's attributes.\n\n<hr>", "_____no_output_____" ], [ "## Some methods on string objects\n\n- **`.capitalize()`** to return a capitalized version of the string (only first char uppercase)\n- **`.upper()`** to return an uppercase version of the string (all chars uppercase)\n- **`.lower()`** to return an lowercase version of the string (all chars lowercase)\n- **`.count(substring)`** to return the number of occurences of the substring in the string\n- **`.startswith(substring)`** to determine if the string starts with the substring\n- **`.endswith(substring)`** to determine if the string ends with the substring\n- **`.replace(old, new)`** to return a copy of the string with occurences of the \"old\" replaced by \"new\"", "_____no_output_____" ] ], [ [ "# Assign a string to a variable\na_string = 'tHis is a sTriNg'", "_____no_output_____" ], [ "# Return a capitalized version of the string\na_string.capitalize()", "_____no_output_____" ], [ "# Return an uppercase version of the string\na_string.upper()", "_____no_output_____" ], [ "# Return a lowercase version of the string\na_string.lower()", "_____no_output_____" ], [ "# Notice that the methods called have not actually modified the string\na_string", "_____no_output_____" ], [ "# Count number of occurences of a substring in the string\na_string.count('i')", "_____no_output_____" ], [ "# Count number of occurences of a substring in the string after a certain position\na_string.count('i', 7)", "_____no_output_____" ], [ "# Count number of occurences of a substring in the string\na_string.count('is')", "_____no_output_____" ], [ "# Does the string start with 'this'?\na_string.startswith('this')", "_____no_output_____" ], [ "# Does the lowercase string start with 'this'?\na_string.lower().startswith('this')", "_____no_output_____" ], [ "# Does the string end with 'Ng'?\na_string.endswith('Ng')", "_____no_output_____" ], [ "# Return a version of the string with a substring replaced with something else\na_string.replace('is', 'XYZ')", "_____no_output_____" ], [ "# Return a version of the string with a substring replaced with something else\na_string.replace('i', '!')", "_____no_output_____" ], [ "# Return a version of the string with the first 2 occurences a substring replaced with something else\na_string.replace('i', '!', 2)", "_____no_output_____" ] ], [ [ "## Some methods on list objects\n\n- **`.append(item)`** to add a single item to the list\n- **`.extend([item1, item2, ...])`** to add multiple items to the list\n- **`.remove(item)`** to remove a single item from the list\n- **`.pop()`** to remove and return the item at the end of the list\n- **`.pop(index)`** to remove and return an item at an index", "_____no_output_____" ], [ "## Some methods on set objects\n\n- **`.add(item)`** to add a single item to the set\n- **`.update([item1, item2, 
...])`** to add multiple items to the set\n- **`.update(set2, set3, ...)`** to add items from all provided sets to the set\n- **`.remove(item)`** to remove a single item from the set\n- **`.pop()`** to remove and return a random item from the set\n- **`.difference(set2)`** to return items in the set that are not in another set\n- **`.intersection(set2)`** to return items in both sets\n- **`.union(set2)`** to return items that are in either set\n- **`.symmetric_difference(set2)`** to return items that are only in one set (not both)\n- **`.issuperset(set2)`** does the set contain everything in the other set?\n- **`.issubset(set2)`** is the set contained in the other set?", "_____no_output_____" ], [ "## Some methods on dict objects\n\n- **`.update([(key1, val1), (key2, val2), ...])`** to add multiple key-value pairs to the dict\n- **`.update(dict2)`** to add all keys and values from another dict to the dict\n- **`.pop(key)`** to remove key and return its value from the dict (error if key not found)\n- **`.pop(key, default_val)`** to remove key and return its value from the dict (or return default_val if key not found)\n- **`.get(key)`** to return the value at a specified key in the dict (or None if key not found)\n- **`.get(key, default_val)`** to return the value at a specified key in the dict (or default_val if key not found)\n- **`.keys()`** to return a list of keys in the dict\n- **`.values()`** to return a list of values in the dict\n- **`.items()`** to return a list of key-value pairs (tuples) in the dict", "_____no_output_____" ], [ "## Positional arguments and keyword arguments to callables\n\nYou can call a function/method in a number of different ways:\n\n- `func()`: Call `func` with no arguments\n- `func(arg)`: Call `func` with one positional argument\n- `func(arg1, arg2)`: Call `func` with two positional arguments\n- `func(arg1, arg2, ..., argn)`: Call `func` with many positional arguments\n- `func(kwarg=value)`: Call `func` with one keyword argument \n- `func(kwarg1=value1, kwarg2=value2)`: Call `func` with two keyword arguments\n- `func(kwarg1=value1, kwarg2=value2, ..., kwargn=valuen)`: Call `func` with many keyword arguments\n- `func(arg1, arg2, kwarg1=value1, kwarg2=value2)`: Call `func` with positonal arguments and keyword arguments\n- `obj.method()`: Same for `func`.. and every other `func` example\n\nWhen using **positional arguments**, you must provide them in the order that the function defined them (the function's **signature**).\n\nWhen using **keyword arguments**, you can provide the arguments you want, in any order you want, as long as you specify each argument's name.\n\nWhen using positional and keyword arguments, positional arguments must come first.", "_____no_output_____" ], [ "## Formatting strings and using placeholders", "_____no_output_____" ], [ "## Python \"for loops\"\n\nIt is easy to **iterate** over a collection of items using a **for loop**. The strings, lists, tuples, sets, and dictionaries we defined are all **iterable** containers.\n\nThe for loop will go through the specified container, one item at a time, and provide a temporary variable for the current item. You can use this temporary variable like a normal variable.", "_____no_output_____" ], [ "## Python \"if statements\" and \"while loops\"\n\nConditional expressions can be used with these two **conditional statements**.\n\nThe **if statement** allows you to test a condition and perform some actions if the condition evaluates to `True`. 
You can also provide `elif` and/or `else` clauses to an if statement to take alternative actions if the condition evaluates to `False`.\n\nThe **while loop** will keep looping until its conditional expression evaluates to `False`.\n\n> Note: It is possible to \"loop forever\" when using a while loop with a conditional expression that never evaluates to `False`.\n>\n> Note: Since the **for loop** will iterate over a container of items until there are no more, there is no need to specify a \"stop looping\" condition.", "_____no_output_____" ], [ "## List, set, and dict comprehensions", "_____no_output_____" ], [ "## Creating objects from arguments or other objects\n\nThe basic types and containers we have used so far all provide **type constructors**:\n\n- `int()`\n- `float()`\n- `str()`\n- `list()`\n- `tuple()`\n- `set()`\n- `dict()`\n\nUp to this point, we have been defining objects of these built-in types using some syntactic shortcuts, since they are so common.\n\nSometimes, you will have an object of one type that you need to convert to another type. Use the **type constructor** for the type of object you want to have, and pass in the object you currently have.", "_____no_output_____" ], [ "## Importing modules", "_____no_output_____" ], [ "## Exceptions", "_____no_output_____" ], [ "## Classes: Creating your own objects", "_____no_output_____" ] ], [ [ "# Define a new class called `Thing` that is derived from the base Python object\nclass Thing(object):\n my_property = 'I am a \"Thing\"'\n\n\n# Define a new class called `DictThing` that is derived from the `dict` type\nclass DictThing(dict):\n my_property = 'I am a \"DictThing\"'", "_____no_output_____" ], [ "print(Thing)\nprint(type(Thing))\nprint(DictThing)\nprint(type(DictThing))\nprint(issubclass(DictThing, dict))\nprint(issubclass(DictThing, object))", "<class '__main__.Thing'>\n<class 'type'>\n<class '__main__.DictThing'>\n<class 'type'>\nTrue\nTrue\n" ], [ "# Create \"instances\" of our new classes\nt = Thing()\nd = DictThing()\nprint(t)\nprint(type(t))\nprint(d)\nprint(type(d))", "<__main__.Thing object at 0x7f2170f3d240>\n<class '__main__.Thing'>\n{}\n<class '__main__.DictThing'>\n" ], [ "# Interact with a DictThing instance just as you would a normal dictionary\nd['name'] = 'Sally'\nprint(d)", "{'name': 'Sally'}\n" ], [ "d.update({\n 'age': 13,\n 'fav_foods': ['pizza', 'sushi', 'pad thai', 'waffles'],\n 'fav_color': 'green',\n })\nprint(d)", "{'fav_color': 'green', 'name': 'Sally', 'fav_foods': ['pizza', 'sushi', 'pad thai', 'waffles'], 'age': 13}\n" ], [ "print(d.my_property)", "I am a \"DictThing\"\n" ] ], [ [ "## Defining functions and methods", "_____no_output_____" ], [ "## Creating an initializer method for your classes", "_____no_output_____" ], [ "## Other \"magic methods\"", "_____no_output_____" ], [ "## Context managers and the \"with statement\"", "_____no_output_____" ] ] ]
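The closing sections of the notebook above ("Defining functions and methods", "Creating an initializer method for your classes", "Other \"magic methods\"", and "Context managers and the \"with statement\"") appear in this excerpt without code. Below is a minimal sketch of those ideas using only the standard library; the `describe` function, `Vehicle` class, and `example.txt` file name are illustrative and not part of the original notebook.

```python
# A function with one positional argument and one keyword argument with a default
def describe(name, excited=False):
    suffix = '!' if excited else '.'
    return 'This is ' + name + suffix


class Vehicle:
    """A small class showing an initializer and two other "magic methods"."""

    def __init__(self, make, wheels=4):
        # __init__ runs when an instance is created, e.g. Vehicle('bike', wheels=2)
        self.make = make
        self.wheels = wheels

    def __repr__(self):
        # __repr__ controls the text shown by repr() and the interactive prompt
        return 'Vehicle(make={!r}, wheels={})'.format(self.make, self.wheels)

    def __eq__(self, other):
        # __eq__ controls the behavior of the == operator between instances
        return isinstance(other, Vehicle) and \
            (self.make, self.wheels) == (other.make, other.wheels)


print(describe('an example', excited=True))
print(Vehicle('bike', wheels=2) == Vehicle('bike', wheels=2))

# A context manager: the file is closed automatically when the with block ends
with open('example.txt', 'w') as f:
    f.write(describe('a line written inside a with block'))
```

The `with` statement is the usual way to work with files because the cleanup step (closing the file) happens even if an exception is raised inside the block.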
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d06bb33b958032a9588f35d3afa12adca1f41e02
714,400
ipynb
Jupyter Notebook
NDoshi_DS4_114_Making_Data_backed_Assertions.ipynb
ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data
fa6598b40c4503079cf596f5ee9ba19afb440fcd
[ "MIT" ]
null
null
null
NDoshi_DS4_114_Making_Data_backed_Assertions.ipynb
ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data
fa6598b40c4503079cf596f5ee9ba19afb440fcd
[ "MIT" ]
null
null
null
NDoshi_DS4_114_Making_Data_backed_Assertions.ipynb
ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data
fa6598b40c4503079cf596f5ee9ba19afb440fcd
[ "MIT" ]
null
null
null
377.989418
439,230
0.90333
[ [ [ "<a href=\"https://colab.research.google.com/github/ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/NDoshi_DS4_114_Making_Data_backed_Assertions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Lambda School Data Science - Making Data-backed Assertions\n\nThis is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it.", "_____no_output_____" ], [ "## Lecture - generating a confounding variable\n\nThe prewatch material told a story about a hypothetical health condition where both the drug usage and overall health outcome were related to gender - thus making gender a confounding variable, obfuscating the possible relationship between the drug and the outcome.\n\nLet's use Python to generate data that actually behaves in this fashion!", "_____no_output_____" ] ], [ [ "# y = \"health outcome\" - predicted variable - dependent variable\n# x = \"drug usage\" - explanatory variable - independent variable", "_____no_output_____" ], [ "import random\ndir(random) # Reminding ourselves what we can do here\nrandom.seed(10) # Random Seed for reproducibility", "_____no_output_____" ], [ "# Let's think of another scenario:\n# We work for a company that sells accessories for mobile phones.\n# They have an ecommerce site, and we are supposed to analyze logs\n# to determine what sort of usage is related to purchases, and thus guide\n# website development to encourage higher conversion.\n\n# The hypothesis - users who spend longer on the site tend\n# to spend more. Seems reasonable, no?\n\n# But there's a confounding variable! If they're on a phone, they:\n# a) Spend less time on the site, but\n# b) Are more likely to be interested in the actual products!\n\n# Let's use namedtuple to represent our data\n\nfrom collections import namedtuple\n# purchased and mobile are bools, time_on_site in seconds\nUser = namedtuple('User', ['purchased','time_on_site', 'mobile'])\n\nexample_user = User(False, 12, False)\nprint(example_user)", "User(purchased=False, time_on_site=12, mobile=False)\n" ], [ "# And now let's generate 1000 example users\n# 750 mobile, 250 not (i.e. 
desktop)\n# A desktop user has a base conversion likelihood of 10%\n# And it goes up by 1% for each 15 seconds they spend on the site\n# And they spend anywhere from 10 seconds to 10 minutes on the site (uniform)\n# Mobile users spend on average half as much time on the site as desktop\n# But have three times as much base likelihood of buying something\n\nusers = []\n\nfor _ in range(250):\n # Desktop users\n time_on_site = random.uniform(10, 600)\n purchased = random.random() < 0.1 + (time_on_site / 1500)\n users.append(User(purchased, time_on_site, False))\n \nfor _ in range(750):\n # Mobile users\n time_on_site = random.uniform(5, 300)\n purchased = random.random() < 0.3 + (time_on_site / 1500)\n users.append(User(purchased, time_on_site, True))\n \nrandom.shuffle(users)\nprint(users[:10])", "[User(purchased=False, time_on_site=172.07500125969045, mobile=False), User(purchased=False, time_on_site=242.1604565076447, mobile=True), User(purchased=True, time_on_site=172.4562884302345, mobile=True), User(purchased=False, time_on_site=134.30741730988564, mobile=True), User(purchased=False, time_on_site=176.6659151415657, mobile=False), User(purchased=False, time_on_site=98.57704667574383, mobile=True), User(purchased=False, time_on_site=141.90635886960914, mobile=True), User(purchased=False, time_on_site=46.30954508769639, mobile=True), User(purchased=True, time_on_site=568.9570603645093, mobile=False), User(purchased=False, time_on_site=64.57737234489078, mobile=True)]\n" ], [ "# !pip freeze\n!pip install pandas==0.23.4", "Requirement already satisfied: pandas==0.23.4 in /usr/local/lib/python3.6/dist-packages (0.23.4)\nRequirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (2.5.3)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (2018.9)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from pandas==0.23.4) (1.16.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas==0.23.4) (1.12.0)\n" ], [ "# Let's put this in a dataframe so we can look at it more easily\nimport pandas as pd\nuser_data = pd.DataFrame(users)\nuser_data.head()", "_____no_output_____" ], [ "# Let's use crosstabulation to try to see what's going on\npd.crosstab(user_data['purchased'], user_data['time_on_site'])", "_____no_output_____" ], [ "# Let's use crosstabulation to try to see what's going on\n# pd.crosstab(user_data['purchased'], user_data['time_on_site'], margins=True)", "_____no_output_____" ], [ "# Trying to show the margins on our Crosstab. Think this might be another \n# versioning issue.\n# pd.crosstab(user_data['purchased'], time_bins, margins=True)", "_____no_output_____" ], [ "# OK, that's not quite what we want\n# Time is continuous! 
We need to put it in discrete buckets\n# Pandas calls these bins, and pandas.cut helps make them\n\ntime_bins = pd.cut(user_data['time_on_site'], 5) # 5 equal-sized bins\npd.crosstab(user_data['purchased'], time_bins)", "_____no_output_____" ], [ "# We can make this a bit clearer by normalizing (getting %)\npd.crosstab(user_data['purchased'], time_bins, normalize='columns')", "_____no_output_____" ], [ "# That seems counter to our hypothesis\n# More time on the site can actually have fewer purchases\n\n# But we know why, since we generated the data!\n# Let's look at mobile and purchased\npd.crosstab(user_data['purchased'], user_data['mobile'], normalize='columns')", "_____no_output_____" ], [ "# Yep, mobile users are more likely to buy things\n# But we're still not seeing the *whole* story until we look at all 3 at once\n\n# Live/stretch goal - how can we do that?\nct = pd.crosstab(user_data['mobile'], [user_data['purchased'], time_bins], \n rownames=['device'], \n colnames=[\"purchased\", \"time on site\"], \n normalize='index')\nct", "_____no_output_____" ], [ "# help(user_data.plot)", "_____no_output_____" ], [ "import seaborn as sns\nsns.heatmap(pd.crosstab(user_data['mobile'], [user_data['purchased'], time_bins] ),\n cmap=\"YlGnBu\", annot=True, cbar=False)", "_____no_output_____" ], [ "# user_data.hist()", "_____no_output_____" ], [ "pd.pivot_table(user_data, values='purchased',\n index=time_bins).plot.bar()", "_____no_output_____" ], [ "pd.pivot_table(\n user_data, values='mobile', index=time_bins).plot.bar();", "_____no_output_____" ], [ "user_data['time_on_site'].plot.density();", "_____no_output_____" ], [ "ct = pd.crosstab(time_bins, [user_data['purchased'], user_data['mobile']],\n normalize='columns')\nct", "_____no_output_____" ], [ "ct.plot();", "_____no_output_____" ], [ "ct.plot(kind='bar')", "_____no_output_____" ], [ "ct.plot(kind='bar', stacked=True)", "_____no_output_____" ], [ "time_bins = pd.cut(user_data['time_on_site'], 6) # 6 equal-sized bins\nct = pd.crosstab(time_bins, [user_data['purchased'], user_data['mobile']],\n normalize='columns')\nct", "_____no_output_____" ], [ "ct.plot(kind='bar', stacked=True)", "_____no_output_____" ] ], [ [ "## Assignment - what's going on here?\n\nConsider the data in `persons.csv` (already prepared for you, in the repo for the week). 
It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.\n\nTry to figure out which variables are possibly related to each other, and which may be confounding relationships.", "_____no_output_____" ] ], [ [ "# TODO - your code here\n# Use what we did live in lecture as an example\n\n# HINT - you can find the raw URL on GitHub and potentially use that\n# to load the data with read_csv, or you can upload it yourself\n\n# Import pandas library\nimport pandas as pd\n\n", "_____no_output_____" ], [ "# Load data into pandas dataframe\ndf = pd.read_csv('https://raw.githubusercontent.com/ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module4-databackedassertions/persons.csv')\n\n\n# Show example of df\ndf.head(10)", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "# Start with a pairplot to compare all the variables\nimport seaborn as sns", "_____no_output_____" ], [ "sns.pairplot(df)", "_____no_output_____" ], [ "# Create a distplots for all three variables\nsns.distplot(df['age']);\nsns.distplot(df['weight']);\nsns.distplot(df['exercise_time']);", "_____no_output_____" ], [ "sns.jointplot('exercise_time', 'weight', df, kind = 'kde')", "_____no_output_____" ] ], [ [ "### Assignment questions\n\nAfter you've worked on some code, answer the following questions in this text block:\n\n1. What are the variable types in the data?\n The variables are interger type.\n2. What are the relationships between the variables?\n There is no relation between age and weight directly, there seems to be a relationship between age/exercise time and exercise time/weight.\n3. Which relationships are \"real\", and which spurious?\n The real relationship is between exercise time and weight whereas the relationship between age/exercise time seems to be spurious.\n", "_____no_output_____" ], [ "## Stretch goals and resources\n\nFollowing are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub.\n\n- [Spurious Correlations](http://tylervigen.com/spurious-correlations)\n- [NIH on controlling for confounding variables](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4017459/)\n\nStretch goals:\n\n- Produce your own plot inspired by the Spurious Correlation visualizations (and consider writing a blog post about it - both the content and how you made it)\n- Pick one of the techniques that NIH highlights for confounding variables - we'll be going into many of them later, but see if you can find which Python modules may help (hint - check scikit-learn)\n- Use a groupby object to create some useful visualizations", "_____no_output_____" ] ] ]
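To back the assignment answers above with numbers rather than only plots, the binning-and-crosstab approach from the lecture can be reused on the persons data. This sketch assumes the same column names (`age`, `weight`, `exercise_time`) and file URL used in the cells above; the choice of 5 bins is arbitrary.

```python
import pandas as pd

url = ('https://raw.githubusercontent.com/ndoshi83/DS-Unit-1-Sprint-1-Dealing-With-Data'
       '/master/module4-databackedassertions/persons.csv')
df = pd.read_csv(url)

# Bin the continuous variables, as was done with time_on_site in the lecture
age_bins = pd.cut(df['age'], 5)
exercise_bins = pd.cut(df['exercise_time'], 5)

# Mean weight by age bin alone - this can look like a real relationship...
print(pd.pivot_table(df, values='weight', index=age_bins))

# ...but first check whether age and exercise time are themselves related,
# which is exactly the pattern a confounding variable produces
print(pd.crosstab(age_bins, exercise_bins, normalize='index'))

# Mean weight by exercise-time bin *within* each age bin: if weight tracks
# exercise time at every age, the apparent age/weight link is plausibly spurious
print(pd.pivot_table(df, values='weight', index=age_bins, columns=exercise_bins))
```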
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
d06bc3c508c4f3eebf876b8d097539e503e1391d
12,372
ipynb
Jupyter Notebook
workshops/workshop1/code/Visual Designer Data Prep Pipeline.ipynb
samleung314/mslearn-dp100
aac073d21226452d10c4e84981811b0703371200
[ "MIT" ]
null
null
null
workshops/workshop1/code/Visual Designer Data Prep Pipeline.ipynb
samleung314/mslearn-dp100
aac073d21226452d10c4e84981811b0703371200
[ "MIT" ]
null
null
null
workshops/workshop1/code/Visual Designer Data Prep Pipeline.ipynb
samleung314/mslearn-dp100
aac073d21226452d10c4e84981811b0703371200
[ "MIT" ]
1
2022-03-09T20:45:29.000Z
2022-03-09T20:45:29.000Z
37.377644
559
0.626657
[ [ [ "# Visual Designer (Data Prep)\n\nIn this exercise we will be building a pipeline in Azure Machine Learning using the [Visual Designer](https://docs.microsoft.com/azure/machine-learning/concept-designer). Traditionally the Visual Designer is used for training and deploying models. Here we will build a data prep pipeline that get a dataset ready for downstream model scoring. Below you can see a final picture of the data prep pipeline that will be built as part of this exercise.\n\nThe pipeline will join two datasets together that consists of the diabetes dataset. We will perform binning on the Age column. After joining the datasets together, we will use the [SQL Transformation](https://docs.microsoft.com/azure/machine-learning/component-reference/apply-sql-transformation) component to demonstrate the flexibility of the Visual Designer by creating an aggregate dataset. The resulting datasets will be landed in the /1-bronze folder of the data lake. Later we will build in a scoring pipeline that will use the result dataset.\n\n![Final data prep pipeline in Visual Designer](./img/vddataprepfinal.png)", "_____no_output_____" ], [ "## Step 1: Stage data\n\nLet's first upload our source files to the /0-raw layer of the data lake. We will use this as the source for the pipeline.", "_____no_output_____" ] ], [ [ "import azureml.core\nfrom azureml.core import Workspace\n\n# Load the workspace from the saved config file\nws = Workspace.from_config()\nprint('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))", "_____no_output_____" ], [ "#TODO: Supply userid value for naming artifacts.\nuserid = ''\n\ntabular_dataset_name = 'diabetes-data-bronze-' + userid\n\nprint(\ntabular_dataset_name\n)", "_____no_output_____" ], [ "from azureml.core import Datastore, Dataset\n\n# Set datastore name where raw diabetes data is stored.\ndatastore_name = ''\n\ndatastore = Datastore.get(ws, datastore_name)\nprint(\"Found Datastore with name: %s\" % datastore_name)", "_____no_output_____" ], [ "from azureml.data.datapath import DataPath\n\n# Upload local csv files to ADLS using AML Datastore.\nds = Dataset.File.upload_directory(src_dir='../data/stage',\n target=DataPath(datastore, '0-raw/diabetes/' + userid + '/stage/'),\n show_progress=True)\n\ntype(ds)", "_____no_output_____" ] ], [ [ "## Step 2: Create target datasets\nRegister datasets to use as targets for writing data from pipeline.", "_____no_output_____" ] ], [ [ "diabetes_ds = Dataset.Tabular.from_delimited_files(path=(datastore,'1-bronze/diabetes/' + userid + '/diabetes.csv'),validate=False,infer_column_types=False)\ndiabetes_ds.register(ws,name=tabular_dataset_name,create_new_version=True)\n\ndiabetes_ds = Dataset.Tabular.from_delimited_files(path=(datastore,'1-bronze/diabetes/' + userid + '/diabetes_sql_example.csv'),validate=False,infer_column_types=False)\ndiabetes_ds.register(ws,name=tabular_dataset_name + '_sql_example',create_new_version=True)", "_____no_output_____" ] ], [ [ "## Step 3: Create new pipeline\n\nIn the Azure ML studio, navigate to <b>Designer</b> and press the <b>+</b> button under <b>New pipeline</b>\n\n![Screenshot of AML Studio highlighting the steps described to create a new pipeline](./img/vdnewpipeline.png)", "_____no_output_____" ], [ "1. In <b>Settings</b> change the compute type to <b>Compute cluster</b> and select the appropriate compute cluster.\n1. 
Name the pipeline in the <b>Draft name</b> field using the convention \"pipeline-data-prep-diabetes-<'userid'>-prod\"\n\n![Settings pane with compute settings and draft name fields highlighted](./img/vdsettingpipelinename.png)", "_____no_output_____" ], [ "1. Open <b>Data Input and Output</b> from the components menu.\n2. Drag <b>Import Data</b> onto the canvas.\n3. Change the <b>Data source</b> to <b>URL via HTTP</b>\n4. Enter the storage url to the <b>patient-age.csv</b> file in the <b>/0-raw</b> folder of the data lake.\n5. Validate by pressing <b>Preview schema</b>\n\n![Import data component for patient-age.csv](./img/vdimportpatientage.png)", "_____no_output_____" ], [ "1. Open <b>Data Input and Output</b> from the components menu.\n2. Drag <b>Import Data</b> onto the canvas.\n3. Change the <b>Data source</b> to <b>URL via HTTP</b>\n4. Enter the storage url to the <b>patient-levels.csv</b> file in the <b>/0-raw</b> folder of the data lake.\n5. Validate by pressing <b>Preview schema</b>\n\n![Import data component for patient-levels.csv](./img/vdimportpatientlevels.png)", "_____no_output_____" ], [ "1. Open <b>Data Transformation</b> from the components menu.\n2. Drag <b>Group Data into Bins</b> onto the canvas.\n3. Connect <b>Import Data</b> for patient-age.csv.\n4. Change <b>Binning mode</b> to <b>Custom Edges</b>.\n5. Paste the following value in the <b>Comma-separated list of bin edges</b> field. \"1,11,21,31,41,51,61,71,81,91\"\n6. Select the <b>Age</b> column for <b>Columns to bin</b>.\n\n![Group Data into Bins component settings](./img/vdbindata.png)", "_____no_output_____" ], [ "1. Open <b>Data Transformation</b> from the components menu.\n2. Drag <b>Join Data</b> onto the canvas.\n3. Connect <b>Group Data into Bins</b> component using the <b>Quantized dataset: DataFrameDirectory</b> output to <b>Join Data</b> component <b>Left dataset: DataFrameDirectory</b> input.\n4. Connect <b>Import Data</b> for patient-levels.csv to <b>Join Data</b> component <b>Right dataset: DataFrameDirectory</b> input.\n5. Set the right and left join key columns to <b>Id</b>\n6. Leave defaults as shown in screenshot.\n\n![Join data component settings](./img/vdjoindata.png)", "_____no_output_____" ], [ "1. Open <b>Data Transformation</b> from the components menu.\n2. Drag <b>Select Columns in Dataset</b> onto the canvas.\n3. Connect <b>Join Data</b> to <b>Select Columns in Dataset</b>.\n4. Add the following columns to <b>Select columns</b>. \"Id,PatientID,Age,Age_quantized,Pregnancies,PlasmaGlucose,DiastolicBloodPressure,TricepsThickness,SerumInsulin,BMI,DiabetesPedigree\"\n\n![Select Columns in Dataset settings](./img/vdselectcolumns.png)", "_____no_output_____" ], [ "1. Open <b>Data Input and Output</b> from the components menu.\n2. Drag <b>Export Data</b> onto the canvas.\n3. Connect <b>Select Columns in Dataset</b> to <b>Export Data</b>.\n4. Choose <b>Azure Data Lake Storage Gen2</b> from the <b>Datastore type</b> dropdown.\n5. Select the workshop datastore from the <b>Datastore</b> dropdown.\n6. Enter the path to the <b>/1-bronze</b> diabetes folder with filename <b>diabetes.csv</b>\n7. Choose <b>csv</b> for the <b>File format</b>.\n\n![Export Data component settings for diabetes.csv](./img/vdexportdiabetesdata.png)", "_____no_output_____" ], [ "1. Open <b>Data Transformation</b> from the components menu.\n2. Drag <b>Apply SQL Transformation</b> onto the canvas.\n3. Connect <b>Join Data</b> to the <b>Apply SQL Transformation</b> input <b>t1: DataFrameDirectory</b>.\n4. 
Enter the following SQL statement in the <b>SQL query script</b> field.\n\n```sql\nSELECT\nPatientID\n,MAX(BMI) \nFROM t1\nGROUP BY PatientID\n```\n![Apply SQL Transformation component settings](./img/vdsqltransformation.png)\n", "_____no_output_____" ], [ "1. Open <b>Data Input and Output</b> from the components menu.\n2. Drag <b>Export Data</b> onto the canvas.\n3. Connect <b>Apply SQL Transformation</b> to <b>Export Data</b>.\n4. Choose <b>Azure Data Lake Storage Gen2</b> from the <b>Datastore type</b> dropdown.\n5. Select the workshop datastore from the <b>Datastore</b> dropdown.\n6. Enter the path to the <b>/1-bronze</b> diabetes folder with filename <b>diabetes_sql_example.csv</b>\n7. Choose <b>csv</b> for the <b>File format</b>.\n\n![Export Data component settings for diabetes.csv](./img/vdexportdiabetessqldata.png)", "_____no_output_____" ], [ "## Step 4: Submit and Publish pipeline\nFirst submit the pipeline and ensure it runs as expected. Second publish the pipeline endpoint.", "_____no_output_____" ], [ "1. Press <b>Submit</b>\n2. Choose <b>Create New</b> for Experiment.\n3. Name the new experiment using this convention. \"pipeline-data-prep-diabetes-\\<userid\\>-prod\"\n4. Press the <b>Submit</b> button.\n5. Monitor the run for completion.\n\n![Set up pipeline run settings](./img/vdsubmitpipeline.png)", "_____no_output_____" ], [ "1. Verify <b>diabetes.csv</b> and <b>diabetes_sql_example.csv</b> are created after the pipeline run in <b>/1-bronze</b> folder.\n\n![screenshot of Storage Explorer showing output files from pipeline run](./img/vdoutputfiles.png)", "_____no_output_____" ], [ "1. Verify registered datasets recognize the new files\n\n![dataset overview in AML studio with Files in dataset highlighted showing 1](./img/vddatasetoutput.png)\n\n![dataset overview in AML studio explore showing data sample](./img/vdexploredatasetoutput.png)", "_____no_output_____" ], [ "1. Open the pipeline and press the <b>Publish</b> button.\n2. Choose <b>Create new</b> and name the pipeline endpoint the same as the pipeline draft.\n3. Press the <b>Publish</b> button.\n\n![The Set up published pipeline menu in the AML Studio Visual Designer](./img/vdpublishpipeline.png)", "_____no_output_____" ], [ "## The End\n\nThis data prep pipeline will be orchestrated using Azure Data Factory with scoring and training pipelines that are published in Module 3. ", "_____no_output_____" ] ] ]
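The published endpoint is meant to be triggered from Azure Data Factory later, but it can also be smoke-tested from the SDK. The snippet below is only a sketch, under the assumption that the endpoint was published with the draft-name convention used above (`pipeline-data-prep-diabetes-<userid>-prod`); it is not one of the workshop steps.

```python
from azureml.core import Workspace
from azureml.pipeline.core import PipelineEndpoint

ws = Workspace.from_config()

# Same userid value used when naming the pipeline draft and endpoint above
userid = ''
endpoint_name = 'pipeline-data-prep-diabetes-' + userid + '-prod'

# Look up the published endpoint and submit a run against its default version
pipeline_endpoint = PipelineEndpoint.get(workspace=ws, name=endpoint_name)
run = pipeline_endpoint.submit(experiment_name=endpoint_name)
run.wait_for_completion(show_output=True)
```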
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06bd7e1dbad0d2cbfaab93d02fec26c93363d88
31,113
ipynb
Jupyter Notebook
matrix_one/day4.ipynb
AnnaTytula/dw_matrix
0323849b3b343c26d686a8fdfd524909c7e10c6f
[ "MIT" ]
null
null
null
matrix_one/day4.ipynb
AnnaTytula/dw_matrix
0323849b3b343c26d686a8fdfd524909c7e10c6f
[ "MIT" ]
null
null
null
matrix_one/day4.ipynb
AnnaTytula/dw_matrix
0323849b3b343c26d686a8fdfd524909c7e10c6f
[ "MIT" ]
null
null
null
31,113
31,113
0.821489
[ [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix\"", "/content/drive/My Drive/Colab Notebooks/dw_matrix\n" ], [ "df=pd.read_csv('data/men_shoes.csv',low_memory=False)\ndf.shape", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "mean_price=np.mean(df['prices_amountmin'])\nmean_price", "_____no_output_____" ], [ "[1]*5", "_____no_output_____" ], [ "y_true=df['prices_amountmin']\ny_pred=[mean_price] * y_true.shape[0]\nmean_absolute_error(y_true,y_pred)", "_____no_output_____" ], [ "df['prices_amountmin'].hist(bins=100);", "_____no_output_____" ], [ "np.log1p(df['prices_amountmin']).hist(bins=100);", "_____no_output_____" ], [ "median_price=np.median(df['prices_amountmin'])\ny_true=df['prices_amountmin']\ny_pred=[median_price] * y_true.shape[0]\nmean_absolute_error(y_true,y_pred)", "_____no_output_____" ], [ "median_price=np.median(df['prices_amountmin'])\nprice_log_mean=np.expm1(np.mean(np.log1p(y_true)))\ny_true=df['prices_amountmin']\ny_pred=[price_log_mean] * y_true.shape[0]\nmean_absolute_error(y_true,y_pred)", "_____no_output_____" ], [ "df.brand.value_counts()", "_____no_output_____" ], [ "df['brand_cat'] = df['brand'].factorize()[0]", "_____no_output_____" ], [ "feats=['brand_cat']\nX=df[feats].values\ny=df['prices_amountmin'].values\n\nmodel=DecisionTreeRegressor(max_depth=5)\n\nscores=cross_val_score(model,X,y,scoring='neg_mean_absolute_error')\nnp.mean(scores),np.std(scores)", "_____no_output_____" ], [ "def run_model(feats):\n X=df[feats].values\n y=df['prices_amountmin'].values\n\n model=DecisionTreeRegressor(max_depth=5)\n\n scores=cross_val_score(model,X,y,scoring='neg_mean_absolute_error')\n return np.mean(scores),np.std(scores)\n\nrun_model(['brand_cat'])", "_____no_output_____" ], [ "df['categories_cat'] = df['categories'].factorize()[0]\ndf['manufacturer_cat'] = df['manufacturer'].factorize()[0]", "_____no_output_____" ], [ "run_model(['categories_cat'])", "_____no_output_____" ], [ "run_model(['manufacturer_cat'])", "_____no_output_____" ], [ "run_model(['brand_cat','categories_cat'])", "_____no_output_____" ], [ "run_model(['brand_cat','categories_cat','manufacturer_cat'])", "_____no_output_____" ], [ "!git add matrix_one/day4.ipynb", "_____no_output_____" ], [ "!git config --global user.email \"[email protected]\"\n!git config --global user.name \"Ania\"\n!git commit -m \"Easy models using men's shoes data\"", "[master 05c5c37] Easy models using men's shoes data\n 1 file changed, 1 insertion(+)\n create mode 100644 matrix_one/day4.ipynb\n" ], [ "!git push -u origin master", "Counting objects: 1 \rCounting objects: 4, done.\nDelta compression using up to 2 threads.\nCompressing objects: 25% (1/4) \rCompressing objects: 50% (2/4) \rCompressing objects: 75% (3/4) \rCompressing objects: 100% (4/4) \rCompressing objects: 100% (4/4), done.\nWriting objects: 25% (1/4) \rWriting objects: 50% (2/4) \rWriting objects: 75% (3/4) \rWriting objects: 100% (4/4) \rWriting objects: 100% (4/4), 13.24 KiB | 3.31 MiB/s, done.\nTotal 4 (delta 1), reused 0 (delta 0)\nremote: Resolving deltas: 100% (1/1), completed with 1 local object.\u001b[K\nTo https://github.com/AnnaTytula/dw_matrix.git\n 9dab652..05c5c37 master -> master\nBranch 'master' set up to track remote branch 'master' from 'origin'.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06bdd2b28b1762728f0559b48444bfe43dcc903
7,273
ipynb
Jupyter Notebook
Lab-2/3HandlingMissingValues.ipynb
yash-a-18/002_YashAmethiya
ab7e8bd8ebec553a0592b698dddc34c53b522967
[ "MIT" ]
null
null
null
Lab-2/3HandlingMissingValues.ipynb
yash-a-18/002_YashAmethiya
ab7e8bd8ebec553a0592b698dddc34c53b522967
[ "MIT" ]
null
null
null
Lab-2/3HandlingMissingValues.ipynb
yash-a-18/002_YashAmethiya
ab7e8bd8ebec553a0592b698dddc34c53b522967
[ "MIT" ]
null
null
null
7,273
7,273
0.637564
[ [ [ "Steps for Handling the missing value\n\n1. Import Libraries\n2. Load data\n3. Seprate Input and Output attributes\n4. Find the missing values and handle it in either way\n a. Removing data\n b. Imputation\n", "_____no_output_____" ] ], [ [ "# Step 1: Import Libraries\r\n\r\nimport numpy as np \r\nimport pandas as pd\r\nfrom sklearn.impute import SimpleImputer \r\n\r\n# Step 2: Load Data\r\n \r\ndatasets = pd.read_csv('./Datasets/Data_for_Missing_Values.csv') \r\nprint(\"\\nData :\\n\",datasets)\r\nprint(\"\\nData statistics\\n\",datasets.describe())", "\nData :\n Country Age Salary Purchased\n0 France 44.0 72000.0 No\n1 Spain 27.0 48000.0 Yes\n2 Germany 30.0 54000.0 No\n3 Spain 38.0 61000.0 No\n4 NaN NaN NaN NaN\n5 Germany 40.0 NaN Yes\n6 France 35.0 58000.0 Yes\n7 Spain NaN 52000.0 No\n8 France 48.0 79000.0 Yes\n9 Germany 50.0 83000.0 No\n10 France 37.0 67000.0 Yes\n11 Spain 45.0 55000.0 No\n\nData statistics\n Age Salary\ncount 10.000000 10.000000\nmean 39.400000 62900.000000\nstd 7.515909 11892.574714\nmin 27.000000 48000.000000\n25% 35.500000 54250.000000\n50% 39.000000 59500.000000\n75% 44.750000 70750.000000\nmax 50.000000 83000.000000\n" ], [ "# Step 3: Seprate Input and Output attributes\r\n\r\n# All rows, all columns except last \r\nX = datasets.iloc[:, :-1].values \r\n \r\n# Only last column \r\nY = datasets.iloc[:, -1].values \r\n\r\nprint(\"\\n\\nInput : \\n\", X) \r\nprint(\"\\n\\nOutput: \\n\", Y) ", "\n\nInput : \n [['France' 44.0 72000.0]\n ['Spain' 27.0 48000.0]\n ['Germany' 30.0 54000.0]\n ['Spain' 38.0 61000.0]\n [nan nan nan]\n ['Germany' 40.0 nan]\n ['France' 35.0 58000.0]\n ['Spain' nan 52000.0]\n ['France' 48.0 79000.0]\n ['Germany' 50.0 83000.0]\n ['France' 37.0 67000.0]\n ['Spain' 45.0 55000.0]]\n\n\nOutput: \n ['No' 'Yes' 'No' 'No' nan 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes' 'No']\n" ], [ "# Step 4: Find the missing values and handle it in either way\r\n\r\n# 4a. Removing the row with all null values\r\n\r\ndatasets.dropna(how='all',inplace=True)\r\nprint(\"\\nNew Data :\",datasets)", "\nNew Data : Country Age Salary Purchased\n0 France 44.0 72000.0 No\n1 Spain 27.0 48000.0 Yes\n2 Germany 30.0 54000.0 No\n3 Spain 38.0 61000.0 No\n5 Germany 40.0 NaN Yes\n6 France 35.0 58000.0 Yes\n7 Spain NaN 52000.0 No\n8 France 48.0 79000.0 Yes\n9 Germany 50.0 83000.0 No\n10 France 37.0 67000.0 Yes\n11 Spain 45.0 55000.0 No\n" ], [ "# 4b. Imputation (Replacing null values with mean value of that attribute)\r\n\r\n# All rows, all columns except last \r\nnew_X = datasets.iloc[:, :-1].values \r\n \r\n# Only last column \r\nnew_Y = datasets.iloc[:, -1].values \r\n\r\n\r\n# Using Imputer function to replace NaN values with mean of that parameter value \r\nimputer = SimpleImputer(missing_values = np.nan,strategy = \"mean\")\r\n\r\n# Fitting the data, function learns the stats \r\nimputer = imputer.fit(new_X[:, 1:3]) \r\n \r\n# fit_transform() will execute those stats on the input ie. X[:, 1:3] \r\nnew_X[:, 1:3] = imputer.transform(new_X[:, 1:3]) \r\n \r\n# filling the missing value with mean \r\nprint(\"\\n\\nNew Input with Mean Value for NaN : \\n\\n\", new_X) \r\n\r\n", "\n\nNew Input with Mean Value for NaN : \n\n [['France' 44.0 72000.0]\n ['Spain' 27.0 48000.0]\n ['Germany' 30.0 54000.0]\n ['Spain' 38.0 61000.0]\n ['Germany' 40.0 62900.0]\n ['France' 35.0 58000.0]\n ['Spain' 39.4 52000.0]\n ['France' 48.0 79000.0]\n ['Germany' 50.0 83000.0]\n ['France' 37.0 67000.0]\n ['Spain' 45.0 55000.0]]\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06c1088c0999be3da881f9db0bc3f293d9e7e63
895,608
ipynb
Jupyter Notebook
notebooks/clean_us_data.ipynb
itsrawlinz-jeff/COVID19_VISUALIZATION-R_JEFF
9dc03aaf37a2c6cc236088fa88221053fcca70b3
[ "Apache-2.0" ]
101
2020-04-24T19:39:34.000Z
2022-03-23T09:46:45.000Z
notebooks/clean_us_data.ipynb
itsrawlinz-jeff/COVID19_VISUALIZATION-R_JEFF
9dc03aaf37a2c6cc236088fa88221053fcca70b3
[ "Apache-2.0" ]
33
2020-04-24T06:32:50.000Z
2022-02-21T23:29:54.000Z
notebooks/clean_us_data.ipynb
itsrawlinz-jeff/COVID19_VISUALIZATION-R_JEFF
9dc03aaf37a2c6cc236088fa88221053fcca70b3
[ "Apache-2.0" ]
35
2020-04-24T06:29:11.000Z
2022-02-22T19:11:06.000Z
198.055728
109,900
0.855165
[ [ [ "# Overview\n\n### `clean_us_data.ipynb`: Fix data inconsistencies in the raw time series data from [`etl_us_data.ipynb`](./etl_us_data.ipynb).\n\nInputs:\n* `outputs/us_counties.csv`: Raw county-level time series data for the United States, produced by running [etl_us_data.ipynb](./etl_us_data.ipynb)\n* `outputs/us_counties_meta.json`: Column type metadata for reading `data/us_counties.csv` with `pd.read_csv()`\n\nOutputs:\n* `outputs/us_counties_clean.csv`: The contents of `outputs/us_counties.csv` after data cleaning\n* `outputs/us_counties_clean_meta.json`: Column type metadata for reading `data/us_counties_clean.csv` with `pd.read_csv()`\n* `outputs/us_counties_clean.feather`: Binary version of `us_counties_clean.csv`, in [Feather](https://arrow.apache.org/docs/python/feather.html) format.\n* `outputs/dates.feather`: Dates associated with points in time series, in [Feather](https://arrow.apache.org/docs/python/feather.html) format.\n\n**Note:** You can redirect these input and output files by setting the environment variables `COVID_INPUTS_DIR` and `COVID_OUTPUTS_DIR` to replacement values for the prefixes `inputs` and `outputs`, respectively, in the above paths.", "_____no_output_____" ], [ "# Read and reformat the raw data", "_____no_output_____" ] ], [ [ "# Initialization boilerplate\n\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nimport scipy.optimize\nimport sklearn.metrics\nimport matplotlib.pyplot as plt\n\nfrom typing import *\n\nimport text_extensions_for_pandas as tp\n\n# Local file of utility functions\nimport util\n\n# Allow environment variables to override data file locations.\n_INPUTS_DIR = os.getenv(\"COVID_INPUTS_DIR\", \"inputs\")\n_OUTPUTS_DIR = os.getenv(\"COVID_OUTPUTS_DIR\", \"outputs\")\nutil.ensure_dir_exists(_OUTPUTS_DIR) # create if necessary", "_____no_output_____" ] ], [ [ "## Read the CSV file from `etl_us_data.ipynb` and apply the saved type information", "_____no_output_____" ] ], [ [ "csv_file = os.path.join(_OUTPUTS_DIR, \"us_counties.csv\")\nmeta_file = os.path.join(_OUTPUTS_DIR, \"us_counties_meta.json\")\n\n# Read column type metadata\nwith open(meta_file) as f:\n cases_meta = json.load(f)\n\n# Pandas does not currently support parsing datetime64 from CSV files.\n# As a workaround, read the \"Date\" column as objects and manually \n# convert after.\ncases_meta[\"Date\"] = \"object\"\n\ncases_raw = pd.read_csv(csv_file, dtype=cases_meta, parse_dates=[\"Date\"])\n\n# Restore the Pandas index\ncases_vertical = cases_raw.set_index([\"FIPS\", \"Date\"], verify_integrity=True)\ncases_vertical", "_____no_output_____" ] ], [ [ "## Replace missing values in the secondary datasets with zeros", "_____no_output_____" ] ], [ [ "for colname in (\"Confirmed_NYT\", \"Deaths_NYT\", \"Confirmed_USAFacts\", \"Deaths_USAFacts\"):\n cases_vertical[colname].fillna(0, inplace=True)\n cases_vertical[colname] = cases_vertical[colname].astype(\"int64\")\n\ncases_vertical", "_____no_output_____" ] ], [ [ "## Collapse each time series down to a single cell\n\nThis kind of time series data is easier to manipulate at the macroscopic level if each time series occupies a \nsingle cell of the DataFrame. 
We use the [TensorArray](https://text-extensions-for-pandas.readthedocs.io/en/latest/#text_extensions_for_pandas.TensorArray) Pandas extension type from [Text Extensions for Pandas](https://github.com/CODAIT/text-extensions-for-pandas).", "_____no_output_____" ] ], [ [ "cases, dates = util.collapse_time_series(cases_vertical, [\"Confirmed\", \"Deaths\", \"Recovered\", \n \"Confirmed_NYT\", \"Deaths_NYT\",\n \"Confirmed_USAFacts\", \"Deaths_USAFacts\"])\ncases", "_____no_output_____" ], [ "# Note that the previous cell also saved the values from the \"Date\"\n# column of `cases_vertical` into the Python variable `dates`:\ndates[:10], dates.shape", "_____no_output_____" ], [ "# Print out the time series for the Bronx as a sanity check\nbronx_fips = 36005\ncases.loc[bronx_fips][\"Confirmed\"]", "_____no_output_____" ] ], [ [ "# Correct for missing data for today in USAFacts data\n\nThe USAFacts database only receives the previous day's updates late in the day,\nso it's often missing the last value. Substitute the previous day's value if\nthat is the case.", "_____no_output_____" ] ], [ [ "# Last 10 days of the time series for the Bronx before this change\ncases.loc[bronx_fips][\"Deaths_USAFacts\"].to_numpy()[-10:]", "_____no_output_____" ], [ "# last element <-- max(last element, second to last)\nnew_confirmed = cases[\"Confirmed_USAFacts\"].to_numpy().copy()\nnew_confirmed[:, -1] = np.maximum(new_confirmed[:, -1], new_confirmed[:, -2])\ncases[\"Confirmed_USAFacts\"] = tp.TensorArray(new_confirmed)\n\nnew_deaths = cases[\"Deaths_USAFacts\"].to_numpy().copy()\nnew_deaths[:, -1] = np.maximum(new_deaths[:, -1], new_deaths[:, -2])\ncases[\"Deaths_USAFacts\"] = tp.TensorArray(new_deaths)\n\n# Last 10 days of the time series for the Bronx after this change\ncases.loc[bronx_fips][\"Deaths_USAFacts\"].to_numpy()[-10:]", "_____no_output_____" ] ], [ [ "# Validate the New York City confirmed cases data\n\nOlder versions of the Johns Hopkins data coded all of New York city as being\nin New York County. Each borough is actually in a different county\nwith a different FIPS code.\n\nVerify that this problem hasn't recurred.", "_____no_output_____" ] ], [ [ "max_bronx_confirmed = np.max(cases.loc[36005][\"Confirmed\"])\nif max_bronx_confirmed == 0:\n raise ValueError(f\"Time series for the Bronx is all zeros again:\\n{cases.loc[36005]['Confirmed']}\")", "_____no_output_____" ], [ "max_bronx_confirmed", "_____no_output_____" ] ], [ [ "Also plot the New York City confirmed cases time series to allow for manual validation.", "_____no_output_____" ] ], [ [ "new_york_county_fips = 36061\nnyc_fips = [\n 36005, # Bronx County\n 36047, # Kings County\n new_york_county_fips, # New York County\n 36081, # Queens County\n 36085, # Richmond County\n]\nutil.graph_examples(cases.loc[nyc_fips], \"Confirmed\", {}, num_to_pick=5)", "_____no_output_____" ] ], [ [ "## Adjust New York City deaths data\n\nPlot deaths for New York City in the Johns Hopkins data set. 
The jump in June is due to a change in reporting.", "_____no_output_____" ] ], [ [ "util.graph_examples(cases.loc[nyc_fips], \"Deaths\", {}, num_to_pick=5)", "_____no_output_____" ] ], [ [ "New York Times version of the time series for deaths in New York city:", "_____no_output_____" ] ], [ [ "util.graph_examples(cases.loc[nyc_fips], \"Deaths_NYT\", {}, num_to_pick=5)", "_____no_output_____" ] ], [ [ "USAFacts version of the time series for deaths in New York city:", "_____no_output_____" ] ], [ [ "util.graph_examples(cases.loc[nyc_fips], \"Deaths_USAFacts\", {}, num_to_pick=5)", "_____no_output_____" ] ], [ [ "Currently the USAFacts version is cleanest, so we use that one.", "_____no_output_____" ] ], [ [ "new_deaths = cases[\"Deaths\"].copy(deep=True)\nfor fips in nyc_fips:\n new_deaths.loc[fips] = cases[\"Deaths_USAFacts\"].loc[fips]\ncases[\"Deaths\"] = new_deaths \n\nprint(\"After:\")\nutil.graph_examples(cases.loc[nyc_fips], \"Deaths\", {}, num_to_pick=5)", "After:\n" ] ], [ [ "# Clean up the Rhode Island data\n\nThe Johns Hopkins data reports zero deaths in most of Rhode Island. Use \nthe secondary data set from the New York Times for Rhode Island.", "_____no_output_____" ] ], [ [ "print(\"Before:\")\nutil.graph_examples(cases, \"Deaths\", {}, num_to_pick=8, \n mask=(cases[\"State\"] == \"Rhode Island\"))", "Before:\n" ], [ "# Use our secondary data set for all Rhode Island data.\nri_fips = cases[cases[\"State\"] == \"Rhode Island\"].index.values.tolist()\nfor colname in [\"Confirmed\", \"Deaths\"]:\n new_series = cases[colname].copy(deep=True)\n for fips in ri_fips:\n new_series.loc[fips] = cases[colname + \"_NYT\"].loc[fips]\n cases[colname] = new_series \n\n# Note that the secondary data set has not \"Recovered\" time series, so\n# we leave those numbers alone for now.\n\nprint(\"After:\")\nutil.graph_examples(cases, \"Deaths\", {}, num_to_pick=8, \n mask=(cases[\"State\"] == \"Rhode Island\"))", "After:\n" ] ], [ [ "# Clean up the Utah data\n\nThe Johns Hopkins data for Utah is missing quite a few data points.\nUse the New York Times data for Utah.", "_____no_output_____" ] ], [ [ "print(\"Before:\")\nutil.graph_examples(cases, \"Confirmed\", {}, num_to_pick=8, \n mask=(cases[\"State\"] == \"Utah\"))", "Before:\n" ], [ "# The Utah time series from the New York Times' data set are more \n# complete, so we use those numbers.\nut_fips = cases[cases[\"State\"] == \"Utah\"].index.values\nfor colname in [\"Confirmed\", \"Deaths\"]:\n new_series = cases[colname].copy(deep=True)\n for fips in ut_fips:\n new_series.loc[fips] = cases[colname + \"_NYT\"].loc[fips]\n cases[colname] = new_series \n\n# Note that the secondary data set has not \"Recovered\" time series, so\n# we leave those numbers alone for now.\n\nprint(\"After:\")\nutil.graph_examples(cases, \"Confirmed\", {}, num_to_pick=8, \n mask=(cases[\"State\"] == \"Utah\"))", "After:\n" ] ], [ [ "# Flag additional problematic and missing data points\n\nUse heuristics to identify and flag problematic data points across all \nthe time series. 
Generate Boolean masks that show the locations of these\noutliers.", "_____no_output_____" ] ], [ [ "# Now we're done with the secondary data set, so drop its columns.\ncases = cases.drop(columns=[\"Confirmed_NYT\", \"Deaths_NYT\", \"Confirmed_USAFacts\", \"Deaths_USAFacts\"])\ncases", "_____no_output_____" ], [ "# Now we need to find and flag obvious data-entry errors.\n# We'll start by creating columns of \"is outlier\" masks.\n# We use integers instead of Boolean values as a workaround for\n# https://github.com/pandas-dev/pandas/issues/33770\n\n# Start out with everything initialized to \"not an outlier\"\ncases[\"Confirmed_Outlier\"] = tp.TensorArray(np.zeros_like(cases[\"Confirmed\"].values))\ncases[\"Deaths_Outlier\"] = tp.TensorArray(np.zeros_like(cases[\"Deaths\"].values))\ncases[\"Recovered_Outlier\"] = tp.TensorArray(np.zeros_like(cases[\"Recovered\"].values))\n\ncases", "_____no_output_____" ] ], [ [ "## Flag time series that go from zero to nonzero and back again\n\nOne type of anomaly that occurs fairly often involves a time series\njumping from zero to a nonzero value, then back to zero again.\n\nLocate all instances of that pattern and mark the nonzero values\nas outliers.", "_____no_output_____" ] ], [ [ "def nonzero_then_zero(series: np.array):\n empty_mask = np.zeros_like(series, dtype=np.int8)\n if series[0] > 0:\n # Special case: first value is nonzero\n return empty_mask\n first_nonzero_offset = 0\n while first_nonzero_offset < len(series):\n if series[first_nonzero_offset] > 0:\n # Found the first nonzero.\n # Find the distance to the next zero value.\n next_zero_offset = first_nonzero_offset + 1\n while (next_zero_offset < len(series)\n and series[next_zero_offset] > 0):\n next_zero_offset += 1\n \n # Check the length of the run of zeros after\n # dropping back to zero.\n second_nonzero_offset = next_zero_offset + 1\n while (second_nonzero_offset < len(series)\n and series[second_nonzero_offset] == 0):\n second_nonzero_offset += 1\n \n nonzero_run_len = next_zero_offset - first_nonzero_offset\n second_zero_run_len = second_nonzero_offset - next_zero_offset\n \n # print(f\"{first_nonzero_offset} -> {next_zero_offset} -> {second_nonzero_offset}; series len {len(series)}\")\n if next_zero_offset >= len(series):\n # Everything after the first nonzero was a nonzero\n return empty_mask\n elif second_zero_run_len <= nonzero_run_len:\n # Series dropped back to zero, but the second zero\n # part was shorter than the nonzero section.\n # In this case, it's more likely that the second run\n # of zero values are actually missing values.\n return empty_mask\n else:\n # Series went zero -> nonzero -> zero -> nonzero\n # or zero -> nonzero -> zero -> [end]\n nonzero_run_mask = empty_mask.copy()\n nonzero_run_mask[first_nonzero_offset:next_zero_offset] = 1 \n return nonzero_run_mask\n first_nonzero_offset += 1 \n # If we get here, the series was all zeros\n return empty_mask\n \nfor colname in [\"Confirmed\", \"Deaths\", \"Recovered\"]:\n addl_outliers = np.stack([nonzero_then_zero(s.to_numpy()) for s in cases[colname]])\n outliers_colname = colname + \"_Outlier\"\n new_outliers = cases[outliers_colname].values.astype(np.bool) | addl_outliers\n cases[outliers_colname] = tp.TensorArray(new_outliers.astype(np.int8))\n\n# fips = 13297\n# print(cases.loc[fips][\"Confirmed\"])\n# print(nonzero_then_zero(cases.loc[fips][\"Confirmed\"]))", "_____no_output_____" ], [ "# Let's have a look at which time series acquired the most outliers as \n# a result of the code in the previous cell.\ndf = 
cases[[\"State\", \"County\"]].copy()\ndf[\"Confirmed_Num_Outliers\"] = np.count_nonzero(cases[\"Confirmed_Outlier\"], axis=1)\ncounties_with_outliers = df.sort_values(\"Confirmed_Num_Outliers\", ascending=False).head(10)\ncounties_with_outliers", "_____no_output_____" ], [ "# Plot the couties in the table above, with outliers highlighted.\n# The graph_examples() function is defined in util.py.\nutil.graph_examples(cases, \"Confirmed\", {}, num_to_pick=10, mask=(cases.index.isin(counties_with_outliers.index)))", "_____no_output_____" ] ], [ [ "## Flag time series that drop to zero, then go back up\n\nAnother type of anomaly involves the time series dropping down to \nzero, then going up again. Since all three time series are supposed\nto be cumulative counts, this pattern most likely indicates missing\ndata.\n\nTo correct for this problem, we mark any zero values after the\nfirst nonzero, non-outlier values as outliers, across all time series.", "_____no_output_____" ] ], [ [ "def zeros_after_first_nonzero(series: np.array, outliers: np.array):\n nonzero_mask = (series != 0)\n nonzero_and_not_outlier = nonzero_mask & (~outliers)\n first_nonzero = np.argmax(nonzero_and_not_outlier)\n if 0 == first_nonzero and series[0] == 0:\n # np.argmax(nonzero_mask) will return 0 if there are no nonzeros\n return np.zeros_like(series)\n after_nonzero_mask = np.zeros_like(series)\n after_nonzero_mask[first_nonzero:] = True\n return (~nonzero_mask) & after_nonzero_mask\n\nfor colname in [\"Confirmed\", \"Deaths\", \"Recovered\"]:\n outliers_colname = colname + \"_Outlier\"\n addl_outliers = np.stack([zeros_after_first_nonzero(s.to_numpy(), o.to_numpy()) \n for s, o in zip(cases[colname], cases[outliers_colname])])\n new_outliers = cases[outliers_colname].values.astype(np.bool) | addl_outliers\n cases[outliers_colname] = tp.TensorArray(new_outliers.astype(np.int8))\n\n# fips = 47039\n# print(cases.loc[fips][\"Confirmed\"])\n# print(cases.loc[fips][\"Confirmed_Outlier\"])\n# print(zeros_after_first_nonzero(cases.loc[fips][\"Confirmed\"], cases.loc[fips][\"Confirmed_Outlier\"])) ", "_____no_output_____" ], [ "# Redo our \"top 10 by number of outliers\" analysis with the additional outliers\ndf = cases[[\"State\", \"County\"]].copy()\ndf[\"Confirmed_Num_Outliers\"] = np.count_nonzero(cases[\"Confirmed_Outlier\"], axis=1)\ncounties_with_outliers = df.sort_values(\"Confirmed_Num_Outliers\", ascending=False).head(10)\ncounties_with_outliers", "_____no_output_____" ], [ "util.graph_examples(cases, \"Confirmed\", {}, num_to_pick=10, mask=(cases.index.isin(counties_with_outliers.index)))", "_____no_output_____" ], [ "# The steps we've just done have removed quite a few questionable\n# data points, but you will definitely want to flag additional \n# outliers by hand before trusting descriptive statistics about\n# any county.\n\n# TODO: Incorporate manual whitelists and blacklists of outliers\n# into this notebook.", "_____no_output_____" ] ], [ [ "# Precompute totals for the last 7 days\n\nSeveral of the notebooks downstream of this one need the number of cases and deaths\nfor the last 7 days, so we compute those values here for convenience.", "_____no_output_____" ] ], [ [ "def last_week_results(s: pd.Series):\n arr = s.to_numpy()\n today = arr[:,-1]\n week_ago = arr[:,-8]\n return today - week_ago\n\ncases[\"Confirmed_7_Days\"] = last_week_results(cases[\"Confirmed\"])\ncases[\"Deaths_7_Days\"] = last_week_results(cases[\"Deaths\"])\ncases.head()", "_____no_output_____" ] ], [ [ "# Write out cleaned time series 
data\n\nBy default, output files go to the `outputs` directory. You can use the `COVID_OUTPUTS_DIR` environment variable to override that location.", "_____no_output_____" ], [ "## CSV output\n\nComma separated value (CSV) files are a portable text-base format supported by a wide variety\nof different tools. The CSV format does not include type information, so we write a second\nfile of schema data in JSON format.", "_____no_output_____" ] ], [ [ "# Break out our time series into multiple rows again for writing to disk.\ncleaned_cases_vertical = util.explode_time_series(cases, dates)\ncleaned_cases_vertical", "_____no_output_____" ], [ "# The outlier masks are stored as integers as a workaround for a Pandas\n# bug. Convert them to Boolean values for writing to disk.\ncleaned_cases_vertical[\"Confirmed_Outlier\"] = cleaned_cases_vertical[\"Confirmed_Outlier\"].astype(np.bool)\ncleaned_cases_vertical[\"Deaths_Outlier\"] = cleaned_cases_vertical[\"Deaths_Outlier\"].astype(np.bool)\ncleaned_cases_vertical[\"Recovered_Outlier\"] = cleaned_cases_vertical[\"Recovered_Outlier\"].astype(np.bool)\ncleaned_cases_vertical", "_____no_output_____" ], [ "# Write out the results to a CSV file plus a JSON file of type metadata.\ncleaned_cases_vertical_csv_data_file = os.path.join(_OUTPUTS_DIR,\"us_counties_clean.csv\")\nprint(f\"Writing cleaned data to {cleaned_cases_vertical_csv_data_file}\")\ncleaned_cases_vertical.to_csv(cleaned_cases_vertical_csv_data_file, index=True)\ncol_type_mapping = {\n key: str(value) for key, value in cleaned_cases_vertical.dtypes.iteritems()\n}\n\ncleaned_cases_vertical_json_data_file = os.path.join(_OUTPUTS_DIR,\"us_counties_clean_meta.json\")\nprint(f\"Writing metadata to {cleaned_cases_vertical_json_data_file}\")\nwith open(cleaned_cases_vertical_json_data_file, \"w\") as f:\n json.dump(col_type_mapping, f)", "Writing cleaned data to outputs/us_counties_clean.csv\n" ] ], [ [ "## Feather output\n\nThe [Feather](https://arrow.apache.org/docs/python/feather.html) file format supports\nfast binary I/O over any data that can be represented using [Apache Arrow](https://arrow.apache.org/)\nFeather files also include schema and type information.", "_____no_output_____" ] ], [ [ "# Also write out the nested data in Feather format so that downstream\n# notebooks don't have to re-nest it.\n# No Feather serialization support for Pandas indices currently, so convert\n# the index on FIPS code to a normal column\ncases_for_feather = cases.reset_index()\ncases_for_feather.head()", "_____no_output_____" ], [ "# Write to Feather and make sure that reading back works too.\n# Also write dates that go with the time series\ndates_file = os.path.join(_OUTPUTS_DIR, \"dates.feather\")\ncases_file = os.path.join(_OUTPUTS_DIR, \"us_counties_clean.feather\")\npd.DataFrame({\"date\": dates}).to_feather(dates_file)\ncases_for_feather.to_feather(cases_file)\npd.read_feather(cases_file).head()", "_____no_output_____" ], [ "# Also make sure the dates can be read back in from a binary file\npd.read_feather(dates_file).head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06c1175b4427c2e9595fc5ccb15f7e790af6cf3
26,872
ipynb
Jupyter Notebook
notebooks/2 - Decision Trees.ipynb
ECGomes/gdg-ml-workshop
2dfd738d7ff0737c14221f0e7fb3c4521f688786
[ "MIT" ]
1
2020-01-07T12:11:16.000Z
2020-01-07T12:11:16.000Z
notebooks/2 - Decision Trees.ipynb
ECGomes/gdg-ml-workshop
2dfd738d7ff0737c14221f0e7fb3c4521f688786
[ "MIT" ]
null
null
null
notebooks/2 - Decision Trees.ipynb
ECGomes/gdg-ml-workshop
2dfd738d7ff0737c14221f0e7fb3c4521f688786
[ "MIT" ]
null
null
null
49.855288
1,772
0.520728
[ [ [ "# IMPORTS\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport sklearn", "_____no_output_____" ], [ "# Get the data from the CSV file and print it\n\ntennis = pd.read_csv('tennis.csv')\ntennis = tennis.astype('category')\ntennis", "_____no_output_____" ], [ "# Decision Tree Classifier Step 1\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\n\nX = tennis[tennis.columns[:-1]]\ny = tennis[tennis.columns[-1]]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\nclf_tree = DecisionTreeClassifier()\nclf_tree.fit(X_train, y_train)", "_____no_output_____" ], [ "# Decision Tree Classifier Step 2\n\nfrom sklearn.preprocessing import LabelEncoder\n\nlb = LabelEncoder() \ntennis_encoded = pd.DataFrame({})\n\ntennis_encoded['outlook'] = lb.fit_transform(tennis['outlook']) \ntennis_encoded['temp'] = lb.fit_transform(tennis['temp'] ) \ntennis_encoded['humidity'] = lb.fit_transform(tennis['humidity'] ) \ntennis_encoded['windy'] = lb.fit_transform(tennis['windy'] ) \ntennis_encoded['play'] = tennis['play']\n\ntennis_encoded", "_____no_output_____" ], [ "# Decision Tree Classifier Step 3\n\nX = tennis_encoded[tennis.columns[:-1]]\ny = tennis_encoded[tennis.columns[-1]]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n\nclf_tree = DecisionTreeClassifier()\nclf_tree.fit(X_train, y_train)\n\nprint('Training score: {}'.format(clf_tree.score(X_train, y_train)))\nprint('Testing score: {}'.format(clf_tree.score(X_test, y_test)))", "Training score: 1.0\nTesting score: 0.6\n" ], [ "# Plot Decision Tree\n\n# Install graphviz -> Open a terminal from the Jupyter and type: pip install graphviz \n# Or if from conda: conda install python-graphviz\n# For help: https://scikit-learn.org/stable/modules/tree.html\n\nimport graphviz\nfrom sklearn import tree\n\ndot_data = tree.export_graphviz(clf_tree, out_file=None, \n feature_names=X_train.columns, \n class_names=y_train.unique(),\n filled=True, rounded=True,\n impurity=False,\n special_characters=True) \ngraph = graphviz.Source(dot_data) \ngraph", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d06c471a5f4fd323b679cd14b28581b3356f73f5
14,411
ipynb
Jupyter Notebook
notebooks/train.ipynb
Lmath2001/Spam-Filtering
3b1278b13981630929841911abc188b373631a4f
[ "MIT", "Unlicense" ]
null
null
null
notebooks/train.ipynb
Lmath2001/Spam-Filtering
3b1278b13981630929841911abc188b373631a4f
[ "MIT", "Unlicense" ]
null
null
null
notebooks/train.ipynb
Lmath2001/Spam-Filtering
3b1278b13981630929841911abc188b373631a4f
[ "MIT", "Unlicense" ]
null
null
null
69.956311
10,176
0.837555
[ [ [ "import pickle\nwith open('x_list.pkl','rb') as f:\n x_prepared = pickle.load(f)", "_____no_output_____" ], [ "import pandas as pd\ndata = pd.read_csv(\"final_data.csv\")", "_____no_output_____" ], [ "from sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nvectorizer = CountVectorizer(max_features=20000)\nx = vectorizer.fit_transform([\" \".join(text) for text in x_prepared]).toarray()", "_____no_output_____" ], [ "x.shape", "_____no_output_____" ], [ "import numpy as np\nx_train,x_test,y_train,y_test = train_test_split(x,np.asarray(data[\"Label\"]),random_state=42,test_size=0.2)\nx_train.shape", "_____no_output_____" ], [ "import time\nstart_time = time.time()\nNB = GaussianNB()\nNB.fit(x_train,y_train)\nend_time = time.time()\n\nprint(round(end_time-start_time,2))", "72.4\n" ], [ "NB.score(x_test,y_test)", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\ny_pred = NB.predict(x_test)\n\nconf = confusion_matrix(y_pred=y_pred,y_true=y_test)\nimport seaborn\nseaborn.heatmap(conf,annot=True,fmt=\".1f\",linewidths=1.5)\nimport matplotlib.pyplot as plt\nplt.show()", "_____no_output_____" ], [ "import pickle\n# We will save count vectorizer and model\nwith open(\"model.pckl\",mode=\"wb\") as F:\n pickle.dump(NB,F)\n \nwith open(\"vectorizer.pckl\",mode=\"wb\") as F:\n pickle.dump(vectorizer,F)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06c56b2bda46f648c6029ea12c79f652892bf14
150,849
ipynb
Jupyter Notebook
PyCBC-Tutorials-master/examples/waveform_similarity.ipynb
basuparth/ICERM_Workshop
ebabce680fc87e90ff1de30246dcda9beb384bb4
[ "MIT" ]
null
null
null
PyCBC-Tutorials-master/examples/waveform_similarity.ipynb
basuparth/ICERM_Workshop
ebabce680fc87e90ff1de30246dcda9beb384bb4
[ "MIT" ]
null
null
null
PyCBC-Tutorials-master/examples/waveform_similarity.ipynb
basuparth/ICERM_Workshop
ebabce680fc87e90ff1de30246dcda9beb384bb4
[ "MIT" ]
null
null
null
554.591912
78,100
0.938389
[ [ [ "### Comparing Gravitational waveforms to each other###", "_____no_output_____" ] ], [ [ "# Install the software we need\nimport sys\n!{sys.executable} -m pip install pycbc lalsuite ligo-common --no-cache-dir", "Requirement already satisfied: pycbc in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages/PyCBC-9rc6331-py3.7-linux-x86_64.egg (9rc6331)\nRequirement already satisfied: lalsuite in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (6.70)\nRequirement already satisfied: ligo-common in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (1.0.3)\nRequirement already satisfied: numpy>=1.16.0 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.19.0)\nRequirement already satisfied: Mako>=1.0.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.1.3)\nRequirement already satisfied: cython>=0.29 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (0.29.20)\nRequirement already satisfied: decorator>=3.4.2 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (4.4.2)\nRequirement already satisfied: matplotlib>=1.5.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (3.2.2)\nRequirement already satisfied: pillow in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (7.1.2)\nRequirement already satisfied: h5py>=2.5 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (2.10.0)\nRequirement already satisfied: jinja2 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (2.11.2)\nRequirement already satisfied: mpld3>=0.3 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (0.5.1)\nRequirement already satisfied: lscsoft-glue>=1.59.3 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (2.0.0)\nRequirement already satisfied: emcee==2.2.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (2.2.1)\nRequirement already satisfied: requests>=1.2.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (2.24.0)\nRequirement already satisfied: beautifulsoup4>=4.6.0 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (4.9.1)\nRequirement already satisfied: six>=1.10.0 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.15.0)\nRequirement already satisfied: ligo-segments in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.2.0)\nRequirement already satisfied: tqdm in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (4.46.1)\nRequirement already satisfied: gwdatafind in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.0.4)\nRequirement already satisfied: astropy>=2.0.3 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (4.0.1.post1)\nRequirement already satisfied: scipy>=0.16.0 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pycbc) (1.5.0)\nRequirement already satisfied: python-dateutil in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from lalsuite) (2.8.1)\nRequirement already satisfied: MarkupSafe>=0.9.2 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from 
Mako>=1.0.1->pycbc) (1.1.1)\nRequirement already satisfied: kiwisolver>=1.0.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from matplotlib>=1.5.1->pycbc) (1.2.0)\nRequirement already satisfied: cycler>=0.10 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from matplotlib>=1.5.1->pycbc) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from matplotlib>=1.5.1->pycbc) (2.4.7)\nRequirement already satisfied: pyOpenSSL in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from lscsoft-glue>=1.59.3->pycbc) (19.1.0)\nRequirement already satisfied: idna<3,>=2.5 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from requests>=1.2.1->pycbc) (2.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from requests>=1.2.1->pycbc) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from requests>=1.2.1->pycbc) (2020.6.20)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from requests>=1.2.1->pycbc) (1.25.9)\nRequirement already satisfied: soupsieve>1.2 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from beautifulsoup4>=4.6.0->pycbc) (2.0.1)\nRequirement already satisfied: cryptography>=2.8 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (2.9.2)\nRequirement already satisfied: cffi!=1.11.3,>=1.8 in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from cryptography>=2.8->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (1.14.0)\nRequirement already satisfied: pycparser in /home/ahnitz/projects/PyCBC-Tutorials/env/lib/python3.7/site-packages (from cffi!=1.11.3,>=1.8->cryptography>=2.8->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (2.20)\n" ], [ "%matplotlib inline\n# We learn about the potential parameters of a source by comparing it to many different waveforms\n# each of which represents a possible source with different properties. \nimport pylab\nfrom pycbc.waveform import get_td_waveform\n\n# We can directly compare how similar waveforms are to each other using an inner product between then called \n# a 'match'. This maximizes over the possible time of arrival and phase. We'll generate a reference waveform\n# which we'll compare to.\nm1 = m2 = 20\nf_lower = 20\napproximant = \"SEOBNRv4\"\ndelta_t = 1.0 / 2048\nhp, _ = get_td_waveform(approximant=approximant,\n mass1=m1, mass2=m2,\n delta_t=delta_t, f_lower=f_lower)\npylab.plot(hp.sample_times, hp)\npylab.xlabel('Time (s)')\npylab.ylabel('Strain')", "_____no_output_____" ], [ "# How similar waveforms are to each other depends on how important we consider different frequencies, we \n# can account for this by weighting with an estimated power spectral density. 
We'll use here \n# the predicted final Advanced LIGO final design sensitivity\nfrom pycbc.psd import aLIGOZeroDetHighPower\npsd = aLIGOZeroDetHighPower(len(hp) // 2 + 1, 1.0 / hp.duration, f_lower)\n\npylab.loglog(psd.sample_frequencies, psd)\npylab.xlabel('Frequency (Hz)')\npylab.ylabel('Strain**2 / Hz')\npylab.xlim(20, 1000)", "_____no_output_____" ], [ "# We can now compare how similar our waveform is to others with different masses\nfrom pycbc.filter import match\nimport numpy\n\nmasses = numpy.arange(19, 21, .2)\nmatches = []\nfor m2 in masses:\n hp2, _ = get_td_waveform(approximant=approximant,\n mass1=m1, mass2=m2,\n delta_t=delta_t, f_lower=f_lower)\n hp2 = hp2[:len(hp)] if len(hp) < len(hp2) else hp2\n hp2.resize(len(hp))\n \n m, idx = match(hp, hp2, psd=psd, low_frequency_cutoff=f_lower)\n matches.append(m)\n pylab.plot(hp2.sample_times, hp2)\npylab.xlim(-.05, .02)", "_____no_output_____" ], [ "pylab.plot(masses, matches)\npylab.ylabel('Match')\npylab.xlabel('Mass of second object (Solar Masses)')\n\n# You can think of the match also as the fraction of signal-to-noise that you could recover with a template that \n# doesn't *exactly* look like your source", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d06c6322401c998e9d56bee8bed61295ed01122a
38,285
ipynb
Jupyter Notebook
nlp/UPDATED_NLP_COURSE/03-Text-Classification/00-SciKit-Learn-Primer.ipynb
rishuatgithub/MLPy
603fdc86a1d56c41e8199b94f96a19f35c719586
[ "Apache-2.0" ]
null
null
null
nlp/UPDATED_NLP_COURSE/03-Text-Classification/00-SciKit-Learn-Primer.ipynb
rishuatgithub/MLPy
603fdc86a1d56c41e8199b94f96a19f35c719586
[ "Apache-2.0" ]
1
2022-03-12T00:55:20.000Z
2022-03-12T00:55:20.000Z
nlp/UPDATED_NLP_COURSE/03-Text-Classification/00-SciKit-Learn-Primer.ipynb
rishuatgithub/MLPy
603fdc86a1d56c41e8199b94f96a19f35c719586
[ "Apache-2.0" ]
3
2021-04-15T08:10:01.000Z
2021-11-04T17:57:51.000Z
41.034298
7,900
0.668251
[ [ [ "___\n\n<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n___", "_____no_output_____" ], [ "# Scikit-learn Primer\n\n**Scikit-learn** (http://scikit-learn.org/) is an open-source machine learning library for Python that offers a variety of regression, classification and clustering algorithms.\n\nIn this section we'll perform a fairly simple classification exercise with scikit-learn. In the next section we'll leverage the machine learning strength of scikit-learn to perform natural language classifications.", "_____no_output_____" ], [ "# Installation and Setup\n\n### From the command line or terminal:\n> `conda install scikit-learn`\n> <br>*or*<br>\n> `pip install -U scikit-learn`\n\nScikit-learn additionally requires that NumPy and SciPy be installed. For more info visit http://scikit-learn.org/stable/install.html", "_____no_output_____" ], [ "# Perform Imports and Load Data\nFor this exercise we'll be using the **SMSSpamCollection** dataset from [UCI datasets](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) that contains more than 5 thousand SMS phone messages.<br>You can check out the [**sms_readme**](../TextFiles/sms_readme.txt) file for more info.\n\nThe file is a [tab-separated-values](https://en.wikipedia.org/wiki/Tab-separated_values) (tsv) file with four columns:\n> **label** - every message is labeled as either ***ham*** or ***spam***<br>\n> **message** - the message itself<br>\n> **length** - the number of characters in each message<br>\n> **punct** - the number of punctuation characters in each message", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('../TextFiles/smsspamcollection.tsv', sep='\\t')\ndf.head()", "_____no_output_____" ], [ "len(df)", "_____no_output_____" ] ], [ [ "## Check for missing values:\nMachine learning models usually require complete data.", "_____no_output_____" ] ], [ [ "df.isnull().sum()", "_____no_output_____" ] ], [ [ "## Take a quick look at the *ham* and *spam* `label` column:", "_____no_output_____" ] ], [ [ "df['label'].unique()", "_____no_output_____" ], [ "df['label'].value_counts()", "_____no_output_____" ] ], [ [ "<font color=green>We see that 4825 out of 5572 messages, or 86.6%, are ham.<br>This means that any machine learning model we create has to perform **better than 86.6%** to beat random chance.</font>", "_____no_output_____" ], [ "## Visualize the data:\nSince we're not ready to do anything with the message text, let's see if we can predict ham/spam labels based on message length and punctuation counts. We'll look at message `length` first:", "_____no_output_____" ] ], [ [ "df['length'].describe()", "_____no_output_____" ] ], [ [ "<font color=green>This dataset is extremely skewed. The mean value is 80.5 and yet the max length is 910. 
Let's plot this on a logarithmic x-axis.</font>", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.xscale('log')\nbins = 1.15**(np.arange(0,50))\nplt.hist(df[df['label']=='ham']['length'],bins=bins,alpha=0.8)\nplt.hist(df[df['label']=='spam']['length'],bins=bins,alpha=0.8)\nplt.legend(('ham','spam'))\nplt.show()", "_____no_output_____" ] ], [ [ "<font color=green>It looks like there's a small range of values where a message is more likely to be spam than ham.</font>\n\nNow let's look at the `punct` column:", "_____no_output_____" ] ], [ [ "df['punct'].describe()", "_____no_output_____" ], [ "plt.xscale('log')\nbins = 1.5**(np.arange(0,15))\nplt.hist(df[df['label']=='ham']['punct'],bins=bins,alpha=0.8)\nplt.hist(df[df['label']=='spam']['punct'],bins=bins,alpha=0.8)\nplt.legend(('ham','spam'))\nplt.show()", "_____no_output_____" ] ], [ [ "<font color=green>This looks even worse - there seem to be no values where one would pick spam over ham. We'll still try to build a machine learning classification model, but we should expect poor results.</font>", "_____no_output_____" ], [ "___\n# Split the data into train & test sets:\n\nIf we wanted to divide the DataFrame into two smaller sets, we could use\n> `train, test = train_test_split(df)`\n\nFor our purposes let's also set up our Features (X) and Labels (y). The Label is simple - we're trying to predict the `label` column in our data. For Features we'll use the `length` and `punct` columns. *By convention, **X** is capitalized and **y** is lowercase.*", "_____no_output_____" ], [ "## Selecting features\nThere are two ways to build a feature set from the columns we want. If the number of features is small, then we can pass those in directly:\n> `X = df[['length','punct']]`\n\nIf the number of features is large, then it may be easier to drop the Label and any other unwanted columns:\n> `X = df.drop(['label','message'], axis=1)`\n\nThese operations make copies of **df**, but do not change the original DataFrame in place. All the original data is preserved.", "_____no_output_____" ] ], [ [ "# Create Feature and Label sets\nX = df[['length','punct']] # note the double set of brackets\ny = df['label']", "_____no_output_____" ] ], [ [ "## Additional train/test/split arguments:\nThe default test size for `train_test_split` is 30%. Here we'll assign 33% of the data for testing.<br>\nAlso, we can set a `random_state` seed value to ensure that everyone uses the same \"random\" training & testing sets.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\nprint('Training Data Shape:', X_train.shape)\nprint('Testing Data Shape: ', X_test.shape)", "Training Data Shape: (3733, 2)\nTesting Data Shape: (1839, 2)\n" ] ], [ [ "Now we can pass these sets into a series of different training & testing algorithms and compare their results.", "_____no_output_____" ], [ "___\n# Train a Logistic Regression classifier\nOne of the simplest multi-class classification tools is [logistic regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html). Scikit-learn offers a variety of algorithmic solvers; we'll use [L-BFGS](https://en.wikipedia.org/wiki/Limited-memory_BFGS). 
", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\nlr_model = LogisticRegression(solver='lbfgs')\n\nlr_model.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## Test the Accuracy of the Model", "_____no_output_____" ] ], [ [ "from sklearn import metrics\n\n# Create a prediction set:\npredictions = lr_model.predict(X_test)\n\n# Print a confusion matrix\nprint(metrics.confusion_matrix(y_test,predictions))", "[[1547 46]\n [ 241 5]]\n" ], [ "# You can make the confusion matrix less confusing by adding labels:\ndf = pd.DataFrame(metrics.confusion_matrix(y_test,predictions), index=['ham','spam'], columns=['ham','spam'])\ndf", "_____no_output_____" ] ], [ [ "<font color=green>These results are terrible! More spam messages were confused as ham (241) than correctly identified as spam (5), although a relatively small number of ham messages (46) were confused as spam.</font>", "_____no_output_____" ] ], [ [ "# Print a classification report\nprint(metrics.classification_report(y_test,predictions))", " precision recall f1-score support\n\n ham 0.87 0.97 0.92 1593\n spam 0.10 0.02 0.03 246\n\n micro avg 0.84 0.84 0.84 1839\n macro avg 0.48 0.50 0.47 1839\nweighted avg 0.76 0.84 0.80 1839\n\n" ], [ "# Print the overall accuracy\nprint(metrics.accuracy_score(y_test,predictions))", "0.84393692224\n" ] ], [ [ "<font color=green>This model performed *worse* than a classifier that assigned all messages as \"ham\" would have!</font>", "_____no_output_____" ], [ "___\n# Train a naïve Bayes classifier:\nOne of the most common - and successful - classifiers is [naïve Bayes](http://scikit-learn.org/stable/modules/naive_bayes.html#naive-bayes).", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import MultinomialNB\n\nnb_model = MultinomialNB()\n\nnb_model.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## Run predictions and report on metrics", "_____no_output_____" ] ], [ [ "predictions = nb_model.predict(X_test)\nprint(metrics.confusion_matrix(y_test,predictions))", "[[1583 10]\n [ 246 0]]\n" ] ], [ [ "<font color=green>The total number of confusions dropped from **287** to **256**. 
[241+46=287, 246+10=256]</font>", "_____no_output_____" ] ], [ [ "print(metrics.classification_report(y_test,predictions))", " precision recall f1-score support\n\n ham 0.87 0.99 0.93 1593\n spam 0.00 0.00 0.00 246\n\n micro avg 0.86 0.86 0.86 1839\n macro avg 0.43 0.50 0.46 1839\nweighted avg 0.75 0.86 0.80 1839\n\n" ], [ "print(metrics.accuracy_score(y_test,predictions))", "0.860793909734\n" ] ], [ [ "<font color=green>Better, but still less accurate than 86.6%</font>", "_____no_output_____" ], [ "___\n# Train a support vector machine (SVM) classifier\nAmong the SVM options available, we'll use [C-Support Vector Classification (SVC)](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC)", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC\nsvc_model = SVC(gamma='auto')\nsvc_model.fit(X_train,y_train)", "_____no_output_____" ] ], [ [ "## Run predictions and report on metrics", "_____no_output_____" ] ], [ [ "predictions = svc_model.predict(X_test)\nprint(metrics.confusion_matrix(y_test,predictions))", "[[1515 78]\n [ 131 115]]\n" ] ], [ [ "<font color=green>The total number of confusions dropped even further to **209**.</font>", "_____no_output_____" ] ], [ [ "print(metrics.classification_report(y_test,predictions))", " precision recall f1-score support\n\n ham 0.92 0.95 0.94 1593\n spam 0.60 0.47 0.52 246\n\n micro avg 0.89 0.89 0.89 1839\n macro avg 0.76 0.71 0.73 1839\nweighted avg 0.88 0.89 0.88 1839\n\n" ], [ "print(metrics.accuracy_score(y_test,predictions))", "0.886351277868\n" ] ], [ [ "<font color=green>And finally we have a model that performs *slightly* better than random chance.</font>", "_____no_output_____" ], [ "Great! Now you should be able to load a dataset, divide it into training and testing sets, and perform simple analyses using scikit-learn.\n## Next up: Feature Extraction from Text", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d06c6884153b84dc166b541de32504ff7f4d1e2a
13,418
ipynb
Jupyter Notebook
Gena/map_center_object.ipynb
guy1ziv2/earthengine-py-notebooks
931f57c61c147fe6cff745c2a099a444716e69e4
[ "MIT" ]
1
2020-07-14T10:45:09.000Z
2020-07-14T10:45:09.000Z
Gena/map_center_object.ipynb
Yesicaleo/earthengine-py-notebooks
b737a889d5023408cc5cec204f8bd5f9d51cdee8
[ "MIT" ]
null
null
null
Gena/map_center_object.ipynb
Yesicaleo/earthengine-py-notebooks
b737a889d5023408cc5cec204f8bd5f9d51cdee8
[ "MIT" ]
1
2021-08-12T12:19:37.000Z
2021-08-12T12:19:37.000Z
80.347305
8,320
0.84245
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Gena/map_center_object.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Gena/map_center_object.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Gena/map_center_object.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Gena/map_center_object.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.\nThe magic command `%%capture` can be used to hide output from a specific cell.", "_____no_output_____" ] ], [ [ "# %%capture\n# !pip install earthengine-api\n# !pip install geehydro", "_____no_output_____" ] ], [ [ "Import libraries", "_____no_output_____" ] ], [ [ "import ee\nimport folium\nimport geehydro", "_____no_output_____" ] ], [ [ "Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` \nif you are running this notebook for this first time or if you are getting an authentication error. ", "_____no_output_____" ] ], [ [ "# ee.Authenticate()\nee.Initialize()", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThis step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. \nThe optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.", "_____no_output_____" ] ], [ [ "Map = folium.Map(location=[40, -100], zoom_start=4)\nMap.setOptions('HYBRID')", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# get a single feature\ncountries = ee.FeatureCollection(\"USDOS/LSIB_SIMPLE/2017\")\ncountry = countries.filter(ee.Filter.eq('country_na', 'Ukraine'))\nMap.addLayer(country, { 'color': 'orange' }, 'feature collection layer')\n\n# TEST: center feature on a map\nMap.centerObject(country, 6)\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06c7493ce9a427284c86658123604fb6a025b82
59,040
ipynb
Jupyter Notebook
docs/examples/AbsComponent_examples.ipynb
marijana777/linetools
73720a2f6df42b7dde1f35055cd40ad970200f7f
[ "BSD-3-Clause" ]
null
null
null
docs/examples/AbsComponent_examples.ipynb
marijana777/linetools
73720a2f6df42b7dde1f35055cd40ad970200f7f
[ "BSD-3-Clause" ]
null
null
null
docs/examples/AbsComponent_examples.ipynb
marijana777/linetools
73720a2f6df42b7dde1f35055cd40ad970200f7f
[ "BSD-3-Clause" ]
null
null
null
99.561551
43,046
0.832554
[ [ [ "# Examples for the AbsComponent Class (v1.1)", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\n# suppress warnings for these examples\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# import\ntry:\n import seaborn as sns; sns.set_style(\"white\")\nexcept:\n pass\nimport numpy as np\nfrom astropy.table import QTable\nimport astropy.units as u\nfrom linetools.spectralline import AbsLine\nfrom linetools.isgm import utils as ltiu\nfrom linetools.analysis import absline as laa\nfrom linetools.spectra import io as lsio\nfrom linetools.isgm.abscomponent import AbsComponent\nimport linetools.analysis.voigt as lav\n\nimport imp\nlt_path = imp.find_module('linetools')[1]", "_____no_output_____" ] ], [ [ "## Instantiate", "_____no_output_____" ], [ "### Standard", "_____no_output_____" ] ], [ [ "abscomp = AbsComponent((10.0*u.deg, 45*u.deg), (14,2), 1.0, [-300,300]*u.km/u.s)\nabscomp", "_____no_output_____" ] ], [ [ "### From AbsLines", "_____no_output_____" ], [ "#### From one line", "_____no_output_____" ] ], [ [ "lya = AbsLine(1215.670*u.AA, z=2.92939)\nlya.limits.set([-300.,300.]*u.km/u.s) # vlim", "_____no_output_____" ], [ "abscomp = AbsComponent.from_abslines([lya])\nprint(abscomp)\nabscomp._abslines", "<AbsComponent: 00:00:00 +00:00:00, Name=HI_z2.92939, Zion=(1,1), Ej=0 1 / cm, z=2.92939, vlim=-300 km / s,300 km / s>\n" ] ], [ [ "#### From multiple", "_____no_output_____" ] ], [ [ "lyb = AbsLine(1025.7222*u.AA, z=lya.z)\nlyb.limits.set([-300.,300.]*u.km/u.s) # vlim", "_____no_output_____" ], [ "abscomp = AbsComponent.from_abslines([lya,lyb])\nprint(abscomp)\nabscomp._abslines", "<AbsComponent: 00:00:00 +00:00:00, Name=HI_z2.92939, Zion=(1,1), Ej=0 1 / cm, z=2.92939, vlim=-300 km / s,300 km / s>\n" ], [ "#### Define from QTable and make an spectrum model\n\n# We first create a QTable with the most relevant information for defining AbsComponents\ntab = QTable()\ntab['ion_name'] = ['HI', 'HI']\ntab['z_comp'] = [0.2, 0.15] # you should put the right redshifts here\ntab['logN'] = [19., 19.] 
# you should put the right column densities here\ntab['sig_logN'] = [0.1, 0.1] # you should put the right column density uncertainties here\ntab['flag_logN'] = [1, 1] # Flags correspond to linetools notation\ntab['RA'] = [0, 0]*u.deg # you should put the right coordinates here\ntab['DEC'] = [0, 0]*u.deg # you should put the right coordinates here\ntab['vmin'] = [-100, -100]*u.km/u.s # This correspond to the velocity lower limit for the absorption components\ntab['vmax'] = [100, 100]*u.km/u.s # This correspond to the velocity upper limit for the absorption components\ntab['b'] = [20, 20]*u.km/u.s # you should put the right Dopper parameters here \n\n# We now use this table to create a list of AbsComponents\ncomplist = ltiu.complist_from_table(tab) \n\n# Now we need to add AbsLines to the component that are relevant for your spectrum\n# This will be done by knowing the observed wavelength limits\nwvlim = [1150, 1750]*u.AA\nfor comp in complist:\n comp.add_abslines_from_linelist(llist='HI') # you can also use llist=\"ISM\" if you have other non HI components\n\n# Finally, we can create a model spectrum for each AbsCompontent\nwv_array = np.arange(1150,1750, 0.01) * u.AA # This should match your spectrum wavelength array \nmodel_1 = ltav.voigt_from_components(wv_array, [complist[0]])", "Loading abundances from Asplund2009\nAbundances are relative by number on a logarithmic scale with H=12\n" ] ], [ [ "## Methods", "_____no_output_____" ], [ "### Generate a Component Table", "_____no_output_____" ] ], [ [ "lya.attrib['logN'] = 14.1\nlya.attrib['sig_logN'] = 0.15\nlya.attrib['flag_N'] = 1\nlaa.linear_clm(lya.attrib)\nlyb.attrib['logN'] = 14.15\nlyb.attrib['sig_logN'] = 0.19\nlyb.attrib['flag_N'] = 1\nlaa.linear_clm(lyb.attrib)", "_____no_output_____" ], [ "abscomp = AbsComponent.from_abslines([lya,lyb])\ncomp_tbl = abscomp.build_table()\ncomp_tbl", "_____no_output_____" ] ], [ [ "### Synthesize multiple components", "_____no_output_____" ] ], [ [ "SiIItrans = ['SiII 1260', 'SiII 1304', 'SiII 1526']\nSiIIlines = []\nfor trans in SiIItrans:\n iline = AbsLine(trans, z=2.92939)\n iline.attrib['logN'] = 12.8 + np.random.rand()\n iline.attrib['sig_logN'] = 0.15\n iline.attrib['flag_N'] = 1\n iline.limits.set([-300.,50.]*u.km/u.s) # vlim\n _,_ = laa.linear_clm(iline.attrib)\n SiIIlines.append(iline)\nSiIIcomp = AbsComponent.from_abslines(SiIIlines)\nSiIIcomp.synthesize_colm()", "_____no_output_____" ], [ "SiIIlines2 = []\nfor trans in SiIItrans:\n iline = AbsLine(trans, z=2.92939)\n iline.attrib['logN'] = 13.3 + np.random.rand()\n iline.attrib['sig_logN'] = 0.15\n iline.attrib['flag_N'] = 1\n iline.limits.set([50.,300.]*u.km/u.s) # vlim\n _,_ = laa.linear_clm(iline.attrib)\n SiIIlines2.append(iline)\nSiIIcomp2 = AbsComponent.from_abslines(SiIIlines2)\nSiIIcomp2.synthesize_colm()", "_____no_output_____" ], [ "abscomp.synthesize_colm()\n[abscomp,SiIIcomp,SiIIcomp2]", "_____no_output_____" ], [ "synth_SiII = ltiu.synthesize_components([SiIIcomp,SiIIcomp2])\nsynth_SiII", "_____no_output_____" ] ], [ [ "### Generate multiple components from abslines", "_____no_output_____" ] ], [ [ "comps = ltiu.build_components_from_abslines([lya,lyb,SiIIlines[0],SiIIlines[1]])\ncomps", "_____no_output_____" ] ], [ [ "### Generate an Ion Table", "_____no_output_____" ] ], [ [ "tbl = ltiu.iontable_from_components([abscomp,SiIIcomp,SiIIcomp2])\ntbl", "_____no_output_____" ] ], [ [ "### Stack plot", "_____no_output_____" ], [ "#### Load a spectrum", "_____no_output_____" ] ], [ [ "xspec = 
lsio.readspec(lt_path+'/spectra/tests/files/UM184_nF.fits')\nlya.analy['spec'] = xspec\nlyb.analy['spec'] = xspec", "_____no_output_____" ] ], [ [ "#### Show", "_____no_output_____" ] ], [ [ "abscomp = AbsComponent.from_abslines([lya,lyb])\nabscomp.stack_plot()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06c8067ad0852c6311e1872acf42213f7fb3951
193,086
ipynb
Jupyter Notebook
fair_drop/opportunities.ipynb
bbilgic/honestnft-shenanigans-avax
0bddbbb4ebb3f27a49154dc9633f910f5e0c35f1
[ "MIT" ]
null
null
null
fair_drop/opportunities.ipynb
bbilgic/honestnft-shenanigans-avax
0bddbbb4ebb3f27a49154dc9633f910f5e0c35f1
[ "MIT" ]
null
null
null
fair_drop/opportunities.ipynb
bbilgic/honestnft-shenanigans-avax
0bddbbb4ebb3f27a49154dc9633f910f5e0c35f1
[ "MIT" ]
null
null
null
66.147996
22,194
0.624737
[ [ [ "\"\"\"\nUpdate Parameters Here\n\"\"\"\nCONTRACT_ADDRESS = \"0x9A534628B4062E123cE7Ee2222ec20B86e16Ca8F\"\nCOLLECTION = \"MekaVerse\"\nMETHOD = \"raritytools\"\nTOKEN_COL = \"TOKEN_ID\" # Use TOKEN_NAME if you prefer to infer token id from token name\nNUMBERS_TO_CHECK = 50 # Number of tokens to search for opportunities\nOPENSEA_API_KEY = \"YOUR_API_KEY\"", "_____no_output_____" ], [ "import time\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\n\n\n# Define variables used throughout\nRARITY_DATABASE = f\"../metadata/rarity_data/{COLLECTION}_{METHOD}.csv\"\nETHER_UNITS = 1e18\n\n\"\"\"\nPlot params\n\"\"\"\nplt.rcParams.update({\"figure.facecolor\": \"white\", \"savefig.facecolor\": \"white\"})\n\n# Load rarity database and format\nRARITY_DB = pd.read_csv(RARITY_DATABASE)\nRARITY_DB = RARITY_DB[RARITY_DB[\"TOKEN_ID\"].duplicated() == False]\nif TOKEN_COL == \"TOKEN_NAME\":\n RARITY_DB[\"TOKEN_ID\"] = RARITY_DB[\"TOKEN_NAME\"].str.split(\"#\").str[1].astype(int)", "_____no_output_____" ], [ "\"\"\"\nGet open bids from OpenSea and plot.\n\"\"\"\n\n\ndef getOpenseaOrders(token_id, contract_address):\n url = \"https://api.opensea.io/wyvern/v1/orders\"\n\n querystring = {\n \"bundled\": \"false\",\n \"include_bundled\": \"false\",\n \"is_english\": \"false\",\n \"include_invalid\": \"false\",\n \"limit\": \"50\",\n \"offset\": \"0\",\n \"order_by\": \"created_date\",\n \"order_direction\": \"desc\",\n \"asset_contract_address\": contract_address,\n \"token_ids\": [token_id],\n }\n\n headers = {\"Accept\": \"application/json\", \"X-API-KEY\": OPENSEA_API_KEY}\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response_json = response.json()\n return response_json\n\n\ndef plot_all_bids(bid_db):\n series = []\n max_listings = bid_db[\"token_ids\"].value_counts().max()\n\n for i in range(1, max_listings + 1):\n n_bids = bid_db.groupby(\"token_ids\").filter(lambda x: len(x) == i)\n series.append(n_bids)\n\n colors = iter(cm.rainbow(np.linspace(0, 1, len(series))))\n for i in range(0, len(series)):\n plt.scatter(\n series[i][\"ranks\"], series[i][\"bid\"], color=next(colors), label=i + 1\n )\n plt.xlabel(\"rarity rank\")\n plt.ylabel(\"price (ETHER)\")\n plt.legend(loc=\"best\")\n\n plt.show()\n\n\ndef get_all_bids(rarity_db):\n token_ids = []\n ranks = []\n bids = []\n numbersToCheck = []\n\n for x in rarity_db[\"TOKEN_ID\"]:\n numbersToCheck.append(x)\n\n if len(numbersToCheck) == 15: # send 15 NFTs at a time to API\n orders = getOpenseaOrders(numbersToCheck, CONTRACT_ADDRESS)\n numbersToCheck = []\n\n for order in orders[\"orders\"]:\n if order[\"side\"] == 0:\n tokenId = int(order[\"asset\"][\"token_id\"])\n token_ids.append(tokenId)\n ranks.append(\n float(rarity_db[rarity_db[\"TOKEN_ID\"] == tokenId][\"Rank\"])\n )\n bids.append(float(order[\"base_price\"]) / ETHER_UNITS)\n\n bid_db = pd.DataFrame(columns=[\"token_ids\", \"ranks\", \"bid\"])\n bid_db[\"token_ids\"] = token_ids\n bid_db[\"ranks\"] = ranks\n bid_db[\"bid\"] = bids\n return bid_db\n\n\nbid_db = get_all_bids(RARITY_DB.head(NUMBERS_TO_CHECK))\nbid_db = bid_db.sort_values(by=[\"ranks\"])\n\nprint(bid_db.set_index(\"token_ids\").head(50))\nplot_all_bids(bid_db)", " ranks bid\ntoken_ids \n1922 1.0 0.70000\n1922 1.0 0.72000\n1922 1.0 0.72000\n1922 1.0 15.00000\n1922 1.0 20.00000\n1922 1.0 0.80190\n6273 2.0 0.72000\n6273 2.0 0.76690\n6273 2.0 0.00001\n6273 2.0 15.00000\n6242 3.0 15.00000\n6242 3.0 18.00000\n2370 4.0 
0.78300\n2370 4.0 0.80000\n2370 4.0 0.72000\n2370 4.0 2.00000\n7796 5.0 0.80000\n7796 5.0 0.78300\n7796 5.0 0.72000\n7796 5.0 4.00000\n7796 5.0 0.71690\n8075 6.0 0.56030\n8075 6.0 0.80700\n8075 6.0 0.80000\n8075 6.0 0.71690\n1784 7.0 0.84700\n1784 7.0 0.75690\n2423 8.0 0.72000\n2423 8.0 0.73190\n4370 9.0 3.00000\n4370 9.0 0.84700\n4370 9.0 0.72000\n4370 9.0 0.77190\n3139 10.0 0.71820\n3139 10.0 0.71820\n3139 10.0 2.00000\n3139 10.0 0.76190\n7823 11.0 3.50000\n7823 11.0 0.78190\n1559 12.0 0.80000\n1559 12.0 2.00000\n1559 12.0 0.81190\n1559 12.0 0.84700\n7075 14.0 1.50000\n7075 14.0 0.80690\n7075 14.0 0.78300\n1146 15.0 0.80000\n1146 15.0 0.81190\n1146 15.0 0.72000\n3060 16.0 0.56030\n" ], [ "\"\"\"\nGet open offers from OpenSea and plot.\n\"\"\"\n\n\ndef getOpenseaOrders(token_id, contract_address):\n # gets orders, both bids and asks\n # divide token_list into limit sized chunks and get output\n url = \"https://api.opensea.io/wyvern/v1/orders\"\n querystring = {\n \"bundled\": \"false\",\n \"include_bundled\": \"false\",\n \"is_english\": \"false\",\n \"include_invalid\": \"false\",\n \"limit\": \"50\",\n \"offset\": \"0\",\n \"order_by\": \"created_date\",\n \"order_direction\": \"desc\",\n \"asset_contract_address\": contract_address,\n \"token_ids\": [token_id],\n }\n\n headers = {\"Accept\": \"application/json\", \"X-API-KEY\": OPENSEA_API_KEY}\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n responseJson = response.json()\n return responseJson\n\n\ndef display_orders(rarity_db):\n print(\"RANK TOKEN_ID PRICE URL\")\n numbersToCheck = []\n for x in rarity_db[\"TOKEN_ID\"]:\n numbersToCheck.append(x)\n if len(numbersToCheck) == 15:\n orders = getOpenseaOrders(numbersToCheck, CONTRACT_ADDRESS)\n numbersToCheck = []\n time.sleep(2)\n for order in orders[\"orders\"]:\n if order[\"side\"] == 1:\n tokenId = int(order[\"asset\"][\"token_id\"])\n price = float(order[\"current_price\"]) / 1e18\n if price <= 20:\n current_order = dict()\n current_order[\"RANK\"] = str(\n int(rarity_db[rarity_db[\"TOKEN_ID\"] == tokenId][\"Rank\"])\n )\n current_order[\"TOKEN_ID\"] = str(tokenId)\n current_order[\"PRICE\"] = str(price)\n current_order[\n \"URL\"\n ] = f\"https://opensea.io/assets/{CONTRACT_ADDRESS}/{tokenId}\"\n str_to_print = \"\"\n for x in [\"RANK\", \"TOKEN_ID\", \"PRICE\"]:\n str_to_print += f\"{current_order[x]}\"\n str_to_print += \" \" * (len(x) + 1 - len(current_order[x]))\n str_to_print += current_order[\"URL\"]\n print(str_to_print)\n\n\ndisplay_orders(RARITY_DB.head(NUMBERS_TO_CHECK))", "RANK TOKEN_ID PRICE URL\n" ], [ "import numpy as np\n\nA = -0.9\nK = 1\nB = 5\nv = 1\nQ = 1.1\nC = 1\n\nRARITY_DB[\"VALUE\"] = A + (\n (K - A) / np.power((C + Q * np.exp(-B * (1 / RARITY_DB[\"Rank\"]))), 1 / v)\n)\nRARITY_DB[\"VALUE\"] = np.where(RARITY_DB[\"Rank\"] > 96 * 2, 0, RARITY_DB[\"VALUE\"])\nRARITY_DB[[\"Rank\", \"VALUE\"]].sort_values(\"Rank\").plot(\n x=\"Rank\", y=\"VALUE\", figsize=(14, 7), logx=True, grid=True\n)\nplt.show()", "_____no_output_____" ], [ "RARITY_DB = RARITY_DB.sort_values(\"TOKEN_ID\")\nRARITY_DB.plot(x=\"TOKEN_ID\", y=\"VALUE\", grid=True, figsize=(14, 7))", "_____no_output_____" ], [ "RARITY_DB = RARITY_DB.sort_values(\"TOKEN_ID\")\nRARITY_DB[\"EXPANDING_VALUE\"] = RARITY_DB[\"VALUE\"].expanding().sum()\nRARITY_DB.plot(x=\"TOKEN_ID\", y=\"EXPANDING_VALUE\", grid=True, figsize=(14, 7))", "_____no_output_____" ], [ "pd.set_option(\"display.max_rows\", 100)\nRARITY_DB.sort_values(\"Rank\").head(96)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06c8144487f736ba12f598368af066f4db2a122
205,887
ipynb
Jupyter Notebook
notebooks/homecdt_model/.ipynb_checkpoints/ss_fteng_model_fromBDSE12_03G_HomeCredit_V2_20200204b-checkpoint.ipynb
ss9202150/Project_1
349dbf8cd42b074c2a897e84ed360f061f07dc0b
[ "MIT" ]
null
null
null
notebooks/homecdt_model/.ipynb_checkpoints/ss_fteng_model_fromBDSE12_03G_HomeCredit_V2_20200204b-checkpoint.ipynb
ss9202150/Project_1
349dbf8cd42b074c2a897e84ed360f061f07dc0b
[ "MIT" ]
null
null
null
notebooks/homecdt_model/.ipynb_checkpoints/ss_fteng_model_fromBDSE12_03G_HomeCredit_V2_20200204b-checkpoint.ipynb
ss9202150/Project_1
349dbf8cd42b074c2a897e84ed360f061f07dc0b
[ "MIT" ]
null
null
null
70.125
121,576
0.799371
[ [ [ "<p> Notice: This notebook is not optimized for memory nor performance yet. Please use it with caution when handling large datasets.\n", "_____no_output_____" ], [ "### Notice: Please ignore Feature engineering part if you are using a ready dataset", "_____no_output_____" ], [ "# Feature engineering", "_____no_output_____" ], [ "This notebook is for BDSE12_03G_HomeCredit_V2.csv processing for bear LGBM final", "_____no_output_____" ], [ "### Prepare work environment", "_____no_output_____" ] ], [ [ "# Pandas for managing datasets\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "np.__version__, pd.__version__", "_____no_output_____" ], [ "# math for operating numbers\nimport math", "_____no_output_____" ], [ "import gc", "_____no_output_____" ], [ "# Change pd displayg format for float\npd.options.display.float_format = '{:,.4f}'.format", "_____no_output_____" ], [ "# Matplotlib for additional customization\nfrom matplotlib import pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "# Seaborn for plotting and styling\nimport seaborn as sns\n#Seaborn set() to set aesthetic parameters in one step.\nsns.set() ", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Read & combine datasets", "_____no_output_____" ] ], [ [ "appl_all_df = pd.read_csv('../..//datasets/homecdt_fteng/BDSE12_03G_HomeCredit_V2.csv',index_col=0)", "_____no_output_____" ], [ "appl_all_df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 797 entries, AMT_ANNUITY to GOODS_PRICE_PREV%\ndtypes: float64(741), int64(42), object(14)\nmemory usage: 2.1+ GB\n" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "# appl_all_df.apply(lambda x:x.unique().size).describe()", "_____no_output_____" ], [ "appl_all_df['TARGET'].unique(), \\\nappl_all_df['TARGET'].unique().size", "_____no_output_____" ], [ "appl_all_df['TARGET'].value_counts()", "_____no_output_____" ], [ "appl_all_df['TARGET'].isnull().sum(), \\\nappl_all_df['TARGET'].size, \\\n(appl_all_df['TARGET'].isnull().sum()/appl_all_df['TARGET'].size).round(4)", "_____no_output_____" ], [ "# Make sure we can use the nullness of 'TARGET' column to separate train & test\n# assert appl_all_df['TARGET'].isnull().sum() == appl_test_df.shape[0]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Randomized sampleing:", "_____no_output_____" ], [ "#### If the dataset is too large, consider following randomized sampling from original dataset to facilitate development and testing", "_____no_output_____" ] ], [ [ "# Randomized sampling from original dataset.\n# This is just for simplifying the development process\n# After coding is complete, should replace all df-->df, and remove this cell\n# Reference: https://yiidtw.github.io/blog/2018-05-29-how-to-shuffle-dataframe-in-pandas/\n\n# df= appl_all_df.sample(n = 1000).reset_index(drop=True)\n# df.shape", "_____no_output_____" ], [ "# df.head()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Tool: Get numerical/ categorical variables(columns) from a dataframe", "_____no_output_____" ] ], [ [ "def get_num_df (data_df, unique_value_threshold: int):\n \"\"\" \n Output: a new dataframe with columns of numerical variables from the input dataframe.\n Input: \n data_df: original dataframe, \n unique_value_threshold(int): number of unique values of each column\n e.g. 
If we define a column with > 3 unique values as being numerical variable, unique_value_threshold = 3\n \"\"\"\n num_mask = data_df.apply(lambda x:x.unique().size > unique_value_threshold,axis=0) \n num_df = data_df[data_df.columns[num_mask]]\n return num_df\n\ndef get_cat_df (data_df, unique_value_threshold: int):\n \"\"\" \n Output: a new dataframe with columns of categorical variables from the input dataframe.\n Input: \n data_df: original dataframe, \n unique_value_threshold(int): number of unique values of each column\n e.g. If we define a column with =<3 unique values as being numerical variable, unique_value_threshold = 3\n \"\"\"\n cat_mask = data_df.apply(lambda x:x.unique().size <= unique_value_threshold,axis=0) \n cat_df = data_df[data_df.columns[cat_mask]]\n return cat_df\n", "_____no_output_____" ], [ "# Be careful when doing this assertion with large datasets\n# assert get_cat_df(appl_all_df, 3).columns.size + get_num_df(appl_all_df, 3).columns.size == appl_all_df.columns.size", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### Splitting id_target_df, cat_df, num_df", "_____no_output_____" ] ], [ [ "# Separate id and target columns before any further processing\nid_target_df = appl_all_df.loc[:, ['SK_ID_CURR','TARGET']]\n\n# Get the operating appl_all_df by removing id and target columns\nappl_all_df_opr = appl_all_df.drop(['SK_ID_CURR','TARGET'], axis=1)\n\n# A quick check of their shapes\nappl_all_df.shape, id_target_df.shape, appl_all_df_opr.shape", "_____no_output_____" ], [ "# Spliting the numerical and categorical variable containing columns via the tools decribed above.\n# Max identified unique value of categorical column 'ORGANIZATION_TYPE' = 58\ncat_df = get_cat_df (appl_all_df_opr, 58)\nnum_df = get_num_df (appl_all_df_opr, 58)", "_____no_output_____" ], [ "cat_df.info()\nnum_df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 250 entries, AMT_REQ_CREDIT_BUREAU_DAY to AMT_REQ_CREDIT_BUREAU_MON/QRT\ndtypes: float64(198), int64(38), object(14)\nmemory usage: 682.2+ MB\n<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 545 entries, AMT_ANNUITY to GOODS_PRICE_PREV%\ndtypes: float64(542), int64(3)\nmemory usage: 1.4 GB\n" ], [ "# A quick check of their shapes\nappl_all_df_opr.shape, cat_df.shape, num_df.shape", "_____no_output_____" ], [ "assert cat_df.shape[1] + num_df.shape[1] + id_target_df.shape[1] \\\n == appl_all_df_opr.shape[1] + id_target_df.shape[1] \\\n == appl_all_df.shape[1]\n\nassert cat_df.shape[0] == num_df.shape[0] == id_target_df.shape[0] \\\n == appl_all_df_opr.shape[0] \\\n == appl_all_df.shape[0]", "_____no_output_____" ], [ "# Apply the following gc if memory is running slow\nappl_all_df_opr.info()\nappl_all_df.info()\ndel appl_all_df_opr\ndel appl_all_df\ngc.collect()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 795 entries, AMT_ANNUITY to GOODS_PRICE_PREV%\ndtypes: float64(740), int64(41), object(14)\nmemory usage: 2.1+ GB\n<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 797 entries, AMT_ANNUITY to GOODS_PRICE_PREV%\ndtypes: float64(741), int64(42), object(14)\nmemory usage: 2.1+ GB\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Dealing with categorical variables", "_____no_output_____" ], [ "#### Transform to String (i.e., python object) and fill nan with String 'nan'", "_____no_output_____" ] ], [ [ "cat_df_obj = cat_df.astype(str)", 
"_____no_output_____" ], [ "assert np.all(cat_df_obj.dtypes) == object\n\n# There are no NA left\nassert all(cat_df_obj.isnull().sum())==0", "_____no_output_____" ], [ "# The float nan will be tranformed to String 'nan'\n# Use this assertion carefully when dealing with extra-large datasets\nassert cat_df.isnull().equals(cat_df_obj.isin({'nan'}))", "_____no_output_____" ] ], [ [ "#### Dealing with special columns", "_____no_output_____" ], [ "Replace 'nan' with 'not specified' in column 'FONDKAPREMONT_MODE'", "_____no_output_____" ] ], [ [ "# Do the replacement and re-assign the modified column back to the original dataframe\ncat_df_obj['FONDKAPREMONT_MODE'] = cat_df_obj['FONDKAPREMONT_MODE'].replace('nan','not specified')", "_____no_output_____" ], [ "# check again the unique value, it should be 1 less than the original cat_df\nassert cat_df['FONDKAPREMONT_MODE'].unique().size == cat_df_obj['FONDKAPREMONT_MODE'].unique().size +1", "_____no_output_____" ], [ "# Apply the following gc if memory is running slow\ncat_df.info()\ndel cat_df\ngc.collect()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 250 entries, AMT_REQ_CREDIT_BUREAU_DAY to AMT_REQ_CREDIT_BUREAU_MON/QRT\ndtypes: float64(198), int64(38), object(14)\nmemory usage: 682.2+ MB\n" ] ], [ [ "#### Do one-hot encoding", "_____no_output_____" ], [ "Check the input dataframe (i.e., cat_df_obj)", "_____no_output_____" ] ], [ [ "cat_df_obj.shape", "_____no_output_____" ], [ "cat_df_obj.apply(lambda x:x.unique().size).sum()", "_____no_output_____" ], [ "# ?pd.get_dummies", "_____no_output_____" ], [ "# pd.get_dummies() method deals only with categorical variables.\n# Although it has a built-in argument 'dummy_na' to manage the na value, \n# our na value has already been converted to string object which are not recognized by the method.\n# Let's just move forward as planned\ncat_df_obj_ohe = pd.get_dummies(cat_df_obj, drop_first=True)\ncat_df_obj_ohe.shape", "_____no_output_____" ], [ "# Make sure the ohe is successful\nassert np.all(np.isin(cat_df_obj_ohe.values,[0,1])) == True\n# cat_df_obj_ohe.dtypes\nassert np.all(cat_df_obj_ohe.dtypes) == 'uint8'\n# make sure the column counts are correct\nassert cat_df_obj.apply(lambda x:x.unique().size).sum() == cat_df_obj_ohe.shape[1] + cat_df_obj.shape[1]", "_____no_output_____" ], [ "cat_df_obj_ohe.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 3006 entries, AMT_REQ_CREDIT_BUREAU_DAY_1.0 to AMT_REQ_CREDIT_BUREAU_MON/QRT_nan\ndtypes: uint8(3006)\nmemory usage: 1.0 GB\n" ], [ "# Apply the following gc if memory is running slow\ndel cat_df_obj\ngc.collect()", "_____no_output_____" ], [ "# %timeit np.isin(cat_df_obj_ohe.values,[0,1])\n# # 1.86 s ± 133 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n\n# %timeit cat_df_obj_ohe.isin([0 , 1])\n# # 3.38 s ± 32.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)", "_____no_output_____" ], [ "# %timeit np.all(np.isin(cat_df_obj_ohe.values,[0,1]))\n# # 1.85 s ± 28 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n\n# %timeit np.all(cat_df_obj_ohe.isin([0 , 1]))\n# # 3.47 s ± 193 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Dealing with numerial variables", "_____no_output_____" ], [ "#### Get na flags", "_____no_output_____" ] ], [ [ "num_df.shape", "_____no_output_____" ], [ "# How many columns contain na value.\nnum_df.isna().any().sum()", "_____no_output_____" ], [ "num_isna_df = num_df[num_df.columns[num_df.isna().any()]]\nnum_notna_df = num_df[num_df.columns[num_df.notna().all()]]\n\nassert num_isna_df.shape[1] + num_notna_df.shape[1] == num_df.shape[1]\nassert num_isna_df.shape[0] == num_notna_df.shape[0] == num_df.shape[0]", "_____no_output_____" ], [ "num_isna_df.shape, num_notna_df.shape", "_____no_output_____" ], [ "# num_df.isna().any(): column names for those na containing columns\n# use it to transform values bool to int, and then add suffix on the column names to get the na-flag df\nnum_naFlag_df = num_isna_df.isna().astype(np.uint8).add_suffix('_na')\nnum_naFlag_df.info() ", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 528 entries, APARTMENTS_AVG_na to GOODS_PRICE_PREV%_na\ndtypes: uint8(528)\nmemory usage: 182.1 MB\n" ] ], [ [ "#### replace na with zero", "_____no_output_____" ] ], [ [ "num_isna_df = num_isna_df.fillna(0)\nnum_isna_df.shape", "_____no_output_____" ], [ "# How many columns contain na value.\nnum_isna_df.isna().any().sum()", "_____no_output_____" ], [ "num_isna_df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 528 entries, APARTMENTS_AVG to GOODS_PRICE_PREV%\ndtypes: float64(528)\nmemory usage: 1.4 GB\n" ], [ "assert num_isna_df.shape == num_naFlag_df.shape", "_____no_output_____" ], [ "num_df = pd.concat([num_notna_df,num_isna_df,num_naFlag_df], axis = 'columns')", "_____no_output_____" ], [ "assert num_notna_df.shape[1] + num_isna_df.shape[1] + num_naFlag_df.shape[1] == num_df.shape[1]", "_____no_output_____" ], [ "num_df.info(verbose=False)", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 1073 entries, AMT_ANNUITY to GOODS_PRICE_PREV%_na\ndtypes: float64(542), int64(3), uint8(528)\nmemory usage: 1.6 GB\n" ], [ "# Apply the following gc if memory is running slow\ndel num_notna_df\ndel num_isna_df\ndel num_naFlag_df\ngc.collect()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### Normalization (DO LATER!!)", "_____no_output_____" ], [ "##### Generally, in tree-based models, the scale of the features does not matter.\nhttps://scikit-learn.org/stable/modules/preprocessing.html#normalization\nhttps://datascience.stackexchange.com/questions/22036/how-does-lightgbm-deal-with-value-scale", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## Combine to a complete, processed dataset", "_____no_output_____" ] ], [ [ "frames = np.array([id_target_df, cat_df_obj_ohe, num_df])", "_____no_output_____" ], [ "id_target_df.shape, cat_df_obj_ohe.shape, num_df.shape", "_____no_output_____" ], [ "appl_all_processed_df = pd.concat(frames, axis ='columns')\nappl_all_processed_df.shape", "_____no_output_____" ], [ "assert appl_all_processed_df.shape[1] == id_target_df.shape[1] + cat_df_obj_ohe.shape[1] + num_df.shape[1]", "_____no_output_____" ], [ "appl_all_processed_df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 4081 entries, SK_ID_CURR to GOODS_PRICE_PREV%_na\ndtypes: float64(543), int64(4), uint8(3534)\nmemory usage: 2.6 GB\n" ], [ "# Apply the following gc if memory is 
running slow\ndel id_target_df\ndel cat_df_obj_ohe\ndel num_df\ngc.collect()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Export to CSV", "_____no_output_____" ] ], [ [ "# Export the dataframe to csv for future use\nappl_all_processed_df.to_csv('../../datasets/homecdt_fteng/ss_fteng_fromBDSE12_03G_HomeCredit_V2_20200204a.csv', index = False)", "_____no_output_____" ], [ "# Export the dtypes Series to csv for future use\nappl_all_processed_df.dtypes.to_csv('../../datasets/homecdt_fteng/ss_fteng_fromBDSE12_03G_HomeCredit_V2_20200204a_dtypes_series.csv')", "C:\\Users\\Student\\.conda\\envs\\homecdt\\lib\\site-packages\\ipykernel_launcher.py:2: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.\n \n" ] ], [ [ "---", "_____no_output_____" ], [ "## Interface connecting fteng & model parts", "_____no_output_____" ] ], [ [ "# Assign appl_all_processed_df to final_df for follow-up modeling\nfinal_df = appl_all_processed_df\n\n# Apply the following gc if memory is running slow\ndel appl_all_processed_df\ngc.collect()", "_____no_output_____" ], [ "final_df.columns = [\"\".join (c if c.isalnum() else \"_\" for c in str(x)) for x in final_df.columns]", "_____no_output_____" ], [ "final_df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 356255 entries, 0 to 356254\nColumns: 4081 entries, SK_ID_CURR to GOODS_PRICE_PREV__na\ndtypes: float64(543), int64(4), uint8(3534)\nmemory usage: 2.6 GB\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Modeling part. If using a ready dataset, please start here", "_____no_output_____" ] ], [ [ "# Reading the saved dtypes Series\nfinal_df_dtypes = \\\npd.read_csv('../../datasets/homecdt_fteng/ss_fteng_fromBDSE12_03G_HomeCredit_V2_20200204a_dtypes_series.csv'\\\n , header=None, index_col=0, squeeze=True)\ndel final_df_dtypes.index.name\nfinal_df_dtypes = final_df_dtypes.to_dict()", "_____no_output_____" ], [ "final_df = \\\npd.read_csv('../../datasets/homecdt_fteng/ss_fteng_fromBDSE12_03G_HomeCredit_V2_20200204a.csv'\\\n , dtype= final_df_dtypes)", "_____no_output_____" ], [ "final_df.columns = [\"\".join (c if c.isalnum() else \"_\" for c in str(x)) for x in final_df.columns]\nfinal_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 356255 entries, 0 to 356254\nColumns: 4081 entries, SK_ID_CURR to GOODS_PRICE_PREV__na\ndtypes: float64(543), int64(4), uint8(3534)\nmemory usage: 2.6 GB\n" ] ], [ [ "This following is based on 'bear_Final_model' released 2020/01/23", "_____no_output_____" ] ], [ [ "# Forked from excellent kernel : https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features\n# From Kaggler : https://www.kaggle.com/jsaguiar\n# Just added a few features so I thought I had to make release it as well...\n\nimport numpy as np\nimport pandas as pd\nimport gc\nimport time\nfrom contextlib import contextmanager\nimport lightgbm as lgb\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport csv", "_____no_output_____" ], [ "lgb.__version__", "_____no_output_____" ], [ "print(final_df['TARGET'].isna().sum(), \n final_df['TARGET'].dtypes)", "48744 float64\n" ] ], [ [ "# LightGBM 模型", "_____no_output_____" ] ], [ [ "def timer(title):\n 
t0 = time.time()\n yield\n print(\"{} - done in {:.0f}s\".format(title, time.time() - t0))\n\ndef kfold_lightgbm(df, num_folds = 5, stratified = True, debug= False, boosting_type= 'goss', epoch=20000, early_stop=200):\n # Divide in training/validation and test data\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n print(\"Starting LightGBM goss. Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n del df\n gc.collect()\n # Cross validation model\n if stratified:\n folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=924)\n else:\n folds = KFold(n_splits= num_folds, shuffle=True, random_state=924)\n # Create arrays and dataframes to store results\n oof_preds = np.zeros(train_df.shape[0])\n sub_preds = np.zeros(test_df.shape[0])\n feature_importance_df = pd.DataFrame()\n feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n \n for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n dtrain = lgb.Dataset(data=train_df[feats].iloc[train_idx], \n label=train_df['TARGET'].iloc[train_idx], \n free_raw_data=False, silent=True)\n dvalid = lgb.Dataset(data=train_df[feats].iloc[valid_idx], \n label=train_df['TARGET'].iloc[valid_idx], \n free_raw_data=False, silent=True)\n\n # LightGBM parameters found by Bayesian optimization\n \n# {'learning_rate': 0.027277797382058662,\n# 'max_bin': 252.71833139557864,\n# 'max_depth': 19.94051833524931,\n# 'min_child_weight': 20.868586608046186,\n# 'min_data_in_leaf': 68.98157854879867,\n# 'min_split_gain': 0.04938251335634182,\n# 'num_leaves': 23.027556285612434,\n# 'reg_alpha': 0.9107785355990146,\n# 'reg_lambda': 0.15418005208807806,\n# 'subsample': 0.7997032951619153}\n params = {\n 'objective': 'binary',\n 'boosting_type': boosting_type,\n 'nthread': 4,\n 'learning_rate': 0.0272778, # 02,\n 'num_leaves': 23, #20,33\n 'tree_learner': 'voting',\n 'colsample_bytree': 0.9497036,\n 'subsample': 0.8715623,\n 'subsample_freq': 0,\n 'max_depth': 20, #8,7\n 'reg_alpha': 0.9107785,\n 'reg_lambda': 0.1541800,\n 'subsample': 0.7997033,\n 'min_split_gain': 0.0493825,\n 'min_data_in_leaf': 69, # ss add\n 'min_child_weight': 49, # 60,39\n 'seed': 924,\n 'verbose': 2000,\n 'metric': 'auc',\n 'max_bin': 253,\n# 'histogram_pool_size': 20480\n# 'device' : 'gpu',\n# 'gpu_platform_id': 0,\n# 'gpu_device_id':0\n }\n \n clf = lgb.train(\n params=params,\n train_set=dtrain,\n num_boost_round=epoch,\n valid_sets=[dtrain, dvalid],\n early_stopping_rounds=early_stop,\n verbose_eval=2000\n )\n\n oof_preds[valid_idx] = clf.predict(dvalid.data)\n sub_preds += clf.predict(test_df[feats]) / folds.n_splits\n\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = feats\n fold_importance_df[\"importance\"] = clf.feature_importance(importance_type='gain')\n fold_importance_df[\"fold\"] = n_fold + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(dvalid.label, oof_preds[valid_idx])))\n del clf, dtrain, dvalid\n gc.collect()\n\n print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))\n # Write submission file and plot feature importance\n if not debug:\n sub_df = test_df[['SK_ID_CURR']].copy()\n sub_df['TARGET'] = sub_preds\n sub_df[['SK_ID_CURR', 'TARGET']].to_csv('homecdt_submission_LGBM.csv', index= False)\n display_importances(feature_importance_df)\n return feature_importance_df\n\n# 
Display/plot feature importance\ndef display_importances(feature_importance_df_):\n cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:40].index\n best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n plt.figure(figsize=(8, 10))\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n plt.title('LightGBM Features (avg over folds)')\n plt.tight_layout\n plt.savefig('lgbm_importances01.png')", "_____no_output_____" ] ], [ [ "## boosting_type:goss", "_____no_output_____" ] ], [ [ "init_time = time.time()\nkfold_lightgbm(final_df,10)\nprint(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "Starting LightGBM goss. Train shape: (307511, 4081), test shape: (48744, 4081)\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1416]\ttraining's auc: 0.874482\tvalid_1's auc: 0.794295\nFold 1 AUC : 0.794295\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1637]\ttraining's auc: 0.883241\tvalid_1's auc: 0.794828\nFold 2 AUC : 0.794828\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1657]\ttraining's auc: 0.883833\tvalid_1's auc: 0.798219\nFold 3 AUC : 0.798219\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1423]\ttraining's auc: 0.874795\tvalid_1's auc: 0.798396\nFold 4 AUC : 0.798396\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1336]\ttraining's auc: 0.87082\tvalid_1's auc: 0.793811\nFold 5 AUC : 0.793811\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1067]\ttraining's auc: 0.858991\tvalid_1's auc: 0.789556\nFold 6 AUC : 0.789556\nTraining until validation scores don't improve for 200 rounds\n[2000]\ttraining's auc: 0.896734\tvalid_1's auc: 0.786445\nEarly stopping, best iteration is:\n[1845]\ttraining's auc: 0.891113\tvalid_1's auc: 0.78669\nFold 7 AUC : 0.786690\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1323]\ttraining's auc: 0.869873\tvalid_1's auc: 0.798273\nFold 8 AUC : 0.798273\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1312]\ttraining's auc: 0.869604\tvalid_1's auc: 0.792665\nFold 9 AUC : 0.792665\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1489]\ttraining's auc: 0.877312\tvalid_1's auc: 0.792178\nFold 10 AUC : 0.792178\nFull AUC score 0.793865\nElapsed time=2811.18 sec.\n" ], [ "init_time = time.time()\nkfold_lightgbm(final_df,10)\nprint(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "Starting LightGBM goss. 
Train shape: (307511, 4081), test shape: (48744, 4081)\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1773]\ttraining's auc: 0.860105\tvalid_1's auc: 0.793118\nFold 1 AUC : 0.793118\nTraining until validation scores don't improve for 200 rounds\n[2000]\ttraining's auc: 0.866676\tvalid_1's auc: 0.795427\nEarly stopping, best iteration is:\n[2229]\ttraining's auc: 0.872846\tvalid_1's auc: 0.795636\nFold 2 AUC : 0.795636\nTraining until validation scores don't improve for 200 rounds\n[2000]\ttraining's auc: 0.866057\tvalid_1's auc: 0.796907\nEarly stopping, best iteration is:\n[2010]\ttraining's auc: 0.866315\tvalid_1's auc: 0.796969\nFold 3 AUC : 0.796969\nTraining until validation scores don't improve for 200 rounds\nEarly stopping, best iteration is:\n[1754]\ttraining's auc: 0.859484\tvalid_1's auc: 0.798664\nFold 4 AUC : 0.798664\nTraining until validation scores don't improve for 200 rounds\n[2000]\ttraining's auc: 0.866282\tvalid_1's auc: 0.794075\nEarly stopping, best iteration is:\n[2680]\ttraining's auc: 0.88354\tvalid_1's auc: 0.794693\nFold 5 AUC : 0.794693\n" ] ], [ [ "## boosting_type:gbdt", "_____no_output_____" ] ], [ [ "# init_time = time.time()\n# kfold_lightgbm(final_df, 10, boosting_type= 'gbdt')\n# print(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "_____no_output_____" ] ], [ [ "## boosting_type:dart", "_____no_output_____" ] ], [ [ "# init_time = time.time()\n# kfold_lightgbm(final_df,10, boosting_type= 'dart')\n# print(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "_____no_output_____" ] ], [ [ "## boosting_type:rf", "_____no_output_____" ] ], [ [ "# init_time = time.time()\n# kfold_lightgbm(final_df,10,boosting_type= 'rf')\n# print(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "_____no_output_____" ] ], [ [ "# XGBoost 模型", "_____no_output_____" ] ], [ [ "from numba import cuda\ncuda.select_device(0)\ncuda.close()", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport gc\nimport time\nfrom contextlib import contextmanager\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import KFold, StratifiedKFold\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport pickle", "_____no_output_____" ], [ "def kfold_xgb(df, num_folds, stratified = True, debug= False):\n # Divide in training/validation and test data\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n print(\"Starting XGBoost. 
Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n del df\n gc.collect()\n # Cross validation model\n if stratified:\n folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1054)\n else:\n folds = KFold(n_splits= num_folds, shuffle=True, random_state=1054)\n # Create arrays and dataframes to store results\n oof_preds = np.zeros(train_df.shape[0])\n sub_preds = np.zeros(test_df.shape[0])\n feature_importance_df = pd.DataFrame()\n feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n \n for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n #if n_fold == 0: # REmove for full K-fold run\n cuda.select_device(0)\n cuda.close()\n train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]\n valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]\n\n clf = XGBClassifier(learning_rate =0.01, \n n_estimators=5000, \n max_depth=4, \n min_child_weight=5,\n# tree_method='gpu_hist',\n subsample=0.8, \n colsample_bytree=0.8, \n objective= 'binary:logistic',\n nthread=4,\n scale_pos_weight=2.5,\n seed=28,\n reg_lambda = 1.2)\n \n# clf = pickle.load(open('test.pickle','rb'))\n \n cuda.select_device(0)\n cuda.close()\n \n clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)], \n eval_metric= 'auc', verbose= 1000, early_stopping_rounds= 200)\n \n cuda.select_device(0)\n cuda.close()\n \n oof_preds[valid_idx] = clf.predict_proba(valid_x)[:, 1]\n sub_preds += clf.predict_proba(test_df[feats])[:, 1] # / folds.n_splits # - Uncomment for K-fold \n\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = feats\n fold_importance_df[\"importance\"] = clf.feature_importances_\n fold_importance_df[\"fold\"] = n_fold + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))\n del clf, train_x, train_y, valid_x, valid_y\n gc.collect()\n\n np.save(\"xgb_oof_preds_1\", oof_preds)\n np.save(\"xgb_sub_preds_1\", sub_preds)\n \n cuda.select_device(0)\n cuda.close()\n \n \n clf = pickle.load(open('test.pickle','rb'))\n # print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))\n # Write submission file and plot feature importance\n if not debug:\n test_df['TARGET'] = sub_preds\n test_df[['SK_ID_CURR', 'TARGET']].to_csv('submission_XGBoost_GPU.csv', index= False)\n #display_importances(feature_importance_df)\n #return feature_importance_df\n\n# Display/plot feature importance\ndef display_importances(feature_importance_df_):\n cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:40].index\n best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n plt.figure(figsize=(8, 10))\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n plt.title('XGBoost Features (avg over folds)')\n plt.tight_layout()\n plt.savefig('xgb_importances02.png')", "_____no_output_____" ], [ "init_time = time.time()\nkfold_xgb(final_df, 5)\nprint(\"Elapsed time={:5.2f} sec.\".format(time.time() - init_time))", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Below not executed", "_____no_output_____" ], [ "## Balance the 'TARGET' column", "_____no_output_____" ] ], [ [ 
"appl_all_processed_df['TARGET'].value_counts()", "_____no_output_____" ], [ "balanceFactor = ((appl_all_processed_df['TARGET'].value_counts()[0])/(appl_all_processed_df['TARGET'].value_counts()[1])).round(0).astype(int)\nbalanceFactor\n# appl_all_processed_df['TARGET'].value_counts()[0]\n# appl_all_processed_df['TARGET'].value_counts()[1]", "_____no_output_____" ], [ "default_df = appl_all_processed_df[appl_all_processed_df['TARGET']==1]\ndefault_df.shape", "_____no_output_____" ], [ "default_df_balanced = pd.concat( [default_df] * (balanceFactor - 1), sort=False, ignore_index=True )\ndefault_df_balanced.shape", "_____no_output_____" ], [ "appl_all_processed_df_balanced = pd.concat([appl_all_processed_df , default_df_balanced], sort=False, ignore_index=True)\nappl_all_processed_df_balanced.shape", "_____no_output_____" ], [ "(appl_all_processed_df_balanced['TARGET'].unique(),\n(appl_all_processed_df_balanced['TARGET'].value_counts()[1], \\\nappl_all_processed_df_balanced['TARGET'].value_counts()[0], \\\nappl_all_processed_df_balanced['TARGET'].isnull().sum()))", "_____no_output_____" ], [ "# Apply the following gc if memory is running slow\ndel appl_all_processed_df_balanced\ngc.collect()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "# Todo", "_____no_output_____" ], [ "Todo:\n* cleaning:\n * num_df: normalize with z-score\n* feature engineering:\n * make reciprocol, polynomial columns of the existing columns. 1/x, x^x.\n * multiplying each columns, two columns at a time.\n * asset items, income items, willingness(history + misc profile) items, loading(principle + interest) items\n * Integration from other tables?\n\nhttps://ithelp.ithome.com.tw/articles/10202059\nhttps://stackoverflow.com/questions/26414913/normalize-columns-of-pandas-data-frame\nhttps://www.kaggle.com/parasjindal96/how-to-normalize-dataframe-pandas\n ", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## EDA", "_____no_output_____" ], [ "### Quick check for numerical columns", "_____no_output_____" ] ], [ [ "numcol = df['CNT_FAM_MEMBERS']", "_____no_output_____" ], [ "numcol.describe(), \\\nnumcol.isnull().sum(), \\\nnumcol.size", "_____no_output_____" ], [ "numcol.value_counts(sort=True), numcol.unique().size", "_____no_output_____" ], [ "# numcol_toYear = pd.to_numeric(\\\n# ((numcol.abs() / 365) \\\n# .round(0)) \\\n# ,downcast='integer')\n# numcol_toYear.describe()", "_____no_output_____" ], [ "# numcol_toYear.value_counts(sort=True), numcol_toYear.unique().size", "_____no_output_____" ] ], [ [ "### Quick check for categorical columns", "_____no_output_____" ] ], [ [ "catcol = df['HOUR_APPR_PROCESS_START']", "_____no_output_____" ], [ "catcol.unique(), \\\ncatcol.unique().size", "_____no_output_____" ], [ "catcol.value_counts(sort=True)", "_____no_output_____" ], [ "catcol.isnull().sum(), \\\ncatcol.size", "_____no_output_____" ], [ "catcol.isnull().sum(), \\\ncatcol.size", "_____no_output_____" ] ], [ [ "## Appendix", "_____no_output_____" ], [ "### Tool: Getting summary dataframe", "_____no_output_____" ] ], [ [ "# might not be very useful at this point\ndef summary_df (data_df):\n \"\"\" \n Output: a new dataframe with summary info from the input dataframe.\n Input: data_df, the original dataframe\n \"\"\"\n summary_df = pd.concat([(data_df.describe(include='all')), \\\n (data_df.dtypes.to_frame(name='dtypes').T), \\\n (data_df.isnull().sum().to_frame(name='isnull').T), \\\n (data_df.apply(lambda 
x:x.unique().size).to_frame(name='uniqAll').T)])\n return summary_df\n\ndef data_quality_df (data_df):\n \"\"\" \n Output: a new dataframe with summary info from the input dataframe.\n Input: data_df, the original dataframe\n \"\"\"\n data_quality_df = pd.concat([(data_df.describe(include='all')), \\\n (data_df.dtypes.to_frame(name='dtypes').T), \\\n (data_df.isnull().sum().to_frame(name='isnull').T), \\\n (data_df.apply(lambda x:x.unique().size).to_frame(name='uniqAll').T)])\n return data_quality_df.iloc[[11,13,12,0,],:]\n", "_____no_output_____" ], [ "data_quality_df(appl_all_df)", "_____no_output_____" ], [ "# df.to_csv(file_name, encoding='utf-8', index=False)\n# data_quality_df(df).to_csv(\"./eda_output/application_train_data_quality.csv\")", "_____no_output_____" ], [ "df['CNT_CHILDREN'].value_counts()", "_____no_output_____" ], [ "df['CNT_CHILDREN'].value_counts().sum()", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "summary_df(df)", "_____no_output_____" ], [ "# df.to_csv(file_name, encoding='utf-8', index=False)\n# summary_df(df).to_csv(\"./eda_output/application_train_summary_df.csv\")", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### .nunique() function", "_____no_output_____" ] ], [ [ "# nunique() function excludes NaN \n# i.e. it does not consider NaN as a \"value\", therefore NaN is not counted as a \"unique value\"\ndf.nunique()", "_____no_output_____" ], [ "df.nunique() == df.apply(lambda x:x.unique().shape[0])", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].unique().shape[0]", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].nunique()", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].unique().size", "_____no_output_____" ] ], [ [ "### .value_counts() function", "_____no_output_____" ] ], [ [ "# .value_counts() function has similar viewpoint towards NaN.\n# i.e. it does not consider null as a value, therefore not counted in .value_counts()", "_____no_output_____" ], [ "df['NAME_TYPE_SUITE'].value_counts()", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].isnull().sum()", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].size", "_____no_output_____" ], [ "df['AMT_REQ_CREDIT_BUREAU_YEAR'].value_counts().sum() + df['AMT_REQ_CREDIT_BUREAU_YEAR'].isnull().sum() == \\\ndf['AMT_REQ_CREDIT_BUREAU_YEAR'].size", "_____no_output_____" ] ], [ [ "### 重複值", "_____no_output_____" ] ], [ [ "# Counting unique values (cf. 
.nunique() function, see above section)\n# This code was retrieved from HT\n\ndf.apply(lambda x:x.unique().shape[0])", "_____no_output_____" ], [ "# It is the same if you write (df.apply(lambda x:x.unique().size))\nassert (df.apply(lambda x:x.unique().shape[0])==df.apply(lambda x:x.unique().size)).all", "_____no_output_____" ], [ "# # %timeit showed the performances are similar\n# %timeit df.apply(lambda x:x.unique().shape[0])\n# %timeit df.apply(lambda x:x.unique().size)", "_____no_output_____" ] ], [ [ "### 空值", "_____no_output_____" ] ], [ [ "# 含空值欄位占比\nprint(f\"{df.isnull().any().sum()} in {df.shape[1]} columns (ratio: {(df.isnull().any().sum()/df.shape[1]).round(2)}) has empty value(s)\")\n", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## re-casting to reduce memory use (beta)", "_____no_output_____" ] ], [ [ "# np.isfinite(num_df).all().value_counts()", "_____no_output_____" ], [ "# num_df_finite = num_df[num_df.columns[np.isfinite(num_df).all()]]\n# num_df_infinite = num_df[num_df.columns[np.isfinite(num_df).all() == False]]\n# num_df_finite.shape, num_df_infinite.shape", "_____no_output_____" ], [ "# assert num_df_finite.shape[0] == num_df_infinite.shape[0] == num_df.shape[0]\n# assert num_df_finite.shape[1] + num_df_infinite.shape[1] == num_df.shape[1]", "_____no_output_____" ], [ "# def reduce_mem_usage(props, finite:bool = True):\n# props.info(verbose=False)\n# start_mem_usg = props.memory_usage().sum() / 1024**2 \n# print(\"Memory usage of properties dataframe is :\",start_mem_usg,\" MB\")\n# if finite == True: \n# props[props.columns[(props.min()>=0) & (props.max()<255)]] = \\\n# props[props.columns[(props.min()>=0) & (props.max()<255)]].astype(np.uint8, copy=False)\n# props.info(verbose=False)\n\n# props[props.columns[(props.min()>=0) &(props.max() >= 255) & (props.max()<65535)]] = \\\n# props[props.columns[(props.min()>=0) &(props.max() >= 255) & (props.max()<65535)]] \\\n# .astype(np.uint16, copy=False)\n# props.info(verbose=False)\n\n# props[props.columns[(props.min()>=0) &(props.max() >= 65535) & (props.max()<4294967295)]] = \\\n# props[props.columns[(props.min()>=0) &(props.max() >= 65535) & (props.max()<4294967295)]] \\\n# .astype(np.uint32, copy=False)\n# props.info(verbose=False)\n\n# props[props.columns[(props.min()>=0) &(props.max() >= 4294967295)]] = \\\n# props[props.columns[(props.min()>=0) &(props.max() >= 4294967295)]] \\\n# .astype(np.uint64, copy=False)\n# props.info(verbose=False)\n# else:\n# props = props.astype(np.float32, copy=False)\n# props.info(verbose=False)\n \n# print(\"___MEMORY USAGE AFTER COMPLETION:___\")\n# mem_usg = props.memory_usage().sum() / 1024**2 \n# print(\"Memory usage is: \",mem_usg,\" MB\")\n# print(\"This is \",100*mem_usg/start_mem_usg,\"% of the initial size\")\n \n# return props\n\n# if num_na_df_finite.min()>=0:\n# if num_na_df_finite.max() < 255:\n# props[col] = props[col].astype(np.uint8)\n# elif num_na_df_finite.max() < 65535:\n# props[col] = props[col].astype(np.uint16)\n# elif num_na_df_finite.max() < 4294967295:\n# props[col] = props[col].astype(np.uint32)\n# else:\n# props[col] = props[col].astype(np.uint64)", "_____no_output_____" ], [ "# num_df_finite.info()", "_____no_output_____" ], [ "# num_df_finite = reduce_mem_usage(num_df_finite, finite = True)", "_____no_output_____" ], [ "# num_df_infinite.info()", "_____no_output_____" ], [ "# num_df_infinite = reduce_mem_usage(num_df_infinite, finite = False)", "_____no_output_____" ], [ "# num_df = pd.concat([num_df_finite, num_df_infinite], axis 
='columns')\n# num_df.info()", "_____no_output_____" ], [ "# assert num_df_finite.shape[0] == num_df_infinite.shape[0] == num_df.shape[0]\n# assert num_df_finite.shape[1] + num_df_infinite.shape[1] == num_df.shape[1]", "_____no_output_____" ], [ "# del num_df_finite\n# del num_df_infinite\n# gc.collect()", "_____no_output_____" ] ], [ [ "THE END", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d06c8412c1ab79511c4163caac9bd85cc75485d2
6,664
ipynb
Jupyter Notebook
docs/example_batch_prediction.ipynb
javak87/phasenet_chile-subduction-zone
a269d22241d38276635c4690ddd036a66b3e45e6
[ "MIT" ]
null
null
null
docs/example_batch_prediction.ipynb
javak87/phasenet_chile-subduction-zone
a269d22241d38276635c4690ddd036a66b3e45e6
[ "MIT" ]
null
null
null
docs/example_batch_prediction.ipynb
javak87/phasenet_chile-subduction-zone
a269d22241d38276635c4690ddd036a66b3e45e6
[ "MIT" ]
null
null
null
34.528497
202
0.516957
[ [ [ "# Batch Prediction\n\n## 1. Download demo data\n\n```\ncd PhaseNet\nwget https://github.com/wayneweiqiang/PhaseNet/releases/download/test_data/test_data.zip\nunzip test_data.zip\n```\n\n## 2. Run batch prediction \n\nPhaseNet currently supports three data formats: numpy, hdf5, and mseed\n\n- For numpy format:\n~~~bash\npython phasenet/predict.py --model=model/190703-214543 --data_list=test_data/npz.csv --data_dir=test_data/npz --format=numpy --plot_figure\n~~~\n\n- For hdf5 format:\n~~~bash\npython phasenet/predict.py --model=model/190703-214543 --hdf5_file=test_data/data.h5 --hdf5_group=data --format=hdf5\n~~~\n\n- For mseed format:\n~~~bash\npython phasenet/predict.py --model=model/190703-214543 --data_list=test_data/mseed.csv --data_dir=test_data/mseed --format=mseed\n~~~\n\n- For sac format:\n~~~bash\npython phasenet/predict.py --model=model/190703-214543 --data_list=test_data/sac.csv --data_dir=test_data/sac --format=sac\n~~~\n\n- For mseed file of an array of stations (used by [QuakeFlow](https://github.com/wayneweiqiang/QuakeFlow)):\n~~~bash\npython phasenet/predict.py --model=model/190703-214543 --data_list=test_data/mseed_array.csv --data_dir=test_data/mseed_array --stations=test_data/stations.csv --format=mseed_array --amplitude\n~~~\n\n\nOptional arguments:\n```\nusage: predict.py [-h] [--batch_size BATCH_SIZE] [--model_dir MODEL_DIR]\n [--data_dir DATA_DIR] [--data_list DATA_LIST]\n [--hdf5_file HDF5_FILE] [--hdf5_group HDF5_GROUP]\n [--result_dir RESULT_DIR] [--result_fname RESULT_FNAME]\n [--min_p_prob MIN_P_PROB] [--min_s_prob MIN_S_PROB]\n [--mpd MPD] [--amplitude] [--format FORMAT]\n [--s3_url S3_URL] [--stations STATIONS] [--plot_figure]\n [--save_prob]\n\noptional arguments:\n -h, --help show this help message and exit\n --batch_size BATCH_SIZE\n batch size\n --model_dir MODEL_DIR\n Checkpoint directory (default: None)\n --data_dir DATA_DIR Input file directory\n --data_list DATA_LIST\n Input csv file\n --hdf5_file HDF5_FILE\n Input hdf5 file\n --hdf5_group HDF5_GROUP\n data group name in hdf5 file\n --result_dir RESULT_DIR\n Output directory\n --result_fname RESULT_FNAME\n Output file\n --min_p_prob MIN_P_PROB\n Probability threshold for P pick\n --min_s_prob MIN_S_PROB\n Probability threshold for S pick\n --mpd MPD Minimum peak distance\n --amplitude if return amplitude value\n --format FORMAT input format\n --s3_url S3_URL s3 url\n --stations STATIONS seismic station info\n --plot_figure If plot figure for test\n --save_prob If save result for test\n```", "_____no_output_____" ], [ "## 3. 
Read P/S picks\n\nPhaseNet currently outputs two format: **CSV** and **JSON**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport json\nimport os\nPROJECT_ROOT = os.path.realpath(os.path.join(os.path.abspath(''), \"..\"))", "_____no_output_____" ], [ "picks_csv = pd.read_csv(os.path.join(PROJECT_ROOT, \"results/picks.csv\"), sep=\"\\t\")\npicks_csv.loc[:, 'p_idx'] = picks_csv[\"p_idx\"].apply(lambda x: x.strip(\"[]\").split(\",\"))\npicks_csv.loc[:, 'p_prob'] = picks_csv[\"p_prob\"].apply(lambda x: x.strip(\"[]\").split(\",\"))\npicks_csv.loc[:, 's_idx'] = picks_csv[\"s_idx\"].apply(lambda x: x.strip(\"[]\").split(\",\"))\npicks_csv.loc[:, 's_prob'] = picks_csv[\"s_prob\"].apply(lambda x: x.strip(\"[]\").split(\",\"))\nprint(picks_csv.iloc[1])\nprint(picks_csv.iloc[0])", "fname NC.MCV..EH.0361339.npz\nt0 1970-01-01T00:00:00.000\np_idx [5999, 9015]\np_prob [0.987, 0.981]\ns_idx [6181, 9205]\ns_prob [0.553, 0.873]\nName: 1, dtype: object\nfname NN.LHV..EH.0384064.npz\nt0 1970-01-01T00:00:00.000\np_idx []\np_prob []\ns_idx []\ns_prob []\nName: 0, dtype: object\n" ], [ "with open(os.path.join(PROJECT_ROOT, \"results/picks.json\")) as fp:\n picks_json = json.load(fp) \nprint(picks_json[1])\nprint(picks_json[0])", "{'id': 'NC.MCV..EH.0361339.npz', 'timestamp': '1970-01-01T00:01:30.150', 'prob': 0.9811667799949646, 'type': 'p'}\n{'id': 'NC.MCV..EH.0361339.npz', 'timestamp': '1970-01-01T00:00:59.990', 'prob': 0.9872905611991882, 'type': 'p'}\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
d06c86972a2a0ff816df9cf493216cf9dbc6c57f
16,711
ipynb
Jupyter Notebook
22-Parallel Processing/01-Multithreading and Multiprocessing.ipynb
Pankaj-Ra/Complete-Python3-Bootcamp-master
9e860adff332cead1302a23c18665396fd9b6cb4
[ "MIT" ]
null
null
null
22-Parallel Processing/01-Multithreading and Multiprocessing.ipynb
Pankaj-Ra/Complete-Python3-Bootcamp-master
9e860adff332cead1302a23c18665396fd9b6cb4
[ "MIT" ]
null
null
null
22-Parallel Processing/01-Multithreading and Multiprocessing.ipynb
Pankaj-Ra/Complete-Python3-Bootcamp-master
9e860adff332cead1302a23c18665396fd9b6cb4
[ "MIT" ]
null
null
null
30.218807
494
0.53402
[ [ [ "# Multithreading and Multiprocessing\n\nRecall the phrase \"many hands make light work\". This is as true in programming as anywhere else.\n\nWhat if you could engineer your Python program to do four things at once? What would normally take an hour could (almost) take one fourth the time.<font color=green>\\*</font>\n\nThis is the idea behind parallel processing, or the ability to set up and run multiple tasks concurrently.\n\n\n<br><font color=green>\\* *We say almost, because you do have to take time setting up four processors, and it may take time to pass information between them.*</font>", "_____no_output_____" ], [ "## Threading vs. Processing\n\nA good illustration of threading vs. processing would be to download an image file and turn it into a thumbnail.\n\nThe first part, communicating with an outside source to download a file, involves a thread. Once the file is obtained, the work of converting it involves a process. Essentially, two factors determine how long this will take; the input/output speed of the network communication, or I/O, and the available processor, or CPU.\n\n#### I/O-intensive processes improved with multithreading:\n* webscraping\n* reading and writing to files\n* sharing data between programs\n* network communications\n\n\n#### CPU-intensive processes improved with multiprocessing:\n* computations\n* text formatting\n* image rescaling\n* data analysis", "_____no_output_____" ], [ "## Multithreading Example: Webscraping\n\nHistorically, the programming knowledge required to set up multithreading was beyond the scope of this course, as it involved a good understanding of Python's Global Interpreter Lock (the GIL prevents multiple threads from running the same Python code at once). Also, you had to set up special classes that behave like Producers to divvy up the work, Consumers (aka \"workers\") to perform the work, and a Queue to hold tasks and provide communcations. And that was just the beginning.\n\nFortunately, we've already learned one of the most valuable tools we'll need – the `map()` function. When we apply it using two standard libraries, *multiprocessing* and *multiprocessing.dummy*, setting up parallel processes and threads becomes fairly straightforward.\n", "_____no_output_____" ], [ "Here's a classic multithreading example provided by [IBM](http://www.ibm.com/developerworks/aix/library/au-threadingpython/) and adapted by [Chris Kiehl](http://chriskiehl.com/article/parallelism-in-one-line/) where you divide the task of retrieving web pages across multiple threads:\n\n\n import time \n import threading \n import Queue \n import urllib2 \n\n class Consumer(threading.Thread): \n def __init__(self, queue): \n threading.Thread.__init__(self)\n self._queue = queue \n\n def run(self):\n while True: \n content = self._queue.get() \n if isinstance(content, str) and content == 'quit':\n break\n response = urllib2.urlopen(content)\n print 'Thanks!'\n\n\n def Producer():\n urls = [\n 'http://www.python.org', 'http://www.yahoo.com'\n 'http://www.scala.org', 'http://www.google.com'\n # etc.. \n ]\n queue = Queue.Queue()\n worker_threads = build_worker_pool(queue, 4)\n start_time = time.time()\n\n # Add the urls to process\n for url in urls: \n queue.put(url) \n # Add the poison pill\n for worker in worker_threads:\n queue.put('quit')\n for worker in worker_threads:\n worker.join()\n\n print 'Done! 
Time taken: {}'.format(time.time() - start_time)\n\n def build_worker_pool(queue, size):\n workers = []\n for _ in range(size):\n worker = Consumer(queue)\n worker.start() \n workers.append(worker)\n return workers\n\n if __name__ == '__main__':\n Producer()", "_____no_output_____" ], [ "Using the multithreading library provided by the *multiprocessing.dummy* module and `map()` all of this becomes:\n\n import urllib2\n from multiprocessing.dummy import Pool as ThreadPool\n \n pool = ThreadPool(4) # choose a number of workers\n \n urls = [\n 'http://www.python.org', 'http://www.yahoo.com'\n 'http://www.scala.org', 'http://www.google.com'\n # etc.. \n ]\n \n results = pool.map(urllib2.urlopen, urls)\n pool.close() \n pool.join()\n \nIn the above code, the *multiprocessing.dummy* module provides the parallel threads, and `map(urllib2.urlopen, urls)` assigns the labor!", "_____no_output_____" ], [ "## Multiprocessing Example: Monte Carlo\n\nLet's code out an example to see how the parts fit together. We can time our results using the *timeit* module to measure any performance gains. Our task is to apply the Monte Carlo Method to estimate the value of Pi.", "_____no_output_____" ], [ "### Monte Carle Method and Estimating Pi\n\nIf you draw a circle of radius 1 (a unit circle) and enclose it in a square, the areas of the two shapes are given as\n\n<table>\n <caption>Area Formulas</caption>\n <tr><td>circle</td><td>$$πr^2$$</td></tr>\n <tr><td>square</td><td>$$4 r^2$$</td></tr>\n</table>\n\n\nTherefore, the ratio of the volume of the circle to the volume of the square is $$\\frac{π}{4}$$\n\nThe Monte Carlo Method plots a series of random points inside the square. By comparing the number that fall within the circle to those that fall outside, with a large enough sample we should have a good approximation of Pi. You can see a good demonstration of this [here](https://academo.org/demos/estimating-pi-monte-carlo/) (Hit the **Animate** button on the page).\n\nFor a given number of points *n*, we have $$π = \\frac{4 \\cdot points\\ inside\\ circle}{total\\ points\\ n}$$\n\nTo set up our multiprocessing program, we first derive a function for finding Pi that we can pass to `map()`:", "_____no_output_____" ] ], [ [ "from random import random # perform this import outside the function\n\ndef find_pi(n):\n \"\"\"\n Function to estimate the value of Pi\n \"\"\"\n inside=0\n\n for i in range(0,n):\n x=random()\n y=random()\n if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle\n inside+=1\n\n pi=4*inside/n\n return pi", "_____no_output_____" ] ], [ [ "Let's test `find_pi` on 5,000 points:", "_____no_output_____" ] ], [ [ "find_pi(5000)", "_____no_output_____" ] ], [ [ "This ran very quickly, but the results are not very accurate!\n\nNext we'll write a script that sets up a pool of workers, and lets us time the results against varying sized pools. We'll set up two arguments to represent *processes* and *total_iterations*. 
Inside the script, we'll break *total_iterations* down into the number of iterations passed to each process, by making a processes-sized list.<br>For example:\n\n total_iterations = 1000\n processes = 5\n iterations = [total_iterations//processes]*processes\n iterations\n # Output: [200, 200, 200, 200, 200]\n \nThis list will be passed to our `map()` function along with `find_pi()`", "_____no_output_____" ] ], [ [ "%%writefile test.py\nfrom random import random\nfrom multiprocessing import Pool\nimport timeit\n\ndef find_pi(n):\n \"\"\"\n Function to estimate the value of Pi\n \"\"\"\n inside=0\n\n for i in range(0,n):\n x=random()\n y=random()\n if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle\n inside+=1\n\n pi=4*inside/n\n return pi\n\nif __name__ == '__main__':\n N = 10**5 # total iterations\n P = 5 # number of processes\n \n p = Pool(P)\n print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10))\n p.close()\n p.join()\n print(f'{N} total iterations with {P} processes')", "Writing test.py\n" ], [ "! python test.py", "3.1466800\n3.1364400\n3.1470400\n3.1370400\n3.1256400\n3.1398400\n3.1395200\n3.1363600\n3.1437200\n3.1334400\n0.2370227286270967\n100000 total iterations with 5 processes\n" ] ], [ [ "Great! The above test took under a second on our computer.\n\nNow that we know our script works, let's increase the number of iterations, and compare two different pools. Sit back, this may take awhile!", "_____no_output_____" ] ], [ [ "%%writefile test.py\nfrom random import random\nfrom multiprocessing import Pool\nimport timeit\n\ndef find_pi(n):\n \"\"\"\n Function to estimate the value of Pi\n \"\"\"\n inside=0\n\n for i in range(0,n):\n x=random()\n y=random()\n if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle\n inside+=1\n\n pi=4*inside/n\n return pi\n\nif __name__ == '__main__':\n N = 10**7 # total iterations\n \n P = 1 # number of processes\n p = Pool(P)\n print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10))\n p.close()\n p.join()\n print(f'{N} total iterations with {P} processes')\n \n P = 5 # number of processes\n p = Pool(P)\n print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.7f}'), number=10))\n p.close()\n p.join()\n print(f'{N} total iterations with {P} processes')", "Overwriting test.py\n" ], [ "! python test.py", "3.1420964\n3.1417412\n3.1411108\n3.1408184\n3.1414204\n3.1417656\n3.1408324\n3.1418828\n3.1420492\n3.1412804\n36.03526345242264\n10000000 total iterations with 1 processes\n3.1424524\n3.1418376\n3.1415292\n3.1410344\n3.1422376\n3.1418736\n3.1420540\n3.1411452\n3.1421652\n3.1410672\n17.300921846344366\n10000000 total iterations with 5 processes\n\n" ] ], [ [ "Hopefully you saw that with 5 processes our script ran faster!", "_____no_output_____" ], [ "## More is Better ...to a point.\n\nThe gain in speed as you add more parallel processes tends to flatten out at some point. In any collection of tasks, there are going to be one or two that take longer than average, and no amount of added processing can speed them up. 
This is best described in [Amdahl's Law](https://en.wikipedia.org/wiki/Amdahl%27s_law).", "_____no_output_____" ], [ "## Advanced Script\n\nIn the example below, we'll add a context manager to shrink these three lines\n\n p = Pool(P)\n ...\n p.close()\n p.join()\n \nto one line:\n\n with Pool(P) as p:\n \nAnd we'll accept command line arguments using the *sys* module.\n ", "_____no_output_____" ] ], [ [ "%%writefile test2.py\nfrom random import random\nfrom multiprocessing import Pool\nimport timeit\nimport sys\n\nN = int(sys.argv[1]) # these arguments are passed in from the command line\nP = int(sys.argv[2])\n\ndef find_pi(n):\n \"\"\"\n Function to estimate the value of Pi\n \"\"\"\n inside=0\n\n for i in range(0,n):\n x=random()\n y=random()\n if (x*x+y*y)**(0.5)<=1: # if i falls inside the circle\n inside+=1\n\n pi=4*inside/n\n return pi\n\nif __name__ == '__main__':\n \n with Pool(P) as p:\n print(timeit.timeit(lambda: print(f'{sum(p.map(find_pi, [N//P]*P))/P:0.5f}'), number=10))\n print(f'{N} total iterations with {P} processes')", "Writing test2.py\n" ], [ "! python test2.py 10000000 500", "3.14121\n3.14145\n3.14178\n3.14194\n3.14109\n3.14201\n3.14243\n3.14150\n3.14203\n3.14116\n16.871822701405073\n10000000 total iterations with 500 processes\n" ] ], [ [ "Great! Now you should have a good understanding of multithreading and multiprocessing!", "_____no_output_____" ] ] ]
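A closely related alternative to `multiprocessing.Pool`, sketched here as an optional aside (it is not part of the notebook above and only assumes a standard Python 3 installation), is `concurrent.futures.ProcessPoolExecutor`, whose context manager takes care of the `close()`/`join()` housekeeping:

```python
# Sketch only: the same Monte Carlo Pi estimate as above, driven by
# concurrent.futures instead of multiprocessing.Pool. N and P are illustrative values.
from concurrent.futures import ProcessPoolExecutor
from random import random

def find_pi(n):
    inside = 0
    for _ in range(n):
        x, y = random(), random()
        if x * x + y * y <= 1.0:           # the random point falls inside the unit circle
            inside += 1
    return 4 * inside / n

if __name__ == '__main__':
    N, P = 10**6, 5                        # total iterations, number of worker processes
    with ProcessPoolExecutor(max_workers=P) as executor:
        parts = list(executor.map(find_pi, [N // P] * P))
    print(sum(parts) / P)                  # average of the per-process estimates
```

As with `Pool`, adding more workers helps only up to the limit described by Amdahl's Law.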
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d06c8c8ae3c359ae7213f522ea3683cfd282a815
54,307
ipynb
Jupyter Notebook
demos/transpile/ubook.ipynb
the-legend-of-lia/pyzx
67929ec9802db97419531ff443ab8246842eea5c
[ "Apache-2.0" ]
null
null
null
demos/transpile/ubook.ipynb
the-legend-of-lia/pyzx
67929ec9802db97419531ff443ab8246842eea5c
[ "Apache-2.0" ]
null
null
null
demos/transpile/ubook.ipynb
the-legend-of-lia/pyzx
67929ec9802db97419531ff443ab8246842eea5c
[ "Apache-2.0" ]
null
null
null
62.710162
21,280
0.67947
[ [ [ "from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, QiskitError\n#from qiskit import execute, BasicAer\nimport qiskit.ignis.verification.randomized_benchmarking as rb\n#import qiskit.test.benchmarks.randomized_benchmarking as br\nimport pyzx\nfrom pyzx.circuit.qasmparser import QASMParser\nfrom pyzx.circuit.qiskitqasmparser import QiskitQASMParser", "_____no_output_____" ], [ "#qc = rb.randomized_benchmarking_seq()\nqc = rb.randomized_benchmarking_seq(nseeds=1, length_vector=None,\n rb_pattern=[[0,1]],\n length_multiplier=1, seed_offset=0,\n align_cliffs=False,\n interleaved_gates=None,\n is_purity=False)\nqc = qc[0][0][0]", "_____no_output_____" ], [ "# setting up the backend\n# print(BasicAer.backends())\n\n# running the job\n# job_sim = execute(qc, BasicAer.get_backend('qasm_simulator'))\n# sim_result = job_sim.result()", "_____no_output_____" ], [ "# print(\"\\nPrint all gates:\")\n# [print(dat) for dat in qc.data]", "_____no_output_____" ], [ "qasm = qc.decompose().qasm()\n### if you want to remove all barriers\n## qasm = '\\n'.join(['' if line.startswith(\"barrier\") else line for line in qasm.splitlines()])\n\nqc = qc.from_qasm_str(qasm)\n\nprint(\"\\nPrint QASM:\")\nprint(qasm)", "\nPrint QASM:\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg qr[2];\ncreg cr[2];\nu2(0,pi) qr[0];\nu2(0,pi) qr[0];\nu1(pi/2) qr[0];\nu2(0,pi) qr[1];\nu2(0,pi) qr[1];\nu1(pi/2) qr[1];\ncx qr[0],qr[1];\nu1(-pi/2) qr[0];\nu2(0,pi) qr[0];\nu2(0,pi) qr[1];\nu1(pi/2) qr[1];\nu3(pi,pi/2,pi/2) qr[1];\nbarrier qr[0],qr[1];\nu2(0,pi) qr[0];\nu1(pi/2) qr[0];\nu3(pi,pi/2,pi/2) qr[1];\nu1(-pi/2) qr[1];\nu2(0,pi) qr[1];\ncx qr[0],qr[1];\nu1(-pi/2) qr[0];\nu2(0,pi) qr[0];\nu2(0,pi) qr[0];\nu1(-pi/2) qr[1];\nu2(0,pi) qr[1];\nu2(0,pi) qr[1];\nmeasure qr[0] -> cr[0];\nmeasure qr[1] -> cr[1];\n\n" ], [ "# Draw the circuit\nprint(qc)", " ┌──────────┐┌──────────┐┌────────────┐ ┌─────────────┐»\nqr_0: |0>┤ U2(0,pi) ├┤ U2(0,pi) ├┤ U1(0.5*pi) ├──■──┤ U1(-0.5*pi) ├»\n ├──────────┤├──────────┤├────────────┤┌─┴─┐└─┬──────────┬┘»\nqr_1: |0>┤ U2(0,pi) ├┤ U2(0,pi) ├┤ U1(0.5*pi) ├┤ X ├──┤ U2(0,pi) ├─»\n └──────────┘└──────────┘└────────────┘└───┘ └──────────┘ »\n cr_0: 0 ══════════════════════════════════════════════════════════»\n »\n cr_1: 0 ══════════════════════════════════════════════════════════»\n »\n« ┌──────────┐ ░ ┌──────────┐ »\n«qr_0: ─┤ U2(0,pi) ├──────────────────────────░───────┤ U2(0,pi) ├──────»\n« ┌┴──────────┴┐┌──────────────────────┐ ░ ┌─────┴──────────┴─────┐»\n«qr_1: ┤ U1(0.5*pi) ├┤ U3(pi,0.5*pi,0.5*pi) ├─░─┤ U3(pi,0.5*pi,0.5*pi) ├»\n« └────────────┘└──────────────────────┘ ░ └──────────────────────┘»\n«cr_0: ═════════════════════════════════════════════════════════════════»\n« »\n«cr_1: ═════════════════════════════════════════════════════════════════»\n« »\n« ┌────────────┐ ┌─────────────┐┌──────────┐┌──────────┐»\n«qr_0: ─┤ U1(0.5*pi) ├──────────────■──┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ U2(0,pi) ├»\n« ┌┴────────────┤┌──────────┐┌─┴─┐├─────────────┤├──────────┤├──────────┤»\n«qr_1: ┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ X ├┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ U2(0,pi) ├»\n« └─────────────┘└──────────┘└───┘└─────────────┘└──────────┘└──────────┘»\n«cr_0: ═══════════════════════════════════════════════════════════════════════»\n« »\n«cr_1: ═══════════════════════════════════════════════════════════════════════»\n« »\n« ┌─┐ \n«qr_0: ┤M├───\n« └╥┘┌─┐\n«qr_1: ─╫─┤M├\n« ║ └╥┘\n«cr_0: ═╩══╬═\n« ║ \n«cr_1: ════╩═\n« \n" ], [ "p = QiskitQASMParser()\ncirc_list, whichpyzx = 
p.qiskitparse(qasm)\nprint(circ_list)\nprint(whichpyzx)\nprint(p.registers)\n\n[print(circ_list[w].__dict__) for w in whichpyzx]", "['qreg qr[2];', 'creg cr[2];', Circuit(2 qubits, 26 gates), 'barrier qr[0],qr[1];', Circuit(2 qubits, 26 gates), 'measure qr[0] -> cr[0];', 'measure qr[1] -> cr[1];']\n[2, 4]\n{'qr': (0, 2)}\n{'qubits': 2, 'gates': [ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2), ZPhase(1,phase=1/2), CNOT(0,1), ZPhase(0,phase=-1/2), ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2), ZPhase(1,phase=1/2), ZPhase(1,phase=1), XPhase(1,phase=1), ZPhase(1,phase=0)], 'name': ''}\n{'qubits': 2, 'gates': [ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(1,phase=1), XPhase(1,phase=1), ZPhase(1,phase=0), ZPhase(1,phase=-1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2), CNOT(0,1), ZPhase(0,phase=-1/2), ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(0,phase=1/2), XPhase(0,phase=1/2), ZPhase(0,phase=1/2), ZPhase(1,phase=-1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2), ZPhase(1,phase=1/2), XPhase(1,phase=1/2), ZPhase(1,phase=1/2)], 'name': ''}\n" ], [ "#qasm = qc.decompose().qasm()\npyzx.draw_many(circ_list, whichpyzx)", "_____no_output_____" ], [ "graph_list = [circ_list[w].to_graph() for w in whichpyzx]\n[pyzx.full_reduce(g) for g in graph_list]\npyzx.draw_many(graph_list, range(len(whichpyzx)))", "_____no_output_____" ], [ "pyzx_circ_list = [pyzx.extract.streaming_extract(g) for g in graph_list]\n\npyzx_circ_list = [pyzx.optimize.basic_optimization(new_c.to_basic_gates()) for new_c in pyzx_circ_list]\n\npyzx_qasm = [new_c.to_basic_gates().to_qasm() for new_c in pyzx_circ_list]\n\npassedAll = True\nfor i in range(len(pyzx_circ_list)):\n try:\n assert(pyzx.compare_tensors(pyzx_circ_list[i], circ_list[whichpyzx[i]], False))\n except AssertionError:\n print(i)\n print(circ_list[whichpyzx[i]].__dict__)\n print(pyzx_circ_list[i].__dict__)\n passedAll = False\nassert(passedAll)", "_____no_output_____" ], [ "pyzx_qasm = [\"\\n\".join(['' if line.startswith(\"qreg\") else line for line in circ.splitlines()[2:]]) for circ in pyzx_qasm]\n\nfor new_qasm in pyzx_qasm:\n [print(line) for line in new_qasm.splitlines()]\n print()", "\nrz(1.5*pi) q[0];\ncz q[0], q[1];\nh q[1];\nrz(0.5*pi) q[1];\ncz q[0], q[1];\nh q[1];\nz q[1];\nh q[0];\n\n\nrz(1.5*pi) q[1];\ncx q[1], q[0];\nh q[1];\nrz(0.5*pi) q[1];\nh q[0];\nz q[0];\n\n" ], [ "#now we need to map registers and glue all the pieces back together\nfor i in range(len(pyzx_qasm)):\n circ_list[whichpyzx[i]] = pyzx_qasm[i]\n#print(circ_list)\n## join the\nqasm_string = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\n'+\"\\n\".join(circ_list)\nqasm_string = qasm_string.replace('q[', 'qr[')\nprint(qasm_string)", "OPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg qr[2];\ncreg cr[2];\n\nrz(1.5*pi) qr[0];\ncz qr[0], qr[1];\nh qr[1];\nrz(0.5*pi) qr[1];\ncz qr[0], qr[1];\nh qr[1];\nz qr[1];\nh qr[0];\nbarrier qr[0],qr[1];\n\nrz(1.5*pi) qr[1];\ncx qr[1], qr[0];\nh qr[1];\nrz(0.5*pi) qr[1];\nh qr[0];\nz qr[0];\nmeasure qr[0] -> cr[0];\nmeasure qr[1] -> cr[1];\n" ], [ "# pqsl = [line + \"\\n\" for line in pyzx_qasm] #took out .splitlines()\n# qsl = [line + \"\\n\" for line in 
qasm.splitlines()]\n# # print(pqsl)\n# # print(qsl)\n# new_qasm = '\\n'.join(qsl[0:4]) + ''.join(pqsl[3:]) + ''.join(qsl[-2:])\n# new_qasm = new_qasm.replace('q[', 'qr[')\n# print(new_qasm)", "_____no_output_____" ], [ "new_qc = qc.from_qasm_str(qasm_string)\nprint(new_qc)", " ┌────────────┐ ┌───┐ ░ »\nqr_0: |0>┤ Rz(1.5*pi) ├─■─────────────────────■─┤ H ├──────░───────────────»\n └────────────┘ │ ┌───┐┌────────────┐ │ ├───┤┌───┐ ░ ┌────────────┐»\nqr_1: |0>───────────────■─┤ H ├┤ Rz(0.5*pi) ├─■─┤ H ├┤ Z ├─░─┤ Rz(1.5*pi) ├»\n └───┘└────────────┘ └───┘└───┘ ░ └────────────┘»\n cr_0: 0 ══════════════════════════════════════════════════════════════════»\n »\n cr_1: 0 ══════════════════════════════════════════════════════════════════»\n »\n« ┌───┐┌───┐ ┌───┐ ┌─┐ \n«qr_0: ┤ X ├┤ H ├────┤ Z ├─────┤M├───\n« └─┬─┘├───┤┌───┴───┴────┐└╥┘┌─┐\n«qr_1: ──■──┤ H ├┤ Rz(0.5*pi) ├─╫─┤M├\n« └───┘└────────────┘ ║ └╥┘\n«cr_0: ═════════════════════════╩══╬═\n« ║ \n«cr_1: ════════════════════════════╩═\n« \n" ], [ "print(qc)", " ┌──────────┐┌──────────┐┌────────────┐ ┌─────────────┐»\nqr_0: |0>┤ U2(0,pi) ├┤ U2(0,pi) ├┤ U1(0.5*pi) ├──■──┤ U1(-0.5*pi) ├»\n ├──────────┤├──────────┤├────────────┤┌─┴─┐└─┬──────────┬┘»\nqr_1: |0>┤ U2(0,pi) ├┤ U2(0,pi) ├┤ U1(0.5*pi) ├┤ X ├──┤ U2(0,pi) ├─»\n └──────────┘└──────────┘└────────────┘└───┘ └──────────┘ »\n cr_0: 0 ══════════════════════════════════════════════════════════»\n »\n cr_1: 0 ══════════════════════════════════════════════════════════»\n »\n« ┌──────────┐ ░ ┌──────────┐ »\n«qr_0: ─┤ U2(0,pi) ├──────────────────────────░───────┤ U2(0,pi) ├──────»\n« ┌┴──────────┴┐┌──────────────────────┐ ░ ┌─────┴──────────┴─────┐»\n«qr_1: ┤ U1(0.5*pi) ├┤ U3(pi,0.5*pi,0.5*pi) ├─░─┤ U3(pi,0.5*pi,0.5*pi) ├»\n« └────────────┘└──────────────────────┘ ░ └──────────────────────┘»\n«cr_0: ═════════════════════════════════════════════════════════════════»\n« »\n«cr_1: ═════════════════════════════════════════════════════════════════»\n« »\n« ┌────────────┐ ┌─────────────┐┌──────────┐┌──────────┐»\n«qr_0: ─┤ U1(0.5*pi) ├──────────────■──┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ U2(0,pi) ├»\n« ┌┴────────────┤┌──────────┐┌─┴─┐├─────────────┤├──────────┤├──────────┤»\n«qr_1: ┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ X ├┤ U1(-0.5*pi) ├┤ U2(0,pi) ├┤ U2(0,pi) ├»\n« └─────────────┘└──────────┘└───┘└─────────────┘└──────────┘└──────────┘»\n«cr_0: ═══════════════════════════════════════════════════════════════════════»\n« »\n«cr_1: ═══════════════════════════════════════════════════════════════════════»\n« »\n« ┌─┐ \n«qr_0: ┤M├───\n« └╥┘┌─┐\n«qr_1: ─╫─┤M├\n« ║ └╥┘\n«cr_0: ═╩══╬═\n« ║ \n«cr_1: ════╩═\n« \n" ], [ "import qiskit\nfrom qiskit.providers.basicaer import QasmSimulatorPy\nc1 = qiskit.execute(qc, QasmSimulatorPy()).result().get_counts()\nc2 = qiskit.execute(new_qc, QasmSimulatorPy()).result().get_counts()", "_____no_output_____" ], [ "c1", "_____no_output_____" ], [ "c2", "_____no_output_____" ], [ "assert(c1 == c2)", "_____no_output_____" ], [ "qc.depth()", "_____no_output_____" ], [ "qc.size()", "_____no_output_____" ], [ "new_qc.depth()", "_____no_output_____" ], [ "new_qc.size()", "_____no_output_____" ], [ "new_new_qc = qiskit.transpile(qc, basis_gates=['u3', 'cx'], optimization_level=2)", "_____no_output_____" ], [ "print(new_new_qc)", " ┌──────────┐ ┌────────────┐ ░ ┌──────────────────┐ »\nqr_0: |0>┤ U1(pi/2) ├──■──────┤ U2(0,pi/2) ├────░─┤ U3(pi/2,pi/2,pi) ├──■──»\n ├──────────┤┌─┴─┐┌───┴────────────┴──┐ ░ ├──────────────────┤┌─┴─┐»\nqr_1: |0>┤ U1(pi/2) ├┤ X ├┤ U3(pi/2,pi/2,2pi) ├─░─┤ U3(pi/2,pi,pi/2) ├┤ X ├»\n 
└──────────┘└───┘└───────────────────┘ ░ └──────────────────┘└───┘»\n cr_0: 0 ══════════════════════════════════════════════════════════════════»\n »\n cr_1: 0 ══════════════════════════════════════════════════════════════════»\n »\n« ┌───────────┐┌─┐ \n«qr_0: ┤ U1(3pi/2) ├┤M├───\n« ├───────────┤└╥┘┌─┐\n«qr_1: ┤ U1(3pi/2) ├─╫─┤M├\n« └───────────┘ ║ └╥┘\n«cr_0: ══════════════╩══╬═\n« ║ \n«cr_1: ═════════════════╩═\n« \n" ], [ "new_new_qc.depth()", "_____no_output_____" ], [ "new_new_qc.size()", "_____no_output_____" ], [ "doubly_qc = qiskit.transpile(new_qc, basis_gates=['u3', 'cx'], optimization_level=2)\nprint(doubly_qc)", " ┌───────────┐ ┌──────────┐ ░ »\nqr_0: |0>┤ U1(3pi/2) ├──■───────────────────■──┤ U2(0,pi) ├─░──────────────»\n └┬──────────┤┌─┴─┐┌─────────────┐┌─┴─┐└┬────────┬┘ ░ ┌───────────┐»\nqr_1: |0>─┤ U2(0,pi) ├┤ X ├┤ U2(0,3pi/2) ├┤ X ├─┤ U1(pi) ├──░─┤ U1(3pi/2) ├»\n └──────────┘└───┘└─────────────┘└───┘ └────────┘ ░ └───────────┘»\n cr_0: 0 ══════════════════════════════════════════════════════════════════»\n »\n cr_1: 0 ══════════════════════════════════════════════════════════════════»\n »\n« ┌───┐ ┌────────────────┐ ┌─┐ \n«qr_0: ┤ X ├─┤ U3(pi/2,pi,pi) ├─┤M├───\n« └─┬─┘┌┴────────────────┴┐└╥┘┌─┐\n«qr_1: ──■──┤ U3(pi/2,pi/2,pi) ├─╫─┤M├\n« └──────────────────┘ ║ └╥┘\n«cr_0: ══════════════════════════╩══╬═\n« ║ \n«cr_1: ═════════════════════════════╩═\n« \n" ], [ "doubly_qc.depth()", "_____no_output_____" ], [ "doubly_qc.size()", "_____no_output_____" ], [ "c3 = qiskit.execute(new_new_qc, QasmSimulatorPy()).result().get_counts()\nc4 = qiskit.execute(doubly_qc, QasmSimulatorPy()).result().get_counts()\nc3", "_____no_output_____" ], [ "c4", "_____no_output_____" ] ] ]
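As a small optional aside (a hypothetical helper, not part of the notebook above — it only reuses the circuits already built there together with Qiskit's standard `depth()`, `size()` and `count_ops()` methods), the depth/size comparisons spread over the last few cells could be collected into one summary:

```python
# Hypothetical summary of the different transpilation routes; assumes qc, new_qc,
# new_new_qc and doubly_qc are the circuits constructed in the cells above.
def summarize(name, circ):
    ops = circ.count_ops()                           # dict-like: gate name -> count
    two_qubit = ops.get('cx', 0) + ops.get('cz', 0)  # total two-qubit gates
    print(f'{name:>15}: depth={circ.depth()}, size={circ.size()}, two-qubit gates={two_qubit}')

for name, circ in [('original', qc), ('pyzx', new_qc),
                   ('qiskit opt-2', new_new_qc), ('pyzx + qiskit', doubly_qc)]:
    summarize(name, circ)
```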
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06c8d9abb48cbc3e4f062b935ea22302f8c0748
15,305
ipynb
Jupyter Notebook
experiments/header_analysis/Wordnet.ipynb
akshataasheth/somef
8c371ba89d308125f9aea617e9b35e76d56c8e9d
[ "MIT" ]
12
2020-07-23T21:05:53.000Z
2022-02-04T15:43:04.000Z
experiments/header_analysis/Wordnet.ipynb
akshataasheth/somef
8c371ba89d308125f9aea617e9b35e76d56c8e9d
[ "MIT" ]
249
2020-04-12T05:06:48.000Z
2022-03-31T15:27:11.000Z
experiments/header_analysis/Wordnet.ipynb
akshataasheth/somef
8c371ba89d308125f9aea617e9b35e76d56c8e9d
[ "MIT" ]
11
2020-06-02T16:11:48.000Z
2022-02-22T12:25:48.000Z
52.235495
476
0.406664
[ [ [ "import pandas as pd\nfrom textblob import Word", "_____no_output_____" ], [ "headers = pd.read_csv(\"header.csv\")\nheaders['Header']", "_____no_output_____" ], [ "citation = [Word(\"citation\").synsets[2], Word(\"reference\").synsets[1], Word(\"cite\").synsets[3]]\nrun = [Word(\"run\").synsets[9],Word(\"run\").synsets[34],Word(\"execute\").synsets[4]]\ninstall = [Word(\"installation\").synsets[0],Word(\"install\").synsets[0],Word(\"setup\").synsets[1],Word(\"prepare\").synsets[0],Word(\"preparation\").synsets[0],Word(\"manual\").synsets[0],Word(\"guide\").synsets[2],Word(\"guide\").synsets[9]]\ndownload = [Word(\"download\").synsets[0]]\nrequirement = [Word(\"requirement\").synsets[2],Word(\"prerequisite\").synsets[0],Word(\"prerequisite\").synsets[1],Word(\"dependency\").synsets[0],Word(\"dependent\").synsets[0]]\ncontact = [Word(\"contact\").synsets[9]]\ndescription = [Word(\"description\").synsets[0],Word(\"description\").synsets[1],Word(\"introduction\").synsets[3],Word(\"introduction\").synsets[6],Word(\"basics\").synsets[0],Word(\"initiation\").synsets[1],Word(\"start\").synsets[0],Word(\"start\").synsets[4],Word(\"started\").synsets[0],Word(\"started\").synsets[1],Word(\"started\").synsets[7],Word(\"started\").synsets[8],Word(\"overview\").synsets[0],Word(\"summary\").synsets[0],Word(\"summary\").synsets[2]]\ncontributor = [Word(\"contributor\").synsets[0]]\ndocumentation = [Word(\"documentation\").synsets[1]]\nlicense = [Word(\"license\").synsets[3],Word(\"license\").synsets[0]]\nusage = [Word(\"usage\").synsets[0],Word(\"example\").synsets[0],Word(\"example\").synsets[5],Word(\"implement\").synsets[1],Word(\"implementation\").synsets[1],Word(\"demo\").synsets[1],Word(\"tutorial\").synsets[0],Word(\"tutorial\").synsets[1]]\nupdate = [Word(\"updating\").synsets[0],Word(\"updating\").synsets[3]]\nissues = [Word(\"issues\").synsets[0],Word(\"errors\").synsets[5],Word(\"problems\").synsets[0],Word(\"problems\").synsets[2]]\nsupport = [Word(\"support\").synsets[7],Word(\"help\").synsets[0],Word(\"help\").synsets[9],Word(\"report\").synsets[0],Word(\"report\").synsets[6]]\n\n\n\ngroup = dict()\ngroup.update({\"citation\":citation})\ngroup.update({\"download\":download})\ngroup.update({\"run\":run})\ngroup.update({\"installation\":install})\ngroup.update({\"requirement\":requirement})\ngroup.update({\"contact\":contact})\ngroup.update({\"description\":description})\ngroup.update({\"contributor\":contributor})\ngroup.update({\"documentation\":documentation})\ngroup.update({\"license\":license})\ngroup.update({\"usage\":usage})\ngroup.update({\"update\":update})\ngroup.update({\"issues\":issues})\ngroup.update({\"support\":support})\n\n\ndef find_sim(wordlist,wd): #returns the max probability between a word and subgroup\n simvalue = []\n for sense in wordlist:\n if(wd.path_similarity(sense)!=None):\n simvalue.append(wd.path_similarity(sense))\n if(len(simvalue)!=0):\n return max(simvalue)\n else:\n return 0\n \n\ndef match_group(word_syn,group,threshold):\n currmax = 0\n maxgroup = \"\"\n simvalues = dict()\n for sense in word_syn: #for a given sense of a word\n similarities = []\n for key, value in group.items(): #value has all the similar words\n path_sim = find_sim(value,sense)\n# print(\"Similarity is:\",path_sim)\n if(path_sim>threshold): #then append to the list\n if(path_sim>currmax):\n maxgroup = key\n currmax = path_sim\n\n return maxgroup\n", "_____no_output_____" ], [ "datadf = pd.DataFrame({'Header': [], 'Group': []})\n\n\nmatchedgroups = []\nfor h in 
headers[\"Header\"]:\n sentence = h.split(\" \")[1:]\n for s in sentence:\n synn = Word(s).synsets\n if(len(synn)>0):\n bestgroup = match_group(synn,group,0.6)\n if(bestgroup!=\"\"):\n datadf = datadf.append({'Header' : h, 'Group' : bestgroup}, ignore_index=True)\nprint(datadf)\ndatadf.to_csv('header_groups.csv', index=False)", " Header Group\n0 Simplemost installation installation\n1 Running the software run\n2 Running automatically generated scripts run\n3 In case of trouble issues\n4 Prerequisites requirement\n5 Installation installation\n6 Reference citation\n7 Contact contact\n8 Reference citation\n9 download single scene by known product id download\n10 download all results from the search download\n11 Get basic information about the product: its ... description\n12 its download url download\n13 Get the product's full metadata available on ... description\n14 Introduction description\n15 Prepare training data installation\n16 Begin to train description\n17 Quick start description\n18 Citation citation\n19 a record schema. We can get initial values f... description\n20 coordinate reference system as the source. Th... citation\n21 Get a point on the boundary of the record's description\n22 executes ``dst.flush(); dst.close()``. run\n23 Requirements requirement\n24 Installation installation\n25 Linux Setup with virtualenv installation\n26 Install TensorFlow installation\n27 Windows Setup with python 3 and Anaconda installation\n28 if you need to get chumpy description\n29 Demo usage\n.. ... ...\n344 License license\n345 Introduction description\n346 Documentation documentation\n347 Issues issues\n348 License license\n349 Installation installation\n350 Documentation documentation\n351 License license\n352 Get the 1st bending mode shape. Results are ... description\n353 to avoid getting the \"Factor is exactly singu... description\n354 to avoid getting the \"Factor is exactly singu... issues\n355 Install or Update installation\n356 Install or Update update\n357 Usage usage\n358 Example of a json file usage\n359 Example of a js module usage\n360 Getting Started description\n361 Getting Started description\n362 Installation installation\n363 Example usage\n364 Documentation documentation\n365 Report Bugs support\n366 Support support\n367 License license\n368 Introduction description\n369 Requirements requirement\n370 Quick Start description\n371 Contributors contributor\n372 Citation citation\n373 License license\n\n[374 rows x 2 columns]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d06c9bf0bf6fbdfa68d0c39f29a5b21ebb14132d
7,726
ipynb
Jupyter Notebook
Integration.ipynb
QuinnPaddock/UCSC-ASTR-119
7b37dfe9f28a4f4a99764aa188fe3a1358ae1639
[ "MIT" ]
null
null
null
Integration.ipynb
QuinnPaddock/UCSC-ASTR-119
7b37dfe9f28a4f4a99764aa188fe3a1358ae1639
[ "MIT" ]
8
2021-09-23T18:17:04.000Z
2021-12-06T22:07:10.000Z
Integration.ipynb
QuinnPaddock/UCSC-ASTR-119
7b37dfe9f28a4f4a99764aa188fe3a1358ae1639
[ "MIT" ]
null
null
null
23.34139
78
0.462853
[ [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "### Define a function to integrate", "_____no_output_____" ] ], [ [ "def func(x):\n a = 1.01\n b= -3.04\n c = 2.07\n return a*x**2 + b*x + c", "_____no_output_____" ] ], [ [ "### Define it's integral so we know the right answer", "_____no_output_____" ] ], [ [ "def func_integral(x):\n a = 1.01\n b= -3.04\n c = 2.07\n return (a*x**3)/3. + (b*x**2)/2. + c*x", "_____no_output_____" ] ], [ [ "### Define core of trapezoid method", "_____no_output_____" ] ], [ [ "def trapezoid_core(f,x,h):\n return 0.5*h*(f(x*h)+f(x))", "_____no_output_____" ] ], [ [ "### Define the wrapper function to perform the trapezoid method", "_____no_output_____" ] ], [ [ "def trapezoid_method(f,a,b,N):\n #f == function to integrate\n #a == lower limit of integration\n #b == upper limit of integration\n #N == number of intervals to use\n \n #define x values to perform the trapezoid rule\n x = np.linspace(a,b,N)\n h = x[1]-x[0]\n \n #define the value of the integral\n Fint = 0.0\n \n #perform the integral using the trapezoid method\n for i in range(0,len(x)-1,1):\n Fint += trapezoid_core(f,x[i],h)\n \n #return the answer\n return Fint", "_____no_output_____" ] ], [ [ "### Define the core of simpson's method", "_____no_output_____" ] ], [ [ "def simpsons_core(f,x,h):\n return h*(f(x) + 4*f(x+h) + f(x+2*h))/3", "_____no_output_____" ] ], [ [ "### Define a wrapper for simpson's method", "_____no_output_____" ] ], [ [ "def simpsons_method(f,a,b,N):\n #f == function to integrate\n #a == lower limit of integration\n #b == upper limit of integration\n #N == number of intervals to use\n \n \n x = np.linspace(a,b,N)\n h = x[1]-x[0]\n \n #define the value of the integral\n Fint = 0.0\n \n #perform the integral using the simpson's method\n for i in range(0,len(x)-2,2):\n Fint += simpsons_core(f,x[i],h)\n \n #apply simpson's rule over the last interval if X is even\n if((N%2)==0):\n Fint += simpsons_core(f,x[-2],0.5*h)\n \n #return the answer\n return Fint", "_____no_output_____" ] ], [ [ "### Define Romberg core", "_____no_output_____" ] ], [ [ "def romberg_core(f,a,b,i):\n #we need the difference between a and b\n h = b-a\n \n #interval betwen function evaluations at refine level i\n dh = h/2.**(i)\n \n #we need the cofactor\n K = h/2.**(i+1)\n \n #and the function evaluations\n M = 0.0\n for j in range(2**i):\n M += f(a + 0.5*dh +j*dh)\n \n #return the answer\n return K*M", "_____no_output_____" ] ], [ [ "### Define a wrapper function", "_____no_output_____" ] ], [ [ "def romberg_integration(f,a,b,tol):\n #define an iteration variable\n i=0\n \n #define a max number of iterations\n imax = 1000\n \n #define an error estimate\n delta = 100.0*np.fabs(tol)\n \n #set an array of integral answers\n I = np.zeros(imax,dtype=float)\n \n #fet the zeroth romberg iteration first\n I[0] = 0.5*(b-a)*(f(a) + f(b))\n \n #iterate by 1\n i += 1\n \n #iterate until we reach tolerance\n while(delta>tol):\n \n #find the romberg integration\n I[i] = 0.5*I[i-1] + romberg_core(f,a,b,i)\n \n #compute a fractional error estimate\n delta = np.fabs((I[i]-I[i-1])/I[i])\n \n print(i,\":\",I[i],I[i-1],delta)\n \n if(delta>tol):\n #iterate\n i += 1\n \n #if we've reached maximim iterations\n if(i>imax):\n print(\"Max iterations reached\")\n raise StopIteration(\"Stopping iterations after \",i)\n \n #return the answer\n return I[i]", "_____no_output_____" ] ], [ [ "### Check the interages", "_____no_output_____" ] ], [ [ "Answer = func_integral(1) - 
func_integral(0)\nprint(Answer)\nprint(\"Trapezoidal method\")\nprint(trapezoid_method(func,0,1,10))\nprint(\"Simpson's method\")\nprint(simpsons_method(func,0,1,10))\nprint(\"Romberg\")\ntolerance = 1.0e-4\nRI = romberg_integration(func,0,1,tolerance)\nprint(RI, (RI-Answer)/Answer, tolerance)", "_____no_output_____" ] ] ]
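An optional sanity check (not part of the notebook above, and it assumes SciPy is installed alongside NumPy) is to compare the hand-written methods against `scipy.integrate.quad`, which returns both an estimate of the integral and an error bound:

```python
# Optional cross-check against SciPy's adaptive quadrature; reuses func() and
# func_integral() defined above.
from scipy import integrate

value, err = integrate.quad(func, 0, 1)
print('scipy quad      :', value, '+/-', err)
print('analytic answer :', func_integral(1) - func_integral(0))
```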
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06cb589707809c155959fa3cc7ff81cc7fef22f
12,157
ipynb
Jupyter Notebook
Webcam.ipynb
maxspero/DCGAN-with-camera-mask
8485407225ea56d0137249d57c704548ff37c267
[ "MIT" ]
null
null
null
Webcam.ipynb
maxspero/DCGAN-with-camera-mask
8485407225ea56d0137249d57c704548ff37c267
[ "MIT" ]
null
null
null
Webcam.ipynb
maxspero/DCGAN-with-camera-mask
8485407225ea56d0137249d57c704548ff37c267
[ "MIT" ]
null
null
null
59.8867
4,346
0.69787
[ [ [ "import sys\nprint(sys.version)\n#import six\nimport cv2\nprint(cv2.__version__)\nimport matplotlib.pyplot as plt\nfrom IPython import display\nimport numpy as np\n#import tensorflow as tf\n%matplotlib inline", "3.5.3 |Continuum Analytics, Inc.| (default, May 15 2017, 10:43:23) [MSC v.1900 64 bit (AMD64)]\n3.1.0\n" ], [ "vc = cv2.VideoCapture(1)\n\nif vc.isOpened(): # try to get the first frame\n is_capturing, frame = vc.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # makes the blues image look real colored\n webcam_preview = plt.imshow(frame) \nelse:\n is_capturing = False\n \nprint(is_capturing)\n\nwhile is_capturing:\n try: # Lookout for a keyboardInterrupt to stop the script\n is_capturing, frame = vc.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # makes the blues image look real colored\n webcam_preview.set_data(frame)\n plt.draw()\n\n display.clear_output(wait=True)\n display.display(plt.gcf())\n plt.pause(0.01) # the pause time is = 1 / framerate\n except KeyboardInterrupt:\n vc.release()\n", "_____no_output_____" ], [ "vc.release()", "_____no_output_____" ], [ "vc = cv2.VideoCapture(1)\n\nif vc.isOpened(): # try to get the first frame\n is_capturing, frame = vc.read()\n if frame is not None:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # makes the blues image look real colored\n webcam_preview = plt.imshow(frame) \nelse:\n is_capturing = False\n \nprint(is_capturing)\nwhile is_capturing:\n try: # Lookout for a keyboardInterrupt to stop the script\n is_capturing, frame = vc.read()\n if frame is not None:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # makes the blues image look real colored\n print(frame.shape) # (480, 640) * (1/15, 1/20) = (32, 32)\n #res = cv2.resize(frame, None, fx=1/20, fy=1/15, interpolation=cv2.INTER_AREA) # 32, 32\n #res = cv2.resize(frame, None, fx=1/40, fy=1/30, interpolation=cv2.INTER_AREA) # 16, 16\n res = cv2.resize(frame, None, fx=1/80, fy=1/60, interpolation=cv2.INTER_AREA) # 8, 8\n #print(res.shape)\n res_min = np.min(res)\n res_max = np.max(res)\n scaled = (res - res_min)/float(res_max - res_min)\n flipped = cv2.flip(scaled, 1)\n webcam_preview = plt.imshow(flipped)\n webcam_preview.set_data(flipped)\n plt.draw()\n print(flipped)\n display.clear_output(wait=True)\n display.display(plt.gcf())\n except KeyboardInterrupt:\n vc.release()\n", "True\n(480, 640)\n(8, 8)\n[[ 0.32941176 0.41764706 0.48823529 0.56470588 0.75294118 1.\n 0.67647059 0.38235294]\n [ 0.46470588 0.54705882 0.57647059 0.57058824 0.6 0.59411765\n 0.52941176 0.42352941]\n [ 0.55294118 0.67058824 0.71764706 0.17647059 0.21176471 0.55294118\n 0.55294118 0.54117647]\n [ 0.60588235 0.75294118 0.61764706 0.08235294 0.18823529 0.46470588\n 0.63529412 0.62352941]\n [ 0.61764706 0.75882353 0.57058824 0.23529412 0.35294118 0.54117647\n 0.68823529 0.65882353]\n [ 0.58823529 0.70588235 0.50588235 0.28235294 0.44705882 0.72352941\n 0.69411765 0.63529412]\n [ 0.52352941 0.62352941 0.30588235 0.10588235 0.27647059 0.44705882\n 0.65294118 0.58235294]\n [ 0.21764706 0.24705882 0. 0.11764706 0.17647059 0.13529412\n 0.22941176 0.24117647]]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d06cc3f67ed81d8c5432a20d577924fb3d87ff9c
105,300
ipynb
Jupyter Notebook
lecture/ML_tuning_biases.ipynb
JoseAlanis/ML-DL_workshop_SynAGE
c574645b18f202b6b6a970a6bd5d92376a323759
[ "BSD-3-Clause" ]
2
2021-07-22T19:13:34.000Z
2021-11-18T10:46:02.000Z
lecture/ML_tuning_biases.ipynb
JoseAlanis/ML-DL_workshop_SynAGE
c574645b18f202b6b6a970a6bd5d92376a323759
[ "BSD-3-Clause" ]
null
null
null
lecture/ML_tuning_biases.ipynb
JoseAlanis/ML-DL_workshop_SynAGE
c574645b18f202b6b6a970a6bd5d92376a323759
[ "BSD-3-Clause" ]
3
2021-08-04T12:09:45.000Z
2021-09-23T06:22:22.000Z
117.130145
26,188
0.870332
[ [ [ "# Tuning an estimator\n\n\n[José C. García Alanis (he/him)](https://github.com/JoseAlanis) \nResearch Fellow - Child and Adolescent Psychology at [Uni Marburg](https://www.uni-marburg.de/de) \nMember - [RTG 2271 | Breaking Expectations](https://www.uni-marburg.de/en/fb04/rtg-2271), [Brainhack](https://brainhack.org/)\n\n<img align=\"left\" src=\"https://raw.githubusercontent.com/G0RELLA/gorella_mwn/master/lecture/static/Twitter%20social%20icons%20-%20circle%20-%20blue.png\" alt=\"logo\" title=\"Twitter\" width=\"30\" height=\"30\" /> <img align=\"left\" src=\"https://raw.githubusercontent.com/G0RELLA/gorella_mwn/master/lecture/static/GitHub-Mark-120px-plus.png\" alt=\"logo\" title=\"Github\" width=\"30\" height=\"30\" /> &nbsp;&nbsp;@JoiAlhaniz \n\n\n<img align=\"right\" src=\"https://raw.githubusercontent.com/PeerHerholz/ML-DL_workshop_SynAGE/master/lecture/static/ml-dl_workshop.png\" alt=\"logo\" title=\"Github\" width=\"400\" height=\"280\" />", "_____no_output_____" ], [ "### Aim(s) of this section\n\nIt's very important to learn when and where its appropriate to \"tweak\" your model.\n\n\nSince we have done all of the previous analysis in our training data, it's fine to try out different models.\n\nBut we absolutely cannot \"test\" it on our *left out data*. If we do, we are in great danger of overfitting.\n\nIt is not uncommon to try other models, or tweak hyperparameters. In this case, due to our relatively small sample size, we are probably not powered sufficiently to do so, and we would once again risk overfitting. However, for the sake of demonstration, we will do some tweaking.", "_____no_output_____" ], [ "We will try a few different examples:\n- normalizing our target data\n- tweaking our hyperparameters\n- trying a more complicated model\n- feature selection\n\n", "_____no_output_____" ], [ "### Prepare data for model\n\nLets bring back our example data set", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\n# get the data set\ndata = np.load('MAIN2019_BASC064_subsamp_features.npz')['a']\n\n# get the labels\ninfo = pd.read_csv('participants.csv')\n\n\nprint('There are %s samples and %s features' % (data.shape[0], data.shape[1]))", "There are 155 samples and 2016 features\n" ] ], [ [ "We'll set `Age` as target\n- i.e., well look at these from the `regression` perspective", "_____no_output_____" ] ], [ [ "# set age as target\nY_con = info['Age']\nY_con.describe()", "_____no_output_____" ] ], [ [ "### Model specification\n\nNow let's bring back the model specifications we used last time", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\n# split the data\nX_train, X_test, y_train, y_test = train_test_split(data, Y_con, random_state=0)\n\n# use `AgeGroup` for stratification\nage_class2 = info.loc[y_train.index,'AgeGroup']", "_____no_output_____" ] ], [ [ "### Normalize the target data¶", "_____no_output_____" ] ], [ [ "# plot the data\nsns.displot(y_train,label='train')\nplt.legend()", "_____no_output_____" ], [ "# create a log transformer function and log transform Y (age)\nfrom sklearn.preprocessing import FunctionTransformer\n\nlog_transformer = FunctionTransformer(func = np.log, validate=True)\nlog_transformer.fit(y_train.values.reshape(-1,1))\ny_train_log = log_transformer.transform(y_train.values.reshape(-1,1))[:,0]", "_____no_output_____" ] ], [ [ "Now let's plot the transformed data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport seaborn as 
sns\n\nsns.displot(y_train_log,label='test log')\nplt.legend()", "_____no_output_____" ] ], [ [ "and go on with fitting the model to the log-tranformed data", "_____no_output_____" ] ], [ [ "# split the data\nX_train2, X_test, y_train2, y_test = train_test_split(\n X_train, # x\n y_train, # y\n test_size = 0.25, # 75%/25% split \n shuffle = True, # shuffle dataset before splitting\n stratify = age_class2, # keep distribution of age class consistent\n # betw. train & test sets.\n random_state = 0 # same shuffle each time\n)", "_____no_output_____" ], [ "from sklearn.svm import SVR\nfrom sklearn.model_selection import cross_val_predict, cross_val_score\nfrom sklearn.metrics import r2_score, mean_absolute_error\n\n# re-intialize the model\nlin_svr = SVR(kernel='linear') \n\n# predict\ny_pred = cross_val_predict(lin_svr, X_train, y_train_log, cv=10)\n\n# scores\nacc = r2_score(y_train_log, y_pred)\nmae = mean_absolute_error(y_train_log,y_pred)", "_____no_output_____" ], [ "# check the accuracy\nprint('R2:', acc)\nprint('MAE:', mae)", "R2: 0.6565364559090001\nMAE: 0.27044306981575505\n" ], [ "# plot the relationship\nsns.regplot(x=y_pred, y=y_train_log, scatter_kws=dict(color='k'))\nplt.xlabel('Predicted Log Age')\nplt.ylabel('Log Age')", "_____no_output_____" ] ], [ [ "Alright, seems like a definite improvement, right? We might agree on that.\n\nBut we can't forget about interpretability? The MAE is much less interpretable now\n- do you know why?\n", "_____no_output_____" ], [ "### Tweak the hyperparameters¶\n\nMany machine learning algorithms have hyperparameters that can be \"tuned\" to optimize model fitting.\n\nCareful parameter tuning can really improve a model, but haphazard tuning will often lead to overfitting.\n\nOur SVR model has multiple hyperparameters. Let's explore some approaches for tuning them", "_____no_output_____" ], [ "for 1000 points, what is a parameter?", "_____no_output_____" ] ], [ [ "SVR?", "_____no_output_____" ] ], [ [ "Now, how do we know what parameter tuning does?\n\n- One way is to plot a **Validation Curve**, this will let us view changes in training and validation accuracy of a model as we shift its hyperparameters. We can do this easily with sklearn.\n", "_____no_output_____" ], [ "We'll fit the same model, but with a range of different values for `C`\n - The C parameter tells the SVM optimization how much you want to avoid misclassifying each training example. For large values of C, the optimization will choose a smaller-margin hyperplane if that hyperplane does a better job of getting all the training points classified correctly. Conversely, a very small value of C will cause the optimizer to look for a larger-margin separating hyperplane, even if that hyperplane misclassifies more points. For very tiny values of C, you should get misclassified examples, often even if your training data is linearly separable.\n", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import validation_curve\n\n\n\nC_range = 10. 
** np.arange(-3, 7)\ntrain_scores, valid_scores = validation_curve(lin_svr, X_train, y_train_log, \n param_name= \"C\",\n param_range = C_range,\n cv=10,\n scoring='neg_mean_squared_error')\n\n", "_____no_output_____" ], [ "# A bit of pandas magic to prepare the data for a seaborn plot\n\ntScores = pd.DataFrame(train_scores).stack().reset_index()\ntScores.columns = ['C','Fold','Score']\ntScores.loc[:,'Type'] = ['Train' for x in range(len(tScores))]\n\nvScores = pd.DataFrame(valid_scores).stack().reset_index()\nvScores.columns = ['C','Fold','Score']\nvScores.loc[:,'Type'] = ['Validate' for x in range(len(vScores))]\n\nValCurves = pd.concat([tScores,vScores]).reset_index(drop=True)\nValCurves.head()", "_____no_output_____" ], [ "# and plot the results\n\ng = sns.catplot(x='C',y='Score',hue='Type',data=ValCurves,kind='point')\nplt.xticks(range(10))\ng.set_xticklabels(C_range, rotation=90)", "_____no_output_____" ] ], [ [ "It looks like accuracy is better for higher values of `C`, and plateaus somewhere between 0.1 and 1.\n\nThe default setting is `C=1`, so it looks like we can't really improve much by changing `C`.\n\nBut our SVR model actually has two hyperparameters, `C` and `epsilon`. Perhaps there is an optimal combination of settings for these two parameters.\n\nWe can explore that somewhat quickly with a `grid search`, which is once again easily achieved with `sklearn`.\n\nBecause we are fitting the model multiple times witih cross-validation, this will take some time ...\n", "_____no_output_____" ], [ "### Let's tune some hyperparameters", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\n\nC_range = 10. ** np.arange(-3, 8)\nepsilon_range = 10. ** np.arange(-3, 8)\n\nparam_grid = dict(epsilon=epsilon_range, C=C_range)\n\ngrid = GridSearchCV(lin_svr, param_grid=param_grid, cv=10)\n\ngrid.fit(X_train, y_train_log)", "_____no_output_____" ] ], [ [ "Now that the grid search has completed, let's find out what was the \"best\" parameter combination", "_____no_output_____" ] ], [ [ "print(grid.best_params_)", "{'C': 0.01, 'epsilon': 0.01}\n" ] ], [ [ "And if redo our cross-validation with this parameter set?", "_____no_output_____" ] ], [ [ "y_pred = cross_val_predict(SVR(kernel='linear',\n C=grid.best_params_['C'],\n epsilon=grid.best_params_['epsilon'], \n gamma='auto'), \n X_train, y_train_log, cv=10)\n\n# scores\nacc = r2_score(y_train_log, y_pred)\nmae = mean_absolute_error(y_train_log,y_pred)", "_____no_output_____" ], [ "# print model performance\nprint('R2:', acc)\nprint('MAE:', mae)", "R2: 0.6918967934598623\nMAE: 0.26595760648195826\n" ], [ "# and plot the results\nsns.regplot(x=y_pred, y=y_train_log, scatter_kws=dict(color='k'))\nplt.xlabel('Predicted Log Age')\nplt.ylabel('Log Age')", "_____no_output_____" ] ], [ [ "Perhaps unsurprisingly, the model fit is only very slightly improved from what we had with our defaults. **There's a reason they are defaults, you silly**\n\nGrid search can be a powerful and useful tool. But can you think of a way that, if not properly utilized, it could lead to overfitting? Could it be happening here?\n\nYou can find a nice set of tutorials with links to very helpful content regarding how to tune hyperparameters while being aware of over- and under-fitting here:\n\nhttps://scikit-learn.org/stable/modules/learning_curve.html\n", "_____no_output_____" ] ] ]
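One common guard against the grid-search overfitting risk raised above is nested cross-validation: hyperparameters are tuned in an inner loop while an outer loop scores the tuned model on folds it never saw during tuning. The sketch below is an illustration only (it reuses `lin_svr`, `C_range`, `epsilon_range`, `X_train` and `y_train_log` from the cells above) rather than part of the original analysis:

```python
# Nested CV sketch: GridSearchCV is the inner tuning loop, cross_val_score the outer loop.
from sklearn.model_selection import GridSearchCV, cross_val_score

inner_search = GridSearchCV(lin_svr,
                            param_grid=dict(C=C_range, epsilon=epsilon_range),
                            cv=5)
outer_scores = cross_val_score(inner_search, X_train, y_train_log,
                               cv=5, scoring='neg_mean_absolute_error')
print('outer-fold MAE: {:.3f} +/- {:.3f}'.format(-outer_scores.mean(), outer_scores.std()))
```

With the full 11 x 11 parameter grid this is slow, so in practice you would first shrink the grid around the region the validation curve already identified.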
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d06cc71eb8d32fe0cef2b2daea5e2f974c412ebf
146,423
ipynb
Jupyter Notebook
HW2/Classifiers.ipynb
oyrx/PHBS_MLF_2019
c0f7dd3621752c6c471b57680595da518374a604
[ "MIT" ]
null
null
null
HW2/Classifiers.ipynb
oyrx/PHBS_MLF_2019
c0f7dd3621752c6c471b57680595da518374a604
[ "MIT" ]
null
null
null
HW2/Classifiers.ipynb
oyrx/PHBS_MLF_2019
c0f7dd3621752c6c471b57680595da518374a604
[ "MIT" ]
null
null
null
144.401381
21,764
0.857543
[ [ [ "# Homework 2: classification\nData source: http://archive.ics.uci.edu/ml/datasets/Polish+companies+bankruptcy+data\n**Description:** The goal of this HW is to be familiar with the basic classifiers PML Ch 3.\nFor this HW, we continue to use Polish companies bankruptcy data Data Set from UCI Machine Learning Repository. Download the dataset and put the 4th year file (4year.arff) in your YOUR_GITHUB_ID/PHBS_MLF_2019/HW2/\nI did a basic process of the data (loading to dataframe, creating bankruptcy column, changing column names, filling-in na values, training-vs-test split, standardizatino, etc). See my github。", "_____no_output_____" ], [ "# Preparation", "_____no_output_____" ], [ "## Load, read and clean", "_____no_output_____" ] ], [ [ "from scipy.io import arff\nimport pandas as pd\nimport numpy as np\n\ndata = arff.loadarff('./data/4year.arff')\ndf = pd.DataFrame(data[0])\ndf['bankruptcy'] = (df['class']==b'1')\ndel df['class']\ndf.columns = ['X{0:02d}'.format(k) for k in range(1,65)] + ['bankruptcy']\ndf.describe()", "_____no_output_____" ], [ "sum(df.bankruptcy == True)", "_____no_output_____" ], [ "from sklearn.impute import SimpleImputer\n\nimp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')\nX_imp = imp_mean.fit_transform(df.values)\n", "_____no_output_____" ] ], [ [ "*A dll load error occured here. Solution recorded in [my blog](https://quoth.win/671.html)*", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nX, y = X_imp[:, :-1], X_imp[:, -1]\nX_train, X_test, y_train, y_test =\\\n train_test_split(X, y, \n test_size=0.3, \n random_state=0, \n stratify=y)", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler\n\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std = stdsc.transform(X_test)", "_____no_output_____" ] ], [ [ "## 1. Find the 2 most important features\nSelect the 2 most important features using LogisticRegression with L1 penalty. 
**(Adjust C until you see 2 features)**", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\nC = [1, .1, .01, 0.001]\ncdf = pd.DataFrame()\n\nfor c in C:\n lr = LogisticRegression(penalty='l1', C=c, solver='liblinear', random_state=0)\n lr.fit(X_train_std, y_train)\n print(f'[C={c}] with {lr.coef_[lr.coef_!=0].shape[0]} features: \\n {lr.coef_[lr.coef_!=0]} \\n') # Python >= 3.7\n if lr.coef_[lr.coef_!=0].shape[0] == 2:\n cdf = pd.DataFrame(lr.coef_.T , df.columns[:-1], columns=['coef'])", "[C=1] with 41 features: \n [-0.21124721 -0.32721186 -0.027786 -1.13272997 -0.06357798 2.33195848\n -1.15522622 -0.00657024 -0.04684187 -1.08683927 0.1425248 0.01096755\n -0.01922072 -0.01383184 -0.01411706 -0.10736095 0.00238513 0.24577125\n -0.85510327 0.85209928 -0.30366778 -0.30474956 -0.0296142 -0.01749839\n -0.04401046 -0.03613927 0.12211322 -0.0138838 -0.45429792 1.3157471\n -0.4035633 -0.46000256 0.04269182 -0.13000407 -0.0474417 -0.11325886\n -0.12953285 -0.56454917 0.00689488 -0.4300807 -0.08949632] \n\n[C=0.1] with 14 features: \n [-0.07260191 -0.00523502 -0.05866954 -0.1005855 -0.62441034 -0.04143727\n -0.07701467 -0.00230242 0.01202513 0.01090586 0.28007452 -0.10726656\n -0.04314757 -0.00208574] \n\n[C=0.01] with 2 features: \n [-0.00174613 -0.05571114] \n\n[C=0.001] with 0 features: \n [] \n\n" ], [ "lr = LogisticRegression(penalty='l1', C=0.01, solver='liblinear', random_state=0) # complete\nlr.fit(X_train_std, y_train)", "_____no_output_____" ], [ "cdf = cdf[cdf.coef != 0]\ncdf", "_____no_output_____" ] ], [ [ "### redefine X_train_std and X_test_std", "_____no_output_____" ] ], [ [ "X_train_std = X_train_std[:, lr.coef_[0]!=0]\nX_test_std = X_test_std[:, lr.coef_[0]!=0]", "_____no_output_____" ], [ "from matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\nplt.scatter(x=X_train_std[:,0], y=X_train_std[:,1], c=y_train, cmap='Set1')", "_____no_output_____" ] ], [ [ "## 2. Apply LR / SVM / Decision Tree below\nUsing the 2 selected features, apply LR / SVM / decision tree. **Try your own hyperparameters (C, gamma, tree depth, etc)** to maximize the prediction accuracy. (Just try several values. 
You don't need to show your answer is the maximum.)", "_____no_output_____" ], [ "## LR", "_____no_output_____" ] ], [ [ "CLr = np.arange(0.000000000000001, 0.0225, 0.0001)\nacrcLr = [] # acurracy\nfor c in CLr:\n lr = LogisticRegression(C=c,penalty='l1',solver='liblinear')\n lr.fit(X_train_std, y_train)\n acrcLr.append([lr.score(X_train_std, y_train), lr.score(X_test_std, y_test), c])\nacrcLr = np.array(acrcLr)\nplt.plot(acrcLr[:,2], acrcLr[:,0])\nplt.plot(acrcLr[:,2], acrcLr[:,1])\nplt.xlabel('C')\nplt.ylabel('Accuracy')\nplt.title('Logistic Regression')\nplt.show()", "_____no_output_____" ] ], [ [ "Choose `c=.01`", "_____no_output_____" ] ], [ [ "c = .01\nlr = LogisticRegression(C=c,penalty='l1',solver='liblinear')\nlr.fit(X_train_std, y_train)\nprint(f'Accuracy when [c={c}] \\nTrain {lr.score(X_train_std, y_train)}\\nTest {lr.score(X_test_std, y_test)}')", "Accuracy when [c=0.01] \nTrain 0.9474759264662971\nTest 0.9469026548672567\n" ] ], [ [ "## SVM", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC\nG = np.arange(0.00001, 0.3, 0.005)\nacrcSvm = []\nfor g in G:\n svm = SVC(kernel='rbf', gamma=g, C=1.0, random_state=0)\n svm.fit(X_train_std, y_train)\n acrcSvm.append([svm.score(X_train_std, y_train), svm.score(X_test_std, y_test), g])\nacrcSvm = np.array(acrcSvm)\nplt.plot(acrcSvm[:,2], acrcSvm[:,0])\nplt.plot(acrcSvm[:,2], acrcSvm[:,1])\nplt.xlabel('gamma')\nplt.ylabel('Accuracy')\nplt.title('SVM')\nplt.show()", "_____no_output_____" ] ], [ [ "Choose `gamma = 0.2`", "_____no_output_____" ] ], [ [ "g = 0.2\nsvm = SVC(kernel='rbf', gamma=g, C=1.0, random_state=0)\nsvm.fit(X_train_std, y_train)\nprint(f'Accuracy when [gamma={g}] \\nTrain {svm.score(X_train_std, y_train)}\\nTest {svm.score(X_test_std, y_test)}')", "Accuracy when [gamma=0.2] \nTrain 0.9482054274875985\nTest 0.9472430224642614\n" ] ], [ [ "## Decision Tree", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\ndepthTree = range(1, 6)\nacrcTree = []\nfor depth in depthTree:\n tree = DecisionTreeClassifier(criterion='gini', max_depth=depth, random_state=0)\n tree.fit(X_train_std, y_train)\n acrcTree.append([tree.score(X_train_std, y_train), tree.score(X_test_std, y_test), depth])\nacrcTree = np.array(acrcTree)\nplt.plot(acrcTree[:,2], acrcTree[:,0])\nplt.plot(acrcTree[:,2], acrcTree[:,1])\nplt.xlabel('max_depth')\nplt.ylabel('Accuracy')\nplt.title('Decision Tree')\nplt.show()", "_____no_output_____" ] ], [ [ "Choose `max_depth=2`:", "_____no_output_____" ] ], [ [ "depth = 2\ntree = DecisionTreeClassifier(criterion='gini', max_depth=depth, random_state=0)\ntree.fit(X_train_std, y_train)\nprint(f'Accuracy when [max_depth={depth}] \\nTrain {tree.score(X_train_std, y_train)}\\nTest {tree.score(X_test_std, y_test)}')", "Accuracy when [max_depth=2] \nTrain 0.9474759264662971\nTest 0.9472430224642614\n" ] ], [ [ "## 3. Visualize the classification\nVisualize your classifiers using the plot_decision_regions function from PML Ch. 
3", "_____no_output_____" ] ], [ [ "def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], \n y=X[y == cl, 1],\n alpha=0.8, \n c=colors[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n\n # highlight test samples\n if test_idx:\n # plot all samples\n X_test, y_test = X[test_idx, :], y[test_idx]\n\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolor='black',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=100, \n label='test set')", "_____no_output_____" ], [ "X_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))", "_____no_output_____" ] ], [ [ "## LR\n`test_idx` removed on purpose", "_____no_output_____" ] ], [ [ "plot_decision_regions(X=X_combined_std, y=y_combined,\n classifier=lr)\nplt.xlabel(cdf.index[0])\nplt.ylabel(cdf.index[1])\nplt.legend(loc='lower left')\n\nplt.tight_layout()\n#plt.savefig('images/03_01.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "## Decision Tree", "_____no_output_____" ] ], [ [ "plot_decision_regions(X=X_combined_std, y=y_combined,\n classifier=tree)\nplt.xlabel(cdf.index[0])\nplt.ylabel(cdf.index[1])\nplt.legend(loc='lower left')\n\nplt.tight_layout()\n#plt.savefig('images/03_01.png', dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "## SVM (samples)", "_____no_output_____" ] ], [ [ "# Visualization of all features in a SVM model is too slow\n# Because the complexity is very high (sourse:https://scikit-learn.org/stable/modules/svm.html#complexity)\n# So use random samples(n=3000) instead\n\nsamples = np.random.randint(0, len(X_combined_std), size=3000)\nplot_decision_regions(X=X_combined_std[samples], y=y_combined[samples],\n classifier=svm)\nplt.xlabel(cdf.index[0] + '[samples]')\nplt.ylabel(cdf.index[1] + '[samples]')\nplt.legend(loc='lower left')\n\nplt.tight_layout()\n#plt.savefig('images/03_01.png', dpi=300)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06ccfc55764d4c17df2233d96e9aa6d61474d1b
71,042
ipynb
Jupyter Notebook
.ipynb_checkpoints/Python Crash Course-checkpoint.ipynb
rafia37/DSA5113-TA-class-repo
a90db3d2cc2ed32ab0cacfec979fd3bc700f63ce
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Python Crash Course-checkpoint.ipynb
rafia37/DSA5113-TA-class-repo
a90db3d2cc2ed32ab0cacfec979fd3bc700f63ce
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Python Crash Course-checkpoint.ipynb
rafia37/DSA5113-TA-class-repo
a90db3d2cc2ed32ab0cacfec979fd3bc700f63ce
[ "MIT" ]
1
2022-03-28T15:03:47.000Z
2022-03-28T15:03:47.000Z
23.423014
1,008
0.507263
[ [ [ "\"\"\"\nTitle: Python Crash Course\nAuthor: Rafia Bushra\nDescription: A python tutorial for DSA/ISE 5113 students\nLast Updated: 3/26/21\n\"\"\"", "_____no_output_____" ] ], [ [ "### Topics\nThis notebook covers the following topics -\n1. Basic Concepts\n 1. [Basic Syntax](#basic-syntax)\n 2. [Lists](#lists)\n 3. [String Manipulation](#string)\n 4. [Decision making (If statement)](#if)\n 5. Loops\n 1. [For loop](#for)\n 2. [While loop](#while)\n 6. [Function](#func)\n 7. [Scope](#scope)\n 8. Miscellaneous\n 1. [Dictionary](#dict)\n 2. [Tuples](#tuple)\n 3. [List Comprehension](#lc)\n 4. [Error Handling](#eh)\n 5. [Lambda Expressions](#le)\n 6. [Mapping Function](#mf)\n 7. [User Input](#ui)\n2. Advanced Concepts\n 1. [Numpy](#numpy)\n 2. [Pandas](#pandas)\n 3. [Matplotlib (Plotting)](#plot)\n 4. [pdb (Debugging)](#pdb)\n 5. [Other Useful Libraries](#oul)", "_____no_output_____" ], [ "# Basic Topics", "_____no_output_____" ], [ "### Basic Syntax <a class=\"anchor\" id=\"basic-syntax\"></a>", "_____no_output_____" ], [ "###### Hello World!", "_____no_output_____" ] ], [ [ "#A basic print statement to display given message\nprint(\"Hello World!\")", "Hello World!\n" ] ], [ [ "##### Basic Operations", "_____no_output_____" ] ], [ [ "#Addition\n2 + 10", "_____no_output_____" ], [ "#Subtraction\n2 - 10", "_____no_output_____" ], [ "#Multiplication\n2*10", "_____no_output_____" ], [ "#Division\n3/2", "_____no_output_____" ], [ "#Integer division\n3//2", "_____no_output_____" ], [ "#Raising to a power\n10**3", "_____no_output_____" ], [ "#Exponentiating - not the same as 10^3\n10e3", "_____no_output_____" ] ], [ [ "##### Defining Variables\nYou can define variables as `variable_name = value`\n- Variable names can be alphanumeric though it can't start with a number.\n- Variable names are case sensitive\n- The values that you assign to a variable will typically be of these 5 standard data types (In python, you can assign almost anything to a variable and not have to declare what type of variable it is)\n - Numbers (floats, integers, complex etc)\n - Strings*\n - List*\n - Tuple*\n - Dictionary* \n *Discussed in a later section. 
Will only show how to define them in this section.", "_____no_output_____" ] ], [ [ "#Numbers\nmy_num = 5113 #Example of defining an integer\nmy_float = 3.0 #Example of defining a float\n\n#Strings\ntruth = \"This crash course is just the tip of the iceberg o_O\"\n\n#Lists\nsame_type_list = [1,2,3,4,5] #A simple list of same type of objects - integers\nmixed_list = [1,2,\"three\", my_num, same_type_list] #A list containing many type of objects - integer, string, variable, another list\n\n#Dictionary\nsimple_dict = {\"red\": 1, \"blue\":2, \"green\":3} #Similar to a list but enclosed in curly braces {} and consists of key-value pairs\n\n#Tuple\naTuple = (1,2,3) #Similar to a list but enclosed in parenthesis ()", "_____no_output_____" ] ], [ [ "##### More print statements\nNow we're going to print the variables we defined in the previous cell and look at some more ways to use the print statement", "_____no_output_____" ] ], [ [ "#printing a variable\nprint(my_float)", "3.0\n" ], [ "#printing the truth!\nprint(truth)", "This crash course is just the tip of the iceberg o_O\n" ], [ "print(simple_dict)", "{'red': 1, 'blue': 2, 'green': 3}\n" ], [ "print(mixed_list) #Notice how the 4th & 5th objects got the value of the variables we defined earlier", "[1, 2, 'three', 5113, [1, 2, 3, 4, 5]]\n" ], [ "#Dynamic printing\nprint(\"This is DSA {}\".format(my_num)) #The value/variable given inside format replaces the curly braces in the string", "This is DSA 5113\n" ], [ "#When the dynamically set part is a number, we can set the precision\nprint(\"Value of pi up to 4 decimal places = {:.4f}\".format(3.141592653589793238)) ", "Value of pi up to 4 decimal places = 3.1416\n" ] ], [ [ "###### Variable Type & Conversion\nEvery variable has a type (int, float, string, list, etc) and some of them can be converted into certain types", "_____no_output_____" ] ], [ [ "#Finding out the type of a variable\ntype(my_float)", "_____no_output_____" ], [ "#printing the types of some other variables\nprint(type(my_num), type(simple_dict), type(truth), type(mixed_list))", "<class 'int'> <class 'dict'> <class 'str'> <class 'list'>\n" ], [ "#Converting anything to string\nstr(my_float)", "_____no_output_____" ], [ "str(simple_dict)", "_____no_output_____" ], [ "str(mixed_list)", "_____no_output_____" ], [ "#converting string to number\nthree = \"3\"\nint(three)", "_____no_output_____" ], [ "float(three)", "_____no_output_____" ], [ "#Converting tuple to a list\nlist(aTuple)", "_____no_output_____" ], [ "#Converting list to a tuple\ntuple(same_type_list)", "_____no_output_____" ] ], [ [ "### Lists <a class=\"anchor\" id=\"lists\"></a>", "_____no_output_____" ], [ "A versatile datatype that can be thought of as a collection of comma-seperated values. \nEach item in a list has an index. The indices start with 0. \nThe items in a list doesn't need to be of the same type ", "_____no_output_____" ] ], [ [ "#Defining some lists\nl1 = [1,2,3,4,5,6]\nl2 = [\"a\", \"b\", \"c\", \"d\"]\nl3 = list(range(2,50,2)) #Creates a list going from 2 up to and not including 50 in increments of 2\nprint(l3) #displaying l3", "[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]\n" ], [ "#Length of a list \n#The len command gives the size of the list i.e. the total number of items\nlen(l1)", "_____no_output_____" ], [ "len(l2)", "_____no_output_____" ] ], [ [ "**Accessing list items** \nList items can be accessed using their index. 
\nThe first item has an index of 0, the next one has 1 and so on", "_____no_output_____" ] ], [ [ "#First item of l2 is \"a\" and third item of l1 is 3\nprint(\"First item of l2: {}\".format(l2[0])) # l2[0] accesses the item at 0th index of l2\nprint(\"Third item of l1: {}\".format(l1[2])) # l1[0] accesses the item at 2nd index of l1", "First item of l2: a\nThird item of l1: 3\n" ] ], [ [ "**Indexing in reverse** List items can be accessed in reversed order using negative indices. \nThe last item canbe accessed with -1, second from last with -2 and so on", "_____no_output_____" ] ], [ [ "print(\"Last item of l3: {}\".format(l3[-1])) \nprint(\"Third to last item of l1: {}\".format(l1[-3]))", "Last item of l3: 48\nThird to last item of l1: 4\n" ] ], [ [ "**Slicing** \nPortions of a list can be chosen using some or all of 3 numbers - starting index, stopping index and increment \nThe syntax is `list_name[start:stop:increment]`", "_____no_output_____" ] ], [ [ "#If I want 2,3,4 from list l1, I want to start from index 1 and end at index 3\n#The stopping indes is not included so we choose 3+1=4 as stopping index\nl1[1:4]", "_____no_output_____" ], [ "#In this example we chose items from idex 1 up to index 5, skipping an item every time (increment of 2)\nl1[1:6:2]", "_____no_output_____" ], [ "#If we just indicate starting index, everything after that is kept\nl1[2:]", "_____no_output_____" ], [ "#If we just indicate stopping index, everything up to that is kept\nl1[:4]", "_____no_output_____" ], [ "#Using reverse index\nl1[:-2] #Everything except for the last 2 items", "_____no_output_____" ] ], [ [ "##### List operations", "_____no_output_____" ] ], [ [ "#\"adding\" two lists results in concatenation\nl4 = l1 + l2\nl4", "_____no_output_____" ], [ "#Multiplying a list by a scalar results in repetition\n[\"hello\"]*5", "_____no_output_____" ], [ "l2*3", "_____no_output_____" ], [ "[2]*7", "_____no_output_____" ] ], [ [ "##### Some other popular list manipulation functions", "_____no_output_____" ] ], [ [ "#Appending to the end of an existing string\nl2.append(\"e\")\nl2", "_____no_output_____" ], [ "#Insert an item at a particular index - list_name(index, value)\nl2.insert(2,\"f\")\nl2", "_____no_output_____" ], [ "#sorting a list\nl2.sort()\nl2", "_____no_output_____" ], [ "#removes item by index and returns the removed item\nl4.pop(3) #remove the item at index 3\nl4", "_____no_output_____" ], [ "#remove item by matching value\nl4.remove(\"a\")\nl4", "_____no_output_____" ], [ "#maximum or minimum value of a list\nmax(l3)\n#min(l3) for minimum", "_____no_output_____" ] ], [ [ "### String Manipulation <a class=\"anchor\" id=\"string\"></a> ", "_____no_output_____" ], [ "Strings are values enclosed in single quotes (' ') or double quotes (\" \") \nThese are characters or a series of characters and can be manipulated in very similar way to lists, though they have their own special functions ", "_____no_output_____" ] ], [ [ "#Defining some strings\nstr1 = \"I hear Rafia is a harsh grader\"\nstr2 = \"NO NEED TO SHOUT\"\nstr3 = \"fine, no caps lock\"", "_____no_output_____" ] ], [ [ "**Accessing & Slicing**", "_____no_output_____" ] ], [ [ "#Very similar to lists\nprint(str1[:12]) #Takes the 1st 10 characters\nprint(str1[0]) #Accesses the first character\nprint(str2[-5:]) #Takes last 5 characters\nprint(str3[6:13]) #Takes 6 through 9", "I hear Rafia\nI\nSHOUT\nno caps\n" ] ], [ [ "**Other popular string manipulation functions**", "_____no_output_____" ] ], [ [ "#Splitting a string based on a 
sperator - str_name.split(separator)\nprint(str1.split(\" \")) #separating based on space\nprint(str2.split()) #If no argument is given to split, default separator is space\nprint(str3.split(\",\")) #separating based on space", "['I', 'hear', 'Rafia', 'is', 'a', 'harsh', 'grader']\n['NO', 'NEED', 'TO', 'SHOUT']\n['fine', ' no caps lock']\n" ], [ "#Changing case\nprint(str2.lower()) #All lower case\nprint(str3.upper()) #All upper case\nprint(str3.capitalize()) #Only first letter upper case\nprint(\"Red\".swapcase()) #swaps cases", "no need to shout\nFINE, NO CAPS LOCK\nFine, no caps lock\nrED\n" ], [ "#Replace characters by given string\nstr1.replace(\"harsh\", \"good\")", "_____no_output_____" ], [ "#Find a given pattern in a string\nstr1.find(\"Rafia\") #Returns the index of where the pattern is found", "_____no_output_____" ], [ "#Concatenating and formating string\nprint(str2 + \" -- \" + str3) #adding string concatenates them\nstr4 = \"Strings can be anything, like {} is a string\".format(12345)\nprint(str4)", "NO NEED TO SHOUT -- fine, no caps lock\nStrings can be anything, like 12345 is a string\n" ], [ "#Like lists, we can multiply to repeat\n\"Hi\"*4", "_____no_output_____" ], [ "#Like lists, we can use len command to find the size of a string\nlen(\"apples\")", "_____no_output_____" ] ], [ [ "**Special Characters**", "_____no_output_____" ] ], [ [ "#\\n makes a new line\nprint(\"This is making \\n a new line\")\n#\\t inserts a tab\nprint(\"This just inserts \\t a tab\")", "This is making \n a new line\nThis just inserts \t a tab\n" ] ], [ [ "### If Statement <a class=\"anchor\" id=\"if\"></a>", "_____no_output_____" ], [ "Executing blockes of code based on whether or not a given condition is true \nThe syntax is -\n```python\nif (condition):\n Do somthing\nelif (condition):\n Do some other thing\nelse:\n Do somet other thing\n``` \nOnly one block will execute - the condition that returns true first \nYou can use as many elif blocks as needed", "_____no_output_____" ] ], [ [ "if (\"c\" in l2):\n print(\"Yes c is in l2\")\n l2.remove(\"c\")\n print(\"But now it's removed. Here's the new list\")\n print(l2)", "Yes c is in l2\nBut now it's removed. Here's the new list\n['a', 'b', 'd', 'e', 'f']\n" ], [ "a = 5 #defining a variable", "_____no_output_____" ], [ "if (a>10):\n print(\"a is greater than 10\")\nelse:\n print(\"a is less than 10\")", "a is less than 10\n" ], [ "if (a>5):\n print(\"a is greater than 5\")\nelif (a<5):\n print(\"a is less than 5\")\nelse:\n print(\"a is equal to 5\")", "a is equal to 5\n" ], [ "# assigning a value to variable using if statement\nstr5 = \"This is a great class\"\nb = \"yes\" if \"great\" in str5 else \"no\" #if great is in str5, b will get a value of yes, otherwise it will be no\nc = 1 if a>10 else 0 #if the variable a is greater than 10, c will be 1, otherwise 0\nprint(\"b = {}, c = {}\".format(b,c))", "b = yes, c = 0\n" ] ], [ [ "## Loops\nLoops are an essential tool in python that allows you to repeatedly excute a block of code given certain conditions or based on interating over a given list or array. There's two main types of loops in python - `For` and `While`. 
There's also `Do..While` loop in python by combinging the Do command and While command but I won't discuss that here.", "_____no_output_____" ], [ "### For Loop <a class=\"anchor\" id=\"for\"></a>", "_____no_output_____" ], [ "For loops are useful when you want to iterate a certain number of times or when you want to iterate over a list or array type object \n```python\nfor i in list_name:\n do something\n```", "_____no_output_____" ] ], [ [ "#Looping a certain number of time\nfor i in range(10): #iterating over a list going from 0 to 9\n a = i*5\n print(\"Multiply {} by 5 gives {}\".format(i, a))", "Multiply 0 by 5 gives 0\nMultiply 1 by 5 gives 5\nMultiply 2 by 5 gives 10\nMultiply 3 by 5 gives 15\nMultiply 4 by 5 gives 20\nMultiply 5 by 5 gives 25\nMultiply 6 by 5 gives 30\nMultiply 7 by 5 gives 35\nMultiply 8 by 5 gives 40\nMultiply 9 by 5 gives 45\n" ], [ "#Looping over a list\nfor item in l4:\n str_item = str(item)\n print(\"{} - {}\".format(str_item, type(str_item)))", "1 - <class 'str'>\n2 - <class 'str'>\n3 - <class 'str'>\n5 - <class 'str'>\n6 - <class 'str'>\nb - <class 'str'>\nc - <class 'str'>\nd - <class 'str'>\n" ] ], [ [ "**Loop Control Statements** You can control the execution of a loop using 3 statements - \n- `break` : This breaks out of a loop and moves on to the next segment of your code\n- `continue` : This skips any code below it (inside the loop) and moves on to the next iteration\n- `pass` : It's used when a statement is required syntactically but you don't want any code to execute", "_____no_output_____" ], [ "Demonstrating `break`", "_____no_output_____" ] ], [ [ "#l4 is a list that contains both integers and numbers\nl4", "_____no_output_____" ] ], [ [ "So if you try to add numbers to the string elements, you'll get an error. \nTo avoid it when iterating over this list, you can insert a break statement in your loop so that your code breaks out of the loop when it encounters a string.", "_____no_output_____" ] ], [ [ "for i in l4:\n if type(i)==str:\n print(\"Encountered a string, breaking out of the loop\")\n break\n tmp = i+10\n print(\"Added 10 to list item {} to get {}\".format(i, tmp))", "Added 10 to list item 1 to get 11\nAdded 10 to list item 2 to get 12\nAdded 10 to list item 3 to get 13\nAdded 10 to list item 5 to get 15\nAdded 10 to list item 6 to get 16\nEncountered a string, breaking out of the loop\n" ] ], [ [ "Demonstrating `continue`", "_____no_output_____" ], [ "But now, with the `break` statement, it breaks out of the loop any time it encounters string element. If the next element after a string element is an integer, we're missing out on it. \n \nThat is where the continue statment comes in. If you use `continue` instead of `break` then, instead of breaking out of the loop, you just skip the current iteration and move to the next one. i.e. 
you move on to the next element and check again whether it's a string or not and so on..", "_____no_output_____" ] ], [ [ "for i in l4:\n if type(i)==str:\n print(\"Encountered a string, moving on to the next element\")\n continue\n tmp = i+10\n print(\"Added 10 to list item {} to get {}\".format(i, tmp))", "Added 10 to list item 1 to get 11\nAdded 10 to list item 2 to get 12\nAdded 10 to list item 3 to get 13\nAdded 10 to list item 5 to get 15\nAdded 10 to list item 6 to get 16\nEncountered a string, moving on to the next element\nEncountered a string, moving on to the next element\nEncountered a string, moving on to the next element\n" ] ], [ [ "Demonstrating `pass`", "_____no_output_____" ], [ "`pass` is more of a placeholder. If you start a loop, you are bound by syntax to write at least one statement inside it. If you don't want to write anything yet, you can use a `pass` statement to avoid getting an error", "_____no_output_____" ] ], [ [ "for i in l4:\n pass", "_____no_output_____" ] ], [ [ "**Popular functions related to loops** There's a lot of usefull functions in python that work well with loops e.g. (range, unpack(*), tuple, split etc.) But there are two very important ones that go hand-in-hand with loops - `zip` & `enumerate` - so these are the ones I'm discussing here.\n\n- `zip` : Used when you want to iterate over two lists of equal length (If the length are not equal, it only iterates up to the length of the shorter list)\n- `enumerate` : Used when you want the index of the list item you're iterating over", "_____no_output_____" ] ], [ [ "print(len(l1), len(l3))", "6 24\n" ], [ "for a, b in zip(l1, l3):\n print(\"list 1 item is {}, corresponding list 3 item is {}\".format(a,b))", "list 1 item is 1, corresponding list 3 item is 2\nlist 1 item is 2, corresponding list 3 item is 4\nlist 1 item is 3, corresponding list 3 item is 6\nlist 1 item is 4, corresponding list 3 item is 8\nlist 1 item is 5, corresponding list 3 item is 10\nlist 1 item is 6, corresponding list 3 item is 12\n" ], [ "for i, (a,b) in enumerate(zip(l1,l3)):\n print(\"At index {}, list 1 item is {}, corresponding list 3 item is {}\".format(i, a, b))", "At index 0, list 1 item is 1, corresponding list 3 item is 2\nAt index 1, list 1 item is 2, corresponding list 3 item is 4\nAt index 2, list 1 item is 3, corresponding list 3 item is 6\nAt index 3, list 1 item is 4, corresponding list 3 item is 8\nAt index 4, list 1 item is 5, corresponding list 3 item is 10\nAt index 5, list 1 item is 6, corresponding list 3 item is 12\n" ] ], [ [ "### While Loop <a class=\"anchor\" id=\"while\"></a>", "_____no_output_____" ], [ "While loops are usefull when you want to iterate a code block **until** a certain condition is satified. 
While loops often need a counter variable that increments as the loop goes on.\n```python\nwhile (condition):\n do something\n```", "_____no_output_____" ] ], [ [ "counter = 10\nwhile counter>0:\n print(\"The counter is still positive and right now, it's {}\".format(counter))\n counter-= 1 #incrementing the counter, reducing it by 1 in every iteration", "The counter is still positive and right now, it's 10\nThe counter is still positive and right now, it's 9\nThe counter is still positive and right now, it's 8\nThe counter is still positive and right now, it's 7\nThe counter is still positive and right now, it's 6\nThe counter is still positive and right now, it's 5\nThe counter is still positive and right now, it's 4\nThe counter is still positive and right now, it's 3\nThe counter is still positive and right now, it's 2\nThe counter is still positive and right now, it's 1\n" ] ], [ [ "`pass`, `break` and `continue` statements all work well with `while` loop. `zip` and `enumerate` doesn't usually pair with while since it doesn't iterate over list type objects", "_____no_output_____" ], [ "### Function <a class=\"anchor\" id=\"func\"></a>", "_____no_output_____" ], [ "In python, apart from using the built-in functions, you can define your own customized functions using the following syntax -\n\n```python\ndef function_name(arg1, arg2):\n value = do something using arg1 & arg2\n return value\n \n#calling your function\nfunction_name(value1, value2)\n```\n\nThis is useful when you find yourself repeathing a block of code often.", "_____no_output_____" ] ], [ [ "#Defining the function\ndef arithmatic_operations(num1, num2):\n \"\"\"\n A function to perform a series of arithmatic operations on num1 and num2\n Returns the final result as an integer rounded up/down\n \"\"\"\n add = num1 + num2\n mltply = add*num2\n sbtrct = mltply - num2\n divide = sbtrct/num2\n result = round(divide)\n \n return result", "_____no_output_____" ], [ "#Anything put inside a multi-line comment (\"\"\" \"\"\") inside a function, is called a doc-string. \n#You can describe your function inside \"\"\" \"\"\" and then retrieve this information by doing help(function_name)\nhelp(arithmatic_operations)", "Help on function ariethmatic_operations in module __main__:\n\nariethmatic_operations(num1, num2)\n A function to perform a series of ariethmatic operations on num1 and num2\n Returns the final result as an integer rounded up/down\n\n" ], [ "#Calling the function\nresA = arithmatic_operations(10, 5)\nresA", "_____no_output_____" ], [ "arithmatic_operations(10, 15)", "_____no_output_____" ] ], [ [ "**Setting default values** You can use default argument in you parameter list to set default values or optional arguments \nDefault arguments are optional parameters for a function i.e. you can call the function without these parameters \n```python\ndef new_func(arg1, arg2, arg3=5):\n result = arg1 + arg2 + arg3\n return result\n```\nHere, arg3 is the optional argument because you've set it to a default value of 5. If you don't provide arg3 when you call this function, arg3 will assume a value of 5. If you don't provide arg1 or arg2, you'll get an error because they are required/positional arguments", "_____no_output_____" ], [ "Now imagine if someone were to call the `arithmatic_operations` function using string arguments, they'd get an error - because you can't perform arithmatic operations on a string. In that case, we want to be able to convert the input to a number. 
Let's instroduce a keyword argument `convert` to handle such cases", "_____no_output_____" ] ], [ [ "#Defining the function\ndef new_arith(num1, num2, convert=False):\n \"\"\"\n A new function function that can handle even string arguments \n \"\"\"\n if convert!=False:\n num1 = float(num1)\n num2 = float(num2)\n \n add = num1 + num2\n mltply = add*num2\n sbtrct = mltply - num2\n divide = sbtrct/num2\n result = round(divide)\n \n return result", "_____no_output_____" ], [ "#Handles numbers as usual\n#Function works fine even if we don't specify convert\nnew_arith(10, 5)", "_____no_output_____" ], [ "#Since we didn't specify convert, it's assumed to be False\n#strings are not converted and we get an error\nnew_arith(\"10\", \"5\")", "_____no_output_____" ], [ "new_arith(\"10\", \"5\", convert=True)", "_____no_output_____" ] ], [ [ "### Scope <a class=\"anchor\" id=\"scope\"></a>", "_____no_output_____" ], [ "The variables in a program are not accessible by every part of the program. Based on accessibility, there are two types of variables - global variable and local variable. \n \nGlobal variables are variables that can be accessed by any part of the program. Example from this notebook would be `str1`, `str2`, `truth`, `l1` etc. These variables can be accesed by this entire notebook. \n \nLocal variables are variables that can only be accessed in certain parts of the program, e.g. variables defined inside function. Example from this notebook would be `mltply`, `sbtrct`, `add`, `convert`, `result` etc. these variables are only defined inside the function and can only be accessed by the respective functions", "_____no_output_____" ] ], [ [ "result", "_____no_output_____" ], [ "mltply", "_____no_output_____" ] ], [ [ "## Miscellaneous", "_____no_output_____" ], [ "### Dictionary <a class=\"anchor\" id=\"dict\"></a>", "_____no_output_____" ], [ "Dictionaries are another iterable data type that comes in comma separated, key-value pairs.", "_____no_output_____" ] ], [ [ "#Definging some dictionaries\ndict1 = {} #One way to define an empty dictionary\ndict2 = dict() #One way to define an empty dictionary or convert another data type into a dictionary\nou_mascots = {\"Name\": \"Boomer\", \"Species\": \"Horse\", \"Partner\": \"Sooner\", \"Represents\": \"Oklahoma Sooners\"}\ndict3 = {1:\"uno\", 2:34, \"three\": [1,2,3], 4:(4,5), 5:ou_mascots}", "_____no_output_____" ], [ "ou_mascots", "_____no_output_____" ], [ "dict3 #Dictionary values can be of any type - string, number, lists, even dictionary", "_____no_output_____" ] ], [ [ "###### Accessing elements", "_____no_output_____" ] ], [ [ "ou_mascots[\"Name\"]", "_____no_output_____" ], [ "ou_mascots.get(\"Partner\")", "_____no_output_____" ] ], [ [ "###### Updating Dictionary", "_____no_output_____" ] ], [ [ "#Adding new element\ndict1[\"new_element\"] = 5113\ndict1", "_____no_output_____" ], [ "#Deleting\ndel dict3[1] #removes the entry with key 1\ndict1.clear() #removes all entries\ndel dict2 #deletes entire dictionary", "_____no_output_____" ], [ "dict3", "_____no_output_____" ], [ "dict1", "_____no_output_____" ], [ "dict2", "_____no_output_____" ] ], [ [ "###### Useful Dictionary Functions", "_____no_output_____" ] ], [ [ "ou_mascots.keys() #Returns keys", "_____no_output_____" ], [ "ou_mascots.items() #Returns key-value pairs as tuples", "_____no_output_____" ], [ "ou_mascots.values() #Returns values", "_____no_output_____" ], [ "ou_mascots.pop(\"Species\") #removes given key and returns value", "_____no_output_____" ], [ "len(dict1)", 
"_____no_output_____" ] ], [ [ "### Tuples <a class=\"anchor\" id=\"tuple\"></a>", "_____no_output_____" ], [ "Tuples are another iterable and sequence data type. Almost everything disccussed in the list section can be applied to tuples and they work in the same way - operations, functions etc.", "_____no_output_____" ] ], [ [ "#Defining some tuples\ntup1 = (20,) #If your tuple has only one element, you still have to use a comma\ntup2 = (1,3,4,6,7)\ntup3 = (\"a\", \"b\", \"c\")\ntup4 = (5,6,7)", "_____no_output_____" ], [ "#The key difference with lists, you can't change tuple items\ntup2[3] = 4", "_____no_output_____" ], [ "#You can use tuples to define deictionaries\ndict(zip(tup3, tup4))", "_____no_output_____" ] ], [ [ "### List Comprehension <a class=\"anchor\" id=\"lc\"></a>", "_____no_output_____" ], [ "List comprehension is a quick way to create a new list from an existing list (or any other iterable like tuples or dictionaries). The syntax is as follows -\n```python\nnew_list = [(x+5) for x in existing_list]\n```\nThe above one line code is the same as writing the following lengthy code block:\n```python\nnew_list=[]\nfor x in existing_list:\n value = x + 5\n new_list.append(value)\n```", "_____no_output_____" ] ], [ [ "print(l3)", "[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]\n" ], [ "#We need a new list of numbers that are an even multiple of 5\n#We already have a list of even numbers up to 48 - l3\n#time to create the new list\nl5 = [2*i for i in l3]\nprint(l5)", "[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96]\n" ] ], [ [ "### Error Handling <a class=\"anchor\" id=\"eh\"></a>", "_____no_output_____" ], [ "Sometimes we might have a code block, especially in a loop or a function that might not work for all kind of values. In that case, error hadnling is something to consider in order to avoid error and continue on with the rest of the program. \nErrors can be handled in many ways depending on your needs but here I'm showing the `try .. except` method.", "_____no_output_____" ] ], [ [ "#inserting another string in l4\nl4.insert(2, \"a\")\nl4", "_____no_output_____" ], [ "#let's try running the arithmatic_operations functions on the elements of l4\nfor item in l4:\n try:\n res = ariethmatic_operations(item,5)\n print(\"list item {}, result {}\".format(item, res))\n except:\n print(\"Could not perform arithmatic operations for list item {}\".format(item))", "list item 1, result 5\nlist item 2, result 6\nCould not perform arithmatic operations for list item a\nlist item 3, result 7\nlist item 5, result 9\nlist item 6, result 10\nCould not perform arithmatic operations for list item b\nCould not perform arithmatic operations for list item c\nCould not perform arithmatic operations for list item d\n" ] ], [ [ "### Lambda Expression <a class=\"anchor\" id=\"le\"></a>", "_____no_output_____" ], [ "A quick way to define short anonymous functions - one liner functions. \nHandy when you keep repeating an expression and it's too small to define a formal function. 
\n```python\n#Defining\nx = lambda arg : expression\n\n#calling\nx(value)\n```\nThis is equivalent to -\n```python\n#Defining\ndef x(arg):\n result = expression\n return result\n\n#calling\nx(value)\n```", "_____no_output_____" ] ], [ [ "#small function with 1 argument\nx = lambda a : a + 10\nx(5)", "_____no_output_____" ], [ "#small function with multiple arguments\nx = lambda a,b,c : ((a + 10)*b)/c\nx(5,10,2)", "_____no_output_____" ] ], [ [ "### Mapping Function <a class=\"anchor\" id=\"mf\"></a>", "_____no_output_____" ], [ "`map` function is quick way to apply a function to many values using an iterable (lists, tuples etc). The function to apply can be a built in function, user defined function or even a lambda expression. In fact, mapping and lambda expression work really well together. The syntax is as follows : \n```python\nmap(function_name, list_name)\n```\nThe above one line code is eqivalent to the lengthy code block below -\n```python\nfor item in list_name:\n function_name(list_name)\n```", "_____no_output_____" ], [ "**applying the built-in `type` function to the dictionary values**", "_____no_output_____" ] ], [ [ "dict3", "_____no_output_____" ], [ "result = map(type, dict3.values())\nlist(result)", "_____no_output_____" ] ], [ [ "**applying the user-defined `arithmetic_operations` function to two lists**", "_____no_output_____" ] ], [ [ "print(l1, l3)", "[1, 2, 3, 4, 5, 6] [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48]\n" ], [ "result = map(ariethmatic_operations, l1, l3) #mapped up to the shorter of the two lists\nlist(result)", "_____no_output_____" ] ], [ [ "**combining lambda expression and mapping function**", "_____no_output_____" ] ], [ [ "numbers1 = [1, 2, 3]\nnumbers2 = [4, 5, 6]\n \nresult = map(lambda x, y: x + y, numbers1, numbers2)\nlist(result)", "_____no_output_____" ] ], [ [ "### User Input <a class=\"anchor\" id=\"ui\"></a>", "_____no_output_____" ], [ "Sometimes, it is necessary to take user input and you can do that in python using the `input` command. \nThe `input` command returns the user input as a string so, always remember to convert the input to the data type you need. \n```python\ninput(\"Your customized prompt goes here\")\n```", "_____no_output_____" ] ], [ [ "inp = input(\"please input two integers seperated by comma\")", "please input two integers seperated by comma8,12\n" ], [ "inp", "_____no_output_____" ], [ "#let's apply the arithmetic_operation function to this user input\na,b = inp.split(\",\")\na", "_____no_output_____" ], [ "ariethmatic_operations(int(a), int(b)) #Need to convert to integers since this one doesn't handle strings", "_____no_output_____" ], [ "new_arith(a,b, convert=True)", "_____no_output_____" ] ], [ [ "# Advanced Topics", "_____no_output_____" ], [ "### Numpy <a class=\"anchor\" id=\"numpy\"></a>", "_____no_output_____" ], [ "### Pandas <a class=\"anchor\" id=\"pandas\"></a>", "_____no_output_____" ], [ "### Plotting <a class=\"anchor\" id=\"plot\"></a>", "_____no_output_____" ], [ "### Debugging <a class=\"anchor\" id=\"pdb\"></a>", "_____no_output_____" ], [ "### Other Useful Libraries <a class=\"anchor\" id=\"oul\"></a>", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06cd0b12144d986a65582a59d9fdc1bc85c76f9
241,177
ipynb
Jupyter Notebook
zz_test/100-slots.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
2
2019-07-17T18:51:26.000Z
2019-07-24T19:45:23.000Z
zz_test/100-slots.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
3
2019-01-16T10:56:50.000Z
2020-11-16T16:30:48.000Z
zz_test/100-slots.ipynb
sethbam9/tutorials
c259636682304cb516e9048ca8df5a3ab92c62cc
[ "MIT" ]
2
2020-12-17T15:41:33.000Z
2021-11-03T18:23:07.000Z
33.839905
684
0.454409
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from fixture import TestApp", "_____no_output_____" ], [ "As = TestApp(globals())", "_____no_output_____" ], [ "def basic():\n for n in As.select(\n slot=True, start=False, center=True, end=False, size=1, offset=1\n ):\n As.perform(\"plain\", n)\n As.perform(\"plain\", n, withPassage=False, withNodes=True)\n As.perform(\"pretty\", n)\n As.perform(\"pretty\", n, withPassage=False, withNodes=True)", "_____no_output_____" ] ], [ [ "## Run all corpora", "_____no_output_____" ] ], [ [ "As.testSet()", "_____no_output_____" ], [ "As.test(basic)", "_____no_output_____" ] ], [ [ "## Run specific corpora", "_____no_output_____" ] ], [ [ "As.testSet(\"uruk\")", "_____no_output_____" ], [ "As.test(basic, refresh=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06cd4e3cb484b7a346000fdf987c6911105b0d3
33,847
ipynb
Jupyter Notebook
webgrabber_wikilisten.ipynb
TechLabs-Dortmund/nutritional-value-determination
45f042621d675580524571137af970bb9b9374f3
[ "MIT" ]
null
null
null
webgrabber_wikilisten.ipynb
TechLabs-Dortmund/nutritional-value-determination
45f042621d675580524571137af970bb9b9374f3
[ "MIT" ]
null
null
null
webgrabber_wikilisten.ipynb
TechLabs-Dortmund/nutritional-value-determination
45f042621d675580524571137af970bb9b9374f3
[ "MIT" ]
null
null
null
136.479839
9,188
0.675038
[ [ [ "# webgrabber für Listen von Wikipedia\n", "_____no_output_____" ] ], [ [ "# Gebäckliste\nimport requests\nfrom bs4 import BeautifulSoup\n\n# man muss der liste einen letzten eintrag geben, weil sonst weitere listen unter der eigentlichen ausgelesen werden.\ndef grab_list(url, last_item): # wenn wikipedia eine Tabelle anzeigt\n grabbed_list = []\n r = requests.get(url)\n text = r.text\n soup = BeautifulSoup(text, 'lxml')\n soup.prettify()\n matches = soup.find_all('tr')\n \n for index, row in enumerate(matches):\n try:\n obj = row.find('td').a.get('title')\n if obj.endswith(' (page does not exist)'): obj = obj.replace(' (page does not exist)', '') \n grabbed_list.append(obj)\n if obj == last_item:\n break\n except AttributeError:\n continue\n return grabbed_list\n\ndef grab_list2(url, last_item): # wenn wikipedia eine bullet-point liste anzeigt\n grabbed_list = []\n r = requests.get(url)\n text = r.text\n soup = BeautifulSoup(text, 'lxml')\n soup.prettify()\n matches = soup.find_all('li')\n \n for index, row in enumerate(matches):\n try:\n obj = row.a.get('title')\n if obj.endswith(' (page does not exist)'): obj = obj.replace(' (page does not exist)', '') \n grabbed_list.append(obj)\n if obj == last_item:\n break\n except AttributeError:\n continue\n return grabbed_list", "_____no_output_____" ], [ "url_gebaeck = r'https://en.wikipedia.org/wiki/List_of_pastries'\ngebaeckliste = grab_list(url_gebaeck, 'Zlebia')\nprint(gebaeckliste)", "['Alexandertorte', 'Alfajor', 'Aloo pie', 'Apple pie', 'Apple strudel', 'Bahulu', 'Bakewell pudding', 'Baklava', 'Bakpia Pathok', 'Banbury cake', 'Banitsa', 'Banket (food)', 'Bear claw', 'BeaverTails', 'Bedfordshire clanger', 'Belekoy', 'Belokranjska povitica', 'Berliner (doughnut)', 'Bethmännchen', 'Bichon au citron', 'Bierock', 'Birnbrot', 'Bizcocho', 'Börek', 'Bossche bol', 'Bougatsa', 'Boyoz', 'Bridie', 'Briouat', 'Bruttiboni', 'Buko pie', 'Bundevara', 'Canelé', 'Cannoli', 'Carac (pastry)', 'Charlotte (cake)', 'ChaSan', 'Chatti Pathiri', 'Cherry pie', 'Chorley cake', 'Chouquette', 'Choux pastry', 'Cinnamon Roll', 'Coca (pastry)', 'Conejito', 'Cornish pasty', 'Conversation (pastry)', 'Cornulețe', 'Coussin de Lyon', 'Cream horn', 'Crêpes Suzette', 'Crocetta of Caltanissetta', 'Croissant', 'Croline', 'Cronut', 'Croquembouche', 'Cuban pastelito (page does not exist)', 'Curry puff', 'Dabby-Doughs', 'Danish pastry', 'Djevrek', 'Dutch letter', 'Dutch Baby Pancake (page does not exist)', 'Eccles cake', 'Éclair (pastry)', 'Empanada', 'Ensaïmada', 'Fa gao', 'Fazuelos', 'Fig roll', 'Flaky pastry', 'Flaugnarde', 'Flaons', 'Flies graveyard', 'Franzbrötchen', 'Galette', 'Gâteau Basque', 'Shorgoghal', 'Gibanica', 'Gujia', 'Gözleme', 'Gulab jamun', 'Gundain', 'Gustavus Adolphus pastry', 'Gyeongju bread', 'Haddekuche', 'Hamantash', 'Hellimli', 'Heong Peng', 'Hot water crust pastry', 'Huff paste', 'Inipit', 'Jachnun', 'Jalebi', 'Jambon', 'Jesuite', 'Joulutorttu (pastry)', 'Kalács', 'Kanafeh', 'Karakudamono', 'Kifli', 'Klobasnek', 'Knieküchle', 'Knish', 'Kolache', 'Kolompeh', 'Kołacz', 'Komaj sehen', 'Kouign-amann', 'Krempita', 'Kringle', 'Kroštule', 'Kūčiukai', 'Kürtőskalács', \"Ladies' navels\", 'Lattice (pastry)', 'Leipziger Lerche', 'Linzer torte', 'Lotus seed bun', \"Ma'amoul\", 'Macaron', 'Makmur', 'Makroudh', 'Malsouka', 'Mandelkubb', 'Mantecadas', 'Marillenknödel', 'Marry girl cake', 'Masan (pastry)', 'Miguelitos', 'Milhoja', 'Milk-cream strudel', 'Mille-feuille', 'Mooncake', 'Moorkop', 'Muskazine', 'Nazook', \"Nun's puffs\", 'Nunt', 'Öçpoçmaq', 'Ox-tongue 
pastry', 'Pain au chocolat', 'Pain aux raisins', 'Palmier', 'Pannekoek', 'Pan dulce (sweet bread)', 'Panzarotti', 'Papanași', 'Paper wrapped cake', 'Paris–Brest', 'Paste (pasty)', 'Pastel (food)', 'Pastizz', 'Pastry heart', 'Pâté Chaud', 'Pecan pie', 'Filo', 'Pie', 'Pineapple bun', 'Pineapple cake', 'Pionono', 'Pithivier', 'Pizza', 'Plăcintă', 'Poffertjes', 'Pogača', 'Poppy seed roll', 'Pot pie', 'Prekmurska gibanica', 'Pretzel', 'Profiterole', 'Puff pastry', \"Puits d'amour\", 'Punsch-roll', 'Punschkrapfen', 'Qottab', 'Quesito', 'Rab cake', 'Remonce', 'Rhubarb tart', 'Roti john', 'Roti tissue', 'Roze koek', 'Rugelach', \"Runeberg's torte\", 'Rustico (pastry)', 'Sad cake', 'Samosa', 'Schaumrolle', 'Schnecken', 'Schneeballe', 'Schuxen', 'Semla', 'Sfenj', 'Sfințișori', 'Sfogliatelle', 'Shortcrust pastry', 'Sou (pastry)', 'Spanakopita', 'Streusel', 'Strudel', 'Stutenkerl', 'Sufganiyah', 'Suncake (Taiwan)', 'Sweetheart cake', 'Taiyaki', 'Toaster pastry', 'Torpil', 'Tortell', 'Tortita negra', 'Trdelník', 'Tu (cake)', 'Turnover (food)', 'Utap', 'Vatrushka', 'Vetkoek', 'Viennoiserie', 'Vol-au-vent', 'Welsh cake', 'Xuixo', 'Yurla (dish)', 'Zeeuwse bolus', 'Zlebia']\n" ], [ "# deutsche desserts\nurl_deutschedesserts = r'https://en.wikipedia.org/wiki/List_of_German_desserts'\ngermanpastrylist = grab_list(url_deutschedesserts, 'Zwetschgenkuchen')\nprint(germanpastrylist)", "['Aachener Printen', 'Bavarian cream', 'Berliner (doughnut)', 'Bethmännchen', 'Baumkuchen', 'Bratapfel', 'Bienenstich', 'Black Forest cake', 'Bremer Klaben', 'Brenntar', 'Buchteln', 'Buckwheat gateau', 'Carrot cake', 'Cheesecake', 'Dampfnudel', 'Dominostein', 'Donauwelle', 'Fasnacht (doughnut)', 'Frankfurter Kranz', 'Franzbrötchen', 'Gugelhupf', 'Germknödel', 'Garrapinyades', 'Götterspeise', 'Herrencreme', 'Kuchen', 'Lebkuchen', 'de:Linzer Auge', 'Makówki', 'Muskazine', 'Marzipan', 'Magenbrot', 'Nussecke (page does not exist)', 'Pfeffernüsse', 'Prinzregententorte', 'Rote Grütze', 'Rumtopf', 'Schneeball (pastry)', 'Schokokuss', 'Spaghettieis', 'Spekulatius', 'Springerle', 'Spritzgebäck', 'Spritzkuchen', 'Stollen', 'Streusel', 'Streuselkuchen', 'Tollatsch', 'Vanillekipferl', 'Welfenspeise', 'Wibele', 'Windbeutel', 'Zwetschgenkuchen']\n" ], [ "# Milchprodukte\nurl_dairy = r'https://en.wikipedia.org/wiki/List_of_dairy_products'\ndairyproductlist = grab_list(url_dairy, 'Yogurt') \nprint(dairyproductlist)", "['Aarts (food)', 'Acidophiline', 'Amasi', 'Ayran', 'Basundi', 'Bhuna khoya', 'Blaand', 'Black Kashk', 'Booza', 'Borhani', 'Buffalo curd', 'Bulgarian yogurt', 'Butter', 'Butterfat', 'Buttermilk', 'Buttermilk koldskål', 'Buttermilk', 'Cacık', 'Camel milk', 'Casein', 'Caudle', 'Chaas', 'Chal', 'Chalap', 'Chass', 'Cheese', 'Chocolate butter', 'Clabber (food)', 'Clotted cream', 'Condensed milk', 'Cottage cheese', 'Cream', 'Cream cheese', 'Crème anglaise', 'Crème fraîche', 'Cuajada', 'Curd', 'Curd snack', 'Custard', 'Dadiah', 'Daigo (dairy product)', 'Dondurma', \"Donkey's milk\", 'Dulce de Leche', 'Doogh', 'Evaporated milk', 'Eggnog', 'Filled milk', 'Filmjölk', 'Fromage frais', 'Fermented milk products', 'Frozen custard', 'Frozen yogurt', 'Gelato', 'Ghee', \"Goat's milk\", 'Gombe (dish)', 'Gomme (food)', 'Horse', 'Ice cream', 'Ice milk', 'Infant formula', 'Junket (dessert)', 'Junnu', 'Kalvdans', 'Kashk', 'Kaymak', 'Kefir', 'Khoa', 'Kulfi', 'Kumis', 'Lassi', 'Leben (milk product)', 'Malai', 'Malaiyo', 'Matzoon (yogurt)', 'Milk', 'Milk skin', 'Míša', 'Mitha Dahi', 'Mozzarella', 'Moose milk', 'Mursik', 'Paneer', 'Podmleč', 'Pomazánkové 
máslo', 'Powdered milk', 'Processed cheese', 'Pytia', 'Qatiq', 'Qimiq', 'Quark (dairy product)', 'Reindeer husbandry', 'Ryazhenka', 'Ricotta', 'Sarasson', 'Semifreddo', 'Sergem', 'Sheep milk', 'Shrikhand', 'Skorup', 'Skyr', 'Smen', 'Smetana (dairy product)', 'Snow cream', 'So (dairy product)', 'Soft serve', 'Sour cream', 'Soured milk', 'Spaghettieis', 'Strained yogurt', 'Súrmjólk', 'Sütlaç', 'Tarhana', 'Tuttis', 'Uunijuusto', 'Vaccenic acid', 'Varenets', 'Viili', 'Vla', 'Whey', 'Whey protein', 'Whipped cream', 'Yak butter', 'Pack yak', 'Yakult', 'Yayık ayranı', 'Ymer (dairy product)', 'Yogurt']\n" ], [ "# Cheeses\nurl_cheese = r'https://en.wikipedia.org/wiki/List_of_cheeses'\ncheeselist = grab_list(url_cheese, 'Rice cheese')\nprint(cheeselist)", "['Wagasi', 'Ethiopian cuisine', 'Caravane cheese', 'Chechil', 'Chhanabora', 'Byaslag', 'Chura kampo', 'Chura loenpa', 'Nguri', 'Rubing', 'Rushan cheese', 'Bandel cheese', 'Paneer', 'Chhena', 'Dahi Chhena', 'Kalari cheese', 'Kalimpong cheese', 'Dangke', 'Sakura cheese', 'Imsil', 'Byaslag', 'Flower of Rajya', 'Chhurpi', 'Kesong puti', 'Sirene', 'Kashkaval', 'Quark (dairy product)', 'Bergkäse', 'Lüneberg cheese', 'Sura Kees', 'Mondseer', 'Tyrolean grey cheese', 'Brussels cheese', 'Chimay Brewery', 'Herve cheese', 'Le Wavreumont', 'Limburger cheese', 'Maredsous cheese', 'Passendale cheese', 'Remoudou', 'Livno cheese', 'Herzegovina \"squeaking\" cheese', 'Trappista cheese', 'Vlašić cheese', 'Bosnian Smoked Cheese (Suhi Sir)', 'Cherni Vit (cheese)', 'Kashkaval', 'Sirene', 'Paški sir', 'Škripavac', 'Tounjski sir', 'Prgica', 'Dimsi', 'Akkawi', 'Anari cheese', 'Halloumi', 'Kefalotyri', 'Abertam cheese', 'Blaťácké zlato', 'Olomoucké syrečky', 'Hermelín', 'Danbo', 'Danish Blue', 'Esrom', 'Fynbo', 'Havarti', 'Maribo cheese', 'Molbo cheese', 'Saga (cheese)', 'Samsø cheese', 'Tybo', 'Vesterhavsost', 'Atleet', 'Eesti Juust', 'Kadaka juust', 'Aura cheese', 'Lappi cheese', 'Leipäjuusto', 'Oltermanni', 'Raejuusto', 'Sulguni', 'Anthotyros', 'Chloro (cheese)', 'Feta', 'Graviera', 'Kasseri', 'Kefalograviera', 'Kefalotyri', 'Kopanisti', 'Manouri', 'Metsovone', 'Myzithra', 'Tyrozouli', 'Xynomizithra', 'Xynotyro', 'Protected designation of origin', 'Liptauer', 'Orda (cheese)', 'Pálpusztai', 'Trappista cheese', 'Oázis', 'Balaton cheese', 'Karaván', 'Pannónia', 'Höfðingi', 'Šar cheese', 'Fried Camembert cheese', 'Jāņi cheese', 'Latvian cheese', 'Ġbejna', 'Cașcaval', 'Urdă', 'Brânză', 'Brânză de vaci (cow cheese)', 'Kolašinski sir', 'Pljevaljski sir', 'Podgorički sir', 'Nikšićki kozji sir', 'Njeguški sir', 'Kashkaval', 'Urdă', 'Belo Sirenje', 'Brunost', 'Gamalost', 'Geitost', 'Heidal cheese', 'Jarlsberg cheese', 'Nøkkelost', 'Norvegia', 'Pultost', 'Snøfrisk', 'Castelo Branco cheese', 'Queijo de Nisa', 'Queijo do Pico', 'Queijo de Azeitão', 'São Jorge cheese', 'Serra da Estrela cheese', 'Requeijão', 'Saloio', 'Santarém cheese', 'Brânzǎ de burduf', 'ro:Brânză de Suhaia', 'Brânză de vaci', 'Caș', 'Cașcaval', 'Năsal cheese', 'Telemea', 'Urdă', 'Bryndza', 'Circassian cheese', 'Korall', 'Quark (cheese)', 'Caciocavallo', 'Pule cheese', 'Bryndza', 'Liptauer', 'Ovčia hrudka', 'Kravská hrudka', 'Korbáčiky', 'Oštiepok', 'Parenica', 'Urda cheese', 'Quark (cheese)', 'Brie', 'Camembert', 'Mohant', 'Tolminc cheese', 'Ädelost', 'Blå Gotland', 'Grevé', 'Gräddost', 'Herrgårdsost', 'Hushållsost', 'Moose cheese', 'Prästost', 'Svecia', 'Västerbottensost', 'Bilozhar', 'Bukovinskyi', 'Bryndza', 'Dobrodar', 'Smetankowyi', 'Quark (cheese)', 'Ukraїnskyi', 'Vurda', 'Banbury cheese', 'Cheddar cheese', 
'Stilton cheese', 'Stinking Bishop cheese', 'Areesh cheese', 'Baramily', 'Domiati', 'Halumi', 'Istanboly', 'Mish', 'Rumi cheese', 'Lighvan cheese', 'Tzfat cheese', 'Tzfat cheese', 'Labneh', 'Kashkaval', 'Qishta', 'Halloumi', 'Akkawi', 'Areesh cheese', 'Baladi cheese', 'Basket cheese', 'Jameed', 'Jibneh Arabieh', 'Kashkaval', 'Qishta', 'Labneh', 'Syrian cheese', 'Nabulsi cheese', 'Surke', 'Syrian cheese', 'Antep peyniri', 'Armola peyniri', 'Beyaz peynir', 'Chechil', 'Çökelek', 'Çömlek cheese', 'String cheese', 'Ezine peyniri', 'Füme çerkes peyniri', 'Halloumi', 'Kars gravyer cheese', 'Kaşar', 'Kopanisti peyniri', 'Curd', 'Mihaliç Peyniri', 'Strained yogurt', 'Telli peynir', 'Tulum (cheese)', 'Van otlu peyniri', 'Bleu Bénédictin', 'Cheddar cheese', 'Cheese curds', 'Oka cheese', 'Pikauba (cheese)', 'Turrialba cheese', 'Cuajada', 'Crema (cheese)', 'Crema (cheese)', 'Cuajada', 'Quesillo', 'Queijo seco', 'Adobera cheese', 'Añejo cheese', 'Asadero cheese', 'Chiapas cheese', 'Cotija cheese', 'Criollo cheese', 'Lingallin (cheese)', 'Oaxaca cheese', 'Crema Mexicana', 'Chihuahua cheese', 'Queso de cuajo', 'Queso Fresco', 'Queso Panela', 'Quesillo', 'Bergenost', 'Brick cheese', 'Cheese curds', 'Colby cheese', 'Colby-Jack cheese', 'Colorado Blackie', 'Cream cheese', 'Creole cream cheese', 'Cup Cheese', 'Farmer cheese', 'Hoop cheese', 'Humboldt Fog', 'Liederkranz cheese', 'Monterey Jack', 'Muenster cheese', 'Nacho cheese', 'Pepper jack cheese', 'Pinconning cheese', 'Provel cheese', 'Red Hawk cheese', 'String cheese', 'Teleme cheese', 'Cremoso cheese', 'Criollo cheese (Argentina)', 'Goya cheese', 'Reggianito', 'Sardo cheese', 'Chubut cheese', 'Tandil cheese', 'Mar del Plata cheese', 'Chaqueño', 'Menonita (cheese)', 'Catupiry', 'Minas cheese', 'Queijo coalho', 'Colony cheese', 'Queijo Meia Cura', 'Canastra cheese', 'Queijo Cobocó', 'Queijo-do-Reino', 'Queijo do Serro', 'Queijo Manteiga', 'Queijo prato', 'Requeijão', 'Chanco cheese', 'Panquehue (cheese)', 'Renaico (cheese)', 'Queso Campesino', 'Queso costeño', 'Cuajada', 'Queso Paipa', 'Queso Pera', 'Quesillo', 'Guayanés cheese', 'Queso crineja', 'Queso de mano', 'Queso Llanero', 'Queso Palmita', 'Queso Parma de Barinitas', 'Queso telita', 'Cottage cheese', 'Farmer cheese', 'Port wine cheese', 'Smoked cheese', 'Soy cheese', 'Rice cheese']\n" ], [ "url_fruit = r'https://en.wikipedia.org/wiki/List_of_culinary_fruits'\nfruits = grab_list(url_fruit, 'Yantok')\nprint(fruits)", "['Malus pumila', 'Pseudocydonia sinensis', 'Aronia melanocarpa', 'Planchonia careya', 'Crataegus aestivalis', 'Crataegus rhipidophylla', 'Genipa americana', 'Eriobotrya japonica', 'Flacourtia inermis', 'Mespilus germanica', 'Malus niedzwetzkyana', 'Pyrus communis', 'Cydonia oblonga', 'Flacourtia indica', 'Sorbus aucuparia', 'Manilkara zapota', 'Amelanchier alnifolia', 'Pyracantha coccinea', 'Shipova', 'Sorbus domestica', 'Malus angustifolia', 'Heteromeles arbutifolia', 'Euterpe oleracea', 'Malpighia emarginata', 'Irvingia gabonensis', 'Garcinia livingstonei', 'Elaeis guineensis', 'Cornus × unalaschkensis', 'Pourouma cecropiifolia', 'Spondias dulcis', 'Elaeis oleifera', 'Prunus americana', 'Prunus armeniaca', 'Mangifera pajang', 'Prunus maritima', 'Antidesma bunius', 'Mangifera caesia', 'Prunus serotina', 'Euclea crispa', 'Parajubaea torallyi', 'Syzygium australe', 'Pleiogynium timoriense', 'Dacryodes edulis', 'Calamus erectus', 'Calligonum junceum', 'Cornus canadensis', 'Casimiroa edulis', 'Eugenia reinwardtiana', 'Byrsonima crassifolia', 'Prunus avium', 'Elaeagnus multiflora', 'Eugenia 
involucrata', 'Ziziphus mauritiana', 'Prunus virginiana', 'Cassytha melantha', 'Chrysobalanus icaco', 'Cocos nucifera', 'Cornus mas', 'Terminalia catappa', 'Prunus rivularis', 'Empetrum nigrum', 'Murraya koenigii', 'Prunus domestica subsp. insititia', 'Phoenix dactylifera', 'Santalum acuminatum', 'Phyllanthus emblica', 'Owenia acidula', 'Litsea garciae', 'Syzygium fibrosum', 'Prunus umbellata', 'Gomortega keule', 'Greengage', 'Buchanania obovata', 'Myrciaria floribunda', 'Terminalia ferdinandiana', 'Celtis occidentalis', 'Nephelium xerospermoides', 'Syzygium cumini', 'Elaeagnus umbellata', 'Butia capitata', 'Spondias purpurea', 'Ziziphus jujuba', 'Prunus salicina spp.', 'King coconut', 'Nephelium hypoleucum', 'Acronychia acidula', 'Buchanania arborescens', 'Dimocarpus longan', 'Litchi chinensis', 'Syzygium malaccense', 'Pouteria sapota', 'Mangifera indica', 'Bouea macrophylla', 'Sclerocarya birrea', 'Synsepalum dulcificum', 'Mauritia flexuosa', 'Kunzea pomifera', 'Viburnum lentago', 'Peach', 'Azadirachta indica', 'Choerospondias axillaris', 'Myristica fragrans', 'Phyllanthus acidus', 'Prunus persica', 'Bunchosia glandulifera', 'Caryocar brasiliense', 'Grewia asiatica', 'Coccoloba diversifolia', 'Canarium ovatum', 'Eugenia uniflora', 'Prunus domestica', 'Nephelium mutabile', 'Nephelium lappaceum', 'Syzygium suborbiculare', 'Syzygium luehmannii', 'Sageretia theezans', 'Sansapote', 'Savannah cherry', 'Serenoa repens', 'Lodoicea maldivica', 'Coccoloba uvifera', 'Ardisia elliptica', 'Shepherdia argentea', 'Prunus spinosa', 'Mimusops elengi', 'Melicoccus bijugatus', 'Dialium indum', 'Dialium cochinchinense', 'Dialium guineense', 'Syzygium aqueum', 'Syzygium samarangense', 'Acronychia oblongifolia', 'Terminalia carpentariae', 'Manilkara kauki', 'Myrica rubra', 'Spondias mombin', 'Ximenia americana', 'Zwetschge', 'Pouteria caimito', 'Sambucus canadensis', 'Diospyros virginiana', 'Sambucus pubens', 'Billardiera scandens', 'Coffea arabica', 'Eugenia stipitata', 'Vasconcellea × heilbornii', 'Musa acuminata', 'Berberis vulgaris', 'Arctostaphylos uva-ursi', 'Carissa carandas', 'Vaccinium myrtillus', 'Myrtillocactus geometrizans', 'Ribes nigrum', 'Diospyros nigra', 'Vaccinium corymbosum', 'Eupomatia laurina', 'Eugenia brasiliensis', 'Psidium guineense', 'Stelechocarpus burahol', 'Chrysophyllum cainito', 'Muntingia calabura', 'Myrciaria dubia', 'Pouteria campechiana', 'Physalis peruviana', 'Pachycereus pringlei', 'Psidium cattleyanum', 'Dovyalis hebecarpa', 'Ugni molinae', 'Carissa spinarum', 'Psidium friedrichsthalianum', 'Vaccinium macrocarpon', 'Berberis darwinii', 'Diospyros lotus', 'Davidsonia jerseyana', 'Hylocereus undatus', 'Sambucus nigra', 'Feijoa sellowiana', 'Vitis labrusca', 'Passiflora quadrangularis', 'Glenniea philippinensis', 'Actinidia chinensis', 'Ribes uva-crispa', 'Vitis vinifera', 'Psidium guajava', 'Actinidia arguta', 'Lonicera caerulea', 'Lonicera periclymenum', 'Vaccinium ovatum', 'Plinia cauliflora', 'Dovyalis caffra', 'Actinidia deliciosa', 'Lansium parasiticum', 'Vaccinium vitis-idaea', 'Pouteria lucuma', 'Syzygium jambos', 'Mammea americana', 'Hancornia speciosa', 'Aristotelia chilensis', 'Podophyllum peltatum', 'Passiflora incarnata', 'Austromyrtus dulcis', 'Vaccinium floribundum', 'Vitis rotundifolia', 'Solanum quitoense', 'Acrotriche depressa', 'Davidsonia pruriens', 'Mahonia aquifolium', 'Carica papaya', 'Passiflora alata', 'Passiflora platyloba', 'Passiflora edulis', 'Pentadiplandra brazzeana', 'Solanum muricatum', 'Diospyros kaki', 'Cereus repandus', 'Eugenia 
luschnathiana', 'Punica granatum', 'Opuntia ficus-indica', 'Billardiera longiflora', 'Psidium rufum', 'Ribes rubrum', 'Vaccinium parvifolium', 'Flacourtia rukam', 'Carnegiea gigantea', 'Gaultheria shallon', 'Hippophae rhamnoides', 'Small-leaved fuchsia', 'Archirhodomyrtus beckleri', 'Davidsonia johnsonii', 'Quararibea cordata', 'Averrhoa carambola', 'Arbutus unedo', 'Billardiera cymosa', 'Passiflora ligularis', 'Solanum betaceum', 'Diospyros texana', 'Diospyros blancoi', 'Clausena lansium', 'Capparis mitchellii', 'Lycium barbarum', 'Passiflora edulis f flavicarpa', 'Aegle marmelos', 'Bailan melon', 'Banana melon', 'Canary melon', 'Cucumis prophetarum', 'Sicana odorifera', 'Crane melon', 'Crenshaw melon', 'Borassus flabellifer', 'Cantaloupe', 'Gaya melon', 'Honeydew melon', 'Cucumis metuliferus', 'Hydnora abyssinica', 'Crescentia cujete', 'Kajari melon', 'Kolkhoznitsa melon', 'Cucumis melo var. makuwa', 'Mirza melon', 'Cucumis melo', 'Strychnos spinosa', 'Cantaloupe', 'Santa Claus melon', 'Sprite melon', 'Tigger melon', 'Citrullus lanatus', 'Limonia acidissima', 'Citropsis articulata', 'Citrus × natsudaidai', 'Citrus medica ssp. bajoura', 'Citrus bergamia', 'Citrus × aurantium', 'Blood lime', 'Blood orange', 'Citrus medica var. sarcodactylus', '× Citrofortunella microcarpa', 'Cam sành', 'Kumquat', 'Citrus medica', 'Citrus × clementina', 'Citrus glauca', 'Etrog', 'Citrus australasica', 'Citrus × limonimedica', 'Citrus × paradisi', 'Haruka (citrus)', 'Hyuganatsu', 'Citrus cavaleriei', 'Citrus × iyo', 'Kumquat', 'Citrus × sphaerocarpa', 'Citrus hystrix', 'Kanpei', 'Kawachi Bankan', 'Citrus ×aurantiifolia', 'Kinkoji unshiu', 'Kinnow', 'Kiyomi', 'Kobayashi mikan', 'Koji orange', 'Kuchinotsu No.37', 'Citrus japonica', 'Citrus limon', 'Citrus × latifolia', 'Triphasia trifolia', 'Limequat', 'Citrus reticulata', 'Citrus mangshanensis', 'Melogold', 'Citrus × meyeri', 'Citrus myrtifolia', 'Ōgonkan', 'Orange (fruit)', 'Oroblanco', 'Kumquat', 'Citrus maxima', 'Pompia', 'Ponderosa lemon', 'Citrus × limonia', 'Citrus australis', 'Citrus unshiu', 'Shangjuan', 'Shonan gold', 'Citrus sudachi', 'Citrus limetta', 'Citrus × depressa', 'Citrus × tangelo', 'Citrus tangerina', 'Citrus reticulata x sinensis', 'Ugli fruit', 'Volkamer lemon', 'Citrus junos', 'Annona senegalensis', 'Rubus strigosus', 'Annona conica', 'Atemoya', 'Rubus probus', 'Rollinia deliciosa', 'Morus nigra', 'Rubus allegheniensis', 'Boysenberry', 'Annona scleroderma', 'Annona cherimola', 'Rubus chamaemorus', 'Rubus hayata-koidzumii', 'Maclura tricuspidata', 'Annona reticulata', 'Rubus flagellaris', 'Dillenia indica', 'Grewia retusifolia', 'Annona diversifolia', 'Rubus parvifolius', 'Rubus × loganobaccus', 'Annona crassiflora', 'Potentilla indica', 'Rubus moluccanus', 'Rubus adenotrichos', 'Rubus glaucus', 'Annona montana', 'Annona glabra', 'Morus rubra', 'Rosa rugosa', 'Rubus rosifolius', 'Rubus spectabilis', 'Annona purpurea', 'Annona muricata', 'Fragaria × ananassa', 'Annona squamosa', 'Tayberry', 'Rubus parviflorus', 'Rubus leucodermis', 'Morus alba', 'Fragaria vesca', 'Rubus phoenicolasius', 'Youngberry', 'Artocarpus altilis', 'Artocarpus camansi', 'Artocarpus integer', 'Ficus racemosa', 'Ficus platypoda', 'Duguetia confinis', 'Duguetia spixiana', 'Ficus carrii', 'Ficus carica', 'Pandanus tectorius', 'Artocarpus heterophyllus', 'Artocarpus parvus', 'Artocarpus lacucha', 'Artocarpus rigidus', 'Monstera deliciosa', 'Morinda citrifolia', 'Ananas comosus', 'Pandanus conoideus', 'Ficus coronata', 'Ficus aurea', 'Ficus sycomorus', 'Artocarpus 
odoratissimus', 'Ficus virens', 'Artocarpus hirsutus', 'Garcinia humilis', 'Blighia sapida', 'Aglaia teysmanniana', 'Garcinia atroviridis', 'Eleiodoxa conferta', 'Platonia insignis', 'Bemange', 'Pouteria australis', 'Melastoma affine', 'Boquila trifoliolata', 'Baccaurea ramiflora', 'Garcinia prainiana', 'Theobroma cacao', 'Garcinia madruno', 'Gaultheria hispida', 'Hymenaea courbaril', 'Theobroma grandiflorum', 'Durio zibethinus', 'Gaultheria procumbens', 'Momordica cochinchinensis', 'Garcinia morella', 'Garcinia gummi-gutta', 'Garcinia forbesii', 'Garcinia magnifolia', 'Garcinia pseudoguttifera', 'Pangium edule', 'Garcinia indica', 'Cola nitida', 'Garcinia parvifolia', 'Lardizabala biternata', 'Siraitia grosvenorii', 'Garcinia mangostana', 'Baccaurea racemosa', 'Garcinia dulcis', 'Asimina triloba', 'Red salak', 'Salacca zalacca', 'Sandoricum koetjape', 'Diploglottis campbellii', 'Vangueria madagascariensis', 'Trichosanthes beccariana', 'Vanilla planifolia', 'Yantok']\n" ], [ "url_vegetables = r'https://en.wikipedia.org/wiki/List_of_vegetables'\nvegetables = grab_list(url_vegetables, 'Wakame')\nprint(vegetables)", "['Amaranth', 'Xanthosoma sagittifolium', 'Centella asiatica', 'Arugula', 'Rubus pectinellus', 'Beet', 'Christella dentata', 'Chinese cabbage', 'Borage', 'Broccoli', 'Brooklime', 'Brussels sprout', 'Cabbage', 'Caraway', 'Hypochaeris radicata', 'Celery', 'Celtuce', 'Chaya (plant)', 'Chili pepper', 'Stellaria', 'Chicory', 'Chinese mallow', 'Collard greens', 'Common purslane', 'Corn salad', 'Garden cress', 'Cucumis prophetarum', 'Garland Chrysanthemum', 'Aegopodium podagraria', 'Dandelion', 'Dill', 'Endive', 'Chenopodium album', 'Fiddlehead', 'Telfairia occidentalis', 'Gnetum gnemon', 'Golden samphire', 'Good King Henry', 'Grape', 'Plantago major', 'Corchorus olitorius', 'Kai-lan', 'Kale', 'Kalette', 'Pringlea', 'Komatsuna', 'Adansonia', 'Talinum fruticosum', 'Corn salad', \"Lamb's quarters\", 'Land cress', 'Leaf celery', 'Lettuce', 'Houttuynia cordata', 'Basella alba', 'Malvaceae', 'Moringa oleifera', \"Miner's lettuce\", 'Mizuna greens', 'Sinapis alba', 'Napa cabbage', 'Tetragonia', 'Atriplex', 'Chinese cabbage', 'Papaya', 'Paracress', 'Pea', 'Cycas riuminiana', 'Phytolacca americana', 'Bauhinia purpurea', 'Radicchio', 'Rapini', 'Amaranthus dubius', 'Rock samphire', 'Osmunda regalis', 'Sculpit', 'Sea beet', 'Sea kale', 'Capsella bursa-pastoris', 'Crassocephalum', 'Celosia argentea', 'Sorrel', 'Sour cabbage', 'Spinach', 'Amaranthus spinosus', 'Portulaca oleracea', 'Abelmoschus manihot', 'Sweet potato', 'Chard', 'Xanthosoma brasiliense', 'Taro', 'Tatsoi', 'Turnip', 'Diplazium esculentum', 'Sesbania grandiflora', 'Viagra palm', 'Watercress', 'Ipomoea aquatica', 'Wheatgrass', 'Achillea millefolium', 'Rapeseed', 'Artocarpus blancoi', 'Armenian cucumber', 'Breadfruit', 'Artocarpus camansi', 'Momordica charantia', 'Cyclanthera pedata', 'Calabash', 'Chayote', 'Cooking banana', 'Durian', 'Gac', 'Anacolosa frutescens', 'Melothria scabra', 'Cucumber', 'Cucumis prophetarum', 'Eggplant', 'Coccinia grandis', 'Jackfruit', 'Cucumis metuliferus', 'Telosma procumbens', 'Luffa', 'Artocarpus mariannensis', 'Olive', 'Papaya', 'Pumpkin', 'Trichosanthes dioica', 'Luffa acutangula', 'Trichosanthes cucumerina', 'Momordica dioica', 'Squash (plant)', 'Tinda', 'Artocarpus treculianus', 'Tomatillo', 'Tomato', 'Vanilla', 'Cucumis anguria', 'Water melon', 'Winter melon', 'Zucchini', 'Bell pepper', 'Big Jim pepper', 'Cayenne pepper', 'Friggitello', 'Habanero', 'Hungarian wax pepper', 'Jalapeño', 'New Mexico 
chile', 'Peperoncino', 'Pimiento', 'Sandia pepper', 'Siling haba', 'Artichoke', 'Banana flower', 'Clitoria ternatea', 'Broccoli', 'Broccolini', 'Calabaza', 'Caper', 'Cauliflower', 'Telosma procumbens', 'Broussonetia luzonica', 'Pumpkin', 'Bauhinia purpurea', 'Daylily', 'Strongylodon macrobotrys', 'Loroco', 'Sesbania grandiflora', 'Zucchini', 'Zucchini', 'Apios americana', 'Asparagus bean', 'Azuki bean', 'Black-eyed pea', 'Clitoria ternatea', 'Chickpea', 'Common bean', 'Drumstick (vegetable)', 'Hyacinth Bean', 'Vicia faba', 'Chickpea', 'Green bean', 'Guar', 'Horse gram', 'Lablab purpureus', 'Lathyrus sativus', 'Lentil', 'Phaseolus lunatus', 'Moth bean', 'Mung bean', 'Okra', 'Pea', 'Peanut', 'Pigeon pea', 'Ricebean', 'Runner bean', 'Snap pea', 'Snow pea', 'Soybean', 'Lupinus mutabilis', 'Tepary bean', 'Urad (bean)', 'Mucuna pruriens', 'Winged bean', 'Asparagus', 'Banana pith', 'Cardoon', 'Celeriac', 'Celery', 'Chives', 'Elephant garlic', 'Fennel', 'Garlic', 'Allium tuberosum', 'Heart of palm', 'Kohlrabi', 'Kurrat', 'Landang', 'Lemongrass', 'Leek', 'Nelumbo nucifera', 'Nopal', 'Onion', 'Pearl onion', 'Potato onion', 'Ornithogalum pyrenaicum', 'Sago', 'Scallion', 'Salicornia', 'Shallot', 'Tree onion', 'Welsh onion', 'Allium tricoccum', 'Zizania latifolia', 'Pachyrhizus', 'Arracacha', 'Arrowleaf elephant ear', 'Bamboo shoot', 'Beet', 'Burdock', 'Broadleaf arrowhead', 'Camassia', 'Canna (plant)', 'Carrot', 'Zingiber cassumunar', 'Cassava', 'Chinese artichoke', 'Chinese ginger', 'Daikon', 'Lathyrus tuberosus', 'Amorphophallus paeoniifolius', 'Ensete', 'Giant swamp taro', 'Giant taro', 'Ginger', 'Parsley', 'Horseradish', 'Jerusalem artichoke', 'Jícama', 'Kaempferia galanga', 'Lengkuas', 'Alpinia officinarum', 'Mashua', 'Palmyra sprout', 'Parsnip', 'Conopodium majus', 'Tacca leontopetaloides', 'Potato', 'Psoralea esculenta', 'Radish', 'Rutabaga', 'Purple Salsify', 'Black salsify', 'Skirret', 'Rutabaga', 'Sweet potato', 'Taro', 'Ti (plant)', 'Cyperus esculentus', 'Turmeric', 'Turnip', 'Dioscorea alata', 'Ulluco', 'Wasabi', 'Water caltrop', 'Eleocharis dulcis', 'Yacón', 'Yam (vegetable)', 'Xanthosoma caracu', 'Aonori', 'Arame', 'Carola (sea vegetable)', 'Alaria esculenta', 'Palmaria palmata', 'Zostera', 'Gusô', 'Hijiki', 'Kombu', 'Laver (seaweed)', 'Mozuku', 'Nori', 'Ogonori', 'Caulerpa lentillifera', 'Sea lettuce', 'Wakame']\n" ], [ "url_seafood = r'https://en.wikipedia.org/wiki/List_of_types_of_seafood'\nseafood = grab_list2(url_seafood, 'Nautilus')\nprint(seafood)", "['Anchovies', 'Barracuda', 'Basa fish', 'Bass (fish)', 'Anoplopoma fimbria', 'Pufferfish', 'Bluefish', 'Bombay duck', 'Bream', 'Brill (fish)', 'Butter fish', 'Catfish', 'Cod', 'Squaliformes', 'Dorade', 'Eel', 'Flounder', 'Grouper', 'Haddock', 'Hake', 'Halibut', 'Herring', 'Ilish', 'John Dory', 'Lamprey', 'Lingcod', 'Mackerel', 'Mahi Mahi', 'Monkfish', 'Mullet (fish)', 'Orange roughy', 'Parrotfish', 'Patagonian toothfish', 'Perch', 'Pike (fish)', 'Pilchard', 'Pollock', 'Pomfret', 'Pompano', 'Sablefish', 'Salmon', 'Sanddab', 'Sardine', 'Bass (fish)', 'Shad', 'Shark', 'Skate (fish)', 'Smelt (fish)', 'Snakehead (fish)', 'Lutjanidae', 'Sole (fish)', 'Sprat', 'Sturgeon', 'Surimi', 'Swordfish', 'Tilapia', 'Tilefish', 'Trout', 'Tuna', 'Turbot', 'Wahoo', 'Coregonus', 'Whiting (fish)', 'Witch (righteye flounder)', 'Purified Water', 'Caviar', 'Ikura', 'Kazunoko', 'Cyclopterus lumpus', 'Masago', 'Shad', 'Tobiko', 'Crab', 'Crayfish', 'Lobster', 'Shrimp', 'Cockle (bivalve)', 'Cuttlefish', 'Clam', 'Concholepas concholepas', 'Mussel', 'Octopus', 
'Oyster', 'Common periwinkle', 'Scallop', 'Squid', 'Conch', 'Nautilus']\n" ], [ "url_seafood = r'https://en.wikipedia.org/wiki/List_of_seafood_dishes'\nseafood = grab_list2(url_seafood, 'Cuttlefish')\nprint(seafood)", "['Baik kut kyee kaik', 'Balchão', 'Bánh canh', 'Bisque (food)', 'Bún mắm', 'Bún riêu', 'Chowder', 'Cioppino', 'Crawfish pie', 'Curanto', 'Fideuà', 'Halabos', 'Hoe (dish)', 'Hoedeopbap', 'Kaeng som', 'Kedgeree', 'Maeuntang', 'Moules-frites', 'Namasu', 'New England clam bake', 'Paella', 'Paelya', 'Paila marina', 'Piaparan', 'Plateau de fruits de mer', 'Seafood basket', 'Seafood birdsnest', 'Seafood boil', 'Seafood cocktail', 'Seafood pizza', 'Stroganina', 'Sundubu jjigae', 'Surf and turf', 'Tinumok', 'Clam cake', 'Clam chowder', 'Clams casino', 'Clams oreganata', 'Fabes con almejas', 'Fried clams', 'Jaecheopguk', 'New England clam bake', 'Steamed clams', 'Stuffed clam', 'Crab puff', 'Fish heads', \"'Ota 'ika\", 'Ginataang sugpo', 'Bisque (food)', 'Lobster Newberg', 'Lobster roll', 'Lobster stew', 'Scampi', 'Miruhulee boava', 'Nakji-bokkeum', 'Nakji-yeonpo-tang', 'Polbo á feira', 'Pulpo a la campechana', 'Akashiyaki', 'San-nakji', 'Takoyaki', 'Takomeshi', 'Angels on horseback', 'Hangtown fry', 'Oyster omelette', 'Oyster sauce', 'Oyster vermicelli', 'Oysters Bienville', 'Oysters en brochette', 'Oysters Kirkpatrick', 'Oysters Rockefeller', 'Steak and oyster pie', 'Balao-balao', 'Biyaring', 'Bobó de camarão', 'Bún mắm', 'Camaron rebosado', 'Chakkoli', 'Chạo tôm', 'Coconut shrimp', 'Drunken shrimp', 'Ebi chili', 'Fried prawn', 'Ginataang hipon', 'Ginataang kalabasa', 'Halabos na hipon', 'Har gow', 'Nilasing na hipon', 'Okoy', 'Pininyahang hipon', 'Potted shrimps', 'Prawn cracker', 'Prawn cocktail', 'Shrimp ball', 'Shrimp DeJonghe', 'White boiled shrimp', 'Adobong pusit', 'Arròs negre', 'Dried shredded squid', 'Squid as food', 'Gising-gising', 'Ikameshi', 'Orange cuttlefish', 'Paella negra', 'Pancit choca', 'Squid cocktail', 'Cuttlefish']\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06cd7d764bd6bb8c9268c4a639aa79859a56408
18,821
ipynb
Jupyter Notebook
analysis/peiwen/milestone1.ipynb
data301-2020-winter1/course-project-group_6018
c566820ba331ffc2fe3442dafe75b6ac8ad48a7f
[ "MIT" ]
null
null
null
analysis/peiwen/milestone1.ipynb
data301-2020-winter1/course-project-group_6018
c566820ba331ffc2fe3442dafe75b6ac8ad48a7f
[ "MIT" ]
null
null
null
analysis/peiwen/milestone1.ipynb
data301-2020-winter1/course-project-group_6018
c566820ba331ffc2fe3442dafe75b6ac8ad48a7f
[ "MIT" ]
null
null
null
48.258974
1,647
0.427395
[ [ [ "import numpy as np\nimport pandas as pd\n\ndf=pd.read_csv('AirQualityUCI.csv')\ndf", "_____no_output_____" ], [ "df1 = (df\n .drop(columns={'Unnamed:15'})\n \n)\n\ndf", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d06ce0e6a3f92234e82caac2e058ebed83507af8
2,638
ipynb
Jupyter Notebook
Labs/Lab1.ipynb
peralegh/480
417c45cab7a61a74094e25906de399131a6a9905
[ "MIT" ]
null
null
null
Labs/Lab1.ipynb
peralegh/480
417c45cab7a61a74094e25906de399131a6a9905
[ "MIT" ]
null
null
null
Labs/Lab1.ipynb
peralegh/480
417c45cab7a61a74094e25906de399131a6a9905
[ "MIT" ]
null
null
null
16.696203
81
0.458302
[ [ [ "# My first notebook", "_____no_output_____" ] ], [ [ "print ('my first notebook')", "my first notebook\n" ], [ "1 + 2", "_____no_output_____" ], [ "int(1 + 2)", "_____no_output_____" ], [ "a = 3", "_____no_output_____" ], [ "print(a)", "3\n" ] ], [ [ "# Read Data from a file", "_____no_output_____" ] ], [ [ "import xlrd\nbook = xlrd.open_workbook(\"Diamonds.xls\")\nsheet = book.sheet_by_name(\"Diamonds\")", "_____no_output_____" ], [ "for row_index in range(1,5): # read the first 4 rows, skip the first row\n id_, weight, color,_,_,price = sheet.row_values(row_index)\n print(id_,weight,color,price)", "1.0 0.3 D 1302.0\n2.0 0.3 E 1510.0\n3.0 0.3 G 1510.0\n4.0 0.3 G 1260.0\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06ce670bd8370525427779473e04c9e59848ed7
4,045
ipynb
Jupyter Notebook
Day-1/Day1_assignment.ipynb
anjumrohra/LetsUpgrade_DataScience_Essentials
06542fdda0e07e955c53fbc6de95caeb47583aa4
[ "Apache-2.0" ]
1
2020-12-14T19:35:25.000Z
2020-12-14T19:35:25.000Z
Day-1/Day1_assignment.ipynb
anjumrohra/LetsUpgrade_DataScience_Essentials
06542fdda0e07e955c53fbc6de95caeb47583aa4
[ "Apache-2.0" ]
null
null
null
Day-1/Day1_assignment.ipynb
anjumrohra/LetsUpgrade_DataScience_Essentials
06542fdda0e07e955c53fbc6de95caeb47583aa4
[ "Apache-2.0" ]
null
null
null
22.348066
115
0.510507
[ [ [ "# Question 1\n\nGiven the following jumbled word, OBANWRI guess the correct English word.\n\nA. RANIBOW\n\nB. RAINBOW\n\nC. BOWRANI\n\nD. ROBWANI", "_____no_output_____" ] ], [ [ "import random\n\ndef shuffling(given):\n given = str(given)\n words = ['RAINBOW','RANIBOW','BOWRANI','ROBWANI']\n shuffled = ''.join(random.sample(given,len(given)))\n if shuffled=='RAINBOW':\n return shuffled\n print(\"The correct option is: RAINBOW\")\n else:\n #shuffling(given)\n print(shuffled,\"is incorrect\")\n print(\"The correct option is: RAINBOW\")\nshuffling('OBANWRI')", "BOIAWNR is incorrect\nThe correct option is: RAINBOW\n" ] ], [ [ "# Question 2\n\nWrite a program which prints “LETS UPGRADE”. (Please note that you have to\nprint in ALL CAPS as given)", "_____no_output_____" ] ], [ [ "string = \"Lets upgrade\"\nprint(string.upper())", "LETS UPGRADE\n" ] ], [ [ "# Question 3\n\nWrite a program that takes Cost Price and Selling Price as input and displays whether the transaction is a\nProfit or a Loss or neither.\n\nINPUT FORMAT:\n1. The first line contains the cost price.\n2. The second line contains the selling price.\n\nOUTPUT FORMAT:\n1. Print \"Profit\" if the transaction is a profit or \"Loss\" if it is a loss. \n2. If it is neither profit nor loss, print \"Neither\". (You must not have quotes in your output)", "_____no_output_____" ] ], [ [ "CP = float(input())\nSP = float(input())\n\nif CP<SP:\n print(\"Profit\")\nelif CP>SP:\n print(\"Loss\")\nelse:\n print(\"Neither\")", "20\n20\nNeither\n" ] ], [ [ "# Question 4\n\nWrite a program that takes an amount in Euros as input. You need to find its equivalent in\nRupees and display it. Assume 1 Euro equals Rs. 80.\nPlease note that you are expected to stick to the given input and output\nformat as in sample test cases. Please don't add any extra lines such as\n'Enter a number', etc.\nYour program should take only one number as input and display the output.", "_____no_output_____" ] ], [ [ "Euro = float(input())\nRupees = Euro * 80\nprint(Rupees)", "20\n1600.0\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06cf0b59b3f7f967400f6259936ee64f8f4e73c
2,203
ipynb
Jupyter Notebook
2017/Day 04.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
96
2018-04-21T07:53:34.000Z
2022-03-15T11:00:02.000Z
2017/Day 04.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
17
2019-02-07T05:14:47.000Z
2021-12-27T12:11:04.000Z
2017/Day 04.ipynb
AwesomeGitHubRepos/adventofcode
84ba7963a5d7905973f14bb1c2e3a59165f8b398
[ "MIT" ]
14
2019-02-05T06:34:15.000Z
2022-01-24T17:35:00.000Z
22.03
62
0.491602
[ [ [ "import aocd\n\ndata = aocd.get_data(day=4, year=2017)\nphrases = list(map(str.strip, data.splitlines()))", "_____no_output_____" ], [ "def is_valid(phrase):\n words = phrase.split()\n return len(words) == len(set(words))\n\ntests = {\n 'aa bb cc dd ee': True,\n 'aa bb cc dd aa': False,\n 'aa bb cc dd aaa': True,\n}\nfor phrase, expected in tests.items():\n assert is_valid(phrase) == expected\n\nprint('Part 1:', sum(map(is_valid, phrases)))", "Part 1: 383\n" ], [ "def is_valid_no_anagram(phrase):\n words = phrase.split()\n sorted_set = {''.join(sorted(w)) for w in words}\n return len(words) == len(sorted_set)\n\ntests = {\n 'abcde fghij': True,\n 'abcde xyz ecdab': False,\n 'a ab abc abd abf abj': True,\n 'iiii oiii ooii oooi oooo': True,\n 'oiii ioii iioi iiio': False,\n}\n\nfor phrase, expected in tests.items():\n assert is_valid_no_anagram(phrase) == expected\n\nprint('Part 2:', sum(map(is_valid_no_anagram, phrases)))", "Part 2: 265\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d06d008f6335c25c058c9c59e6442a162e91a4a0
713
ipynb
Jupyter Notebook
docs/source/tool/quant_platform/tiger.ipynb
Eric2827/dfdata
4db142232fc7127da3faae7c608772c72005cd25
[ "MIT" ]
null
null
null
docs/source/tool/quant_platform/tiger.ipynb
Eric2827/dfdata
4db142232fc7127da3faae7c608772c72005cd25
[ "MIT" ]
null
null
null
docs/source/tool/quant_platform/tiger.ipynb
Eric2827/dfdata
4db142232fc7127da3faae7c608772c72005cd25
[ "MIT" ]
null
null
null
16.97619
55
0.514727
[ [ [ "# 老虎量化\n\n老虎量化是美港股券商[老虎证券](https://www.itiger.com/)推出的量化平台。", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
d06d0f96c4f2b247b6cdf91df305892a99930e8a
42,639
ipynb
Jupyter Notebook
notebooks/28. Resolve issue 37-Reaction reversibility.ipynb
biosustain/p-thermo
d60550cbc4897e1a3f9caaabd45b6a808244ebd1
[ "Apache-2.0" ]
null
null
null
notebooks/28. Resolve issue 37-Reaction reversibility.ipynb
biosustain/p-thermo
d60550cbc4897e1a3f9caaabd45b6a808244ebd1
[ "Apache-2.0" ]
null
null
null
notebooks/28. Resolve issue 37-Reaction reversibility.ipynb
biosustain/p-thermo
d60550cbc4897e1a3f9caaabd45b6a808244ebd1
[ "Apache-2.0" ]
null
null
null
37.834073
5,293
0.525458
[ [ [ "# Introduction\nNow that I have removed the RNA/DNA node and we have fixed many pathways, I will re-visit the things that were raised in issue #37: 'Reaction reversibility'. There were reactions that we couldn't reverse or remove or they would kill the biomass. I will try to see if these problems have been resolved now. If not, I will dig into the underlying cause in a smanner similar to what was done in notebook 20. ", "_____no_output_____" ] ], [ [ "import cameo\nimport pandas as pd\nimport cobra.io\nimport escher\nfrom escher import Builder\nfrom cobra import Reaction", "_____no_output_____" ], [ "model = cobra.io.read_sbml_model('../model/p-thermo.xml')", "_____no_output_____" ], [ "model_e_coli = cameo.load_model('iML1515')", "_____no_output_____" ], [ "model_b_sub = cameo.load_model('iYO844')", "_____no_output_____" ] ], [ [ "__ALDD2x__\nshould be irreversible, but doing so kills the biomass growth completely at this moment. It needs to be changed as we right now have an erroneous energy generating cycle going from aad_c --> ac_c (+atp) --> acald --> accoa_c -->aad_c.\nApparently, unconciously i already fixed this problem in notebook 20. So this is fine now. ", "_____no_output_____" ], [ "__GLYO1__ This reaction has already been removed in notebook 20 to fix the glycine pathway.\n\n__DHORDfum__ Has been renamed to DHORD6 in notebook 20 in the first check of fixing dCMP. And the reversability has been fixed too. \n\n__OMPDC__ This has by chance also been fixed in notebook 20 in the first pass to fix dCMP biosynthesis.", "_____no_output_____" ], [ "__NADK__ The reaction is currently reversible, but should be irreversible, producing nadp and adp. \nStill, when I try to fix the flux in the direction it should be, it kills the biomass production. I will try to figure out why, likely it has to do with co-factor balance.", "_____no_output_____" ] ], [ [ "model.reactions.NADK.bounds = (0,1000)", "_____no_output_____" ], [ "model.reactions.ALAD_L.bounds = (-1000,0)", "_____no_output_____" ], [ "model.optimize().objective_value", "_____no_output_____" ], [ "cofactors = ['nad_c', 'nadh_c','', '', '', '']\n\nwith model:\n# model.add_boundary(model.metabolites.glc__D_c, type = 'sink', reaction_id = 'test')\n# model.add_boundary(model.metabolites.r5p_c , type = 'sink', reaction_id = 'test2')\n# model.add_boundary(model.metabolites.hco3_c, type = 'sink', reaction_id = 'test3')\n for met in model.reactions.biomass.metabolites:\n if met.id in cofactors:\n coeff = model.reactions.biomass.metabolites[met]\n model.reactions.biomass.add_metabolites({met:-coeff}) \n else: \n continue\n solution = model.optimize()\n #print (model.metabolites.glu__D_c.summary())\n #print ('test flux:', solution['test'])\n #print ('test2 flux:', solution['test2'])\n print (solution.objective_value)", "1.8496633304871162\n" ] ], [ [ "It seems that the NAD and NADH are the blocked metabolites for biomass generation. Now lets try to figure out where this problem lies. \nI think the problem lies in re-generating NAD. The model uses this reaction togehter with oth strange reactions to regenerate NAD, where normally in oxygen containing conditions I would expect respiration to do this. So let me see how bacillus and e. coli models do this and see if maybe some form of ETC is missing in our model. This would explain why adding the ATP synthase didn't influence our biomass prediction at all.\n\n__Flavin reductase__\nIn E. coli we observed that there is a flavin reductase in the genome, contributing to NADH regeneration. 
We've looked into the genome annotation for our strain, and have found that there is a flavin reductase annotated there aswell (https://www.genome.jp/dbget-bin/www_bget?ptl:AOT13_02085), but not in bacillus (fitting the model). Therefore, I will add this reaction into our model, named FADRx. \n\n__NADH dehydrogenase__\nThe NADH dehydrogenase, tansfering reducing equivalents from NADH to quinone, is the first part of the electron transport chain. The quinones then can transfer the electrons to pump out protons, which can allow ATP synthase to generate additional energy. in iML1515 this reaction is captures by NADH16pp, NADH17pp and NADH18pp. In B. subtilis NADH4 reflects this reaction. In our model, we don't currently have anything that resembles this reaction. However, in Beata's thesis (and the genome) we can find EC 1.6.5.3, which performs the a similar reaction to NADH16pp. Therefore, I will add this reactin into our model.\n\nIn our model, we also have the reactions QH2OR and NADHQOR, which somewhat resemble the NADHDH reaction. Both do not include proton translocation or are reversible. To prevent these reactions from forming a cycle and having incorrect duplicate reactions in the model, I will remove them. \n\n__CYOR__\nThe last 'step' in the model electron transport chain is the transfer of electrons from the quinone to oxygen, pumping protons out of the cell. E. coli has a CYTBO3_4pp reaction that shows this, performed by a cytochrome oxidase. The model doesnt have this reaction, but from Beata's thesis and the genome annotation one would expect this to be present. We found the reaction in a way similar to the E. coli model. Therefor I will add the CYTBO3 reaction to our model, as indicated in Beata's thesis. \n", "_____no_output_____" ] ], [ [ "model.add_reaction(Reaction(id='FADRx'))", "_____no_output_____" ], [ "model.reactions.FADRx.name = 'Flavin reductase'", "_____no_output_____" ], [ "model.reactions.FADRx.annotation = model_e_coli.reactions.FADRx.annotation", "_____no_output_____" ], [ "model.reactions.FADRx.add_metabolites({\n model.metabolites.fad_c:-1, \n model.metabolites.h_c: -1, \n model.metabolites.nadh_c:-1,\n model.metabolites.fadh2_c:1, \n model.metabolites.nad_c:1\n})", "_____no_output_____" ], [ "#add NADH dehydrogenase reaction\nmodel.add_reaction(Reaction(id='NADHDH'))", "_____no_output_____" ], [ "model.reactions.NADHDH.name = 'NADH Dehydrogenase (ubiquinone & 3.5 protons)'", "_____no_output_____" ], [ "model.reactions.NADHDH.annotation['ec-code'] = '1.6.5.3'\nmodel.reactions.NADHDH.annotation['kegg.reaction'] = 'R11945'", "_____no_output_____" ], [ "model.reactions.NADHDH.add_metabolites({\n model.metabolites.nadh_c:-1, model.metabolites.h_c: -4.5, model.metabolites.ubiquin_c:-1,\n model.metabolites.nad_c: 1, model.metabolites.h_e: 3.5, model.metabolites.qh2_c: 1\n})", "_____no_output_____" ], [ "model.remove_reactions(model.reactions.NADHQOR)", "C:\\Users\\vivmol\\AppData\\Local\\Continuum\\anaconda3\\envs\\g-thermo\\lib\\site-packages\\cobra\\core\\model.py:716: UserWarning:\n\nneed to pass in a list\n\n" ], [ "model.remove_reactions(model.reactions.QH2OR)", "C:\\Users\\vivmol\\AppData\\Local\\Continuum\\anaconda3\\envs\\g-thermo\\lib\\site-packages\\cobra\\core\\group.py:110: UserWarning:\n\nneed to pass in a list\n\n" ], [ "model.add_reaction(Reaction(id='CYTBO3'))", "_____no_output_____" ], [ "model.reactions.CYTBO3.name = 'Cytochrome oxidase bo3 (ubiquinol: 2.5 protons)'", "_____no_output_____" ], [ "model.reactions.CYTBO3.add_metabolites({\n 
model.metabolites.o2_c:-0.5, model.metabolites.h_c: -2.5, model.metabolites.qh2_c:-1, \n model.metabolites.h2o_c:1, model.metabolites.h_e: 2.5, model.metabolites.ubiquin_c:1\n})", "_____no_output_____" ] ], [ [ "In looking at the above, I also observed some other reactions that probably should not looked at and modified.", "_____no_output_____" ] ], [ [ "model.reactions.MALQOR.id = 'MDH2'", "_____no_output_____" ], [ "model.reactions.MDH2.bounds = (0,1000)", "_____no_output_____" ], [ "model.metabolites.hexcoa_c.id = 'hxcoa_c'", "_____no_output_____" ], [ "model.reactions.HEXOT.id = 'ACOAD2f'", "_____no_output_____" ], [ "model.metabolites.dccoa_c.id = 'dcacoa_c'", "_____no_output_____" ], [ "model.reactions.DECOT.id = 'ACOAD4f'", "_____no_output_____" ], [ "#in the wrong direction and id\nmodel.reactions.GLYCDH_1.id = 'HPYRRx'", "_____no_output_____" ], [ "model.reactions.HPYRRx.bounds = (-1000,0)", "_____no_output_____" ], [ "#in the wrong direction\nmodel.reactions.FMNRx.bounds = (-1000,0)", "_____no_output_____" ], [ "model.metabolites.get_by_id('3hbycoa_c').id = '3hbcoa_c'", "_____no_output_____" ] ], [ [ "Even with the changes above we still do not restore growth... Supplying nmn_c restores growth, but supplying aspartate (beginning of the pathway) doesn't sovle the problem. so maybe the problem lies more with the NAD biosynthesis pathway than really the regeneration anymore?", "_____no_output_____" ] ], [ [ "model.metabolites.nicrnt_c.name = 'Nicotinate ribonucleotide'", "_____no_output_____" ], [ "model.metabolites.ncam_c.name = 'Niacinamide'", "_____no_output_____" ], [ "#wrong direction\nmodel.reactions.QULNS.bounds = (-1000,0)\n#this rescued biomass accumulation! ", "_____no_output_____" ], [ "#connected to aspartate", "_____no_output_____" ], [ "model.optimize().objective_value", "_____no_output_____" ], [ "#save&commit\ncobra.io.write_sbml_model(model,'../model/p-thermo.xml')", "_____no_output_____" ] ], [ [ "Flux is carried through the \nStill it is strange that flux is not carried through the ETC, but is through the ATP synthase as one would expect in the presence of oxygen. Therefore I will investigate where the extracellular protons come from. \n\nIt seems all the extracellular protons come from the export of phosphate (pi_c) which is proton symport coupled. We are producing so much phosphate from the biomass reaction. Though in theory, phosphate should not be produced so much, as it is also used for the generation of ATP from ADP. Right now I don't really see how to solve this problem. I've made an issueof it and will look into this at another time. ", "_____no_output_____" ] ], [ [ "model.optimize()['ATPS4r']", "_____no_output_____" ], [ "model.metabolites.pi_c.summary()", "_____no_output_____" ] ], [ [ "I also noticed that now most ATP comes from dGTP. The production of dGDP should just play a role in supplying nucleotides for biomass and so the flux it carries be low. I will check where the majority of the dGTP comes from.\n\nWhat is happening is the following: dgtp is converted dgdp and atp (rct ATDGDm). The dgdp then reacts with pep to form dGTP again. 
Pep formation is somewhat energy neutral, but it is wierd the metabolism decides to do this by themselves instead of flowing the pep into pyruvate via the normal glycolysis and into the TCA.", "_____no_output_____" ] ], [ [ "#reaction to be removed\nmodel.remove_reactions(model.reactions.PYRPT)", "C:\\Users\\vivmol\\AppData\\Local\\Continuum\\anaconda3\\envs\\g-thermo\\lib\\site-packages\\cobra\\core\\model.py:716: UserWarning:\n\nneed to pass in a list\n\nC:\\Users\\vivmol\\AppData\\Local\\Continuum\\anaconda3\\envs\\g-thermo\\lib\\site-packages\\cobra\\core\\group.py:110: UserWarning:\n\nneed to pass in a list\n\n" ] ], [ [ "Removing these reactions triggers normal ATP production via ETC and ATP synthase again. So this may be solved now. ", "_____no_output_____" ] ], [ [ "model.metabolites.pi_c.summary()", "_____no_output_____" ], [ "#save & commit\ncobra.io.write_sbml_model(model,'../model/p-thermo.xml')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06d15e98c303486aa985fa5312bca62c68206a4
61,793
ipynb
Jupyter Notebook
LearningOnMarkedData/week4/c02_w04_ex02.ipynb
ishatserka/MachineLearningAndDataAnalysisCoursera
e82e772df2f4aec162cb34ac6127df10d14a625a
[ "MIT" ]
null
null
null
LearningOnMarkedData/week4/c02_w04_ex02.ipynb
ishatserka/MachineLearningAndDataAnalysisCoursera
e82e772df2f4aec162cb34ac6127df10d14a625a
[ "MIT" ]
null
null
null
LearningOnMarkedData/week4/c02_w04_ex02.ipynb
ishatserka/MachineLearningAndDataAnalysisCoursera
e82e772df2f4aec162cb34ac6127df10d14a625a
[ "MIT" ]
null
null
null
98.084127
16,496
0.815416
[ [ [ "# Градиентный бустинг своими руками\n\n**Внимание:** в тексте задания произошли изменения - поменялось число деревьев (теперь 50), правило изменения величины шага в задании 3 и добавился параметр `random_state` у решающего дерева. Правильные ответы не поменялись, но теперь их проще получить. Также исправлена опечатка в функции `gbm_predict`.\n\nВ этом задании будет использоваться датасет `boston` из `sklearn.datasets`. Оставьте последние 25% объектов для контроля качества, разделив `X` и `y` на `X_train`, `y_train` и `X_test`, `y_test`.\n\nЦелью задания будет реализовать простой вариант градиентного бустинга над регрессионными деревьями для случая квадратичной функции потерь.", "_____no_output_____" ] ], [ [ "from sklearn import datasets, model_selection\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np", "_____no_output_____" ], [ "boston = datasets.load_boston()\nX_train, X_test = boston.data[: 380, :], boston.data[381 :, :] \ny_train, y_test = boston.target[: 380], boston.target[381 :]", "_____no_output_____" ] ], [ [ "## Задание 1\n\nКак вы уже знаете из лекций, **бустинг** - это метод построения композиций базовых алгоритмов с помощью последовательного добавления к текущей композиции нового алгоритма с некоторым коэффициентом. \n\nГрадиентный бустинг обучает каждый новый алгоритм так, чтобы он приближал антиградиент ошибки по ответам композиции на обучающей выборке. Аналогично минимизации функций методом градиентного спуска, в градиентном бустинге мы подправляем композицию, изменяя алгоритм в направлении антиградиента ошибки.\n\nВоспользуйтесь формулой из лекций, задающей ответы на обучающей выборке, на которые нужно обучать новый алгоритм (фактически это лишь чуть более подробно расписанный градиент от ошибки), и получите частный ее случай, если функция потерь `L` - квадрат отклонения ответа композиции `a(x)` от правильного ответа `y` на данном `x`.\n\nЕсли вы давно не считали производную самостоятельно, вам поможет таблица производных элементарных функций (которую несложно найти в интернете) и правило дифференцирования сложной функции. После дифференцирования квадрата у вас возникнет множитель 2 — т.к. нам все равно предстоит выбирать коэффициент, с которым будет добавлен новый базовый алгоритм, проигноируйте этот множитель при дальнейшем построении алгоритма.", "_____no_output_____" ] ], [ [ "def accent_l(z, y): \n '''result = list()\n for i in range(0, len(y)):\n result.append(-(y[i] - z[i]))\n '''\n return -1.0*(z - y)", "_____no_output_____" ] ], [ [ "## Задание 2\n\nЗаведите массив для объектов `DecisionTreeRegressor` (будем их использовать в качестве базовых алгоритмов) и для вещественных чисел (это будут коэффициенты перед базовыми алгоритмами). \n\nВ цикле от обучите последовательно 50 решающих деревьев с параметрами `max_depth=5` и `random_state=42` (остальные параметры - по умолчанию). В бустинге зачастую используются сотни и тысячи деревьев, но мы ограничимся 50, чтобы алгоритм работал быстрее, и его было проще отлаживать (т.к. цель задания разобраться, как работает метод). Каждое дерево должно обучаться на одном и том же множестве объектов, но ответы, которые учится прогнозировать дерево, будут меняться в соответствие с полученным в задании 1 правилом. \n\nПопробуйте для начала всегда брать коэффициент равным 0.9. Обычно оправдано выбирать коэффициент значительно меньшим - порядка 0.05 или 0.1, но т.к. 
в нашем учебном примере на стандартном датасете будет всего 50 деревьев, возьмем для начала шаг побольше.\n\nВ процессе реализации обучения вам потребуется функция, которая будет вычислять прогноз построенной на данный момент композиции деревьев на выборке `X`:\n\n```\ndef gbm_predict(X):\n return [sum([coeff * algo.predict([x])[0] for algo, coeff in zip(base_algorithms_list, coefficients_list)]) for x in X]\n(считаем, что base_algorithms_list - список с базовыми алгоритмами, coefficients_list - список с коэффициентами перед алгоритмами)\n```\n\nЭта же функция поможет вам получить прогноз на контрольной выборке и оценить качество работы вашего алгоритма с помощью `mean_squared_error` в `sklearn.metrics`. \n\nВозведите результат в степень 0.5, чтобы получить `RMSE`. Полученное значение `RMSE` — **ответ в пункте 2**.", "_____no_output_____" ] ], [ [ "base_algorithms_list = list()\ncoefficients_list = list()\nalgorithm = DecisionTreeRegressor(max_depth=5, random_state=42)\n\ndef gbm_predict(X):\n return [sum([coeff * algo.predict([x])[0] for algo, coeff in zip(base_algorithms_list, coefficients_list)]) for x in X]", "_____no_output_____" ], [ "base_algorithms_list = list()\ncoefficients_list = list()\nb_0 = algorithm.fit(X_train, y_train)\nbase_algorithms_list.append(b_0)\ncoefficients_list.append(0.9)\nfor i in range(1, 50):\n algorithm_i = DecisionTreeRegressor(max_depth=5, random_state=42)\n s_i = accent_l(gbm_predict(X_train), y_train)\n b_i = algorithm_i.fit(X_train, s_i)\n base_algorithms_list.append(b_i)\n coefficients_list.append(0.9)", "_____no_output_____" ], [ "print(mean_squared_error(y_test, gbm_predict(X_test))**0.5)\n", "5.448710743655589\n" ] ], [ [ "## Задание 3\n\nВас может также беспокоить, что двигаясь с постоянным шагом, вблизи минимума ошибки ответы на обучающей выборке меняются слишком резко, перескакивая через минимум. \n\nПопробуйте уменьшать вес перед каждым алгоритмом с каждой следующей итерацией по формуле `0.9 / (1.0 + i)`, где `i` - номер итерации (от 0 до 49). Используйте качество работы алгоритма как **ответ в пункте 3**. \n\nВ реальности часто применяется следующая стратегия выбора шага: как только выбран алгоритм, подберем коэффициент перед ним численным методом оптимизации таким образом, чтобы отклонение от правильных ответов было минимальным. Мы не будем предлагать вам реализовать это для выполнения задания, но рекомендуем попробовать разобраться с такой стратегией и реализовать ее при случае для себя.", "_____no_output_____" ] ], [ [ "base_algorithms_list = list()\ncoefficients_list = list()\nb_0 = algorithm.fit(X_train, y_train)\nbase_algorithms_list.append(b_0)\ncoefficients_list.append(0.9)\nfor i in range(1, 50):\n algorithm_i = DecisionTreeRegressor(max_depth=5, random_state=42)\n s_i = accent_l(gbm_predict(X_train), y_train)\n b_i = algorithm_i.fit(X_train, s_i)\n base_algorithms_list.append(b_i)\n coefficients_list.append(0.9/(1.0+i))\n #coefficients_list.append(0.05)", "_____no_output_____" ], [ "print(mean_squared_error(y_test, gbm_predict(X_test))**0.5)", "5.241288806316885\n" ] ], [ [ "## Задание 4\n\nРеализованный вами метод - градиентный бустинг над деревьями - очень популярен в машинном обучении. Он представлен как в самой библиотеке `sklearn`, так и в сторонней библиотеке `XGBoost`, которая имеет свой питоновский интерфейс. На практике `XGBoost` работает заметно лучше `GradientBoostingRegressor` из `sklearn`, но для этого задания вы можете использовать любую реализацию. 
\n\nИсследуйте, переобучается ли градиентный бустинг с ростом числа итераций (и подумайте, почему), а также с ростом глубины деревьев. На основе наблюдений выпишите через пробел номера правильных из приведенных ниже утверждений в порядке возрастания номера (это будет **ответ в п.4**):\n\n 1. С увеличением числа деревьев, начиная с некоторого момента, качество работы градиентного бустинга не меняется существенно.\n\n 2. С увеличением числа деревьев, начиная с некоторого момента, градиентный бустинг начинает переобучаться.\n\n 3. С ростом глубины деревьев, начиная с некоторого момента, качество работы градиентного бустинга на тестовой выборке начинает ухудшаться.\n\n 4. С ростом глубины деревьев, начиная с некоторого момента, качество работы градиентного бустинга перестает существенно изменяться", "_____no_output_____" ] ], [ [ "from xgboost import XGBClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import GradientBoostingRegressor\n%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "n_trees = [1] + list(range(10, 105, 5))\nX = boston.data\ny = boston.target", "_____no_output_____" ], [ "estimator = GradientBoostingRegressor(learning_rate=0.1, max_depth=5, n_estimators=100)\nestimator.fit(X_train, y_train)\nprint(mean_squared_error(y_test, estimator.predict(X_test))**0.5)", "4.597206843231819\n" ], [ "estimator = XGBClassifier(learning_rate=0.25, max_depth=5, n_estimators=50, min_child_weight=3)\nestimator.fit(X_train, y_train)\nprint(mean_squared_error(y_test, estimator.predict(X_test))**0.5)", "9.332138018696465\n" ], [ "%%time\nxgb_scoring = []\nfor n_tree in n_trees:\n estimator = XGBClassifier(learning_rate=0.1, max_depth=5, n_estimators=n_tree, min_child_weight=3)\n estimator.fit(X_train, y_train)\n #estimator = GradientBoostingRegressor(learning_rate=0.25, max_depth=5, n_estimators=n_tree)\n #score = cross_val_score(estimator, X, y, scoring = 'accuracy', cv = 3)\n score = mean_squared_error(y_test, estimator.predict(X_test))**0.5\n xgb_scoring.append(score)\nxgb_scoring = np.asmatrix(xgb_scoring)", "E:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. 
Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. 
Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\n" ], [ "print(xgb_scoring.reshape(xgb_scoring.shape[1]))\n", "[[35.39057728 33.23867627 30.72867325 31.93285455 21.87300071 8.08623522\n 6.67992216 9.20888267 9.46142061 8.81194644 10.26834359 10.38515864\n 8.97440806 8.9764982 8.72984765 8.6341369 8.68106445 8.68274611\n 8.66721178 8.6944304 ]]\n" ], [ "pylab.plot(n_trees, xgb_scoring.reshape(20, 1), marker='.', label='XGBoost')\npylab.grid(True)\npylab.xlabel('n_trees')\npylab.ylabel('score')\npylab.title('Accuracy score')\npylab.legend(loc='lower right')", "_____no_output_____" ], [ "%%time\nxgb_scoring = []\ndepths = range(1, 21)\nfor depth in depths:\n estimator = XGBClassifier(learning_rate=0.1, max_depth=depth, n_estimators=50, min_child_weight=3)\n estimator.fit(X_train, y_train)\n #estimator = GradientBoostingRegressor(learning_rate=0.25, max_depth=5, n_estimators=n_tree)\n #score = cross_val_score(estimator, X, y, scoring = 'accuracy', cv = 3)\n score = mean_squared_error(y_test, estimator.predict(X_test))**0.5\n xgb_scoring.append(score)\nxgb_scoring = np.asmatrix(xgb_scoring)", "E:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. 
Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. 
Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\nE:\\git\\MachineLearningAndDataAnalysisCoursera\\venv\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n if diff:\n" ], [ "pylab.plot(n_trees, xgb_scoring.reshape(20, 1), marker='.', label='XGBoost')\npylab.grid(True)\npylab.xlabel('n_trees')\npylab.ylabel('score')\npylab.title('Accuracy score')\npylab.legend(loc='lower right')", "_____no_output_____" ] ], [ [ "## Задание 5\n\nСравните получаемое с помощью градиентного бустинга качество с качеством работы линейной регрессии. \n\nДля этого обучите `LinearRegression` из `sklearn.linear_model` (с параметрами по умолчанию) на обучающей выборке и оцените для прогнозов полученного алгоритма на тестовой выборке `RMSE`. Полученное качество - ответ в **пункте 5**. \n\nВ данном примере качество работы простой модели должно было оказаться хуже, но не стоит забывать, что так бывает не всегда. В заданиях к этому курсу вы еще встретите пример обратной ситуации.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "estimator = LinearRegression()\nestimator.fit(X_train, y_train)\nprint(mean_squared_error(y_test, estimator.predict(X_test))**0.5)", "7.87339775956158\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06d2bf36f94fd9bfd3e956b4a98a4b7a9bb8a6f
774,658
ipynb
Jupyter Notebook
1_1_Image_Representation/5_1. HSV Color Space, Balloons.ipynb
m-emad/computer-vision-exercises
c172faeb67d96cdd4e7d34f612b24b81d54b4c9b
[ "MIT" ]
null
null
null
1_1_Image_Representation/5_1. HSV Color Space, Balloons.ipynb
m-emad/computer-vision-exercises
c172faeb67d96cdd4e7d34f612b24b81d54b4c9b
[ "MIT" ]
null
null
null
1_1_Image_Representation/5_1. HSV Color Space, Balloons.ipynb
m-emad/computer-vision-exercises
c172faeb67d96cdd4e7d34f612b24b81d54b4c9b
[ "MIT" ]
null
null
null
2,413.264798
257,644
0.960164
[ [ [ "# HSV Color Space, Balloons", "_____no_output_____" ], [ "### Import resources and display image", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n", "_____no_output_____" ], [ "%matplotlib inline\n\n# Read in the image\nimage = cv2.imread('images/water_balloons.jpg')\n\n# Change color to RGB (from BGR)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nplt.imshow(image)", "_____no_output_____" ] ], [ [ "### Plot color channels", "_____no_output_____" ] ], [ [ "# RGB channels\nr = image[:,:,0]\ng = image[:,:,1]\nb = image[:,:,2]\n\nf, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))\n\nax1.set_title('Red')\nax1.imshow(r, cmap='gray')\n\nax2.set_title('Green')\nax2.imshow(g, cmap='gray')\n\nax3.set_title('Blue')\nax3.imshow(b, cmap='gray')\n", "_____no_output_____" ], [ "# Convert from RGB to HSV\nhsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n# HSV channels\nh = hsv[:,:,0]\ns = hsv[:,:,1]\nv = hsv[:,:,2]\n\nf, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))\n\nax1.set_title('Hue')\nax1.imshow(h, cmap='gray')\n\nax2.set_title('Saturation')\nax2.imshow(s, cmap='gray')\n\nax3.set_title('Value')\nax3.imshow(v, cmap='gray')\n", "_____no_output_____" ] ], [ [ "### Define pink and hue selection thresholds", "_____no_output_____" ] ], [ [ "# Define our color selection criteria in HSV values\nlower_hue = np.array([150,0,0]) \nupper_hue = np.array([180,255,255])\n", "_____no_output_____" ], [ "# Define our color selection criteria in RGB values\nlower_pink = np.array([180,0,100]) \nupper_pink = np.array([255,255,230])", "_____no_output_____" ] ], [ [ "### Mask the image ", "_____no_output_____" ] ], [ [ "# Define the masked area in RGB space\nmask_rgb = cv2.inRange(image, lower_pink, upper_pink)\n\n# mask the image\nmasked_image = np.copy(image)\nmasked_image[mask_rgb==0] = [0,0,0]\n\n# Vizualize the mask\nplt.imshow(masked_image)", "_____no_output_____" ], [ "# Now try HSV!\n\n# Define the masked area in HSV space\nmask_hsv = cv2.inRange(hsv, lower_hue, upper_hue)\n\n# mask the image\nmasked_image = np.copy(image)\nmasked_image[mask_hsv==0] = [0,0,0]\n\n# Vizualize the mask\nplt.imshow(masked_image)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06d2c61bee1b270f9bd8e63a4749cf333a9a6fd
64,022
ipynb
Jupyter Notebook
code/world_pop_transition_from_allendowney_github.ipynb
sdaitzman/ModSimPy
d4bfbe5711ad82206824d96cd5542448d2ddf54a
[ "MIT" ]
null
null
null
code/world_pop_transition_from_allendowney_github.ipynb
sdaitzman/ModSimPy
d4bfbe5711ad82206824d96cd5542448d2ddf54a
[ "MIT" ]
null
null
null
code/world_pop_transition_from_allendowney_github.ipynb
sdaitzman/ModSimPy
d4bfbe5711ad82206824d96cd5542448d2ddf54a
[ "MIT" ]
null
null
null
130.126016
28,340
0.861548
[ [ [ "# Modeling and Simulation in Python\n\nProject 1 example\n\nCopyright 2018 Allen Downey\n\nLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)\n", "_____no_output_____" ] ], [ [ "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim library\nfrom modsim import *", "_____no_output_____" ], [ "from pandas import read_html\n\nfilename = 'data/World_population_estimates.html'\ntables = read_html(filename, header=0, index_col=0, decimal='M')\ntable2 = tables[2]\ntable2.columns = ['census', 'prb', 'un', 'maddison', \n 'hyde', 'tanton', 'biraben', 'mj', \n 'thomlinson', 'durand', 'clark']", "_____no_output_____" ], [ "def plot_results(census, un, timeseries, title):\n \"\"\"Plot the estimates and the model.\n \n census: TimeSeries of population estimates\n un: TimeSeries of population estimates\n timeseries: TimeSeries of simulation results\n title: string\n \"\"\"\n plot(census, ':', label='US Census')\n plot(un, '--', label='UN DESA')\n if len(timeseries):\n plot(timeseries, color='gray', label='model')\n \n decorate(xlabel='Year', \n ylabel='World population (billion)',\n title=title)", "_____no_output_____" ], [ "un = table2.un / 1e9\ncensus = table2.census / 1e9\nempty = TimeSeries()\nplot_results(census, un, empty, 'World population estimates')", "_____no_output_____" ], [ "half = get_first_value(census) / 2", "_____no_output_____" ], [ "init = State(young=half, old=half)", "_____no_output_____" ], [ "system = System(birth_rate1 = 1/18,\n birth_rate2 = 1/26,\n mature_rate = 1/40,\n death_rate = 1/40,\n t_0 = 1950,\n t_end = 2016,\n init=init)", "_____no_output_____" ], [ "def update_func1(state, t, system):\n if t < 1970:\n births = system.birth_rate1 * state.young\n else:\n births = system.birth_rate2 * state.young\n \n maturings = system.mature_rate * state.young\n deaths = system.death_rate * state.old\n \n young = state.young + births - maturings\n old = state.old + maturings - deaths\n \n return State(young=young, old=old)", "_____no_output_____" ], [ "state = update_func1(init, system.t_0, system)", "_____no_output_____" ], [ "state = update_func1(state, system.t_0, system)", "_____no_output_____" ], [ "def run_simulation(system, update_func):\n \"\"\"Simulate the system using any update function.\n \n init: initial State object\n system: System object\n update_func: function that computes the population next year\n \n returns: TimeSeries\n \"\"\"\n results = TimeSeries()\n \n state = system.init\n results[system.t_0] = state.young + state.old\n \n for t in linrange(system.t_0, system.t_end):\n state = update_func(state, t, system)\n results[t+1] = state.young + state.old\n \n return results", "_____no_output_____" ], [ "results = run_simulation(system, update_func1);", "_____no_output_____" ], [ "plot_results(census, un, results, 'World population estimates')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06d46df71e1a4f312897287909ee8533cf3c9fd
33,665
ipynb
Jupyter Notebook
trials/2_classification.ipynb
Gxqiang/cnn_graph
a55b9540ca1b88f549127d76d5422c6cd6031ae9
[ "MIT" ]
1,244
2016-09-14T07:24:20.000Z
2022-03-30T09:21:06.000Z
trials/2_classification.ipynb
Gxqiang/cnn_graph
a55b9540ca1b88f549127d76d5422c6cd6031ae9
[ "MIT" ]
54
2016-09-14T09:48:02.000Z
2021-12-01T10:37:47.000Z
trials/2_classification.ipynb
Gxqiang/cnn_graph
a55b9540ca1b88f549127d76d5422c6cd6031ae9
[ "MIT" ]
433
2016-09-14T08:32:30.000Z
2022-03-29T14:15:51.000Z
31.171296
128
0.447468
[ [ [ "# Trial 2: classification with learned graph filters\n\nWe want to classify data by first extracting meaningful features from learned filters.", "_____no_output_____" ] ], [ [ "import time\nimport numpy as np\nimport scipy.sparse, scipy.sparse.linalg, scipy.spatial.distance\nfrom sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport os\nimport sys\nsys.path.append('..')\nfrom lib import graph", "_____no_output_____" ] ], [ [ "# Parameters", "_____no_output_____" ], [ "# Dataset\n\n* Two digits version of MNIST with N samples of each class.\n* Distinguishing 4 from 9 is the hardest.", "_____no_output_____" ] ], [ [ "def mnist(a, b, N):\n \"\"\"Prepare data for binary classification of MNIST.\"\"\"\n folder = os.path.join('..', 'data')\n mnist = datasets.fetch_mldata('MNIST original', data_home=folder)\n\n assert N < min(sum(mnist.target==a), sum(mnist.target==b))\n M = mnist.data.shape[1]\n \n X = np.empty((M, 2, N))\n X[:,0,:] = mnist.data[mnist.target==a,:][:N,:].T\n X[:,1,:] = mnist.data[mnist.target==b,:][:N,:].T\n \n y = np.empty((2, N))\n y[0,:] = -1\n y[1,:] = +1\n\n X.shape = M, 2*N\n y.shape = 2*N, 1\n return X, y\n\nX, y = mnist(4, 9, 1000)\n\nprint('Dimensionality: N={} samples, M={} features'.format(X.shape[1], X.shape[0]))\n\nX -= 127.5\nprint('X in [{}, {}]'.format(np.min(X), np.max(X)))\n\ndef plot_digit(nn):\n M, N = X.shape\n m = int(np.sqrt(M))\n fig, axes = plt.subplots(1,len(nn), figsize=(15,5))\n for i, n in enumerate(nn):\n n = int(n)\n img = X[:,n]\n axes[i].imshow(img.reshape((m,m)))\n axes[i].set_title('Label: y = {:.0f}'.format(y[n,0]))\n\nplot_digit([0, 1, 1e2, 1e2+1, 1e3, 1e3+1])", "_____no_output_____" ] ], [ [ "# Regularized least-square\n\n## Reference: sklearn ridge regression\n\n* With regularized data, the objective is the same with or without bias.", "_____no_output_____" ] ], [ [ "def test_sklearn(tauR):\n \n def L(w, b=0):\n return np.linalg.norm(X.T @ w + b - y)**2 + tauR * np.linalg.norm(w)**2\n\n def dL(w):\n return 2 * X @ (X.T @ w - y) + 2 * tauR * w\n\n clf = linear_model.Ridge(alpha=tauR, fit_intercept=False)\n clf.fit(X.T, y)\n w = clf.coef_.T\n\n print('L = {}'.format(L(w, clf.intercept_)))\n print('|dLw| = {}'.format(np.linalg.norm(dL(w))))\n\n # Normalized data: intercept should be small.\n print('bias: {}'.format(abs(np.mean(y - X.T @ w))))\n\ntest_sklearn(1e-3)", "_____no_output_____" ] ], [ [ "## Linear classifier", "_____no_output_____" ] ], [ [ "def test_optim(clf, X, y, ax=None):\n \"\"\"Test optimization on full dataset.\"\"\"\n tstart = time.process_time()\n ret = clf.fit(X, y)\n print('Processing time: {}'.format(time.process_time()-tstart))\n print('L = {}'.format(clf.L(*ret, y)))\n if hasattr(clf, 'dLc'):\n print('|dLc| = {}'.format(np.linalg.norm(clf.dLc(*ret, y))))\n if hasattr(clf, 'dLw'):\n print('|dLw| = {}'.format(np.linalg.norm(clf.dLw(*ret, y))))\n if hasattr(clf, 'loss'):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.semilogy(clf.loss)\n ax.set_title('Convergence')\n ax.set_xlabel('Iteration number')\n ax.set_ylabel('Loss')\n if hasattr(clf, 'Lsplit'):\n print('Lsplit = {}'.format(clf.Lsplit(*ret, y)))\n print('|dLz| = {}'.format(np.linalg.norm(clf.dLz(*ret, y))))\n ax.semilogy(clf.loss_split)", "_____no_output_____" ], [ "class rls:\n \n def __init__(s, tauR, algo='solve'):\n s.tauR = tauR\n if algo is 'solve':\n s.fit = s.solve\n elif algo is 'inv':\n s.fit = s.inv\n\n def L(s, X, y):\n return np.linalg.norm(X.T @ s.w - y)**2 + s.tauR * 
np.linalg.norm(s.w)**2\n\n def dLw(s, X, y):\n return 2 * X @ (X.T @ s.w - y) + 2 * s.tauR * s.w\n \n def inv(s, X, y):\n s.w = np.linalg.inv(X @ X.T + s.tauR * np.identity(X.shape[0])) @ X @ y\n return (X,)\n \n def solve(s, X, y):\n s.w = np.linalg.solve(X @ X.T + s.tauR * np.identity(X.shape[0]), X @ y)\n return (X,)\n \n def predict(s, X):\n return X.T @ s.w\n\ntest_optim(rls(1e-3, 'solve'), X, y)\ntest_optim(rls(1e-3, 'inv'), X, y)", "_____no_output_____" ] ], [ [ "# Feature graph", "_____no_output_____" ] ], [ [ "t_start = time.process_time()\nz = graph.grid(int(np.sqrt(X.shape[0])))\ndist, idx = graph.distance_sklearn_metrics(z, k=4)\nA = graph.adjacency(dist, idx)\nL = graph.laplacian(A, True)\nlmax = graph.lmax(L)\nprint('Execution time: {:.2f}s'.format(time.process_time() - t_start))", "_____no_output_____" ] ], [ [ "# Lanczos basis", "_____no_output_____" ] ], [ [ "def lanczos(L, X, K):\n M, N = X.shape\n a = np.empty((K, N))\n b = np.zeros((K, N))\n V = np.empty((K, M, N))\n V[0,...] = X / np.linalg.norm(X, axis=0)\n for k in range(K-1):\n W = L.dot(V[k,...])\n a[k,:] = np.sum(W * V[k,...], axis=0)\n W = W - a[k,:] * V[k,...] - (b[k,:] * V[k-1,...] if k>0 else 0)\n b[k+1,:] = np.linalg.norm(W, axis=0)\n V[k+1,...] = W / b[k+1,:]\n a[K-1,:] = np.sum(L.dot(V[K-1,...]) * V[K-1,...], axis=0)\n return V, a, b\n\ndef lanczos_H_diag(a, b):\n K, N = a.shape\n H = np.zeros((K*K, N))\n H[:K**2:K+1, :] = a\n H[1:(K-1)*K:K+1, :] = b[1:,:]\n H.shape = (K, K, N)\n Q = np.linalg.eigh(H.T, UPLO='L')[1]\n Q = np.swapaxes(Q,1,2).T\n return Q\n\ndef lanczos_basis_eval(L, X, K):\n V, a, b = lanczos(L, X, K)\n Q = lanczos_H_diag(a, b)\n M, N = X.shape\n Xt = np.empty((K, M, N))\n for n in range(N):\n Xt[...,n] = Q[...,n].T @ V[...,n]\n Xt *= Q[0,:,np.newaxis,:]\n Xt *= np.linalg.norm(X, axis=0)\n return Xt, Q[0,...]", "_____no_output_____" ] ], [ [ "# Tests\n\n* Memory arrangement for fastest computations: largest dimensions on the outside, i.e. 
fastest varying indices.\n* The einsum seems to be efficient for three operands.", "_____no_output_____" ] ], [ [ "def test():\n \"\"\"Test the speed of filtering and weighting.\"\"\"\n \n def mult(impl=3):\n if impl is 0:\n Xb = Xt.view()\n Xb.shape = (K, M*N)\n XCb = Xb.T @ C # in MN x F\n XCb = XCb.T.reshape((F*M, N))\n return (XCb.T @ w).squeeze()\n elif impl is 1:\n tmp = np.tensordot(Xt, C, (0,0))\n return np.tensordot(tmp, W, ((0,2),(1,0)))\n elif impl is 2:\n tmp = np.tensordot(Xt, C, (0,0))\n return np.einsum('ijk,ki->j', tmp, W)\n elif impl is 3:\n return np.einsum('kmn,fm,kf->n', Xt, W, C)\n \n C = np.random.normal(0,1,(K,F))\n W = np.random.normal(0,1,(F,M))\n w = W.reshape((F*M, 1))\n a = mult(impl=0)\n for impl in range(4):\n tstart = time.process_time()\n for k in range(1000):\n b = mult(impl)\n print('Execution time (impl={}): {}'.format(impl, time.process_time() - tstart))\n np.testing.assert_allclose(a, b)\n#test()", "_____no_output_____" ] ], [ [ "# GFL classification without weights\n\n* The matrix is singular thus not invertible.", "_____no_output_____" ] ], [ [ "class gflc_noweights:\n\n def __init__(s, F, K, niter, algo='direct'):\n \"\"\"Model hyper-parameters\"\"\"\n s.F = F\n s.K = K\n s.niter = niter\n if algo is 'direct':\n s.fit = s.direct\n elif algo is 'sgd':\n s.fit = s.sgd\n \n def L(s, Xt, y):\n #tmp = np.einsum('kmn,kf,fm->n', Xt, s.C, np.ones((s.F,M))) - y.squeeze()\n #tmp = np.einsum('kmn,kf->mnf', Xt, s.C).sum((0,2)) - y.squeeze()\n #tmp = (C.T @ Xt.reshape((K,M*N))).reshape((F,M,N)).sum((0,2)) - y.squeeze()\n tmp = np.tensordot(s.C, Xt, (0,0)).sum((0,1)) - y.squeeze()\n return np.linalg.norm(tmp)**2\n\n def dLc(s, Xt, y):\n tmp = np.tensordot(s.C, Xt, (0,0)).sum(axis=(0,1)) - y.squeeze()\n return np.dot(Xt, tmp).sum(1)[:,np.newaxis].repeat(s.F,1)\n #return np.einsum('kmn,n->km', Xt, tmp).sum(1)[:,np.newaxis].repeat(s.F,1)\n\n def sgd(s, X, y):\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.random.normal(0, 1, (s.K, s.F))\n s.loss = [s.L(Xt, y)]\n for t in range(s.niter):\n s.C -= 1e-13 * s.dLc(Xt, y)\n s.loss.append(s.L(Xt, y))\n return (Xt,)\n \n def direct(s, X, y):\n M, N = X.shape\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.random.normal(0, 1, (s.K, s.F))\n W = np.ones((s.F, M))\n c = s.C.reshape((s.K*s.F, 1))\n s.loss = [s.L(Xt, y)]\n Xw = np.einsum('kmn,fm->kfn', Xt, W)\n #Xw = np.tensordot(Xt, W, (1,1))\n Xw.shape = (s.K*s.F, N)\n #np.linalg.inv(Xw @ Xw.T)\n c[:] = np.linalg.solve(Xw @ Xw.T, Xw @ y)\n s.loss.append(s.L(Xt, y))\n return (Xt,)\n\n#test_optim(gflc_noweights(1, 4, 100, 'sgd'), X, y)\n#test_optim(gflc_noweights(1, 4, 0, 'direct'), X, y)", "_____no_output_____" ] ], [ [ "# GFL classification with weights", "_____no_output_____" ] ], [ [ "class gflc_weights():\n\n def __init__(s, F, K, tauR, niter, algo='direct'):\n \"\"\"Model hyper-parameters\"\"\"\n s.F = F\n s.K = K\n s.tauR = tauR\n s.niter = niter\n if algo is 'direct':\n s.fit = s.direct\n elif algo is 'sgd':\n s.fit = s.sgd\n\n def L(s, Xt, y):\n tmp = np.einsum('kmn,kf,fm->n', Xt, s.C, s.W) - y.squeeze()\n return np.linalg.norm(tmp)**2 + s.tauR * np.linalg.norm(s.W)**2\n\n def dLw(s, Xt, y):\n tmp = np.einsum('kmn,kf,fm->n', Xt, s.C, s.W) - y.squeeze()\n return 2 * np.einsum('kmn,kf,n->fm', Xt, s.C, tmp) + 2 * s.tauR * s.W\n\n def dLc(s, Xt, y):\n tmp = np.einsum('kmn,kf,fm->n', Xt, s.C, s.W) - y.squeeze()\n return 2 * np.einsum('kmn,n,fm->kf', Xt, tmp, s.W)\n\n def sgd(s, X, y):\n M, N = X.shape\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.random.normal(0, 
1, (s.K, s.F))\n s.W = np.random.normal(0, 1, (s.F, M))\n\n s.loss = [s.L(Xt, y)]\n\n for t in range(s.niter):\n s.C -= 1e-12 * s.dLc(Xt, y)\n s.W -= 1e-12 * s.dLw(Xt, y)\n s.loss.append(s.L(Xt, y))\n \n return (Xt,)\n\n def direct(s, X, y):\n M, N = X.shape\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.random.normal(0, 1, (s.K, s.F))\n s.W = np.random.normal(0, 1, (s.F, M))\n #c = s.C.reshape((s.K*s.F, 1))\n #w = s.W.reshape((s.F*M, 1))\n c = s.C.view()\n c.shape = (s.K*s.F, 1)\n w = s.W.view()\n w.shape = (s.F*M, 1)\n\n s.loss = [s.L(Xt, y)]\n\n for t in range(s.niter):\n Xw = np.einsum('kmn,fm->kfn', Xt, s.W)\n #Xw = np.tensordot(Xt, s.W, (1,1))\n Xw.shape = (s.K*s.F, N)\n c[:] = np.linalg.solve(Xw @ Xw.T, Xw @ y)\n\n Z = np.einsum('kmn,kf->fmn', Xt, s.C)\n #Z = np.tensordot(Xt, s.C, (0,0))\n #Z = s.C.T @ Xt.reshape((K,M*N))\n Z.shape = (s.F*M, N)\n w[:] = np.linalg.solve(Z @ Z.T + s.tauR * np.identity(s.F*M), Z @ y)\n\n s.loss.append(s.L(Xt, y))\n \n return (Xt,)\n\n def predict(s, X):\n Xt, q = lanczos_basis_eval(L, X, s.K)\n return np.einsum('kmn,kf,fm->n', Xt, s.C, s.W)\n\n#test_optim(gflc_weights(3, 4, 1e-3, 50, 'sgd'), X, y)\nclf_weights = gflc_weights(F=3, K=50, tauR=1e4, niter=5, algo='direct')\ntest_optim(clf_weights, X, y)", "_____no_output_____" ] ], [ [ "# GFL classification with splitting\n\nSolvers\n* Closed-form solution.\n* Stochastic gradient descent.", "_____no_output_____" ] ], [ [ "class gflc_split():\n\n def __init__(s, F, K, tauR, tauF, niter, algo='direct'):\n \"\"\"Model hyper-parameters\"\"\"\n s.F = F\n s.K = K\n s.tauR = tauR\n s.tauF = tauF\n s.niter = niter\n if algo is 'direct':\n s.fit = s.direct\n elif algo is 'sgd':\n s.fit = s.sgd\n\n def L(s, Xt, XCb, Z, y):\n return np.linalg.norm(XCb.T @ s.w - y)**2 + s.tauR * np.linalg.norm(s.w)**2\n\n def Lsplit(s, Xt, XCb, Z, y):\n return np.linalg.norm(Z.T @ s.w - y)**2 + s.tauF * np.linalg.norm(XCb - Z)**2 + s.tauR * np.linalg.norm(s.w)**2\n\n def dLw(s, Xt, XCb, Z, y):\n return 2 * Z @ (Z.T @ s.w - y) + 2 * s.tauR * s.w\n\n def dLc(s, Xt, XCb, Z, y):\n Xb = Xt.reshape((s.K, -1)).T\n Zb = Z.reshape((s.F, -1)).T\n return 2 * s.tauF * Xb.T @ (Xb @ s.C - Zb)\n\n def dLz(s, Xt, XCb, Z, y):\n return 2 * s.w @ (s.w.T @ Z - y.T) + 2 * s.tauF * (Z - XCb)\n\n def lanczos_filter(s, Xt):\n M, N = Xt.shape[1:]\n Xb = Xt.reshape((s.K, M*N)).T\n #XCb = np.tensordot(Xb, C, (2,1))\n XCb = Xb @ s.C # in MN x F\n XCb = XCb.T.reshape((s.F*M, N)) # Needs to copy data.\n return XCb\n\n def sgd(s, X, y):\n M, N = X.shape\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.zeros((s.K, s.F))\n s.w = np.zeros((s.F*M, 1))\n Z = np.random.normal(0, 1, (s.F*M, N))\n\n XCb = np.empty((s.F*M, N))\n\n s.loss = [s.L(Xt, XCb, Z, y)]\n s.loss_split = [s.Lsplit(Xt, XCb, Z, y)]\n\n for t in range(s.niter):\n s.C -= 1e-7 * s.dLc(Xt, XCb, Z, y)\n XCb[:] = s.lanczos_filter(Xt)\n Z -= 1e-4 * s.dLz(Xt, XCb, Z, y)\n s.w -= 1e-4 * s.dLw(Xt, XCb, Z, y)\n s.loss.append(s.L(Xt, XCb, Z, y))\n s.loss_split.append(s.Lsplit(Xt, XCb, Z, y))\n \n return Xt, XCb, Z\n\n def direct(s, X, y):\n M, N = X.shape\n Xt, q = lanczos_basis_eval(L, X, s.K)\n s.C = np.zeros((s.K, s.F))\n s.w = np.zeros((s.F*M, 1))\n Z = np.random.normal(0, 1, (s.F*M, N))\n\n XCb = np.empty((s.F*M, N))\n Xb = Xt.reshape((s.K, M*N)).T\n Zb = Z.reshape((s.F, M*N)).T\n\n s.loss = [s.L(Xt, XCb, Z, y)]\n s.loss_split = [s.Lsplit(Xt, XCb, Z, y)]\n\n for t in range(s.niter):\n\n s.C[:] = Xb.T @ Zb / np.sum((np.linalg.norm(X, axis=0) * q)**2, axis=1)[:,np.newaxis]\n XCb[:] = 
s.lanczos_filter(Xt)\n\n #Z[:] = np.linalg.inv(s.tauF * np.identity(s.F*M) + s.w @ s.w.T) @ (s.tauF * XCb + s.w @ y.T)\n Z[:] = np.linalg.solve(s.tauF * np.identity(s.F*M) + s.w @ s.w.T, s.tauF * XCb + s.w @ y.T)\n\n #s.w[:] = np.linalg.inv(Z @ Z.T + s.tauR * np.identity(s.F*M)) @ Z @ y\n s.w[:] = np.linalg.solve(Z @ Z.T + s.tauR * np.identity(s.F*M), Z @ y)\n\n s.loss.append(s.L(Xt, XCb, Z, y))\n s.loss_split.append(s.Lsplit(Xt, XCb, Z, y))\n \n return Xt, XCb, Z\n\n def predict(s, X):\n Xt, q = lanczos_basis_eval(L, X, s.K)\n XCb = s.lanczos_filter(Xt)\n return XCb.T @ s.w\n\n#test_optim(gflc_split(3, 4, 1e-3, 1e-3, 50, 'sgd'), X, y)\nclf_split = gflc_split(3, 4, 1e4, 1e-3, 8, 'direct')\ntest_optim(clf_split, X, y)", "_____no_output_____" ] ], [ [ "# Filters visualization\n\nObservations:\n* Filters learned with the splitting scheme have much smaller amplitudes.\n* Maybe the energy sometimes goes in W ?\n* Why are the filters so different ?", "_____no_output_____" ] ], [ [ "lamb, U = graph.fourier(L)\nprint('Spectrum in [{:1.2e}, {:1.2e}]'.format(lamb[0], lamb[-1]))", "_____no_output_____" ], [ "def plot_filters(C, spectrum=False):\n K, F = C.shape\n M, M = L.shape\n m = int(np.sqrt(M))\n X = np.zeros((M,1))\n X[int(m/2*(m+1))] = 1 # Kronecker\n Xt, q = lanczos_basis_eval(L, X, K)\n Z = np.einsum('kmn,kf->mnf', Xt, C)\n Xh = U.T @ X\n Zh = np.tensordot(U.T, Z, (1,0))\n \n pmin = int(m/2) - K\n pmax = int(m/2) + K + 1\n fig, axes = plt.subplots(2,int(np.ceil(F/2)), figsize=(15,5))\n for f in range(F):\n img = Z[:,0,f].reshape((m,m))[pmin:pmax,pmin:pmax]\n im = axes.flat[f].imshow(img, vmin=Z.min(), vmax=Z.max(), interpolation='none')\n axes.flat[f].set_title('Filter {}'.format(f))\n fig.subplots_adjust(right=0.8)\n cax = fig.add_axes([0.82, 0.16, 0.02, 0.7])\n fig.colorbar(im, cax=cax)\n \n if spectrum:\n ax = plt.figure(figsize=(15,5)).add_subplot(111)\n for f in range(F):\n ax.plot(lamb, Zh[...,f] / Xh, '.-', label='Filter {}'.format(f))\n ax.legend(loc='best')\n ax.set_title('Spectrum of learned filters')\n ax.set_xlabel('Frequency')\n ax.set_ylabel('Amplitude')\n ax.set_xlim(0, lmax)\n\nplot_filters(clf_weights.C, True)\nplot_filters(clf_split.C, True)", "_____no_output_____" ] ], [ [ "# Extracted features", "_____no_output_____" ] ], [ [ "def plot_features(C, x):\n K, F = C.shape\n m = int(np.sqrt(x.shape[0]))\n xt, q = lanczos_basis_eval(L, x, K)\n Z = np.einsum('kmn,kf->mnf', xt, C)\n \n fig, axes = plt.subplots(2,int(np.ceil(F/2)), figsize=(15,5))\n for f in range(F):\n img = Z[:,0,f].reshape((m,m))\n #im = axes.flat[f].imshow(img, vmin=Z.min(), vmax=Z.max(), interpolation='none')\n im = axes.flat[f].imshow(img, interpolation='none')\n axes.flat[f].set_title('Filter {}'.format(f))\n fig.subplots_adjust(right=0.8)\n cax = fig.add_axes([0.82, 0.16, 0.02, 0.7])\n fig.colorbar(im, cax=cax)\n\nplot_features(clf_weights.C, X[:,[0]])\nplot_features(clf_weights.C, X[:,[1000]])", "_____no_output_____" ] ], [ [ "# Performance w.r.t. 
hyper-parameters\n\n* F plays a big role.\n * Both for performance and training time.\n * Larger values lead to over-fitting !\n* Order $K \\in [3,5]$ seems sufficient.\n* $\\tau_R$ does not have much influence.", "_____no_output_____" ] ], [ [ "def scorer(clf, X, y):\n yest = clf.predict(X).round().squeeze()\n y = y.squeeze()\n yy = np.ones(len(y))\n yy[yest < 0] = -1\n nerrs = np.count_nonzero(y - yy)\n return 1 - nerrs / len(y)", "_____no_output_____" ], [ "def perf(clf, nfolds=3):\n \"\"\"Test training accuracy.\"\"\"\n N = X.shape[1]\n inds = np.arange(N)\n np.random.shuffle(inds)\n inds.resize((nfolds, int(N/nfolds)))\n folds = np.arange(nfolds)\n test = inds[0,:]\n train = inds[folds != 0, :].reshape(-1)\n \n fig, axes = plt.subplots(1,3, figsize=(15,5))\n test_optim(clf, X[:,train], y[train], axes[2])\n \n axes[0].plot(train, clf.predict(X[:,train]), '.')\n axes[0].plot(train, y[train].squeeze(), '.')\n axes[0].set_ylim([-3,3])\n axes[0].set_title('Training set accuracy: {:.2f}'.format(scorer(clf, X[:,train], y[train])))\n axes[1].plot(test, clf.predict(X[:,test]), '.')\n axes[1].plot(test, y[test].squeeze(), '.')\n axes[1].set_ylim([-3,3])\n axes[1].set_title('Testing set accuracy: {:.2f}'.format(scorer(clf, X[:,test], y[test])))\n \n if hasattr(clf, 'C'):\n plot_filters(clf.C)\n\nperf(rls(tauR=1e6))\nfor F in [1,3,5]:\n perf(gflc_weights(F=F, K=50, tauR=1e4, niter=5, algo='direct'))\n\n#perf(rls(tauR=1e-3))\n#for K in [2,3,5,7]:\n# perf(gflc_weights(F=3, K=K, tauR=1e-3, niter=5, algo='direct'))\n\n#for tauR in [1e-3, 1e-1, 1e1]:\n# perf(rls(tauR=tauR))\n# perf(gflc_weights(F=3, K=3, tauR=tauR, niter=5, algo='direct'))", "_____no_output_____" ] ], [ [ "# Classification\n\n* Greater is $F$, greater should $K$ be.", "_____no_output_____" ] ], [ [ "def cross_validation(clf, nfolds, nvalidations):\n M, N = X.shape\n scores = np.empty((nvalidations, nfolds))\n for nval in range(nvalidations):\n inds = np.arange(N)\n np.random.shuffle(inds)\n inds.resize((nfolds, int(N/nfolds)))\n folds = np.arange(nfolds)\n for n in folds:\n test = inds[n,:]\n train = inds[folds != n, :].reshape(-1)\n clf.fit(X[:,train], y[train])\n scores[nval, n] = scorer(clf, X[:,test], y[test])\n return scores.mean()*100, scores.std()*100\n #print('Accuracy: {:.2f} +- {:.2f}'.format(scores.mean()*100, scores.std()*100))\n #print(scores)", "_____no_output_____" ], [ "def test_classification(clf, params, param, values, nfolds=10, nvalidations=1):\n means = []\n stds = []\n fig, ax = plt.subplots(1,1, figsize=(15,5))\n for i,val in enumerate(values):\n params[param] = val\n mean, std = cross_validation(clf(**params), nfolds, nvalidations)\n means.append(mean)\n stds.append(std)\n ax.annotate('{:.2f} +- {:.2f}'.format(mean,std), xy=(i,mean), xytext=(10,10), textcoords='offset points')\n ax.errorbar(np.arange(len(values)), means, stds, fmt='.', markersize=10)\n ax.set_xlim(-.8, len(values)-.2)\n ax.set_xticks(np.arange(len(values)))\n ax.set_xticklabels(values)\n ax.set_xlabel(param)\n ax.set_ylim(50, 100)\n ax.set_ylabel('Accuracy')\n ax.set_title('Parameters: {}'.format(params))", "_____no_output_____" ], [ "test_classification(rls, {}, 'tauR', [1e8,1e7,1e6,1e5,1e4,1e3,1e-5,1e-8], 10, 10)", "_____no_output_____" ], [ "params = {'F':1, 'K':2, 'tauR':1e3, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'tauR', [1e8,1e6,1e5,1e4,1e3,1e2,1e-3,1e-8], 10, 10)", "_____no_output_____" ], [ "params = {'F':2, 'K':10, 'tauR':1e4, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'F', 
[1,2,3,5])", "_____no_output_____" ], [ "params = {'F':2, 'K':4, 'tauR':1e4, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'K', [2,3,4,5,8,10,20,30,50,70])", "_____no_output_____" ] ], [ [ "# Sampled MNIST", "_____no_output_____" ] ], [ [ "Xfull = X", "_____no_output_____" ], [ "def sample(X, p, seed=None):\n M, N = X.shape\n z = graph.grid(int(np.sqrt(M)))\n \n # Select random pixels.\n np.random.seed(seed)\n mask = np.arange(M)\n np.random.shuffle(mask)\n mask = mask[:int(p*M)]\n \n return z[mask,:], X[mask,:]\n\nX = Xfull\nz, X = sample(X, .5)\ndist, idx = graph.distance_sklearn_metrics(z, k=4)\nA = graph.adjacency(dist, idx)\nL = graph.laplacian(A)\nlmax = graph.lmax(L)\nlamb, U = graph.fourier(L)\nprint('Spectrum in [{:1.2e}, {:1.2e}]'.format(lamb[0], lamb[-1]))\n\nprint(L.shape)\n\ndef plot(n):\n M, N = X.shape\n m = int(np.sqrt(M))\n x = X[:,n]\n #print(x+127.5)\n plt.scatter(z[:,0], -z[:,1], s=20, c=x+127.5)\nplot(10)\n\ndef plot_digit(nn):\n M, N = X.shape\n m = int(np.sqrt(M))\n fig, axes = plt.subplots(1,len(nn), figsize=(15,5))\n for i, n in enumerate(nn):\n n = int(n)\n img = X[:,n]\n axes[i].imshow(img.reshape((m,m)))\n axes[i].set_title('Label: y = {:.0f}'.format(y[n,0]))\n\n#plot_digit([0, 1, 1e2, 1e2+1, 1e3, 1e3+1])", "_____no_output_____" ], [ "#clf_weights = gflc_weights(F=3, K=4, tauR=1e-3, niter=5, algo='direct')\n#test_optim(clf_weights, X, y)\n#plot_filters(clf_weights.C, True)", "_____no_output_____" ], [ "#test_classification(rls, {}, 'tauR', [1e1,1e0])\n#params = {'F':2, 'K':5, 'tauR':1e-3, 'niter':5, 'algo':'direct'}\n#test_classification(gflc_weights, params, 'F', [1,2,3])", "_____no_output_____" ], [ "test_classification(rls, {}, 'tauR', [1e8,1e7,1e6,1e5,1e4,1e3,1e-5,1e-8], 10, 10)", "_____no_output_____" ], [ "params = {'F':2, 'K':2, 'tauR':1e3, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'tauR', [1e8,1e5,1e4,1e3,1e2,1e1,1e-3,1e-8], 10, 1)", "_____no_output_____" ], [ "params = {'F':2, 'K':10, 'tauR':1e5, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'F', [1,2,3,4,5,10])", "_____no_output_____" ], [ "params = {'F':2, 'K':4, 'tauR':1e5, 'niter':5, 'algo':'direct'}\ntest_classification(gflc_weights, params, 'K', [2,3,4,5,6,7,8,10,20,30])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06d4aec64b78630805bdbaa0ad4203928fade5f
145,245
ipynb
Jupyter Notebook
Proyecto/pruebas_regresion.ipynb
maquinon1612/Aprendizaje-Automatico-y-Big-Data
b94e33f274e3115ba06ecd7181b67e85313cbce3
[ "MIT" ]
1
2021-11-09T12:03:19.000Z
2021-11-09T12:03:19.000Z
Proyecto/pruebas_regresion.ipynb
maquinon1612/Aprendizaje-Automatico-y-Big-Data
b94e33f274e3115ba06ecd7181b67e85313cbce3
[ "MIT" ]
null
null
null
Proyecto/pruebas_regresion.ipynb
maquinon1612/Aprendizaje-Automatico-y-Big-Data
b94e33f274e3115ba06ecd7181b67e85313cbce3
[ "MIT" ]
null
null
null
55.77765
18,456
0.635223
[ [ [ "import loader as ld\nimport regresion as rg\nimport numpy as np\nimport matplotlib.pyplot as plt\nrandom_state = 1706\n\nLambdas = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]", "_____no_output_____" ], [ "Ex, Ey, Vx, Vy, Px, Py = ld.carga_Numpy(random_state)", "_____no_output_____" ], [ "total = Ex.shape[0] + Vx.shape[0] + Px.shape[0]\nprint(Ex.shape[0]/total * 100)\nprint(Vx.shape[0]/total * 100)\nprint(Px.shape[0]/total * 100)\n\nprint(Ex.shape[0])", "59.99314951190272\n20.00342524404864\n20.00342524404864\n7006\n" ], [ "resultados = []\nejemplos = np.linspace(1, Ex.shape[0], 150, endpoint = False, dtype = int)\n\nVxn , esc2 = ld.normalizar(Vx)\nVxn = np.hstack([np.ones([Vxn.shape[0],1]), Vxn])\n\nfor i in range(ejemplos.shape[0]):\n\n Exn, esc1 = ld.normalizar(Ex[:ejemplos[i], :])\n\n ones = np.ones([ejemplos[i],1])\n Exn = np.hstack([ones, Exn])\n\n precision, asignacion = rg.evaluar_validacion(0,Exn,Ey[:ejemplos[i]],Vxn,Vy)\n resultados.append(precision)\n", "coste: 3.6883883876892855e-07\ncoste: 3.6883883876892855e-07\ncoste: 3.688528786544765e-07\ncoste: 3.6883883876892855e-07\ncoste: 1.4312164770051439e-08\ncoste: 2.4323750823961084e-06\ncoste: 1.7114057616590092e-08\ncoste: 8.477272026448706e-06\ncoste: 0.00020608464688934113\ncoste: 0.003987943177575745\ncoste: 9.314907291437817e-05\ncoste: 0.0020523383520757036\ncoste: 0.10185468233830866\ncoste: 0.20141175722021634\ncoste: 0.052738128947275545\ncoste:c:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:35: RuntimeWarning: divide by zero encountered in log\n sum2 = np.dot((1-Y), np.log(1 - G))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:35: RuntimeWarning: divide by zero encountered in log\n sum2 = np.dot((1-Y), np.log(1 - G))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:35: RuntimeWarning: divide by zero encountered in log\n sum2 = np.dot((1-Y), np.log(1 - G))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:35: RuntimeWarning: divide by zero encountered in log\n sum2 = np.dot((1-Y), np.log(1 - G))\n 0.27051104435822027\ncoste: 0.2562714016526968\ncoste: 0.3435932229003206\ncoste: 0.11768074080539946\ncoste: 0.2892339566032206\ncoste: 0.3475227481888905\ncoste: 0.4354985549143644\ncoste: 0.30211577621615543\ncoste: 0.34956739200948855\ncoste: 0.3704660828875057\ncoste: 0.45027925684030323\ncoste: 0.3972352251890789\ncoste: 0.35369679530189296\ncoste: 0.4096489273009766\ncoste: 0.4876881822888688\ncoste: 0.4076337165489908\ncoste: 0.3902731810994002\ncoste: 0.43784604635069013\ncoste: 0.5022863171262688\ncoste: 0.43725208577008184\ncoste: 0.4026767640118317\ncoste: 0.44002526825599714\ncoste: 0.5070301170088527\ncoste: 0.43388241941593403\ncoste: 0.454914742816812\ncoste: 0.45179445658916273\ncoste: 0.5134952053498452\ncoste: 0.46760413680662105\ncoste: 0.4674691106061417\ncoste: 0.445167575850314\ncoste: 0.5172749115600085\ncoste: 0.47033954178663595\ncoste: 0.47302432799590405\ncoste: 0.4480709895699901\ncoste: 0.5128219837182292\ncoste: 0.48551305899105845\ncoste: 0.4949491319856925\ncoste: 0.4412906803180061\ncoste: 0.5232082402558352\ncoste: 0.48861585124277696\ncoste: 0.5059546983671296\ncoste: 0.43915801257842224\ncoste: 0.528513497409615\ncoste: 0.4879790237429899\ncoste: 0.5124039620210548\ncoste: 0.4408569694487351\ncoste: 0.5361127362670676\ncoste: 0.4954559216875908\ncoste: 0.5056372225402392\ncoste: 0.4575161489428474\ncoste: 0.5363842749785764\ncoste: 0.502186830788929\ncoste: 0.5122905715132784\ncoste: 0.4673169547973898\ncoste: 0.5438678276590346\ncoste: 0.5012945648971676\ncoste: 
0.5527099869416461\n" ], [ "plt.plot(ejemplos, resultados)", "_____no_output_____" ], [ "resultados2 = []\nejemplos2 = np.linspace(1, Ex.shape[0], 150, endpoint = False, dtype = int)\n\nVxp = ld.terminosPolinomicos(Vx,2)\n\n\n\nfor i in range(ejemplos2.shape[0]):\n\n Exp = ld.terminosPolinomicos(Ex[:ejemplos2[i],:],2)\n Expn , scaler = ld.normalizar(Exp[:,1:])\n Expn = np.hstack([np.ones([Expn.shape[0],1]), Expn])\n\n Vxpn = scaler.transform(Vxp[:,1:])\n Vxpn = np.hstack([np.ones([Vxpn.shape[0],1]), Vxpn])\n\n precision, asignacion = rg.evaluar_validacion(0,Expn,Ey[:ejemplos2[i]],Vxpn,Vy)\n resultados2.append(precision)\n\n\nplt.plot(ejemplos2, resultados2)", "te: -5.374018870396931e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.16259806026011162\ncoste: 0.13510452239622284\ncoste: 7.863726829242179e-06\ncoste: -6.454439843481211e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.1622247261184146\ncoste: 0.13735705842481893\ncoste: 0.00022236176820913283\ncoste: -5.562668931752488e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.1664571002637836\ncoste: 0.06133940656552714\ncoste: 1.3432883343645672e-06\ncoste: -7.109183911219504e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.16983071958488954\ncoste: 0.06007087384827253\ncoste: 0.30667690417687454\ncoste: -5.755576799149986e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.17143889035002433\ncoste: 0.14300055064812917\ncoste: 0.09611916296962286\ncoste: -5.702856710741273e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.16969586049425614\ncoste: -4.407957530448432e-07\ncoste: -2.663782943063022e-08\ncoste: -2.9975602016239114e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in exp\n sigmoide = 1 / (1 + np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.17153199707681707\ncoste: 0.08094648551452578\ncoste: 0.00020007901645169638\ncoste: -5.864663294486451e-07\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:8: RuntimeWarning: overflow encountered in 
exp\n sigmoide = 1 / (1 + 
np.exp(-Z))\nc:\\Users\\Guille\\PracticasAA\\Proyecto\\regresion.py:34: RuntimeWarning: divide by zero encountered in log\n sum1 = np.dot(Y, np.log(G))\ncoste: 0.001670664755671088\ncoste: 0.17028481881376425\ncoste: 0.0021770437720547912\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d06d547b1371b1b1eee3b5030ede96f3376e8b93
13,788
ipynb
Jupyter Notebook
05_Merge/Housing Market/Exercises.ipynb
LouisNodskov/pandas_exercises
8a9c18a254b17f972da5f2bb23d6f9421b27b7b7
[ "BSD-3-Clause" ]
null
null
null
05_Merge/Housing Market/Exercises.ipynb
LouisNodskov/pandas_exercises
8a9c18a254b17f972da5f2bb23d6f9421b27b7b7
[ "BSD-3-Clause" ]
null
null
null
05_Merge/Housing Market/Exercises.ipynb
LouisNodskov/pandas_exercises
8a9c18a254b17f972da5f2bb23d6f9421b27b7b7
[ "BSD-3-Clause" ]
null
null
null
24.231986
174
0.33754
[ [ [ "# Housing Market", "_____no_output_____" ], [ "### Introduction:\n\nThis time we will create our own dataset with fictional numbers to describe a house market. As we are going to create random data don't try to reason of the numbers.\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Step 2. Create 3 differents Series, each of length 100, as follows: \n1. The first a random number from 1 to 4 \n2. The second a random number from 1 to 3\n3. The third a random number from 10,000 to 30,000", "_____no_output_____" ] ], [ [ "rand1 = pd.Series(np.random.randint(1, 5, 100))\nrand2 = pd.Series(np.random.randint(1, 4, 100))\nrand3 = pd.Series(np.random.randint(10000, 30001, 100))\n\nprint(rand1, rand2, rand3)", "0 2\n1 1\n2 3\n3 2\n4 2\n ..\n95 3\n96 4\n97 3\n98 2\n99 2\nLength: 100, dtype: int32 0 1\n1 1\n2 1\n3 2\n4 3\n ..\n95 2\n96 1\n97 3\n98 3\n99 2\nLength: 100, dtype: int32 0 23816\n1 22299\n2 13516\n3 25975\n4 22916\n ... \n95 11050\n96 16246\n97 11288\n98 25346\n99 26681\nLength: 100, dtype: int32\n" ] ], [ [ "### Step 3. Let's create a DataFrame by joinning the Series by column", "_____no_output_____" ] ], [ [ "df = pd.concat([rand1, rand2, rand3], axis = 1)\ndf", "_____no_output_____" ] ], [ [ "### Step 4. Change the name of the columns to bedrs, bathrs, price_sqr_meter", "_____no_output_____" ] ], [ [ "df.rename(columns = {\n 0: 'bedrs',\n 1: 'bathrs',\n 2: 'price_sqr_meter'\n}, inplace=True)\ndf", "_____no_output_____" ] ], [ [ "### Step 5. Create a one column DataFrame with the values of the 3 Series and assign it to 'bigcolumn'", "_____no_output_____" ] ], [ [ "bigcolumn = pd.DataFrame(pd.concat([rand1, rand2, rand3], axis = 0))", "_____no_output_____" ] ], [ [ "### Step 6. Oops, it seems it is going only until index 99. Is it true?", "_____no_output_____" ] ], [ [ "len(bigcolumn)", "_____no_output_____" ] ], [ [ "### Step 7. Reindex the DataFrame so it goes from 0 to 299", "_____no_output_____" ] ], [ [ "bigcolumn.reset_index(drop = True, inplace=True)\nbigcolumn", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06d632dfb2f089551378d3ca7b5d324f74b614f
4,518
ipynb
Jupyter Notebook
pyqtgraph/examples/notebooks/Plotting.ipynb
andriyor/pyqtgraph
91e35beee31de3847d4d479a352788708ae762b7
[ "MIT" ]
1
2022-01-30T20:04:51.000Z
2022-01-30T20:04:51.000Z
pyqtgraph/examples/notebooks/Plotting.ipynb
andriyor/pyqtgraph
91e35beee31de3847d4d479a352788708ae762b7
[ "MIT" ]
null
null
null
pyqtgraph/examples/notebooks/Plotting.ipynb
andriyor/pyqtgraph
91e35beee31de3847d4d479a352788708ae762b7
[ "MIT" ]
null
null
null
33.466667
108
0.555998
[ [ [ "\"\"\"\nThis example demonstrates many of the 2D plotting capabilities\nin pyqtgraph. All of the plots may be panned/scaled by dragging with \nthe left/right mouse buttons. Right click on any plot to show a context menu.\n\"\"\"\nfrom pyqtgraph.jupyter import GraphicsLayoutWidget\nfrom IPython.display import display\n\nimport numpy as np\nimport pyqtgraph as pg\n\nclass CustomGLW(GraphicsLayoutWidget):\n def get_frame(self):\n # rather than eating up cpu cycles by perpetually updating \"Updating plot\",\n # we will only update it opportunistically on a redraw.\n # self.request_draw()\n update()\n return super().get_frame()\n\npg.mkQApp()\nwin = CustomGLW(css_width=\"1000px\", css_height=\"600px\")\n\n# Enable antialiasing for prettier plots\npg.setConfigOptions(antialias=True)\n\np1 = win.addPlot(title=\"Basic array plotting\", y=np.random.normal(size=100))\n\np2 = win.addPlot(title=\"Multiple curves\")\np2.plot(np.random.normal(size=100), pen=(255,0,0), name=\"Red curve\")\np2.plot(np.random.normal(size=110)+5, pen=(0,255,0), name=\"Green curve\")\np2.plot(np.random.normal(size=120)+10, pen=(0,0,255), name=\"Blue curve\")\n\np3 = win.addPlot(title=\"Drawing with points\")\np3.plot(np.random.normal(size=100), pen=(200,200,200), symbolBrush=(255,0,0), symbolPen='w')\n\n\nwin.nextRow()\n\np4 = win.addPlot(title=\"Parametric, grid enabled\")\nx = np.cos(np.linspace(0, 2*np.pi, 1000))\ny = np.sin(np.linspace(0, 4*np.pi, 1000))\np4.plot(x, y)\np4.showGrid(x=True, y=True)\n\np5 = win.addPlot(title=\"Scatter plot, axis labels, log scale\")\nx = np.random.normal(size=1000) * 1e-5\ny = x*1000 + 0.005 * np.random.normal(size=1000)\ny -= y.min()-1.0\nmask = x > 1e-15\nx = x[mask]\ny = y[mask]\np5.plot(x, y, pen=None, symbol='t', symbolPen=None, symbolSize=10, symbolBrush=(100, 100, 255, 50))\np5.setLabel('left', \"Y Axis\", units='A')\np5.setLabel('bottom', \"Y Axis\", units='s')\np5.setLogMode(x=True, y=False)\n\np6 = win.addPlot(title=\"Updating plot\")\ncurve = p6.plot(pen='y')\ndata = np.random.normal(size=(10,1000))\nptr = 0\ndef update():\n global curve, data, ptr, p6\n curve.setData(data[ptr%10])\n if ptr == 0:\n p6.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted\n ptr += 1\n\nwin.nextRow()\n\np7 = win.addPlot(title=\"Filled plot, axis disabled\")\ny = np.sin(np.linspace(0, 10, 1000)) + np.random.normal(size=1000, scale=0.1)\np7.plot(y, fillLevel=-0.3, brush=(50,50,200,100))\np7.showAxis('bottom', False)\n\n\nx2 = np.linspace(-100, 100, 1000)\ndata2 = np.sin(x2) / x2\np8 = win.addPlot(title=\"Region Selection\")\np8.plot(data2, pen=(255,255,255,200))\nlr = pg.LinearRegionItem([400,700])\nlr.setZValue(-10)\np8.addItem(lr)\n\np9 = win.addPlot(title=\"Zoom on selected region\")\np9.plot(data2)\ndef updatePlot():\n p9.setXRange(*lr.getRegion(), padding=0)\ndef updateRegion():\n lr.setRegion(p9.getViewBox().viewRange()[0])\nlr.sigRegionChanged.connect(updatePlot)\np9.sigXRangeChanged.connect(updateRegion)\nupdatePlot()\n\ndisplay(win)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d06d736ffcf287b6c4b60f402f28f6286ea25994
56,170
ipynb
Jupyter Notebook
notebooks/session8.ipynb
sofieditmer/cds-visual
e1fafa8e501f864124229dd85aa22f33b38051f5
[ "MIT" ]
null
null
null
notebooks/session8.ipynb
sofieditmer/cds-visual
e1fafa8e501f864124229dd85aa22f33b38051f5
[ "MIT" ]
null
null
null
notebooks/session8.ipynb
sofieditmer/cds-visual
e1fafa8e501f864124229dd85aa22f33b38051f5
[ "MIT" ]
null
null
null
128.535469
38,244
0.813388
[ [ [ "## Import libraries", "_____no_output_____" ] ], [ [ "# generic tools\nimport numpy as np\nimport datetime \n\n# tools from sklearn\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\n\n# tools from tensorflow\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.utils import plot_model\n\n# matplotlib\nimport matplotlib.pyplot as plt\n\n# Load the TensorBoard notebook extension\n%load_ext tensorboard\n# delete logs from previous runs - not always safe!\n!rm -rf ./logs/", "_____no_output_____" ] ], [ [ "## Download data, train-test split, binarize labels", "_____no_output_____" ] ], [ [ "data, labels = fetch_openml('mnist_784', version=1, return_X_y=True)\n\n# to data\ndata = data.astype(\"float\")/255.0\n\n# split data\n(trainX, testX, trainY, testY) = train_test_split(data, \n labels, \n test_size=0.2)\n\n# convert labels to one-hot encoding\nlb = LabelBinarizer()\ntrainY = lb.fit_transform(trainY)\ntestY = lb.fit_transform(testY)", "_____no_output_____" ] ], [ [ "## Define neural network architecture using ```tf.keras```", "_____no_output_____" ] ], [ [ "# define architecture 784x256x128x10\nmodel = Sequential()\nmodel.add(Dense(256, input_shape=(784,), activation=\"sigmoid\"))\nmodel.add(Dense(128, activation=\"sigmoid\"))\nmodel.add(Dense(10, activation=\"softmax\")) # generalisation of logistic regression for multiclass task", "_____no_output_____" ] ], [ [ "## Show summary of model architecture", "_____no_output_____" ] ], [ [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 256) 200960 \n_________________________________________________________________\ndense_1 (Dense) (None, 128) 32896 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 235,146\nTrainable params: 235,146\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "## Visualise model layers", "_____no_output_____" ] ], [ [ "plot_model(model, show_shapes=True, show_layer_names=True)", "_____no_output_____" ] ], [ [ "## Compile model loss function, optimizer, and preferred metrics", "_____no_output_____" ] ], [ [ "# train model using SGD\nsgd = SGD(1e-2)\nmodel.compile(loss=\"categorical_crossentropy\", \n optimizer=sgd, \n metrics=[\"accuracy\"])", "_____no_output_____" ] ], [ [ "## Set ```tensorboard``` parameters - not compulsory!", "_____no_output_____" ] ], [ [ "log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, \n histogram_freq=1)", "_____no_output_____" ] ], [ [ "## Train model and save history", "_____no_output_____" ] ], [ [ "history = model.fit(trainX, trainY, \n validation_data=(testX,testY), \n epochs=100, \n batch_size=128,\n callbacks=[tensorboard_callback])", "Epoch 1/100\n438/438 [==============================] - 2s 4ms/step - loss: 2.3059 - accuracy: 0.1420 - val_loss: 2.2460 - 
val_accuracy: 0.3663\nEpoch 2/100\n438/438 [==============================] - 1s 3ms/step - loss: 2.2309 - accuracy: 0.3536 - val_loss: 2.1785 - val_accuracy: 0.4581\nEpoch 3/100\n438/438 [==============================] - 1s 3ms/step - loss: 2.1597 - accuracy: 0.4864 - val_loss: 2.0883 - val_accuracy: 0.4735\nEpoch 4/100\n438/438 [==============================] - 1s 3ms/step - loss: 2.0618 - accuracy: 0.5459 - val_loss: 1.9589 - val_accuracy: 0.6129\nEpoch 5/100\n438/438 [==============================] - 1s 3ms/step - loss: 1.9203 - accuracy: 0.6039 - val_loss: 1.7823 - val_accuracy: 0.6325\nEpoch 6/100\n438/438 [==============================] - 1s 3ms/step - loss: 1.7367 - accuracy: 0.6555 - val_loss: 1.5729 - val_accuracy: 0.6772\nEpoch 7/100\n438/438 [==============================] - 1s 2ms/step - loss: 1.5242 - accuracy: 0.6950 - val_loss: 1.3654 - val_accuracy: 0.7379\nEpoch 8/100\n438/438 [==============================] - 1s 3ms/step - loss: 1.3285 - accuracy: 0.7239 - val_loss: 1.1888 - val_accuracy: 0.7531\nEpoch 9/100\n438/438 [==============================] - 1s 3ms/step - loss: 1.1601 - accuracy: 0.7500 - val_loss: 1.0491 - val_accuracy: 0.7741\nEpoch 10/100\n438/438 [==============================] - 1s 3ms/step - loss: 1.0293 - accuracy: 0.7740 - val_loss: 0.9410 - val_accuracy: 0.7899\nEpoch 11/100\n438/438 [==============================] - 2s 4ms/step - loss: 0.9307 - accuracy: 0.7887 - val_loss: 0.8562 - val_accuracy: 0.8013\nEpoch 12/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.8491 - accuracy: 0.8005 - val_loss: 0.7878 - val_accuracy: 0.8117\nEpoch 13/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.7830 - accuracy: 0.8110 - val_loss: 0.7328 - val_accuracy: 0.8237\nEpoch 14/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.7308 - accuracy: 0.8213 - val_loss: 0.6864 - val_accuracy: 0.8292\nEpoch 15/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.6892 - accuracy: 0.8293 - val_loss: 0.6481 - val_accuracy: 0.8349\nEpoch 16/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.6520 - accuracy: 0.8356 - val_loss: 0.6147 - val_accuracy: 0.8422\nEpoch 17/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.6138 - accuracy: 0.8454 - val_loss: 0.5861 - val_accuracy: 0.8466\nEpoch 18/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.5918 - accuracy: 0.8481 - val_loss: 0.5617 - val_accuracy: 0.8525\nEpoch 19/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.5632 - accuracy: 0.8550 - val_loss: 0.5407 - val_accuracy: 0.8581\nEpoch 20/100\n438/438 [==============================] - 2s 4ms/step - loss: 0.5438 - accuracy: 0.8596 - val_loss: 0.5212 - val_accuracy: 0.8606\nEpoch 21/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.5276 - accuracy: 0.8622 - val_loss: 0.5046 - val_accuracy: 0.8634\nEpoch 22/100\n438/438 [==============================] - 1s 2ms/step - loss: 0.5057 - accuracy: 0.8671 - val_loss: 0.4891 - val_accuracy: 0.8677\nEpoch 23/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4918 - accuracy: 0.8709 - val_loss: 0.4757 - val_accuracy: 0.8714\nEpoch 24/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4787 - accuracy: 0.8727 - val_loss: 0.4635 - val_accuracy: 0.8735\nEpoch 25/100\n438/438 [==============================] - 1s 2ms/step - loss: 0.4670 - accuracy: 0.8761 - val_loss: 0.4527 - val_accuracy: 0.8746\nEpoch 26/100\n438/438 
[==============================] - 2s 4ms/step - loss: 0.4549 - accuracy: 0.8788 - val_loss: 0.4430 - val_accuracy: 0.8761\nEpoch 27/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4499 - accuracy: 0.8805 - val_loss: 0.4336 - val_accuracy: 0.8788\nEpoch 28/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4442 - accuracy: 0.8816 - val_loss: 0.4255 - val_accuracy: 0.8806\nEpoch 29/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4314 - accuracy: 0.8834 - val_loss: 0.4179 - val_accuracy: 0.8821\nEpoch 30/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4168 - accuracy: 0.8863 - val_loss: 0.4106 - val_accuracy: 0.8843\nEpoch 31/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.4141 - accuracy: 0.8878 - val_loss: 0.4049 - val_accuracy: 0.8856\nEpoch 32/100\n438/438 [==============================] - 2s 4ms/step - loss: 0.4038 - accuracy: 0.8899 - val_loss: 0.3981 - val_accuracy: 0.8874\nEpoch 33/100\n438/438 [==============================] - 2s 4ms/step - loss: 0.3989 - accuracy: 0.8903 - val_loss: 0.3928 - val_accuracy: 0.8884\nEpoch 34/100\n438/438 [==============================] - 2s 4ms/step - loss: 0.3977 - accuracy: 0.8923 - val_loss: 0.3879 - val_accuracy: 0.8879\nEpoch 35/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3916 - accuracy: 0.8916 - val_loss: 0.3828 - val_accuracy: 0.8907\nEpoch 36/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3867 - accuracy: 0.8917 - val_loss: 0.3783 - val_accuracy: 0.8917\nEpoch 37/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3820 - accuracy: 0.8936 - val_loss: 0.3739 - val_accuracy: 0.8914\nEpoch 38/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3729 - accuracy: 0.8975 - val_loss: 0.3702 - val_accuracy: 0.8917\nEpoch 39/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3687 - accuracy: 0.8989 - val_loss: 0.3663 - val_accuracy: 0.8938\nEpoch 40/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3693 - accuracy: 0.8984 - val_loss: 0.3628 - val_accuracy: 0.8941\nEpoch 41/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3597 - accuracy: 0.9011 - val_loss: 0.3597 - val_accuracy: 0.8944\nEpoch 42/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3630 - accuracy: 0.8988 - val_loss: 0.3561 - val_accuracy: 0.8955\nEpoch 43/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3583 - accuracy: 0.9004 - val_loss: 0.3531 - val_accuracy: 0.8958\nEpoch 44/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3557 - accuracy: 0.8998 - val_loss: 0.3505 - val_accuracy: 0.8958\nEpoch 45/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3534 - accuracy: 0.9014 - val_loss: 0.3474 - val_accuracy: 0.8969\nEpoch 46/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3484 - accuracy: 0.9018 - val_loss: 0.3450 - val_accuracy: 0.8984\nEpoch 47/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3480 - accuracy: 0.9019 - val_loss: 0.3424 - val_accuracy: 0.8989\nEpoch 48/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3455 - accuracy: 0.9022 - val_loss: 0.3400 - val_accuracy: 0.8993\nEpoch 49/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3338 - accuracy: 0.9050 - val_loss: 0.3378 - val_accuracy: 0.8984\nEpoch 50/100\n438/438 [==============================] - 1s 3ms/step - 
loss: 0.3384 - accuracy: 0.9060 - val_loss: 0.3353 - val_accuracy: 0.8999\nEpoch 51/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3325 - accuracy: 0.9069 - val_loss: 0.3333 - val_accuracy: 0.8999\nEpoch 52/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3361 - accuracy: 0.9058 - val_loss: 0.3313 - val_accuracy: 0.9007\nEpoch 53/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3327 - accuracy: 0.9064 - val_loss: 0.3292 - val_accuracy: 0.9009\nEpoch 54/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3316 - accuracy: 0.9051 - val_loss: 0.3273 - val_accuracy: 0.9011\nEpoch 55/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3319 - accuracy: 0.9064 - val_loss: 0.3255 - val_accuracy: 0.9025\nEpoch 56/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3320 - accuracy: 0.9051 - val_loss: 0.3238 - val_accuracy: 0.9016\nEpoch 57/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3209 - accuracy: 0.9097 - val_loss: 0.3218 - val_accuracy: 0.9029\nEpoch 58/100\n438/438 [==============================] - 1s 3ms/step - loss: 0.3155 - accuracy: 0.9132 - val_loss: 0.3199 - val_accuracy: 0.9029\nEpoch 59/100\n300/438 [===================>..........] - ETA: 0s - loss: 0.3241 - accuracy: 0.9089" ] ], [ [ "## Visualise using ```matplotlib```", "_____no_output_____" ] ], [ [ "plt.style.use(\"fivethirtyeight\")\nplt.figure()\nplt.plot(np.arange(0, 100), history.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 100), history.history[\"val_loss\"], label=\"val_loss\", linestyle=\":\")\nplt.plot(np.arange(0, 100), history.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, 100), history.history[\"val_accuracy\"], label=\"val_acc\", linestyle=\":\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.tight_layout()\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "## Inspect using ```tensorboard```\n\nThis won't run on JupyterHub!", "_____no_output_____" ] ], [ [ "%tensorboard --logdir logs/fit", "_____no_output_____" ] ], [ [ "## Classifier metrics", "_____no_output_____" ] ], [ [ "# evaluate network\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX, batch_size=128)\nprint(classification_report(testY.argmax(axis=1), \n predictions.argmax(axis=1), \n target_names=[str(x) for x in lb.classes_]))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06d782f4636163354afe37c8b688d31a4788183
360,421
ipynb
Jupyter Notebook
notebooks/Analysis - Visualize Monte Carlo Results (R 3.6) v2.ipynb
goldford/Ecosystem-Model-Data-Framework
5717bf8239db5ac1e3688ccf69112ef44ba914c6
[ "MIT" ]
null
null
null
notebooks/Analysis - Visualize Monte Carlo Results (R 3.6) v2.ipynb
goldford/Ecosystem-Model-Data-Framework
5717bf8239db5ac1e3688ccf69112ef44ba914c6
[ "MIT" ]
null
null
null
notebooks/Analysis - Visualize Monte Carlo Results (R 3.6) v2.ipynb
goldford/Ecosystem-Model-Data-Framework
5717bf8239db5ac1e3688ccf69112ef44ba914c6
[ "MIT" ]
null
null
null
368.905834
264,144
0.905943
[ [ [ "# G Oldford Feb 19 2022\n\n# visualize monte carlo results from ecosim Monte Carlo\n# uses ggplot2\n#\n# https://erdavenport.github.io/R-ecology-lesson/05-visualization-ggplot2.html", "_____no_output_____" ], [ "library(tidyverse)\nlibrary(matrixStats)", "-- \u001b[1mAttaching packages\u001b[22m --------------------------------------- tidyverse 1.3.0 --\n\u001b[32mv\u001b[39m \u001b[34mggplot2\u001b[39m 3.2.1 \u001b[32mv\u001b[39m \u001b[34mpurrr \u001b[39m 0.3.4\n\u001b[32mv\u001b[39m \u001b[34mtibble \u001b[39m 2.1.3 \u001b[32mv\u001b[39m \u001b[34mdplyr \u001b[39m 1.0.2\n\u001b[32mv\u001b[39m \u001b[34mtidyr \u001b[39m 1.1.2 \u001b[32mv\u001b[39m \u001b[34mstringr\u001b[39m 1.4.0\n\u001b[32mv\u001b[39m \u001b[34mreadr \u001b[39m 1.3.1 \u001b[32mv\u001b[39m \u001b[34mforcats\u001b[39m 0.4.0\n-- \u001b[1mConflicts\u001b[22m ------------------------------------------ tidyverse_conflicts() --\n\u001b[31mx\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mfilter()\u001b[39m masks \u001b[34mstats\u001b[39m::filter()\n\u001b[31mx\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mlag()\u001b[39m masks \u001b[34mstats\u001b[39m::lag()\n\nAttaching package: 'matrixStats'\n\nThe following object is masked from 'package:dplyr':\n\n count\n\n" ], [ "# No biomass found in the auto written MC run out files, so saving from the plot direct from MC plugin\n# The B's are relative to initialization year!\npath_MC_sc1 = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//Results//\"\nfile_MC_sc1 = \"MRM_SealsTKWJuveSalm_Feb172022_NOTKWforce_graph_MCs_500trials.csv\"\nn_MC_runs_1 = 500 # sets cols that correspond to seal B\n\npath_MC_sc2 = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//Results//SealWCT_Feb172022_midB-WCT//mc_Scenario 2b- WCT Forcing - Mid B//\"\nfile_MC_sc2 = \"BiomassDirectSaveMC_test_2022-02-19_500runs.csv\"\nn_MC_runs_2 = 500 \n\nrelB_base = 0.134 # base yr seal B hard coded - careful\npath_TS = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//\"\nfile_TS = \"SealWCT_B_timeseries_Scen2b_rev20220217v3_MidB.csv\"\n\n# ==== read MC results file ====\nheader_lines = 1\n\nheader_lines = 1\nresults_df_sc1 <- read.csv(paste(path_MC_sc1, file_MC_sc1,sep=\"\"), skip = header_lines)\n# rename col and get seals B only\nresults_trim_TKWForcemid_sc1 = results_df_sc1 %>% rename(year = Data) %>% \n select(c(\"year\",starts_with(\"X2..Seals\"))) %>%\n mutate(year_int = round(year,0)) %>%\n filter(year_int < 2022) #deals with single row w/ erroroneous large year at end of TS data\n# head(results_trim_TKWForcemid_sc1)\n\nresults_df_sc2 <- read.csv(paste(path_MC_sc2, file_MC_sc2,sep=\"\"), skip = header_lines)\n# rename col and get seals B only\nresults_trim_TKWForcemid_sc2 = results_df_sc2 %>% rename(year = Data) %>% \n select(c(\"year\",starts_with(\"X2..Seals\"))) %>%\n mutate(year_int = round(year,0)) %>%\n filter(year_int < 2022) #deals with single row w/ erroroneous large year at end of TS data\n \n# head(results_trim_TKWForcemid_sc2)\n\n# ==== read TS reference file ====\nheader_lines = 3\nsealobs_df <- read.csv(paste(path, file,sep=\"\"), skip = header_lines)\n#relB_base = sealobs_df$BiomassAbs[1]\n# convert to relative B\nsealobs_df$SealsObsRelB = sealobs_df$BiomassAbs / relB_base\nseals_obs_relB = sealobs_df %>% rename(year = Type) %>% \n select(c(\"year\",\"SealsObsRelB\")) %>%\n mutate(source = \"surveys\")\n\n# pivot tables to long, for scatter plotting\nsc1_df = 
results_trim_TKWForcemid_sc1 %>% select(-year) %>% \n pivot_longer(!year_int, names_to = \"Mc_run_sc\", values_to = \"RelB\") %>% \n mutate(scenario = \"No TKW\")\nsc2_df = results_trim_TKWForcemid_sc2 %>% select(-year) %>% \n pivot_longer(!year_int, names_to = \"Mc_run_sc\", values_to = \"RelB\") %>% \n mutate(scenario = \"TKW\")\n\n# combine\nsc_df = bind_rows(sc1_df,sc2_df)\n\n\n# # rename 'data' col to Year\n# results_trim = results_df %>% rename(year = Data) %>% \n# select(c(0:n_MC_runs))\n# head(results_trim)", "_____no_output_____" ], [ "# to do - eliminate scenarios where seals go extinct. \n# likely this is due to issues with total catch (forcing) time series. \n# EwE doesn't allow for F forcing.", "_____no_output_____" ], [ "ggplot(data = sc_df, aes(x = year_int, y = RelB)) +\n geom_point(alpha = 0.01, aes(color=scenario))", "Warning message:\n\"Removed 1680 rows containing missing values (geom_point).\"" ], [ "# visualize, scenario 1 vs scenario 2, after year 2000 when seals plateau\nsc_2000fwd_df = sc_df %>% filter(year_int < 2000) %>% \n filter(RelB > 0.05)\nggplot(data = sc_2000fwd_df, aes(x = scenario, y = RelB)) +\n geom_boxplot()", "_____no_output_____" ], [ "# OLD CODE BELOW", "_____no_output_____" ], [ "\n\n# for geom_ribbon plots get upper and lower bound \ncolumns <- grep(\"X2..Seals\", colnames(results_trim_TKWForcemid))\n\nresults_trim_TKWmid = results_trim_TKWForcemid %>% \n mutate(Mean= rowMeans(.[columns],,na.rm = TRUE), \n logMean = rowMeans(log(.[columns]),na.rm = TRUE),\n stdev=rowSds(as.matrix(.[columns]),na.rm = TRUE), \n stdev_log=rowSds(as.matrix(log(.[columns])),na.rm = TRUE)) %>%\n# 95% confidence interv https://www.mathsisfun.com/data/confidence-interval.html\n mutate(upper_B = Mean + (1.96 * stdev / sqrt(n_MC_runs)), \n lower_B = Mean - (1.96 * stdev / sqrt(n_MC_runs))) %>%\n mutate(year_int = round(year,0)) %>%\n filter(year_int < 2022) %>% #deals with weird super-large year at end of TS data\n select(c(\"year_int\",\"Mean\", \"stdev\", \"lower_B\",\"upper_B\")) %>%\n mutate(source = \"EwE\") %>%\n rename(year = year_int) %>%\n # 12 vals per year - average the stats within years\n group_by(year) %>% dplyr::summarize(mean_yr = mean(Mean, na.rm=TRUE), \n mean_std = mean(stdev, na.rm=TRUE), \n mean_lwrB = mean(lower_B, na.rm=TRUE),\n mean_uppB = mean(upper_B, na.rm=TRUE))\n\nresults_trim_TKWmid\n", "`summarise()` ungrouping output (override with `.groups` argument)\n" ], [ "model_obs_binding = bind_rows(results_trim2,seals_obs_relB)\n\n(model_obs_binding)", "_____no_output_____" ], [ "ggplot(data = roughrundata_df, aes(x = year, y = seals_sc1)) +\n geom_line() +\n geom_ribbon(aes(ymin=seals_sc1_lo, ymax=seals_sc1_up),alpha = 0.1, fill = \"blue\") +\n geom_line(aes(y=seals_sc2b)) +\n geom_ribbon(aes(y=seals_sc2b, ymin=seals_sc2b_lo, ymax=seals_sc2b_up),alpha = 0.1, fill = \"green\") +\n geom_point(data = seals_obs_relB_norecent, aes(y=SealsObsRelB_mt, x=year),alpha = 0.8, color = \"black\") + \n ylab(\"Seal Biomass Density (mt km-2)\") ", "_____no_output_____" ], [ "# # for geom_ribbon plots get upper and lower bound \n# columns <- c(2:n_MC_runs)\n\n# Old stuff\n# results_trim2 = results_trim_TKWForcemid %>% \n# mutate(Mean= rowMeans(.[columns]), \n# logMean = rowMeans(log(.[columns])),\n# stdev=rowSds(as.matrix(.[columns])), \n# stdev_log=rowSds(as.matrix(log(.[columns])))) %>%\n# mutate(upper_B = Mean + (1.96 * stdev / sqrt(n_MC_runs)), # 95% confidence interv https://www.mathsisfun.com/data/confidence-interval.html\n# lower_B = Mean - (1.96 * stdev / 
sqrt(n_MC_runs))) %>%\n# mutate(year_int = round(year,0)) %>%\n# filter(year_int < 2022) %>% #deals with weird super-large year at end of TS data\n# select(c(\"year_int\",\"Mean\", \"stdev\", \"lower_B\",\"upper_B\")) %>%\n# mutate(source = \"EwE\") %>%\n# rename(year = year_int) %>%\n# # at this point there are 12 vals per year but these appear to jump every year\n# # below will average the stats across each year\n# group_by(year) %>% dplyr::summarize(mean_yr = mean(Mean, na.rm=TRUE), \n# mean_std = mean(stdev, na.rm=TRUE), \n# mean_lwrB = mean(lower_B, na.rm=TRUE),\n# mean_uppB = mean(upper_B, na.rm=TRUE))\n\n\n #mutate(upper_B = exp(upper_logB), \n # lower_B = exp(lower_logB)) \n\n\n\n# pivot wide to long\n#results_piv = results_trim2 %>% pivot_longer(\n# cols = starts_with(\"X2\"),\n# names_to = \"Seals\",\n# names_prefix = \"\",\n# values_to = \"B\",\n# values_drop_na = TRUE\n# )\n\n# head(results_trim2)\n# I can't find Biomass in the auto written MC run out files, so I'm saving from the plot in the MC plugin\n#path = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//Results//SealWCT_Feb172022_midB-WCT//mc_Scenario 3c- MonteCarlo TKWForce Mid//\"\n#file = \"BiomassPlotSave_Scen3b_TKWForce_min.csv\"\n#file = \"BiomassPlotSave_Scen3c_TKWForce_mid_500runs.csv\"\n#file = \"BiomassDirectSaveMC_test_2022-02-19.csv\"\n\n#starts_with(results_trim_TKWForcemid,\"X2..Seals\")\n#grep(\"X2..Seals\", colnames(results_trim_TKWForcemid))\n#results_trim_TKWForcemid\n\n# # for geom_ribbon plots get upper and lower bound \n# columns <- grep(\"X2..Seals\", colnames(results_trim_TKWForcemid))\n\n# results_trim_TKWmid = results_trim_TKWForcemid %>% \n# mutate(Mean= rowMeans(.[columns],,na.rm = TRUE), \n# logMean = rowMeans(log(.[columns]),na.rm = TRUE),\n# stdev=rowSds(as.matrix(.[columns]),na.rm = TRUE), \n# stdev_log=rowSds(as.matrix(log(.[columns])),na.rm = TRUE)) %>%\n# mutate(upper_B = Mean + (1.96 * stdev / sqrt(n_MC_runs)), # 95% confidence interv https://www.mathsisfun.com/data/confidence-interval.html\n# lower_B = Mean - (1.96 * stdev / sqrt(n_MC_runs))) %>%\n# mutate(year_int = round(year,0)) %>%\n# filter(year_int < 2022) %>% #deals with weird super-large year at end of TS data\n# select(c(\"year_int\",\"Mean\", \"stdev\", \"lower_B\",\"upper_B\")) %>%\n# mutate(source = \"EwE\") %>%\n# rename(year = year_int) %>%\n# # at this point there are 12 vals per year but these appear to jump every year\n# # below will average the stats across each year\n# group_by(year) %>% dplyr::summarize(mean_yr = mean(Mean, na.rm=TRUE), \n# mean_std = mean(stdev, na.rm=TRUE), \n# mean_lwrB = mean(lower_B, na.rm=TRUE),\n# mean_uppB = mean(upper_B, na.rm=TRUE))\n\n\n #mutate(upper_B = exp(upper_logB), \n # lower_B = exp(lower_logB)) \n\n\n\n# pivot wide to long\n#results_piv = results_trim2 %>% pivot_longer(\n# cols = starts_with(\"X2\"),\n# names_to = \"Seals\",\n# names_prefix = \"\",\n# values_to = \"B\",\n# values_drop_na = TRUE\n# )\n\n# tail(results_trim_TKWmid)\n\n# read seal time series data\n# convert from abs to rel to match MC out\n# path = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//\"\n#file = \"SealTKW_timeseries_Scen1_NoTKWForcing.csv\"\n# file = \"SealWCT_B_timeseries_Scen2b_rev20220217v3_MidB.csv\"\n\n\n# header_lines = 3\n# sealobs_df <- read.csv(paste(path, file,sep=\"\"), skip = header_lines)\n# #relB_base = sealobs_df$BiomassAbs[1]\n# relB_base = 0.134\n# sealobs_df$SealsObsRelB = sealobs_df$BiomassAbs / 
relB_base\n# seals_obs_relB = sealobs_df %>% rename(year = Type) %>% \n# select(c(\"year\",\"SealsObsRelB\")) %>%\n# mutate(source = \"surveys\")\n# #sealobs_df\n# (seals_obs_relB)\n# merge two tables\n# model_obs_binding = bind_rows(results_trim2,seals_obs_relB)\n\n# (model_obs_binding)", "_____no_output_____" ], [ "SealsObsRelB_1970on = seals_obs_relB %>% filter(year > 1969)", "_____no_output_____" ], [ "ggplot(data = results_trim2, aes(x = year, y = mean_yr)) +\n geom_ribbon(aes(ymin=mean_lwrB, ymax=mean_uppB),alpha = 0.1, color = \"blue\") +\n geom_ribbon(data = results_trim_TKWmid, aes(y=mean_yr, ymin=mean_lwrB, ymax=mean_uppB),alpha = 0.1, color = \"blue\") +\n geom_point(data = SealsObsRelB_1970on, aes(y=SealsObsRelB, x=year),alpha = 0.8, color = \"black\") + \n ylab(\"Relative Seal Biomass (SoG)\")", "Warning message:\n\"Ignoring unknown aesthetics: y\"" ], [ "# temporary\n# read seal time series data\n# convert from abs to rel to match MC out\npath = \"C://Users//Greig//Sync//PSF//EwE//Georgia Strait 2021//UTL_model//6_MRM_SealTKWJuveSlmn//results//\"\nfile = \"biomass_annual_justresultsnoMC_scen1scen2b.csv\"\nheader_lines = 0\nroughrundata_df <- read.csv(paste(path, file,sep=\"\"), skip = header_lines)\n\n\nhead(roughrundata_df)\n\n", "_____no_output_____" ], [ "ggplot(data = roughrundata_df, aes(x = year, y = seals_sc1)) +\n geom_line() +\n geom_ribbon(aes(ymin=seals_sc1_lo, ymax=seals_sc1_up),alpha = 0.1, fill = \"blue\") +\n geom_line(aes(y=seals_sc2b)) +\n geom_ribbon(aes(y=seals_sc2b, ymin=seals_sc2b_lo, ymax=seals_sc2b_up),alpha = 0.1, fill = \"green\") +\n geom_point(data = seals_obs_relB_norecent, aes(y=SealsObsRelB_mt, x=year),alpha = 0.8, color = \"black\") + \n ylab(\"Seal Biomass Density (mt km-2)\") ", "Warning message:\n\"Ignoring unknown aesthetics: y\"" ], [ "seals_obs_relB$SealsObsRelB_mt = seals_obs_relB$SealsObsRelB * 0.169", "_____no_output_____" ], [ "seals_obs_relB_norecent = seals_obs_relB %>% filter(seals_obs_relB < 2015)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]